4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/translator.h"
29 #include "exec/helper-proto.h"
30 #include "exec/helper-gen.h"
31 #include "helper-tcg.h"
33 #include "trace-tcg.h"
36 #define PREFIX_REPZ 0x01
37 #define PREFIX_REPNZ 0x02
38 #define PREFIX_LOCK 0x04
39 #define PREFIX_DATA 0x08
40 #define PREFIX_ADR 0x10
41 #define PREFIX_VEX 0x20
42 #define PREFIX_REX 0x40
52 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
53 #define CASE_MODRM_MEM_OP(OP) \
54 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
55 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
56 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
58 #define CASE_MODRM_OP(OP) \
59 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
60 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
61 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
62 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
64 //#define MACRO_TEST 1
66 /* global register indexes */
/* Global TCG variables mapping the guest condition-code state and
   architectural registers; referenced by all generator functions below. */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2; /* lazy-flag operands */
static TCGv_i32 cpu_cc_op;                       /* current CC_OP_* value when stored */
static TCGv cpu_regs[CPU_NB_REGS];               /* general-purpose registers */
static TCGv cpu_seg_base[6];                     /* per-segment base addresses */
static TCGv_i64 cpu_bndl[4];                     /* MPX bounds, lower */
static TCGv_i64 cpu_bndu[4];                     /* MPX bounds, upper */
74 #include "exec/gen-icount.h"
76 typedef struct DisasContext
{
77 DisasContextBase base
;
79 target_ulong pc
; /* pc = eip + cs_base */
80 target_ulong pc_start
; /* pc at TB entry */
81 target_ulong cs_base
; /* base of CS segment */
86 int8_t override
; /* -1 if no override, else R_CS, R_DS, etc */
89 #ifndef CONFIG_USER_ONLY
90 uint8_t cpl
; /* code priv level */
91 uint8_t iopl
; /* i/o priv level */
93 uint8_t vex_l
; /* vex vector length */
94 uint8_t vex_v
; /* vex vvvv register, without 1's complement. */
95 uint8_t popl_esp_hack
; /* for correct popl with esp base handling */
96 uint8_t rip_offset
; /* only used in x86_64, but left for simplicity */
104 bool jmp_opt
; /* use direct block chaining for direct jumps */
105 bool repz_opt
; /* optimize jumps within repz instructions */
108 CCOp cc_op
; /* current CC operation */
109 int mem_index
; /* select memory access functions */
110 uint32_t flags
; /* all execution flags */
112 int cpuid_ext_features
;
113 int cpuid_ext2_features
;
114 int cpuid_ext3_features
;
115 int cpuid_7_0_ebx_features
;
116 int cpuid_xsave_features
;
118 /* TCG local temps */
124 /* TCG local register indexes (only used inside old micro ops) */
136 /* The environment in which user-only runs is constrained. */
137 #ifdef CONFIG_USER_ONLY
142 #define PE(S) (((S)->flags & HF_PE_MASK) != 0)
143 #define CPL(S) ((S)->cpl)
144 #define IOPL(S) ((S)->iopl)
146 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
147 #define VM86(S) false
148 #define CODE32(S) true
150 #define ADDSEG(S) false
152 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
153 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
154 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
155 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
157 #if !defined(TARGET_X86_64)
158 #define CODE64(S) false
160 #elif defined(CONFIG_USER_ONLY)
161 #define CODE64(S) true
164 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
165 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
169 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
170 #define REX_W(S) ((S)->rex_w)
171 #define REX_R(S) ((S)->rex_r + 0)
172 #define REX_X(S) ((S)->rex_x + 0)
173 #define REX_B(S) ((S)->rex_b + 0)
175 #define REX_PREFIX(S) false
176 #define REX_W(S) false
183 * Many sysemu-only helpers are not reachable for user-only.
184 * Define stub generators here, so that we need not either sprinkle
185 * ifdefs through the translator, nor provide the helper function.
187 #define STUB_HELPER(NAME, ...) \
188 static inline void gen_helper_##NAME(__VA_ARGS__) \
189 { qemu_build_not_reached(); }
191 #ifdef CONFIG_USER_ONLY
192 STUB_HELPER(set_dr
, TCGv_env env
, TCGv_i32 reg
, TCGv val
)
195 static void gen_eob(DisasContext
*s
);
196 static void gen_jr(DisasContext
*s
, TCGv dest
);
197 static void gen_jmp(DisasContext
*s
, target_ulong eip
);
198 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
);
199 static void gen_op(DisasContext
*s1
, int op
, MemOp ot
, int d
);
201 /* i386 arith/logic operations */
221 OP_SHL1
, /* undocumented */
237 /* I386 int registers */
238 OR_EAX
, /* MUST be even numbered */
247 OR_TMP0
= 16, /* temporary operand register */
249 OR_A0
, /* temporary register used when doing address evaluation */
/* Bit set if the global variable is live after setting CC_OP to X.
   Used by set_cc_op() to discard dead flag operands, and by
   gen_compute_eflags() to avoid reading values that were never written. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    /* SUB also keeps the pre-subtraction destination in cc_srcT. */
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
/* Switch the translator's lazy-flags mode to OP, discarding any flag
   operands that are dead in the new mode (per cc_op_live). */
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        /* New value must be flushed to env->cc_op before a helper reads it. */
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}
/* Flush the translator's current cc_op to the cpu_cc_op global if dirty,
   so generated helpers see the correct lazy-flags mode. */
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
328 #define NB_OP_SIZES 4
330 #else /* !TARGET_X86_64 */
332 #define NB_OP_SIZES 3
334 #endif /* !TARGET_X86_64 */
336 #if defined(HOST_WORDS_BIGENDIAN)
337 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
338 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
339 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
340 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
341 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
343 #define REG_B_OFFSET 0
344 #define REG_H_OFFSET 1
345 #define REG_W_OFFSET 0
346 #define REG_L_OFFSET 0
347 #define REG_LH_OFFSET 4
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
/* Select the size of a push/pop operation.  In 64-bit mode the only
   legal sizes are 16 and 64 bits; otherwise OT is used as-is. */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}
/* Select the size of the stack pointer: 64-bit in long mode,
   else 32- or 16-bit depending on the SS segment's B bit. */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}
/* Select only size 64 else 32.  Used for SSE operand sizes. */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}
/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes. */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}
/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes. */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
/* Store T0 into general register REG with operand size OT, honoring
   the x86 partial-register rules (AH/CH/DH/BH, 16-bit insert,
   32-bit zero-extension of the upper half). */
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (!byte_reg_is_xH(s, reg)) {
            /* Low byte: merge into bits 7..0, preserve the rest. */
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            /* AH/CH/DH/BH: merge into bits 15..8 of register reg-4. */
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        /* 16-bit write preserves the upper bits of the register. */
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}
/* Load general register REG into T0 with operand size OT; the only
   special case is reading the high-byte registers AH/CH/DH/BH. */
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        /* AH/CH/DH/BH: extract bits 15..8 of register reg-4. */
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
/* Add immediate VAL to the address register A0, truncating the result
   to 32 bits outside of 64-bit code. */
static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}
/* Store DEST into env->eip (an indirect jump target). */
static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}
/* Add immediate VAL to register REG with operand size SIZE, writing
   back through gen_op_mov_reg_v so partial-register rules apply. */
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
/* Add T0 to register REG with operand size SIZE (used for the string
   instructions' ESI/EDI increment, where T0 holds the Dshift value). */
static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
/* Little-endian guest load of size IDX from address A0 into T0. */
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}
/* Little-endian guest store of size IDX of T0 to address A0. */
static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}
/* Write back T0 to the destination D: memory at A0 when D == OR_TMP0,
   otherwise general register D. */
static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}
/* Store the immediate PC value into env->eip. */
static inline void gen_jmp_im(DisasContext *s, target_ulong pc)
{
    tcg_gen_movi_tl(s->tmp0, pc);
    gen_op_jmp_v(s->tmp0);
}
/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override. */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            /* 64-bit addresses ignore segment bases except via override. */
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        if (ovr_seg < 0) {
            ovr_seg = def_seg;
        }
        tcg_gen_ext16u_tl(s->A0, a0);
        /* ADDSEG will only be false in 16-bit mode for LEA. */
        if (!ADDSEG(s)) {
            return;
        }
        a0 = s->A0;
        break;
    default:
        tcg_abort();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            /* In 64-bit code, truncate the offset before adding the base. */
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            /* Legacy mode: add base, then truncate the linear address. */
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}
/* Compute the string-source address DS(or override):ESI into A0. */
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}
/* Compute the string-destination address ES:EDI into A0.
   ES cannot be overridden for string destinations, hence -1. */
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}
/* Load T0 with the per-iteration pointer delta for string ops:
   env->df (+1/-1 per DF) shifted left by the operand-size log2. */
static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
    tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);
}
/* Sign- or zero-extend SRC at width SIZE into DST and return DST; when
   SIZE is already full target width, return SRC unchanged (no copy). */
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        /* Already full width: callers must check for aliasing with SRC. */
        return src;
    }
}
/* Zero-extend REG in place at width OT. */
static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}
/* Sign-extend REG in place at width OT. */
static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
/* Branch to LABEL1 if (E)CX, truncated to address size SIZE, is nonzero. */
void gen_op_jnz_ecx(DisasContext *s, MemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(size, s->tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, s->tmp0, 0, label1);
}
/* Branch to LABEL1 if (E)CX, truncated to address size SIZE, is zero. */
void gen_op_jz_ecx(DisasContext *s, MemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(size, s->tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
}
/* Emit the size-appropriate IN helper: read port N into V. */
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}
/* Emit the size-appropriate OUT helper: write V to port N. */
static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}
/* Emit the I/O permission checks for an IN/OUT-family instruction whose
   port is in T0: the TSS I/O bitmap check when CPL/IOPL/VM86 require it,
   and the SVM IOIO intercept check when running under a guest. */
static void gen_check_io(DisasContext *s, MemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, s->tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, s->tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, s->tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if (s->flags & HF_GUEST_MASK) {
        /* Helper may raise the intercept exit; state must be in sync. */
        gen_update_cc_op(s);
        gen_jmp_im(s, cur_eip);
        svm_flags |= (1 << (4 + ot));       /* encode access size */
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        gen_helper_svm_check_io(cpu_env, s->tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
/* One MOVS iteration: copy [DS:ESI] to [ES:EDI], then step both
   pointers by the DF-directed delta. */
static inline void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
/* Record only the result (T0) for the lazy flags (logic ops). */
static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}
/* Record result (T0) and second operand (T1) for the lazy flags. */
static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}
/* Record result (T0), operand (T1), and carry-in (REG) for the lazy
   flags of ADC/SBB. */
static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}
/* Set the flag operand for TEST: cc_dst = T0 & T1. */
static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}
/* Record flag operands for NEG: treated as 0 - T0, so srcT = 0 and
   cc_src is the negated operand. */
static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;             /* already materialized */
    }
    if (s->cc_op == CC_OP_CLR) {
        /* CLR is known statically: only Z and P are set. */
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live. */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}
776 typedef struct CCPrepare
{
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0. */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        /* Logic ops, CLR, POPCNT always clear C. */
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        /* INC/DEC preserve C, which was saved in cc_src. */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        /* BMI BLSI/BLSMSK/BLSR: C = (src == 0). */
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB. */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    /* Parity has no fast path: materialize all flags first. */
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}
/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            /* S is the sign bit of the result at the operation's width. */
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}
/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        /* O is tracked separately in cc_src2 for ADOX/ADCOX. */
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}
/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        /* Z = (popcnt source == 0). */
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            /* Z = (result == 0) at the operation's width. */
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranted not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;                /* low bit of b inverts the condition */
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case. */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS. */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;  /* avoid clobbering the flags source */
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
/* Store into REG the 0/1 value of condition B (SETcc semantics). */
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        /* The prepared value is already 0/1 (or its complement). */
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    /* Single-bit mask tested for non-zero: shift+and beats setcond. */
    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}
/* Compute eflags.C into REG as a 0/1 value (condition "below"). */
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranted not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranted not to be used.
   A translation block must end soon. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    /* Flags become unknown past the branch: sync and go dynamic. */
    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem.
   Emit the REP-prefix loop header: fall through to the string op when
   ECX != 0, otherwise jump to NEXT_EIP.  Returns the loop-back label. */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
/* One STOS iteration: store (E)AX to [ES:EDI], step EDI. */
static inline void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
/* One LODS iteration: load [DS:ESI] into (E)AX, step ESI. */
static inline void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}
/* One SCAS iteration: compare (E)AX with [ES:EDI], set flags, step EDI. */
static inline void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
/* One CMPS iteration: compare [DS:ESI] with [ES:EDI], set flags,
   step both pointers. */
static inline void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}
/* Emit an I/O breakpoint check after an IN/OUT access to T_PORT of
   size (1 << OT), when the CPU has I/O breakpoints armed. */
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_const_i32(1 << ot);
        TCGv t_next = tcg_const_tl(s->pc - s->cs_base);

        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
        tcg_temp_free_i32(t_size);
        tcg_temp_free(t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
/* One INS iteration: read port DX into [ES:EDI], step EDI. */
static inline void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); /* port is 16-bit */
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}
/* One OUTS iteration: write [DS:ESI] to port DX, step ESI. */
static inline void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff); /* port is 16-bit */
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}
1206 /* same method as Valgrind : we generate jumps to current or next
1208 #define GEN_REPZ(op) \
1209 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \
1210 target_ulong cur_eip, target_ulong next_eip) \
1213 gen_update_cc_op(s); \
1214 l2 = gen_jz_ecx_string(s, next_eip); \
1215 gen_ ## op(s, ot); \
1216 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \
1217 /* a loop would cause two single step exceptions if ECX = 1 \
1218 before rep string_insn */ \
1220 gen_op_jz_ecx(s, s->aflag, l2); \
1221 gen_jmp(s, cur_eip); \
1224 #define GEN_REPZ2(op) \
1225 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, \
1226 target_ulong cur_eip, \
1227 target_ulong next_eip, \
1231 gen_update_cc_op(s); \
1232 l2 = gen_jz_ecx_string(s, next_eip); \
1233 gen_ ## op(s, ot); \
1234 gen_op_add_reg_im(s, s->aflag, R_ECX, -1); \
1235 gen_update_cc_op(s); \
1236 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1238 gen_op_jz_ecx(s, s->aflag, l2); \
1239 gen_jmp(s, cur_eip); \
/* Emit the x87 helper for arithmetic op OP (0..7 from the opcode's
   reg field) between ST0 and FT0, result in ST0.  Ops 2 and 3
   (FCOM/FCOMP) both compare; the pop is handled by the caller. */
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}
/* NOTE the exception in "r" op ordering */
/* Emit the x87 helper for op OP between ST(opreg) and ST0, result in
   ST(opreg); SUB/SUBR and DIV/DIVR are swapped relative to the
   ST0_FT0 variant per the x86 encoding. */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}
/* Raise guest exception TRAPNO at CUR_EIP and terminate the TB. */
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(s, cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}
/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode. */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
}
/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF, s->pc_start - s->cs_base);
}
/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
/* Emit one integer ALU operation OP of size OT: T1 is the second
   operand, D the destination (register or OR_TMP0 for memory).  With
   PREFIX_LOCK and a memory destination, atomic TCG ops are used and
   the load is folded into the read-modify-write. */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory. */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch (op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);     /* carry-in */
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);     /* borrow-in */
        if (s1->prefix & PREFIX_LOCK) {
            /* Atomic form: add the negated (T1 + carry). */
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            /* Atomic form: fetch-add of -T1; old value lands in cc_srcT. */
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        /* CMP writes no destination, only the flag operands. */
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
1468 /* if d == OR_TMP0, it means memory operand (address in A0) */
1469 static void gen_inc(DisasContext
*s1
, MemOp ot
, int d
, int c
)
1471 if (s1
->prefix
& PREFIX_LOCK
) {
1473 /* Lock prefix when destination is not memory */
1474 gen_illegal_opcode(s1
);
1477 tcg_gen_movi_tl(s1
->T0
, c
> 0 ? 1 : -1);
1478 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1479 s1
->mem_index
, ot
| MO_LE
);
1482 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1484 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1486 tcg_gen_addi_tl(s1
->T0
, s1
->T0
, (c
> 0 ? 1 : -1));
1487 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1490 gen_compute_eflags_c(s1
, cpu_cc_src
);
1491 tcg_gen_mov_tl(cpu_cc_dst
, s1
->T0
);
1492 set_cc_op(s1
, (c
> 0 ? CC_OP_INCB
: CC_OP_DECB
) + ot
);
1495 static void gen_shift_flags(DisasContext
*s
, MemOp ot
, TCGv result
,
1496 TCGv shm1
, TCGv count
, bool is_right
)
1498 TCGv_i32 z32
, s32
, oldop
;
1501 /* Store the results into the CC variables. If we know that the
1502 variable must be dead, store unconditionally. Otherwise we'll
1503 need to not disrupt the current contents. */
1504 z_tl
= tcg_const_tl(0);
1505 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1506 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1507 result
, cpu_cc_dst
);
1509 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1511 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1512 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1515 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1517 tcg_temp_free(z_tl
);
1519 /* Get the two potential CC_OP values into temporaries. */
1520 tcg_gen_movi_i32(s
->tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1521 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1524 tcg_gen_movi_i32(s
->tmp3_i32
, s
->cc_op
);
1525 oldop
= s
->tmp3_i32
;
1528 /* Conditionally store the CC_OP value. */
1529 z32
= tcg_const_i32(0);
1530 s32
= tcg_temp_new_i32();
1531 tcg_gen_trunc_tl_i32(s32
, count
);
1532 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, s
->tmp2_i32
, oldop
);
1533 tcg_temp_free_i32(z32
);
1534 tcg_temp_free_i32(s32
);
1536 /* The CC_OP value is no longer predictable. */
1537 set_cc_op(s
, CC_OP_DYNAMIC
);
1540 static void gen_shift_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1541 int is_right
, int is_arith
)
1543 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1546 if (op1
== OR_TMP0
) {
1547 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1549 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1552 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1553 tcg_gen_subi_tl(s
->tmp0
, s
->T1
, 1);
1557 gen_exts(ot
, s
->T0
);
1558 tcg_gen_sar_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1559 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
1561 gen_extu(ot
, s
->T0
);
1562 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1563 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
1566 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1567 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
1571 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1573 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, s
->T1
, is_right
);
1576 static void gen_shift_rm_im(DisasContext
*s
, MemOp ot
, int op1
, int op2
,
1577 int is_right
, int is_arith
)
1579 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1583 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1585 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1591 gen_exts(ot
, s
->T0
);
1592 tcg_gen_sari_tl(s
->tmp4
, s
->T0
, op2
- 1);
1593 tcg_gen_sari_tl(s
->T0
, s
->T0
, op2
);
1595 gen_extu(ot
, s
->T0
);
1596 tcg_gen_shri_tl(s
->tmp4
, s
->T0
, op2
- 1);
1597 tcg_gen_shri_tl(s
->T0
, s
->T0
, op2
);
1600 tcg_gen_shli_tl(s
->tmp4
, s
->T0
, op2
- 1);
1601 tcg_gen_shli_tl(s
->T0
, s
->T0
, op2
);
1606 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1608 /* update eflags if non zero shift */
1610 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
1611 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
1612 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1616 static void gen_rot_rm_T1(DisasContext
*s
, MemOp ot
, int op1
, int is_right
)
1618 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1622 if (op1
== OR_TMP0
) {
1623 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1625 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1628 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1632 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1633 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
1634 tcg_gen_muli_tl(s
->T0
, s
->T0
, 0x01010101);
1637 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1638 tcg_gen_deposit_tl(s
->T0
, s
->T0
, s
->T0
, 16, 16);
1641 #ifdef TARGET_X86_64
1643 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1644 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
1646 tcg_gen_rotr_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1648 tcg_gen_rotl_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1650 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1655 tcg_gen_rotr_tl(s
->T0
, s
->T0
, s
->T1
);
1657 tcg_gen_rotl_tl(s
->T0
, s
->T0
, s
->T1
);
1663 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1665 /* We'll need the flags computed into CC_SRC. */
1666 gen_compute_eflags(s
);
1668 /* The value that was "rotated out" is now present at the other end
1669 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1670 since we've computed the flags into CC_SRC, these variables are
1673 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1674 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1675 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1677 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1678 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1680 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1681 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1683 /* Now conditionally store the new CC_OP value. If the shift count
1684 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1685 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1686 exactly as we computed above. */
1687 t0
= tcg_const_i32(0);
1688 t1
= tcg_temp_new_i32();
1689 tcg_gen_trunc_tl_i32(t1
, s
->T1
);
1690 tcg_gen_movi_i32(s
->tmp2_i32
, CC_OP_ADCOX
);
1691 tcg_gen_movi_i32(s
->tmp3_i32
, CC_OP_EFLAGS
);
1692 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1693 s
->tmp2_i32
, s
->tmp3_i32
);
1694 tcg_temp_free_i32(t0
);
1695 tcg_temp_free_i32(t1
);
1697 /* The CC_OP value is no longer predictable. */
1698 set_cc_op(s
, CC_OP_DYNAMIC
);
1701 static void gen_rot_rm_im(DisasContext
*s
, MemOp ot
, int op1
, int op2
,
1704 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1708 if (op1
== OR_TMP0
) {
1709 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1711 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1717 #ifdef TARGET_X86_64
1719 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1721 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1723 tcg_gen_rotli_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1725 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1730 tcg_gen_rotri_tl(s
->T0
, s
->T0
, op2
);
1732 tcg_gen_rotli_tl(s
->T0
, s
->T0
, op2
);
1743 shift
= mask
+ 1 - shift
;
1745 gen_extu(ot
, s
->T0
);
1746 tcg_gen_shli_tl(s
->tmp0
, s
->T0
, shift
);
1747 tcg_gen_shri_tl(s
->T0
, s
->T0
, mask
+ 1 - shift
);
1748 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
1754 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1757 /* Compute the flags into CC_SRC. */
1758 gen_compute_eflags(s
);
1760 /* The value that was "rotated out" is now present at the other end
1761 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1762 since we've computed the flags into CC_SRC, these variables are
1765 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1766 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1767 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1769 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1770 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1772 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1773 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1774 set_cc_op(s
, CC_OP_ADCOX
);
1778 /* XXX: add faster immediate = 1 case */
1779 static void gen_rotc_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1782 gen_compute_eflags(s
);
1783 assert(s
->cc_op
== CC_OP_EFLAGS
);
1787 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1789 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1794 gen_helper_rcrb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1797 gen_helper_rcrw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1800 gen_helper_rcrl(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1802 #ifdef TARGET_X86_64
1804 gen_helper_rcrq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1813 gen_helper_rclb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1816 gen_helper_rclw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1819 gen_helper_rcll(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1821 #ifdef TARGET_X86_64
1823 gen_helper_rclq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1831 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1834 /* XXX: add faster immediate case */
1835 static void gen_shiftd_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1836 bool is_right
, TCGv count_in
)
1838 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1842 if (op1
== OR_TMP0
) {
1843 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1845 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1848 count
= tcg_temp_new();
1849 tcg_gen_andi_tl(count
, count_in
, mask
);
1853 /* Note: we implement the Intel behaviour for shift count > 16.
1854 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1855 portion by constructing it as a 32-bit value. */
1857 tcg_gen_deposit_tl(s
->tmp0
, s
->T0
, s
->T1
, 16, 16);
1858 tcg_gen_mov_tl(s
->T1
, s
->T0
);
1859 tcg_gen_mov_tl(s
->T0
, s
->tmp0
);
1861 tcg_gen_deposit_tl(s
->T1
, s
->T0
, s
->T1
, 16, 16);
1864 * If TARGET_X86_64 defined then fall through into MO_32 case,
1865 * otherwise fall through default case.
1868 #ifdef TARGET_X86_64
1869 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1870 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1872 tcg_gen_concat_tl_i64(s
->T0
, s
->T0
, s
->T1
);
1873 tcg_gen_shr_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1874 tcg_gen_shr_i64(s
->T0
, s
->T0
, count
);
1876 tcg_gen_concat_tl_i64(s
->T0
, s
->T1
, s
->T0
);
1877 tcg_gen_shl_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1878 tcg_gen_shl_i64(s
->T0
, s
->T0
, count
);
1879 tcg_gen_shri_i64(s
->tmp0
, s
->tmp0
, 32);
1880 tcg_gen_shri_i64(s
->T0
, s
->T0
, 32);
1885 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1887 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1889 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
1890 tcg_gen_shr_tl(s
->T0
, s
->T0
, count
);
1891 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->tmp4
);
1893 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1895 /* Only needed if count > 16, for Intel behaviour. */
1896 tcg_gen_subfi_tl(s
->tmp4
, 33, count
);
1897 tcg_gen_shr_tl(s
->tmp4
, s
->T1
, s
->tmp4
);
1898 tcg_gen_or_tl(s
->tmp0
, s
->tmp0
, s
->tmp4
);
1901 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
1902 tcg_gen_shl_tl(s
->T0
, s
->T0
, count
);
1903 tcg_gen_shr_tl(s
->T1
, s
->T1
, s
->tmp4
);
1905 tcg_gen_movi_tl(s
->tmp4
, 0);
1906 tcg_gen_movcond_tl(TCG_COND_EQ
, s
->T1
, count
, s
->tmp4
,
1908 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
1913 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1915 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, count
, is_right
);
1916 tcg_temp_free(count
);
1919 static void gen_shift(DisasContext
*s1
, int op
, MemOp ot
, int d
, int s
)
1922 gen_op_mov_v_reg(s1
, ot
, s1
->T1
, s
);
1925 gen_rot_rm_T1(s1
, ot
, d
, 0);
1928 gen_rot_rm_T1(s1
, ot
, d
, 1);
1932 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
1935 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
1938 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
1941 gen_rotc_rm_T1(s1
, ot
, d
, 0);
1944 gen_rotc_rm_T1(s1
, ot
, d
, 1);
1949 static void gen_shifti(DisasContext
*s1
, int op
, MemOp ot
, int d
, int c
)
1953 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
1956 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
1960 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
1963 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
1966 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
1969 /* currently not optimized */
1970 tcg_gen_movi_tl(s1
->T1
, c
);
1971 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
1976 #define X86_MAX_INSN_LENGTH 15
1978 static uint64_t advance_pc(CPUX86State
*env
, DisasContext
*s
, int num_bytes
)
1980 uint64_t pc
= s
->pc
;
1983 if (unlikely(s
->pc
- s
->pc_start
> X86_MAX_INSN_LENGTH
)) {
1984 /* If the instruction's 16th byte is on a different page than the 1st, a
1985 * page fault on the second page wins over the general protection fault
1986 * caused by the instruction being too long.
1987 * This can happen even if the operand is only one byte long!
1989 if (((s
->pc
- 1) ^ (pc
- 1)) & TARGET_PAGE_MASK
) {
1990 volatile uint8_t unused
=
1991 cpu_ldub_code(env
, (s
->pc
- 1) & TARGET_PAGE_MASK
);
1994 siglongjmp(s
->jmpbuf
, 1);
2000 static inline uint8_t x86_ldub_code(CPUX86State
*env
, DisasContext
*s
)
2002 return translator_ldub(env
, advance_pc(env
, s
, 1));
2005 static inline int16_t x86_ldsw_code(CPUX86State
*env
, DisasContext
*s
)
2007 return translator_ldsw(env
, advance_pc(env
, s
, 2));
2010 static inline uint16_t x86_lduw_code(CPUX86State
*env
, DisasContext
*s
)
2012 return translator_lduw(env
, advance_pc(env
, s
, 2));
2015 static inline uint32_t x86_ldl_code(CPUX86State
*env
, DisasContext
*s
)
2017 return translator_ldl(env
, advance_pc(env
, s
, 4));
#ifdef TARGET_X86_64
/* Fetch a 64-bit quadword from the instruction stream (64-bit mode only). */
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, advance_pc(env, s, 8));
}
#endif
2027 /* Decompose an address. */
2029 typedef struct AddressParts
{
2037 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
2040 int def_seg
, base
, index
, scale
, mod
, rm
;
2049 mod
= (modrm
>> 6) & 3;
2051 base
= rm
| REX_B(s
);
2054 /* Normally filtered out earlier, but including this path
2055 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
2064 int code
= x86_ldub_code(env
, s
);
2065 scale
= (code
>> 6) & 3;
2066 index
= ((code
>> 3) & 7) | REX_X(s
);
2068 index
= -1; /* no index */
2070 base
= (code
& 7) | REX_B(s
);
2076 if ((base
& 7) == 5) {
2078 disp
= (int32_t)x86_ldl_code(env
, s
);
2079 if (CODE64(s
) && !havesib
) {
2081 disp
+= s
->pc
+ s
->rip_offset
;
2086 disp
= (int8_t)x86_ldub_code(env
, s
);
2090 disp
= (int32_t)x86_ldl_code(env
, s
);
2094 /* For correct popl handling with esp. */
2095 if (base
== R_ESP
&& s
->popl_esp_hack
) {
2096 disp
+= s
->popl_esp_hack
;
2098 if (base
== R_EBP
|| base
== R_ESP
) {
2107 disp
= x86_lduw_code(env
, s
);
2110 } else if (mod
== 1) {
2111 disp
= (int8_t)x86_ldub_code(env
, s
);
2113 disp
= (int16_t)x86_lduw_code(env
, s
);
2157 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
2160 /* Compute the address, with a minimum number of TCG ops. */
2161 static TCGv
gen_lea_modrm_1(DisasContext
*s
, AddressParts a
)
2167 ea
= cpu_regs
[a
.index
];
2169 tcg_gen_shli_tl(s
->A0
, cpu_regs
[a
.index
], a
.scale
);
2173 tcg_gen_add_tl(s
->A0
, ea
, cpu_regs
[a
.base
]);
2176 } else if (a
.base
>= 0) {
2177 ea
= cpu_regs
[a
.base
];
2180 tcg_gen_movi_tl(s
->A0
, a
.disp
);
2182 } else if (a
.disp
!= 0) {
2183 tcg_gen_addi_tl(s
->A0
, ea
, a
.disp
);
2190 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2192 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2193 TCGv ea
= gen_lea_modrm_1(s
, a
);
2194 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2197 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2199 (void)gen_lea_modrm_0(env
, s
, modrm
);
2202 /* Used for BNDCL, BNDCU, BNDCN. */
2203 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2204 TCGCond cond
, TCGv_i64 bndv
)
2206 TCGv ea
= gen_lea_modrm_1(s
, gen_lea_modrm_0(env
, s
, modrm
));
2208 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
2210 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
2212 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
2213 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
2214 gen_helper_bndck(cpu_env
, s
->tmp2_i32
);
2217 /* used for LEA and MOV AX, mem */
2218 static void gen_add_A0_ds_seg(DisasContext
*s
)
2220 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, R_DS
, s
->override
);
2223 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2225 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2226 MemOp ot
, int reg
, int is_store
)
2230 mod
= (modrm
>> 6) & 3;
2231 rm
= (modrm
& 7) | REX_B(s
);
2235 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2236 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
2238 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
2240 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2243 gen_lea_modrm(env
, s
, modrm
);
2246 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2247 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
2249 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
2251 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2256 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2262 ret
= x86_ldub_code(env
, s
);
2265 ret
= x86_lduw_code(env
, s
);
2268 #ifdef TARGET_X86_64
2271 ret
= x86_ldl_code(env
, s
);
2279 static inline int insn_const_size(MemOp ot
)
2288 static inline bool use_goto_tb(DisasContext
*s
, target_ulong pc
)
2290 #ifndef CONFIG_USER_ONLY
2291 return (pc
& TARGET_PAGE_MASK
) == (s
->base
.tb
->pc
& TARGET_PAGE_MASK
) ||
2292 (pc
& TARGET_PAGE_MASK
) == (s
->pc_start
& TARGET_PAGE_MASK
);
2298 static inline void gen_goto_tb(DisasContext
*s
, int tb_num
, target_ulong eip
)
2300 target_ulong pc
= s
->cs_base
+ eip
;
2302 if (use_goto_tb(s
, pc
)) {
2303 /* jump to same page: we can use a direct jump */
2304 tcg_gen_goto_tb(tb_num
);
2306 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
2307 s
->base
.is_jmp
= DISAS_NORETURN
;
2309 /* jump to another page */
2315 static inline void gen_jcc(DisasContext
*s
, int b
,
2316 target_ulong val
, target_ulong next_eip
)
2321 l1
= gen_new_label();
2324 gen_goto_tb(s
, 0, next_eip
);
2327 gen_goto_tb(s
, 1, val
);
2329 l1
= gen_new_label();
2330 l2
= gen_new_label();
2333 gen_jmp_im(s
, next_eip
);
2343 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, MemOp ot
, int b
,
2348 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2350 cc
= gen_prepare_cc(s
, b
, s
->T1
);
2351 if (cc
.mask
!= -1) {
2352 TCGv t0
= tcg_temp_new();
2353 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2357 cc
.reg2
= tcg_const_tl(cc
.imm
);
2360 tcg_gen_movcond_tl(cc
.cond
, s
->T0
, cc
.reg
, cc
.reg2
,
2361 s
->T0
, cpu_regs
[reg
]);
2362 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2364 if (cc
.mask
!= -1) {
2365 tcg_temp_free(cc
.reg
);
2368 tcg_temp_free(cc
.reg2
);
2372 static inline void gen_op_movl_T0_seg(DisasContext
*s
, X86Seg seg_reg
)
2374 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
2375 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2378 static inline void gen_op_movl_seg_T0_vm(DisasContext
*s
, X86Seg seg_reg
)
2380 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
2381 tcg_gen_st32_tl(s
->T0
, cpu_env
,
2382 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2383 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], s
->T0
, 4);
2386 /* move T0 to seg_reg and compute if the CPU state may change. Never
2387 call this function with seg_reg == R_CS */
2388 static void gen_movl_seg_T0(DisasContext
*s
, X86Seg seg_reg
)
2390 if (PE(s
) && !VM86(s
)) {
2391 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
2392 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), s
->tmp2_i32
);
2393 /* abort translation because the addseg value may change or
2394 because ss32 may change. For R_SS, translation must always
2395 stop as a special handling must be done to disable hardware
2396 interrupts for the next instruction */
2397 if (seg_reg
== R_SS
|| (CODE32(s
) && seg_reg
< R_FS
)) {
2398 s
->base
.is_jmp
= DISAS_TOO_MANY
;
2401 gen_op_movl_seg_T0_vm(s
, seg_reg
);
2402 if (seg_reg
== R_SS
) {
2403 s
->base
.is_jmp
= DISAS_TOO_MANY
;
2408 static inline int svm_is_rep(int prefixes
)
2410 return ((prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) ? 8 : 0);
2414 gen_svm_check_intercept_param(DisasContext
*s
, target_ulong pc_start
,
2415 uint32_t type
, uint64_t param
)
2417 /* no SVM activated; fast case */
2418 if (likely(!(s
->flags
& HF_GUEST_MASK
)))
2420 gen_update_cc_op(s
);
2421 gen_jmp_im(s
, pc_start
- s
->cs_base
);
2422 gen_helper_svm_check_intercept_param(cpu_env
, tcg_const_i32(type
),
2423 tcg_const_i64(param
));
2427 gen_svm_check_intercept(DisasContext
*s
, target_ulong pc_start
, uint64_t type
)
2429 gen_svm_check_intercept_param(s
, pc_start
, type
, 0);
2432 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2434 gen_op_add_reg_im(s
, mo_stacksize(s
), R_ESP
, addend
);
2437 /* Generate a push. It depends on ss32, addseg and dflag. */
2438 static void gen_push_v(DisasContext
*s
, TCGv val
)
2440 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2441 MemOp a_ot
= mo_stacksize(s
);
2442 int size
= 1 << d_ot
;
2443 TCGv new_esp
= s
->A0
;
2445 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_ESP
], size
);
2450 tcg_gen_mov_tl(new_esp
, s
->A0
);
2452 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2455 gen_op_st_v(s
, d_ot
, val
, s
->A0
);
2456 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, new_esp
);
2459 /* two step pop is necessary for precise exceptions */
2460 static MemOp
gen_pop_T0(DisasContext
*s
)
2462 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2464 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2465 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2470 static inline void gen_pop_update(DisasContext
*s
, MemOp ot
)
2472 gen_stack_update(s
, 1 << ot
);
2475 static inline void gen_stack_A0(DisasContext
*s
)
2477 gen_lea_v_seg(s
, SS32(s
) ? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2480 static void gen_pusha(DisasContext
*s
)
2482 MemOp s_ot
= SS32(s
) ? MO_32
: MO_16
;
2483 MemOp d_ot
= s
->dflag
;
2484 int size
= 1 << d_ot
;
2487 for (i
= 0; i
< 8; i
++) {
2488 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2489 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2490 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], s
->A0
);
2493 gen_stack_update(s
, -8 * size
);
2496 static void gen_popa(DisasContext
*s
)
2498 MemOp s_ot
= SS32(s
) ? MO_32
: MO_16
;
2499 MemOp d_ot
= s
->dflag
;
2500 int size
= 1 << d_ot
;
2503 for (i
= 0; i
< 8; i
++) {
2504 /* ESP is not reloaded */
2505 if (7 - i
== R_ESP
) {
2508 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], i
* size
);
2509 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2510 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2511 gen_op_mov_reg_v(s
, d_ot
, 7 - i
, s
->T0
);
2514 gen_stack_update(s
, 8 * size
);
2517 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2519 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2520 MemOp a_ot
= CODE64(s
) ? MO_64
: SS32(s
) ? MO_32
: MO_16
;
2521 int size
= 1 << d_ot
;
2523 /* Push BP; compute FrameTemp into T1. */
2524 tcg_gen_subi_tl(s
->T1
, cpu_regs
[R_ESP
], size
);
2525 gen_lea_v_seg(s
, a_ot
, s
->T1
, R_SS
, -1);
2526 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], s
->A0
);
2532 /* Copy level-1 pointers from the previous frame. */
2533 for (i
= 1; i
< level
; ++i
) {
2534 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_EBP
], size
* i
);
2535 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2536 gen_op_ld_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2538 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* i
);
2539 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2540 gen_op_st_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2543 /* Push the current FrameTemp as the last level. */
2544 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* level
);
2545 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2546 gen_op_st_v(s
, d_ot
, s
->T1
, s
->A0
);
2549 /* Copy the FrameTemp value to EBP. */
2550 gen_op_mov_reg_v(s
, a_ot
, R_EBP
, s
->T1
);
2552 /* Compute the final value of ESP. */
2553 tcg_gen_subi_tl(s
->T1
, s
->T1
, esp_addend
+ size
* level
);
2554 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2557 static void gen_leave(DisasContext
*s
)
2559 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2560 MemOp a_ot
= mo_stacksize(s
);
2562 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2563 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2565 tcg_gen_addi_tl(s
->T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2567 gen_op_mov_reg_v(s
, d_ot
, R_EBP
, s
->T0
);
2568 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2571 /* Similarly, except that the assumption here is that we don't decode
2572 the instruction at all -- either a missing opcode, an unimplemented
2573 feature, or just a bogus instruction stream. */
2574 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2576 gen_illegal_opcode(s
);
2578 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2579 FILE *logfile
= qemu_log_lock();
2580 target_ulong pc
= s
->pc_start
, end
= s
->pc
;
2582 qemu_log("ILLOPC: " TARGET_FMT_lx
":", pc
);
2583 for (; pc
< end
; ++pc
) {
2584 qemu_log(" %02x", cpu_ldub_code(env
, pc
));
2587 qemu_log_unlock(logfile
);
2591 /* an interrupt is different from an exception because of the
2593 static void gen_interrupt(DisasContext
*s
, int intno
,
2594 target_ulong cur_eip
, target_ulong next_eip
)
2596 gen_update_cc_op(s
);
2597 gen_jmp_im(s
, cur_eip
);
2598 gen_helper_raise_interrupt(cpu_env
, tcg_const_i32(intno
),
2599 tcg_const_i32(next_eip
- cur_eip
));
2600 s
->base
.is_jmp
= DISAS_NORETURN
;
2603 static void gen_debug(DisasContext
*s
, target_ulong cur_eip
)
2605 gen_update_cc_op(s
);
2606 gen_jmp_im(s
, cur_eip
);
2607 gen_helper_debug(cpu_env
);
2608 s
->base
.is_jmp
= DISAS_NORETURN
;
2611 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2613 if ((s
->flags
& mask
) == 0) {
2614 TCGv_i32 t
= tcg_temp_new_i32();
2615 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2616 tcg_gen_ori_i32(t
, t
, mask
);
2617 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2618 tcg_temp_free_i32(t
);
2623 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2625 if (s
->flags
& mask
) {
2626 TCGv_i32 t
= tcg_temp_new_i32();
2627 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2628 tcg_gen_andi_i32(t
, t
, ~mask
);
2629 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2630 tcg_temp_free_i32(t
);
2635 /* Clear BND registers during legacy branches. */
2636 static void gen_bnd_jmp(DisasContext
*s
)
2638 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2639 and if the BNDREGs are known to be in use (non-zero) already.
2640 The helper itself will check BNDPRESERVE at runtime. */
2641 if ((s
->prefix
& PREFIX_REPNZ
) == 0
2642 && (s
->flags
& HF_MPX_EN_MASK
) != 0
2643 && (s
->flags
& HF_MPX_IU_MASK
) != 0) {
2644 gen_helper_bnd_jmp(cpu_env
);
2648 /* Generate an end of block. Trace exception is also generated if needed.
2649 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2650 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2651 S->TF. This is used by the syscall/sysret insns. */
2653 do_gen_eob_worker(DisasContext
*s
, bool inhibit
, bool recheck_tf
, bool jr
)
2655 gen_update_cc_op(s
);
2657 /* If several instructions disable interrupts, only the first does it. */
2658 if (inhibit
&& !(s
->flags
& HF_INHIBIT_IRQ_MASK
)) {
2659 gen_set_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2661 gen_reset_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2664 if (s
->base
.tb
->flags
& HF_RF_MASK
) {
2665 gen_helper_reset_rf(cpu_env
);
2667 if (s
->base
.singlestep_enabled
) {
2668 gen_helper_debug(cpu_env
);
2669 } else if (recheck_tf
) {
2670 gen_helper_rechecking_single_step(cpu_env
);
2671 tcg_gen_exit_tb(NULL
, 0);
2672 } else if (s
->flags
& HF_TF_MASK
) {
2673 gen_helper_single_step(cpu_env
);
2675 tcg_gen_lookup_and_goto_ptr();
2677 tcg_gen_exit_tb(NULL
, 0);
2679 s
->base
.is_jmp
= DISAS_NORETURN
;
2683 gen_eob_worker(DisasContext
*s
, bool inhibit
, bool recheck_tf
)
2685 do_gen_eob_worker(s
, inhibit
, recheck_tf
, false);
2689 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2690 static void gen_eob_inhibit_irq(DisasContext
*s
, bool inhibit
)
2692 gen_eob_worker(s
, inhibit
, false);
2695 /* End of block, resetting the inhibit irq flag. */
2696 static void gen_eob(DisasContext
*s
)
2698 gen_eob_worker(s
, false, false);
2701 /* Jump to register */
2702 static void gen_jr(DisasContext
*s
, TCGv dest
)
2704 do_gen_eob_worker(s
, false, false, true);
2707 /* generate a jump to eip. No segment change must happen before as a
2708 direct call to the next block may occur */
2709 static void gen_jmp_tb(DisasContext
*s
, target_ulong eip
, int tb_num
)
2711 gen_update_cc_op(s
);
2712 set_cc_op(s
, CC_OP_DYNAMIC
);
2714 gen_goto_tb(s
, tb_num
, eip
);
2721 static void gen_jmp(DisasContext
*s
, target_ulong eip
)
2723 gen_jmp_tb(s
, eip
, 0);
2726 static inline void gen_ldq_env_A0(DisasContext
*s
, int offset
)
2728 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEQ
);
2729 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
);
2732 static inline void gen_stq_env_A0(DisasContext
*s
, int offset
)
2734 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
);
2735 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEQ
);
2738 static inline void gen_ldo_env_A0(DisasContext
*s
, int offset
)
2740 int mem_index
= s
->mem_index
;
2741 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, mem_index
, MO_LEQ
);
2742 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(ZMMReg
, ZMM_Q(0)));
2743 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2744 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEQ
);
2745 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(ZMMReg
, ZMM_Q(1)));
2748 static inline void gen_sto_env_A0(DisasContext
*s
, int offset
)
2750 int mem_index
= s
->mem_index
;
2751 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(ZMMReg
, ZMM_Q(0)));
2752 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, mem_index
, MO_LEQ
);
2753 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2754 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(ZMMReg
, ZMM_Q(1)));
2755 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEQ
);
2758 static inline void gen_op_movo(DisasContext
*s
, int d_offset
, int s_offset
)
2760 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, s_offset
+ offsetof(ZMMReg
, ZMM_Q(0)));
2761 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, d_offset
+ offsetof(ZMMReg
, ZMM_Q(0)));
2762 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, s_offset
+ offsetof(ZMMReg
, ZMM_Q(1)));
2763 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, d_offset
+ offsetof(ZMMReg
, ZMM_Q(1)));
2766 static inline void gen_op_movq(DisasContext
*s
, int d_offset
, int s_offset
)
2768 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, s_offset
);
2769 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, d_offset
);
2772 static inline void gen_op_movl(DisasContext
*s
, int d_offset
, int s_offset
)
2774 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
, s_offset
);
2775 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, d_offset
);
2778 static inline void gen_op_movq_env_0(DisasContext
*s
, int d_offset
)
2780 tcg_gen_movi_i64(s
->tmp1_i64
, 0);
2781 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, d_offset
);
2784 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2785 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2786 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2787 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2788 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2789 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2791 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2792 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2795 #define SSE_SPECIAL ((void *)1)
2796 #define SSE_DUMMY ((void *)2)
2798 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2799 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2800 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
/*
 * Main MMX/SSE dispatch table for the two-byte 0x0f opcode space.
 * Indexed [opcode][b1], where b1 selects the mandatory prefix:
 * 0 = none (MMX/SSE), 1 = 0x66, 2 = 0xF3, 3 = 0xF2 (see gen_sse()).
 * SSE_SPECIAL entries are decoded by hand in gen_sse(); SSE_DUMMY marks
 * opcodes that only need the surrounding FP/MMX state handling.
 */
static const SSEFunc_0_epp sse_op_table1[256][4] = {
    /* 3DNow! extensions */
    [0x0e] = { SSE_DUMMY }, /* femms */
    [0x0f] = { SSE_DUMMY }, /* pf... */
    /* pure SSE operations */
    [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
    [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
    [0x13] = { SSE_SPECIAL, SSE_SPECIAL },  /* movlps, movlpd */
    [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
    [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
    [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd, movshdup */
    [0x17] = { SSE_SPECIAL, SSE_SPECIAL },  /* movhps, movhpd */

    [0x28] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x29] = { SSE_SPECIAL, SSE_SPECIAL },  /* movaps, movapd */
    [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
    [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
    [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
    [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
    [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
    [0x2f] = { gen_helper_comiss, gen_helper_comisd },
    [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
    [0x51] = SSE_FOP(sqrt),
    [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
    [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
    [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
    [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
    [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
    [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
    [0x58] = SSE_FOP(add),
    [0x59] = SSE_FOP(mul),
    [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
               gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
    [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
    [0x5c] = SSE_FOP(sub),
    [0x5d] = SSE_FOP(min),
    [0x5e] = SSE_FOP(div),
    [0x5f] = SSE_FOP(max),

    /* gen_sse() patches the comparison predicate in from the immediate */
    [0xc2] = SSE_FOP(cmpeq),
    [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
               (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */

    /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX.  */
    [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },

    /* MMX ops and their SSE extensions */
    [0x60] = MMX_OP2(punpcklbw),
    [0x61] = MMX_OP2(punpcklwd),
    [0x62] = MMX_OP2(punpckldq),
    [0x63] = MMX_OP2(packsswb),
    [0x64] = MMX_OP2(pcmpgtb),
    [0x65] = MMX_OP2(pcmpgtw),
    [0x66] = MMX_OP2(pcmpgtl),
    [0x67] = MMX_OP2(packuswb),
    [0x68] = MMX_OP2(punpckhbw),
    [0x69] = MMX_OP2(punpckhwd),
    [0x6a] = MMX_OP2(punpckhdq),
    [0x6b] = MMX_OP2(packssdw),
    [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
    [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
    [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
    [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
               (SSEFunc_0_epp)gen_helper_pshufd_xmm,
               (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
               (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
    [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
    [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
    [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
    [0x74] = MMX_OP2(pcmpeqb),
    [0x75] = MMX_OP2(pcmpeqw),
    [0x76] = MMX_OP2(pcmpeql),
    [0x77] = { SSE_DUMMY }, /* emms */
    [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
    [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
    [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
    [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
    [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
    [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
    [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
    [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
    [0xd1] = MMX_OP2(psrlw),
    [0xd2] = MMX_OP2(psrld),
    [0xd3] = MMX_OP2(psrlq),
    [0xd4] = MMX_OP2(paddq),
    [0xd5] = MMX_OP2(pmullw),
    [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
    [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
    [0xd8] = MMX_OP2(psubusb),
    [0xd9] = MMX_OP2(psubusw),
    [0xda] = MMX_OP2(pminub),
    [0xdb] = MMX_OP2(pand),
    [0xdc] = MMX_OP2(paddusb),
    [0xdd] = MMX_OP2(paddusw),
    [0xde] = MMX_OP2(pmaxub),
    [0xdf] = MMX_OP2(pandn),
    [0xe0] = MMX_OP2(pavgb),
    [0xe1] = MMX_OP2(psraw),
    [0xe2] = MMX_OP2(psrad),
    [0xe3] = MMX_OP2(pavgw),
    [0xe4] = MMX_OP2(pmulhuw),
    [0xe5] = MMX_OP2(pmulhw),
    [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
    [0xe8] = MMX_OP2(psubsb),
    [0xe9] = MMX_OP2(psubsw),
    [0xea] = MMX_OP2(pminsw),
    [0xeb] = MMX_OP2(por),
    [0xec] = MMX_OP2(paddsb),
    [0xed] = MMX_OP2(paddsw),
    [0xee] = MMX_OP2(pmaxsw),
    [0xef] = MMX_OP2(pxor),
    [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
    [0xf1] = MMX_OP2(psllw),
    [0xf2] = MMX_OP2(pslld),
    [0xf3] = MMX_OP2(psllq),
    [0xf4] = MMX_OP2(pmuludq),
    [0xf5] = MMX_OP2(pmaddwd),
    [0xf6] = MMX_OP2(psadbw),
    [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
               (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
    [0xf8] = MMX_OP2(psubb),
    [0xf9] = MMX_OP2(psubw),
    [0xfa] = MMX_OP2(psubl),
    [0xfb] = MMX_OP2(psubq),
    [0xfc] = MMX_OP2(paddb),
    [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
/*
 * Shift-by-immediate groups (0f 71/72/73).  gen_sse() indexes this as
 * [((b - 1) & 3) * 8 + reg][b1]: the row encodes opcode group and the
 * modrm reg field, the column selects the MMX (0) or 0x66-prefixed XMM (1)
 * variant.  psrldq/pslldq only exist in XMM form, hence the NULL slots.
 */
static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
    [0 + 2] = MMX_OP2(psrlw),
    [0 + 4] = MMX_OP2(psraw),
    [0 + 6] = MMX_OP2(psllw),
    [8 + 2] = MMX_OP2(psrld),
    [8 + 4] = MMX_OP2(psrad),
    [8 + 6] = MMX_OP2(pslld),
    [16 + 2] = MMX_OP2(psrlq),
    [16 + 3] = { NULL, gen_helper_psrldq_xmm },
    [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
2949 static const SSEFunc_0_epi sse_op_table3ai
[] = {
2950 gen_helper_cvtsi2ss
,
2954 #ifdef TARGET_X86_64
2955 static const SSEFunc_0_epl sse_op_table3aq
[] = {
2956 gen_helper_cvtsq2ss
,
2961 static const SSEFunc_i_ep sse_op_table3bi
[] = {
2962 gen_helper_cvttss2si
,
2963 gen_helper_cvtss2si
,
2964 gen_helper_cvttsd2si
,
2968 #ifdef TARGET_X86_64
2969 static const SSEFunc_l_ep sse_op_table3bq
[] = {
2970 gen_helper_cvttss2sq
,
2971 gen_helper_cvtss2sq
,
2972 gen_helper_cvttsd2sq
,
2977 static const SSEFunc_0_epp sse_op_table4
[8][4] = {
/*
 * 3DNow! operations, indexed by the instruction's trailing opcode
 * suffix byte.  All operate on MMX registers via the generic epp helper
 * signature; unlisted suffixes are invalid.
 */
static const SSEFunc_0_epp sse_op_table5[256] = {
    [0x0c] = gen_helper_pi2fw,
    [0x0d] = gen_helper_pi2fd,
    [0x1c] = gen_helper_pf2iw,
    [0x1d] = gen_helper_pf2id,
    [0x8a] = gen_helper_pfnacc,
    [0x8e] = gen_helper_pfpnacc,
    [0x90] = gen_helper_pfcmpge,
    [0x94] = gen_helper_pfmin,
    [0x96] = gen_helper_pfrcp,
    [0x97] = gen_helper_pfrsqrt,
    [0x9a] = gen_helper_pfsub,
    [0x9e] = gen_helper_pfadd,
    [0xa0] = gen_helper_pfcmpgt,
    [0xa4] = gen_helper_pfmax,
    [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
    [0xa7] = gen_helper_movq, /* pfrsqit1 */
    [0xaa] = gen_helper_pfsubr,
    [0xae] = gen_helper_pfacc,
    [0xb0] = gen_helper_pfcmpeq,
    [0xb4] = gen_helper_pfmul,
    [0xb6] = gen_helper_movq, /* pfrcpit2 */
    [0xb7] = gen_helper_pmulhrw_mmx,
    [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
3015 struct SSEOpHelper_epp
{
3016 SSEFunc_0_epp op
[2];
3020 struct SSEOpHelper_eppi
{
3021 SSEFunc_0_eppi op
[2];
3025 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3026 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3027 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3028 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3029 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
3030 CPUID_EXT_PCLMULQDQ }
3031 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
/*
 * Three-byte 0f 38 opcode space (SSSE3/SSE4.1/SSE4.2/AESNI), indexed by
 * the third opcode byte.  Each entry pairs the helper (MMX/XMM forms)
 * with the CPUID feature bit that must be present, checked in gen_sse().
 */
static const struct SSEOpHelper_epp sse_op_table6[256] = {
    [0x00] = SSSE3_OP(pshufb),
    [0x01] = SSSE3_OP(phaddw),
    [0x02] = SSSE3_OP(phaddd),
    [0x03] = SSSE3_OP(phaddsw),
    [0x04] = SSSE3_OP(pmaddubsw),
    [0x05] = SSSE3_OP(phsubw),
    [0x06] = SSSE3_OP(phsubd),
    [0x07] = SSSE3_OP(phsubsw),
    [0x08] = SSSE3_OP(psignb),
    [0x09] = SSSE3_OP(psignw),
    [0x0a] = SSSE3_OP(psignd),
    [0x0b] = SSSE3_OP(pmulhrsw),
    [0x10] = SSE41_OP(pblendvb),
    [0x14] = SSE41_OP(blendvps),
    [0x15] = SSE41_OP(blendvpd),
    [0x17] = SSE41_OP(ptest),
    [0x1c] = SSSE3_OP(pabsb),
    [0x1d] = SSSE3_OP(pabsw),
    [0x1e] = SSSE3_OP(pabsd),
    [0x20] = SSE41_OP(pmovsxbw),
    [0x21] = SSE41_OP(pmovsxbd),
    [0x22] = SSE41_OP(pmovsxbq),
    [0x23] = SSE41_OP(pmovsxwd),
    [0x24] = SSE41_OP(pmovsxwq),
    [0x25] = SSE41_OP(pmovsxdq),
    [0x28] = SSE41_OP(pmuldq),
    [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntqda */
    [0x2b] = SSE41_OP(packusdw),
    [0x30] = SSE41_OP(pmovzxbw),
    [0x31] = SSE41_OP(pmovzxbd),
    [0x32] = SSE41_OP(pmovzxbq),
    [0x33] = SSE41_OP(pmovzxwd),
    [0x34] = SSE41_OP(pmovzxwq),
    [0x35] = SSE41_OP(pmovzxdq),
    [0x37] = SSE42_OP(pcmpgtq),
    [0x38] = SSE41_OP(pminsb),
    [0x39] = SSE41_OP(pminsd),
    [0x3a] = SSE41_OP(pminuw),
    [0x3b] = SSE41_OP(pminud),
    [0x3c] = SSE41_OP(pmaxsb),
    [0x3d] = SSE41_OP(pmaxsd),
    [0x3e] = SSE41_OP(pmaxuw),
    [0x3f] = SSE41_OP(pmaxud),
    [0x40] = SSE41_OP(pmulld),
    [0x41] = SSE41_OP(phminposuw),
    [0xdb] = AESNI_OP(aesimc),
    [0xdc] = AESNI_OP(aesenc),
    [0xdd] = AESNI_OP(aesenclast),
    [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};
/*
 * Three-byte 0f 3a opcode space (SSE4.1/SSE4.2/PCLMULQDQ/AESNI with an
 * immediate operand), indexed by the third opcode byte.  Entries pair
 * the eppi helper with the required CPUID feature bit; SSE41_SPECIAL
 * rows are hand-decoded in gen_sse().
 */
static const struct SSEOpHelper_eppi sse_op_table7[256] = {
    [0x08] = SSE41_OP(roundps),
    [0x09] = SSE41_OP(roundpd),
    [0x0a] = SSE41_OP(roundss),
    [0x0b] = SSE41_OP(roundsd),
    [0x0c] = SSE41_OP(blendps),
    [0x0d] = SSE41_OP(blendpd),
    [0x0e] = SSE41_OP(pblendw),
    [0x0f] = SSSE3_OP(palignr),
    [0x14] = SSE41_SPECIAL, /* pextrb */
    [0x15] = SSE41_SPECIAL, /* pextrw */
    [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
    [0x17] = SSE41_SPECIAL, /* extractps */
    [0x20] = SSE41_SPECIAL, /* pinsrb */
    [0x21] = SSE41_SPECIAL, /* insertps */
    [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
    [0x40] = SSE41_OP(dpps),
    [0x41] = SSE41_OP(dppd),
    [0x42] = SSE41_OP(mpsadbw),
    [0x44] = PCLMULQDQ_OP(pclmulqdq),
    [0x60] = SSE42_OP(pcmpestrm),
    [0x61] = SSE42_OP(pcmpestri),
    [0x62] = SSE42_OP(pcmpistrm),
    [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};
3114 static void gen_sse(CPUX86State
*env
, DisasContext
*s
, int b
,
3115 target_ulong pc_start
)
3117 int b1
, op1_offset
, op2_offset
, is_xmm
, val
;
3118 int modrm
, mod
, rm
, reg
;
3119 SSEFunc_0_epp sse_fn_epp
;
3120 SSEFunc_0_eppi sse_fn_eppi
;
3121 SSEFunc_0_ppi sse_fn_ppi
;
3122 SSEFunc_0_eppt sse_fn_eppt
;
3126 if (s
->prefix
& PREFIX_DATA
)
3128 else if (s
->prefix
& PREFIX_REPZ
)
3130 else if (s
->prefix
& PREFIX_REPNZ
)
3134 sse_fn_epp
= sse_op_table1
[b
][b1
];
3138 if ((b
<= 0x5f && b
>= 0x10) || b
== 0xc6 || b
== 0xc2) {
3148 /* simple MMX/SSE operation */
3149 if (s
->flags
& HF_TS_MASK
) {
3150 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
3153 if (s
->flags
& HF_EM_MASK
) {
3155 gen_illegal_opcode(s
);
3159 && !(s
->flags
& HF_OSFXSR_MASK
)
3160 && (b
!= 0x38 && b
!= 0x3a)) {
3164 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
3165 /* If we were fully decoding this we might use illegal_op. */
3169 gen_helper_emms(cpu_env
);
3174 gen_helper_emms(cpu_env
);
3177 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3178 the static cpu state) */
3180 gen_helper_enter_mmx(cpu_env
);
3183 modrm
= x86_ldub_code(env
, s
);
3184 reg
= ((modrm
>> 3) & 7);
3188 mod
= (modrm
>> 6) & 3;
3189 if (sse_fn_epp
== SSE_SPECIAL
) {
3192 case 0x0e7: /* movntq */
3196 gen_lea_modrm(env
, s
, modrm
);
3197 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3199 case 0x1e7: /* movntdq */
3200 case 0x02b: /* movntps */
3201 case 0x12b: /* movntps */
3204 gen_lea_modrm(env
, s
, modrm
);
3205 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3207 case 0x3f0: /* lddqu */
3210 gen_lea_modrm(env
, s
, modrm
);
3211 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3213 case 0x22b: /* movntss */
3214 case 0x32b: /* movntsd */
3217 gen_lea_modrm(env
, s
, modrm
);
3219 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3220 xmm_regs
[reg
].ZMM_Q(0)));
3222 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
3223 xmm_regs
[reg
].ZMM_L(0)));
3224 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
3227 case 0x6e: /* movd mm, ea */
3228 #ifdef TARGET_X86_64
3229 if (s
->dflag
== MO_64
) {
3230 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3231 tcg_gen_st_tl(s
->T0
, cpu_env
,
3232 offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3236 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3237 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3238 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3239 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3240 gen_helper_movl_mm_T0_mmx(s
->ptr0
, s
->tmp2_i32
);
3243 case 0x16e: /* movd xmm, ea */
3244 #ifdef TARGET_X86_64
3245 if (s
->dflag
== MO_64
) {
3246 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3247 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3248 offsetof(CPUX86State
,xmm_regs
[reg
]));
3249 gen_helper_movq_mm_T0_xmm(s
->ptr0
, s
->T0
);
3253 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3254 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3255 offsetof(CPUX86State
,xmm_regs
[reg
]));
3256 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3257 gen_helper_movl_mm_T0_xmm(s
->ptr0
, s
->tmp2_i32
);
3260 case 0x6f: /* movq mm, ea */
3262 gen_lea_modrm(env
, s
, modrm
);
3263 gen_ldq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3266 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
,
3267 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3268 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
,
3269 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3272 case 0x010: /* movups */
3273 case 0x110: /* movupd */
3274 case 0x028: /* movaps */
3275 case 0x128: /* movapd */
3276 case 0x16f: /* movdqa xmm, ea */
3277 case 0x26f: /* movdqu xmm, ea */
3279 gen_lea_modrm(env
, s
, modrm
);
3280 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3282 rm
= (modrm
& 7) | REX_B(s
);
3283 gen_op_movo(s
, offsetof(CPUX86State
, xmm_regs
[reg
]),
3284 offsetof(CPUX86State
,xmm_regs
[rm
]));
3287 case 0x210: /* movss xmm, ea */
3289 gen_lea_modrm(env
, s
, modrm
);
3290 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
3291 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3292 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3293 tcg_gen_movi_tl(s
->T0
, 0);
3294 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3295 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)));
3296 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3297 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)));
3298 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3299 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)));
3301 rm
= (modrm
& 7) | REX_B(s
);
3302 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3303 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3306 case 0x310: /* movsd xmm, ea */
3308 gen_lea_modrm(env
, s
, modrm
);
3309 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3310 xmm_regs
[reg
].ZMM_Q(0)));
3311 tcg_gen_movi_tl(s
->T0
, 0);
3312 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3313 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)));
3314 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3315 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)));
3317 rm
= (modrm
& 7) | REX_B(s
);
3318 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3319 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3322 case 0x012: /* movlps */
3323 case 0x112: /* movlpd */
3325 gen_lea_modrm(env
, s
, modrm
);
3326 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3327 xmm_regs
[reg
].ZMM_Q(0)));
3330 rm
= (modrm
& 7) | REX_B(s
);
3331 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3332 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3335 case 0x212: /* movsldup */
3337 gen_lea_modrm(env
, s
, modrm
);
3338 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3340 rm
= (modrm
& 7) | REX_B(s
);
3341 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3342 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3343 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)),
3344 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(2)));
3346 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)),
3347 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3348 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)),
3349 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3351 case 0x312: /* movddup */
3353 gen_lea_modrm(env
, s
, modrm
);
3354 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3355 xmm_regs
[reg
].ZMM_Q(0)));
3357 rm
= (modrm
& 7) | REX_B(s
);
3358 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3359 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3361 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)),
3362 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3364 case 0x016: /* movhps */
3365 case 0x116: /* movhpd */
3367 gen_lea_modrm(env
, s
, modrm
);
3368 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3369 xmm_regs
[reg
].ZMM_Q(1)));
3372 rm
= (modrm
& 7) | REX_B(s
);
3373 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)),
3374 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3377 case 0x216: /* movshdup */
3379 gen_lea_modrm(env
, s
, modrm
);
3380 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3382 rm
= (modrm
& 7) | REX_B(s
);
3383 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)),
3384 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(1)));
3385 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)),
3386 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(3)));
3388 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3389 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3390 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)),
3391 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3396 int bit_index
, field_length
;
3398 if (b1
== 1 && reg
!= 0)
3400 field_length
= x86_ldub_code(env
, s
) & 0x3F;
3401 bit_index
= x86_ldub_code(env
, s
) & 0x3F;
3402 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3403 offsetof(CPUX86State
,xmm_regs
[reg
]));
3405 gen_helper_extrq_i(cpu_env
, s
->ptr0
,
3406 tcg_const_i32(bit_index
),
3407 tcg_const_i32(field_length
));
3409 gen_helper_insertq_i(cpu_env
, s
->ptr0
,
3410 tcg_const_i32(bit_index
),
3411 tcg_const_i32(field_length
));
3414 case 0x7e: /* movd ea, mm */
3415 #ifdef TARGET_X86_64
3416 if (s
->dflag
== MO_64
) {
3417 tcg_gen_ld_i64(s
->T0
, cpu_env
,
3418 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3419 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3423 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3424 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3425 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3428 case 0x17e: /* movd ea, xmm */
3429 #ifdef TARGET_X86_64
3430 if (s
->dflag
== MO_64
) {
3431 tcg_gen_ld_i64(s
->T0
, cpu_env
,
3432 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3433 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3437 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3438 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3439 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3442 case 0x27e: /* movq xmm, ea */
3444 gen_lea_modrm(env
, s
, modrm
);
3445 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3446 xmm_regs
[reg
].ZMM_Q(0)));
3448 rm
= (modrm
& 7) | REX_B(s
);
3449 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3450 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3452 gen_op_movq_env_0(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)));
3454 case 0x7f: /* movq ea, mm */
3456 gen_lea_modrm(env
, s
, modrm
);
3457 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3460 gen_op_movq(s
, offsetof(CPUX86State
, fpregs
[rm
].mmx
),
3461 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3464 case 0x011: /* movups */
3465 case 0x111: /* movupd */
3466 case 0x029: /* movaps */
3467 case 0x129: /* movapd */
3468 case 0x17f: /* movdqa ea, xmm */
3469 case 0x27f: /* movdqu ea, xmm */
3471 gen_lea_modrm(env
, s
, modrm
);
3472 gen_sto_env_A0(s
, offsetof(CPUX86State
, xmm_regs
[reg
]));
3474 rm
= (modrm
& 7) | REX_B(s
);
3475 gen_op_movo(s
, offsetof(CPUX86State
, xmm_regs
[rm
]),
3476 offsetof(CPUX86State
,xmm_regs
[reg
]));
3479 case 0x211: /* movss ea, xmm */
3481 gen_lea_modrm(env
, s
, modrm
);
3482 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3483 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3484 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
3486 rm
= (modrm
& 7) | REX_B(s
);
3487 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_L(0)),
3488 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3491 case 0x311: /* movsd ea, xmm */
3493 gen_lea_modrm(env
, s
, modrm
);
3494 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3495 xmm_regs
[reg
].ZMM_Q(0)));
3497 rm
= (modrm
& 7) | REX_B(s
);
3498 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)),
3499 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3502 case 0x013: /* movlps */
3503 case 0x113: /* movlpd */
3505 gen_lea_modrm(env
, s
, modrm
);
3506 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3507 xmm_regs
[reg
].ZMM_Q(0)));
3512 case 0x017: /* movhps */
3513 case 0x117: /* movhpd */
3515 gen_lea_modrm(env
, s
, modrm
);
3516 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3517 xmm_regs
[reg
].ZMM_Q(1)));
3522 case 0x71: /* shift mm, im */
3525 case 0x171: /* shift xmm, im */
3531 val
= x86_ldub_code(env
, s
);
3533 tcg_gen_movi_tl(s
->T0
, val
);
3534 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3535 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
3536 tcg_gen_movi_tl(s
->T0
, 0);
3537 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3538 offsetof(CPUX86State
, xmm_t0
.ZMM_L(1)));
3539 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3541 tcg_gen_movi_tl(s
->T0
, val
);
3542 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3543 offsetof(CPUX86State
, mmx_t0
.MMX_L(0)));
3544 tcg_gen_movi_tl(s
->T0
, 0);
3545 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3546 offsetof(CPUX86State
, mmx_t0
.MMX_L(1)));
3547 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3549 sse_fn_epp
= sse_op_table2
[((b
- 1) & 3) * 8 +
3550 (((modrm
>> 3)) & 7)][b1
];
3555 rm
= (modrm
& 7) | REX_B(s
);
3556 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3559 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3561 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op2_offset
);
3562 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op1_offset
);
3563 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
3565 case 0x050: /* movmskps */
3566 rm
= (modrm
& 7) | REX_B(s
);
3567 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3568 offsetof(CPUX86State
,xmm_regs
[rm
]));
3569 gen_helper_movmskps(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3570 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3572 case 0x150: /* movmskpd */
3573 rm
= (modrm
& 7) | REX_B(s
);
3574 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3575 offsetof(CPUX86State
,xmm_regs
[rm
]));
3576 gen_helper_movmskpd(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3577 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3579 case 0x02a: /* cvtpi2ps */
3580 case 0x12a: /* cvtpi2pd */
3581 gen_helper_enter_mmx(cpu_env
);
3583 gen_lea_modrm(env
, s
, modrm
);
3584 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3585 gen_ldq_env_A0(s
, op2_offset
);
3588 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3590 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3591 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3592 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3595 gen_helper_cvtpi2ps(cpu_env
, s
->ptr0
, s
->ptr1
);
3599 gen_helper_cvtpi2pd(cpu_env
, s
->ptr0
, s
->ptr1
);
3603 case 0x22a: /* cvtsi2ss */
3604 case 0x32a: /* cvtsi2sd */
3605 ot
= mo_64_32(s
->dflag
);
3606 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3607 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3608 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3610 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3611 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3612 sse_fn_epi(cpu_env
, s
->ptr0
, s
->tmp2_i32
);
3614 #ifdef TARGET_X86_64
3615 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3616 sse_fn_epl(cpu_env
, s
->ptr0
, s
->T0
);
3622 case 0x02c: /* cvttps2pi */
3623 case 0x12c: /* cvttpd2pi */
3624 case 0x02d: /* cvtps2pi */
3625 case 0x12d: /* cvtpd2pi */
3626 gen_helper_enter_mmx(cpu_env
);
3628 gen_lea_modrm(env
, s
, modrm
);
3629 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3630 gen_ldo_env_A0(s
, op2_offset
);
3632 rm
= (modrm
& 7) | REX_B(s
);
3633 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3635 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3636 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3637 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3640 gen_helper_cvttps2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3643 gen_helper_cvttpd2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3646 gen_helper_cvtps2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3649 gen_helper_cvtpd2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3653 case 0x22c: /* cvttss2si */
3654 case 0x32c: /* cvttsd2si */
3655 case 0x22d: /* cvtss2si */
3656 case 0x32d: /* cvtsd2si */
3657 ot
= mo_64_32(s
->dflag
);
3659 gen_lea_modrm(env
, s
, modrm
);
3661 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_Q(0)));
3663 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
3664 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3665 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
3667 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3669 rm
= (modrm
& 7) | REX_B(s
);
3670 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
3672 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op2_offset
);
3674 SSEFunc_i_ep sse_fn_i_ep
=
3675 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3676 sse_fn_i_ep(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3677 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
3679 #ifdef TARGET_X86_64
3680 SSEFunc_l_ep sse_fn_l_ep
=
3681 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3682 sse_fn_l_ep(s
->T0
, cpu_env
, s
->ptr0
);
3687 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3689 case 0xc4: /* pinsrw */
3692 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
3693 val
= x86_ldub_code(env
, s
);
3696 tcg_gen_st16_tl(s
->T0
, cpu_env
,
3697 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_W(val
)));
3700 tcg_gen_st16_tl(s
->T0
, cpu_env
,
3701 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3704 case 0xc5: /* pextrw */
3708 ot
= mo_64_32(s
->dflag
);
3709 val
= x86_ldub_code(env
, s
);
3712 rm
= (modrm
& 7) | REX_B(s
);
3713 tcg_gen_ld16u_tl(s
->T0
, cpu_env
,
3714 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_W(val
)));
3718 tcg_gen_ld16u_tl(s
->T0
, cpu_env
,
3719 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
3721 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3722 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3724 case 0x1d6: /* movq ea, xmm */
3726 gen_lea_modrm(env
, s
, modrm
);
3727 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3728 xmm_regs
[reg
].ZMM_Q(0)));
3730 rm
= (modrm
& 7) | REX_B(s
);
3731 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)),
3732 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3733 gen_op_movq_env_0(s
,
3734 offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(1)));
3737 case 0x2d6: /* movq2dq */
3738 gen_helper_enter_mmx(cpu_env
);
3740 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3741 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3742 gen_op_movq_env_0(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)));
3744 case 0x3d6: /* movdq2q */
3745 gen_helper_enter_mmx(cpu_env
);
3746 rm
= (modrm
& 7) | REX_B(s
);
3747 gen_op_movq(s
, offsetof(CPUX86State
, fpregs
[reg
& 7].mmx
),
3748 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3750 case 0xd7: /* pmovmskb */
3755 rm
= (modrm
& 7) | REX_B(s
);
3756 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3757 offsetof(CPUX86State
, xmm_regs
[rm
]));
3758 gen_helper_pmovmskb_xmm(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3761 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3762 offsetof(CPUX86State
, fpregs
[rm
].mmx
));
3763 gen_helper_pmovmskb_mmx(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3765 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3766 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3772 if ((b
& 0xf0) == 0xf0) {
3775 modrm
= x86_ldub_code(env
, s
);
3777 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3778 mod
= (modrm
>> 6) & 3;
3783 sse_fn_epp
= sse_op_table6
[b
].op
[b1
];
3787 if (!(s
->cpuid_ext_features
& sse_op_table6
[b
].ext_mask
))
3791 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
3793 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
3795 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3796 gen_lea_modrm(env
, s
, modrm
);
3798 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3799 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3800 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3801 gen_ldq_env_A0(s
, op2_offset
+
3802 offsetof(ZMMReg
, ZMM_Q(0)));
3804 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3805 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3806 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
3807 s
->mem_index
, MO_LEUL
);
3808 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, op2_offset
+
3809 offsetof(ZMMReg
, ZMM_L(0)));
3811 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3812 tcg_gen_qemu_ld_tl(s
->tmp0
, s
->A0
,
3813 s
->mem_index
, MO_LEUW
);
3814 tcg_gen_st16_tl(s
->tmp0
, cpu_env
, op2_offset
+
3815 offsetof(ZMMReg
, ZMM_W(0)));
3817 case 0x2a: /* movntqda */
3818 gen_ldo_env_A0(s
, op1_offset
);
3821 gen_ldo_env_A0(s
, op2_offset
);
3825 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
3827 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3829 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3830 gen_lea_modrm(env
, s
, modrm
);
3831 gen_ldq_env_A0(s
, op2_offset
);
3834 if (sse_fn_epp
== SSE_SPECIAL
) {
3838 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3839 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3840 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
3843 set_cc_op(s
, CC_OP_EFLAGS
);
3850 /* Various integer extensions at 0f 38 f[0-f]. */
3851 b
= modrm
| (b1
<< 8);
3852 modrm
= x86_ldub_code(env
, s
);
3853 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
3856 case 0x3f0: /* crc32 Gd,Eb */
3857 case 0x3f1: /* crc32 Gd,Ey */
3859 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
3862 if ((b
& 0xff) == 0xf0) {
3864 } else if (s
->dflag
!= MO_64
) {
3865 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3870 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[reg
]);
3871 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3872 gen_helper_crc32(s
->T0
, s
->tmp2_i32
,
3873 s
->T0
, tcg_const_i32(8 << ot
));
3875 ot
= mo_64_32(s
->dflag
);
3876 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3879 case 0x1f0: /* crc32 or movbe */
3881 /* For these insns, the f3 prefix is supposed to have priority
3882 over the 66 prefix, but that's not what we implement above
3884 if (s
->prefix
& PREFIX_REPNZ
) {
3888 case 0x0f0: /* movbe Gy,My */
3889 case 0x0f1: /* movbe My,Gy */
3890 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
3893 if (s
->dflag
!= MO_64
) {
3894 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
3899 gen_lea_modrm(env
, s
, modrm
);
3901 tcg_gen_qemu_ld_tl(s
->T0
, s
->A0
,
3902 s
->mem_index
, ot
| MO_BE
);
3903 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3905 tcg_gen_qemu_st_tl(cpu_regs
[reg
], s
->A0
,
3906 s
->mem_index
, ot
| MO_BE
);
3910 case 0x0f2: /* andn Gy, By, Ey */
3911 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3912 || !(s
->prefix
& PREFIX_VEX
)
3916 ot
= mo_64_32(s
->dflag
);
3917 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3918 tcg_gen_andc_tl(s
->T0
, s
->T0
, cpu_regs
[s
->vex_v
]);
3919 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3920 gen_op_update1_cc(s
);
3921 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3924 case 0x0f7: /* bextr Gy, Ey, By */
3925 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
3926 || !(s
->prefix
& PREFIX_VEX
)
3930 ot
= mo_64_32(s
->dflag
);
3934 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3935 /* Extract START, and shift the operand.
3936 Shifts larger than operand size get zeros. */
3937 tcg_gen_ext8u_tl(s
->A0
, cpu_regs
[s
->vex_v
]);
3938 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->A0
);
3940 bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3941 zero
= tcg_const_tl(0);
3942 tcg_gen_movcond_tl(TCG_COND_LEU
, s
->T0
, s
->A0
, bound
,
3944 tcg_temp_free(zero
);
3946 /* Extract the LEN into a mask. Lengths larger than
3947 operand size get all ones. */
3948 tcg_gen_extract_tl(s
->A0
, cpu_regs
[s
->vex_v
], 8, 8);
3949 tcg_gen_movcond_tl(TCG_COND_LEU
, s
->A0
, s
->A0
, bound
,
3951 tcg_temp_free(bound
);
3952 tcg_gen_movi_tl(s
->T1
, 1);
3953 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->A0
);
3954 tcg_gen_subi_tl(s
->T1
, s
->T1
, 1);
3955 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
3957 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3958 gen_op_update1_cc(s
);
3959 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
3963 case 0x0f5: /* bzhi Gy, Ey, By */
3964 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3965 || !(s
->prefix
& PREFIX_VEX
)
3969 ot
= mo_64_32(s
->dflag
);
3970 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3971 tcg_gen_ext8u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
3973 TCGv bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
3974 /* Note that since we're using BMILG (in order to get O
3975 cleared) we need to store the inverse into C. */
3976 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
3978 tcg_gen_movcond_tl(TCG_COND_GT
, s
->T1
, s
->T1
,
3979 bound
, bound
, s
->T1
);
3980 tcg_temp_free(bound
);
3982 tcg_gen_movi_tl(s
->A0
, -1);
3983 tcg_gen_shl_tl(s
->A0
, s
->A0
, s
->T1
);
3984 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->A0
);
3985 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3986 gen_op_update1_cc(s
);
3987 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
3990 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3991 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
3992 || !(s
->prefix
& PREFIX_VEX
)
3996 ot
= mo_64_32(s
->dflag
);
3997 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4000 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
4001 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EDX
]);
4002 tcg_gen_mulu2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
4003 s
->tmp2_i32
, s
->tmp3_i32
);
4004 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], s
->tmp2_i32
);
4005 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp3_i32
);
4007 #ifdef TARGET_X86_64
4009 tcg_gen_mulu2_i64(s
->T0
, s
->T1
,
4010 s
->T0
, cpu_regs
[R_EDX
]);
4011 tcg_gen_mov_i64(cpu_regs
[s
->vex_v
], s
->T0
);
4012 tcg_gen_mov_i64(cpu_regs
[reg
], s
->T1
);
4018 case 0x3f5: /* pdep Gy, By, Ey */
4019 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4020 || !(s
->prefix
& PREFIX_VEX
)
4024 ot
= mo_64_32(s
->dflag
);
4025 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4026 /* Note that by zero-extending the source operand, we
4027 automatically handle zero-extending the result. */
4029 tcg_gen_mov_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4031 tcg_gen_ext32u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4033 gen_helper_pdep(cpu_regs
[reg
], s
->T1
, s
->T0
);
4036 case 0x2f5: /* pext Gy, By, Ey */
4037 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4038 || !(s
->prefix
& PREFIX_VEX
)
4042 ot
= mo_64_32(s
->dflag
);
4043 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4044 /* Note that by zero-extending the source operand, we
4045 automatically handle zero-extending the result. */
4047 tcg_gen_mov_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4049 tcg_gen_ext32u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4051 gen_helper_pext(cpu_regs
[reg
], s
->T1
, s
->T0
);
4054 case 0x1f6: /* adcx Gy, Ey */
4055 case 0x2f6: /* adox Gy, Ey */
4056 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
4059 TCGv carry_in
, carry_out
, zero
;
4062 ot
= mo_64_32(s
->dflag
);
4063 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4065 /* Re-use the carry-out from a previous round. */
4067 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
4071 carry_in
= cpu_cc_dst
;
4072 end_op
= CC_OP_ADCX
;
4074 end_op
= CC_OP_ADCOX
;
4079 end_op
= CC_OP_ADCOX
;
4081 carry_in
= cpu_cc_src2
;
4082 end_op
= CC_OP_ADOX
;
4086 end_op
= CC_OP_ADCOX
;
4087 carry_in
= carry_out
;
4090 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
4093 /* If we can't reuse carry-out, get it out of EFLAGS. */
4095 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
4096 gen_compute_eflags(s
);
4099 tcg_gen_extract_tl(carry_in
, cpu_cc_src
,
4100 ctz32(b
== 0x1f6 ? CC_C
: CC_O
), 1);
4104 #ifdef TARGET_X86_64
4106 /* If we know TL is 64-bit, and we want a 32-bit
4107 result, just do everything in 64-bit arithmetic. */
4108 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
4109 tcg_gen_ext32u_i64(s
->T0
, s
->T0
);
4110 tcg_gen_add_i64(s
->T0
, s
->T0
, cpu_regs
[reg
]);
4111 tcg_gen_add_i64(s
->T0
, s
->T0
, carry_in
);
4112 tcg_gen_ext32u_i64(cpu_regs
[reg
], s
->T0
);
4113 tcg_gen_shri_i64(carry_out
, s
->T0
, 32);
4117 /* Otherwise compute the carry-out in two steps. */
4118 zero
= tcg_const_tl(0);
4119 tcg_gen_add2_tl(s
->T0
, carry_out
,
4122 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
4123 cpu_regs
[reg
], carry_out
,
4125 tcg_temp_free(zero
);
4128 set_cc_op(s
, end_op
);
4132 case 0x1f7: /* shlx Gy, Ey, By */
4133 case 0x2f7: /* sarx Gy, Ey, By */
4134 case 0x3f7: /* shrx Gy, Ey, By */
4135 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4136 || !(s
->prefix
& PREFIX_VEX
)
4140 ot
= mo_64_32(s
->dflag
);
4141 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4143 tcg_gen_andi_tl(s
->T1
, cpu_regs
[s
->vex_v
], 63);
4145 tcg_gen_andi_tl(s
->T1
, cpu_regs
[s
->vex_v
], 31);
4148 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
4149 } else if (b
== 0x2f7) {
4151 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
4153 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
4156 tcg_gen_ext32u_tl(s
->T0
, s
->T0
);
4158 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
4160 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4166 case 0x3f3: /* Group 17 */
4167 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4168 || !(s
->prefix
& PREFIX_VEX
)
4172 ot
= mo_64_32(s
->dflag
);
4173 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4175 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
4177 case 1: /* blsr By,Ey */
4178 tcg_gen_subi_tl(s
->T1
, s
->T0
, 1);
4179 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4181 case 2: /* blsmsk By,Ey */
4182 tcg_gen_subi_tl(s
->T1
, s
->T0
, 1);
4183 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->T1
);
4185 case 3: /* blsi By, Ey */
4186 tcg_gen_neg_tl(s
->T1
, s
->T0
);
4187 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4192 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4193 gen_op_mov_reg_v(s
, ot
, s
->vex_v
, s
->T0
);
4194 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4205 modrm
= x86_ldub_code(env
, s
);
4207 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4208 mod
= (modrm
>> 6) & 3;
4213 sse_fn_eppi
= sse_op_table7
[b
].op
[b1
];
4217 if (!(s
->cpuid_ext_features
& sse_op_table7
[b
].ext_mask
))
4222 if (sse_fn_eppi
== SSE_SPECIAL
) {
4223 ot
= mo_64_32(s
->dflag
);
4224 rm
= (modrm
& 7) | REX_B(s
);
4226 gen_lea_modrm(env
, s
, modrm
);
4227 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4228 val
= x86_ldub_code(env
, s
);
4230 case 0x14: /* pextrb */
4231 tcg_gen_ld8u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4232 xmm_regs
[reg
].ZMM_B(val
& 15)));
4234 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4236 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4237 s
->mem_index
, MO_UB
);
4240 case 0x15: /* pextrw */
4241 tcg_gen_ld16u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4242 xmm_regs
[reg
].ZMM_W(val
& 7)));
4244 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4246 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4247 s
->mem_index
, MO_LEUW
);
4251 if (ot
== MO_32
) { /* pextrd */
4252 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4253 offsetof(CPUX86State
,
4254 xmm_regs
[reg
].ZMM_L(val
& 3)));
4256 tcg_gen_extu_i32_tl(cpu_regs
[rm
], s
->tmp2_i32
);
4258 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4259 s
->mem_index
, MO_LEUL
);
4261 } else { /* pextrq */
4262 #ifdef TARGET_X86_64
4263 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
,
4264 offsetof(CPUX86State
,
4265 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4267 tcg_gen_mov_i64(cpu_regs
[rm
], s
->tmp1_i64
);
4269 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4270 s
->mem_index
, MO_LEQ
);
4277 case 0x17: /* extractps */
4278 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4279 xmm_regs
[reg
].ZMM_L(val
& 3)));
4281 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4283 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4284 s
->mem_index
, MO_LEUL
);
4287 case 0x20: /* pinsrb */
4289 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
4291 tcg_gen_qemu_ld_tl(s
->T0
, s
->A0
,
4292 s
->mem_index
, MO_UB
);
4294 tcg_gen_st8_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4295 xmm_regs
[reg
].ZMM_B(val
& 15)));
4297 case 0x21: /* insertps */
4299 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4300 offsetof(CPUX86State
,xmm_regs
[rm
]
4301 .ZMM_L((val
>> 6) & 3)));
4303 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4304 s
->mem_index
, MO_LEUL
);
4306 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
4307 offsetof(CPUX86State
,xmm_regs
[reg
]
4308 .ZMM_L((val
>> 4) & 3)));
4310 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4311 cpu_env
, offsetof(CPUX86State
,
4312 xmm_regs
[reg
].ZMM_L(0)));
4314 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4315 cpu_env
, offsetof(CPUX86State
,
4316 xmm_regs
[reg
].ZMM_L(1)));
4318 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4319 cpu_env
, offsetof(CPUX86State
,
4320 xmm_regs
[reg
].ZMM_L(2)));
4322 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4323 cpu_env
, offsetof(CPUX86State
,
4324 xmm_regs
[reg
].ZMM_L(3)));
4327 if (ot
== MO_32
) { /* pinsrd */
4329 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[rm
]);
4331 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4332 s
->mem_index
, MO_LEUL
);
4334 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
4335 offsetof(CPUX86State
,
4336 xmm_regs
[reg
].ZMM_L(val
& 3)));
4337 } else { /* pinsrq */
4338 #ifdef TARGET_X86_64
4340 gen_op_mov_v_reg(s
, ot
, s
->tmp1_i64
, rm
);
4342 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4343 s
->mem_index
, MO_LEQ
);
4345 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
,
4346 offsetof(CPUX86State
,
4347 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4358 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4360 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
| REX_B(s
)]);
4362 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4363 gen_lea_modrm(env
, s
, modrm
);
4364 gen_ldo_env_A0(s
, op2_offset
);
4367 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4369 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4371 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4372 gen_lea_modrm(env
, s
, modrm
);
4373 gen_ldq_env_A0(s
, op2_offset
);
4376 val
= x86_ldub_code(env
, s
);
4378 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4379 set_cc_op(s
, CC_OP_EFLAGS
);
4381 if (s
->dflag
== MO_64
) {
4382 /* The helper must use entire 64-bit gp registers */
4387 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4388 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4389 sse_fn_eppi(cpu_env
, s
->ptr0
, s
->ptr1
, tcg_const_i32(val
));
4393 /* Various integer extensions at 0f 3a f[0-f]. */
4394 b
= modrm
| (b1
<< 8);
4395 modrm
= x86_ldub_code(env
, s
);
4396 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4399 case 0x3f0: /* rorx Gy,Ey, Ib */
4400 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4401 || !(s
->prefix
& PREFIX_VEX
)
4405 ot
= mo_64_32(s
->dflag
);
4406 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4407 b
= x86_ldub_code(env
, s
);
4409 tcg_gen_rotri_tl(s
->T0
, s
->T0
, b
& 63);
4411 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
4412 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, b
& 31);
4413 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
4415 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4425 gen_unknown_opcode(env
, s
);
4429 /* generic MMX or SSE operation */
4431 case 0x70: /* pshufx insn */
4432 case 0xc6: /* pshufx insn */
4433 case 0xc2: /* compare insns */
4440 op1_offset
= offsetof(CPUX86State
,xmm_regs
[reg
]);
4444 gen_lea_modrm(env
, s
, modrm
);
4445 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4451 /* Most sse scalar operations. */
4454 } else if (b1
== 3) {
4459 case 0x2e: /* ucomis[sd] */
4460 case 0x2f: /* comis[sd] */
4472 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
4473 tcg_gen_st32_tl(s
->T0
, cpu_env
,
4474 offsetof(CPUX86State
,xmm_t0
.ZMM_L(0)));
4478 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_D(0)));
4481 /* 128 bit access */
4482 gen_ldo_env_A0(s
, op2_offset
);
4486 rm
= (modrm
& 7) | REX_B(s
);
4487 op2_offset
= offsetof(CPUX86State
,xmm_regs
[rm
]);
4490 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4492 gen_lea_modrm(env
, s
, modrm
);
4493 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4494 gen_ldq_env_A0(s
, op2_offset
);
4497 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4501 case 0x0f: /* 3DNow! data insns */
4502 val
= x86_ldub_code(env
, s
);
4503 sse_fn_epp
= sse_op_table5
[val
];
4507 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
4510 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4511 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4512 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4514 case 0x70: /* pshufx insn */
4515 case 0xc6: /* pshufx insn */
4516 val
= x86_ldub_code(env
, s
);
4517 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4518 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4519 /* XXX: introduce a new table? */
4520 sse_fn_ppi
= (SSEFunc_0_ppi
)sse_fn_epp
;
4521 sse_fn_ppi(s
->ptr0
, s
->ptr1
, tcg_const_i32(val
));
4525 val
= x86_ldub_code(env
, s
);
4528 sse_fn_epp
= sse_op_table4
[val
][b1
];
4530 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4531 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4532 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4535 /* maskmov : we must prepare A0 */
4538 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EDI
]);
4539 gen_extu(s
->aflag
, s
->A0
);
4540 gen_add_A0_ds_seg(s
);
4542 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4543 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4544 /* XXX: introduce a new table? */
4545 sse_fn_eppt
= (SSEFunc_0_eppt
)sse_fn_epp
;
4546 sse_fn_eppt(cpu_env
, s
->ptr0
, s
->ptr1
, s
->A0
);
4549 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4550 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4551 sse_fn_epp(cpu_env
, s
->ptr0
, s
->ptr1
);
4554 if (b
== 0x2e || b
== 0x2f) {
4555 set_cc_op(s
, CC_OP_EFLAGS
);
4560 /* convert one instruction. s->base.is_jmp is set if the translation must
4561 be stopped. Return the next pc value */
4562 static target_ulong
disas_insn(DisasContext
*s
, CPUState
*cpu
)
4564 CPUX86State
*env
= cpu
->env_ptr
;
4567 MemOp ot
, aflag
, dflag
;
4568 int modrm
, reg
, rm
, mod
, op
, opreg
, val
;
4569 target_ulong next_eip
, tval
;
4570 target_ulong pc_start
= s
->base
.pc_next
;
4572 s
->pc_start
= s
->pc
= pc_start
;
4574 #ifdef TARGET_X86_64
4580 s
->rip_offset
= 0; /* for relative ip address */
4583 if (sigsetjmp(s
->jmpbuf
, 0) != 0) {
4584 gen_exception_gpf(s
);
4591 b
= x86_ldub_code(env
, s
);
4592 /* Collect prefixes. */
4595 prefixes
|= PREFIX_REPZ
;
4598 prefixes
|= PREFIX_REPNZ
;
4601 prefixes
|= PREFIX_LOCK
;
4622 prefixes
|= PREFIX_DATA
;
4625 prefixes
|= PREFIX_ADR
;
4627 #ifdef TARGET_X86_64
4631 prefixes
|= PREFIX_REX
;
4632 s
->rex_w
= (b
>> 3) & 1;
4633 s
->rex_r
= (b
& 0x4) << 1;
4634 s
->rex_x
= (b
& 0x2) << 2;
4635 s
->rex_b
= (b
& 0x1) << 3;
4640 case 0xc5: /* 2-byte VEX */
4641 case 0xc4: /* 3-byte VEX */
4642 /* VEX prefixes cannot be used except in 32-bit mode.
4643 Otherwise the instruction is LES or LDS. */
4644 if (CODE32(s
) && !VM86(s
)) {
4645 static const int pp_prefix
[4] = {
4646 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
4648 int vex3
, vex2
= x86_ldub_code(env
, s
);
4650 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
4651 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4652 otherwise the instruction is LES or LDS. */
4653 s
->pc
--; /* rewind the advance_pc() x86_ldub_code() did */
4657 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4658 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
4659 | PREFIX_LOCK
| PREFIX_DATA
| PREFIX_REX
)) {
4662 #ifdef TARGET_X86_64
4663 s
->rex_r
= (~vex2
>> 4) & 8;
4666 /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */
4668 b
= x86_ldub_code(env
, s
) | 0x100;
4670 /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */
4671 vex3
= x86_ldub_code(env
, s
);
4672 #ifdef TARGET_X86_64
4673 s
->rex_x
= (~vex2
>> 3) & 8;
4674 s
->rex_b
= (~vex2
>> 2) & 8;
4675 s
->rex_w
= (vex3
>> 7) & 1;
4677 switch (vex2
& 0x1f) {
4678 case 0x01: /* Implied 0f leading opcode bytes. */
4679 b
= x86_ldub_code(env
, s
) | 0x100;
4681 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4684 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4687 default: /* Reserved for future use. */
4691 s
->vex_v
= (~vex3
>> 3) & 0xf;
4692 s
->vex_l
= (vex3
>> 2) & 1;
4693 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
4698 /* Post-process prefixes. */
4700 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4701 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4702 over 0x66 if both are present. */
4703 dflag
= (REX_W(s
) ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
4704 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4705 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
4707 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4708 if (CODE32(s
) ^ ((prefixes
& PREFIX_DATA
) != 0)) {
4713 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4714 if (CODE32(s
) ^ ((prefixes
& PREFIX_ADR
) != 0)) {
4721 s
->prefix
= prefixes
;
4725 /* now check op code */
4729 /**************************/
4730 /* extended op code */
4731 b
= x86_ldub_code(env
, s
) | 0x100;
4734 /**************************/
4749 ot
= mo_b_d(b
, dflag
);
4752 case 0: /* OP Ev, Gv */
4753 modrm
= x86_ldub_code(env
, s
);
4754 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4755 mod
= (modrm
>> 6) & 3;
4756 rm
= (modrm
& 7) | REX_B(s
);
4758 gen_lea_modrm(env
, s
, modrm
);
4760 } else if (op
== OP_XORL
&& rm
== reg
) {
4762 /* xor reg, reg optimisation */
4763 set_cc_op(s
, CC_OP_CLR
);
4764 tcg_gen_movi_tl(s
->T0
, 0);
4765 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4770 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
4771 gen_op(s
, op
, ot
, opreg
);
4773 case 1: /* OP Gv, Ev */
4774 modrm
= x86_ldub_code(env
, s
);
4775 mod
= (modrm
>> 6) & 3;
4776 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4777 rm
= (modrm
& 7) | REX_B(s
);
4779 gen_lea_modrm(env
, s
, modrm
);
4780 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
4781 } else if (op
== OP_XORL
&& rm
== reg
) {
4784 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
4786 gen_op(s
, op
, ot
, reg
);
4788 case 2: /* OP A, Iv */
4789 val
= insn_get(env
, s
, ot
);
4790 tcg_gen_movi_tl(s
->T1
, val
);
4791 gen_op(s
, op
, ot
, OR_EAX
);
4801 case 0x80: /* GRP1 */
4807 ot
= mo_b_d(b
, dflag
);
4809 modrm
= x86_ldub_code(env
, s
);
4810 mod
= (modrm
>> 6) & 3;
4811 rm
= (modrm
& 7) | REX_B(s
);
4812 op
= (modrm
>> 3) & 7;
4818 s
->rip_offset
= insn_const_size(ot
);
4819 gen_lea_modrm(env
, s
, modrm
);
4830 val
= insn_get(env
, s
, ot
);
4833 val
= (int8_t)insn_get(env
, s
, MO_8
);
4836 tcg_gen_movi_tl(s
->T1
, val
);
4837 gen_op(s
, op
, ot
, opreg
);
4841 /**************************/
4842 /* inc, dec, and other misc arith */
4843 case 0x40 ... 0x47: /* inc Gv */
4845 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
4847 case 0x48 ... 0x4f: /* dec Gv */
4849 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
4851 case 0xf6: /* GRP3 */
4853 ot
= mo_b_d(b
, dflag
);
4855 modrm
= x86_ldub_code(env
, s
);
4856 mod
= (modrm
>> 6) & 3;
4857 rm
= (modrm
& 7) | REX_B(s
);
4858 op
= (modrm
>> 3) & 7;
4861 s
->rip_offset
= insn_const_size(ot
);
4863 gen_lea_modrm(env
, s
, modrm
);
4864 /* For those below that handle locked memory, don't load here. */
4865 if (!(s
->prefix
& PREFIX_LOCK
)
4867 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
4870 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
4875 val
= insn_get(env
, s
, ot
);
4876 tcg_gen_movi_tl(s
->T1
, val
);
4877 gen_op_testl_T0_T1_cc(s
);
4878 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4881 if (s
->prefix
& PREFIX_LOCK
) {
4885 tcg_gen_movi_tl(s
->T0
, ~0);
4886 tcg_gen_atomic_xor_fetch_tl(s
->T0
, s
->A0
, s
->T0
,
4887 s
->mem_index
, ot
| MO_LE
);
4889 tcg_gen_not_tl(s
->T0
, s
->T0
);
4891 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
4893 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4898 if (s
->prefix
& PREFIX_LOCK
) {
4900 TCGv a0
, t0
, t1
, t2
;
4905 a0
= tcg_temp_local_new();
4906 t0
= tcg_temp_local_new();
4907 label1
= gen_new_label();
4909 tcg_gen_mov_tl(a0
, s
->A0
);
4910 tcg_gen_mov_tl(t0
, s
->T0
);
4912 gen_set_label(label1
);
4913 t1
= tcg_temp_new();
4914 t2
= tcg_temp_new();
4915 tcg_gen_mov_tl(t2
, t0
);
4916 tcg_gen_neg_tl(t1
, t0
);
4917 tcg_gen_atomic_cmpxchg_tl(t0
, a0
, t0
, t1
,
4918 s
->mem_index
, ot
| MO_LE
);
4920 tcg_gen_brcond_tl(TCG_COND_NE
, t0
, t2
, label1
);
4924 tcg_gen_mov_tl(s
->T0
, t0
);
4927 tcg_gen_neg_tl(s
->T0
, s
->T0
);
4929 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
4931 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4934 gen_op_update_neg_cc(s
);
4935 set_cc_op(s
, CC_OP_SUBB
+ ot
);
4940 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
4941 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
4942 tcg_gen_ext8u_tl(s
->T1
, s
->T1
);
4943 /* XXX: use 32 bit mul which could be faster */
4944 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
4945 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
4946 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4947 tcg_gen_andi_tl(cpu_cc_src
, s
->T0
, 0xff00);
4948 set_cc_op(s
, CC_OP_MULB
);
4951 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
4952 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
4953 tcg_gen_ext16u_tl(s
->T1
, s
->T1
);
4954 /* XXX: use 32 bit mul which could be faster */
4955 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
4956 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
4957 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4958 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
4959 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
4960 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
4961 set_cc_op(s
, CC_OP_MULW
);
4965 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
4966 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
4967 tcg_gen_mulu2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
4968 s
->tmp2_i32
, s
->tmp3_i32
);
4969 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
4970 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
4971 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4972 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
4973 set_cc_op(s
, CC_OP_MULL
);
4975 #ifdef TARGET_X86_64
4977 tcg_gen_mulu2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
4978 s
->T0
, cpu_regs
[R_EAX
]);
4979 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
4980 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
4981 set_cc_op(s
, CC_OP_MULQ
);
4989 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
4990 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
4991 tcg_gen_ext8s_tl(s
->T1
, s
->T1
);
4992 /* XXX: use 32 bit mul which could be faster */
4993 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
4994 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
4995 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4996 tcg_gen_ext8s_tl(s
->tmp0
, s
->T0
);
4997 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
4998 set_cc_op(s
, CC_OP_MULB
);
5001 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
5002 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5003 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
5004 /* XXX: use 32 bit mul which could be faster */
5005 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
5006 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
5007 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5008 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
5009 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
5010 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
5011 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
5012 set_cc_op(s
, CC_OP_MULW
);
5016 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5017 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
5018 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
5019 s
->tmp2_i32
, s
->tmp3_i32
);
5020 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
5021 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
5022 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
5023 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5024 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
5025 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
5026 set_cc_op(s
, CC_OP_MULL
);
5028 #ifdef TARGET_X86_64
5030 tcg_gen_muls2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
5031 s
->T0
, cpu_regs
[R_EAX
]);
5032 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5033 tcg_gen_sari_tl(cpu_cc_src
, cpu_regs
[R_EAX
], 63);
5034 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_regs
[R_EDX
]);
5035 set_cc_op(s
, CC_OP_MULQ
);
5043 gen_helper_divb_AL(cpu_env
, s
->T0
);
5046 gen_helper_divw_AX(cpu_env
, s
->T0
);
5050 gen_helper_divl_EAX(cpu_env
, s
->T0
);
5052 #ifdef TARGET_X86_64
5054 gen_helper_divq_EAX(cpu_env
, s
->T0
);
5062 gen_helper_idivb_AL(cpu_env
, s
->T0
);
5065 gen_helper_idivw_AX(cpu_env
, s
->T0
);
5069 gen_helper_idivl_EAX(cpu_env
, s
->T0
);
5071 #ifdef TARGET_X86_64
5073 gen_helper_idivq_EAX(cpu_env
, s
->T0
);
5083 case 0xfe: /* GRP4 */
5084 case 0xff: /* GRP5 */
5085 ot
= mo_b_d(b
, dflag
);
5087 modrm
= x86_ldub_code(env
, s
);
5088 mod
= (modrm
>> 6) & 3;
5089 rm
= (modrm
& 7) | REX_B(s
);
5090 op
= (modrm
>> 3) & 7;
5091 if (op
>= 2 && b
== 0xfe) {
5095 if (op
== 2 || op
== 4) {
5096 /* operand size for jumps is 64 bit */
5098 } else if (op
== 3 || op
== 5) {
5099 ot
= dflag
!= MO_16
? MO_32
+ REX_W(s
) : MO_16
;
5100 } else if (op
== 6) {
5101 /* default push size is 64 bit */
5102 ot
= mo_pushpop(s
, dflag
);
5106 gen_lea_modrm(env
, s
, modrm
);
5107 if (op
>= 2 && op
!= 3 && op
!= 5)
5108 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5110 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5114 case 0: /* inc Ev */
5119 gen_inc(s
, ot
, opreg
, 1);
5121 case 1: /* dec Ev */
5126 gen_inc(s
, ot
, opreg
, -1);
5128 case 2: /* call Ev */
5129 /* XXX: optimize if memory (no 'and' is necessary) */
5130 if (dflag
== MO_16
) {
5131 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5133 next_eip
= s
->pc
- s
->cs_base
;
5134 tcg_gen_movi_tl(s
->T1
, next_eip
);
5135 gen_push_v(s
, s
->T1
);
5136 gen_op_jmp_v(s
->T0
);
5140 case 3: /* lcall Ev */
5144 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5145 gen_add_A0_im(s
, 1 << ot
);
5146 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
5148 if (PE(s
) && !VM86(s
)) {
5149 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5150 gen_helper_lcall_protected(cpu_env
, s
->tmp2_i32
, s
->T1
,
5151 tcg_const_i32(dflag
- 1),
5152 tcg_const_tl(s
->pc
- s
->cs_base
));
5154 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5155 gen_helper_lcall_real(cpu_env
, s
->tmp2_i32
, s
->T1
,
5156 tcg_const_i32(dflag
- 1),
5157 tcg_const_i32(s
->pc
- s
->cs_base
));
5159 tcg_gen_ld_tl(s
->tmp4
, cpu_env
, offsetof(CPUX86State
, eip
));
5162 case 4: /* jmp Ev */
5163 if (dflag
== MO_16
) {
5164 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5166 gen_op_jmp_v(s
->T0
);
5170 case 5: /* ljmp Ev */
5174 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5175 gen_add_A0_im(s
, 1 << ot
);
5176 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
5178 if (PE(s
) && !VM86(s
)) {
5179 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5180 gen_helper_ljmp_protected(cpu_env
, s
->tmp2_i32
, s
->T1
,
5181 tcg_const_tl(s
->pc
- s
->cs_base
));
5183 gen_op_movl_seg_T0_vm(s
, R_CS
);
5184 gen_op_jmp_v(s
->T1
);
5186 tcg_gen_ld_tl(s
->tmp4
, cpu_env
, offsetof(CPUX86State
, eip
));
5189 case 6: /* push Ev */
5190 gen_push_v(s
, s
->T0
);
5197 case 0x84: /* test Ev, Gv */
5199 ot
= mo_b_d(b
, dflag
);
5201 modrm
= x86_ldub_code(env
, s
);
5202 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5204 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5205 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5206 gen_op_testl_T0_T1_cc(s
);
5207 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5210 case 0xa8: /* test eAX, Iv */
5212 ot
= mo_b_d(b
, dflag
);
5213 val
= insn_get(env
, s
, ot
);
5215 gen_op_mov_v_reg(s
, ot
, s
->T0
, OR_EAX
);
5216 tcg_gen_movi_tl(s
->T1
, val
);
5217 gen_op_testl_T0_T1_cc(s
);
5218 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5221 case 0x98: /* CWDE/CBW */
5223 #ifdef TARGET_X86_64
5225 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
5226 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
5227 gen_op_mov_reg_v(s
, MO_64
, R_EAX
, s
->T0
);
5231 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
5232 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5233 gen_op_mov_reg_v(s
, MO_32
, R_EAX
, s
->T0
);
5236 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_EAX
);
5237 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
5238 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
5244 case 0x99: /* CDQ/CWD */
5246 #ifdef TARGET_X86_64
5248 gen_op_mov_v_reg(s
, MO_64
, s
->T0
, R_EAX
);
5249 tcg_gen_sari_tl(s
->T0
, s
->T0
, 63);
5250 gen_op_mov_reg_v(s
, MO_64
, R_EDX
, s
->T0
);
5254 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
5255 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
5256 tcg_gen_sari_tl(s
->T0
, s
->T0
, 31);
5257 gen_op_mov_reg_v(s
, MO_32
, R_EDX
, s
->T0
);
5260 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
5261 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5262 tcg_gen_sari_tl(s
->T0
, s
->T0
, 15);
5263 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
5269 case 0x1af: /* imul Gv, Ev */
5270 case 0x69: /* imul Gv, Ev, I */
5273 modrm
= x86_ldub_code(env
, s
);
5274 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5276 s
->rip_offset
= insn_const_size(ot
);
5279 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5281 val
= insn_get(env
, s
, ot
);
5282 tcg_gen_movi_tl(s
->T1
, val
);
5283 } else if (b
== 0x6b) {
5284 val
= (int8_t)insn_get(env
, s
, MO_8
);
5285 tcg_gen_movi_tl(s
->T1
, val
);
5287 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5290 #ifdef TARGET_X86_64
5292 tcg_gen_muls2_i64(cpu_regs
[reg
], s
->T1
, s
->T0
, s
->T1
);
5293 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5294 tcg_gen_sari_tl(cpu_cc_src
, cpu_cc_dst
, 63);
5295 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, s
->T1
);
5299 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5300 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5301 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
5302 s
->tmp2_i32
, s
->tmp3_i32
);
5303 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
5304 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
5305 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5306 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
5307 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
5310 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5311 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
5312 /* XXX: use 32 bit mul which could be faster */
5313 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
5314 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5315 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
5316 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
5317 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5320 set_cc_op(s
, CC_OP_MULB
+ ot
);
5323 case 0x1c1: /* xadd Ev, Gv */
5324 ot
= mo_b_d(b
, dflag
);
5325 modrm
= x86_ldub_code(env
, s
);
5326 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5327 mod
= (modrm
>> 6) & 3;
5328 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5330 rm
= (modrm
& 7) | REX_B(s
);
5331 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
5332 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
5333 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5334 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5336 gen_lea_modrm(env
, s
, modrm
);
5337 if (s
->prefix
& PREFIX_LOCK
) {
5338 tcg_gen_atomic_fetch_add_tl(s
->T1
, s
->A0
, s
->T0
,
5339 s
->mem_index
, ot
| MO_LE
);
5340 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
5342 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5343 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
5344 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5346 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5348 gen_op_update2_cc(s
);
5349 set_cc_op(s
, CC_OP_ADDB
+ ot
);
5352 case 0x1b1: /* cmpxchg Ev, Gv */
5354 TCGv oldv
, newv
, cmpv
;
5356 ot
= mo_b_d(b
, dflag
);
5357 modrm
= x86_ldub_code(env
, s
);
5358 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5359 mod
= (modrm
>> 6) & 3;
5360 oldv
= tcg_temp_new();
5361 newv
= tcg_temp_new();
5362 cmpv
= tcg_temp_new();
5363 gen_op_mov_v_reg(s
, ot
, newv
, reg
);
5364 tcg_gen_mov_tl(cmpv
, cpu_regs
[R_EAX
]);
5366 if (s
->prefix
& PREFIX_LOCK
) {
5370 gen_lea_modrm(env
, s
, modrm
);
5371 tcg_gen_atomic_cmpxchg_tl(oldv
, s
->A0
, cmpv
, newv
,
5372 s
->mem_index
, ot
| MO_LE
);
5373 gen_op_mov_reg_v(s
, ot
, R_EAX
, oldv
);
5376 rm
= (modrm
& 7) | REX_B(s
);
5377 gen_op_mov_v_reg(s
, ot
, oldv
, rm
);
5379 gen_lea_modrm(env
, s
, modrm
);
5380 gen_op_ld_v(s
, ot
, oldv
, s
->A0
);
5381 rm
= 0; /* avoid warning */
5385 /* store value = (old == cmp ? new : old); */
5386 tcg_gen_movcond_tl(TCG_COND_EQ
, newv
, oldv
, cmpv
, newv
, oldv
);
5388 gen_op_mov_reg_v(s
, ot
, R_EAX
, oldv
);
5389 gen_op_mov_reg_v(s
, ot
, rm
, newv
);
5391 /* Perform an unconditional store cycle like physical cpu;
5392 must be before changing accumulator to ensure
5393 idempotency if the store faults and the instruction
5395 gen_op_st_v(s
, ot
, newv
, s
->A0
);
5396 gen_op_mov_reg_v(s
, ot
, R_EAX
, oldv
);
5399 tcg_gen_mov_tl(cpu_cc_src
, oldv
);
5400 tcg_gen_mov_tl(s
->cc_srcT
, cmpv
);
5401 tcg_gen_sub_tl(cpu_cc_dst
, cmpv
, oldv
);
5402 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5403 tcg_temp_free(oldv
);
5404 tcg_temp_free(newv
);
5405 tcg_temp_free(cmpv
);
5408 case 0x1c7: /* cmpxchg8b */
5409 modrm
= x86_ldub_code(env
, s
);
5410 mod
= (modrm
>> 6) & 3;
5411 switch ((modrm
>> 3) & 7) {
5412 case 1: /* CMPXCHG8, CMPXCHG16 */
5416 #ifdef TARGET_X86_64
5417 if (dflag
== MO_64
) {
5418 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
)) {
5421 gen_lea_modrm(env
, s
, modrm
);
5422 if ((s
->prefix
& PREFIX_LOCK
) &&
5423 (tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
5424 gen_helper_cmpxchg16b(cpu_env
, s
->A0
);
5426 gen_helper_cmpxchg16b_unlocked(cpu_env
, s
->A0
);
5428 set_cc_op(s
, CC_OP_EFLAGS
);
5432 if (!(s
->cpuid_features
& CPUID_CX8
)) {
5435 gen_lea_modrm(env
, s
, modrm
);
5436 if ((s
->prefix
& PREFIX_LOCK
) &&
5437 (tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
5438 gen_helper_cmpxchg8b(cpu_env
, s
->A0
);
5440 gen_helper_cmpxchg8b_unlocked(cpu_env
, s
->A0
);
5442 set_cc_op(s
, CC_OP_EFLAGS
);
5445 case 7: /* RDSEED */
5446 case 6: /* RDRAND */
5448 (s
->prefix
& (PREFIX_LOCK
| PREFIX_REPZ
| PREFIX_REPNZ
)) ||
5449 !(s
->cpuid_ext_features
& CPUID_EXT_RDRAND
)) {
5452 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5455 gen_helper_rdrand(s
->T0
, cpu_env
);
5456 rm
= (modrm
& 7) | REX_B(s
);
5457 gen_op_mov_reg_v(s
, dflag
, rm
, s
->T0
);
5458 set_cc_op(s
, CC_OP_EFLAGS
);
5459 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5460 gen_jmp(s
, s
->pc
- s
->cs_base
);
5469 /**************************/
5471 case 0x50 ... 0x57: /* push */
5472 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, (b
& 7) | REX_B(s
));
5473 gen_push_v(s
, s
->T0
);
5475 case 0x58 ... 0x5f: /* pop */
5477 /* NOTE: order is important for pop %sp */
5478 gen_pop_update(s
, ot
);
5479 gen_op_mov_reg_v(s
, ot
, (b
& 7) | REX_B(s
), s
->T0
);
5481 case 0x60: /* pusha */
5486 case 0x61: /* popa */
5491 case 0x68: /* push Iv */
5493 ot
= mo_pushpop(s
, dflag
);
5495 val
= insn_get(env
, s
, ot
);
5497 val
= (int8_t)insn_get(env
, s
, MO_8
);
5498 tcg_gen_movi_tl(s
->T0
, val
);
5499 gen_push_v(s
, s
->T0
);
5501 case 0x8f: /* pop Ev */
5502 modrm
= x86_ldub_code(env
, s
);
5503 mod
= (modrm
>> 6) & 3;
5506 /* NOTE: order is important for pop %sp */
5507 gen_pop_update(s
, ot
);
5508 rm
= (modrm
& 7) | REX_B(s
);
5509 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5511 /* NOTE: order is important too for MMU exceptions */
5512 s
->popl_esp_hack
= 1 << ot
;
5513 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5514 s
->popl_esp_hack
= 0;
5515 gen_pop_update(s
, ot
);
5518 case 0xc8: /* enter */
5521 val
= x86_lduw_code(env
, s
);
5522 level
= x86_ldub_code(env
, s
);
5523 gen_enter(s
, val
, level
);
5526 case 0xc9: /* leave */
5529 case 0x06: /* push es */
5530 case 0x0e: /* push cs */
5531 case 0x16: /* push ss */
5532 case 0x1e: /* push ds */
5535 gen_op_movl_T0_seg(s
, b
>> 3);
5536 gen_push_v(s
, s
->T0
);
5538 case 0x1a0: /* push fs */
5539 case 0x1a8: /* push gs */
5540 gen_op_movl_T0_seg(s
, (b
>> 3) & 7);
5541 gen_push_v(s
, s
->T0
);
5543 case 0x07: /* pop es */
5544 case 0x17: /* pop ss */
5545 case 0x1f: /* pop ds */
5550 gen_movl_seg_T0(s
, reg
);
5551 gen_pop_update(s
, ot
);
5552 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5553 if (s
->base
.is_jmp
) {
5554 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
5556 s
->flags
&= ~HF_TF_MASK
;
5557 gen_eob_inhibit_irq(s
, true);
5563 case 0x1a1: /* pop fs */
5564 case 0x1a9: /* pop gs */
5566 gen_movl_seg_T0(s
, (b
>> 3) & 7);
5567 gen_pop_update(s
, ot
);
5568 if (s
->base
.is_jmp
) {
5569 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
5574 /**************************/
5577 case 0x89: /* mov Gv, Ev */
5578 ot
= mo_b_d(b
, dflag
);
5579 modrm
= x86_ldub_code(env
, s
);
5580 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5582 /* generate a generic store */
5583 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5586 case 0xc7: /* mov Ev, Iv */
5587 ot
= mo_b_d(b
, dflag
);
5588 modrm
= x86_ldub_code(env
, s
);
5589 mod
= (modrm
>> 6) & 3;
5591 s
->rip_offset
= insn_const_size(ot
);
5592 gen_lea_modrm(env
, s
, modrm
);
5594 val
= insn_get(env
, s
, ot
);
5595 tcg_gen_movi_tl(s
->T0
, val
);
5597 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5599 gen_op_mov_reg_v(s
, ot
, (modrm
& 7) | REX_B(s
), s
->T0
);
5603 case 0x8b: /* mov Ev, Gv */
5604 ot
= mo_b_d(b
, dflag
);
5605 modrm
= x86_ldub_code(env
, s
);
5606 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5608 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5609 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5611 case 0x8e: /* mov seg, Gv */
5612 modrm
= x86_ldub_code(env
, s
);
5613 reg
= (modrm
>> 3) & 7;
5614 if (reg
>= 6 || reg
== R_CS
)
5616 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5617 gen_movl_seg_T0(s
, reg
);
5618 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5619 if (s
->base
.is_jmp
) {
5620 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
5622 s
->flags
&= ~HF_TF_MASK
;
5623 gen_eob_inhibit_irq(s
, true);
5629 case 0x8c: /* mov Gv, seg */
5630 modrm
= x86_ldub_code(env
, s
);
5631 reg
= (modrm
>> 3) & 7;
5632 mod
= (modrm
>> 6) & 3;
5635 gen_op_movl_T0_seg(s
, reg
);
5636 ot
= mod
== 3 ? dflag
: MO_16
;
5637 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5640 case 0x1b6: /* movzbS Gv, Eb */
5641 case 0x1b7: /* movzwS Gv, Eb */
5642 case 0x1be: /* movsbS Gv, Eb */
5643 case 0x1bf: /* movswS Gv, Eb */
5648 /* d_ot is the size of destination */
5650 /* ot is the size of source */
5651 ot
= (b
& 1) + MO_8
;
5652 /* s_ot is the sign+size of source */
5653 s_ot
= b
& 8 ? MO_SIGN
| ot
: ot
;
5655 modrm
= x86_ldub_code(env
, s
);
5656 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5657 mod
= (modrm
>> 6) & 3;
5658 rm
= (modrm
& 7) | REX_B(s
);
5661 if (s_ot
== MO_SB
&& byte_reg_is_xH(s
, rm
)) {
5662 tcg_gen_sextract_tl(s
->T0
, cpu_regs
[rm
- 4], 8, 8);
5664 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5667 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
5670 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
5673 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5677 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5681 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
5683 gen_lea_modrm(env
, s
, modrm
);
5684 gen_op_ld_v(s
, s_ot
, s
->T0
, s
->A0
);
5685 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
5690 case 0x8d: /* lea */
5691 modrm
= x86_ldub_code(env
, s
);
5692 mod
= (modrm
>> 6) & 3;
5695 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5697 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
5698 TCGv ea
= gen_lea_modrm_1(s
, a
);
5699 gen_lea_v_seg(s
, s
->aflag
, ea
, -1, -1);
5700 gen_op_mov_reg_v(s
, dflag
, reg
, s
->A0
);
5704 case 0xa0: /* mov EAX, Ov */
5706 case 0xa2: /* mov Ov, EAX */
5709 target_ulong offset_addr
;
5711 ot
= mo_b_d(b
, dflag
);
5713 #ifdef TARGET_X86_64
5715 offset_addr
= x86_ldq_code(env
, s
);
5719 offset_addr
= insn_get(env
, s
, s
->aflag
);
5722 tcg_gen_movi_tl(s
->A0
, offset_addr
);
5723 gen_add_A0_ds_seg(s
);
5725 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5726 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
5728 gen_op_mov_v_reg(s
, ot
, s
->T0
, R_EAX
);
5729 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5733 case 0xd7: /* xlat */
5734 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EBX
]);
5735 tcg_gen_ext8u_tl(s
->T0
, cpu_regs
[R_EAX
]);
5736 tcg_gen_add_tl(s
->A0
, s
->A0
, s
->T0
);
5737 gen_extu(s
->aflag
, s
->A0
);
5738 gen_add_A0_ds_seg(s
);
5739 gen_op_ld_v(s
, MO_8
, s
->T0
, s
->A0
);
5740 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
5742 case 0xb0 ... 0xb7: /* mov R, Ib */
5743 val
= insn_get(env
, s
, MO_8
);
5744 tcg_gen_movi_tl(s
->T0
, val
);
5745 gen_op_mov_reg_v(s
, MO_8
, (b
& 7) | REX_B(s
), s
->T0
);
5747 case 0xb8 ... 0xbf: /* mov R, Iv */
5748 #ifdef TARGET_X86_64
5749 if (dflag
== MO_64
) {
5752 tmp
= x86_ldq_code(env
, s
);
5753 reg
= (b
& 7) | REX_B(s
);
5754 tcg_gen_movi_tl(s
->T0
, tmp
);
5755 gen_op_mov_reg_v(s
, MO_64
, reg
, s
->T0
);
5760 val
= insn_get(env
, s
, ot
);
5761 reg
= (b
& 7) | REX_B(s
);
5762 tcg_gen_movi_tl(s
->T0
, val
);
5763 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5767 case 0x91 ... 0x97: /* xchg R, EAX */
5770 reg
= (b
& 7) | REX_B(s
);
5774 case 0x87: /* xchg Ev, Gv */
5775 ot
= mo_b_d(b
, dflag
);
5776 modrm
= x86_ldub_code(env
, s
);
5777 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5778 mod
= (modrm
>> 6) & 3;
5780 rm
= (modrm
& 7) | REX_B(s
);
5782 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5783 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
5784 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5785 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5787 gen_lea_modrm(env
, s
, modrm
);
5788 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5789 /* for xchg, lock is implicit */
5790 tcg_gen_atomic_xchg_tl(s
->T1
, s
->A0
, s
->T0
,
5791 s
->mem_index
, ot
| MO_LE
);
5792 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5795 case 0xc4: /* les Gv */
5796 /* In CODE64 this is VEX3; see above. */
5799 case 0xc5: /* lds Gv */
5800 /* In CODE64 this is VEX2; see above. */
5803 case 0x1b2: /* lss Gv */
5806 case 0x1b4: /* lfs Gv */
5809 case 0x1b5: /* lgs Gv */
5812 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
5813 modrm
= x86_ldub_code(env
, s
);
5814 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5815 mod
= (modrm
>> 6) & 3;
5818 gen_lea_modrm(env
, s
, modrm
);
5819 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5820 gen_add_A0_im(s
, 1 << ot
);
5821 /* load the segment first to handle exceptions properly */
5822 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
5823 gen_movl_seg_T0(s
, op
);
5824 /* then put the data */
5825 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5826 if (s
->base
.is_jmp
) {
5827 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
5832 /************************/
5840 ot
= mo_b_d(b
, dflag
);
5841 modrm
= x86_ldub_code(env
, s
);
5842 mod
= (modrm
>> 6) & 3;
5843 op
= (modrm
>> 3) & 7;
5849 gen_lea_modrm(env
, s
, modrm
);
5852 opreg
= (modrm
& 7) | REX_B(s
);
5857 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
5860 shift
= x86_ldub_code(env
, s
);
5862 gen_shifti(s
, op
, ot
, opreg
, shift
);
5877 case 0x1a4: /* shld imm */
5881 case 0x1a5: /* shld cl */
5885 case 0x1ac: /* shrd imm */
5889 case 0x1ad: /* shrd cl */
5894 modrm
= x86_ldub_code(env
, s
);
5895 mod
= (modrm
>> 6) & 3;
5896 rm
= (modrm
& 7) | REX_B(s
);
5897 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5899 gen_lea_modrm(env
, s
, modrm
);
5904 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5907 TCGv imm
= tcg_const_tl(x86_ldub_code(env
, s
));
5908 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
5911 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
5915 /************************/
5918 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
5919 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5920 /* XXX: what to do if illegal op ? */
5921 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
5924 modrm
= x86_ldub_code(env
, s
);
5925 mod
= (modrm
>> 6) & 3;
5927 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
5930 gen_lea_modrm(env
, s
, modrm
);
5932 case 0x00 ... 0x07: /* fxxxs */
5933 case 0x10 ... 0x17: /* fixxxl */
5934 case 0x20 ... 0x27: /* fxxxl */
5935 case 0x30 ... 0x37: /* fixxx */
5942 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5943 s
->mem_index
, MO_LEUL
);
5944 gen_helper_flds_FT0(cpu_env
, s
->tmp2_i32
);
5947 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5948 s
->mem_index
, MO_LEUL
);
5949 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
5952 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
5953 s
->mem_index
, MO_LEQ
);
5954 gen_helper_fldl_FT0(cpu_env
, s
->tmp1_i64
);
5958 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5959 s
->mem_index
, MO_LESW
);
5960 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
5964 gen_helper_fp_arith_ST0_FT0(op1
);
5966 /* fcomp needs pop */
5967 gen_helper_fpop(cpu_env
);
5971 case 0x08: /* flds */
5972 case 0x0a: /* fsts */
5973 case 0x0b: /* fstps */
5974 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5975 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5976 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5981 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5982 s
->mem_index
, MO_LEUL
);
5983 gen_helper_flds_ST0(cpu_env
, s
->tmp2_i32
);
5986 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5987 s
->mem_index
, MO_LEUL
);
5988 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
5991 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
5992 s
->mem_index
, MO_LEQ
);
5993 gen_helper_fldl_ST0(cpu_env
, s
->tmp1_i64
);
5997 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
5998 s
->mem_index
, MO_LESW
);
5999 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
6004 /* XXX: the corresponding CPUID bit must be tested ! */
6007 gen_helper_fisttl_ST0(s
->tmp2_i32
, cpu_env
);
6008 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6009 s
->mem_index
, MO_LEUL
);
6012 gen_helper_fisttll_ST0(s
->tmp1_i64
, cpu_env
);
6013 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
6014 s
->mem_index
, MO_LEQ
);
6018 gen_helper_fistt_ST0(s
->tmp2_i32
, cpu_env
);
6019 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6020 s
->mem_index
, MO_LEUW
);
6023 gen_helper_fpop(cpu_env
);
6028 gen_helper_fsts_ST0(s
->tmp2_i32
, cpu_env
);
6029 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6030 s
->mem_index
, MO_LEUL
);
6033 gen_helper_fistl_ST0(s
->tmp2_i32
, cpu_env
);
6034 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6035 s
->mem_index
, MO_LEUL
);
6038 gen_helper_fstl_ST0(s
->tmp1_i64
, cpu_env
);
6039 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
6040 s
->mem_index
, MO_LEQ
);
6044 gen_helper_fist_ST0(s
->tmp2_i32
, cpu_env
);
6045 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6046 s
->mem_index
, MO_LEUW
);
6050 gen_helper_fpop(cpu_env
);
6054 case 0x0c: /* fldenv mem */
6055 gen_helper_fldenv(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
6057 case 0x0d: /* fldcw mem */
6058 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
6059 s
->mem_index
, MO_LEUW
);
6060 gen_helper_fldcw(cpu_env
, s
->tmp2_i32
);
6062 case 0x0e: /* fnstenv mem */
6063 gen_helper_fstenv(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
6065 case 0x0f: /* fnstcw mem */
6066 gen_helper_fnstcw(s
->tmp2_i32
, cpu_env
);
6067 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6068 s
->mem_index
, MO_LEUW
);
6070 case 0x1d: /* fldt mem */
6071 gen_helper_fldt_ST0(cpu_env
, s
->A0
);
6073 case 0x1f: /* fstpt mem */
6074 gen_helper_fstt_ST0(cpu_env
, s
->A0
);
6075 gen_helper_fpop(cpu_env
);
6077 case 0x2c: /* frstor mem */
6078 gen_helper_frstor(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
6080 case 0x2e: /* fnsave mem */
6081 gen_helper_fsave(cpu_env
, s
->A0
, tcg_const_i32(dflag
- 1));
6083 case 0x2f: /* fnstsw mem */
6084 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
6085 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6086 s
->mem_index
, MO_LEUW
);
6088 case 0x3c: /* fbld */
6089 gen_helper_fbld_ST0(cpu_env
, s
->A0
);
6091 case 0x3e: /* fbstp */
6092 gen_helper_fbst_ST0(cpu_env
, s
->A0
);
6093 gen_helper_fpop(cpu_env
);
6095 case 0x3d: /* fildll */
6096 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEQ
);
6097 gen_helper_fildll_ST0(cpu_env
, s
->tmp1_i64
);
6099 case 0x3f: /* fistpll */
6100 gen_helper_fistll_ST0(s
->tmp1_i64
, cpu_env
);
6101 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEQ
);
6102 gen_helper_fpop(cpu_env
);
6108 /* register float ops */
6112 case 0x08: /* fld sti */
6113 gen_helper_fpush(cpu_env
);
6114 gen_helper_fmov_ST0_STN(cpu_env
,
6115 tcg_const_i32((opreg
+ 1) & 7));
6117 case 0x09: /* fxchg sti */
6118 case 0x29: /* fxchg4 sti, undocumented op */
6119 case 0x39: /* fxchg7 sti, undocumented op */
6120 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6122 case 0x0a: /* grp d9/2 */
6125 /* check exceptions (FreeBSD FPU probe) */
6126 gen_helper_fwait(cpu_env
);
6132 case 0x0c: /* grp d9/4 */
6135 gen_helper_fchs_ST0(cpu_env
);
6138 gen_helper_fabs_ST0(cpu_env
);
6141 gen_helper_fldz_FT0(cpu_env
);
6142 gen_helper_fcom_ST0_FT0(cpu_env
);
6145 gen_helper_fxam_ST0(cpu_env
);
6151 case 0x0d: /* grp d9/5 */
6155 gen_helper_fpush(cpu_env
);
6156 gen_helper_fld1_ST0(cpu_env
);
6159 gen_helper_fpush(cpu_env
);
6160 gen_helper_fldl2t_ST0(cpu_env
);
6163 gen_helper_fpush(cpu_env
);
6164 gen_helper_fldl2e_ST0(cpu_env
);
6167 gen_helper_fpush(cpu_env
);
6168 gen_helper_fldpi_ST0(cpu_env
);
6171 gen_helper_fpush(cpu_env
);
6172 gen_helper_fldlg2_ST0(cpu_env
);
6175 gen_helper_fpush(cpu_env
);
6176 gen_helper_fldln2_ST0(cpu_env
);
6179 gen_helper_fpush(cpu_env
);
6180 gen_helper_fldz_ST0(cpu_env
);
6187 case 0x0e: /* grp d9/6 */
6190 gen_helper_f2xm1(cpu_env
);
6193 gen_helper_fyl2x(cpu_env
);
6196 gen_helper_fptan(cpu_env
);
6198 case 3: /* fpatan */
6199 gen_helper_fpatan(cpu_env
);
6201 case 4: /* fxtract */
6202 gen_helper_fxtract(cpu_env
);
6204 case 5: /* fprem1 */
6205 gen_helper_fprem1(cpu_env
);
6207 case 6: /* fdecstp */
6208 gen_helper_fdecstp(cpu_env
);
6211 case 7: /* fincstp */
6212 gen_helper_fincstp(cpu_env
);
6216 case 0x0f: /* grp d9/7 */
6219 gen_helper_fprem(cpu_env
);
6221 case 1: /* fyl2xp1 */
6222 gen_helper_fyl2xp1(cpu_env
);
6225 gen_helper_fsqrt(cpu_env
);
6227 case 3: /* fsincos */
6228 gen_helper_fsincos(cpu_env
);
6230 case 5: /* fscale */
6231 gen_helper_fscale(cpu_env
);
6233 case 4: /* frndint */
6234 gen_helper_frndint(cpu_env
);
6237 gen_helper_fsin(cpu_env
);
6241 gen_helper_fcos(cpu_env
);
6245 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6246 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6247 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6253 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6255 gen_helper_fpop(cpu_env
);
6257 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6258 gen_helper_fp_arith_ST0_FT0(op1
);
6262 case 0x02: /* fcom */
6263 case 0x22: /* fcom2, undocumented op */
6264 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6265 gen_helper_fcom_ST0_FT0(cpu_env
);
6267 case 0x03: /* fcomp */
6268 case 0x23: /* fcomp3, undocumented op */
6269 case 0x32: /* fcomp5, undocumented op */
6270 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6271 gen_helper_fcom_ST0_FT0(cpu_env
);
6272 gen_helper_fpop(cpu_env
);
6274 case 0x15: /* da/5 */
6276 case 1: /* fucompp */
6277 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6278 gen_helper_fucom_ST0_FT0(cpu_env
);
6279 gen_helper_fpop(cpu_env
);
6280 gen_helper_fpop(cpu_env
);
6288 case 0: /* feni (287 only, just do nop here) */
6290 case 1: /* fdisi (287 only, just do nop here) */
6293 gen_helper_fclex(cpu_env
);
6295 case 3: /* fninit */
6296 gen_helper_fninit(cpu_env
);
6298 case 4: /* fsetpm (287 only, just do nop here) */
6304 case 0x1d: /* fucomi */
6305 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6308 gen_update_cc_op(s
);
6309 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6310 gen_helper_fucomi_ST0_FT0(cpu_env
);
6311 set_cc_op(s
, CC_OP_EFLAGS
);
6313 case 0x1e: /* fcomi */
6314 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6317 gen_update_cc_op(s
);
6318 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6319 gen_helper_fcomi_ST0_FT0(cpu_env
);
6320 set_cc_op(s
, CC_OP_EFLAGS
);
6322 case 0x28: /* ffree sti */
6323 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6325 case 0x2a: /* fst sti */
6326 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6328 case 0x2b: /* fstp sti */
6329 case 0x0b: /* fstp1 sti, undocumented op */
6330 case 0x3a: /* fstp8 sti, undocumented op */
6331 case 0x3b: /* fstp9 sti, undocumented op */
6332 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6333 gen_helper_fpop(cpu_env
);
6335 case 0x2c: /* fucom st(i) */
6336 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6337 gen_helper_fucom_ST0_FT0(cpu_env
);
6339 case 0x2d: /* fucomp st(i) */
6340 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6341 gen_helper_fucom_ST0_FT0(cpu_env
);
6342 gen_helper_fpop(cpu_env
);
6344 case 0x33: /* de/3 */
6346 case 1: /* fcompp */
6347 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6348 gen_helper_fcom_ST0_FT0(cpu_env
);
6349 gen_helper_fpop(cpu_env
);
6350 gen_helper_fpop(cpu_env
);
6356 case 0x38: /* ffreep sti, undocumented op */
6357 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6358 gen_helper_fpop(cpu_env
);
6360 case 0x3c: /* df/4 */
6363 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
6364 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
6365 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
6371 case 0x3d: /* fucomip */
6372 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6375 gen_update_cc_op(s
);
6376 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6377 gen_helper_fucomi_ST0_FT0(cpu_env
);
6378 gen_helper_fpop(cpu_env
);
6379 set_cc_op(s
, CC_OP_EFLAGS
);
6381 case 0x3e: /* fcomip */
6382 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6385 gen_update_cc_op(s
);
6386 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6387 gen_helper_fcomi_ST0_FT0(cpu_env
);
6388 gen_helper_fpop(cpu_env
);
6389 set_cc_op(s
, CC_OP_EFLAGS
);
6391 case 0x10 ... 0x13: /* fcmovxx */
6396 static const uint8_t fcmov_cc
[8] = {
6403 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6406 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6407 l1
= gen_new_label();
6408 gen_jcc1_noeob(s
, op1
, l1
);
6409 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6418 /************************/
6421 case 0xa4: /* movsS */
6423 ot
= mo_b_d(b
, dflag
);
6424 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6425 gen_repz_movs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6431 case 0xaa: /* stosS */
6433 ot
= mo_b_d(b
, dflag
);
6434 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6435 gen_repz_stos(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6440 case 0xac: /* lodsS */
6442 ot
= mo_b_d(b
, dflag
);
6443 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6444 gen_repz_lods(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6449 case 0xae: /* scasS */
6451 ot
= mo_b_d(b
, dflag
);
6452 if (prefixes
& PREFIX_REPNZ
) {
6453 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6454 } else if (prefixes
& PREFIX_REPZ
) {
6455 gen_repz_scas(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6461 case 0xa6: /* cmpsS */
6463 ot
= mo_b_d(b
, dflag
);
6464 if (prefixes
& PREFIX_REPNZ
) {
6465 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 1);
6466 } else if (prefixes
& PREFIX_REPZ
) {
6467 gen_repz_cmps(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
, 0);
6472 case 0x6c: /* insS */
6474 ot
= mo_b_d32(b
, dflag
);
6475 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6476 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6477 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
) | 4);
6478 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6481 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6482 gen_repz_ins(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6483 /* jump generated by gen_repz_ins */
6486 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6487 gen_jmp(s
, s
->pc
- s
->cs_base
);
6491 case 0x6e: /* outsS */
6493 ot
= mo_b_d32(b
, dflag
);
6494 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6495 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6496 svm_is_rep(prefixes
) | 4);
6497 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6500 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6501 gen_repz_outs(s
, ot
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
6502 /* jump generated by gen_repz_outs */
6505 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6506 gen_jmp(s
, s
->pc
- s
->cs_base
);
6511 /************************/
6516 ot
= mo_b_d32(b
, dflag
);
6517 val
= x86_ldub_code(env
, s
);
6518 tcg_gen_movi_tl(s
->T0
, val
);
6519 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6520 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6521 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6524 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
6525 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
6526 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
6527 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6528 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6529 gen_jmp(s
, s
->pc
- s
->cs_base
);
6534 ot
= mo_b_d32(b
, dflag
);
6535 val
= x86_ldub_code(env
, s
);
6536 tcg_gen_movi_tl(s
->T0
, val
);
6537 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6538 svm_is_rep(prefixes
));
6539 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
6541 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6544 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
6545 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
6546 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
6547 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6548 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6549 gen_jmp(s
, s
->pc
- s
->cs_base
);
6554 ot
= mo_b_d32(b
, dflag
);
6555 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6556 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6557 SVM_IOIO_TYPE_MASK
| svm_is_rep(prefixes
));
6558 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6561 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
6562 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
6563 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
6564 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6565 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6566 gen_jmp(s
, s
->pc
- s
->cs_base
);
6571 ot
= mo_b_d32(b
, dflag
);
6572 tcg_gen_ext16u_tl(s
->T0
, cpu_regs
[R_EDX
]);
6573 gen_check_io(s
, ot
, pc_start
- s
->cs_base
,
6574 svm_is_rep(prefixes
));
6575 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
6577 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6580 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
6581 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
6582 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
6583 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6584 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6585 gen_jmp(s
, s
->pc
- s
->cs_base
);
6589 /************************/
6591 case 0xc2: /* ret im */
6592 val
= x86_ldsw_code(env
, s
);
6594 gen_stack_update(s
, val
+ (1 << ot
));
6595 /* Note that gen_pop_T0 uses a zero-extending load. */
6596 gen_op_jmp_v(s
->T0
);
6600 case 0xc3: /* ret */
6602 gen_pop_update(s
, ot
);
6603 /* Note that gen_pop_T0 uses a zero-extending load. */
6604 gen_op_jmp_v(s
->T0
);
6608 case 0xca: /* lret im */
6609 val
= x86_ldsw_code(env
, s
);
6611 if (PE(s
) && !VM86(s
)) {
6612 gen_update_cc_op(s
);
6613 gen_jmp_im(s
, pc_start
- s
->cs_base
);
6614 gen_helper_lret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6615 tcg_const_i32(val
));
6619 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
6620 /* NOTE: keeping EIP updated is not a problem in case of
6622 gen_op_jmp_v(s
->T0
);
6624 gen_add_A0_im(s
, 1 << dflag
);
6625 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
6626 gen_op_movl_seg_T0_vm(s
, R_CS
);
6627 /* add stack offset */
6628 gen_stack_update(s
, val
+ (2 << dflag
));
6632 case 0xcb: /* lret */
6635 case 0xcf: /* iret */
6636 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IRET
);
6637 if (!PE(s
) || VM86(s
)) {
6638 /* real mode or vm86 mode */
6639 if (!check_vm86_iopl(s
)) {
6642 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6644 gen_helper_iret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6645 tcg_const_i32(s
->pc
- s
->cs_base
));
6647 set_cc_op(s
, CC_OP_EFLAGS
);
6650 case 0xe8: /* call im */
6652 if (dflag
!= MO_16
) {
6653 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6655 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6657 next_eip
= s
->pc
- s
->cs_base
;
6659 if (dflag
== MO_16
) {
6661 } else if (!CODE64(s
)) {
6664 tcg_gen_movi_tl(s
->T0
, next_eip
);
6665 gen_push_v(s
, s
->T0
);
6670 case 0x9a: /* lcall im */
6672 unsigned int selector
, offset
;
6677 offset
= insn_get(env
, s
, ot
);
6678 selector
= insn_get(env
, s
, MO_16
);
6680 tcg_gen_movi_tl(s
->T0
, selector
);
6681 tcg_gen_movi_tl(s
->T1
, offset
);
6684 case 0xe9: /* jmp im */
6685 if (dflag
!= MO_16
) {
6686 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6688 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6690 tval
+= s
->pc
- s
->cs_base
;
6691 if (dflag
== MO_16
) {
6693 } else if (!CODE64(s
)) {
6699 case 0xea: /* ljmp im */
6701 unsigned int selector
, offset
;
6706 offset
= insn_get(env
, s
, ot
);
6707 selector
= insn_get(env
, s
, MO_16
);
6709 tcg_gen_movi_tl(s
->T0
, selector
);
6710 tcg_gen_movi_tl(s
->T1
, offset
);
6713 case 0xeb: /* jmp Jb */
6714 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6715 tval
+= s
->pc
- s
->cs_base
;
6716 if (dflag
== MO_16
) {
6721 case 0x70 ... 0x7f: /* jcc Jb */
6722 tval
= (int8_t)insn_get(env
, s
, MO_8
);
6724 case 0x180 ... 0x18f: /* jcc Jv */
6725 if (dflag
!= MO_16
) {
6726 tval
= (int32_t)insn_get(env
, s
, MO_32
);
6728 tval
= (int16_t)insn_get(env
, s
, MO_16
);
6731 next_eip
= s
->pc
- s
->cs_base
;
6733 if (dflag
== MO_16
) {
6737 gen_jcc(s
, b
, tval
, next_eip
);
6740 case 0x190 ... 0x19f: /* setcc Gv */
6741 modrm
= x86_ldub_code(env
, s
);
6742 gen_setcc1(s
, b
, s
->T0
);
6743 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
6745 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6746 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6750 modrm
= x86_ldub_code(env
, s
);
6751 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6752 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
6755 /************************/
6757 case 0x9c: /* pushf */
6758 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_PUSHF
);
6759 if (check_vm86_iopl(s
)) {
6760 gen_update_cc_op(s
);
6761 gen_helper_read_eflags(s
->T0
, cpu_env
);
6762 gen_push_v(s
, s
->T0
);
6765 case 0x9d: /* popf */
6766 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_POPF
);
6767 if (check_vm86_iopl(s
)) {
6770 if (dflag
!= MO_16
) {
6771 gen_helper_write_eflags(cpu_env
, s
->T0
,
6772 tcg_const_i32((TF_MASK
| AC_MASK
|
6777 gen_helper_write_eflags(cpu_env
, s
->T0
,
6778 tcg_const_i32((TF_MASK
| AC_MASK
|
6780 IF_MASK
| IOPL_MASK
)
6784 if (CPL(s
) <= IOPL(s
)) {
6785 if (dflag
!= MO_16
) {
6786 gen_helper_write_eflags(cpu_env
, s
->T0
,
6787 tcg_const_i32((TF_MASK
|
6793 gen_helper_write_eflags(cpu_env
, s
->T0
,
6794 tcg_const_i32((TF_MASK
|
6802 if (dflag
!= MO_16
) {
6803 gen_helper_write_eflags(cpu_env
, s
->T0
,
6804 tcg_const_i32((TF_MASK
| AC_MASK
|
6805 ID_MASK
| NT_MASK
)));
6807 gen_helper_write_eflags(cpu_env
, s
->T0
,
6808 tcg_const_i32((TF_MASK
| AC_MASK
|
6814 gen_pop_update(s
, ot
);
6815 set_cc_op(s
, CC_OP_EFLAGS
);
6816 /* abort translation because TF/AC flag may change */
6817 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
6821 case 0x9e: /* sahf */
6822 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6824 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_AH
);
6825 gen_compute_eflags(s
);
6826 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
6827 tcg_gen_andi_tl(s
->T0
, s
->T0
, CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
6828 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, s
->T0
);
6830 case 0x9f: /* lahf */
6831 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
6833 gen_compute_eflags(s
);
6834 /* Note: gen_compute_eflags() only gives the condition codes */
6835 tcg_gen_ori_tl(s
->T0
, cpu_cc_src
, 0x02);
6836 gen_op_mov_reg_v(s
, MO_8
, R_AH
, s
->T0
);
6838 case 0xf5: /* cmc */
6839 gen_compute_eflags(s
);
6840 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6842 case 0xf8: /* clc */
6843 gen_compute_eflags(s
);
6844 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
6846 case 0xf9: /* stc */
6847 gen_compute_eflags(s
);
6848 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
6850 case 0xfc: /* cld */
6851 tcg_gen_movi_i32(s
->tmp2_i32
, 1);
6852 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6854 case 0xfd: /* std */
6855 tcg_gen_movi_i32(s
->tmp2_i32
, -1);
6856 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
6859 /************************/
6860 /* bit operations */
6861 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6863 modrm
= x86_ldub_code(env
, s
);
6864 op
= (modrm
>> 3) & 7;
6865 mod
= (modrm
>> 6) & 3;
6866 rm
= (modrm
& 7) | REX_B(s
);
6869 gen_lea_modrm(env
, s
, modrm
);
6870 if (!(s
->prefix
& PREFIX_LOCK
)) {
6871 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6874 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6877 val
= x86_ldub_code(env
, s
);
6878 tcg_gen_movi_tl(s
->T1
, val
);
6883 case 0x1a3: /* bt Gv, Ev */
6886 case 0x1ab: /* bts */
6889 case 0x1b3: /* btr */
6892 case 0x1bb: /* btc */
6896 modrm
= x86_ldub_code(env
, s
);
6897 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6898 mod
= (modrm
>> 6) & 3;
6899 rm
= (modrm
& 7) | REX_B(s
);
6900 gen_op_mov_v_reg(s
, MO_32
, s
->T1
, reg
);
6902 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6903 /* specific case: we need to add a displacement */
6904 gen_exts(ot
, s
->T1
);
6905 tcg_gen_sari_tl(s
->tmp0
, s
->T1
, 3 + ot
);
6906 tcg_gen_shli_tl(s
->tmp0
, s
->tmp0
, ot
);
6907 tcg_gen_add_tl(s
->A0
, gen_lea_modrm_1(s
, a
), s
->tmp0
);
6908 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
6909 if (!(s
->prefix
& PREFIX_LOCK
)) {
6910 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6913 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
6916 tcg_gen_andi_tl(s
->T1
, s
->T1
, (1 << (3 + ot
)) - 1);
6917 tcg_gen_movi_tl(s
->tmp0
, 1);
6918 tcg_gen_shl_tl(s
->tmp0
, s
->tmp0
, s
->T1
);
6919 if (s
->prefix
& PREFIX_LOCK
) {
6922 /* Needs no atomic ops; we suppressed the normal
6923 memory load for LOCK above so do it now. */
6924 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6927 tcg_gen_atomic_fetch_or_tl(s
->T0
, s
->A0
, s
->tmp0
,
6928 s
->mem_index
, ot
| MO_LE
);
6931 tcg_gen_not_tl(s
->tmp0
, s
->tmp0
);
6932 tcg_gen_atomic_fetch_and_tl(s
->T0
, s
->A0
, s
->tmp0
,
6933 s
->mem_index
, ot
| MO_LE
);
6937 tcg_gen_atomic_fetch_xor_tl(s
->T0
, s
->A0
, s
->tmp0
,
6938 s
->mem_index
, ot
| MO_LE
);
6941 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
6943 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
6946 /* Data already loaded; nothing to do. */
6949 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
6952 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->tmp0
);
6956 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->tmp0
);
6961 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
6963 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6968 /* Delay all CC updates until after the store above. Note that
6969 C is the result of the test, Z is unchanged, and the others
6970 are all undefined. */
6972 case CC_OP_MULB
... CC_OP_MULQ
:
6973 case CC_OP_ADDB
... CC_OP_ADDQ
:
6974 case CC_OP_ADCB
... CC_OP_ADCQ
:
6975 case CC_OP_SUBB
... CC_OP_SUBQ
:
6976 case CC_OP_SBBB
... CC_OP_SBBQ
:
6977 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
6978 case CC_OP_INCB
... CC_OP_INCQ
:
6979 case CC_OP_DECB
... CC_OP_DECQ
:
6980 case CC_OP_SHLB
... CC_OP_SHLQ
:
6981 case CC_OP_SARB
... CC_OP_SARQ
:
6982 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
6983 /* Z was going to be computed from the non-zero status of CC_DST.
6984 We can get that same Z value (and the new C value) by leaving
6985 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6987 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
6988 set_cc_op(s
, ((s
->cc_op
- CC_OP_MULB
) & 3) + CC_OP_SARB
);
6991 /* Otherwise, generate EFLAGS and replace the C bit. */
6992 gen_compute_eflags(s
);
6993 tcg_gen_deposit_tl(cpu_cc_src
, cpu_cc_src
, s
->tmp4
,
6998 case 0x1bc: /* bsf / tzcnt */
6999 case 0x1bd: /* bsr / lzcnt */
7001 modrm
= x86_ldub_code(env
, s
);
7002 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
7003 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
7004 gen_extu(ot
, s
->T0
);
7006 /* Note that lzcnt and tzcnt are in different extensions. */
7007 if ((prefixes
& PREFIX_REPZ
)
7009 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
7010 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
7012 /* For lzcnt/tzcnt, C bit is defined related to the input. */
7013 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
7015 /* For lzcnt, reduce the target_ulong result by the
7016 number of zeros that we expect to find at the top. */
7017 tcg_gen_clzi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
);
7018 tcg_gen_subi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- size
);
7020 /* For tzcnt, a zero input must return the operand size. */
7021 tcg_gen_ctzi_tl(s
->T0
, s
->T0
, size
);
7023 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
7024 gen_op_update1_cc(s
);
7025 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
7027 /* For bsr/bsf, only the Z bit is defined and it is related
7028 to the input and not the result. */
7029 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
7030 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
7032 /* ??? The manual says that the output is undefined when the
7033 input is zero, but real hardware leaves it unchanged, and
7034 real programs appear to depend on that. Accomplish this
7035 by passing the output as the value to return upon zero. */
7037 /* For bsr, return the bit index of the first 1 bit,
7038 not the count of leading zeros. */
7039 tcg_gen_xori_tl(s
->T1
, cpu_regs
[reg
], TARGET_LONG_BITS
- 1);
7040 tcg_gen_clz_tl(s
->T0
, s
->T0
, s
->T1
);
7041 tcg_gen_xori_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- 1);
7043 tcg_gen_ctz_tl(s
->T0
, s
->T0
, cpu_regs
[reg
]);
7046 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
7048 /************************/
7050 case 0x27: /* daa */
7053 gen_update_cc_op(s
);
7054 gen_helper_daa(cpu_env
);
7055 set_cc_op(s
, CC_OP_EFLAGS
);
7057 case 0x2f: /* das */
7060 gen_update_cc_op(s
);
7061 gen_helper_das(cpu_env
);
7062 set_cc_op(s
, CC_OP_EFLAGS
);
7064 case 0x37: /* aaa */
7067 gen_update_cc_op(s
);
7068 gen_helper_aaa(cpu_env
);
7069 set_cc_op(s
, CC_OP_EFLAGS
);
7071 case 0x3f: /* aas */
7074 gen_update_cc_op(s
);
7075 gen_helper_aas(cpu_env
);
7076 set_cc_op(s
, CC_OP_EFLAGS
);
7078 case 0xd4: /* aam */
7081 val
= x86_ldub_code(env
, s
);
7083 gen_exception(s
, EXCP00_DIVZ
, pc_start
- s
->cs_base
);
7085 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
7086 set_cc_op(s
, CC_OP_LOGICB
);
7089 case 0xd5: /* aad */
7092 val
= x86_ldub_code(env
, s
);
7093 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
7094 set_cc_op(s
, CC_OP_LOGICB
);
7096 /************************/
7098 case 0x90: /* nop */
7099 /* XXX: correct lock test for all insn */
7100 if (prefixes
& PREFIX_LOCK
) {
7103 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7105 goto do_xchg_reg_eax
;
7107 if (prefixes
& PREFIX_REPZ
) {
7108 gen_update_cc_op(s
);
7109 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7110 gen_helper_pause(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7111 s
->base
.is_jmp
= DISAS_NORETURN
;
7114 case 0x9b: /* fwait */
7115 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
7116 (HF_MP_MASK
| HF_TS_MASK
)) {
7117 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
7119 gen_helper_fwait(cpu_env
);
7122 case 0xcc: /* int3 */
7123 gen_interrupt(s
, EXCP03_INT3
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7125 case 0xcd: /* int N */
7126 val
= x86_ldub_code(env
, s
);
7127 if (check_vm86_iopl(s
)) {
7128 gen_interrupt(s
, val
, pc_start
- s
->cs_base
, s
->pc
- s
->cs_base
);
7131 case 0xce: /* into */
7134 gen_update_cc_op(s
);
7135 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7136 gen_helper_into(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7139 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7140 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_ICEBP
);
7141 gen_debug(s
, pc_start
- s
->cs_base
);
7144 case 0xfa: /* cli */
7145 if (check_iopl(s
)) {
7146 gen_helper_cli(cpu_env
);
7149 case 0xfb: /* sti */
7150 if (check_iopl(s
)) {
7151 gen_helper_sti(cpu_env
);
7152 /* interrupts are enabled only after the first insn following STI */
7153 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7154 gen_eob_inhibit_irq(s
, true);
7157 case 0x62: /* bound */
7161 modrm
= x86_ldub_code(env
, s
);
7162 reg
= (modrm
>> 3) & 7;
7163 mod
= (modrm
>> 6) & 3;
7166 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
7167 gen_lea_modrm(env
, s
, modrm
);
7168 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
7170 gen_helper_boundw(cpu_env
, s
->A0
, s
->tmp2_i32
);
7172 gen_helper_boundl(cpu_env
, s
->A0
, s
->tmp2_i32
);
7175 case 0x1c8 ... 0x1cf: /* bswap reg */
7176 reg
= (b
& 7) | REX_B(s
);
7177 #ifdef TARGET_X86_64
7178 if (dflag
== MO_64
) {
7179 gen_op_mov_v_reg(s
, MO_64
, s
->T0
, reg
);
7180 tcg_gen_bswap64_i64(s
->T0
, s
->T0
);
7181 gen_op_mov_reg_v(s
, MO_64
, reg
, s
->T0
);
7185 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, reg
);
7186 tcg_gen_ext32u_tl(s
->T0
, s
->T0
);
7187 tcg_gen_bswap32_tl(s
->T0
, s
->T0
);
7188 gen_op_mov_reg_v(s
, MO_32
, reg
, s
->T0
);
7191 case 0xd6: /* salc */
7194 gen_compute_eflags_c(s
, s
->T0
);
7195 tcg_gen_neg_tl(s
->T0
, s
->T0
);
7196 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
7198 case 0xe0: /* loopnz */
7199 case 0xe1: /* loopz */
7200 case 0xe2: /* loop */
7201 case 0xe3: /* jecxz */
7203 TCGLabel
*l1
, *l2
, *l3
;
7205 tval
= (int8_t)insn_get(env
, s
, MO_8
);
7206 next_eip
= s
->pc
- s
->cs_base
;
7208 if (dflag
== MO_16
) {
7212 l1
= gen_new_label();
7213 l2
= gen_new_label();
7214 l3
= gen_new_label();
7215 gen_update_cc_op(s
);
7218 case 0: /* loopnz */
7220 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
7221 gen_op_jz_ecx(s
, s
->aflag
, l3
);
7222 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7225 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
7226 gen_op_jnz_ecx(s
, s
->aflag
, l1
);
7230 gen_op_jz_ecx(s
, s
->aflag
, l1
);
7235 gen_jmp_im(s
, next_eip
);
7239 gen_jmp_im(s
, tval
);
7244 case 0x130: /* wrmsr */
7245 case 0x132: /* rdmsr */
7246 if (check_cpl0(s
)) {
7247 gen_update_cc_op(s
);
7248 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7250 gen_helper_rdmsr(cpu_env
);
7252 gen_helper_wrmsr(cpu_env
);
7256 case 0x131: /* rdtsc */
7257 gen_update_cc_op(s
);
7258 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7259 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7262 gen_helper_rdtsc(cpu_env
);
7263 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7264 gen_jmp(s
, s
->pc
- s
->cs_base
);
7267 case 0x133: /* rdpmc */
7268 gen_update_cc_op(s
);
7269 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7270 gen_helper_rdpmc(cpu_env
);
7272 case 0x134: /* sysenter */
7273 /* For Intel SYSENTER is valid on 64-bit */
7274 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7277 gen_exception_gpf(s
);
7279 gen_helper_sysenter(cpu_env
);
7283 case 0x135: /* sysexit */
7284 /* For Intel SYSEXIT is valid on 64-bit */
7285 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7288 gen_exception_gpf(s
);
7290 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
- 1));
7294 #ifdef TARGET_X86_64
7295 case 0x105: /* syscall */
7296 /* XXX: is it usable in real mode ? */
7297 gen_update_cc_op(s
);
7298 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7299 gen_helper_syscall(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7300 /* TF handling for the syscall insn is different. The TF bit is checked
7301 after the syscall insn completes. This allows #DB to not be
7302 generated after one has entered CPL0 if TF is set in FMASK. */
7303 gen_eob_worker(s
, false, true);
7305 case 0x107: /* sysret */
7307 gen_exception_gpf(s
);
7309 gen_helper_sysret(cpu_env
, tcg_const_i32(dflag
- 1));
7310 /* condition codes are modified only in long mode */
7312 set_cc_op(s
, CC_OP_EFLAGS
);
7314 /* TF handling for the sysret insn is different. The TF bit is
7315 checked after the sysret insn completes. This allows #DB to be
7316 generated "as if" the syscall insn in userspace has just
7318 gen_eob_worker(s
, false, true);
7322 case 0x1a2: /* cpuid */
7323 gen_update_cc_op(s
);
7324 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7325 gen_helper_cpuid(cpu_env
);
7327 case 0xf4: /* hlt */
7328 if (check_cpl0(s
)) {
7329 gen_update_cc_op(s
);
7330 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7331 gen_helper_hlt(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7332 s
->base
.is_jmp
= DISAS_NORETURN
;
7336 modrm
= x86_ldub_code(env
, s
);
7337 mod
= (modrm
>> 6) & 3;
7338 op
= (modrm
>> 3) & 7;
7341 if (!PE(s
) || VM86(s
))
7343 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_READ
);
7344 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
7345 offsetof(CPUX86State
, ldt
.selector
));
7346 ot
= mod
== 3 ? dflag
: MO_16
;
7347 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7350 if (!PE(s
) || VM86(s
))
7352 if (check_cpl0(s
)) {
7353 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_LDTR_WRITE
);
7354 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7355 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
7356 gen_helper_lldt(cpu_env
, s
->tmp2_i32
);
7360 if (!PE(s
) || VM86(s
))
7362 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_READ
);
7363 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
7364 offsetof(CPUX86State
, tr
.selector
));
7365 ot
= mod
== 3 ? dflag
: MO_16
;
7366 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7369 if (!PE(s
) || VM86(s
))
7371 if (check_cpl0(s
)) {
7372 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_TR_WRITE
);
7373 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7374 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
7375 gen_helper_ltr(cpu_env
, s
->tmp2_i32
);
7380 if (!PE(s
) || VM86(s
))
7382 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7383 gen_update_cc_op(s
);
7385 gen_helper_verr(cpu_env
, s
->T0
);
7387 gen_helper_verw(cpu_env
, s
->T0
);
7389 set_cc_op(s
, CC_OP_EFLAGS
);
7397 modrm
= x86_ldub_code(env
, s
);
7399 CASE_MODRM_MEM_OP(0): /* sgdt */
7400 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_READ
);
7401 gen_lea_modrm(env
, s
, modrm
);
7402 tcg_gen_ld32u_tl(s
->T0
,
7403 cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7404 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
7405 gen_add_A0_im(s
, 2);
7406 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7407 if (dflag
== MO_16
) {
7408 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
7410 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
7413 case 0xc8: /* monitor */
7414 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
7417 gen_update_cc_op(s
);
7418 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7419 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
7420 gen_extu(s
->aflag
, s
->A0
);
7421 gen_add_A0_ds_seg(s
);
7422 gen_helper_monitor(cpu_env
, s
->A0
);
7425 case 0xc9: /* mwait */
7426 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
7429 gen_update_cc_op(s
);
7430 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7431 gen_helper_mwait(cpu_env
, tcg_const_i32(s
->pc
- pc_start
));
7435 case 0xca: /* clac */
7436 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
7440 gen_helper_clac(cpu_env
);
7441 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7445 case 0xcb: /* stac */
7446 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
7450 gen_helper_stac(cpu_env
);
7451 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7455 CASE_MODRM_MEM_OP(1): /* sidt */
7456 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_READ
);
7457 gen_lea_modrm(env
, s
, modrm
);
7458 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7459 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
7460 gen_add_A0_im(s
, 2);
7461 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.base
));
7462 if (dflag
== MO_16
) {
7463 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
7465 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
7468 case 0xd0: /* xgetbv */
7469 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
7470 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
7471 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
7474 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
7475 gen_helper_xgetbv(s
->tmp1_i64
, cpu_env
, s
->tmp2_i32
);
7476 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
7479 case 0xd1: /* xsetbv */
7480 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
7481 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
7482 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
7485 if (!check_cpl0(s
)) {
7488 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
7490 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
7491 gen_helper_xsetbv(cpu_env
, s
->tmp2_i32
, s
->tmp1_i64
);
7492 /* End TB because translation flags may change. */
7493 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7497 case 0xd8: /* VMRUN */
7498 if (!(s
->flags
& HF_SVME_MASK
) || !PE(s
)) {
7501 if (!check_cpl0(s
)) {
7504 gen_update_cc_op(s
);
7505 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7506 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
- 1),
7507 tcg_const_i32(s
->pc
- pc_start
));
7508 tcg_gen_exit_tb(NULL
, 0);
7509 s
->base
.is_jmp
= DISAS_NORETURN
;
7512 case 0xd9: /* VMMCALL */
7513 if (!(s
->flags
& HF_SVME_MASK
)) {
7516 gen_update_cc_op(s
);
7517 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7518 gen_helper_vmmcall(cpu_env
);
7521 case 0xda: /* VMLOAD */
7522 if (!(s
->flags
& HF_SVME_MASK
) || !PE(s
)) {
7525 if (!check_cpl0(s
)) {
7528 gen_update_cc_op(s
);
7529 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7530 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
- 1));
7533 case 0xdb: /* VMSAVE */
7534 if (!(s
->flags
& HF_SVME_MASK
) || !PE(s
)) {
7537 if (!check_cpl0(s
)) {
7540 gen_update_cc_op(s
);
7541 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7542 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
- 1));
7545 case 0xdc: /* STGI */
7546 if ((!(s
->flags
& HF_SVME_MASK
)
7547 && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
7551 if (!check_cpl0(s
)) {
7554 gen_update_cc_op(s
);
7555 gen_helper_stgi(cpu_env
);
7556 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7560 case 0xdd: /* CLGI */
7561 if (!(s
->flags
& HF_SVME_MASK
) || !PE(s
)) {
7564 if (!check_cpl0(s
)) {
7567 gen_update_cc_op(s
);
7568 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7569 gen_helper_clgi(cpu_env
);
7572 case 0xde: /* SKINIT */
7573 if ((!(s
->flags
& HF_SVME_MASK
)
7574 && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
7578 gen_update_cc_op(s
);
7579 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7580 gen_helper_skinit(cpu_env
);
7583 case 0xdf: /* INVLPGA */
7584 if (!(s
->flags
& HF_SVME_MASK
) || !PE(s
)) {
7587 if (!check_cpl0(s
)) {
7590 gen_update_cc_op(s
);
7591 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7592 gen_helper_invlpga(cpu_env
, tcg_const_i32(s
->aflag
- 1));
7595 CASE_MODRM_MEM_OP(2): /* lgdt */
7596 if (!check_cpl0(s
)) {
7599 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_GDTR_WRITE
);
7600 gen_lea_modrm(env
, s
, modrm
);
7601 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
7602 gen_add_A0_im(s
, 2);
7603 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
7604 if (dflag
== MO_16
) {
7605 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
7607 tcg_gen_st_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7608 tcg_gen_st32_tl(s
->T1
, cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7611 CASE_MODRM_MEM_OP(3): /* lidt */
7612 if (!check_cpl0(s
)) {
7615 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_IDTR_WRITE
);
7616 gen_lea_modrm(env
, s
, modrm
);
7617 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
7618 gen_add_A0_im(s
, 2);
7619 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
7620 if (dflag
== MO_16
) {
7621 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
7623 tcg_gen_st_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.base
));
7624 tcg_gen_st32_tl(s
->T1
, cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7627 CASE_MODRM_OP(4): /* smsw */
7628 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_CR0
);
7629 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, cr
[0]));
7631 * In 32-bit mode, the higher 16 bits of the destination
7632 * register are undefined. In practice CR0[31:0] is stored
7633 * just like in 64-bit mode.
7635 mod
= (modrm
>> 6) & 3;
7636 ot
= (mod
!= 3 ? MO_16
: s
->dflag
);
7637 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7639 case 0xee: /* rdpkru */
7640 if (prefixes
& PREFIX_LOCK
) {
7643 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
7644 gen_helper_rdpkru(s
->tmp1_i64
, cpu_env
, s
->tmp2_i32
);
7645 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
7647 case 0xef: /* wrpkru */
7648 if (prefixes
& PREFIX_LOCK
) {
7651 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
7653 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
7654 gen_helper_wrpkru(cpu_env
, s
->tmp2_i32
, s
->tmp1_i64
);
7656 CASE_MODRM_OP(6): /* lmsw */
7657 if (!check_cpl0(s
)) {
7660 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
7661 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7662 gen_helper_lmsw(cpu_env
, s
->T0
);
7663 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7667 CASE_MODRM_MEM_OP(7): /* invlpg */
7668 if (!check_cpl0(s
)) {
7671 gen_update_cc_op(s
);
7672 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7673 gen_lea_modrm(env
, s
, modrm
);
7674 gen_helper_invlpg(cpu_env
, s
->A0
);
7675 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
7679 case 0xf8: /* swapgs */
7680 #ifdef TARGET_X86_64
7682 if (check_cpl0(s
)) {
7683 tcg_gen_mov_tl(s
->T0
, cpu_seg_base
[R_GS
]);
7684 tcg_gen_ld_tl(cpu_seg_base
[R_GS
], cpu_env
,
7685 offsetof(CPUX86State
, kernelgsbase
));
7686 tcg_gen_st_tl(s
->T0
, cpu_env
,
7687 offsetof(CPUX86State
, kernelgsbase
));
7694 case 0xf9: /* rdtscp */
7695 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
)) {
7698 gen_update_cc_op(s
);
7699 gen_jmp_im(s
, pc_start
- s
->cs_base
);
7700 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7703 gen_helper_rdtscp(cpu_env
);
7704 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7705 gen_jmp(s
, s
->pc
- s
->cs_base
);
7714 case 0x108: /* invd */
7715 case 0x109: /* wbinvd */
7716 if (check_cpl0(s
)) {
7717 gen_svm_check_intercept(s
, pc_start
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
7721 case 0x63: /* arpl or movslS (x86_64) */
7722 #ifdef TARGET_X86_64
7725 /* d_ot is the size of destination */
7728 modrm
= x86_ldub_code(env
, s
);
7729 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
7730 mod
= (modrm
>> 6) & 3;
7731 rm
= (modrm
& 7) | REX_B(s
);
7734 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
7736 if (d_ot
== MO_64
) {
7737 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
7739 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
7741 gen_lea_modrm(env
, s
, modrm
);
7742 gen_op_ld_v(s
, MO_32
| MO_SIGN
, s
->T0
, s
->A0
);
7743 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
7749 TCGv t0
, t1
, t2
, a0
;
7751 if (!PE(s
) || VM86(s
))
7753 t0
= tcg_temp_local_new();
7754 t1
= tcg_temp_local_new();
7755 t2
= tcg_temp_local_new();
7757 modrm
= x86_ldub_code(env
, s
);
7758 reg
= (modrm
>> 3) & 7;
7759 mod
= (modrm
>> 6) & 3;
7762 gen_lea_modrm(env
, s
, modrm
);
7763 gen_op_ld_v(s
, ot
, t0
, s
->A0
);
7764 a0
= tcg_temp_local_new();
7765 tcg_gen_mov_tl(a0
, s
->A0
);
7767 gen_op_mov_v_reg(s
, ot
, t0
, rm
);
7770 gen_op_mov_v_reg(s
, ot
, t1
, reg
);
7771 tcg_gen_andi_tl(s
->tmp0
, t0
, 3);
7772 tcg_gen_andi_tl(t1
, t1
, 3);
7773 tcg_gen_movi_tl(t2
, 0);
7774 label1
= gen_new_label();
7775 tcg_gen_brcond_tl(TCG_COND_GE
, s
->tmp0
, t1
, label1
);
7776 tcg_gen_andi_tl(t0
, t0
, ~3);
7777 tcg_gen_or_tl(t0
, t0
, t1
);
7778 tcg_gen_movi_tl(t2
, CC_Z
);
7779 gen_set_label(label1
);
7781 gen_op_st_v(s
, ot
, t0
, a0
);
7784 gen_op_mov_reg_v(s
, ot
, rm
, t0
);
7786 gen_compute_eflags(s
);
7787 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
7788 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
7794 case 0x102: /* lar */
7795 case 0x103: /* lsl */
7799 if (!PE(s
) || VM86(s
))
7801 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
7802 modrm
= x86_ldub_code(env
, s
);
7803 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
7804 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7805 t0
= tcg_temp_local_new();
7806 gen_update_cc_op(s
);
7808 gen_helper_lar(t0
, cpu_env
, s
->T0
);
7810 gen_helper_lsl(t0
, cpu_env
, s
->T0
);
7812 tcg_gen_andi_tl(s
->tmp0
, cpu_cc_src
, CC_Z
);
7813 label1
= gen_new_label();
7814 tcg_gen_brcondi_tl(TCG_COND_EQ
, s
->tmp0
, 0, label1
);
7815 gen_op_mov_reg_v(s
, ot
, reg
, t0
);
7816 gen_set_label(label1
);
7817 set_cc_op(s
, CC_OP_EFLAGS
);
7822 modrm
= x86_ldub_code(env
, s
);
7823 mod
= (modrm
>> 6) & 3;
7824 op
= (modrm
>> 3) & 7;
7826 case 0: /* prefetchnta */
7827 case 1: /* prefetcht0 */
7828 case 2: /* prefetcht1 */
7829 case 3: /* prefetcht2 */
7832 gen_nop_modrm(env
, s
, modrm
);
7833 /* nothing more to do */
7835 default: /* nop (multi byte) */
7836 gen_nop_modrm(env
, s
, modrm
);
7841 modrm
= x86_ldub_code(env
, s
);
7842 if (s
->flags
& HF_MPX_EN_MASK
) {
7843 mod
= (modrm
>> 6) & 3;
7844 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
7845 if (prefixes
& PREFIX_REPZ
) {
7848 || (prefixes
& PREFIX_LOCK
)
7849 || s
->aflag
== MO_16
) {
7852 gen_bndck(env
, s
, modrm
, TCG_COND_LTU
, cpu_bndl
[reg
]);
7853 } else if (prefixes
& PREFIX_REPNZ
) {
7856 || (prefixes
& PREFIX_LOCK
)
7857 || s
->aflag
== MO_16
) {
7860 TCGv_i64 notu
= tcg_temp_new_i64();
7861 tcg_gen_not_i64(notu
, cpu_bndu
[reg
]);
7862 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, notu
);
7863 tcg_temp_free_i64(notu
);
7864 } else if (prefixes
& PREFIX_DATA
) {
7865 /* bndmov -- from reg/mem */
7866 if (reg
>= 4 || s
->aflag
== MO_16
) {
7870 int reg2
= (modrm
& 7) | REX_B(s
);
7871 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
7874 if (s
->flags
& HF_MPX_IU_MASK
) {
7875 tcg_gen_mov_i64(cpu_bndl
[reg
], cpu_bndl
[reg2
]);
7876 tcg_gen_mov_i64(cpu_bndu
[reg
], cpu_bndu
[reg2
]);
7879 gen_lea_modrm(env
, s
, modrm
);
7881 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
7882 s
->mem_index
, MO_LEQ
);
7883 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
7884 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
7885 s
->mem_index
, MO_LEQ
);
7887 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
7888 s
->mem_index
, MO_LEUL
);
7889 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
7890 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
7891 s
->mem_index
, MO_LEUL
);
7893 /* bnd registers are now in-use */
7894 gen_set_hflag(s
, HF_MPX_IU_MASK
);
7896 } else if (mod
!= 3) {
7898 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
7900 || (prefixes
& PREFIX_LOCK
)
7901 || s
->aflag
== MO_16
7906 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
7908 tcg_gen_movi_tl(s
->A0
, 0);
7910 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
7912 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
7914 tcg_gen_movi_tl(s
->T0
, 0);
7917 gen_helper_bndldx64(cpu_bndl
[reg
], cpu_env
, s
->A0
, s
->T0
);
7918 tcg_gen_ld_i64(cpu_bndu
[reg
], cpu_env
,
7919 offsetof(CPUX86State
, mmx_t0
.MMX_Q(0)));
7921 gen_helper_bndldx32(cpu_bndu
[reg
], cpu_env
, s
->A0
, s
->T0
);
7922 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndu
[reg
]);
7923 tcg_gen_shri_i64(cpu_bndu
[reg
], cpu_bndu
[reg
], 32);
7925 gen_set_hflag(s
, HF_MPX_IU_MASK
);
7928 gen_nop_modrm(env
, s
, modrm
);
7931 modrm
= x86_ldub_code(env
, s
);
7932 if (s
->flags
& HF_MPX_EN_MASK
) {
7933 mod
= (modrm
>> 6) & 3;
7934 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
7935 if (mod
!= 3 && (prefixes
& PREFIX_REPZ
)) {
7938 || (prefixes
& PREFIX_LOCK
)
7939 || s
->aflag
== MO_16
) {
7942 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
7944 tcg_gen_extu_tl_i64(cpu_bndl
[reg
], cpu_regs
[a
.base
]);
7946 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndl
[reg
]);
7948 } else if (a
.base
== -1) {
7949 /* no base register has lower bound of 0 */
7950 tcg_gen_movi_i64(cpu_bndl
[reg
], 0);
7952 /* rip-relative generates #ud */
7955 tcg_gen_not_tl(s
->A0
, gen_lea_modrm_1(s
, a
));
7957 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
7959 tcg_gen_extu_tl_i64(cpu_bndu
[reg
], s
->A0
);
7960 /* bnd registers are now in-use */
7961 gen_set_hflag(s
, HF_MPX_IU_MASK
);
7963 } else if (prefixes
& PREFIX_REPNZ
) {
7966 || (prefixes
& PREFIX_LOCK
)
7967 || s
->aflag
== MO_16
) {
7970 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, cpu_bndu
[reg
]);
7971 } else if (prefixes
& PREFIX_DATA
) {
7972 /* bndmov -- to reg/mem */
7973 if (reg
>= 4 || s
->aflag
== MO_16
) {
7977 int reg2
= (modrm
& 7) | REX_B(s
);
7978 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
7981 if (s
->flags
& HF_MPX_IU_MASK
) {
7982 tcg_gen_mov_i64(cpu_bndl
[reg2
], cpu_bndl
[reg
]);
7983 tcg_gen_mov_i64(cpu_bndu
[reg2
], cpu_bndu
[reg
]);
7986 gen_lea_modrm(env
, s
, modrm
);
7988 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
7989 s
->mem_index
, MO_LEQ
);
7990 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
7991 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
7992 s
->mem_index
, MO_LEQ
);
7994 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
7995 s
->mem_index
, MO_LEUL
);
7996 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
7997 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
7998 s
->mem_index
, MO_LEUL
);
8001 } else if (mod
!= 3) {
8003 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
8005 || (prefixes
& PREFIX_LOCK
)
8006 || s
->aflag
== MO_16
8011 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
8013 tcg_gen_movi_tl(s
->A0
, 0);
8015 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
8017 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
8019 tcg_gen_movi_tl(s
->T0
, 0);
8022 gen_helper_bndstx64(cpu_env
, s
->A0
, s
->T0
,
8023 cpu_bndl
[reg
], cpu_bndu
[reg
]);
8025 gen_helper_bndstx32(cpu_env
, s
->A0
, s
->T0
,
8026 cpu_bndl
[reg
], cpu_bndu
[reg
]);
8030 gen_nop_modrm(env
, s
, modrm
);
8032 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
8033 modrm
= x86_ldub_code(env
, s
);
8034 gen_nop_modrm(env
, s
, modrm
);
8036 case 0x120: /* mov reg, crN */
8037 case 0x122: /* mov crN, reg */
8038 if (check_cpl0(s
)) {
8039 modrm
= x86_ldub_code(env
, s
);
8040 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
8041 * AMD documentation (24594.pdf) and testing of
8042 * intel 386 and 486 processors all show that the mod bits
8043 * are assumed to be 1's, regardless of actual values.
8045 rm
= (modrm
& 7) | REX_B(s
);
8046 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8051 if ((prefixes
& PREFIX_LOCK
) && (reg
== 0) &&
8052 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
8061 gen_update_cc_op(s
);
8062 gen_jmp_im(s
, pc_start
- s
->cs_base
);
8064 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
8067 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
8068 gen_helper_write_crN(cpu_env
, tcg_const_i32(reg
),
8070 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
8073 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
8076 gen_helper_read_crN(s
->T0
, cpu_env
, tcg_const_i32(reg
));
8077 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
8078 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
8079 gen_jmp(s
, s
->pc
- s
->cs_base
);
8088 case 0x121: /* mov reg, drN */
8089 case 0x123: /* mov drN, reg */
8090 if (check_cpl0(s
)) {
8091 modrm
= x86_ldub_code(env
, s
);
8092 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
8093 * AMD documentation (24594.pdf) and testing of
8094 * intel 386 and 486 processors all show that the mod bits
8095 * are assumed to be 1's, regardless of actual values.
8097 rm
= (modrm
& 7) | REX_B(s
);
8098 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8107 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_DR0
+ reg
);
8108 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
8109 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
8110 gen_helper_set_dr(cpu_env
, s
->tmp2_i32
, s
->T0
);
8111 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
8114 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_READ_DR0
+ reg
);
8115 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
8116 gen_helper_get_dr(s
->T0
, cpu_env
, s
->tmp2_i32
);
8117 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
8121 case 0x106: /* clts */
8122 if (check_cpl0(s
)) {
8123 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_WRITE_CR0
);
8124 gen_helper_clts(cpu_env
);
8125 /* abort block because static cpu state changed */
8126 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
8130 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
8131 case 0x1c3: /* MOVNTI reg, mem */
8132 if (!(s
->cpuid_features
& CPUID_SSE2
))
8134 ot
= mo_64_32(dflag
);
8135 modrm
= x86_ldub_code(env
, s
);
8136 mod
= (modrm
>> 6) & 3;
8139 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8140 /* generate a generic store */
8141 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
8144 modrm
= x86_ldub_code(env
, s
);
8146 CASE_MODRM_MEM_OP(0): /* fxsave */
8147 if (!(s
->cpuid_features
& CPUID_FXSR
)
8148 || (prefixes
& PREFIX_LOCK
)) {
8151 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8152 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8155 gen_lea_modrm(env
, s
, modrm
);
8156 gen_helper_fxsave(cpu_env
, s
->A0
);
8159 CASE_MODRM_MEM_OP(1): /* fxrstor */
8160 if (!(s
->cpuid_features
& CPUID_FXSR
)
8161 || (prefixes
& PREFIX_LOCK
)) {
8164 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8165 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8168 gen_lea_modrm(env
, s
, modrm
);
8169 gen_helper_fxrstor(cpu_env
, s
->A0
);
8172 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
8173 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
8176 if (s
->flags
& HF_TS_MASK
) {
8177 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8180 gen_lea_modrm(env
, s
, modrm
);
8181 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
, s
->mem_index
, MO_LEUL
);
8182 gen_helper_ldmxcsr(cpu_env
, s
->tmp2_i32
);
8185 CASE_MODRM_MEM_OP(3): /* stmxcsr */
8186 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
8189 if (s
->flags
& HF_TS_MASK
) {
8190 gen_exception(s
, EXCP07_PREX
, pc_start
- s
->cs_base
);
8193 gen_helper_update_mxcsr(cpu_env
);
8194 gen_lea_modrm(env
, s
, modrm
);
8195 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, mxcsr
));
8196 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
8199 CASE_MODRM_MEM_OP(4): /* xsave */
8200 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
8201 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
8202 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
8205 gen_lea_modrm(env
, s
, modrm
);
8206 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
8208 gen_helper_xsave(cpu_env
, s
->A0
, s
->tmp1_i64
);
8211 CASE_MODRM_MEM_OP(5): /* xrstor */
8212 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
8213 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
8214 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
8217 gen_lea_modrm(env
, s
, modrm
);
8218 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
8220 gen_helper_xrstor(cpu_env
, s
->A0
, s
->tmp1_i64
);
8221 /* XRSTOR is how MPX is enabled, which changes how
8222 we translate. Thus we need to end the TB. */
8223 gen_update_cc_op(s
);
8224 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
8228 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
8229 if (prefixes
& PREFIX_LOCK
) {
8232 if (prefixes
& PREFIX_DATA
) {
8234 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLWB
)) {
8237 gen_nop_modrm(env
, s
, modrm
);
8240 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
8241 || (s
->cpuid_xsave_features
& CPUID_XSAVE_XSAVEOPT
) == 0
8242 || (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
))) {
8245 gen_lea_modrm(env
, s
, modrm
);
8246 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
8248 gen_helper_xsaveopt(cpu_env
, s
->A0
, s
->tmp1_i64
);
8252 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
8253 if (prefixes
& PREFIX_LOCK
) {
8256 if (prefixes
& PREFIX_DATA
) {
8258 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLFLUSHOPT
)) {
8263 if ((s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
))
8264 || !(s
->cpuid_features
& CPUID_CLFLUSH
)) {
8268 gen_nop_modrm(env
, s
, modrm
);
8271 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
8272 case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
8273 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
8274 case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
8276 && (prefixes
& PREFIX_REPZ
)
8277 && !(prefixes
& PREFIX_LOCK
)
8278 && (s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_FSGSBASE
)) {
8279 TCGv base
, treg
, src
, dst
;
8281 /* Preserve hflags bits by testing CR4 at runtime. */
8282 tcg_gen_movi_i32(s
->tmp2_i32
, CR4_FSGSBASE_MASK
);
8283 gen_helper_cr4_testbit(cpu_env
, s
->tmp2_i32
);
8285 base
= cpu_seg_base
[modrm
& 8 ? R_GS
: R_FS
];
8286 treg
= cpu_regs
[(modrm
& 7) | REX_B(s
)];
8290 dst
= base
, src
= treg
;
8293 dst
= treg
, src
= base
;
8296 if (s
->dflag
== MO_32
) {
8297 tcg_gen_ext32u_tl(dst
, src
);
8299 tcg_gen_mov_tl(dst
, src
);
8305 case 0xf8: /* sfence / pcommit */
8306 if (prefixes
& PREFIX_DATA
) {
8308 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_PCOMMIT
)
8309 || (prefixes
& PREFIX_LOCK
)) {
8315 case 0xf9 ... 0xff: /* sfence */
8316 if (!(s
->cpuid_features
& CPUID_SSE
)
8317 || (prefixes
& PREFIX_LOCK
)) {
8320 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
8322 case 0xe8 ... 0xef: /* lfence */
8323 if (!(s
->cpuid_features
& CPUID_SSE
)
8324 || (prefixes
& PREFIX_LOCK
)) {
8327 tcg_gen_mb(TCG_MO_LD_LD
| TCG_BAR_SC
);
8329 case 0xf0 ... 0xf7: /* mfence */
8330 if (!(s
->cpuid_features
& CPUID_SSE2
)
8331 || (prefixes
& PREFIX_LOCK
)) {
8334 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
8342 case 0x10d: /* 3DNow! prefetch(w) */
8343 modrm
= x86_ldub_code(env
, s
);
8344 mod
= (modrm
>> 6) & 3;
8347 gen_nop_modrm(env
, s
, modrm
);
8349 case 0x1aa: /* rsm */
8350 gen_svm_check_intercept(s
, pc_start
, SVM_EXIT_RSM
);
8351 if (!(s
->flags
& HF_SMM_MASK
))
8353 #ifdef CONFIG_USER_ONLY
8354 /* we should not be in SMM mode */
8355 g_assert_not_reached();
8357 gen_update_cc_op(s
);
8358 gen_jmp_im(s
, s
->pc
- s
->cs_base
);
8359 gen_helper_rsm(cpu_env
);
8360 #endif /* CONFIG_USER_ONLY */
8363 case 0x1b8: /* SSE4.2 popcnt */
8364 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
8367 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
8370 modrm
= x86_ldub_code(env
, s
);
8371 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8373 if (s
->prefix
& PREFIX_DATA
) {
8376 ot
= mo_64_32(dflag
);
8379 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
8380 gen_extu(ot
, s
->T0
);
8381 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
8382 tcg_gen_ctpop_tl(s
->T0
, s
->T0
);
8383 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
8385 set_cc_op(s
, CC_OP_POPCNT
);
8387 case 0x10e ... 0x10f:
8388 /* 3DNow! instructions, ignore prefixes */
8389 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
8391 case 0x110 ... 0x117:
8392 case 0x128 ... 0x12f:
8393 case 0x138 ... 0x13a:
8394 case 0x150 ... 0x179:
8395 case 0x17c ... 0x17f:
8397 case 0x1c4 ... 0x1c6:
8398 case 0x1d0 ... 0x1fe:
8399 gen_sse(env
, s
, b
, pc_start
);
8406 gen_illegal_opcode(s
);
8409 gen_unknown_opcode(env
, s
);
8413 void tcg_x86_init(void)
8415 static const char reg_names
[CPU_NB_REGS
][4] = {
8416 #ifdef TARGET_X86_64
8444 static const char seg_base_names
[6][8] = {
8452 static const char bnd_regl_names
[4][8] = {
8453 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
8455 static const char bnd_regu_names
[4][8] = {
8456 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
8460 cpu_cc_op
= tcg_global_mem_new_i32(cpu_env
,
8461 offsetof(CPUX86State
, cc_op
), "cc_op");
8462 cpu_cc_dst
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_dst
),
8464 cpu_cc_src
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_src
),
8466 cpu_cc_src2
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_src2
),
8469 for (i
= 0; i
< CPU_NB_REGS
; ++i
) {
8470 cpu_regs
[i
] = tcg_global_mem_new(cpu_env
,
8471 offsetof(CPUX86State
, regs
[i
]),
8475 for (i
= 0; i
< 6; ++i
) {
8477 = tcg_global_mem_new(cpu_env
,
8478 offsetof(CPUX86State
, segs
[i
].base
),
8482 for (i
= 0; i
< 4; ++i
) {
8484 = tcg_global_mem_new_i64(cpu_env
,
8485 offsetof(CPUX86State
, bnd_regs
[i
].lb
),
8488 = tcg_global_mem_new_i64(cpu_env
,
8489 offsetof(CPUX86State
, bnd_regs
[i
].ub
),
8494 static void i386_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cpu
)
8496 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8497 CPUX86State
*env
= cpu
->env_ptr
;
8498 uint32_t flags
= dc
->base
.tb
->flags
;
8499 int cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
8500 int iopl
= (flags
>> IOPL_SHIFT
) & 3;
8502 dc
->cs_base
= dc
->base
.tb
->cs_base
;
8504 #ifndef CONFIG_USER_ONLY
8509 /* We make some simplifying assumptions; validate they're correct. */
8510 g_assert(PE(dc
) == ((flags
& HF_PE_MASK
) != 0));
8511 g_assert(CPL(dc
) == cpl
);
8512 g_assert(IOPL(dc
) == iopl
);
8513 g_assert(VM86(dc
) == ((flags
& HF_VM_MASK
) != 0));
8514 g_assert(CODE32(dc
) == ((flags
& HF_CS32_MASK
) != 0));
8515 g_assert(CODE64(dc
) == ((flags
& HF_CS64_MASK
) != 0));
8516 g_assert(SS32(dc
) == ((flags
& HF_SS32_MASK
) != 0));
8517 g_assert(LMA(dc
) == ((flags
& HF_LMA_MASK
) != 0));
8518 g_assert(ADDSEG(dc
) == ((flags
& HF_ADDSEG_MASK
) != 0));
8520 dc
->cc_op
= CC_OP_DYNAMIC
;
8521 dc
->cc_op_dirty
= false;
8522 dc
->popl_esp_hack
= 0;
8523 /* select memory access functions */
8525 #ifdef CONFIG_SOFTMMU
8526 dc
->mem_index
= cpu_mmu_index(env
, false);
8528 dc
->cpuid_features
= env
->features
[FEAT_1_EDX
];
8529 dc
->cpuid_ext_features
= env
->features
[FEAT_1_ECX
];
8530 dc
->cpuid_ext2_features
= env
->features
[FEAT_8000_0001_EDX
];
8531 dc
->cpuid_ext3_features
= env
->features
[FEAT_8000_0001_ECX
];
8532 dc
->cpuid_7_0_ebx_features
= env
->features
[FEAT_7_0_EBX
];
8533 dc
->cpuid_xsave_features
= env
->features
[FEAT_XSAVE
];
8534 dc
->jmp_opt
= !(dc
->base
.singlestep_enabled
||
8535 (flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)));
8537 * If jmp_opt, we want to handle each string instruction individually.
8538 * For icount also disable repz optimization so that each iteration
8539 * is accounted separately.
8541 dc
->repz_opt
= !dc
->jmp_opt
&& !(tb_cflags(dc
->base
.tb
) & CF_USE_ICOUNT
);
8543 dc
->T0
= tcg_temp_new();
8544 dc
->T1
= tcg_temp_new();
8545 dc
->A0
= tcg_temp_new();
8547 dc
->tmp0
= tcg_temp_new();
8548 dc
->tmp1_i64
= tcg_temp_new_i64();
8549 dc
->tmp2_i32
= tcg_temp_new_i32();
8550 dc
->tmp3_i32
= tcg_temp_new_i32();
8551 dc
->tmp4
= tcg_temp_new();
8552 dc
->ptr0
= tcg_temp_new_ptr();
8553 dc
->ptr1
= tcg_temp_new_ptr();
8554 dc
->cc_srcT
= tcg_temp_local_new();
8557 static void i386_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
8561 static void i386_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
8563 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8565 tcg_gen_insn_start(dc
->base
.pc_next
, dc
->cc_op
);
8568 static bool i386_tr_breakpoint_check(DisasContextBase
*dcbase
, CPUState
*cpu
,
8569 const CPUBreakpoint
*bp
)
8571 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8572 /* If RF is set, suppress an internally generated breakpoint. */
8573 int flags
= dc
->base
.tb
->flags
& HF_RF_MASK
? BP_GDB
: BP_ANY
;
8574 if (bp
->flags
& flags
) {
8575 gen_debug(dc
, dc
->base
.pc_next
- dc
->cs_base
);
8576 dc
->base
.is_jmp
= DISAS_NORETURN
;
8577 /* The address covered by the breakpoint must be included in
8578 [tb->pc, tb->pc + tb->size) in order to for it to be
8579 properly cleared -- thus we increment the PC here so that
8580 the generic logic setting tb->size later does the right thing. */
8581 dc
->base
.pc_next
+= 1;
8588 static void i386_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
8590 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8591 target_ulong pc_next
;
8593 #ifdef TARGET_VSYSCALL_PAGE
8595 * Detect entry into the vsyscall page and invoke the syscall.
8597 if ((dc
->base
.pc_next
& TARGET_PAGE_MASK
) == TARGET_VSYSCALL_PAGE
) {
8598 gen_exception(dc
, EXCP_VSYSCALL
, dc
->base
.pc_next
);
8603 pc_next
= disas_insn(dc
, cpu
);
8605 if (dc
->flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)) {
8606 /* if single step mode, we generate only one instruction and
8607 generate an exception */
8608 /* if irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8609 the flag and abort the translation to give the irqs a
8611 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
8612 } else if ((tb_cflags(dc
->base
.tb
) & CF_USE_ICOUNT
)
8613 && ((pc_next
& TARGET_PAGE_MASK
)
8614 != ((pc_next
+ TARGET_MAX_INSN_SIZE
- 1)
8616 || (pc_next
& ~TARGET_PAGE_MASK
) == 0)) {
8617 /* Do not cross the boundary of the pages in icount mode,
8618 it can cause an exception. Do it only when boundary is
8619 crossed by the first instruction in the block.
8620 If current instruction already crossed the bound - it's ok,
8621 because an exception hasn't stopped this code.
8623 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
8624 } else if ((pc_next
- dc
->base
.pc_first
) >= (TARGET_PAGE_SIZE
- 32)) {
8625 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
8628 dc
->base
.pc_next
= pc_next
;
8631 static void i386_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
8633 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8635 if (dc
->base
.is_jmp
== DISAS_TOO_MANY
) {
8636 gen_jmp_im(dc
, dc
->base
.pc_next
- dc
->cs_base
);
8641 static void i386_tr_disas_log(const DisasContextBase
*dcbase
,
8644 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8646 qemu_log("IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
8647 log_target_disas(cpu
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
8650 static const TranslatorOps i386_tr_ops
= {
8651 .init_disas_context
= i386_tr_init_disas_context
,
8652 .tb_start
= i386_tr_tb_start
,
8653 .insn_start
= i386_tr_insn_start
,
8654 .breakpoint_check
= i386_tr_breakpoint_check
,
8655 .translate_insn
= i386_tr_translate_insn
,
8656 .tb_stop
= i386_tr_tb_stop
,
8657 .disas_log
= i386_tr_disas_log
,
8660 /* generate intermediate code for basic block 'tb'. */
8661 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int max_insns
)
8665 translator_loop(&i386_tr_ops
, &dc
.base
, cpu
, tb
, max_insns
);
8668 void restore_state_to_opc(CPUX86State
*env
, TranslationBlock
*tb
,
8671 int cc_op
= data
[1];
8672 env
->eip
= data
[0] - tb
->cs_base
;
8673 if (cc_op
!= CC_OP_DYNAMIC
) {