/*
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40

/* For a switch indexed by MODRM, match all memory operands for a given OP. */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
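
/*
 * Worked example (editorial): with OP = 3, CASE_MODRM_MEM_OP(3) expands
 * to the case ranges 0x18...0x1f, 0x58...0x5f and 0x98...0x9f, i.e. every
 * modrm byte whose reg/op field is 3 and whose mod field is not 3 (memory
 * operands only).  CASE_MODRM_OP(3) additionally matches 0xd8...0xdf,
 * covering the register-operand encodings as well.
 */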

//#define MACRO_TEST   1

/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];

typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */
    target_ulong pc_save;

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement. */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */

    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;
} DisasContext;

#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3

/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif

#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif

#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif

#if defined(CONFIG_SOFTMMU) && !defined(TARGET_X86_64)
#define LMA(S)    false
#else
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif

/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }

#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif

static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X. */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};

static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean. */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts). */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
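
/*
 * For example, moving from CC_OP_ADDL to CC_OP_LOGICL discards cpu_cc_src:
 * per cc_op_live[], logic ops keep only CC_DST live, so the discard lets
 * TCG drop the now-dead value instead of keeping it alive across the op.
 */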

#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */

#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
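
/*
 * For example, without a REX prefix byte register number 4 names AH
 * (bits 15..8 of EAX); with any REX prefix present it names SPL (the
 * low 8 bits of RSP) instead, so this function returns false.
 */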

/* Select the size of a push/pop operation. */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer. */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes. */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes. */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes. */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
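
/*
 * For example, opcode 0x88 (MOV r/m8, r8) has bit 0 clear, so
 * mo_b_d(0x88, ot) yields MO_8, while 0x89 (MOV r/m, r) keeps the
 * current operand size; mo_b_d32() additionally caps the size at
 * 32 bits, matching the maximum width of an x86 I/O port access.
 */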

/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg,
                                 TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}

static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}

static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}

static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg_T0(DisasContext *s, MemOp size, int reg)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], s->T0);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}

static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next - s->cs_base);
    }
    s->pc_save = s->base.pc_next;
}

static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else {
        tcg_gen_movi_tl(cpu_eip, s->pc - s->cs_base);
    }
    s->pc_save = s->pc;
}

static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}
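
/*
 * For example, with CF_PCREL set and pc_save == 0x1000, finishing an
 * instruction whose base.pc_next is 0x1003 emits "cpu_eip += 3" instead
 * of storing an absolute address, which keeps the generated code
 * position-independent and the translation block relocatable.
 */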

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}

static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode). IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}

static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->pc - s->cs_base);
    }
}

static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else {
        return tcg_constant_tl(s->base.pc_next - s->cs_base);
    }
}

/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override. */
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(s->A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(s->A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        if (ovr_seg < 0) {
            ovr_seg = def_seg;
        }
        tcg_gen_ext16u_tl(s->A0, a0);
        a0 = s->A0;
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(s->A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(s->A0, a0);
            tcg_gen_add_tl(s->A0, s->A0, seg);
        } else {
            tcg_gen_add_tl(s->A0, a0, seg);
            tcg_gen_ext32u_tl(s->A0, s->A0);
        }
    }
}
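
/*
 * For example, in flat 32-bit mode (HF_ADDSEG clear) a plain [ESI]
 * operand takes the ovr_seg < 0 path and A0 is just ESI zero-extended;
 * with a non-trivial segment base, the final branch instead computes
 * A0 = (uint32_t)(a0 + seg_base).
 */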

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(DisasContext *s, MemOp ot)
{
    tcg_gen_ld32s_tl(s->T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(s->T0, s->T0, ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    tcg_gen_mov_tl(s->tmp0, cpu_regs[R_ECX]);
    gen_extu(s->aflag, s->tmp0);
    tcg_gen_brcondi_tl(cond, s->tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}

static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(cpu_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(cpu_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}

static void gen_movs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    zero = NULL;
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live. */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0. */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB. */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
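
/*
 * Worked example: after "cmp eax, ebx" (CC_OP_SUBL), the carry flag is
 * (uint32_t)old_eax < (uint32_t)ebx, which is exactly the TCG_COND_LTU
 * comparison prepared above from cc_srcT and cc_src.
 */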

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case. */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS. */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
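
/*
 * For example, JNE (opcode 0x75) decodes as b = 5: inv = 1 and
 * jcc_op = JCC_Z, so the prepared "zero" condition is inverted at the
 * end via tcg_invert_cond().
 */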

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}

static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
}

static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_cmps(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}

static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_EDI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_movl_T0_Dshift(s, ot);
    gen_op_add_reg_T0(s, s->aflag, R_ESI);
    gen_bpt_io(s, s->tmp2_i32, ot);
}

/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }
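
/*
 * For example, GEN_REPZ(movs) expands to a one-line wrapper
 * "static inline void gen_repz_movs(DisasContext *s, MemOp ot)"
 * whose body calls gen_repz(s, ot, gen_movs).
 */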

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_constant_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(cpu_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}

/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode. */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}

/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    if (d != OR_TMP0) {
        if (s1->prefix & PREFIX_LOCK) {
            /* Lock prefix when destination is not memory. */
            gen_illegal_opcode(s1);
            return;
        }
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch (op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}

static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents. */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries. */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value. */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable. */
    set_cc_op(s, CC_OP_DYNAMIC);
}
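
/*
 * The movcond pairs above implement the x86 rule that a shift by a
 * count of zero leaves the flags untouched: only a non-zero count
 * commits the new result/shm1 values and the new CC_OP.
 */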

static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}

static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, s->T0);
                tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_sari_tl(s->T0, s->T0, op2);
            } else {
                gen_extu(ot, s->T0);
                tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_shri_tl(s->T0, s->T0, op2);
            }
        } else {
            tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
            tcg_gen_shli_tl(s->T0, s->T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, s->T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}

static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works. */
        tcg_gen_ext8u_tl(s->T0, s->T0);
        tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works. */
        tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        if (is_right) {
            tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        } else {
            tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        }
        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
        } else {
            tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC. */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead. */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
       exactly as we computed above. */
    t0 = tcg_constant_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, s->T1);
    tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        s->tmp2_i32, s->tmp3_i32);

    /* The CC_OP value is no longer predictable. */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            if (is_right) {
                tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(s->T0, s->T0, op2);
            } else {
                tcg_gen_rotli_tl(s->T0, s->T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, s->T0);
            tcg_gen_shli_tl(s->tmp0, s->T0, shift);
            tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
            tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC. */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead. */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(s->T0, cpu_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rcrw(s->T0, cpu_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcrl(s->T0, cpu_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(s->T0, cpu_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(s->T0, cpu_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rclw(s->T0, cpu_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcll(s->T0, cpu_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(s->T0, cpu_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value. */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 defined then fall through into MO_32 case,
         * otherwise fall through default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift. */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour. */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}

static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
{
    if (s != OR_TMP1) {
        gen_op_mov_v_reg(s1, ot, s1->T1, s);
    }
    switch (op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
{
    switch (op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(s1->T1, c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}

#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary. */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            volatile uint8_t unused =
                cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
            (void) unused;
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
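
/*
 * For example, an instruction padded with fourteen redundant 0x66
 * prefixes plus a two-byte opcode exceeds X86_MAX_INSN_LENGTH and must
 * raise #GP, unless fetching its tail faults first as described above.
 */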

static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif

/* Decompose an address. */

typedef struct AddressParts {
    int def_seg;
    int base;
    int index;
    int scale;
    target_long disp;
} AddressParts;

static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1; /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp. */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
2297 /* Compute the address, with a minimum number of TCG ops. */
2298 static TCGv
gen_lea_modrm_1(DisasContext
*s
, AddressParts a
, bool is_vsib
)
2302 if (a
.index
>= 0 && !is_vsib
) {
2304 ea
= cpu_regs
[a
.index
];
2306 tcg_gen_shli_tl(s
->A0
, cpu_regs
[a
.index
], a
.scale
);
2310 tcg_gen_add_tl(s
->A0
, ea
, cpu_regs
[a
.base
]);
2313 } else if (a
.base
>= 0) {
2314 ea
= cpu_regs
[a
.base
];
2317 if (tb_cflags(s
->base
.tb
) & CF_PCREL
&& a
.base
== -2) {
2318 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2319 tcg_gen_addi_tl(s
->A0
, cpu_eip
, a
.disp
- s
->pc_save
);
2321 tcg_gen_movi_tl(s
->A0
, a
.disp
);
2324 } else if (a
.disp
!= 0) {
2325 tcg_gen_addi_tl(s
->A0
, ea
, a
.disp
);
2332 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2334 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2335 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2336 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2339 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2341 (void)gen_lea_modrm_0(env
, s
, modrm
);
2344 /* Used for BNDCL, BNDCU, BNDCN. */
2345 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2346 TCGCond cond
, TCGv_i64 bndv
)
2348 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2349 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2351 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
2353 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
2355 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
2356 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
2357 gen_helper_bndck(cpu_env
, s
->tmp2_i32
);
2360 /* used for LEA and MOV AX, mem */
2361 static void gen_add_A0_ds_seg(DisasContext
*s
)
2363 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, R_DS
, s
->override
);
2366 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2368 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2369 MemOp ot
, int reg
, int is_store
)
2373 mod
= (modrm
>> 6) & 3;
2374 rm
= (modrm
& 7) | REX_B(s
);
2378 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2379 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
2381 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
2383 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2386 gen_lea_modrm(env
, s
, modrm
);
2389 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2390 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
2392 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
2394 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2399 static target_ulong
insn_get_addr(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2405 ret
= x86_ldub_code(env
, s
);
2408 ret
= x86_lduw_code(env
, s
);
2411 ret
= x86_ldl_code(env
, s
);
2413 #ifdef TARGET_X86_64
2415 ret
= x86_ldq_code(env
, s
);
2419 g_assert_not_reached();
2424 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2430 ret
= x86_ldub_code(env
, s
);
2433 ret
= x86_lduw_code(env
, s
);
2436 #ifdef TARGET_X86_64
2439 ret
= x86_ldl_code(env
, s
);
2442 g_assert_not_reached();
2447 static target_long
insn_get_signed(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2453 ret
= (int8_t) x86_ldub_code(env
, s
);
2456 ret
= (int16_t) x86_lduw_code(env
, s
);
2459 ret
= (int32_t) x86_ldl_code(env
, s
);
2461 #ifdef TARGET_X86_64
2463 ret
= x86_ldq_code(env
, s
);
2467 g_assert_not_reached();
2472 static inline int insn_const_size(MemOp ot
)
2481 static void gen_jcc(DisasContext
*s
, int b
, int diff
)
2483 TCGLabel
*l1
= gen_new_label();
2486 gen_jmp_rel_csize(s
, 0, 1);
2488 gen_jmp_rel(s
, s
->dflag
, diff
, 0);
2491 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, MemOp ot
, int b
,
2496 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2498 cc
= gen_prepare_cc(s
, b
, s
->T1
);
2499 if (cc
.mask
!= -1) {
2500 TCGv t0
= tcg_temp_new();
2501 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2505 cc
.reg2
= tcg_constant_tl(cc
.imm
);
2508 tcg_gen_movcond_tl(cc
.cond
, s
->T0
, cc
.reg
, cc
.reg2
,
2509 s
->T0
, cpu_regs
[reg
]);
2510 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2513 static inline void gen_op_movl_T0_seg(DisasContext
*s
, X86Seg seg_reg
)
2515 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
2516 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2519 static inline void gen_op_movl_seg_T0_vm(DisasContext
*s
, X86Seg seg_reg
)
2521 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
2522 tcg_gen_st32_tl(s
->T0
, cpu_env
,
2523 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2524 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], s
->T0
, 4);
/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
{
    if (PE(s) && !VM86(s)) {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        gen_helper_load_seg(cpu_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        } else if (CODE32(s) && seg_reg < R_FS) {
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
    } else {
        gen_op_movl_seg_T0_vm(s, seg_reg);
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        }
    }
}
static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
    /* no SVM activated; fast case */
    if (likely(!GUEST(s))) {
        return;
    }
    gen_helper_svm_check_intercept(cpu_env, tcg_constant_i32(type));
}
static inline void gen_stack_update(DisasContext *s, int addend)
{
    gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
}
/* Generate a push. It depends on ss32, addseg and dflag. */
static void gen_push_v(DisasContext *s, TCGv val)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    TCGv new_esp = s->A0;

    tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);

    if (!CODE64(s)) {
        if (ADDSEG(s)) {
            new_esp = tcg_temp_new();
            tcg_gen_mov_tl(new_esp, s->A0);
        }
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
    }

    gen_op_st_v(s, d_ot, val, s->A0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
}
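/*
 * Note that ESP is only written back after the store above has succeeded,
 * so a push that faults leaves the stack pointer unmodified.
 */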
/* two step pop is necessary for precise exceptions */
static MemOp gen_pop_T0(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    return d_ot;
}
static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
    gen_stack_update(s, 1 << ot);
}
static inline void gen_stack_A0(DisasContext *s)
{
    gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
static void gen_pusha(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
    }

    gen_stack_update(s, -8 * size);
}
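/*
 * The loop above stores EDI first at the lowest address and EAX last,
 * just below the original ESP, matching the architectural PUSHA layout.
 */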
static void gen_popa(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (7 - i == R_ESP) {
            continue;
        }
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_ld_v(s, d_ot, s->T0, s->A0);
        gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
    }

    gen_stack_update(s, 8 * size);
}
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1. */
    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
    gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame. */
        for (i = 1; i < level; ++i) {
            tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_ld_v(s, d_ot, s->tmp0, s->A0);

            tcg_gen_subi_tl(s->A0, s->T1, size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_st_v(s, d_ot, s->tmp0, s->A0);
        }

        /* Push the current FrameTemp as the last level. */
        tcg_gen_subi_tl(s->A0, s->T1, size * level);
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, s->T1, s->A0);
    }

    /* Copy the FrameTemp value to EBP. */
    gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);

    /* Compute the final value of ESP. */
    tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
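/*
 * In gen_enter above, T1 holds the FrameTemp value of the Intel pseudocode
 * (ESP just after BP was pushed) throughout, so both EBP and the final ESP
 * can be derived from it without reloading the stack pointer.
 */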
static void gen_leave(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);

    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);

    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream. */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    if (qemu_loglevel_mask(LOG_UNIMP)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            target_ulong pc = s->base.pc_next, end = s->pc;

            fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_interrupt(cpu_env, tcg_constant_i32(intno),
                               cur_insn_len_i32(s));
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_gen_ori_i32(t, t, mask);
        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        s->flags |= mask;
    }
}
static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if (s->flags & mask) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        tcg_gen_andi_i32(t, t, ~mask);
        tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
        s->flags &= ~mask;
    }
}
static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(t, t, mask);
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
}
static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, cpu_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(t, t, ~mask);
    tcg_gen_st_tl(t, cpu_env, offsetof(CPUX86State, eflags));
}
/* Clear BND registers during legacy branches. */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime. */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(cpu_env);
    }
}
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns. */
static void
do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it. */
    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    } else {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (recheck_tf) {
        gen_helper_rechecking_single_step(cpu_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->flags & HF_TF_MASK) {
        gen_helper_single_step(cpu_env);
    } else if (jr) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
static inline void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}
/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}
/* End of block, resetting the inhibit irq flag. */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}
/* Jump to register */
static void gen_jr(DisasContext *s)
{
    do_gen_eob_worker(s, false, false, true);
}
/* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_eip &= mask;

    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    }

    if (use_goto_tb &&
        translator_use_goto_tb(&s->base, new_eip + s->cs_base)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_jr(s);   /* jump to another page */
        } else {
            gen_eob(s);  /* exit to main loop */
        }
    }
}
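/*
 * Note for gen_jmp_rel above: with CF_PCREL the generated code never
 * hard-codes an absolute EIP; the target is reached by adding a delta to
 * the current value, so the TB remains valid wherever it is mapped.
 */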
/* Jump to eip+diff, truncating to the current code size. */
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
{
    /* CODE64 ignores the OT argument, so we need not consider it. */
    gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
}
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset);
}
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
                        MO_LEUQ | (align ? MO_ALIGN_16 : 0));
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}
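/*
 * For the 16-byte accesses above, placing MO_ALIGN_16 on the first 8-byte
 * access enforces the alignment of the whole operand; the second half is
 * then known to be aligned as well.
 */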
static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
                        MO_LEUQ | (align ? MO_ALIGN_16 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
}
static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
                        MO_LEUQ | (align ? MO_ALIGN_32 : 0));
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));

    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
    tcg_gen_addi_tl(s->tmp0, s->A0, 24);
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
}
static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(0)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
                        MO_LEUQ | (align ? MO_ALIGN_32 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 8);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(1)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(2)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
    tcg_gen_addi_tl(s->tmp0, s->A0, 24);
    tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(YMMReg, YMM_Q(3)));
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
}
#include "decode-new.h"
#include "emit.c.inc"
#include "decode-new.c.inc"
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Set tmp0 to match the required value of Z. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
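/*
 * Note for gen_cmpxchg8b above: CMPXCHG8B architecturally defines only the
 * Z bit of EFLAGS, so the deposit into cpu_cc_src updates just that bit on
 * top of the fully computed flags.
 */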
#ifdef TARGET_X86_64
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
#endif
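/*
 * Note for gen_cmpxchg16b above: the 128-bit cmpxchg returns the old memory
 * value in VAL, so success is detected afterwards by comparing it against
 * the original RDX:RAX pair rather than from a condition produced by the
 * operation itself.
 */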
/* convert one instruction. s->base.is_jmp is set if the translation must
   be stopped. Return the next pc value */
static bool disas_insn(DisasContext *s, CPUState *cpu)
{
    CPUX86State *env = cpu->env_ptr;
    int b, prefixes;
    int shift;
    MemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    bool orig_cc_op_dirty = s->cc_op_dirty;
    CCOp orig_cc_op = s->cc_op;
    target_ulong orig_pc_save = s->pc_save;

    s->pc = s->base.pc_next;
    s->override = -1;
#ifdef TARGET_X86_64
    s->rex_r = 0;
    s->rex_x = 0;
    s->rex_b = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
    s->vex_w = false;
    switch (sigsetjmp(s->jmpbuf, 0)) {
    case 0:
        break;
    case 1:
        gen_exception_gpf(s);
        return true;
    case 2:
        /* Restore state that may affect the next instruction. */
        s->pc = s->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        s->cc_op_dirty = orig_cc_op_dirty;
        s->cc_op = orig_cc_op;
        s->pc_save = orig_pc_save;
        /* END TODO */
        s->base.num_insns--;
        tcg_remove_ops_after(s->prev_insn_end);
        s->base.is_jmp = DISAS_TOO_MANY;
        return false;
    default:
        g_assert_not_reached();
    }

    prefixes = 0;
 next_byte:
    s->prefix = prefixes;
    b = x86_ldub_code(env, s);
    /* Collect prefixes. */
    switch (b) {
    case 0x0f:
        b = x86_ldub_code(env, s) + 0x100;
        break;
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        prefixes &= ~PREFIX_REPNZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        prefixes &= ~PREFIX_REPZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            prefixes |= PREFIX_REX;
            s->vex_w = (b >> 3) & 1;
            s->rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            s->rex_b = (b & 0x1) << 3;
            goto next_byte;
        }
        break;
#endif
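    /*
     * REX is 0100WRXB: W selects 64-bit operand size, while R, X and B
     * extend the modrm reg field, the SIB index and the rm/base field
     * respectively, which is why the bits are shifted into place above.
     */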
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        /* VEX prefixes cannot be used except in 32-bit mode.
           Otherwise the instruction is LES or LDS. */
        if (CODE32(s) && !VM86(s)) {
            int vex2 = x86_ldub_code(env, s);
            s->pc--; /* rewind the advance_pc() x86_ldub_code() did */

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS. */
                break;
            }
            disas_insn_new(s, cpu, b);
            return true;
        }
        break;
    }
    /* Post-process prefixes. */
    if (CODE64(s)) {
        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
           over 0x66 if both are present. */
        dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
        /* In 64-bit mode, 0x67 selects 32-bit addressing. */
        aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
    } else {
        /* In 16/32-bit mode, 0x66 selects the opposite data size. */
        if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
            dflag = MO_32;
        } else {
            dflag = MO_16;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
        if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
            aflag = MO_32;
        } else {
            aflag = MO_16;
        }
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* now check op code */
    switch (b) {
    /**************************/
    /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int f;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            ot = mo_b_d(b, dflag);

            switch (f) {
            case 0: /* OP Ev, Gv */
                modrm = x86_ldub_code(env, s);
                reg = ((modrm >> 3) & 7) | REX_R(s);
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    set_cc_op(s, CC_OP_CLR);
                    tcg_gen_movi_tl(s->T0, 0);
                    gen_op_mov_reg_v(s, ot, reg, s->T0);
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_v_reg(s, ot, s->T1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = x86_ldub_code(env, s);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | REX_R(s);
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, s->T1, s->A0);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_v_reg(s, ot, s->T1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(env, s, ot);
                tcg_gen_movi_tl(s->T1, val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;

        if (mod != 3) {
            if (b == 0x83) {
                s->rip_offset = 1;
            } else {
                s->rip_offset = insn_const_size(ot);
            }
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }

        switch (b) {
        default:
        case 0x80:
        case 0x81:
        case 0x82:
            val = insn_get(env, s, ot);
            break;
        case 0x83:
            val = (int8_t)insn_get(env, s, MO_8);
            break;
        }
        tcg_gen_movi_tl(s->T1, val);
        gen_op(s, op, ot, opreg);
        break;
    /**************************/
    /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0) {
                s->rip_offset = insn_const_size(ot);
            }
            gen_lea_modrm(env, s, modrm);
            /* For those below that handle locked memory, don't load here. */
            if (!(s->prefix & PREFIX_LOCK)
                || op != 2) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch (op) {
        case 0: /* test */
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
            gen_op_testl_T0_T1_cc(s);
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;
        case 2: /* not */
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                tcg_gen_movi_tl(s->T0, ~0);
                tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
            } else {
                tcg_gen_not_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            break;
        case 3: /* neg */
            if (s->prefix & PREFIX_LOCK) {
                TCGLabel *label1;
                TCGv a0, t0, t1, t2;

                if (mod == 3) {
                    goto illegal_op;
                }
                a0 = s->A0;
                t0 = s->T0;

                label1 = gen_new_label();

                gen_set_label(label1);
                t1 = tcg_temp_new();
                t2 = tcg_temp_new();
                tcg_gen_mov_tl(t2, t0);
                tcg_gen_neg_tl(t1, t0);
                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
                                          s->mem_index, ot | MO_LE);
                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);

                tcg_gen_neg_tl(s->T0, t0);
            } else {
                tcg_gen_neg_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            gen_op_update_neg_cc(s);
            set_cc_op(s, CC_OP_SUBB + ot);
            break;
        case 4: /* mul */
            switch (ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8u_tl(s->T0, s->T0);
                tcg_gen_ext8u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16u_tl(s->T0, s->T0);
                tcg_gen_ext16u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                tcg_gen_mov_tl(cpu_cc_src, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch (ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8s_tl(s->T0, s->T0);
                tcg_gen_ext8s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext8s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16s_tl(s->T0, s->T0);
                tcg_gen_ext16s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext16s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 6: /* div */
            switch (ot) {
            case MO_8:
                gen_helper_divb_AL(cpu_env, s->T0);
                break;
            case MO_16:
                gen_helper_divw_AX(cpu_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_divl_EAX(cpu_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_divq_EAX(cpu_env, s->T0);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch (ot) {
            case MO_8:
                gen_helper_idivb_AL(cpu_env, s->T0);
                break;
            case MO_16:
                gen_helper_idivw_AX(cpu_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_idivl_EAX(cpu_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_idivq_EAX(cpu_env, s->T0);
                break;
#endif
            }
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto unknown_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = MO_64;
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch (op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_push_v(s, eip_next_tl(s));
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 3: /* lcall Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
        do_lcall:
            if (PE(s) && !VM86(s)) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lcall_protected(cpu_env, s->tmp2_i32, s->T1,
                                           tcg_constant_i32(dflag - 1),
                                           eip_next_tl(s));
            } else {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
                gen_helper_lcall_real(cpu_env, s->tmp2_i32, s->tmp3_i32,
                                      tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
            }
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 5: /* ljmp Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
        do_ljmp:
            if (PE(s) && !VM86(s)) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ljmp_protected(cpu_env, s->tmp2_i32, s->T1,
                                          eip_next_tl(s));
            } else {
                gen_op_movl_seg_T0_vm(s, R_CS);
                gen_op_jmp_v(s, s->T1);
            }
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 6: /* push Ev */
            gen_push_v(s, s->T0);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(s, ot, s->T1, reg);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);

        gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
        tcg_gen_movi_tl(s->T1, val);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;
    case 0x98: /* CWDE/CBW */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
            tcg_gen_ext8s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x99: /* CDQ/CWD */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
            tcg_gen_sari_tl(s->T0, s->T0, 63);
            gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 31);
            gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 15);
            gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (b == 0x69) {
            s->rip_offset = insn_const_size(ot);
        } else if (b == 0x6b) {
            s->rip_offset = 1;
        }
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(s->T1, val);
        } else {
            gen_op_mov_v_reg(s, ot, s->T1, reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_64:
            tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
            break;
#endif
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
            tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                              s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
            tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
            break;
        default:
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_ext16s_tl(s->T1, s->T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(s->T0, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            tcg_gen_ext16s_tl(s->tmp0, s->T0);
            tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            tcg_gen_add_tl(s->T0, s->T0, s->T1);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
            } else {
                gen_op_ld_v(s, ot, s->T1, s->A0);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
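    /*
     * For the locked xadd above, the atomic fetch-add returns the old
     * memory value in T1; the sum is recomputed into T0 only so that the
     * flags can be derived from both operands.
     */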
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            TCGv oldv, newv, cmpv, dest;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(s, ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
            gen_extu(ot, cmpv);
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
            } else if (mod == 3) {
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_v_reg(s, ot, oldv, rm);
                gen_extu(ot, oldv);

                /*
                 * Unlike the memory case, where "the destination operand receives
                 * a write cycle without regard to the result of the comparison",
                 * rm must not be touched altogether if the write fails, including
                 * not zero-extending it on 64-bit processors.  So, precompute
                 * the result of a successful writeback and perform the movcond
                 * directly on cpu_regs.  Also need to write accumulator first, in
                 * case rm is part of RAX too.
                 */
                dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
                tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, oldv, s->A0);

                /*
                 * Perform an unconditional store cycle like physical cpu;
                 * must be before changing accumulator to ensure
                 * idempotency if the store faults and the instruction
                 * is restarted
                 */
                tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                gen_op_st_v(s, ot, newv, s->A0);
            }
            /*
             * Write EAX only if the cmpxchg fails; reuse newv as the destination,
             * since it's dead here.
             */
            dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(s->cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        switch ((modrm >> 3) & 7) {
        case 1: /* CMPXCHG8, CMPXCHG16 */
            if (mod == 3) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (dflag == MO_64) {
                if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
                    goto illegal_op;
                }
                gen_cmpxchg16b(s, env, modrm);
                break;
            }
#endif
            if (!(s->cpuid_features & CPUID_CX8)) {
                goto illegal_op;
            }
            gen_cmpxchg8b(s, env, modrm);
            break;

        case 7: /* RDSEED, RDPID with f3 prefix */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (s->prefix & PREFIX_REPZ) {
                if (!(s->cpuid_ext_features & CPUID_7_0_ECX_RDPID)) {
                    goto illegal_op;
                }
                gen_helper_rdpid(s->T0, cpu_env);
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_reg_v(s, dflag, rm, s->T0);
                break;
            } else {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
                    goto illegal_op;
                }
                goto do_rdrand;
            }

        case 6: /* RDRAND */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
                !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
                goto illegal_op;
            }
        do_rdrand:
            translator_io_start(&s->base);
            gen_helper_rdrand(s->T0, cpu_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, dflag, rm, s->T0);
            set_cc_op(s, CC_OP_EFLAGS);
            break;

        default:
            goto illegal_op;
        }
        break;
    /**************************/
    /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
        gen_push_v(s, s->T0);
        break;
    case 0x58 ... 0x5f: /* pop */
        ot = gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
        break;
    case 0x60: /* pusha */
        if (CODE64(s)) {
            goto illegal_op;
        }
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s)) {
            goto illegal_op;
        }
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        ot = mo_pushpop(s, dflag);
        if (b == 0x68) {
            val = insn_get(env, s, ot);
        } else {
            val = (int8_t)insn_get(env, s, MO_8);
        }
        tcg_gen_movi_tl(s->T0, val);
        gen_push_v(s, s->T0);
        break;
    case 0x8f: /* pop Ev */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        ot = gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s, ot);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s, ot);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = x86_lduw_code(env, s);
            level = x86_ldub_code(env, s);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        gen_leave(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s)) {
            goto illegal_op;
        }
        gen_op_movl_T0_seg(s, b >> 3);
        gen_push_v(s, s->T0);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg(s, (b >> 3) & 7);
        gen_push_v(s, s->T0);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s)) {
            goto illegal_op;
        }
        reg = b >> 3;
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, reg);
        gen_pop_update(s, ot);
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7);
        gen_pop_update(s, ot);
        break;
    /**************************/
    /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        }
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(s->T0, val);
        if (mod != 3) {
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
        }
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS) {
            goto illegal_op;
        }
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg);
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6) {
            goto illegal_op;
        }
        gen_op_movl_T0_seg(s, reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;
    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            MemOp d_ot;
            MemOp s_ot;

            /* d_ot is the size of destination */
            d_ot = dflag;
            /* ot is the size of source */
            ot = (b & 1) + MO_8;
            /* s_ot is the sign+size of source */
            s_ot = b & 8 ? MO_SIGN | ot : ot;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
                    tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
                } else {
                    gen_op_mov_v_reg(s, ot, s->T0, rm);
                    switch (s_ot) {
                    case MO_UB:
                        tcg_gen_ext8u_tl(s->T0, s->T0);
                        break;
                    case MO_SB:
                        tcg_gen_ext8s_tl(s->T0, s->T0);
                        break;
                    case MO_UW:
                        tcg_gen_ext16u_tl(s->T0, s->T0);
                        break;
                    default:
                    case MO_SW:
                        tcg_gen_ext16s_tl(s->T0, s->T0);
                        break;
                    }
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, s_ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        }
        break;
    case 0x8d: /* lea */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
        reg = ((modrm >> 3) & 7) | REX_R(s);
        {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            TCGv ea = gen_lea_modrm_1(s, a, false);
            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
            gen_op_mov_reg_v(s, dflag, reg, s->A0);
        }
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
            offset_addr = insn_get_addr(env, s, s->aflag);
            tcg_gen_movi_tl(s->A0, offset_addr);
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
            } else {
                gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
        }
        break;
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(s->A0, s->A0, s->T0);
        gen_extu(s->aflag, s->A0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, s->T0, s->A0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = x86_ldq_code(env, s);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, tmp);
            gen_op_mov_reg_v(s, MO_64, reg, s->T0);
        } else
#endif
        {
            ot = dflag;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, val);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
        }
        break;
    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        } else {
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            /* for xchg, lock is implicit */
            tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
                                   s->mem_index, ot | MO_LE);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        break;
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above. */
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above. */
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            goto illegal_op;
        }
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, s->T1, s->A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, s->T0, s->A0);
        gen_movl_seg_T0(s, op);
        /* then put the data */
        gen_op_mov_reg_v(s, ot, reg, s->T1);
        break;
    /************************/
    /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }

            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = x86_ldub_code(env, s);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;

    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_v_reg(s, ot, s->T1, reg);

        if (shift) {
            TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;
    /************************/
    /* floats */
    case 0xd8 ... 0xdf:
        {
            bool update_fip = true;

            if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
                /* if CR0.EM or CR0.TS are set, generate an FPU exception */
                /* XXX: what to do if illegal op ? */
                gen_exception(s, EXCP07_PREX);
                break;
            }
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            op = ((b & 7) << 3) | ((modrm >> 3) & 7);
            if (mod != 3) {
                /* memory op */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                TCGv ea = gen_lea_modrm_1(s, a, false);
                TCGv last_addr = tcg_temp_new();
                bool update_fdp = true;

                tcg_gen_mov_tl(last_addr, ea);
                gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);

                switch (op) {
                case 0x00 ... 0x07: /* fxxxs */
                case 0x10 ... 0x17: /* fixxxl */
                case 0x20 ... 0x27: /* fxxxl */
                case 0x30 ... 0x37: /* fixxx */
                    {
                        int op1;
                        op1 = op & 7;

                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_FT0(cpu_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_FT0(cpu_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_FT0(cpu_env, s->tmp2_i32);
                            break;
                        }

                        gen_helper_fp_arith_ST0_FT0(op1);
                        if (op1 == 3) {
                            /* fcomp needs pop */
                            gen_helper_fpop(cpu_env);
                        }
                    }
                    break;
                case 0x08: /* flds */
                case 0x0a: /* fsts */
                case 0x0b: /* fstps */
                case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
                case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
                case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                    switch (op & 7) {
                    case 0:
                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_ST0(cpu_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_ST0(cpu_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_ST0(cpu_env, s->tmp2_i32);
                            break;
                        }
                        break;
                    case 1:
                        /* XXX: the corresponding CPUID bit must be tested ! */
                        switch (op >> 4) {
                        case 1:
                            gen_helper_fisttl_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fisttll_ST0(s->tmp1_i64, cpu_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fistt_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        switch (op >> 4) {
                        case 0:
                            gen_helper_fsts_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 1:
                            gen_helper_fistl_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fstl_ST0(s->tmp1_i64, cpu_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fist_ST0(s->tmp2_i32, cpu_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        if ((op & 7) == 3) {
                            gen_helper_fpop(cpu_env);
                        }
                        break;
                    }
                    break;
                case 0x0c: /* fldenv mem */
                    gen_helper_fldenv(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0d: /* fldcw mem */
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    gen_helper_fldcw(cpu_env, s->tmp2_i32);
                    update_fip = update_fdp = false;
                    break;
                case 0x0e: /* fnstenv mem */
                    gen_helper_fstenv(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0f: /* fnstcw mem */
                    gen_helper_fnstcw(s->tmp2_i32, cpu_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x1d: /* fldt mem */
                    gen_helper_fldt_ST0(cpu_env, s->A0);
                    break;
                case 0x1f: /* fstpt mem */
                    gen_helper_fstt_ST0(cpu_env, s->A0);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x2c: /* frstor mem */
                    gen_helper_frstor(cpu_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2e: /* fnsave mem */
                    gen_helper_fsave(cpu_env, s->A0,
                                     tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2f: /* fnstsw mem */
                    gen_helper_fnstsw(s->tmp2_i32, cpu_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x3c: /* fbld */
                    gen_helper_fbld_ST0(cpu_env, s->A0);
                    break;
                case 0x3e: /* fbstp */
                    gen_helper_fbst_ST0(cpu_env, s->A0);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x3d: /* fildll */
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fildll_ST0(cpu_env, s->tmp1_i64);
                    break;
                case 0x3f: /* fistpll */
                    gen_helper_fistll_ST0(s->tmp1_i64, cpu_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fpop(cpu_env);
                    break;
                default:
                    goto unknown_op;
                }
                if (update_fdp) {
                    int last_seg = s->override >= 0 ? s->override : a.def_seg;

                    tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
                                   offsetof(CPUX86State,
                                            segs[last_seg].selector));
                    tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
                                     offsetof(CPUX86State, fpds));
                    tcg_gen_st_tl(last_addr, cpu_env,
                                  offsetof(CPUX86State, fpdp));
                }
            } else {
                /* register float ops */
                opreg = rm;

                switch (op) {
                case 0x08: /* fld sti */
                    gen_helper_fpush(cpu_env);
                    gen_helper_fmov_ST0_STN(cpu_env,
                                            tcg_constant_i32((opreg + 1) & 7));
                    break;
                case 0x09: /* fxchg sti */
                case 0x29: /* fxchg4 sti, undocumented op */
                case 0x39: /* fxchg7 sti, undocumented op */
                    gen_helper_fxchg_ST0_STN(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x0a: /* grp d9/2 */
                    switch (rm) {
                    case 0: /* fnop */
                        /* check exceptions (FreeBSD FPU probe) */
                        gen_helper_fwait(cpu_env);
                        update_fip = false;
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0c: /* grp d9/4 */
                    switch (rm) {
                    case 0: /* fchs */
                        gen_helper_fchs_ST0(cpu_env);
                        break;
                    case 1: /* fabs */
                        gen_helper_fabs_ST0(cpu_env);
                        break;
                    case 4: /* ftst */
                        gen_helper_fldz_FT0(cpu_env);
                        gen_helper_fcom_ST0_FT0(cpu_env);
                        break;
                    case 5: /* fxam */
                        gen_helper_fxam_ST0(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0d: /* grp d9/5 */
                    switch (rm) {
                    case 0:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fld1_ST0(cpu_env);
                        break;
                    case 1:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2t_ST0(cpu_env);
                        break;
                    case 2:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldl2e_ST0(cpu_env);
                        break;
                    case 3:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldpi_ST0(cpu_env);
                        break;
                    case 4:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldlg2_ST0(cpu_env);
                        break;
                    case 5:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldln2_ST0(cpu_env);
                        break;
                    case 6:
                        gen_helper_fpush(cpu_env);
                        gen_helper_fldz_ST0(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0e: /* grp d9/6 */
                    switch (rm) {
                    case 0: /* f2xm1 */
                        gen_helper_f2xm1(cpu_env);
                        break;
                    case 1: /* fyl2x */
                        gen_helper_fyl2x(cpu_env);
                        break;
                    case 2: /* fptan */
                        gen_helper_fptan(cpu_env);
                        break;
                    case 3: /* fpatan */
                        gen_helper_fpatan(cpu_env);
                        break;
                    case 4: /* fxtract */
                        gen_helper_fxtract(cpu_env);
                        break;
                    case 5: /* fprem1 */
                        gen_helper_fprem1(cpu_env);
                        break;
                    case 6: /* fdecstp */
                        gen_helper_fdecstp(cpu_env);
                        break;
                    default:
                    case 7: /* fincstp */
                        gen_helper_fincstp(cpu_env);
                        break;
                    }
                    break;
                case 0x0f: /* grp d9/7 */
                    switch (rm) {
                    case 0: /* fprem */
                        gen_helper_fprem(cpu_env);
                        break;
                    case 1: /* fyl2xp1 */
                        gen_helper_fyl2xp1(cpu_env);
                        break;
                    case 2: /* fsqrt */
                        gen_helper_fsqrt(cpu_env);
                        break;
                    case 3: /* fsincos */
                        gen_helper_fsincos(cpu_env);
                        break;
                    case 5: /* fscale */
                        gen_helper_fscale(cpu_env);
                        break;
                    case 4: /* frndint */
                        gen_helper_frndint(cpu_env);
                        break;
                    case 6: /* fsin */
                        gen_helper_fsin(cpu_env);
                        break;
                    default:
                    case 7: /* fcos */
                        gen_helper_fcos(cpu_env);
                        break;
                    }
                    break;
                case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
                case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
                case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                    {
                        int op1;

                        op1 = op & 7;
                        if (op >= 0x20) {
                            gen_helper_fp_arith_STN_ST0(op1, opreg);
                            if (op >= 0x30) {
                                gen_helper_fpop(cpu_env);
                            }
                        } else {
                            gen_helper_fmov_FT0_STN(cpu_env,
                                                    tcg_constant_i32(opreg));
                            gen_helper_fp_arith_ST0_FT0(op1);
                        }
                    }
                    break;
                case 0x02: /* fcom */
                case 0x22: /* fcom2, undocumented op */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    break;
                case 0x03: /* fcomp */
                case 0x23: /* fcomp3, undocumented op */
                case 0x32: /* fcomp5, undocumented op */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x15: /* da/5 */
                    switch (rm) {
                    case 1: /* fucompp */
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
                        gen_helper_fucom_ST0_FT0(cpu_env);
                        gen_helper_fpop(cpu_env);
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1c:
                    switch (rm) {
                    case 0: /* feni (287 only, just do nop here) */
                        break;
                    case 1: /* fdisi (287 only, just do nop here) */
                        break;
                    case 2: /* fclex */
                        gen_helper_fclex(cpu_env);
                        update_fip = false;
                        break;
                    case 3: /* fninit */
                        gen_helper_fninit(cpu_env);
                        update_fip = false;
                        break;
                    case 4: /* fsetpm (287 only, just do nop here) */
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1d: /* fucomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x1e: /* fcomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(cpu_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x28: /* ffree sti */
                    gen_helper_ffree_STN(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x2a: /* fst sti */
                    gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
                    break;
                case 0x2b: /* fstp sti */
                case 0x0b: /* fstp1 sti, undocumented op */
                case 0x3a: /* fstp8 sti, undocumented op */
                case 0x3b: /* fstp9 sti, undocumented op */
                    gen_helper_fmov_STN_ST0(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x2c: /* fucom st(i) */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    break;
                case 0x2d: /* fucomp st(i) */
                    gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(cpu_env);
                    gen_helper_fpop(cpu_env);
                    break;
                case 0x33: /* de/3 */
                    switch (rm) {
                    case 1: /* fcompp */
                        gen_helper_fmov_FT0_STN(cpu_env, tcg_constant_i32(1));
                        gen_helper_fcom_ST0_FT0(cpu_env);
                        gen_helper_fpop(cpu_env);
                        gen_helper_fpop(cpu_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
);
4858 case 0x38: /* ffreep sti, undocumented op */
4859 gen_helper_ffree_STN(cpu_env
, tcg_constant_i32(opreg
));
4860 gen_helper_fpop(cpu_env
);
4862 case 0x3c: /* df/4 */
4865 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
4866 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
4867 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
4873 case 0x3d: /* fucomip */
4874 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4877 gen_update_cc_op(s
);
4878 gen_helper_fmov_FT0_STN(cpu_env
, tcg_constant_i32(opreg
));
4879 gen_helper_fucomi_ST0_FT0(cpu_env
);
4880 gen_helper_fpop(cpu_env
);
4881 set_cc_op(s
, CC_OP_EFLAGS
);
4883 case 0x3e: /* fcomip */
4884 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
4887 gen_update_cc_op(s
);
4888 gen_helper_fmov_FT0_STN(cpu_env
, tcg_constant_i32(opreg
));
4889 gen_helper_fcomi_ST0_FT0(cpu_env
);
4890 gen_helper_fpop(cpu_env
);
4891 set_cc_op(s
, CC_OP_EFLAGS
);
                case 0x10 ... 0x13: /* fcmovxx */
                case 0x18 ... 0x1b:
                    {
                        int op1;
                        TCGLabel *l1;
                        static const uint8_t fcmov_cc[8] = {
                            (JCC_B << 1),
                            (JCC_Z << 1),
                            (JCC_BE << 1),
                            (JCC_P << 1),
                        };

                        if (!(s->cpuid_features & CPUID_CMOV)) {
                            goto illegal_op;
                        }
                        op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                        l1 = gen_new_label();
                        gen_jcc1_noeob(s, op1, l1);
                        gen_helper_fmov_ST0_STN(cpu_env,
                                                tcg_constant_i32(opreg));
                        gen_set_label(l1);
                    }
                    break;
                default:
                    goto unknown_op;
                }
            }
            if (update_fip) {
                tcg_gen_ld_i32(s->tmp2_i32, cpu_env,
                               offsetof(CPUX86State, segs[R_CS].selector));
                tcg_gen_st16_i32(s->tmp2_i32, cpu_env,
                                 offsetof(CPUX86State, fpcs));
                tcg_gen_st_tl(eip_cur_tl(s),
                              cpu_env, offsetof(CPUX86State, fpip));
            }
        }
        break;
    /************************/
    /* string ops */

    case 0xa4: /* movsS */
    case 0xa5:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_movs(s, ot);
        } else {
            gen_movs(s, ot);
        }
        break;

    case 0xaa: /* stosS */
    case 0xab:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_stos(s, ot);
        } else {
            gen_stos(s, ot);
        }
        break;
    case 0xac: /* lodsS */
    case 0xad:
        ot = mo_b_d(b, dflag);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_lods(s, ot);
        } else {
            gen_lods(s, ot);
        }
        break;
    case 0xae: /* scasS */
    case 0xaf:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_scas(s, ot, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_scas(s, ot, 0);
        } else {
            gen_scas(s, ot);
        }
        break;

    case 0xa6: /* cmpsS */
    case 0xa7:
        ot = mo_b_d(b, dflag);
        if (prefixes & PREFIX_REPNZ) {
            gen_repz_cmps(s, ot, 1);
        } else if (prefixes & PREFIX_REPZ) {
            gen_repz_cmps(s, ot, 0);
        } else {
            gen_cmps(s, ot);
        }
        break;
4985 case 0x6c: /* insS */
4987 ot
= mo_b_d32(b
, dflag
);
4988 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
4989 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
4990 if (!gen_check_io(s
, ot
, s
->tmp2_i32
,
4991 SVM_IOIO_TYPE_MASK
| SVM_IOIO_STR_MASK
)) {
4994 translator_io_start(&s
->base
);
4995 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4996 gen_repz_ins(s
, ot
);
5001 case 0x6e: /* outsS */
5003 ot
= mo_b_d32(b
, dflag
);
5004 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
5005 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
5006 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_STR_MASK
)) {
5009 translator_io_start(&s
->base
);
5010 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
5011 gen_repz_outs(s
, ot
);
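    /*
     * Note: each string instruction above has two code paths: gen_repz_*
     * when a REP/REPNZ prefix is present (emitting a per-iteration loop
     * that can be interrupted), and the plain gen_* form otherwise.
     */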
        /************************/
        /* port I/O */

    case 0xe4: /* in im */
    case 0xe5:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xe6: /* out im */
    case 0xe7:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xec: /* in dx */
    case 0xed:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
    case 0xee: /* out dx */
    case 0xef:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
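    /*
     * Note: every IN/OUT variant above first validates the port access
     * (TSS I/O bitmap and SVM intercepts) via gen_check_io(), marks the TB
     * as performing I/O for icount with translator_io_start(), and finally
     * calls gen_bpt_io() so that I/O breakpoints can fire.
     */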
        /************************/
        /* control */
    case 0xc2: /* ret im */
        val = x86_ldsw_code(env, s);
        ot = gen_pop_T0(s);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xc3: /* ret */
        ot = gen_pop_T0(s);
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xca: /* lret im */
        val = x86_ldsw_code(env, s);
    do_lret:
        if (PE(s) && !VM86(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_lret_protected(cpu_env, tcg_constant_i32(dflag - 1),
                                      tcg_constant_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_v(s, s->T0);
            /* pop selector */
            gen_add_A0_im(s, 1 << dflag);
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            gen_op_movl_seg_T0_vm(s, R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (2 << dflag));
        }
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, SVM_EXIT_IRET);
        if (!PE(s) || VM86(s)) {
            /* real mode or vm86 mode */
            if (!check_vm86_iopl(s)) {
                break;
            }
            gen_helper_iret_real(cpu_env, tcg_constant_i32(dflag - 1));
        } else {
            gen_helper_iret_protected(cpu_env, tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
        }
        set_cc_op(s, CC_OP_EFLAGS);
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xe8: /* call im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_push_v(s, eip_next_tl(s));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;
    case 0x180 ... 0x18f: /* jcc Jv */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;

    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = x86_ldub_code(env, s);
        gen_setcc1(s, b, s->T0);
        gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        if (!(s->cpuid_features & CPUID_CMOV)) {
            goto illegal_op;
        }
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_cmovcc1(env, s, ot, b, modrm, reg);
        break;
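    /*
     * Note: the relative jump/call forms above all funnel through
     * gen_jmp_rel(), which applies the operand-size mask (e.g. the
     * instruction pointer wraps to 16 bits when dflag is MO_16) before
     * ending the TB.
     */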
        /************************/
        /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
        if (check_vm86_iopl(s)) {
            gen_update_cc_op(s);
            gen_helper_read_eflags(s->T0, cpu_env);
            gen_push_v(s, s->T0);
        }
        break;
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, SVM_EXIT_POPF);
        if (check_vm86_iopl(s)) {
            int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;

            if (CPL(s) == 0) {
                mask |= IF_MASK | IOPL_MASK;
            } else if (CPL(s) <= IOPL(s)) {
                mask |= IF_MASK;
            }
            if (dflag == MO_16) {
                mask &= 0xffff;
            }

            ot = gen_pop_T0(s);
            gen_helper_write_eflags(cpu_env, s->T0, tcg_constant_i32(mask));
            gen_pop_update(s, ot);
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
        tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(s->tmp2_i32, 1);
        tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(s->tmp2_i32, -1);
        tcg_gen_st_i32(s->tmp2_i32, cpu_env, offsetof(CPUX86State, df));
        break;
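    /*
     * Note: CMC/CLC/STC work by first materializing all flags into cc_src
     * via gen_compute_eflags() (which also switches cc_op to CC_OP_EFLAGS)
     * and then editing only the CC_C bit in place.
     */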
        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
        /* load shift */
        val = x86_ldub_code(env, s);
        tcg_gen_movi_tl(s->T1, val);
        if (op < 4)
            goto unknown_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(s, MO_32, s->T1, reg);
        if (mod != 3) {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            /* specific case: we need to add a displacement */
            gen_exts(ot, s->T1);
            tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
            tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
            tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
            gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
        tcg_gen_movi_tl(s->tmp0, 1);
        tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
        if (s->prefix & PREFIX_LOCK) {
            switch (op) {
            case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now. */
                gen_op_ld_v(s, ot, s->T0, s->A0);
                break;
            case 1: /* bts */
                tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
                                           s->mem_index, ot | MO_LE);
                break;
            case 2: /* btr */
                tcg_gen_not_tl(s->tmp0, s->tmp0);
                tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            default:
            case 3: /* btc */
                tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            }
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
        } else {
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
            switch (op) {
            case 0: /* bt */
                /* Data already loaded; nothing to do. */
                break;
            case 1: /* bts */
                tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
                break;
            case 2: /* btr */
                tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
                break;
            default:
            case 3: /* btc */
                tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
                break;
            }
            if (op != 0) {
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
        }

        /* Delay all CC updates until after the store above.  Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined.  */
        switch (s->cc_op) {
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width.  */
            tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
            break;
        default:
            /* Otherwise, generate EFLAGS and replace the C bit. */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
                               ctz32(CC_C), 1);
            break;
        }
        break;
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);

        /* Note that lzcnt and tzcnt are in different extensions. */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;
            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, s->T0);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size. */
                tcg_gen_ctzi_tl(s->T0, s->T0, size);
            }
            /* For lzcnt/tzcnt, Z bit is defined related to the result. */
            gen_op_update1_cc(s);
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result. */
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            set_cc_op(s, CC_OP_LOGICB + ot);

            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero. */
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros. */
                tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
                tcg_gen_clz_tl(s->T0, s->T0, s->T1);
                tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
            } else {
                tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
            }
        }
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
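    /*
     * Note: the REPZ (F3) prefix selects the lzcnt/tzcnt behaviour above
     * only when the matching CPUID feature (ABM for lzcnt, BMI1 for tzcnt)
     * is exposed; otherwise the encoding falls back to legacy bsr/bsf.
     */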
        /************************/
        /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_daa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_das(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aaa(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aas(cpu_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ);
        } else {
            gen_helper_aam(cpu_env, tcg_constant_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        gen_helper_aad(cpu_env, tcg_constant_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
        break;
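    /*
     * Note: the BCD helpers above compute both the result and the flags
     * inside the helper, so the translator only has to record CC_OP_EFLAGS
     * (or CC_OP_LOGICB for AAM/AAD, whose flags derive from the result).
     */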
        /************************/
        /* misc */
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_pause(cpu_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX);
        } else {
            gen_helper_fwait(cpu_env);
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3);
        break;
    case 0xcd: /* int N */
        val = x86_ldub_code(env, s);
        if (check_vm86_iopl(s)) {
            gen_interrupt(s, val);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_into(cpu_env, cur_insn_len_i32(s));
        break;
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
        gen_debug(s);
        break;
    case 0xfa: /* cli */
        if (check_iopl(s)) {
            gen_reset_eflags(s, IF_MASK);
        }
        break;
    case 0xfb: /* sti */
        if (check_iopl(s)) {
            gen_set_eflags(s, IF_MASK);
            /* interruptions are enabled only the first insn after sti */
            gen_update_eip_next(s);
            gen_eob_inhibit_irq(s, true);
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        if (ot == MO_16) {
            gen_helper_boundw(cpu_env, s->A0, s->tmp2_i32);
        } else {
            gen_helper_boundl(cpu_env, s->A0, s->tmp2_i32);
        }
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
            break;
        }
#endif
        tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, s->T0);
        tcg_gen_neg_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            TCGLabel *l1, *l2;
            int diff = (int8_t)insn_get(env, s, MO_8);

            l1 = gen_new_label();
            l2 = gen_new_label();
            gen_update_cc_op(s);
            b &= 3;
            switch (b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s, l2);
                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s, l1);
                break;
            default:
            case 3: /* jecxz */
                gen_op_jz_ecx(s, l1);
                break;
            }

            gen_set_label(l2);
            gen_jmp_rel_csize(s, 0, 1);

            gen_set_label(l1);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
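    /*
     * Note: the LOOP/JECXZ expansion above uses two labels: l1 is the
     * taken-branch target and l2 falls through to the next instruction;
     * both exits are emitted with gen_jmp_rel*(), so the TB ends here
     * either way.
     */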
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            if (b & 2) {
                gen_helper_rdmsr(cpu_env);
            } else {
                gen_helper_wrmsr(cpu_env);
                s->base.is_jmp = DISAS_EOB_NEXT;
            }
        }
        break;
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        translator_io_start(&s->base);
        gen_helper_rdtsc(cpu_env);
        break;
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_rdpmc(cpu_env);
        s->base.is_jmp = DISAS_NORETURN;
        break;
    case 0x134: /* sysenter */
        /* For AMD SYSENTER is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s)) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysenter(cpu_env);
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x135: /* sysexit */
        /* For AMD SYSEXIT is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysexit(cpu_env, tcg_constant_i32(dflag - 1));
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x105: /* syscall */
        /* For Intel SYSCALL is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_syscall(cpu_env, cur_insn_len_i32(s));
        /* TF handling for the syscall insn is different. The TF bit is
           checked after the syscall insn completes. This allows #DB to not be
           generated after one has entered CPL0 if TF is set in FMASK. */
        gen_eob_worker(s, false, true);
        break;
    case 0x107: /* sysret */
        /* For Intel SYSRET is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysret(cpu_env, tcg_constant_i32(dflag - 1));
            /* condition codes are modified only in long mode */
            if (LMA(s)) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            /* TF handling for the sysret insn is different. The TF bit is
               checked after the sysret insn completes. This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               finished. */
            gen_eob_worker(s, false, true);
        }
        break;
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_cpuid(cpu_env);
        break;
    case 0xf4: /* hlt */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_hlt(cpu_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
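    /*
     * Note: HLT and similar helpers do not return to the generated code,
     * so the TB is terminated with DISAS_NORETURN rather than one of the
     * DISAS_EOB_* states used when translation merely has to stop.
     */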
    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* sldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(cpu_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, cpu_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(cpu_env, s->tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(cpu_env, s->T0);
            } else {
                gen_helper_verw(cpu_env, s->T0);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto unknown_op;
        }
        break;
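    /*
     * 0F 01 is a grab-bag of system instructions; it dispatches on the
     * whole modrm byte, using the CASE_MODRM_MEM_OP/CASE_MODRM_OP macros
     * to match the memory and register forms of each /r group.
     */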
    case 0x101:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* sgdt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0,
                             cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            gen_extu(s->aflag, s->A0);
            gen_add_A0_ds_seg(s);
            gen_helper_monitor(cpu_env, s->A0);
            break;

        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_mwait(cpu_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_reset_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_set_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(1): /* sidt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, cpu_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;

        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(cpu_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change. */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xd8: /* VMRUN */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmrun(cpu_env, tcg_constant_i32(s->aflag - 1),
                             cur_insn_len_i32(s));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xd9: /* VMMCALL */
            if (!SVME(s)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmmcall(cpu_env);
            break;

        case 0xda: /* VMLOAD */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmload(cpu_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdb: /* VMSAVE */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmsave(cpu_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdc: /* STGI */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_helper_stgi(cpu_env);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xdd: /* CLGI */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_clgi(cpu_env);
            break;

        case 0xde: /* SKINIT */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
            /* If not intercepted, not implemented -- raise #UD. */
            goto illegal_op;

        case 0xdf: /* INVLPGA */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
            if (s->aflag == MO_64) {
                tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            } else {
                tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
            }
            gen_helper_flush_page(cpu_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, cpu_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, cpu_env, offsetof(CPUX86State, idt.limit));
            break;

        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, cpu_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, cpu_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(cpu_env, s->tmp2_i32, s->tmp1_i64);
            break;

        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, cpu_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(cpu_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(env, s, modrm);
            gen_helper_flush_page(cpu_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
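        /*
         * Note: LMSW and INVLPG above end the TB with DISAS_EOB_NEXT
         * because a CR0 write or a TLB flush can invalidate assumptions
         * baked into the current translation.
         */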
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, cpu_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtsc(cpu_env);
            gen_helper_rdpid(s->T0, cpu_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;

        default:
            goto unknown_op;
        }
        break;

    case 0x108: /* invd */
    case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(s, MO_32, s->T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(s->T0, s->T0);
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        } else
#endif
        {
            TCGLabel *label1;
            TCGv t0, t1, t2;

            if (!PE(s) || VM86(s))
                goto illegal_op;
            t0 = tcg_temp_new();
            t1 = tcg_temp_new();
            t2 = tcg_temp_new();
            ot = MO_16;
            modrm = x86_ldub_code(env, s);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_v_reg(s, ot, t0, rm);
            }
            gen_op_mov_v_reg(s, ot, t1, reg);
            tcg_gen_andi_tl(s->tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_reg_v(s, ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;
            if (!PE(s) || VM86(s))
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, cpu_env, s->T0);
            } else {
                gen_helper_lsl(t0, cpu_env, s->T0);
            }
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
        }
        break;
    case 0x118:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* prefetchnta */
        case 1: /* prefetchnt0 */
        case 2: /* prefetchnt0 */
        case 3: /* prefetchnt0 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], cpu_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
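    /*
     * Note: the MPX bound registers are modelled as i64 pairs (cpu_bndl /
     * cpu_bndu for the lower and upper bounds), with HF_MPX_IU_MASK
     * tracking whether they hold live values; all BND* forms decay to NOP
     * via gen_nop_modrm() when MPX is not enabled.
     */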
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(cpu_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;

    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (!check_cpl0(s)) {
            break;
        }
        modrm = x86_ldub_code(env, s);
        /*
         * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of Intel 386 and 486
         * processors all show that the mod bits are assumed to be 1's,
         * regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        switch (reg) {
        case 0:
            if ((prefixes & PREFIX_LOCK) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            break;
        case 2:
        case 3:
        case 4:
        case 8:
            break;
        default:
            goto unknown_op;
        }
        ot = (CODE64(s) ? MO_64 : MO_32);

        translator_io_start(&s->base);
        if (b & 2) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            gen_helper_write_crN(cpu_env, tcg_constant_i32(reg), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
        } else {
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
            gen_helper_read_crN(s->T0, cpu_env, tcg_constant_i32(reg));
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (check_cpl0(s)) {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(cpu_env, s->tmp2_i32, s->T0);
                s->base.is_jmp = DISAS_EOB_NEXT;
            } else {
                gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, cpu_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(cpu_env);
            /* abort block because static cpu state changed */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(cpu_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(cpu_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(cpu_env, s->tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_helper_update_mxcsr(cpu_env);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, cpu_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;

        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(cpu_env, s->A0, s->tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(cpu_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB. */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, s->A0, s->tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime. */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, s->tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;

        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;
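    /*
     * Note: the fence instructions map directly onto TCG memory barriers:
     * SFENCE orders stores (TCG_MO_ST_ST), LFENCE orders loads
     * (TCG_MO_LD_LD), and MFENCE orders everything (TCG_MO_ALL).
     */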
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_update_cc_op(s);
        gen_update_eip_next(s);
        gen_helper_rsm(cpu_env);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        disas_insn_new(s, cpu, b);
        break;
    default:
        goto unknown_op;
    }
    return true;
 illegal_op:
    gen_illegal_opcode(s);
    return true;
 unknown_op:
    gen_unknown_opcode(env, s);
    return true;
}
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
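/*
 * Note: the tcg_global_mem_new*() calls above register TCG "globals"
 * backed by fields of CPUX86State, so guest registers live in
 * host-addressable memory between TBs while TCG can still promote them
 * into host registers within a TB.
 */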
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu->env_ptr;
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(env, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_end = tcg_last_op();
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg -= dc->cs_base;
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}
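/*
 * Note: with CF_PCREL the insn_start arguments record only the offset of
 * the instruction within its page (pc_next minus cs_base, masked), since
 * a position-independent TB may execute at different virtual addresses.
 */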
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    if (disas_insn(dc, cpu)) {
        target_ulong pc_next = dc->pc;
        dc->base.pc_next = pc_next;

        if (dc->base.is_jmp == DISAS_NEXT) {
            if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
                /*
                 * If single step mode, we generate only one instruction and
                 * generate an exception.
                 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
                 * the flag and abort the translation to give the irqs a
                 * chance to happen.
                 */
                dc->base.is_jmp = DISAS_EOB_NEXT;
            } else if (!is_same_page(&dc->base, pc_next)) {
                dc->base.is_jmp = DISAS_TOO_MANY;
            }
        }
    }
}
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
        gen_eob(dc);
        break;
    case DISAS_EOB_INHIBIT_IRQ:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        gen_eob_inhibit_irq(dc, true);
        break;
    case DISAS_JUMP:
        gen_jr(dc);
        break;
    default:
        g_assert_not_reached();
    }
}
static void i386_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}