/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/cpu_ldst.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef  HELPER_H
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40
/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
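
/*
 * For example, CASE_MODRM_MEM_OP(7) expands to
 *   case 0x38 ... 0x3f: case 0x78 ... 0x7f: case 0xb8 ... 0xbf
 * i.e. every modrm byte whose op field is 7 with mod 0..2 (the memory
 * forms), while CASE_MODRM_OP(7) additionally matches 0xf8 ... 0xff
 * (mod 3, the register forms).
 */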
//#define MACRO_TEST   1
/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */

    MemOp aflag;
    MemOp dflag;

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */
    uint8_t prefix;

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement. */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

#ifdef TARGET_X86_64
    uint8_t rex_r;
    uint8_t rex_x;
    uint8_t rex_b;
#endif
    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */
    bool cc_op_dirty;

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */
    TCGv cc_srcT;
    TCGv A0;
    TCGv T0;
    TCGv T1;

    /* TCG local register indexes (only used inside old micro ops) */
    TCGv tmp0;
    TCGv tmp4;
    TCGv_i32 tmp2_i32;
    TCGv_i32 tmp3_i32;
    TCGv_i64 tmp1_i64;

    sigjmp_buf jmpbuf;
    TCGOp *prev_insn_end;

    target_ulong pc_save;
} DisasContext;
#define DISAS_EOB_ONLY         DISAS_TARGET_0
#define DISAS_EOB_NEXT         DISAS_TARGET_1
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_2
#define DISAS_JUMP             DISAS_TARGET_3
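
/*
 * Rough meaning of the end-of-block variants, inferred from how the
 * translator below uses them: DISAS_EOB_NEXT updates EIP to the next
 * instruction before ending the TB, DISAS_EOB_ONLY ends the TB with EIP
 * already written, DISAS_EOB_INHIBIT_IRQ additionally suppresses
 * interrupts for one instruction (MOV SS / POP SS semantics), and
 * DISAS_JUMP ends the TB with an indirect jump through cpu_eip.
 */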
/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define PE(S)     true
#define CPL(S)    3
#define IOPL(S)   0
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif

#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define SS32(S)   true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif

#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif

#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif
#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif
/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }
#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(hlt, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(rdmsr, TCGv_env env)
STUB_HELPER(read_crN, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(get_dr, TCGv ret, TCGv_env env, TCGv_i32 reg)
STUB_HELPER(set_dr, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
STUB_HELPER(wrmsr, TCGv_env env)
#endif
static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s);
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_op(DisasContext *s1, int op, MemOp ot, int d);
static void gen_exception_gpf(DisasContext *s);
/* i386 arith/logic operations */
enum {
    OP_ADDL, OP_ORL, OP_ADCL, OP_SBBL, OP_ANDL, OP_SUBL, OP_XORL, OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL, OP_ROR, OP_RCL, OP_RCR, OP_SHL, OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O, JCC_B, JCC_Z, JCC_BE, JCC_S, JCC_P, JCC_L, JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX, OR_EDX, OR_EBX, OR_ESP, OR_EBP, OR_ESI, OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
#define USES_CC_DST  1
#define USES_CC_SRC  2
#define USES_CC_SRC2 4
#define USES_CC_SRCT 8

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};
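
/*
 * Example of how this table is consumed (see set_cc_op below): when
 * cc_op changes from CC_OP_ADDB to CC_OP_LOGICB, the dead set is
 * (USES_CC_DST | USES_CC_SRC) & ~USES_CC_DST == USES_CC_SRC, so
 * cpu_cc_src can be discarded and TCG need not keep it live.
 */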
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */
#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
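
/*
 * Example: with no REX prefix, reg 4..7 in a byte operation selects
 * AH/CH/DH/BH; with any REX prefix the same encodings select
 * SPL/BPL/SIL/DIL instead, which is why the REX check above suffices.
 */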
/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline MemOp mo_64_32(MemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline MemOp mo_b_d(int b, MemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline MemOp mo_b_d32(int b, MemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
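
/*
 * Example: for IN AL,imm8 (opcode 0xE4) the low bit is clear, so
 * mo_b_d32 yields MO_8; for IN eAX,imm8 (0xE5) it yields MO_16 or
 * MO_32 depending on the current operand size, never MO_64, because
 * port I/O is at most 32 bits wide.
 */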
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and return the
 * temporary's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg,
                                 TCGv dest, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}

static inline
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}
static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
    s->pc_save = -1;
}
static inline
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}

static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, s->T0, s->A0);
    } else {
        gen_op_mov_reg_v(s, idx, d, s->T0);
    }
}
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}
static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
    s->pc_save = s->pc;
}
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}

static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}
static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the weeds quickly.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}
static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}
static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}
/* Compute SEG:REG into DEST.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg_dest(DisasContext *s, MemOp aflag, TCGv dest, TCGv a0,
                               int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(dest, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && ADDSEG(s)) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(dest, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(dest, a0);
        a0 = dest;
        if (ovr_seg < 0) {
            if (ADDSEG(s)) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(dest, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(dest, a0);
            tcg_gen_add_tl(dest, dest, seg);
        } else {
            tcg_gen_add_tl(dest, a0, seg);
            tcg_gen_ext32u_tl(dest, dest);
        }
    }
}
static void gen_lea_v_seg(DisasContext *s, MemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, aflag, s->A0, a0, def_seg, ovr_seg);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}
static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();
    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
}
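
/*
 * CPUX86State.df stores the direction flag as +1 or -1, so the value
 * loaded above, shifted left by ot, is the signed per-iteration step
 * (+/- 1, 2, 4 or 8 bytes) that the string operations below add to
 * ESI and EDI.
 */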
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (size == MO_TL) {
        return src;
    }
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}

static void gen_extu(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}

static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}

static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Validate that access to [port, port + 1<<ot) is allowed.
 * Raise #GP, or VMM exit if not.
 */
static bool gen_check_io(DisasContext *s, MemOp ot, TCGv_i32 port,
                         uint32_t svm_flags)
{
#ifdef CONFIG_USER_ONLY
    /*
     * We do not implement the ioperm(2) syscall, so the TSS check
     * will always fail.
     */
    gen_exception_gpf(s);
    return false;
#else
    if (PE(s) && (CPL(s) > IOPL(s) || VM86(s))) {
        gen_helper_check_io(tcg_env, port, tcg_constant_i32(1 << ot));
    }
    if (GUEST(s)) {
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
            svm_flags |= SVM_IOIO_REP_MASK;
        }
        svm_flags |= 1 << (SVM_IOIO_SIZE_SHIFT + ot);
        gen_helper_svm_check_io(tcg_env, port,
                                tcg_constant_i32(svm_flags),
                                cur_insn_len_i32(s));
    }
    return true;
#endif
}
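
/*
 * Example: for OUTSW (ot == MO_16) this validates the two bytes
 * [port, port + 2).  In user-only mode the check always raises #GP,
 * since ioperm(2) is not implemented and the TSS permission bitmap
 * can therefore never grant access.
 */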
static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
static void gen_op_update1_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update2_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static void gen_op_update3_cc(DisasContext *s, TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
}

static inline void gen_op_testl_T0_T1_cc(DisasContext *s)
{
    tcg_gen_and_tl(cpu_cc_dst, s->T0, s->T1);
}

static void gen_op_update_neg_cc(DisasContext *s)
{
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    tcg_gen_neg_tl(cpu_cc_src, s->T0);
    tcg_gen_movi_tl(s->cc_srcT, 0);
}
/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
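
/*
 * A CCPrepare describes how to compute one flag as cheaply as possible:
 * the flag is set iff "reg cond reg2/imm" holds after masking reg with
 * mask (mask == -1 meaning no masking).  For example,
 * gen_prepare_eflags_p below returns
 *   { .cond = TCG_COND_NE, .reg = cpu_cc_src, .mask = CC_P }
 * i.e. "PF is set iff (cc_src & CC_P) != 0".
 */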
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = t1 == cpu_cc_src ? s->tmp0 : reg;
        tcg_gen_mov_tl(t0, s->cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}
/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}
1038 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
1043 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
1044 .mask
= -1, .no_setcond
= true };
1047 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1048 case CC_OP_MULB
... CC_OP_MULQ
:
1049 return (CCPrepare
) { .cond
= TCG_COND_NE
,
1050 .reg
= cpu_cc_src
, .mask
= -1 };
1052 gen_compute_eflags(s
);
1053 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            MemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    MemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_extu(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(s->tmp4, s->cc_srcT);
            gen_exts(size, s->tmp4);
            t0 = gen_ext_tl(s->tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = s->tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_O };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (reg == cpu_cc_src) {
                reg = s->tmp0;
            }
            tcg_gen_addi_tl(reg, cpu_cc_src, CC_O - CC_S);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_O | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(s->T0, cc.reg, cc.mask);
        cc.reg = s->T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s, l1);
    gen_set_label(l2);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    return l2;
}
static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}

static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}
static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}
static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
/* Generate jumps to current or next instruction */
static void gen_repz(DisasContext *s, MemOp ot,
                     void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    /*
     * A loop would cause two single step exceptions if ECX = 1
     * before rep string_insn
     */
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
    { gen_repz(s, ot, gen_##op); }

static void gen_repz2(DisasContext *s, MemOp ot, int nz,
                      void (*fn)(DisasContext *s, MemOp ot))
{
    TCGLabel *l2;
    gen_update_cc_op(s);
    l2 = gen_jz_ecx_string(s);
    fn(s, ot);
    gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
    gen_update_cc_op(s);
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);
    if (s->repz_opt) {
        gen_op_jz_ecx(s, l2);
    }
    gen_jmp_rel_csize(s, -cur_insn_len(s), 0);
}

#define GEN_REPZ2(op) \
    static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
    { gen_repz2(s, ot, nz, gen_##op); }

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(tcg_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(tcg_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(tcg_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(tcg_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(tcg_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(tcg_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(tcg_env);
        break;
    }
}
1431 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1433 TCGv_i32 tmp
= tcg_constant_i32(opreg
);
1436 gen_helper_fadd_STN_ST0(tcg_env
, tmp
);
1439 gen_helper_fmul_STN_ST0(tcg_env
, tmp
);
1442 gen_helper_fsubr_STN_ST0(tcg_env
, tmp
);
1445 gen_helper_fsub_STN_ST0(tcg_env
, tmp
);
1448 gen_helper_fdivr_STN_ST0(tcg_env
, tmp
);
1451 gen_helper_fdiv_STN_ST0(tcg_env
, tmp
);
1456 static void gen_exception(DisasContext
*s
, int trapno
)
1458 gen_update_cc_op(s
);
1459 gen_update_eip_cur(s
);
1460 gen_helper_raise_exception(tcg_env
, tcg_constant_i32(trapno
));
1461 s
->base
.is_jmp
= DISAS_NORETURN
;
/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}

/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}
/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* If vm86, check for iopl == 3; if not, raise #GP and return false. */
static bool check_vm86_iopl(DisasContext *s)
{
    if (!VM86(s) || IOPL(s) == 3) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}

/* Check for iopl allowing access; if not, raise #GP and return false. */
static bool check_iopl(DisasContext *s)
{
    if (VM86(s) ? IOPL(s) == 3 : CPL(s) <= IOPL(s)) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, MemOp ot, int d)
{
    /* Invalid lock prefix when destination is not memory or OP_CMPL. */
    if ((d != OR_TMP0 || op == OP_CMPL) && s1->prefix & PREFIX_LOCK) {
        gen_illegal_opcode(s1);
        return;
    }

    if (d != OR_TMP0) {
        gen_op_mov_v_reg(s1, ot, s1->T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, s1->T0, s1->A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->tmp4, s1->T1);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_add_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, s1->tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(s1->T0, s1->T1, s1->tmp4);
            tcg_gen_neg_tl(s1->T0, s1->T0);
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(s1, s1->tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(s1->T0, s1->T1);
            tcg_gen_atomic_fetch_add_tl(s1->cc_srcT, s1->A0, s1->T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(s1->T0, s1->cc_srcT, s1->T1);
        } else {
            tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
            tcg_gen_sub_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc(s1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(s1->T0, s1->A0, s1->T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(s1->T0, s1->A0, s1->T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(s1->T0, s1->T0, s1->T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc(s1);
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, s1->T1);
        tcg_gen_mov_tl(s1->cc_srcT, s1->T0);
        tcg_gen_sub_tl(cpu_cc_dst, s1->T0, s1->T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, MemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        if (d != OR_TMP0) {
            /* Lock prefix when destination is not memory */
            gen_illegal_opcode(s1);
            return;
        }
        tcg_gen_movi_tl(s1->T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(s1->T0, s1->A0, s1->T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(s1, ot, s1->T0, d);
        } else {
            gen_op_ld_v(s1, ot, s1->T0, s1->A0);
        }
        tcg_gen_addi_tl(s1->T0, s1->T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, s1->T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}
static void gen_shift_flags(DisasContext *s, MemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_constant_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(s->tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(s->tmp3_i32, s->cc_op);
        oldop = s->tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_constant_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, s->tmp2_i32, oldop);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
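
/*
 * The movcond sequence above implements the x86 rule that a shift by a
 * count of zero leaves the flags (and therefore CC_OP) untouched: each
 * CC variable and cpu_cc_op receive the new value only when count != 0,
 * otherwise they keep their previous contents.
 */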
static void gen_shift_rm_T1(DisasContext *s, MemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_subi_tl(s->tmp0, s->T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, s->T0);
            tcg_gen_sar_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_sar_tl(s->T0, s->T0, s->T1);
        } else {
            gen_extu(ot, s->T0);
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_tl(s->T0, s->T0, s->T1);
        }
    } else {
        tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
        tcg_gen_shl_tl(s->T0, s->T0, s->T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, s->T1, is_right);
}
static void gen_shift_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, s->T0);
                tcg_gen_sari_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_sari_tl(s->T0, s->T0, op2);
            } else {
                gen_extu(ot, s->T0);
                tcg_gen_shri_tl(s->tmp4, s->T0, op2 - 1);
                tcg_gen_shri_tl(s->T0, s->T0, op2);
            }
        } else {
            tcg_gen_shli_tl(s->tmp4, s->T0, op2 - 1);
            tcg_gen_shli_tl(s->T0, s->T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, s->T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
static void gen_rot_rm_T1(DisasContext *s, MemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    tcg_gen_andi_tl(s->T1, s->T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(s->T0, s->T0);
        tcg_gen_muli_tl(s->T0, s->T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(s->T0, s->T0, s->T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        if (is_right) {
            tcg_gen_rotr_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        } else {
            tcg_gen_rotl_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
        }
        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(s->T0, s->T0, s->T1);
        } else {
            tcg_gen_rotl_tl(s->T0, s->T0, s->T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_constant_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, s->T1);
    tcg_gen_movi_i32(s->tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(s->tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        s->tmp2_i32, s->tmp3_i32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_rot_rm_im(DisasContext *s, MemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            if (is_right) {
                tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(s->tmp2_i32, s->tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
            break;
        case MO_64:
#else
        case MO_32:
#endif
            if (is_right) {
                tcg_gen_rotri_tl(s->T0, s->T0, op2);
            } else {
                tcg_gen_rotli_tl(s->T0, s->T0, op2);
            }
            break;
        default:
            shift = op2;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, s->T0);
            tcg_gen_shli_tl(s->tmp0, s->T0, shift);
            tcg_gen_shri_tl(s->T0, s->T0, mask + 1 - shift);
            tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, s->T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, s->T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, MemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rcrw(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcrl(s->T0, tcg_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(s->T0, tcg_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_16:
            gen_helper_rclw(s->T0, tcg_env, s->T0, s->T1);
            break;
        case MO_32:
            gen_helper_rcll(s->T0, tcg_env, s->T0, s->T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(s->T0, tcg_env, s->T0, s->T1);
            break;
#endif
        default:
            g_assert_not_reached();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, MemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, s->T0, s->A0);
    } else {
        gen_op_mov_v_reg(s, ot, s->T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(s->tmp0, s->T0, s->T1, 16, 16);
            tcg_gen_mov_tl(s->T1, s->T0);
            tcg_gen_mov_tl(s->T0, s->tmp0);
        } else {
            tcg_gen_deposit_tl(s->T1, s->T0, s->T1, 16, 16);
        }
        /*
         * If TARGET_X86_64 defined then fall through into MO_32 case,
         * otherwise fall through default case.
         */
    case MO_32:
#ifdef TARGET_X86_64
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(s->T0, s->T0, s->T1);
            tcg_gen_shr_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shr_i64(s->T0, s->T0, count);
        } else {
            tcg_gen_concat_tl_i64(s->T0, s->T1, s->T0);
            tcg_gen_shl_i64(s->tmp0, s->T0, s->tmp0);
            tcg_gen_shl_i64(s->T0, s->T0, count);
            tcg_gen_shri_i64(s->tmp0, s->tmp0, 32);
            tcg_gen_shri_i64(s->T0, s->T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(s->tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(s->tmp0, s->T0, s->tmp0);

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shr_tl(s->T0, s->T0, count);
            tcg_gen_shl_tl(s->T1, s->T1, s->tmp4);
        } else {
            tcg_gen_shl_tl(s->tmp0, s->T0, s->tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(s->tmp4, 33, count);
                tcg_gen_shr_tl(s->tmp4, s->T1, s->tmp4);
                tcg_gen_or_tl(s->tmp0, s->tmp0, s->tmp4);
            }

            tcg_gen_subfi_tl(s->tmp4, mask + 1, count);
            tcg_gen_shl_tl(s->T0, s->T0, count);
            tcg_gen_shr_tl(s->T1, s->T1, s->tmp4);
        }
        tcg_gen_movi_tl(s->tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, s->T1, count, s->tmp4,
                           s->tmp4, s->T1);
        tcg_gen_or_tl(s->T0, s->T0, s->T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, s->T0, s->tmp0, count, is_right);
}
static void gen_shift(DisasContext *s1, int op, MemOp ot, int d, int s)
{
    if (s != OR_TMP1) {
        gen_op_mov_v_reg(s1, ot, s1->T1, s);
    }
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, MemOp ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(s1->T1, c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            volatile uint8_t unused =
                cpu_ldub_code(env, (s->pc - 1) & TARGET_PAGE_MASK);
            (void)unused;
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline int16_t x86_ldsw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
/* Decompose an address.  */

typedef struct AddressParts {
    int def_seg;
    int base;
    int index;
    int scale;
    target_long disp;
} AddressParts;

static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            int code = x86_ldub_code(env, s);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)x86_ldl_code(env, s);
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)x86_ldub_code(env, s);
            break;
        default:
        case 2:
            disp = (int32_t)x86_ldl_code(env, s);
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = x86_lduw_code(env, s);
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)x86_ldub_code(env, s);
        } else {
            disp = (int16_t)x86_lduw_code(env, s);
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        g_assert_not_reached();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}
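
/*
 * Worked example: in 32-bit code, modrm byte 0x44 has mod=1, rm=4, so a
 * SIB byte and an 8-bit displacement follow; SIB 0x24 (scale=0,
 * index=4 meaning "no index", base=ESP) then decodes to the address
 * ESP+disp8 with SS as the default segment.
 */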
/* Compute the address, with a minimum number of TCG ops.  */
static TCGv gen_lea_modrm_1(DisasContext *s, AddressParts a, bool is_vsib)
{
    TCGv ea = NULL;

    if (a.index >= 0 && !is_vsib) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(s->A0, cpu_regs[a.index], a.scale);
            ea = s->A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(s->A0, ea, cpu_regs[a.base]);
            ea = s->A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (!ea) {
        if (tb_cflags(s->base.tb) & CF_PCREL && a.base == -2) {
            /* With cpu_eip ~= pc_save, the expression is pc-relative. */
            tcg_gen_addi_tl(s->A0, cpu_eip, a.disp - s->pc_save);
        } else {
            tcg_gen_movi_tl(s->A0, a.disp);
        }
        ea = s->A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(s->A0, ea, a.disp);
        ea = s->A0;
    }

    return ea;
}
*env
, DisasContext
*s
, int modrm
)
2348 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2349 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2350 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2353 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2355 (void)gen_lea_modrm_0(env
, s
, modrm
);
/* Used for BNDCL, BNDCU, BNDCN.  */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(s, a, false);

    tcg_gen_extu_tl_i64(s->tmp1_i64, ea);
    if (!CODE64(s)) {
        tcg_gen_ext32u_i64(s->tmp1_i64, s->tmp1_i64);
    }
    tcg_gen_setcond_i64(cond, s->tmp1_i64, s->tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(s->tmp2_i32, s->tmp1_i64);
    gen_helper_bndck(tcg_env, s->tmp2_i32);
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, s->A0, R_DS, s->override);
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           MemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0) {
                gen_op_mov_v_reg(s, ot, s->T0, reg);
            }
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            if (reg != OR_TMP0) {
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            }
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0) {
                gen_op_mov_v_reg(s, ot, s->T0, reg);
            }
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_ld_v(s, ot, s->T0, s->A0);
            if (reg != OR_TMP0) {
                gen_op_mov_reg_v(s, ot, reg, s->T0);
            }
        }
    }
}
static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_ulong ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = x86_lduw_code(env, s);
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = x86_ldl_code(env, s);
        break;
    default:
        g_assert_not_reached();
    }
    return ret;
}
static target_long insn_get_signed(CPUX86State *env, DisasContext *s, MemOp ot)
{
    target_long ret;

    switch (ot) {
    case MO_8:
        ret = (int8_t) x86_ldub_code(env, s);
        break;
    case MO_16:
        ret = (int16_t) x86_lduw_code(env, s);
        break;
    case MO_32:
        ret = (int32_t) x86_ldl_code(env, s);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        ret = x86_ldq_code(env, s);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return ret;
}
static inline int insn_const_size(MemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}
static void gen_jcc(DisasContext *s, int b, int diff)
{
    TCGLabel *l1 = gen_new_label();

    gen_jcc1(s, b, l1);
    gen_jmp_rel_csize(s, 0, 1);
    gen_set_label(l1);
    gen_jmp_rel(s, s->dflag, diff, 0);
}
static void gen_cmovcc1(DisasContext *s, int b, TCGv dest, TCGv src)
{
    CCPrepare cc = gen_prepare_cc(s, b, s->T1);

    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_constant_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, dest, cc.reg, cc.reg2, src, dest);
}
static inline void gen_op_movl_T0_seg(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ld32u_tl(s->T0, tcg_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(DisasContext *s, X86Seg seg_reg)
{
    tcg_gen_ext16u_tl(s->T0, s->T0);
    tcg_gen_st32_tl(s->T0, tcg_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_seg_base[seg_reg], s->T0, 4);
}
/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, X86Seg seg_reg)
{
    if (PE(s) && !VM86(s)) {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        gen_helper_load_seg(tcg_env, tcg_constant_i32(seg_reg), s->tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        } else if (CODE32(s) && seg_reg < R_FS) {
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
    } else {
        gen_op_movl_seg_T0_vm(s, seg_reg);
        if (seg_reg == R_SS) {
            s->base.is_jmp = DISAS_EOB_INHIBIT_IRQ;
        }
    }
}
static void gen_svm_check_intercept(DisasContext *s, uint32_t type)
{
    /* no SVM activated; fast case */
    if (likely(!GUEST(s))) {
        return;
    }
    gen_helper_svm_check_intercept(tcg_env, tcg_constant_i32(type));
}
static inline void gen_stack_update(DisasContext *s, int addend)
{
    gen_op_add_reg_im(s, mo_stacksize(s), R_ESP, addend);
}
/* Generate a push. It depends on ss32, addseg and dflag.  */
static void gen_push_v(DisasContext *s, TCGv val)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);
    int size = 1 << d_ot;
    TCGv new_esp = s->A0;

    tcg_gen_subi_tl(s->A0, cpu_regs[R_ESP], size);

    if (!CODE64(s)) {
        if (ADDSEG(s)) {
            new_esp = tcg_temp_new();
            tcg_gen_mov_tl(new_esp, s->A0);
        }
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
    }

    gen_op_st_v(s, d_ot, val, s->A0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, new_esp);
}
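
/*
 * Note the ordering above: the decremented stack pointer is kept in a
 * temporary and only committed to ESP after gen_op_st_v has completed.
 * If the store faults, the instruction restarts with the original ESP,
 * which is what the architecture requires for a faulting push.
 */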
/* two step pop is necessary for precise exceptions */
static MemOp gen_pop_T0(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);

    gen_lea_v_seg_dest(s, mo_stacksize(s), s->T0, cpu_regs[R_ESP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->T0);

    return d_ot;
}
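
/*
 * Here T0 first holds the segment-adjusted stack address and is then
 * overwritten with the loaded value; ESP itself is only adjusted later
 * by gen_pop_update().  Splitting the pop this way keeps exceptions
 * precise: a faulting load leaves ESP untouched.
 */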
static inline void gen_pop_update(DisasContext *s, MemOp ot)
{
    gen_stack_update(s, 1 << ot);
}
static inline void gen_stack_A0(DisasContext *s)
{
    gen_lea_v_seg(s, SS32(s) ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
}
static void gen_pusha(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], (i - 8) * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, cpu_regs[7 - i], s->A0);
    }

    gen_stack_update(s, -8 * size);
}
static void gen_popa(DisasContext *s)
{
    MemOp s_ot = SS32(s) ? MO_32 : MO_16;
    MemOp d_ot = s->dflag;
    int size = 1 << d_ot;
    int i;

    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (7 - i == R_ESP) {
            continue;
        }
        tcg_gen_addi_tl(s->A0, cpu_regs[R_ESP], i * size);
        gen_lea_v_seg(s, s_ot, s->A0, R_SS, -1);
        gen_op_ld_v(s, d_ot, s->T0, s->A0);
        gen_op_mov_reg_v(s, d_ot, 7 - i, s->T0);
    }

    gen_stack_update(s, 8 * size);
}
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
    int size = 1 << d_ot;

    /* Push BP; compute FrameTemp into T1.  */
    tcg_gen_subi_tl(s->T1, cpu_regs[R_ESP], size);
    gen_lea_v_seg(s, a_ot, s->T1, R_SS, -1);
    gen_op_st_v(s, d_ot, cpu_regs[R_EBP], s->A0);

    level &= 31;
    if (level != 0) {
        int i;

        /* Copy level-1 pointers from the previous frame.  */
        for (i = 1; i < level; ++i) {
            tcg_gen_subi_tl(s->A0, cpu_regs[R_EBP], size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_ld_v(s, d_ot, s->tmp0, s->A0);

            tcg_gen_subi_tl(s->A0, s->T1, size * i);
            gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
            gen_op_st_v(s, d_ot, s->tmp0, s->A0);
        }

        /* Push the current FrameTemp as the last level.  */
        tcg_gen_subi_tl(s->A0, s->T1, size * level);
        gen_lea_v_seg(s, a_ot, s->A0, R_SS, -1);
        gen_op_st_v(s, d_ot, s->T1, s->A0);
    }

    /* Copy the FrameTemp value to EBP.  */
    gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1);

    /* Compute the final value of ESP.  */
    tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
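
/*
 * Worked example: "enter 16, 2" with a 32-bit stack, so size = 4.  The
 * code above pushes EBP, copies level-1 = 1 saved frame pointer from
 * the old frame, pushes the new frame temp as the last level, sets EBP
 * to the frame temp, and finally subtracts 16 + 2*4 more from ESP for
 * the local storage.
 */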
static void gen_leave(DisasContext *s)
{
    MemOp d_ot = mo_pushpop(s, s->dflag);
    MemOp a_ot = mo_stacksize(s);

    gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
    gen_op_ld_v(s, d_ot, s->T0, s->A0);

    tcg_gen_addi_tl(s->T1, cpu_regs[R_EBP], 1 << d_ot);

    gen_op_mov_reg_v(s, d_ot, R_EBP, s->T0);
    gen_op_mov_reg_v(s, a_ot, R_ESP, s->T1);
}
/* Similarly, except that the assumption here is that we don't decode
   the instruction at all -- either a missing opcode, an unimplemented
   feature, or just a bogus instruction stream.  */
static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
{
    gen_illegal_opcode(s);

    if (qemu_loglevel_mask(LOG_UNIMP)) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            target_ulong pc = s->base.pc_next, end = s->pc;

            fprintf(logfile, "ILLOPC: " TARGET_FMT_lx ":", pc);
            for (; pc < end; ++pc) {
                fprintf(logfile, " %02x", cpu_ldub_code(env, pc));
            }
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }
}
/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_interrupt(tcg_env, tcg_constant_i32(intno),
                               cur_insn_len_i32(s));
    s->base.is_jmp = DISAS_NORETURN;
}
static void gen_set_hflag(DisasContext *s, uint32_t mask)
{
    if ((s->flags & mask) == 0) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        tcg_gen_ori_i32(t, t, mask);
        tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        s->flags |= mask;
    }
}
static void gen_reset_hflag(DisasContext *s, uint32_t mask)
{
    if (s->flags & mask) {
        TCGv_i32 t = tcg_temp_new_i32();
        tcg_gen_ld_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        tcg_gen_andi_i32(t, t, ~mask);
        tcg_gen_st_i32(t, tcg_env, offsetof(CPUX86State, hflags));
        s->flags &= ~mask;
    }
}
static void gen_set_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_ori_tl(t, t, mask);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}
static void gen_reset_eflags(DisasContext *s, target_ulong mask)
{
    TCGv t = tcg_temp_new();

    tcg_gen_ld_tl(t, tcg_env, offsetof(CPUX86State, eflags));
    tcg_gen_andi_tl(t, t, ~mask);
    tcg_gen_st_tl(t, tcg_env, offsetof(CPUX86State, eflags));
}
/* Clear BND registers during legacy branches. */
static void gen_bnd_jmp(DisasContext *s)
{
    /* Clear the registers only if BND prefix is missing, MPX is enabled,
       and if the BNDREGs are known to be in use (non-zero) already.
       The helper itself will check BNDPRESERVE at runtime.  */
    if ((s->prefix & PREFIX_REPNZ) == 0
        && (s->flags & HF_MPX_EN_MASK) != 0
        && (s->flags & HF_MPX_IU_MASK) != 0) {
        gen_helper_bnd_jmp(tcg_env);
    }
}
/* Generate an end of block. Trace exception is also generated if needed.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
   If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
   S->TF.  This is used by the syscall/sysret insns.  */
static void
do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, bool jr)
{
    gen_update_cc_op(s);

    /* If several instructions disable interrupts, only the first does it.  */
    if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
        gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
    } else {
        gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
    }

    if (s->base.tb->flags & HF_RF_MASK) {
        gen_reset_eflags(s, RF_MASK);
    }
    if (recheck_tf) {
        gen_helper_rechecking_single_step(tcg_env);
        tcg_gen_exit_tb(NULL, 0);
    } else if (s->flags & HF_TF_MASK) {
        gen_helper_single_step(tcg_env);
    } else if (jr) {
        tcg_gen_lookup_and_goto_ptr();
    } else {
        tcg_gen_exit_tb(NULL, 0);
    }
    s->base.is_jmp = DISAS_NORETURN;
}
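
/*
 * Exit path summary for the worker above: RECHECK_TF forces a helper
 * that re-evaluates #DB before leaving; a pending TF raises the single
 * step exception; JR (indirect jump) may chain through the TB lookup
 * table via tcg_gen_lookup_and_goto_ptr(); everything else exits to
 * the main loop.
 */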
static inline void
gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
{
    do_gen_eob_worker(s, inhibit, recheck_tf, false);
}
/* End of block.
   If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.  */
static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
{
    gen_eob_worker(s, inhibit, false);
}
/* End of block, resetting the inhibit irq flag.  */
static void gen_eob(DisasContext *s)
{
    gen_eob_worker(s, false, false);
}
/* Jump to register */
static void gen_jr(DisasContext *s)
{
    do_gen_eob_worker(s, false, false, true);
}
/* Jump to eip+diff, truncating the result to OT. */
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num)
{
    bool use_goto_tb = s->jmp_opt;
    target_ulong mask = -1;
    target_ulong new_pc = s->pc + diff;
    target_ulong new_eip = new_pc - s->cs_base;

    /* In 64-bit mode, operand size is fixed at 64 bits. */
    if (!CODE64(s)) {
        if (ot == MO_16) {
            mask = 0xffff;
            if (tb_cflags(s->base.tb) & CF_PCREL && CODE32(s)) {
                use_goto_tb = false;
            }
        } else {
            mask = 0xffffffff;
        }
    }
    new_eip &= mask;

    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);

    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, new_pc - s->pc_save);
        /*
         * If we can prove the branch does not leave the page and we have
         * no extra masking to apply (data16 branch in code32, see above),
         * then we have also proven that the addition does not wrap.
         */
        if (!use_goto_tb || !is_same_page(&s->base, new_pc)) {
            tcg_gen_andi_tl(cpu_eip, cpu_eip, mask);
            use_goto_tb = false;
        }
    } else if (!CODE64(s)) {
        new_pc = (uint32_t)(new_eip + s->cs_base);
    }

    if (use_goto_tb && translator_use_goto_tb(&s->base, new_pc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        tcg_gen_exit_tb(s->base.tb, tb_num);
        s->base.is_jmp = DISAS_NORETURN;
    } else {
        if (!(tb_cflags(s->base.tb) & CF_PCREL)) {
            tcg_gen_movi_tl(cpu_eip, new_eip);
        }
        if (s->jmp_opt) {
            gen_jr(s);   /* jump to another page */
        } else {
            gen_eob(s);  /* exit to main loop */
        }
    }
}
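
/*
 * Example of the masking above: a "jmp rel16" executed in 32-bit code
 * via a 0x66 prefix (ot == MO_16) must wrap the new EIP to 16 bits, so
 * mask = 0xffff; with CF_PCREL the direct-jump optimization is then
 * abandoned, because the masked target can no longer be proven to stay
 * on the same page.
 */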
/* Jump to eip+diff, truncating to the current code size. */
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num)
{
    /* CODE64 ignores the OT argument, so we need not consider it. */
    gen_jmp_rel(s, CODE32(s) ? MO_32 : MO_16, diff, tb_num);
}
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
    tcg_gen_st_i64(s->tmp1_i64, tcg_env, offset);
}
static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(s->tmp1_i64, tcg_env, offset);
    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
}
static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
                  ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_qemu_ld_i128(t, s->A0, mem_index, mop);
    tcg_gen_st_i128(t, tcg_env, offset);
}
static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp atom = (s->cpuid_ext_features & CPUID_EXT_AVX
                  ? MO_ATOM_IFALIGN : MO_ATOM_IFALIGN_PAIR);
    MemOp mop = MO_128 | MO_LE | atom | (align ? MO_ALIGN_16 : 0);
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_ld_i128(t, tcg_env, offset);
    tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop);
}
static void gen_ldy_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    int mem_index = s->mem_index;
    TCGv_i128 t0 = tcg_temp_new_i128();
    TCGv_i128 t1 = tcg_temp_new_i128();

    tcg_gen_qemu_ld_i128(t0, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_qemu_ld_i128(t1, s->tmp0, mem_index, mop);

    tcg_gen_st_i128(t0, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_st_i128(t1, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
}
static void gen_sty_env_A0(DisasContext *s, int offset, bool align)
{
    MemOp mop = MO_128 | MO_LE | MO_ATOM_IFALIGN_PAIR;
    int mem_index = s->mem_index;
    TCGv_i128 t = tcg_temp_new_i128();

    tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(0)));
    tcg_gen_qemu_st_i128(t, s->A0, mem_index, mop | (align ? MO_ALIGN_32 : 0));
    tcg_gen_addi_tl(s->tmp0, s->A0, 16);
    tcg_gen_ld_i128(t, tcg_env, offset + offsetof(YMMReg, YMM_X(1)));
    tcg_gen_qemu_st_i128(t, s->tmp0, mem_index, mop);
}
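
/*
 * The two helpers above split a 32-byte YMM access into two 16-byte
 * i128 accesses; only the first carries the MO_ALIGN_32 check, which
 * is sufficient because once the base is 32-byte aligned, the second
 * access at offset 16 is aligned as well.
 */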
#include "decode-new.h"
#include "emit.c.inc"
#include "decode-new.c.inc"
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Set Z to match the required value of the Z flag. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
#ifdef TARGET_X86_64
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
#endif
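
/*
 * Success detection above relies on the returned old value: the
 * cmpxchg result equals RDX:RAX exactly when the compare succeeded, so
 * (T0 ^ RAX) | (T1 ^ RDX) == 0 computes Z without a second memory
 * access.
 */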
/* Convert one instruction. s->base.is_jmp is set if the translation must
   be stopped.  Return true if the instruction has been translated, false
   if it must be retried by the caller.  */
static bool disas_insn(DisasContext *s, CPUState *cpu)
{
    CPUX86State *env = cpu_env(cpu);
    int b, prefixes;
    int shift;
    MemOp ot, aflag, dflag;
    int modrm, reg, rm, mod, op, opreg, val;
    bool orig_cc_op_dirty = s->cc_op_dirty;
    CCOp orig_cc_op = s->cc_op;
    target_ulong orig_pc_save = s->pc_save;

    s->pc = s->base.pc_next;
    s->override = -1;
#ifdef TARGET_X86_64
    s->rex_r = 0;
    s->rex_x = 0;
    s->rex_b = 0;
#endif
    s->rip_offset = 0; /* for relative ip address */
    s->vex_l = 0;
    s->vex_v = 0;
    s->vex_w = false;
    switch (sigsetjmp(s->jmpbuf, 0)) {
    case 0:
        break;
    case 1:
        gen_exception_gpf(s);
        return true;
    case 2:
        /* Restore state that may affect the next instruction. */
        s->pc = s->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        s->cc_op_dirty = orig_cc_op_dirty;
        s->cc_op = orig_cc_op;
        s->pc_save = orig_pc_save;
        /* END TODO */
        s->base.num_insns--;
        tcg_remove_ops_after(s->prev_insn_end);
        s->base.is_jmp = DISAS_TOO_MANY;
        return false;
    default:
        g_assert_not_reached();
    }

    prefixes = 0;

 next_byte:
    s->prefix = prefixes;
    b = x86_ldub_code(env, s);
    /* Collect prefixes. */
    switch (b) {
    default:
        break;
    case 0x0f:
        b = x86_ldub_code(env, s) + 0x100;
        break;
    case 0xf3:
        prefixes |= PREFIX_REPZ;
        prefixes &= ~PREFIX_REPNZ;
        goto next_byte;
    case 0xf2:
        prefixes |= PREFIX_REPNZ;
        prefixes &= ~PREFIX_REPZ;
        goto next_byte;
    case 0xf0:
        prefixes |= PREFIX_LOCK;
        goto next_byte;
    case 0x2e:
        s->override = R_CS;
        goto next_byte;
    case 0x36:
        s->override = R_SS;
        goto next_byte;
    case 0x3e:
        s->override = R_DS;
        goto next_byte;
    case 0x26:
        s->override = R_ES;
        goto next_byte;
    case 0x64:
        s->override = R_FS;
        goto next_byte;
    case 0x65:
        s->override = R_GS;
        goto next_byte;
    case 0x66:
        prefixes |= PREFIX_DATA;
        goto next_byte;
    case 0x67:
        prefixes |= PREFIX_ADR;
        goto next_byte;
#ifdef TARGET_X86_64
    case 0x40 ... 0x4f:
        if (CODE64(s)) {
            /* REX prefix */
            prefixes |= PREFIX_REX;
            s->vex_w = (b >> 3) & 1;
            s->rex_r = (b & 0x4) << 1;
            s->rex_x = (b & 0x2) << 2;
            s->rex_b = (b & 0x1) << 3;
            goto next_byte;
        }
        break;
#endif
    case 0xc5: /* 2-byte VEX */
    case 0xc4: /* 3-byte VEX */
        /* VEX prefixes cannot be used except in 32-bit mode.
           Otherwise the instruction is LES or LDS.  */
        if (CODE32(s) && !VM86(s)) {
            int vex2 = x86_ldub_code(env, s);
            s->pc--; /* rewind the advance_pc() x86_ldub_code() did */

            if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
                /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                   otherwise the instruction is LES or LDS. */
                break;
            }
            disas_insn_new(s, cpu, b);
            return true;
        }
        break;
    }
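
    /*
     * Example of prefix accumulation: for the byte sequence 66 48 89 c8
     * in 64-bit mode, 0x66 sets PREFIX_DATA and 0x48 is a REX.W prefix;
     * the post-processing below still selects MO_64 because rex_w takes
     * precedence over the data-size override.
     */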
    /* Post-process prefixes. */
    if (CODE64(s)) {
        /* In 64-bit mode, the default data size is 32-bit.  Select 64-bit
           data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
           over 0x66 if both are present. */
        dflag = (REX_W(s) ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
        /* In 64-bit mode, 0x67 selects 32-bit addressing. */
        aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
    } else {
        /* In 16/32-bit mode, 0x66 selects the opposite data size. */
        if (CODE32(s) ^ ((prefixes & PREFIX_DATA) != 0)) {
            dflag = MO_32;
        } else {
            dflag = MO_16;
        }
        /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
        if (CODE32(s) ^ ((prefixes & PREFIX_ADR) != 0)) {
            aflag = MO_32;
        } else {
            aflag = MO_16;
        }
    }

    s->prefix = prefixes;
    s->aflag = aflag;
    s->dflag = dflag;

    /* now check op code */
    switch (b) {
    /**************************/
    /* arith & logic */
    case 0x00 ... 0x05:
    case 0x08 ... 0x0d:
    case 0x10 ... 0x15:
    case 0x18 ... 0x1d:
    case 0x20 ... 0x25:
    case 0x28 ... 0x2d:
    case 0x30 ... 0x35:
    case 0x38 ... 0x3d:
        {
            int f;
            op = (b >> 3) & 7;
            f = (b >> 1) & 3;

            ot = mo_b_d(b, dflag);

            switch(f) {
            case 0: /* OP Ev, Gv */
                modrm = x86_ldub_code(env, s);
                reg = ((modrm >> 3) & 7) | REX_R(s);
                mod = (modrm >> 6) & 3;
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    opreg = OR_TMP0;
                } else if (op == OP_XORL && rm == reg) {
                xor_zero:
                    /* xor reg, reg optimisation */
                    set_cc_op(s, CC_OP_CLR);
                    tcg_gen_movi_tl(s->T0, 0);
                    gen_op_mov_reg_v(s, ot, reg, s->T0);
                    break;
                } else {
                    opreg = rm;
                }
                gen_op_mov_v_reg(s, ot, s->T1, reg);
                gen_op(s, op, ot, opreg);
                break;
            case 1: /* OP Gv, Ev */
                modrm = x86_ldub_code(env, s);
                mod = (modrm >> 6) & 3;
                reg = ((modrm >> 3) & 7) | REX_R(s);
                rm = (modrm & 7) | REX_B(s);
                if (mod != 3) {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, s->T1, s->A0);
                } else if (op == OP_XORL && rm == reg) {
                    goto xor_zero;
                } else {
                    gen_op_mov_v_reg(s, ot, s->T1, rm);
                }
                gen_op(s, op, ot, reg);
                break;
            case 2: /* OP A, Iv */
                val = insn_get(env, s, ot);
                tcg_gen_movi_tl(s->T1, val);
                gen_op(s, op, ot, OR_EAX);
                break;
            }
        }
        break;
    case 0x80: /* GRP1 */
    case 0x81:
    case 0x82:
    case 0x83:
        {
            ot = mo_b_d(b, dflag);

            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (b == 0x83) {
                    s->rip_offset = 1;
                } else {
                    s->rip_offset = insn_const_size(ot);
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = rm;
            }

            switch(b) {
            default:
            case 0x80:
            case 0x81:
            case 0x82:
                val = insn_get(env, s, ot);
                break;
            case 0x83:
                val = (int8_t)insn_get(env, s, MO_8);
                break;
            }
            tcg_gen_movi_tl(s->T1, val);
            gen_op(s, op, ot, opreg);
        }
        break;
    /**************************/
    /* inc, dec, and other misc arith */
    case 0x40 ... 0x47: /* inc Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), 1);
        break;
    case 0x48 ... 0x4f: /* dec Gv */
        ot = dflag;
        gen_inc(s, ot, OR_EAX + (b & 7), -1);
        break;
    case 0xf6: /* GRP3 */
    case 0xf7:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (mod != 3) {
            if (op == 0) {
                s->rip_offset = insn_const_size(ot);
            }
            gen_lea_modrm(env, s, modrm);
            /* For those below that handle locked memory, don't load here. */
            if (!(s->prefix & PREFIX_LOCK)
                || op != 2) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch(op) {
        case 0: /* test */
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
            gen_op_testl_T0_T1_cc(s);
            set_cc_op(s, CC_OP_LOGICB + ot);
            break;
        case 2: /* not */
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                tcg_gen_movi_tl(s->T0, ~0);
                tcg_gen_atomic_xor_fetch_tl(s->T0, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
            } else {
                tcg_gen_not_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            break;
        case 3: /* neg */
            if (s->prefix & PREFIX_LOCK) {
                TCGLabel *label1;
                TCGv a0, t0, t1, t2;

                if (mod == 3) {
                    goto illegal_op;
                }
                a0 = s->A0;
                t0 = s->T0;
                label1 = gen_new_label();

                gen_set_label(label1);
                t1 = tcg_temp_new();
                t2 = tcg_temp_new();
                tcg_gen_mov_tl(t2, t0);
                tcg_gen_neg_tl(t1, t0);
                tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
                                          s->mem_index, ot | MO_LE);
                tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);

                tcg_gen_neg_tl(s->T0, t0);
            } else {
                tcg_gen_neg_tl(s->T0, s->T0);
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
            gen_op_update_neg_cc(s);
            set_cc_op(s, CC_OP_SUBB + ot);
            break;
        case 4: /* mul */
            switch(ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8u_tl(s->T0, s->T0);
                tcg_gen_ext8u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_andi_tl(cpu_cc_src, s->T0, 0xff00);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16u_tl(s->T0, s->T0);
                tcg_gen_ext16u_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                tcg_gen_mov_tl(cpu_cc_src, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 5: /* imul */
            switch(ot) {
            case MO_8:
                gen_op_mov_v_reg(s, MO_8, s->T1, R_EAX);
                tcg_gen_ext8s_tl(s->T0, s->T0);
                tcg_gen_ext8s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext8s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                set_cc_op(s, CC_OP_MULB);
                break;
            case MO_16:
                gen_op_mov_v_reg(s, MO_16, s->T1, R_EAX);
                tcg_gen_ext16s_tl(s->T0, s->T0);
                tcg_gen_ext16s_tl(s->T1, s->T1);
                /* XXX: use 32 bit mul which could be faster */
                tcg_gen_mul_tl(s->T0, s->T0, s->T1);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                tcg_gen_mov_tl(cpu_cc_dst, s->T0);
                tcg_gen_ext16s_tl(s->tmp0, s->T0);
                tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
                tcg_gen_shri_tl(s->T0, s->T0, 16);
                gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
                set_cc_op(s, CC_OP_MULW);
                break;
            default:
            case MO_32:
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, cpu_regs[R_EAX]);
                tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                                  s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EAX], s->tmp2_i32);
                tcg_gen_extu_i32_tl(cpu_regs[R_EDX], s->tmp3_i32);
                tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
                tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
                set_cc_op(s, CC_OP_MULL);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
                                  s->T0, cpu_regs[R_EAX]);
                tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
                tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
                tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
                set_cc_op(s, CC_OP_MULQ);
                break;
#endif
            }
            break;
        case 6: /* div */
            switch(ot) {
            case MO_8:
                gen_helper_divb_AL(tcg_env, s->T0);
                break;
            case MO_16:
                gen_helper_divw_AX(tcg_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_divl_EAX(tcg_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_divq_EAX(tcg_env, s->T0);
                break;
#endif
            }
            break;
        case 7: /* idiv */
            switch(ot) {
            case MO_8:
                gen_helper_idivb_AL(tcg_env, s->T0);
                break;
            case MO_16:
                gen_helper_idivw_AX(tcg_env, s->T0);
                break;
            default:
            case MO_32:
                gen_helper_idivl_EAX(tcg_env, s->T0);
                break;
#ifdef TARGET_X86_64
            case MO_64:
                gen_helper_idivq_EAX(tcg_env, s->T0);
                break;
#endif
            }
            break;
        default:
            goto unknown_op;
        }
        break;
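
    /*
     * Note on the locked NEG emitted above: there is no single atomic
     * negate op, so the non-atomic load seeds a compare-and-swap loop.
     * tcg_gen_atomic_cmpxchg_tl publishes the negated value only if
     * memory still holds the seed; otherwise the freshly observed value
     * loops back through label1 and the negation is retried.
     */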
    case 0xfe: /* GRP4 */
    case 0xff: /* GRP5 */
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        op = (modrm >> 3) & 7;
        if (op >= 2 && b == 0xfe) {
            goto unknown_op;
        }
        if (CODE64(s)) {
            if (op == 2 || op == 4) {
                /* operand size for jumps is 64 bit */
                ot = MO_64;
            } else if (op == 3 || op == 5) {
                ot = dflag != MO_16 ? MO_32 + REX_W(s) : MO_16;
            } else if (op == 6) {
                /* default push size is 64 bit */
                ot = mo_pushpop(s, dflag);
            }
        }
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            if (op >= 2 && op != 3 && op != 5)
                gen_op_ld_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }

        switch(op) {
        case 0: /* inc Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, 1);
            break;
        case 1: /* dec Ev */
            if (mod != 3)
                opreg = OR_TMP0;
            else
                opreg = rm;
            gen_inc(s, ot, opreg, -1);
            break;
        case 2: /* call Ev */
            /* XXX: optimize if memory (no 'and' is necessary) */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_push_v(s, eip_next_tl(s));
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 3: /* lcall Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
        do_lcall:
            if (PE(s) && !VM86(s)) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lcall_protected(tcg_env, s->tmp2_i32, s->T1,
                                           tcg_constant_i32(dflag - 1),
                                           eip_next_tl(s));
            } else {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
                gen_helper_lcall_real(tcg_env, s->tmp2_i32, s->tmp3_i32,
                                      tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
            }
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 4: /* jmp Ev */
            if (dflag == MO_16) {
                tcg_gen_ext16u_tl(s->T0, s->T0);
            }
            gen_op_jmp_v(s, s->T0);
            gen_bnd_jmp(s);
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 5: /* ljmp Ev */
            if (mod == 3) {
                goto illegal_op;
            }
            gen_op_ld_v(s, ot, s->T1, s->A0);
            gen_add_A0_im(s, 1 << ot);
            gen_op_ld_v(s, MO_16, s->T0, s->A0);
        do_ljmp:
            if (PE(s) && !VM86(s)) {
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ljmp_protected(tcg_env, s->tmp2_i32, s->T1,
                                          eip_next_tl(s));
            } else {
                gen_op_movl_seg_T0_vm(s, R_CS);
                gen_op_jmp_v(s, s->T1);
            }
            s->base.is_jmp = DISAS_JUMP;
            break;
        case 6: /* push Ev */
            gen_push_v(s, s->T0);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x84: /* test Ev, Gv */
    case 0x85:
        ot = mo_b_d(b, dflag);

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_v_reg(s, ot, s->T1, reg);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;

    case 0xa8: /* test eAX, Iv */
    case 0xa9:
        ot = mo_b_d(b, dflag);
        val = insn_get(env, s, ot);

        gen_op_mov_v_reg(s, ot, s->T0, OR_EAX);
        tcg_gen_movi_tl(s->T1, val);
        gen_op_testl_T0_T1_cc(s);
        set_cc_op(s, CC_OP_LOGICB + ot);
        break;
    case 0x98: /* CWDE/CBW */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_64, R_EAX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_32, R_EAX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_8, s->T0, R_EAX);
            tcg_gen_ext8s_tl(s->T0, s->T0);
            gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x99: /* CDQ/CWD */
        switch (dflag) {
#ifdef TARGET_X86_64
        case MO_64:
            gen_op_mov_v_reg(s, MO_64, s->T0, R_EAX);
            tcg_gen_sari_tl(s->T0, s->T0, 63);
            gen_op_mov_reg_v(s, MO_64, R_EDX, s->T0);
            break;
#endif
        case MO_32:
            gen_op_mov_v_reg(s, MO_32, s->T0, R_EAX);
            tcg_gen_ext32s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 31);
            gen_op_mov_reg_v(s, MO_32, R_EDX, s->T0);
            break;
        case MO_16:
            gen_op_mov_v_reg(s, MO_16, s->T0, R_EAX);
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_sari_tl(s->T0, s->T0, 15);
            gen_op_mov_reg_v(s, MO_16, R_EDX, s->T0);
            break;
        default:
            g_assert_not_reached();
        }
        break;
    case 0x1af: /* imul Gv, Ev */
    case 0x69: /* imul Gv, Ev, I */
    case 0x6b:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (b == 0x69) {
            s->rip_offset = insn_const_size(ot);
        } else if (b == 0x6b) {
            s->rip_offset = 1;
        }
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        if (b == 0x69) {
            val = insn_get(env, s, ot);
            tcg_gen_movi_tl(s->T1, val);
        } else if (b == 0x6b) {
            val = (int8_t)insn_get(env, s, MO_8);
            tcg_gen_movi_tl(s->T1, val);
        } else {
            gen_op_mov_v_reg(s, ot, s->T1, reg);
        }
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_64:
            tcg_gen_muls2_i64(cpu_regs[reg], s->T1, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
            tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, s->T1);
            break;
#endif
        case MO_32:
            tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
            tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
            tcg_gen_muls2_i32(s->tmp2_i32, s->tmp3_i32,
                              s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_regs[reg], s->tmp2_i32);
            tcg_gen_sari_i32(s->tmp2_i32, s->tmp2_i32, 31);
            tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
            tcg_gen_sub_i32(s->tmp2_i32, s->tmp2_i32, s->tmp3_i32);
            tcg_gen_extu_i32_tl(cpu_cc_src, s->tmp2_i32);
            break;
        default:
            tcg_gen_ext16s_tl(s->T0, s->T0);
            tcg_gen_ext16s_tl(s->T1, s->T1);
            /* XXX: use 32 bit mul which could be faster */
            tcg_gen_mul_tl(s->T0, s->T0, s->T1);
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            tcg_gen_ext16s_tl(s->tmp0, s->T0);
            tcg_gen_sub_tl(cpu_cc_src, s->T0, s->tmp0);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
            break;
        }
        set_cc_op(s, CC_OP_MULB + ot);
        break;
    case 0x1c0:
    case 0x1c1: /* xadd Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            tcg_gen_add_tl(s->T0, s->T0, s->T1);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            gen_lea_modrm(env, s, modrm);
            if (s->prefix & PREFIX_LOCK) {
                tcg_gen_atomic_fetch_add_tl(s->T1, s->A0, s->T0,
                                            s->mem_index, ot | MO_LE);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
            } else {
                gen_op_ld_v(s, ot, s->T1, s->A0);
                tcg_gen_add_tl(s->T0, s->T0, s->T1);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        gen_op_update2_cc(s);
        set_cc_op(s, CC_OP_ADDB + ot);
        break;
    case 0x1b0:
    case 0x1b1: /* cmpxchg Ev, Gv */
        {
            TCGv oldv, newv, cmpv, dest;

            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            oldv = tcg_temp_new();
            newv = tcg_temp_new();
            cmpv = tcg_temp_new();
            gen_op_mov_v_reg(s, ot, newv, reg);
            tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
            gen_extu(ot, cmpv);
            if (s->prefix & PREFIX_LOCK) {
                if (mod == 3) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_atomic_cmpxchg_tl(oldv, s->A0, cmpv, newv,
                                          s->mem_index, ot | MO_LE);
            } else {
                if (mod == 3) {
                    rm = (modrm & 7) | REX_B(s);
                    gen_op_mov_v_reg(s, ot, oldv, rm);
                    gen_extu(ot, oldv);

                    /*
                     * Unlike the memory case, where "the destination operand receives
                     * a write cycle without regard to the result of the comparison",
                     * rm must not be touched altogether if the write fails, including
                     * not zero-extending it on 64-bit processors.  So, precompute
                     * the result of a successful writeback and perform the movcond
                     * directly on cpu_regs.  Also need to write accumulator first, in
                     * case rm is part of RAX too.
                     */
                    dest = gen_op_deposit_reg_v(s, ot, rm, newv, newv);
                    tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, newv, dest);
                } else {
                    gen_lea_modrm(env, s, modrm);
                    gen_op_ld_v(s, ot, oldv, s->A0);

                    /*
                     * Perform an unconditional store cycle like physical cpu;
                     * must be before changing accumulator to ensure
                     * idempotency if the store faults and the instruction
                     * is restarted
                     */
                    tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
                    gen_op_st_v(s, ot, newv, s->A0);
                }
            }
            /*
             * Write EAX only if the cmpxchg fails; reuse newv as the destination,
             * since it's dead here.
             */
            dest = gen_op_deposit_reg_v(s, ot, R_EAX, newv, oldv);
            tcg_gen_movcond_tl(TCG_COND_EQ, dest, oldv, cmpv, dest, newv);
            tcg_gen_mov_tl(cpu_cc_src, oldv);
            tcg_gen_mov_tl(s->cc_srcT, cmpv);
            tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
            set_cc_op(s, CC_OP_SUBB + ot);
        }
        break;
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        switch ((modrm >> 3) & 7) {
        case 1: /* CMPXCHG8, CMPXCHG16 */
            if (mod == 3) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (dflag == MO_64) {
                if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
                    goto illegal_op;
                }
                gen_cmpxchg16b(s, env, modrm);
                break;
            }
#endif
            if (!(s->cpuid_features & CPUID_CX8)) {
                goto illegal_op;
            }
            gen_cmpxchg8b(s, env, modrm);
            break;

        case 7: /* RDSEED, RDPID with f3 prefix */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (s->prefix & PREFIX_REPZ) {
                if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
                    goto illegal_op;
                }
                gen_helper_rdpid(s->T0, tcg_env);
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_reg_v(s, dflag, rm, s->T0);
                break;
            } else {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
                    goto illegal_op;
                }
                goto do_rdrand;
            }

        case 6: /* RDRAND */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
                !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
                goto illegal_op;
            }
        do_rdrand:
            translator_io_start(&s->base);
            gen_helper_rdrand(s->T0, tcg_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, dflag, rm, s->T0);
            set_cc_op(s, CC_OP_EFLAGS);
            break;

        default:
            goto illegal_op;
        }
        break;
    /**************************/
    /* push/pop */
    case 0x50 ... 0x57: /* push */
        gen_op_mov_v_reg(s, MO_32, s->T0, (b & 7) | REX_B(s));
        gen_push_v(s, s->T0);
        break;
    case 0x58 ... 0x5f: /* pop */
        ot = gen_pop_T0(s);
        /* NOTE: order is important for pop %sp */
        gen_pop_update(s, ot);
        gen_op_mov_reg_v(s, ot, (b & 7) | REX_B(s), s->T0);
        break;
    case 0x60: /* pusha */
        if (CODE64(s))
            goto illegal_op;
        gen_pusha(s);
        break;
    case 0x61: /* popa */
        if (CODE64(s))
            goto illegal_op;
        gen_popa(s);
        break;
    case 0x68: /* push Iv */
    case 0x6a:
        ot = mo_pushpop(s, dflag);
        if (b == 0x68) {
            val = insn_get(env, s, ot);
        } else {
            val = (int8_t)insn_get(env, s, MO_8);
        }
        tcg_gen_movi_tl(s->T0, val);
        gen_push_v(s, s->T0);
        break;
    case 0x8f: /* pop Ev */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        ot = gen_pop_T0(s);
        if (mod == 3) {
            /* NOTE: order is important for pop %sp */
            gen_pop_update(s, ot);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        } else {
            /* NOTE: order is important too for MMU exceptions */
            s->popl_esp_hack = 1 << ot;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            s->popl_esp_hack = 0;
            gen_pop_update(s, ot);
        }
        break;
    case 0xc8: /* enter */
        {
            int level;
            val = x86_lduw_code(env, s);
            level = x86_ldub_code(env, s);
            gen_enter(s, val, level);
        }
        break;
    case 0xc9: /* leave */
        gen_leave(s);
        break;
    case 0x06: /* push es */
    case 0x0e: /* push cs */
    case 0x16: /* push ss */
    case 0x1e: /* push ds */
        if (CODE64(s))
            goto illegal_op;
        gen_op_movl_T0_seg(s, b >> 3);
        gen_push_v(s, s->T0);
        break;
    case 0x1a0: /* push fs */
    case 0x1a8: /* push gs */
        gen_op_movl_T0_seg(s, (b >> 3) & 7);
        gen_push_v(s, s->T0);
        break;
    case 0x07: /* pop es */
    case 0x17: /* pop ss */
    case 0x1f: /* pop ds */
        if (CODE64(s))
            goto illegal_op;
        reg = b >> 3;
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, reg);
        gen_pop_update(s, ot);
        break;
    case 0x1a1: /* pop fs */
    case 0x1a9: /* pop gs */
        ot = gen_pop_T0(s);
        gen_movl_seg_T0(s, (b >> 3) & 7);
        gen_pop_update(s, ot);
        break;
    /**************************/
    /* mov */
    case 0x88:
    case 0x89: /* mov Gv, Ev */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0xc6:
    case 0xc7: /* mov Ev, Iv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod != 3) {
            s->rip_offset = insn_const_size(ot);
            gen_lea_modrm(env, s, modrm);
        }
        val = insn_get(env, s, ot);
        tcg_gen_movi_tl(s->T0, val);
        if (mod != 3) {
            gen_op_st_v(s, ot, s->T0, s->A0);
        } else {
            gen_op_mov_reg_v(s, ot, (modrm & 7) | REX_B(s), s->T0);
        }
        break;
    case 0x8a:
    case 0x8b: /* mov Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
    case 0x8e: /* mov seg, Gv */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        if (reg >= 6 || reg == R_CS)
            goto illegal_op;
        gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
        gen_movl_seg_T0(s, reg);
        break;
    case 0x8c: /* mov Gv, seg */
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (reg >= 6)
            goto illegal_op;
        gen_op_movl_T0_seg(s, reg);
        ot = mod == 3 ? dflag : MO_16;
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
        break;

    case 0x1b6: /* movzbS Gv, Eb */
    case 0x1b7: /* movzwS Gv, Eb */
    case 0x1be: /* movsbS Gv, Eb */
    case 0x1bf: /* movswS Gv, Eb */
        {
            MemOp d_ot;
            MemOp s_ot;

            /* d_ot is the size of destination */
            d_ot = dflag;
            /* ot is the size of source */
            ot = (b & 1) + MO_8;
            /* s_ot is the sign+size of source */
            s_ot = b & 8 ? MO_SIGN | ot : ot;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                if (s_ot == MO_SB && byte_reg_is_xH(s, rm)) {
                    tcg_gen_sextract_tl(s->T0, cpu_regs[rm - 4], 8, 8);
                } else {
                    gen_op_mov_v_reg(s, ot, s->T0, rm);
                    switch (s_ot) {
                    case MO_UB:
                        tcg_gen_ext8u_tl(s->T0, s->T0);
                        break;
                    case MO_SB:
                        tcg_gen_ext8s_tl(s->T0, s->T0);
                        break;
                    case MO_UW:
                        tcg_gen_ext16u_tl(s->T0, s->T0);
                        break;
                    default:
                    case MO_SW:
                        tcg_gen_ext16s_tl(s->T0, s->T0);
                        break;
                    }
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, s_ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        }
        break;
    case 0x8d: /* lea */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            TCGv ea = gen_lea_modrm_1(s, a, false);
            gen_lea_v_seg(s, s->aflag, ea, -1, -1);
            gen_op_mov_reg_v(s, dflag, reg, s->A0);
        }
        break;
    case 0xa0: /* mov EAX, Ov */
    case 0xa1:
    case 0xa2: /* mov Ov, EAX */
    case 0xa3:
        {
            target_ulong offset_addr;

            ot = mo_b_d(b, dflag);
            offset_addr = insn_get_addr(env, s, s->aflag);
            tcg_gen_movi_tl(s->A0, offset_addr);
            gen_add_A0_ds_seg(s);
            if ((b & 2) == 0) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
                gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
            } else {
                gen_op_mov_v_reg(s, ot, s->T0, R_EAX);
                gen_op_st_v(s, ot, s->T0, s->A0);
            }
        }
        break;
    case 0xd7: /* xlat */
        tcg_gen_mov_tl(s->A0, cpu_regs[R_EBX]);
        tcg_gen_ext8u_tl(s->T0, cpu_regs[R_EAX]);
        tcg_gen_add_tl(s->A0, s->A0, s->T0);
        gen_add_A0_ds_seg(s);
        gen_op_ld_v(s, MO_8, s->T0, s->A0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xb0 ... 0xb7: /* mov R, Ib */
        val = insn_get(env, s, MO_8);
        tcg_gen_movi_tl(s->T0, val);
        gen_op_mov_reg_v(s, MO_8, (b & 7) | REX_B(s), s->T0);
        break;
    case 0xb8 ... 0xbf: /* mov R, Iv */
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            uint64_t tmp;
            /* 64 bit case */
            tmp = x86_ldq_code(env, s);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, tmp);
            gen_op_mov_reg_v(s, MO_64, reg, s->T0);
        } else
#endif
        {
            ot = dflag;
            val = insn_get(env, s, ot);
            reg = (b & 7) | REX_B(s);
            tcg_gen_movi_tl(s->T0, val);
            gen_op_mov_reg_v(s, ot, reg, s->T0);
        }
        break;
    case 0x91 ... 0x97: /* xchg R, EAX */
    do_xchg_reg_eax:
        ot = dflag;
        reg = (b & 7) | REX_B(s);
        rm = R_EAX;
        goto do_xchg_reg;
    case 0x86:
    case 0x87: /* xchg Ev, Gv */
        ot = mo_b_d(b, dflag);
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3) {
            rm = (modrm & 7) | REX_B(s);
        do_xchg_reg:
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            gen_op_mov_v_reg(s, ot, s->T1, rm);
            gen_op_mov_reg_v(s, ot, rm, s->T0);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        } else {
            gen_lea_modrm(env, s, modrm);
            gen_op_mov_v_reg(s, ot, s->T0, reg);
            /* for xchg, lock is implicit */
            tcg_gen_atomic_xchg_tl(s->T1, s->A0, s->T0,
                                   s->mem_index, ot | MO_LE);
            gen_op_mov_reg_v(s, ot, reg, s->T1);
        }
        break;
    case 0xc4: /* les Gv */
        /* In CODE64 this is VEX3; see above.  */
        op = R_ES;
        goto do_lxx;
    case 0xc5: /* lds Gv */
        /* In CODE64 this is VEX2; see above.  */
        op = R_DS;
        goto do_lxx;
    case 0x1b2: /* lss Gv */
        op = R_SS;
        goto do_lxx;
    case 0x1b4: /* lfs Gv */
        op = R_FS;
        goto do_lxx;
    case 0x1b5: /* lgs Gv */
        op = R_GS;
    do_lxx:
        ot = dflag != MO_16 ? MO_32 : MO_16;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(env, s, modrm);
        gen_op_ld_v(s, ot, s->T1, s->A0);
        gen_add_A0_im(s, 1 << ot);
        /* load the segment first to handle exceptions properly */
        gen_op_ld_v(s, MO_16, s->T0, s->A0);
        gen_movl_seg_T0(s, op);
        /* then put the data */
        gen_op_mov_reg_v(s, ot, reg, s->T1);
        break;
    /************************/
    /* shifts */
    case 0xc0:
    case 0xc1:
        /* shift Ev,Ib */
        shift = 2;
    grp2:
        {
            ot = mo_b_d(b, dflag);
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            op = (modrm >> 3) & 7;

            if (mod != 3) {
                if (shift == 2) {
                    s->rip_offset = 1;
                }
                gen_lea_modrm(env, s, modrm);
                opreg = OR_TMP0;
            } else {
                opreg = (modrm & 7) | REX_B(s);
            }

            /* simpler op */
            if (shift == 0) {
                gen_shift(s, op, ot, opreg, OR_ECX);
            } else {
                if (shift == 2) {
                    shift = x86_ldub_code(env, s);
                }
                gen_shifti(s, op, ot, opreg, shift);
            }
        }
        break;
    case 0xd0:
    case 0xd1:
        /* shift Ev,1 */
        shift = 1;
        goto grp2;
    case 0xd2:
    case 0xd3:
        /* shift Ev,cl */
        shift = 0;
        goto grp2;

    case 0x1a4: /* shld imm */
        op = 0;
        shift = 1;
        goto do_shiftd;
    case 0x1a5: /* shld cl */
        op = 0;
        shift = 0;
        goto do_shiftd;
    case 0x1ac: /* shrd imm */
        op = 1;
        shift = 1;
        goto do_shiftd;
    case 0x1ad: /* shrd cl */
        op = 1;
        shift = 0;
    do_shiftd:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        if (mod != 3) {
            gen_lea_modrm(env, s, modrm);
            opreg = OR_TMP0;
        } else {
            opreg = rm;
        }
        gen_op_mov_v_reg(s, ot, s->T1, reg);

        if (shift) {
            TCGv imm = tcg_constant_tl(x86_ldub_code(env, s));
            gen_shiftd_rm_T1(s, ot, opreg, op, imm);
        } else {
            gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
        }
        break;
    /************************/
    /* floats */
    case 0xd8 ... 0xdf:
        {
            bool update_fip = true;

            if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
                /* if CR0.EM or CR0.TS are set, generate an FPU exception */
                /* XXX: what to do if illegal op ? */
                gen_exception(s, EXCP07_PREX);
                break;
            }
            modrm = x86_ldub_code(env, s);
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            op = ((b & 7) << 3) | ((modrm >> 3) & 7);
            if (mod != 3) {
                /* memory op */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                TCGv ea = gen_lea_modrm_1(s, a, false);
                TCGv last_addr = tcg_temp_new();
                bool update_fdp = true;

                tcg_gen_mov_tl(last_addr, ea);
                gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);

                switch (op) {
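
                /*
                 * For this x87 block, op packs the low three opcode bits
                 * (b & 7, i.e. which of d8..df) with the ModRM reg field
                 * into a 6-bit index, so the switches below dispatch on
                 * 0x00..0x3f for both the memory and the register forms.
                 */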
                case 0x00 ... 0x07: /* fxxxs */
                case 0x10 ... 0x17: /* fixxxl */
                case 0x20 ... 0x27: /* fxxxl */
                case 0x30 ... 0x37: /* fixxx */
                    {
                        int op1;
                        op1 = op & 7;

                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                            break;
                        }

                        gen_helper_fp_arith_ST0_FT0(op1);
                        if (op1 == 3) {
                            /* fcomp needs pop */
                            gen_helper_fpop(tcg_env);
                        }
                    }
                    break;
                case 0x08: /* flds */
                case 0x0a: /* fsts */
                case 0x0b: /* fstps */
                case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
                case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
                case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
                    switch (op & 7) {
                    case 0:
                        switch (op >> 4) {
                        case 0:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
                            break;
                        case 1:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                            break;
                        case 2:
                            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
                            break;
                        case 3:
                        default:
                            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LESW);
                            gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                            break;
                        }
                        break;
                    case 1:
                        /* XXX: the corresponding CPUID bit must be tested ! */
                        switch (op >> 4) {
                        case 1:
                            gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        switch (op >> 4) {
                        case 0:
                            gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 1:
                            gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUL);
                            break;
                        case 2:
                            gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
                            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                                s->mem_index, MO_LEUQ);
                            break;
                        case 3:
                        default:
                            gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
                            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                                s->mem_index, MO_LEUW);
                            break;
                        }
                        if ((op & 7) == 3) {
                            gen_helper_fpop(tcg_env);
                        }
                        break;
                    }
                    break;
                case 0x0c: /* fldenv mem */
                    gen_helper_fldenv(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0d: /* fldcw mem */
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    gen_helper_fldcw(tcg_env, s->tmp2_i32);
                    update_fip = update_fdp = false;
                    break;
                case 0x0e: /* fnstenv mem */
                    gen_helper_fstenv(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x0f: /* fnstcw mem */
                    gen_helper_fnstcw(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x1d: /* fldt mem */
                    gen_helper_fldt_ST0(tcg_env, s->A0);
                    break;
                case 0x1f: /* fstpt mem */
                    gen_helper_fstt_ST0(tcg_env, s->A0);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x2c: /* frstor mem */
                    gen_helper_frstor(tcg_env, s->A0,
                                      tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2e: /* fnsave mem */
                    gen_helper_fsave(tcg_env, s->A0,
                                     tcg_constant_i32(dflag - 1));
                    update_fip = update_fdp = false;
                    break;
                case 0x2f: /* fnstsw mem */
                    gen_helper_fnstsw(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    update_fip = update_fdp = false;
                    break;
                case 0x3c: /* fbld */
                    gen_helper_fbld_ST0(tcg_env, s->A0);
                    break;
                case 0x3e: /* fbstp */
                    gen_helper_fbst_ST0(tcg_env, s->A0);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x3d: /* fildll */
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
                    break;
                case 0x3f: /* fistpll */
                    gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fpop(tcg_env);
                    break;
                default:
                    goto unknown_op;
                }

                if (update_fdp) {
                    int last_seg = s->override >= 0 ? s->override : a.def_seg;

                    tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                                   offsetof(CPUX86State,
                                            segs[last_seg].selector));
                    tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                                     offsetof(CPUX86State, fpds));
                    tcg_gen_st_tl(last_addr, tcg_env,
                                  offsetof(CPUX86State, fpdp));
                }
            } else {
                /* register float ops */
                opreg = rm;

                switch (op) {
                case 0x08: /* fld sti */
                    gen_helper_fpush(tcg_env);
                    gen_helper_fmov_ST0_STN(tcg_env,
                                            tcg_constant_i32((opreg + 1) & 7));
                    break;
                case 0x09: /* fxchg sti */
                case 0x29: /* fxchg4 sti, undocumented op */
                case 0x39: /* fxchg7 sti, undocumented op */
                    gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x0a: /* grp d9/2 */
                    switch (rm) {
                    case 0: /* fnop */
                        /*
                         * check exceptions (FreeBSD FPU probe)
                         * needs to be treated as I/O because of ferr_irq
                         */
                        translator_io_start(&s->base);
                        gen_helper_fwait(tcg_env);
                        update_fip = false;
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0c: /* grp d9/4 */
                    switch (rm) {
                    case 0: /* fchs */
                        gen_helper_fchs_ST0(tcg_env);
                        break;
                    case 1: /* fabs */
                        gen_helper_fabs_ST0(tcg_env);
                        break;
                    case 4: /* ftst */
                        gen_helper_fldz_FT0(tcg_env);
                        gen_helper_fcom_ST0_FT0(tcg_env);
                        break;
                    case 5: /* fxam */
                        gen_helper_fxam_ST0(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0d: /* grp d9/5 */
                    switch (rm) {
                    case 0:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fld1_ST0(tcg_env);
                        break;
                    case 1:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldl2t_ST0(tcg_env);
                        break;
                    case 2:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldl2e_ST0(tcg_env);
                        break;
                    case 3:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldpi_ST0(tcg_env);
                        break;
                    case 4:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldlg2_ST0(tcg_env);
                        break;
                    case 5:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldln2_ST0(tcg_env);
                        break;
                    case 6:
                        gen_helper_fpush(tcg_env);
                        gen_helper_fldz_ST0(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x0e: /* grp d9/6 */
                    switch (rm) {
                    case 0: /* f2xm1 */
                        gen_helper_f2xm1(tcg_env);
                        break;
                    case 1: /* fyl2x */
                        gen_helper_fyl2x(tcg_env);
                        break;
                    case 2: /* fptan */
                        gen_helper_fptan(tcg_env);
                        break;
                    case 3: /* fpatan */
                        gen_helper_fpatan(tcg_env);
                        break;
                    case 4: /* fxtract */
                        gen_helper_fxtract(tcg_env);
                        break;
                    case 5: /* fprem1 */
                        gen_helper_fprem1(tcg_env);
                        break;
                    case 6: /* fdecstp */
                        gen_helper_fdecstp(tcg_env);
                        break;
                    default:
                    case 7: /* fincstp */
                        gen_helper_fincstp(tcg_env);
                        break;
                    }
                    break;
                case 0x0f: /* grp d9/7 */
                    switch (rm) {
                    case 0: /* fprem */
                        gen_helper_fprem(tcg_env);
                        break;
                    case 1: /* fyl2xp1 */
                        gen_helper_fyl2xp1(tcg_env);
                        break;
                    case 2: /* fsqrt */
                        gen_helper_fsqrt(tcg_env);
                        break;
                    case 3: /* fsincos */
                        gen_helper_fsincos(tcg_env);
                        break;
                    case 5: /* fscale */
                        gen_helper_fscale(tcg_env);
                        break;
                    case 4: /* frndint */
                        gen_helper_frndint(tcg_env);
                        break;
                    case 6: /* fsin */
                        gen_helper_fsin(tcg_env);
                        break;
                    default:
                    case 7: /* fcos */
                        gen_helper_fcos(tcg_env);
                        break;
                    }
                    break;
                case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
                case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
                case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
                    {
                        int op1;

                        op1 = op & 7;
                        if (op >= 0x20) {
                            gen_helper_fp_arith_STN_ST0(op1, opreg);
                            if (op >= 0x30) {
                                gen_helper_fpop(tcg_env);
                            }
                        } else {
                            gen_helper_fmov_FT0_STN(tcg_env,
                                                    tcg_constant_i32(opreg));
                            gen_helper_fp_arith_ST0_FT0(op1);
                        }
                    }
                    break;
                case 0x02: /* fcom */
                case 0x22: /* fcom2, undocumented op */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(tcg_env);
                    break;
                case 0x03: /* fcomp */
                case 0x23: /* fcomp3, undocumented op */
                case 0x32: /* fcomp5, undocumented op */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcom_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x15: /* da/5 */
                    switch (rm) {
                    case 1: /* fucompp */
                        gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                        gen_helper_fucom_ST0_FT0(tcg_env);
                        gen_helper_fpop(tcg_env);
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1c:
                    switch (rm) {
                    case 0: /* feni (287 only, just do nop here) */
                        break;
                    case 1: /* fdisi (287 only, just do nop here) */
                        break;
                    case 2: /* fclex */
                        gen_helper_fclex(tcg_env);
                        update_fip = false;
                        break;
                    case 3: /* fninit */
                        gen_helper_fninit(tcg_env);
                        update_fip = false;
                        break;
                    case 4: /* fsetpm (287 only, just do nop here) */
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x1d: /* fucomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x1e: /* fcomi */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x28: /* ffree sti */
                    gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x2a: /* fst sti */
                    gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
                    break;
                case 0x2b: /* fstp sti */
                case 0x0b: /* fstp1 sti, undocumented op */
                case 0x3a: /* fstp8 sti, undocumented op */
                case 0x3b: /* fstp9 sti, undocumented op */
                    gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x2c: /* fucom st(i) */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(tcg_env);
                    break;
                case 0x2d: /* fucomp st(i) */
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucom_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x33: /* de/3 */
                    switch (rm) {
                    case 1: /* fcompp */
                        gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                        gen_helper_fcom_ST0_FT0(tcg_env);
                        gen_helper_fpop(tcg_env);
                        gen_helper_fpop(tcg_env);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x38: /* ffreep sti, undocumented op */
                    gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fpop(tcg_env);
                    break;
                case 0x3c: /* df/4 */
                    switch (rm) {
                    case 0:
                        gen_helper_fnstsw(s->tmp2_i32, tcg_env);
                        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
                        gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                        break;
                    default:
                        goto unknown_op;
                    }
                    break;
                case 0x3d: /* fucomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fucomi_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x3e: /* fcomip */
                    if (!(s->cpuid_features & CPUID_CMOV)) {
                        goto illegal_op;
                    }
                    gen_update_cc_op(s);
                    gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
                    gen_helper_fcomi_ST0_FT0(tcg_env);
                    gen_helper_fpop(tcg_env);
                    set_cc_op(s, CC_OP_EFLAGS);
                    break;
                case 0x10 ... 0x13: /* fcmovxx */
                case 0x18 ... 0x1b:
                    {
                        int op1;
                        TCGLabel *l1;
                        static const uint8_t fcmov_cc[8] = {
                            (JCC_B << 1),
                            (JCC_Z << 1),
                            (JCC_BE << 1),
                            (JCC_P << 1),
                        };

                        if (!(s->cpuid_features & CPUID_CMOV)) {
                            goto illegal_op;
                        }
                        op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                        l1 = gen_new_label();
                        gen_jcc1_noeob(s, op1, l1);
                        gen_helper_fmov_ST0_STN(tcg_env,
                                                tcg_constant_i32(opreg));
                        gen_set_label(l1);
                    }
                    break;
                default:
                    goto unknown_op;
                }
            }

            if (update_fip) {
                tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                               offsetof(CPUX86State, segs[R_CS].selector));
                tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                                 offsetof(CPUX86State, fpcs));
                tcg_gen_st_tl(eip_cur_tl(s),
                              tcg_env, offsetof(CPUX86State, fpip));
            }
        }
        break;
4939 case 0xa4: /* movsS */
4941 ot
= mo_b_d(b
, dflag
);
4942 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4943 gen_repz_movs(s
, ot
);
4949 case 0xaa: /* stosS */
4951 ot
= mo_b_d(b
, dflag
);
4952 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
4953 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4954 gen_repz_stos(s
, ot
);
4959 case 0xac: /* lodsS */
4961 ot
= mo_b_d(b
, dflag
);
4962 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
4963 gen_repz_lods(s
, ot
);
4968 case 0xae: /* scasS */
4970 ot
= mo_b_d(b
, dflag
);
4971 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
4972 if (prefixes
& PREFIX_REPNZ
) {
4973 gen_repz_scas(s
, ot
, 1);
4974 } else if (prefixes
& PREFIX_REPZ
) {
4975 gen_repz_scas(s
, ot
, 0);
4981 case 0xa6: /* cmpsS */
4983 ot
= mo_b_d(b
, dflag
);
4984 if (prefixes
& PREFIX_REPNZ
) {
4985 gen_repz_cmps(s
, ot
, 1);
4986 } else if (prefixes
& PREFIX_REPZ
) {
4987 gen_repz_cmps(s
, ot
, 0);
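
    /*
     * In outline, the gen_repz_* expanders emit: if ECX == 0, skip to the
     * next insn; do one iteration; decrement ECX; then jump back to the
     * start of this insn.  The loop stays within one TB only when
     * s->repz_opt allows it, so icount mode can account each iteration.
     */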
    case 0x6c: /* insS */
    case 0x6d:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32,
                          SVM_IOIO_TYPE_MASK | SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_ins(s, ot);
        } else {
            gen_ins(s, ot);
        }
        break;

    case 0x6e: /* outsS */
    case 0x6f:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_STR_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
            gen_repz_outs(s, ot);
        } else {
            gen_outs(s, ot);
        }
        break;
    /************************/
    /* port I/O */
    case 0xe4: /* in im */
    case 0xe5:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;

    case 0xe6: /* out im */
    case 0xe7:
        ot = mo_b_d32(b, dflag);
        val = x86_ldub_code(env, s);
        tcg_gen_movi_i32(s->tmp2_i32, val);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;

    case 0xec: /* in dx */
    case 0xed:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, SVM_IOIO_TYPE_MASK)) {
            break;
        }
        translator_io_start(&s->base);
        gen_helper_in_func(ot, s->T1, s->tmp2_i32);
        gen_op_mov_reg_v(s, ot, R_EAX, s->T1);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;

    case 0xee: /* out dx */
    case 0xef:
        ot = mo_b_d32(b, dflag);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
        tcg_gen_ext16u_i32(s->tmp2_i32, s->tmp2_i32);
        if (!gen_check_io(s, ot, s->tmp2_i32, 0)) {
            break;
        }
        translator_io_start(&s->base);
        gen_op_mov_v_reg(s, ot, s->T1, R_EAX);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
        gen_bpt_io(s, s->tmp2_i32, ot);
        break;
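
    /*
     * gen_check_io() performs the TSS I/O-permission-bitmap check (and the
     * SVM IOIO intercept) for the cases above; it returns false when it
     * already raised an exception, which is why the callers simply break.
     * translator_io_start() flags the TB as performing I/O so icount mode
     * can end it deterministically.
     */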
    /************************/
    /* control */
    case 0xc2: /* ret im */
        val = x86_ldsw_code(env, s);
        ot = gen_pop_T0(s);
        gen_stack_update(s, val + (1 << ot));
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xc3: /* ret */
        ot = gen_pop_T0(s);
        gen_pop_update(s, ot);
        /* Note that gen_pop_T0 uses a zero-extending load. */
        gen_op_jmp_v(s, s->T0);
        gen_bnd_jmp(s);
        s->base.is_jmp = DISAS_JUMP;
        break;
    case 0xca: /* lret im */
        val = x86_ldsw_code(env, s);
    do_lret:
        if (PE(s) && !VM86(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_lret_protected(tcg_env, tcg_constant_i32(dflag - 1),
                                      tcg_constant_i32(val));
        } else {
            gen_stack_A0(s);
            /* pop offset */
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            /* NOTE: keeping EIP updated is not a problem in case of
               exception */
            gen_op_jmp_v(s, s->T0);
            /* pop selector */
            gen_add_A0_im(s, 1 << dflag);
            gen_op_ld_v(s, dflag, s->T0, s->A0);
            gen_op_movl_seg_T0_vm(s, R_CS);
            /* add stack offset */
            gen_stack_update(s, val + (2 << dflag));
        }
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0xcb: /* lret */
        val = 0;
        goto do_lret;
    case 0xcf: /* iret */
        gen_svm_check_intercept(s, SVM_EXIT_IRET);
        if (!PE(s) || VM86(s)) {
            /* real mode or vm86 mode */
            if (!check_vm86_iopl(s)) {
                break;
            }
            gen_helper_iret_real(tcg_env, tcg_constant_i32(dflag - 1));
        } else {
            gen_helper_iret_protected(tcg_env, tcg_constant_i32(dflag - 1),
                                      eip_next_i32(s));
        }
        set_cc_op(s, CC_OP_EFLAGS);
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
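
    /*
     * Far returns and IRET can switch CS, CPL and other state that is
     * baked into the TB flags, so these paths end the TB outright
     * (DISAS_EOB_ONLY) instead of chaining to a successor TB.
     */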
    case 0xe8: /* call im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_push_v(s, eip_next_tl(s));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x9a: /* lcall im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_lcall;
    case 0xe9: /* jmp im */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0xea: /* ljmp im */
        {
            unsigned int selector, offset;

            if (CODE64(s))
                goto illegal_op;
            ot = dflag;
            offset = insn_get(env, s, ot);
            selector = insn_get(env, s, MO_16);

            tcg_gen_movi_tl(s->T0, selector);
            tcg_gen_movi_tl(s->T1, offset);
        }
        goto do_ljmp;
    case 0xeb: /* jmp Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
    case 0x70 ... 0x7f: /* jcc Jb */
        {
            int diff = (int8_t)insn_get(env, s, MO_8);
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;
    case 0x180 ... 0x18f: /* jcc Jv */
        {
            int diff = (dflag != MO_16
                        ? (int32_t)insn_get(env, s, MO_32)
                        : (int16_t)insn_get(env, s, MO_16));
            gen_bnd_jmp(s);
            gen_jcc(s, b, diff);
        }
        break;

    case 0x190 ... 0x19f: /* setcc Gv */
        modrm = x86_ldub_code(env, s);
        gen_setcc1(s, b, s->T0);
        gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
        break;
    case 0x140 ... 0x14f: /* cmov Gv, Ev */
        if (!(s->cpuid_features & CPUID_CMOV)) {
            goto illegal_op;
        }
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_cmovcc1(s, b ^ 1, s->T0, cpu_regs[reg]);
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
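
    /*
     * Design note: CMOV is translated without branches.  The source
     * operand is always loaded (as on hardware) and gen_cmovcc1() selects
     * between it and the old destination with a TCG movcond, so no
     * TB-internal control flow is needed.
     */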
    /************************/
    /* flags */
    case 0x9c: /* pushf */
        gen_svm_check_intercept(s, SVM_EXIT_PUSHF);
        if (check_vm86_iopl(s)) {
            gen_update_cc_op(s);
            gen_helper_read_eflags(s->T0, tcg_env);
            gen_push_v(s, s->T0);
        }
        break;
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, SVM_EXIT_POPF);
        if (check_vm86_iopl(s)) {
            int mask = TF_MASK | AC_MASK | ID_MASK | NT_MASK;

            if (CPL(s) == 0) {
                mask |= IF_MASK | IOPL_MASK;
            } else if (CPL(s) <= IOPL(s)) {
                mask |= IF_MASK;
            }
            if (dflag == MO_16) {
                mask &= 0xffff;
            }

            ot = gen_pop_T0(s);
            gen_helper_write_eflags(tcg_env, s->T0, tcg_constant_i32(mask));
            gen_pop_update(s, ot);
            set_cc_op(s, CC_OP_EFLAGS);
            /* abort translation because TF/AC flag may change */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        tcg_gen_shri_tl(s->T0, cpu_regs[R_EAX], 8);
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(s->T0, s->T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, s->T0);
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_compute_eflags(s);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(s->T0, cpu_cc_src, 0x02);
        tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], s->T0, 8, 8);
        break;
    case 0xf5: /* cmc */
        gen_compute_eflags(s);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xf8: /* clc */
        gen_compute_eflags(s);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        break;
    case 0xf9: /* stc */
        gen_compute_eflags(s);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(s->tmp2_i32, 1);
        tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(s->tmp2_i32, -1);
        tcg_gen_st_i32(s->tmp2_i32, tcg_env, offsetof(CPUX86State, df));
        break;
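
    /*
     * Note that DF is kept in env->df as +1/-1 rather than as an EFLAGS
     * bit: the string-op expanders can then advance ESI/EDI by df << ot
     * without re-testing EFLAGS.DF on every iteration.
     */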
    /************************/
    /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
        /* load shift */
        val = x86_ldub_code(env, s);
        tcg_gen_movi_tl(s->T1, val);
        if (op < 4)
            goto unknown_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(s, MO_32, s->T1, reg);
        if (mod != 3) {
            AddressParts a = gen_lea_modrm_0(env, s, modrm);
            /* specific case: we need to add a displacement */
            gen_exts(ot, s->T1);
            tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
            tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
            tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
            gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
        tcg_gen_movi_tl(s->tmp0, 1);
        tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
        if (s->prefix & PREFIX_LOCK) {
            switch (op) {
            case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now. */
                gen_op_ld_v(s, ot, s->T0, s->A0);
                break;
            case 1: /* bts */
                tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
                                           s->mem_index, ot | MO_LE);
                break;
            case 2: /* btr */
                tcg_gen_not_tl(s->tmp0, s->tmp0);
                tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            default:
            case 3: /* btc */
                tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            }
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
        } else {
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
            switch (op) {
            case 0: /* bt */
                /* Data already loaded; nothing to do. */
                break;
            case 1: /* bts */
                tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
                break;
            case 2: /* btr */
                tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
                break;
            default:
            case 3: /* btc */
                tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
                break;
            }
            if (op != 0) {
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
        }

        /* Delay all CC updates until after the store above.  Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined. */
        switch (s->cc_op) {
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width.  */
            tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
            break;
        default:
            /* Otherwise, generate EFLAGS and replace the C bit. */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
                               ctz32(CC_C), 1);
            break;
        }
        break;
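
    /*
     * The rebasing above works because the CC_OP_* enumerators for each
     * family are laid out in B/W/L/Q order: "(s->cc_op - CC_OP_MULB) & 3"
     * recovers the operand-size index, which is then applied to the SAR
     * family so Z is still derived from CC_DST while C comes from CC_SRC.
     */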
    case 0x1bc: /* bsf / tzcnt */
    case 0x1bd: /* bsr / lzcnt */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);

        /* Note that lzcnt and tzcnt are in different extensions.  */
        if ((prefixes & PREFIX_REPZ)
            && (b & 1
                ? s->cpuid_ext3_features & CPUID_EXT3_ABM
                : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
            int size = 8 << ot;

            /* For lzcnt/tzcnt, C bit is defined related to the input. */
            tcg_gen_mov_tl(cpu_cc_src, s->T0);
            if (b & 1) {
                /* For lzcnt, reduce the target_ulong result by the
                   number of zeros that we expect to find at the top. */
                tcg_gen_clzi_tl(s->T0, s->T0, TARGET_LONG_BITS);
                tcg_gen_subi_tl(s->T0, s->T0, TARGET_LONG_BITS - size);
            } else {
                /* For tzcnt, a zero input must return the operand size. */
                tcg_gen_ctzi_tl(s->T0, s->T0, size);
            }
            /* For lzcnt/tzcnt, Z bit is defined related to the result. */
            gen_op_update1_cc(s);
            set_cc_op(s, CC_OP_BMILGB + ot);
        } else {
            /* For bsr/bsf, only the Z bit is defined and it is related
               to the input and not the result. */
            tcg_gen_mov_tl(cpu_cc_dst, s->T0);
            set_cc_op(s, CC_OP_LOGICB + ot);

            /* ??? The manual says that the output is undefined when the
               input is zero, but real hardware leaves it unchanged, and
               real programs appear to depend on that.  Accomplish this
               by passing the output as the value to return upon zero. */
            if (b & 1) {
                /* For bsr, return the bit index of the first 1 bit,
                   not the count of leading zeros. */
                tcg_gen_xori_tl(s->T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
                tcg_gen_clz_tl(s->T0, s->T0, s->T1);
                tcg_gen_xori_tl(s->T0, s->T0, TARGET_LONG_BITS - 1);
            } else {
                tcg_gen_ctz_tl(s->T0, s->T0, cpu_regs[reg]);
            }
        }
        gen_op_mov_reg_v(s, ot, reg, s->T0);
        break;
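
    /*
     * tcg_gen_clz_tl/tcg_gen_ctz_tl take a third operand that is returned
     * when the input is zero; passing the old register value here is what
     * makes BSF/BSR leave the destination unchanged on zero input,
     * matching observed hardware rather than the manual.
     */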
    /************************/
    /* bcd */
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_daa(tcg_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_das(tcg_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aaa(tcg_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_helper_aas(tcg_env);
        set_cc_op(s, CC_OP_EFLAGS);
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ);
        } else {
            gen_helper_aam(tcg_env, tcg_constant_i32(val));
            set_cc_op(s, CC_OP_LOGICB);
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = x86_ldub_code(env, s);
        gen_helper_aad(tcg_env, tcg_constant_i32(val));
        set_cc_op(s, CC_OP_LOGICB);
        break;
    /************************/
    /* misc */
    case 0x90: /* nop */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK) {
            goto illegal_op;
        }
        /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
        if (REX_B(s)) {
            goto do_xchg_reg_eax;
        }
        if (prefixes & PREFIX_REPZ) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_pause(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX);
        } else {
            /* needs to be treated as I/O because of ferr_irq */
            translator_io_start(&s->base);
            gen_helper_fwait(tcg_env);
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3);
        break;
    case 0xcd: /* int N */
        val = x86_ldub_code(env, s);
        if (check_vm86_iopl(s)) {
            gen_interrupt(s, val);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_into(tcg_env, cur_insn_len_i32(s));
        break;
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, SVM_EXIT_ICEBP);
        gen_exception(s, EXCP01_DB);
        break;
    case 0xfa: /* cli */
        if (check_iopl(s)) {
            gen_reset_eflags(s, IF_MASK);
        }
        break;
    case 0xfb: /* sti */
        if (check_iopl(s)) {
            gen_set_eflags(s, IF_MASK);
            /* interruptions are enabled only the first insn after sti */
            gen_update_eip_next(s);
            gen_eob_inhibit_irq(s, true);
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_v_reg(s, ot, s->T0, reg);
        gen_lea_modrm(env, s, modrm);
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        if (ot == MO_16) {
            gen_helper_boundw(tcg_env, s->A0, s->tmp2_i32);
        } else {
            gen_helper_boundl(tcg_env, s->A0, s->tmp2_i32);
        }
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == MO_64) {
            tcg_gen_bswap64_i64(cpu_regs[reg], cpu_regs[reg]);
            break;
        }
#endif
        tcg_gen_bswap32_tl(cpu_regs[reg], cpu_regs[reg], TCG_BSWAP_OZ);
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        gen_compute_eflags_c(s, s->T0);
        tcg_gen_neg_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, MO_8, R_EAX, s->T0);
        break;
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            TCGLabel *l1, *l2;
            int diff = (int8_t)insn_get(env, s, MO_8);

            l1 = gen_new_label();
            l2 = gen_new_label();
            gen_update_cc_op(s);
            b &= 3;
            switch (b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s, l2);
                gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s, s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s, l1);
                break;
            default:
            case 3: /* jecxz */
                gen_op_jz_ecx(s, l1);
                break;
            }

            gen_set_label(l2);
            gen_jmp_rel_csize(s, 0, 1);

            gen_set_label(l1);
            gen_jmp_rel(s, dflag, diff, 0);
        }
        break;
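
    /*
     * Label scheme for the loop insns above: l1 is the "branch taken"
     * path (jump by diff), l2 is the fall-through path, and
     * gen_jmp_rel_csize(s, 0, 1) emits the jump to the next sequential
     * instruction.
     */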
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            if (b & 2) {
                gen_helper_rdmsr(tcg_env);
            } else {
                gen_helper_wrmsr(tcg_env);
                s->base.is_jmp = DISAS_EOB_NEXT;
            }
        }
        break;
    case 0x131: /* rdtsc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        translator_io_start(&s->base);
        gen_helper_rdtsc(tcg_env);
        break;
    case 0x133: /* rdpmc */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_rdpmc(tcg_env);
        s->base.is_jmp = DISAS_NORETURN;
        break;
    case 0x134: /* sysenter */
        /* For AMD SYSENTER is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s)) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysenter(tcg_env);
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x135: /* sysexit */
        /* For AMD SYSEXIT is not valid in long mode */
        if (LMA(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysexit(tcg_env, tcg_constant_i32(dflag - 1));
            s->base.is_jmp = DISAS_EOB_ONLY;
        }
        break;
    case 0x105: /* syscall */
        /* For Intel SYSCALL is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_syscall(tcg_env, cur_insn_len_i32(s));
        /* TF handling for the syscall insn is different. The TF bit is
           checked after the syscall insn completes. This allows #DB to not be
           generated after one has entered CPL0 if TF is set in FMASK. */
        gen_eob_worker(s, false, true);
        break;
    case 0x107: /* sysret */
        /* For Intel SYSRET is only valid in long mode */
        if (!LMA(s) && env->cpuid_vendor1 == CPUID_VENDOR_INTEL_1) {
            goto illegal_op;
        }
        if (!PE(s) || CPL(s) != 0) {
            gen_exception_gpf(s);
        } else {
            gen_helper_sysret(tcg_env, tcg_constant_i32(dflag - 1));
            /* condition codes are modified only in long mode */
            if (LMA(s)) {
                set_cc_op(s, CC_OP_EFLAGS);
            }
            /* TF handling for the sysret insn is different. The TF bit is
               checked after the sysret insn completes. This allows #DB to be
               generated "as if" the syscall insn in userspace has just
               returned. */
            gen_eob_worker(s, false, true);
        }
        break;
    case 0x1a2: /* cpuid */
        gen_update_cc_op(s);
        gen_update_eip_cur(s);
        gen_helper_cpuid(tcg_env);
        break;
    case 0xf4: /* hlt */
        if (check_cpl0(s)) {
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_hlt(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
        }
        break;
    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* sldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(tcg_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(tcg_env, s->tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(tcg_env, s->T0);
            } else {
                gen_helper_verw(tcg_env, s->T0);
            }
            set_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto unknown_op;
        }
        break;
    case 0x101:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* sgdt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0,
                             tcg_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            gen_add_A0_ds_seg(s);
            gen_helper_monitor(tcg_env, s->A0);
            break;

        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_reset_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_set_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(1): /* sidt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;
        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;

        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
            if (!check_cpl0(s)) {
                break;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change. */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        case 0xd8: /* VMRUN */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
                             cur_insn_len_i32(s));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xd9: /* VMMCALL */
            if (!SVME(s)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmmcall(tcg_env);
            break;

        case 0xda: /* VMLOAD */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdb: /* VMSAVE */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdc: /* STGI */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_helper_stgi(tcg_env);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xdd: /* CLGI */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_clgi(tcg_env);
            break;

        case 0xde: /* SKINIT */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
            /* If not intercepted, not implemented -- raise #UD. */
            goto illegal_op;

        case 0xdf: /* INVLPGA */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
            if (s->aflag == MO_64) {
                tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            } else {
                tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
            }
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
            break;

        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
            break;
        case 0xee: /* rdpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
            break;
        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(env, s, modrm);
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtsc(tcg_env);
            gen_helper_rdpid(s->T0, tcg_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd; wbnoinvd with REPZ prefix */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, (b & 1) ? SVM_EXIT_WBINVD : SVM_EXIT_INVD);
            /* nothing to do */
        }
        break;
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag;

            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_v_reg(s, MO_32, s->T0, rm);
                /* sign extend */
                if (d_ot == MO_64) {
                    tcg_gen_ext32s_tl(s->T0, s->T0);
                }
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            } else {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, MO_32 | MO_SIGN, s->T0, s->A0);
                gen_op_mov_reg_v(s, d_ot, reg, s->T0);
            }
        } else
#endif
        {
            TCGLabel *label1;
            TCGv t0, t1, t2;

            if (!PE(s) || VM86(s))
                goto illegal_op;
            t0 = tcg_temp_new();
            t1 = tcg_temp_new();
            t2 = tcg_temp_new();
            ot = MO_16;
            modrm = x86_ldub_code(env, s);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(env, s, modrm);
                gen_op_ld_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_v_reg(s, ot, t0, rm);
            }
            gen_op_mov_v_reg(s, ot, t1, reg);
            tcg_gen_andi_tl(s->tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, s->tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(s, ot, t0, s->A0);
            } else {
                gen_op_mov_reg_v(s, ot, rm, t0);
            }
            gen_compute_eflags(s);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            TCGLabel *label1;
            TCGv t0;

            if (!PE(s) || VM86(s))
                goto illegal_op;
            ot = dflag != MO_16 ? MO_32 : MO_16;
            modrm = x86_ldub_code(env, s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
            t0 = tcg_temp_new();
            gen_update_cc_op(s);
            if (b == 0x102) {
                gen_helper_lar(t0, tcg_env, s->T0);
            } else {
                gen_helper_lsl(t0, tcg_env, s->T0);
            }
            tcg_gen_andi_tl(s->tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, s->tmp0, 0, label1);
            gen_op_mov_reg_v(s, ot, reg, t0);
            gen_set_label(label1);
            set_cc_op(s, CC_OP_EFLAGS);
        }
        break;
    case 0x118:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* prefetchnta */
        case 1: /* prefetchnt0 */
        case 2: /* prefetchnt0 */
        case 3: /* prefetchnt0 */
            if (mod == 3)
                goto illegal_op;
            gen_nop_modrm(env, s, modrm);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(env, s, modrm);
            break;
        }
        break;
    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->aflag, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
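
    /*
     * MPX detail: cpu_bndu[] holds the upper bound in one's-complement
     * form, as architecturally defined for the BND registers.  That is
     * why bndmk stores ~address, bndcu re-complements before comparing,
     * and bndcn compares the raw (not-complemented) value.
     */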
    case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
        modrm = x86_ldub_code(env, s);
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (!check_cpl0(s)) {
            break;
        }
        modrm = x86_ldub_code(env, s);
        /*
         * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
         * AMD documentation (24594.pdf) and testing of Intel 386 and 486
         * processors all show that the mod bits are assumed to be 1's,
         * regardless of actual values.
         */
        rm = (modrm & 7) | REX_B(s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        switch (reg) {
        case 0:
            if ((prefixes & PREFIX_LOCK) &&
                (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
                reg = 8;
            }
            break;
        case 2:
        case 3:
        case 4:
        case 8:
            break;
        default:
            goto unknown_op;
        }
        ot  = (CODE64(s) ? MO_64 : MO_32);

        translator_io_start(&s->base);
        if (b & 2) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0 + reg);
            gen_op_mov_v_reg(s, ot, s->T0, rm);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(reg), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
        } else {
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0 + reg);
            gen_helper_read_crN(s->T0, tcg_env, tcg_constant_i32(reg));
            gen_op_mov_reg_v(s, ot, rm, s->T0);
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (check_cpl0(s)) {
            modrm = x86_ldub_code(env, s);
            /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
             * AMD documentation (24594.pdf) and testing of
             * intel 386 and 486 processors all show that the mod bits
             * are assumed to be 1's, regardless of actual values.
             */
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (CODE64(s))
                ot = MO_64;
            else
                ot = MO_32;
            if (reg >= 8) {
                goto illegal_op;
            }
            if (b & 2) {
                gen_svm_check_intercept(s, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_v_reg(s, ot, s->T0, rm);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_set_dr(tcg_env, s->tmp2_i32, s->T0);
                s->base.is_jmp = DISAS_EOB_NEXT;
            } else {
                gen_svm_check_intercept(s, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_movi_i32(s->tmp2_i32, reg);
                gen_helper_get_dr(s->T0, tcg_env, s->tmp2_i32);
                gen_op_mov_reg_v(s, ot, rm, s->T0);
            }
        }
        break;
    case 0x106: /* clts */
        if (check_cpl0(s)) {
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_helper_clts(tcg_env);
            /* abort block because static cpu state changed */
            s->base.is_jmp = DISAS_EOB_NEXT;
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = mo_64_32(dflag);
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | REX_R(s);
        /* generate a generic store */
        gen_ldst_modrm(env, s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* fxsave */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxsave(tcg_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(1): /* fxrstor */
            if (!(s->cpuid_features & CPUID_FXSR)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            gen_helper_fxrstor(tcg_env, s->A0);
            break;

        CASE_MODRM_MEM_OP(2): /* ldmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0, s->mem_index, MO_LEUL);
            gen_helper_ldmxcsr(tcg_env, s->tmp2_i32);
            break;

        CASE_MODRM_MEM_OP(3): /* stmxcsr */
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
                goto illegal_op;
            }
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX);
                break;
            }
            gen_helper_update_mxcsr(tcg_env);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, mxcsr));
            gen_op_st_v(s, MO_32, s->T0, s->A0);
            break;
        CASE_MODRM_MEM_OP(4): /* xsave */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xsave(tcg_env, s->A0, s->tmp1_i64);
            break;

        CASE_MODRM_MEM_OP(5): /* xrstor */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (prefixes & (PREFIX_LOCK | PREFIX_DATA
                                | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_lea_modrm(env, s, modrm);
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            gen_helper_xrstor(tcg_env, s->A0, s->tmp1_i64);
            /* XRSTOR is how MPX is enabled, which changes how
               we translate.  Thus we need to end the TB. */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;
        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(tcg_env, s->A0, s->tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime. */
                tcg_gen_movi_i32(s->tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(tcg_env, s->tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
#ifdef CONFIG_USER_ONLY
        /* we should not be in SMM mode */
        g_assert_not_reached();
#else
        gen_update_cc_op(s);
        gen_update_eip_next(s);
        gen_helper_rsm(tcg_env);
#endif /* CONFIG_USER_ONLY */
        s->base.is_jmp = DISAS_EOB_ONLY;
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, s->T0);
        tcg_gen_mov_tl(cpu_cc_src, s->T0);
        tcg_gen_ctpop_tl(s->T0, s->T0);
        gen_op_mov_reg_v(s, ot, reg, s->T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        disas_insn_new(s, cpu, b);
        break;
    default:
        goto unknown_op;
    }
    return true;
 illegal_op:
    gen_illegal_opcode(s);
    return true;
 unknown_op:
    gen_unknown_opcode(env, s);
    return true;
}
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8]  = "r8",
        [9]  = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
        "rip"
#else
        "eip"
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
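
/*
 * Registering these env fields as named TCG globals lets generated code
 * reference them directly as TCG values and lets the optimizer cache them
 * in host registers across a TB, spilling back to CPUX86State only when
 * required (e.g. around helper calls).
 */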
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(cpu, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_end = tcg_last_op();
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}
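
/*
 * With CF_PCREL the same TB may execute at different virtual addresses,
 * so only the intra-page offset of the PC is recorded in the insn_start
 * op; unwinding adds the page base back when restoring state.
 */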
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    if (disas_insn(dc, cpu)) {
        target_ulong pc_next = dc->pc;
        dc->base.pc_next = pc_next;

        if (dc->base.is_jmp == DISAS_NEXT) {
            if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
                /*
                 * If single step mode, we generate only one instruction and
                 * generate an exception.
                 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
                 * the flag and abort the translation to give the irqs a
                 * chance to happen.
                 */
                dc->base.is_jmp = DISAS_EOB_NEXT;
            } else if (!is_same_page(&dc->base, pc_next)) {
                dc->base.is_jmp = DISAS_TOO_MANY;
            }
        }
    }
}
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
        gen_eob(dc);
        break;
    case DISAS_EOB_INHIBIT_IRQ:
        gen_update_cc_op(dc);
        gen_update_eip_cur(dc);
        gen_eob_inhibit_irq(dc, true);
        break;
    case DISAS_JUMP:
        gen_jr(dc);
        break;
    default:
        g_assert_not_reached();
    }
}
static void i386_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cpu, FILE *logfile)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    fprintf(logfile, "IN: %s\n", lookup_symbol(dc->base.pc_first));
    target_disas(logfile, cpu, dc->base.pc_first, dc->base.tb->size);
}
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
    .disas_log          = i386_tr_disas_log,
};

/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}