4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
23 #include "disas/disas.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/cpu_ldst.h"
28 #include "exec/translator.h"
30 #include "exec/helper-proto.h"
31 #include "exec/helper-gen.h"
32 #include "helper-tcg.h"
36 #define PREFIX_REPZ 0x01
37 #define PREFIX_REPNZ 0x02
38 #define PREFIX_LOCK 0x04
39 #define PREFIX_DATA 0x08
40 #define PREFIX_ADR 0x10
41 #define PREFIX_VEX 0x20
42 #define PREFIX_REX 0x40
52 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
53 #define CASE_MODRM_MEM_OP(OP) \
54 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
55 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
56 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
58 #define CASE_MODRM_OP(OP) \
59 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
60 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
61 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
62 case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
64 //#define MACRO_TEST 1
66 /* global register indexes */
67 static TCGv cpu_cc_dst
, cpu_cc_src
, cpu_cc_src2
;
69 static TCGv_i32 cpu_cc_op
;
70 static TCGv cpu_regs
[CPU_NB_REGS
];
71 static TCGv cpu_seg_base
[6];
72 static TCGv_i64 cpu_bndl
[4];
73 static TCGv_i64 cpu_bndu
[4];
75 #include "exec/gen-icount.h"
77 typedef struct DisasContext
{
78 DisasContextBase base
;
80 target_ulong pc
; /* pc = eip + cs_base */
81 target_ulong cs_base
; /* base of CS segment */
87 int8_t override
; /* -1 if no override, else R_CS, R_DS, etc */
93 #ifndef CONFIG_USER_ONLY
94 uint8_t cpl
; /* code priv level */
95 uint8_t iopl
; /* i/o priv level */
97 uint8_t vex_l
; /* vex vector length */
98 uint8_t vex_v
; /* vex vvvv register, without 1's complement. */
99 uint8_t popl_esp_hack
; /* for correct popl with esp base handling */
100 uint8_t rip_offset
; /* only used in x86_64, but left for simplicity */
107 bool vex_w
; /* used by AVX even on 32-bit processors */
108 bool jmp_opt
; /* use direct block chaining for direct jumps */
109 bool repz_opt
; /* optimize jumps within repz instructions */
112 CCOp cc_op
; /* current CC operation */
113 int mem_index
; /* select memory access functions */
114 uint32_t flags
; /* all execution flags */
116 int cpuid_ext_features
;
117 int cpuid_ext2_features
;
118 int cpuid_ext3_features
;
119 int cpuid_7_0_ebx_features
;
120 int cpuid_7_0_ecx_features
;
121 int cpuid_xsave_features
;
123 /* TCG local temps */
129 /* TCG local register indexes (only used inside old micro ops) */
139 TCGOp
*prev_insn_end
;
142 #define DISAS_EOB_ONLY DISAS_TARGET_0
143 #define DISAS_EOB_NEXT DISAS_TARGET_1
144 #define DISAS_EOB_INHIBIT_IRQ DISAS_TARGET_2
145 #define DISAS_JUMP DISAS_TARGET_3
147 /* The environment in which user-only runs is constrained. */
148 #ifdef CONFIG_USER_ONLY
152 #define SVME(S) false
153 #define GUEST(S) false
155 #define PE(S) (((S)->flags & HF_PE_MASK) != 0)
156 #define CPL(S) ((S)->cpl)
157 #define IOPL(S) ((S)->iopl)
158 #define SVME(S) (((S)->flags & HF_SVME_MASK) != 0)
159 #define GUEST(S) (((S)->flags & HF_GUEST_MASK) != 0)
161 #if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
162 #define VM86(S) false
163 #define CODE32(S) true
165 #define ADDSEG(S) false
167 #define VM86(S) (((S)->flags & HF_VM_MASK) != 0)
168 #define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
169 #define SS32(S) (((S)->flags & HF_SS32_MASK) != 0)
170 #define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
172 #if !defined(TARGET_X86_64)
173 #define CODE64(S) false
175 #elif defined(CONFIG_USER_ONLY)
176 #define CODE64(S) true
179 #define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
180 #define LMA(S) (((S)->flags & HF_LMA_MASK) != 0)
184 #define REX_PREFIX(S) (((S)->prefix & PREFIX_REX) != 0)
185 #define REX_W(S) ((S)->vex_w)
186 #define REX_R(S) ((S)->rex_r + 0)
187 #define REX_X(S) ((S)->rex_x + 0)
188 #define REX_B(S) ((S)->rex_b + 0)
190 #define REX_PREFIX(S) false
191 #define REX_W(S) false
198 * Many sysemu-only helpers are not reachable for user-only.
199 * Define stub generators here, so that we need not either sprinkle
200 * ifdefs through the translator, nor provide the helper function.
202 #define STUB_HELPER(NAME, ...) \
203 static inline void gen_helper_##NAME(__VA_ARGS__) \
204 { qemu_build_not_reached(); }
206 #ifdef CONFIG_USER_ONLY
207 STUB_HELPER(clgi
, TCGv_env env
)
208 STUB_HELPER(flush_page
, TCGv_env env
, TCGv addr
)
209 STUB_HELPER(hlt
, TCGv_env env
, TCGv_i32 pc_ofs
)
210 STUB_HELPER(inb
, TCGv ret
, TCGv_env env
, TCGv_i32 port
)
211 STUB_HELPER(inw
, TCGv ret
, TCGv_env env
, TCGv_i32 port
)
212 STUB_HELPER(inl
, TCGv ret
, TCGv_env env
, TCGv_i32 port
)
213 STUB_HELPER(monitor
, TCGv_env env
, TCGv addr
)
214 STUB_HELPER(mwait
, TCGv_env env
, TCGv_i32 pc_ofs
)
215 STUB_HELPER(outb
, TCGv_env env
, TCGv_i32 port
, TCGv_i32 val
)
216 STUB_HELPER(outw
, TCGv_env env
, TCGv_i32 port
, TCGv_i32 val
)
217 STUB_HELPER(outl
, TCGv_env env
, TCGv_i32 port
, TCGv_i32 val
)
218 STUB_HELPER(rdmsr
, TCGv_env env
)
219 STUB_HELPER(read_crN
, TCGv ret
, TCGv_env env
, TCGv_i32 reg
)
220 STUB_HELPER(get_dr
, TCGv ret
, TCGv_env env
, TCGv_i32 reg
)
221 STUB_HELPER(set_dr
, TCGv_env env
, TCGv_i32 reg
, TCGv val
)
222 STUB_HELPER(stgi
, TCGv_env env
)
223 STUB_HELPER(svm_check_intercept
, TCGv_env env
, TCGv_i32 type
)
224 STUB_HELPER(vmload
, TCGv_env env
, TCGv_i32 aflag
)
225 STUB_HELPER(vmmcall
, TCGv_env env
)
226 STUB_HELPER(vmrun
, TCGv_env env
, TCGv_i32 aflag
, TCGv_i32 pc_ofs
)
227 STUB_HELPER(vmsave
, TCGv_env env
, TCGv_i32 aflag
)
228 STUB_HELPER(write_crN
, TCGv_env env
, TCGv_i32 reg
, TCGv val
)
229 STUB_HELPER(wrmsr
, TCGv_env env
)
232 static void gen_eob(DisasContext
*s
);
233 static void gen_jr(DisasContext
*s
);
234 static void gen_jmp_rel(DisasContext
*s
, MemOp ot
, int diff
, int tb_num
);
235 static void gen_jmp_rel_csize(DisasContext
*s
, int diff
, int tb_num
);
236 static void gen_op(DisasContext
*s1
, int op
, MemOp ot
, int d
);
237 static void gen_exception_gpf(DisasContext
*s
);
239 /* i386 arith/logic operations */
259 OP_SHL1
, /* undocumented */
275 /* I386 int registers */
276 OR_EAX
, /* MUST be even numbered */
285 OR_TMP0
= 16, /* temporary operand register */
287 OR_A0
, /* temporary register used when doing address evaluation */
297 /* Bit set if the global variable is live after setting CC_OP to X. */
298 static const uint8_t cc_op_live
[CC_OP_NB
] = {
299 [CC_OP_DYNAMIC
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
300 [CC_OP_EFLAGS
] = USES_CC_SRC
,
301 [CC_OP_MULB
... CC_OP_MULQ
] = USES_CC_DST
| USES_CC_SRC
,
302 [CC_OP_ADDB
... CC_OP_ADDQ
] = USES_CC_DST
| USES_CC_SRC
,
303 [CC_OP_ADCB
... CC_OP_ADCQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
304 [CC_OP_SUBB
... CC_OP_SUBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRCT
,
305 [CC_OP_SBBB
... CC_OP_SBBQ
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
306 [CC_OP_LOGICB
... CC_OP_LOGICQ
] = USES_CC_DST
,
307 [CC_OP_INCB
... CC_OP_INCQ
] = USES_CC_DST
| USES_CC_SRC
,
308 [CC_OP_DECB
... CC_OP_DECQ
] = USES_CC_DST
| USES_CC_SRC
,
309 [CC_OP_SHLB
... CC_OP_SHLQ
] = USES_CC_DST
| USES_CC_SRC
,
310 [CC_OP_SARB
... CC_OP_SARQ
] = USES_CC_DST
| USES_CC_SRC
,
311 [CC_OP_BMILGB
... CC_OP_BMILGQ
] = USES_CC_DST
| USES_CC_SRC
,
312 [CC_OP_ADCX
] = USES_CC_DST
| USES_CC_SRC
,
313 [CC_OP_ADOX
] = USES_CC_SRC
| USES_CC_SRC2
,
314 [CC_OP_ADCOX
] = USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
,
316 [CC_OP_POPCNT
] = USES_CC_SRC
,
319 static void set_cc_op(DisasContext
*s
, CCOp op
)
323 if (s
->cc_op
== op
) {
327 /* Discard CC computation that will no longer be used. */
328 dead
= cc_op_live
[s
->cc_op
] & ~cc_op_live
[op
];
329 if (dead
& USES_CC_DST
) {
330 tcg_gen_discard_tl(cpu_cc_dst
);
332 if (dead
& USES_CC_SRC
) {
333 tcg_gen_discard_tl(cpu_cc_src
);
335 if (dead
& USES_CC_SRC2
) {
336 tcg_gen_discard_tl(cpu_cc_src2
);
338 if (dead
& USES_CC_SRCT
) {
339 tcg_gen_discard_tl(s
->cc_srcT
);
342 if (op
== CC_OP_DYNAMIC
) {
343 /* The DYNAMIC setting is translator only, and should never be
344 stored. Thus we always consider it clean. */
345 s
->cc_op_dirty
= false;
347 /* Discard any computed CC_OP value (see shifts). */
348 if (s
->cc_op
== CC_OP_DYNAMIC
) {
349 tcg_gen_discard_i32(cpu_cc_op
);
351 s
->cc_op_dirty
= true;
356 static void gen_update_cc_op(DisasContext
*s
)
358 if (s
->cc_op_dirty
) {
359 tcg_gen_movi_i32(cpu_cc_op
, s
->cc_op
);
360 s
->cc_op_dirty
= false;
366 #define NB_OP_SIZES 4
368 #else /* !TARGET_X86_64 */
370 #define NB_OP_SIZES 3
372 #endif /* !TARGET_X86_64 */
375 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
376 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
377 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
378 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
379 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
381 #define REG_B_OFFSET 0
382 #define REG_H_OFFSET 1
383 #define REG_W_OFFSET 0
384 #define REG_L_OFFSET 0
385 #define REG_LH_OFFSET 4
388 /* In instruction encodings for byte register accesses the
389 * register number usually indicates "low 8 bits of register N";
390 * however there are some special cases where N 4..7 indicates
391 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
392 * true for this special case, false otherwise.
394 static inline bool byte_reg_is_xH(DisasContext
*s
, int reg
)
396 /* Any time the REX prefix is present, byte registers are uniform */
397 if (reg
< 4 || REX_PREFIX(s
)) {
403 /* Select the size of a push/pop operation. */
404 static inline MemOp
mo_pushpop(DisasContext
*s
, MemOp ot
)
407 return ot
== MO_16
? MO_16
: MO_64
;
413 /* Select the size of the stack pointer. */
414 static inline MemOp
mo_stacksize(DisasContext
*s
)
416 return CODE64(s
) ? MO_64
: SS32(s
) ? MO_32
: MO_16
;
419 /* Select only size 64 else 32. Used for SSE operand sizes. */
420 static inline MemOp
mo_64_32(MemOp ot
)
423 return ot
== MO_64
? MO_64
: MO_32
;
429 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
430 byte vs word opcodes. */
431 static inline MemOp
mo_b_d(int b
, MemOp ot
)
433 return b
& 1 ? ot
: MO_8
;
436 /* Select size 8 if lsb of B is clear, else OT capped at 32.
437 Used for decoding operand size of port opcodes. */
438 static inline MemOp
mo_b_d32(int b
, MemOp ot
)
440 return b
& 1 ? (ot
== MO_16
? MO_16
: MO_32
) : MO_8
;
443 static void gen_op_mov_reg_v(DisasContext
*s
, MemOp ot
, int reg
, TCGv t0
)
447 if (!byte_reg_is_xH(s
, reg
)) {
448 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 8);
450 tcg_gen_deposit_tl(cpu_regs
[reg
- 4], cpu_regs
[reg
- 4], t0
, 8, 8);
454 tcg_gen_deposit_tl(cpu_regs
[reg
], cpu_regs
[reg
], t0
, 0, 16);
457 /* For x86_64, this sets the higher half of register to zero.
458 For i386, this is equivalent to a mov. */
459 tcg_gen_ext32u_tl(cpu_regs
[reg
], t0
);
463 tcg_gen_mov_tl(cpu_regs
[reg
], t0
);
472 void gen_op_mov_v_reg(DisasContext
*s
, MemOp ot
, TCGv t0
, int reg
)
474 if (ot
== MO_8
&& byte_reg_is_xH(s
, reg
)) {
475 tcg_gen_extract_tl(t0
, cpu_regs
[reg
- 4], 8, 8);
477 tcg_gen_mov_tl(t0
, cpu_regs
[reg
]);
481 static void gen_add_A0_im(DisasContext
*s
, int val
)
483 tcg_gen_addi_tl(s
->A0
, s
->A0
, val
);
485 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
489 static inline void gen_op_jmp_v(DisasContext
*s
, TCGv dest
)
491 tcg_gen_mov_tl(cpu_eip
, dest
);
496 void gen_op_add_reg_im(DisasContext
*s
, MemOp size
, int reg
, int32_t val
)
498 tcg_gen_addi_tl(s
->tmp0
, cpu_regs
[reg
], val
);
499 gen_op_mov_reg_v(s
, size
, reg
, s
->tmp0
);
502 static inline void gen_op_add_reg_T0(DisasContext
*s
, MemOp size
, int reg
)
504 tcg_gen_add_tl(s
->tmp0
, cpu_regs
[reg
], s
->T0
);
505 gen_op_mov_reg_v(s
, size
, reg
, s
->tmp0
);
508 static inline void gen_op_ld_v(DisasContext
*s
, int idx
, TCGv t0
, TCGv a0
)
510 tcg_gen_qemu_ld_tl(t0
, a0
, s
->mem_index
, idx
| MO_LE
);
513 static inline void gen_op_st_v(DisasContext
*s
, int idx
, TCGv t0
, TCGv a0
)
515 tcg_gen_qemu_st_tl(t0
, a0
, s
->mem_index
, idx
| MO_LE
);
518 static inline void gen_op_st_rm_T0_A0(DisasContext
*s
, int idx
, int d
)
521 gen_op_st_v(s
, idx
, s
->T0
, s
->A0
);
523 gen_op_mov_reg_v(s
, idx
, d
, s
->T0
);
527 static void gen_update_eip_cur(DisasContext
*s
)
529 assert(s
->pc_save
!= -1);
530 if (TARGET_TB_PCREL
) {
531 tcg_gen_addi_tl(cpu_eip
, cpu_eip
, s
->base
.pc_next
- s
->pc_save
);
533 tcg_gen_movi_tl(cpu_eip
, s
->base
.pc_next
- s
->cs_base
);
535 s
->pc_save
= s
->base
.pc_next
;
538 static void gen_update_eip_next(DisasContext
*s
)
540 assert(s
->pc_save
!= -1);
541 if (TARGET_TB_PCREL
) {
542 tcg_gen_addi_tl(cpu_eip
, cpu_eip
, s
->pc
- s
->pc_save
);
544 tcg_gen_movi_tl(cpu_eip
, s
->pc
- s
->cs_base
);
549 static int cur_insn_len(DisasContext
*s
)
551 return s
->pc
- s
->base
.pc_next
;
554 static TCGv_i32
cur_insn_len_i32(DisasContext
*s
)
556 return tcg_constant_i32(cur_insn_len(s
));
559 static TCGv_i32
eip_next_i32(DisasContext
*s
)
561 assert(s
->pc_save
!= -1);
563 * This function has two users: lcall_real (always 16-bit mode), and
564 * iret_protected (16, 32, or 64-bit mode). IRET only uses the value
565 * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
566 * why passing a 32-bit value isn't broken. To avoid using this where
567 * we shouldn't, return -1 in 64-bit mode so that execution goes into
571 return tcg_constant_i32(-1);
573 if (TARGET_TB_PCREL
) {
574 TCGv_i32 ret
= tcg_temp_new_i32();
575 tcg_gen_trunc_tl_i32(ret
, cpu_eip
);
576 tcg_gen_addi_i32(ret
, ret
, s
->pc
- s
->pc_save
);
579 return tcg_constant_i32(s
->pc
- s
->cs_base
);
583 static TCGv
eip_next_tl(DisasContext
*s
)
585 assert(s
->pc_save
!= -1);
586 if (TARGET_TB_PCREL
) {
587 TCGv ret
= tcg_temp_new();
588 tcg_gen_addi_tl(ret
, cpu_eip
, s
->pc
- s
->pc_save
);
591 return tcg_constant_tl(s
->pc
- s
->cs_base
);
595 static TCGv
eip_cur_tl(DisasContext
*s
)
597 assert(s
->pc_save
!= -1);
598 if (TARGET_TB_PCREL
) {
599 TCGv ret
= tcg_temp_new();
600 tcg_gen_addi_tl(ret
, cpu_eip
, s
->base
.pc_next
- s
->pc_save
);
603 return tcg_constant_tl(s
->base
.pc_next
- s
->cs_base
);
607 /* Compute SEG:REG into A0. SEG is selected from the override segment
608 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
609 indicate no override. */
610 static void gen_lea_v_seg(DisasContext
*s
, MemOp aflag
, TCGv a0
,
611 int def_seg
, int ovr_seg
)
617 tcg_gen_mov_tl(s
->A0
, a0
);
624 if (ovr_seg
< 0 && ADDSEG(s
)) {
628 tcg_gen_ext32u_tl(s
->A0
, a0
);
634 tcg_gen_ext16u_tl(s
->A0
, a0
);
649 TCGv seg
= cpu_seg_base
[ovr_seg
];
651 if (aflag
== MO_64
) {
652 tcg_gen_add_tl(s
->A0
, a0
, seg
);
653 } else if (CODE64(s
)) {
654 tcg_gen_ext32u_tl(s
->A0
, a0
);
655 tcg_gen_add_tl(s
->A0
, s
->A0
, seg
);
657 tcg_gen_add_tl(s
->A0
, a0
, seg
);
658 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
663 static inline void gen_string_movl_A0_ESI(DisasContext
*s
)
665 gen_lea_v_seg(s
, s
->aflag
, cpu_regs
[R_ESI
], R_DS
, s
->override
);
668 static inline void gen_string_movl_A0_EDI(DisasContext
*s
)
670 gen_lea_v_seg(s
, s
->aflag
, cpu_regs
[R_EDI
], R_ES
, -1);
673 static inline void gen_op_movl_T0_Dshift(DisasContext
*s
, MemOp ot
)
675 tcg_gen_ld32s_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, df
));
676 tcg_gen_shli_tl(s
->T0
, s
->T0
, ot
);
679 static TCGv
gen_ext_tl(TCGv dst
, TCGv src
, MemOp size
, bool sign
)
684 tcg_gen_ext8s_tl(dst
, src
);
686 tcg_gen_ext8u_tl(dst
, src
);
691 tcg_gen_ext16s_tl(dst
, src
);
693 tcg_gen_ext16u_tl(dst
, src
);
699 tcg_gen_ext32s_tl(dst
, src
);
701 tcg_gen_ext32u_tl(dst
, src
);
710 static void gen_extu(MemOp ot
, TCGv reg
)
712 gen_ext_tl(reg
, reg
, ot
, false);
715 static void gen_exts(MemOp ot
, TCGv reg
)
717 gen_ext_tl(reg
, reg
, ot
, true);
720 static void gen_op_j_ecx(DisasContext
*s
, TCGCond cond
, TCGLabel
*label1
)
722 tcg_gen_mov_tl(s
->tmp0
, cpu_regs
[R_ECX
]);
723 gen_extu(s
->aflag
, s
->tmp0
);
724 tcg_gen_brcondi_tl(cond
, s
->tmp0
, 0, label1
);
727 static inline void gen_op_jz_ecx(DisasContext
*s
, TCGLabel
*label1
)
729 gen_op_j_ecx(s
, TCG_COND_EQ
, label1
);
732 static inline void gen_op_jnz_ecx(DisasContext
*s
, TCGLabel
*label1
)
734 gen_op_j_ecx(s
, TCG_COND_NE
, label1
);
737 static void gen_helper_in_func(MemOp ot
, TCGv v
, TCGv_i32 n
)
741 gen_helper_inb(v
, cpu_env
, n
);
744 gen_helper_inw(v
, cpu_env
, n
);
747 gen_helper_inl(v
, cpu_env
, n
);
754 static void gen_helper_out_func(MemOp ot
, TCGv_i32 v
, TCGv_i32 n
)
758 gen_helper_outb(cpu_env
, v
, n
);
761 gen_helper_outw(cpu_env
, v
, n
);
764 gen_helper_outl(cpu_env
, v
, n
);
772 * Validate that access to [port, port + 1<<ot) is allowed.
773 * Raise #GP, or VMM exit if not.
775 static bool gen_check_io(DisasContext
*s
, MemOp ot
, TCGv_i32 port
,
778 #ifdef CONFIG_USER_ONLY
780 * We do not implement the ioperm(2) syscall, so the TSS check
783 gen_exception_gpf(s
);
786 if (PE(s
) && (CPL(s
) > IOPL(s
) || VM86(s
))) {
787 gen_helper_check_io(cpu_env
, port
, tcg_constant_i32(1 << ot
));
791 gen_update_eip_cur(s
);
792 if (s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
793 svm_flags
|= SVM_IOIO_REP_MASK
;
795 svm_flags
|= 1 << (SVM_IOIO_SIZE_SHIFT
+ ot
);
796 gen_helper_svm_check_io(cpu_env
, port
,
797 tcg_constant_i32(svm_flags
),
798 cur_insn_len_i32(s
));
804 static void gen_movs(DisasContext
*s
, MemOp ot
)
806 gen_string_movl_A0_ESI(s
);
807 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
808 gen_string_movl_A0_EDI(s
);
809 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
810 gen_op_movl_T0_Dshift(s
, ot
);
811 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
812 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
815 static void gen_op_update1_cc(DisasContext
*s
)
817 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
820 static void gen_op_update2_cc(DisasContext
*s
)
822 tcg_gen_mov_tl(cpu_cc_src
, s
->T1
);
823 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
826 static void gen_op_update3_cc(DisasContext
*s
, TCGv reg
)
828 tcg_gen_mov_tl(cpu_cc_src2
, reg
);
829 tcg_gen_mov_tl(cpu_cc_src
, s
->T1
);
830 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
833 static inline void gen_op_testl_T0_T1_cc(DisasContext
*s
)
835 tcg_gen_and_tl(cpu_cc_dst
, s
->T0
, s
->T1
);
838 static void gen_op_update_neg_cc(DisasContext
*s
)
840 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
841 tcg_gen_neg_tl(cpu_cc_src
, s
->T0
);
842 tcg_gen_movi_tl(s
->cc_srcT
, 0);
845 /* compute all eflags to cc_src */
846 static void gen_compute_eflags(DisasContext
*s
)
848 TCGv zero
, dst
, src1
, src2
;
851 if (s
->cc_op
== CC_OP_EFLAGS
) {
854 if (s
->cc_op
== CC_OP_CLR
) {
855 tcg_gen_movi_tl(cpu_cc_src
, CC_Z
| CC_P
);
856 set_cc_op(s
, CC_OP_EFLAGS
);
865 /* Take care to not read values that are not live. */
866 live
= cc_op_live
[s
->cc_op
] & ~USES_CC_SRCT
;
867 dead
= live
^ (USES_CC_DST
| USES_CC_SRC
| USES_CC_SRC2
);
869 zero
= tcg_const_tl(0);
870 if (dead
& USES_CC_DST
) {
873 if (dead
& USES_CC_SRC
) {
876 if (dead
& USES_CC_SRC2
) {
882 gen_helper_cc_compute_all(cpu_cc_src
, dst
, src1
, src2
, cpu_cc_op
);
883 set_cc_op(s
, CC_OP_EFLAGS
);
890 typedef struct CCPrepare
{
900 /* compute eflags.C to reg */
901 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
907 case CC_OP_SUBB
... CC_OP_SUBQ
:
908 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
909 size
= s
->cc_op
- CC_OP_SUBB
;
910 t1
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
911 /* If no temporary was used, be careful not to alias t1 and t0. */
912 t0
= t1
== cpu_cc_src
? s
->tmp0
: reg
;
913 tcg_gen_mov_tl(t0
, s
->cc_srcT
);
917 case CC_OP_ADDB
... CC_OP_ADDQ
:
918 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
919 size
= s
->cc_op
- CC_OP_ADDB
;
920 t1
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
921 t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
923 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= t0
,
924 .reg2
= t1
, .mask
= -1, .use_reg2
= true };
926 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
929 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
931 case CC_OP_INCB
... CC_OP_INCQ
:
932 case CC_OP_DECB
... CC_OP_DECQ
:
933 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
934 .mask
= -1, .no_setcond
= true };
936 case CC_OP_SHLB
... CC_OP_SHLQ
:
937 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
938 size
= s
->cc_op
- CC_OP_SHLB
;
939 shift
= (8 << size
) - 1;
940 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
941 .mask
= (target_ulong
)1 << shift
};
943 case CC_OP_MULB
... CC_OP_MULQ
:
944 return (CCPrepare
) { .cond
= TCG_COND_NE
,
945 .reg
= cpu_cc_src
, .mask
= -1 };
947 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
948 size
= s
->cc_op
- CC_OP_BMILGB
;
949 t0
= gen_ext_tl(reg
, cpu_cc_src
, size
, false);
950 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
954 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_dst
,
955 .mask
= -1, .no_setcond
= true };
958 case CC_OP_SARB
... CC_OP_SARQ
:
960 return (CCPrepare
) { .cond
= TCG_COND_NE
,
961 .reg
= cpu_cc_src
, .mask
= CC_C
};
964 /* The need to compute only C from CC_OP_DYNAMIC is important
965 in efficiently implementing e.g. INC at the start of a TB. */
967 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
968 cpu_cc_src2
, cpu_cc_op
);
969 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
970 .mask
= -1, .no_setcond
= true };
974 /* compute eflags.P to reg */
975 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
977 gen_compute_eflags(s
);
978 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
982 /* compute eflags.S to reg */
983 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
987 gen_compute_eflags(s
);
993 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
997 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1000 MemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1001 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, true);
1002 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= t0
, .mask
= -1 };
1007 /* compute eflags.O to reg */
1008 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
1013 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
1014 .mask
= -1, .no_setcond
= true };
1017 return (CCPrepare
) { .cond
= TCG_COND_NEVER
, .mask
= -1 };
1019 gen_compute_eflags(s
);
1020 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1025 /* compute eflags.Z to reg */
1026 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
1030 gen_compute_eflags(s
);
1036 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1039 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
, .mask
= -1 };
1041 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= cpu_cc_src
,
1045 MemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1046 TCGv t0
= gen_ext_tl(reg
, cpu_cc_dst
, size
, false);
1047 return (CCPrepare
) { .cond
= TCG_COND_EQ
, .reg
= t0
, .mask
= -1 };
1052 /* perform a conditional store into register 'reg' according to jump opcode
1053 value 'b'. In the fast case, T0 is guaranted not to be used. */
1054 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
1056 int inv
, jcc_op
, cond
;
1062 jcc_op
= (b
>> 1) & 7;
1065 case CC_OP_SUBB
... CC_OP_SUBQ
:
1066 /* We optimize relational operators for the cmp/jcc case. */
1067 size
= s
->cc_op
- CC_OP_SUBB
;
1070 tcg_gen_mov_tl(s
->tmp4
, s
->cc_srcT
);
1071 gen_extu(size
, s
->tmp4
);
1072 t0
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, false);
1073 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= s
->tmp4
,
1074 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1083 tcg_gen_mov_tl(s
->tmp4
, s
->cc_srcT
);
1084 gen_exts(size
, s
->tmp4
);
1085 t0
= gen_ext_tl(s
->tmp0
, cpu_cc_src
, size
, true);
1086 cc
= (CCPrepare
) { .cond
= cond
, .reg
= s
->tmp4
,
1087 .reg2
= t0
, .mask
= -1, .use_reg2
= true };
1097 /* This actually generates good code for JC, JZ and JS. */
1100 cc
= gen_prepare_eflags_o(s
, reg
);
1103 cc
= gen_prepare_eflags_c(s
, reg
);
1106 cc
= gen_prepare_eflags_z(s
, reg
);
1109 gen_compute_eflags(s
);
1110 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
1111 .mask
= CC_Z
| CC_C
};
1114 cc
= gen_prepare_eflags_s(s
, reg
);
1117 cc
= gen_prepare_eflags_p(s
, reg
);
1120 gen_compute_eflags(s
);
1121 if (reg
== cpu_cc_src
) {
1124 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1125 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1126 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1131 gen_compute_eflags(s
);
1132 if (reg
== cpu_cc_src
) {
1135 tcg_gen_shri_tl(reg
, cpu_cc_src
, 4); /* CC_O -> CC_S */
1136 tcg_gen_xor_tl(reg
, reg
, cpu_cc_src
);
1137 cc
= (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
1138 .mask
= CC_S
| CC_Z
};
1145 cc
.cond
= tcg_invert_cond(cc
.cond
);
1150 static void gen_setcc1(DisasContext
*s
, int b
, TCGv reg
)
1152 CCPrepare cc
= gen_prepare_cc(s
, b
, reg
);
1154 if (cc
.no_setcond
) {
1155 if (cc
.cond
== TCG_COND_EQ
) {
1156 tcg_gen_xori_tl(reg
, cc
.reg
, 1);
1158 tcg_gen_mov_tl(reg
, cc
.reg
);
1163 if (cc
.cond
== TCG_COND_NE
&& !cc
.use_reg2
&& cc
.imm
== 0 &&
1164 cc
.mask
!= 0 && (cc
.mask
& (cc
.mask
- 1)) == 0) {
1165 tcg_gen_shri_tl(reg
, cc
.reg
, ctztl(cc
.mask
));
1166 tcg_gen_andi_tl(reg
, reg
, 1);
1169 if (cc
.mask
!= -1) {
1170 tcg_gen_andi_tl(reg
, cc
.reg
, cc
.mask
);
1174 tcg_gen_setcond_tl(cc
.cond
, reg
, cc
.reg
, cc
.reg2
);
1176 tcg_gen_setcondi_tl(cc
.cond
, reg
, cc
.reg
, cc
.imm
);
1180 static inline void gen_compute_eflags_c(DisasContext
*s
, TCGv reg
)
1182 gen_setcc1(s
, JCC_B
<< 1, reg
);
1185 /* generate a conditional jump to label 'l1' according to jump opcode
1186 value 'b'. In the fast case, T0 is guaranted not to be used. */
1187 static inline void gen_jcc1_noeob(DisasContext
*s
, int b
, TCGLabel
*l1
)
1189 CCPrepare cc
= gen_prepare_cc(s
, b
, s
->T0
);
1191 if (cc
.mask
!= -1) {
1192 tcg_gen_andi_tl(s
->T0
, cc
.reg
, cc
.mask
);
1196 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1198 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1202 /* Generate a conditional jump to label 'l1' according to jump opcode
1203 value 'b'. In the fast case, T0 is guaranted not to be used.
1204 A translation block must end soon. */
1205 static inline void gen_jcc1(DisasContext
*s
, int b
, TCGLabel
*l1
)
1207 CCPrepare cc
= gen_prepare_cc(s
, b
, s
->T0
);
1209 gen_update_cc_op(s
);
1210 if (cc
.mask
!= -1) {
1211 tcg_gen_andi_tl(s
->T0
, cc
.reg
, cc
.mask
);
1214 set_cc_op(s
, CC_OP_DYNAMIC
);
1216 tcg_gen_brcond_tl(cc
.cond
, cc
.reg
, cc
.reg2
, l1
);
1218 tcg_gen_brcondi_tl(cc
.cond
, cc
.reg
, cc
.imm
, l1
);
1222 /* XXX: does not work with gdbstub "ice" single step - not a
1224 static TCGLabel
*gen_jz_ecx_string(DisasContext
*s
)
1226 TCGLabel
*l1
= gen_new_label();
1227 TCGLabel
*l2
= gen_new_label();
1228 gen_op_jnz_ecx(s
, l1
);
1230 gen_jmp_rel_csize(s
, 0, 1);
1235 static void gen_stos(DisasContext
*s
, MemOp ot
)
1237 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
1238 gen_string_movl_A0_EDI(s
);
1239 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1240 gen_op_movl_T0_Dshift(s
, ot
);
1241 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1244 static void gen_lods(DisasContext
*s
, MemOp ot
)
1246 gen_string_movl_A0_ESI(s
);
1247 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1248 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
1249 gen_op_movl_T0_Dshift(s
, ot
);
1250 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1253 static void gen_scas(DisasContext
*s
, MemOp ot
)
1255 gen_string_movl_A0_EDI(s
);
1256 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
1257 gen_op(s
, OP_CMPL
, ot
, R_EAX
);
1258 gen_op_movl_T0_Dshift(s
, ot
);
1259 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1262 static void gen_cmps(DisasContext
*s
, MemOp ot
)
1264 gen_string_movl_A0_EDI(s
);
1265 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
1266 gen_string_movl_A0_ESI(s
);
1267 gen_op(s
, OP_CMPL
, ot
, OR_TMP0
);
1268 gen_op_movl_T0_Dshift(s
, ot
);
1269 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1270 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1273 static void gen_bpt_io(DisasContext
*s
, TCGv_i32 t_port
, int ot
)
1275 if (s
->flags
& HF_IOBPT_MASK
) {
1276 #ifdef CONFIG_USER_ONLY
1277 /* user-mode cpu should not be in IOBPT mode */
1278 g_assert_not_reached();
1280 TCGv_i32 t_size
= tcg_constant_i32(1 << ot
);
1281 TCGv t_next
= eip_next_tl(s
);
1282 gen_helper_bpt_io(cpu_env
, t_port
, t_size
, t_next
);
1283 #endif /* CONFIG_USER_ONLY */
1287 static void gen_ins(DisasContext
*s
, MemOp ot
)
1289 gen_string_movl_A0_EDI(s
);
1290 /* Note: we must do this dummy write first to be restartable in
1291 case of page fault. */
1292 tcg_gen_movi_tl(s
->T0
, 0);
1293 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1294 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
1295 tcg_gen_andi_i32(s
->tmp2_i32
, s
->tmp2_i32
, 0xffff);
1296 gen_helper_in_func(ot
, s
->T0
, s
->tmp2_i32
);
1297 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1298 gen_op_movl_T0_Dshift(s
, ot
);
1299 gen_op_add_reg_T0(s
, s
->aflag
, R_EDI
);
1300 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
1303 static void gen_outs(DisasContext
*s
, MemOp ot
)
1305 gen_string_movl_A0_ESI(s
);
1306 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1308 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
1309 tcg_gen_andi_i32(s
->tmp2_i32
, s
->tmp2_i32
, 0xffff);
1310 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T0
);
1311 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
1312 gen_op_movl_T0_Dshift(s
, ot
);
1313 gen_op_add_reg_T0(s
, s
->aflag
, R_ESI
);
1314 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
1317 /* Generate jumps to current or next instruction */
1318 static void gen_repz(DisasContext
*s
, MemOp ot
,
1319 void (*fn
)(DisasContext
*s
, MemOp ot
))
1322 gen_update_cc_op(s
);
1323 l2
= gen_jz_ecx_string(s
);
1325 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
1327 * A loop would cause two single step exceptions if ECX = 1
1328 * before rep string_insn
1331 gen_op_jz_ecx(s
, l2
);
1333 gen_jmp_rel_csize(s
, -cur_insn_len(s
), 0);
1336 #define GEN_REPZ(op) \
1337 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot) \
1338 { gen_repz(s, ot, gen_##op); }
1340 static void gen_repz2(DisasContext
*s
, MemOp ot
, int nz
,
1341 void (*fn
)(DisasContext
*s
, MemOp ot
))
1344 gen_update_cc_op(s
);
1345 l2
= gen_jz_ecx_string(s
);
1347 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
1348 gen_update_cc_op(s
);
1349 gen_jcc1(s
, (JCC_Z
<< 1) | (nz
^ 1), l2
);
1351 gen_op_jz_ecx(s
, l2
);
1353 gen_jmp_rel_csize(s
, -cur_insn_len(s
), 0);
1356 #define GEN_REPZ2(op) \
1357 static inline void gen_repz_ ## op(DisasContext *s, MemOp ot, int nz) \
1358 { gen_repz2(s, ot, nz, gen_##op); }
1368 static void gen_helper_fp_arith_ST0_FT0(int op
)
1372 gen_helper_fadd_ST0_FT0(cpu_env
);
1375 gen_helper_fmul_ST0_FT0(cpu_env
);
1378 gen_helper_fcom_ST0_FT0(cpu_env
);
1381 gen_helper_fcom_ST0_FT0(cpu_env
);
1384 gen_helper_fsub_ST0_FT0(cpu_env
);
1387 gen_helper_fsubr_ST0_FT0(cpu_env
);
1390 gen_helper_fdiv_ST0_FT0(cpu_env
);
1393 gen_helper_fdivr_ST0_FT0(cpu_env
);
1398 /* NOTE the exception in "r" op ordering */
1399 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1401 TCGv_i32 tmp
= tcg_const_i32(opreg
);
1404 gen_helper_fadd_STN_ST0(cpu_env
, tmp
);
1407 gen_helper_fmul_STN_ST0(cpu_env
, tmp
);
1410 gen_helper_fsubr_STN_ST0(cpu_env
, tmp
);
1413 gen_helper_fsub_STN_ST0(cpu_env
, tmp
);
1416 gen_helper_fdivr_STN_ST0(cpu_env
, tmp
);
1419 gen_helper_fdiv_STN_ST0(cpu_env
, tmp
);
1424 static void gen_exception(DisasContext
*s
, int trapno
)
1426 gen_update_cc_op(s
);
1427 gen_update_eip_cur(s
);
1428 gen_helper_raise_exception(cpu_env
, tcg_const_i32(trapno
));
1429 s
->base
.is_jmp
= DISAS_NORETURN
;
1432 /* Generate #UD for the current instruction. The assumption here is that
1433 the instruction is known, but it isn't allowed in the current cpu mode. */
1434 static void gen_illegal_opcode(DisasContext
*s
)
1436 gen_exception(s
, EXCP06_ILLOP
);
1439 /* Generate #GP for the current instruction. */
1440 static void gen_exception_gpf(DisasContext
*s
)
1442 gen_exception(s
, EXCP0D_GPF
);
1445 /* Check for cpl == 0; if not, raise #GP and return false. */
1446 static bool check_cpl0(DisasContext
*s
)
1451 gen_exception_gpf(s
);
1455 /* If vm86, check for iopl == 3; if not, raise #GP and return false. */
1456 static bool check_vm86_iopl(DisasContext
*s
)
1458 if (!VM86(s
) || IOPL(s
) == 3) {
1461 gen_exception_gpf(s
);
1465 /* Check for iopl allowing access; if not, raise #GP and return false. */
1466 static bool check_iopl(DisasContext
*s
)
1468 if (VM86(s
) ? IOPL(s
) == 3 : CPL(s
) <= IOPL(s
)) {
1471 gen_exception_gpf(s
);
1475 /* if d == OR_TMP0, it means memory operand (address in A0) */
1476 static void gen_op(DisasContext
*s1
, int op
, MemOp ot
, int d
)
1479 if (s1
->prefix
& PREFIX_LOCK
) {
1480 /* Lock prefix when destination is not memory. */
1481 gen_illegal_opcode(s1
);
1484 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1485 } else if (!(s1
->prefix
& PREFIX_LOCK
)) {
1486 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1490 gen_compute_eflags_c(s1
, s1
->tmp4
);
1491 if (s1
->prefix
& PREFIX_LOCK
) {
1492 tcg_gen_add_tl(s1
->T0
, s1
->tmp4
, s1
->T1
);
1493 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1494 s1
->mem_index
, ot
| MO_LE
);
1496 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1497 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1498 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1500 gen_op_update3_cc(s1
, s1
->tmp4
);
1501 set_cc_op(s1
, CC_OP_ADCB
+ ot
);
1504 gen_compute_eflags_c(s1
, s1
->tmp4
);
1505 if (s1
->prefix
& PREFIX_LOCK
) {
1506 tcg_gen_add_tl(s1
->T0
, s1
->T1
, s1
->tmp4
);
1507 tcg_gen_neg_tl(s1
->T0
, s1
->T0
);
1508 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1509 s1
->mem_index
, ot
| MO_LE
);
1511 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1512 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->tmp4
);
1513 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1515 gen_op_update3_cc(s1
, s1
->tmp4
);
1516 set_cc_op(s1
, CC_OP_SBBB
+ ot
);
1519 if (s1
->prefix
& PREFIX_LOCK
) {
1520 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1521 s1
->mem_index
, ot
| MO_LE
);
1523 tcg_gen_add_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1524 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1526 gen_op_update2_cc(s1
);
1527 set_cc_op(s1
, CC_OP_ADDB
+ ot
);
1530 if (s1
->prefix
& PREFIX_LOCK
) {
1531 tcg_gen_neg_tl(s1
->T0
, s1
->T1
);
1532 tcg_gen_atomic_fetch_add_tl(s1
->cc_srcT
, s1
->A0
, s1
->T0
,
1533 s1
->mem_index
, ot
| MO_LE
);
1534 tcg_gen_sub_tl(s1
->T0
, s1
->cc_srcT
, s1
->T1
);
1536 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1537 tcg_gen_sub_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1538 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1540 gen_op_update2_cc(s1
);
1541 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1545 if (s1
->prefix
& PREFIX_LOCK
) {
1546 tcg_gen_atomic_and_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1547 s1
->mem_index
, ot
| MO_LE
);
1549 tcg_gen_and_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1550 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1552 gen_op_update1_cc(s1
);
1553 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1556 if (s1
->prefix
& PREFIX_LOCK
) {
1557 tcg_gen_atomic_or_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1558 s1
->mem_index
, ot
| MO_LE
);
1560 tcg_gen_or_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1561 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1563 gen_op_update1_cc(s1
);
1564 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1567 if (s1
->prefix
& PREFIX_LOCK
) {
1568 tcg_gen_atomic_xor_fetch_tl(s1
->T0
, s1
->A0
, s1
->T1
,
1569 s1
->mem_index
, ot
| MO_LE
);
1571 tcg_gen_xor_tl(s1
->T0
, s1
->T0
, s1
->T1
);
1572 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1574 gen_op_update1_cc(s1
);
1575 set_cc_op(s1
, CC_OP_LOGICB
+ ot
);
1578 tcg_gen_mov_tl(cpu_cc_src
, s1
->T1
);
1579 tcg_gen_mov_tl(s1
->cc_srcT
, s1
->T0
);
1580 tcg_gen_sub_tl(cpu_cc_dst
, s1
->T0
, s1
->T1
);
1581 set_cc_op(s1
, CC_OP_SUBB
+ ot
);
1586 /* if d == OR_TMP0, it means memory operand (address in A0) */
1587 static void gen_inc(DisasContext
*s1
, MemOp ot
, int d
, int c
)
1589 if (s1
->prefix
& PREFIX_LOCK
) {
1591 /* Lock prefix when destination is not memory */
1592 gen_illegal_opcode(s1
);
1595 tcg_gen_movi_tl(s1
->T0
, c
> 0 ? 1 : -1);
1596 tcg_gen_atomic_add_fetch_tl(s1
->T0
, s1
->A0
, s1
->T0
,
1597 s1
->mem_index
, ot
| MO_LE
);
1600 gen_op_mov_v_reg(s1
, ot
, s1
->T0
, d
);
1602 gen_op_ld_v(s1
, ot
, s1
->T0
, s1
->A0
);
1604 tcg_gen_addi_tl(s1
->T0
, s1
->T0
, (c
> 0 ? 1 : -1));
1605 gen_op_st_rm_T0_A0(s1
, ot
, d
);
1608 gen_compute_eflags_c(s1
, cpu_cc_src
);
1609 tcg_gen_mov_tl(cpu_cc_dst
, s1
->T0
);
1610 set_cc_op(s1
, (c
> 0 ? CC_OP_INCB
: CC_OP_DECB
) + ot
);
1613 static void gen_shift_flags(DisasContext
*s
, MemOp ot
, TCGv result
,
1614 TCGv shm1
, TCGv count
, bool is_right
)
1616 TCGv_i32 z32
, s32
, oldop
;
1619 /* Store the results into the CC variables. If we know that the
1620 variable must be dead, store unconditionally. Otherwise we'll
1621 need to not disrupt the current contents. */
1622 z_tl
= tcg_const_tl(0);
1623 if (cc_op_live
[s
->cc_op
] & USES_CC_DST
) {
1624 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_dst
, count
, z_tl
,
1625 result
, cpu_cc_dst
);
1627 tcg_gen_mov_tl(cpu_cc_dst
, result
);
1629 if (cc_op_live
[s
->cc_op
] & USES_CC_SRC
) {
1630 tcg_gen_movcond_tl(TCG_COND_NE
, cpu_cc_src
, count
, z_tl
,
1633 tcg_gen_mov_tl(cpu_cc_src
, shm1
);
1635 tcg_temp_free(z_tl
);
1637 /* Get the two potential CC_OP values into temporaries. */
1638 tcg_gen_movi_i32(s
->tmp2_i32
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1639 if (s
->cc_op
== CC_OP_DYNAMIC
) {
1642 tcg_gen_movi_i32(s
->tmp3_i32
, s
->cc_op
);
1643 oldop
= s
->tmp3_i32
;
1646 /* Conditionally store the CC_OP value. */
1647 z32
= tcg_const_i32(0);
1648 s32
= tcg_temp_new_i32();
1649 tcg_gen_trunc_tl_i32(s32
, count
);
1650 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, s32
, z32
, s
->tmp2_i32
, oldop
);
1651 tcg_temp_free_i32(z32
);
1652 tcg_temp_free_i32(s32
);
1654 /* The CC_OP value is no longer predictable. */
1655 set_cc_op(s
, CC_OP_DYNAMIC
);
1658 static void gen_shift_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1659 int is_right
, int is_arith
)
1661 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1664 if (op1
== OR_TMP0
) {
1665 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1667 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1670 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1671 tcg_gen_subi_tl(s
->tmp0
, s
->T1
, 1);
1675 gen_exts(ot
, s
->T0
);
1676 tcg_gen_sar_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1677 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
1679 gen_extu(ot
, s
->T0
);
1680 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1681 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
1684 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1685 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
1689 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1691 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, s
->T1
, is_right
);
1694 static void gen_shift_rm_im(DisasContext
*s
, MemOp ot
, int op1
, int op2
,
1695 int is_right
, int is_arith
)
1697 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1701 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1703 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1709 gen_exts(ot
, s
->T0
);
1710 tcg_gen_sari_tl(s
->tmp4
, s
->T0
, op2
- 1);
1711 tcg_gen_sari_tl(s
->T0
, s
->T0
, op2
);
1713 gen_extu(ot
, s
->T0
);
1714 tcg_gen_shri_tl(s
->tmp4
, s
->T0
, op2
- 1);
1715 tcg_gen_shri_tl(s
->T0
, s
->T0
, op2
);
1718 tcg_gen_shli_tl(s
->tmp4
, s
->T0
, op2
- 1);
1719 tcg_gen_shli_tl(s
->T0
, s
->T0
, op2
);
1724 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1726 /* update eflags if non zero shift */
1728 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
1729 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
1730 set_cc_op(s
, (is_right
? CC_OP_SARB
: CC_OP_SHLB
) + ot
);
1734 static void gen_rot_rm_T1(DisasContext
*s
, MemOp ot
, int op1
, int is_right
)
1736 target_ulong mask
= (ot
== MO_64
? 0x3f : 0x1f);
1740 if (op1
== OR_TMP0
) {
1741 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1743 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1746 tcg_gen_andi_tl(s
->T1
, s
->T1
, mask
);
1750 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1751 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
1752 tcg_gen_muli_tl(s
->T0
, s
->T0
, 0x01010101);
1755 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1756 tcg_gen_deposit_tl(s
->T0
, s
->T0
, s
->T0
, 16, 16);
1759 #ifdef TARGET_X86_64
1761 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1762 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
1764 tcg_gen_rotr_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1766 tcg_gen_rotl_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
1768 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1773 tcg_gen_rotr_tl(s
->T0
, s
->T0
, s
->T1
);
1775 tcg_gen_rotl_tl(s
->T0
, s
->T0
, s
->T1
);
1781 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1783 /* We'll need the flags computed into CC_SRC. */
1784 gen_compute_eflags(s
);
1786 /* The value that was "rotated out" is now present at the other end
1787 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1788 since we've computed the flags into CC_SRC, these variables are
1791 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1792 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1793 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1795 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1796 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1798 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1799 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1801 /* Now conditionally store the new CC_OP value. If the shift count
1802 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1803 Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
1804 exactly as we computed above. */
1805 t0
= tcg_const_i32(0);
1806 t1
= tcg_temp_new_i32();
1807 tcg_gen_trunc_tl_i32(t1
, s
->T1
);
1808 tcg_gen_movi_i32(s
->tmp2_i32
, CC_OP_ADCOX
);
1809 tcg_gen_movi_i32(s
->tmp3_i32
, CC_OP_EFLAGS
);
1810 tcg_gen_movcond_i32(TCG_COND_NE
, cpu_cc_op
, t1
, t0
,
1811 s
->tmp2_i32
, s
->tmp3_i32
);
1812 tcg_temp_free_i32(t0
);
1813 tcg_temp_free_i32(t1
);
1815 /* The CC_OP value is no longer predictable. */
1816 set_cc_op(s
, CC_OP_DYNAMIC
);
1819 static void gen_rot_rm_im(DisasContext
*s
, MemOp ot
, int op1
, int op2
,
1822 int mask
= (ot
== MO_64
? 0x3f : 0x1f);
1826 if (op1
== OR_TMP0
) {
1827 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1829 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1835 #ifdef TARGET_X86_64
1837 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
1839 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1841 tcg_gen_rotli_i32(s
->tmp2_i32
, s
->tmp2_i32
, op2
);
1843 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
1848 tcg_gen_rotri_tl(s
->T0
, s
->T0
, op2
);
1850 tcg_gen_rotli_tl(s
->T0
, s
->T0
, op2
);
1861 shift
= mask
+ 1 - shift
;
1863 gen_extu(ot
, s
->T0
);
1864 tcg_gen_shli_tl(s
->tmp0
, s
->T0
, shift
);
1865 tcg_gen_shri_tl(s
->T0
, s
->T0
, mask
+ 1 - shift
);
1866 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
1872 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1875 /* Compute the flags into CC_SRC. */
1876 gen_compute_eflags(s
);
1878 /* The value that was "rotated out" is now present at the other end
1879 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1880 since we've computed the flags into CC_SRC, these variables are
1883 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
- 1);
1884 tcg_gen_shri_tl(cpu_cc_dst
, s
->T0
, mask
);
1885 tcg_gen_andi_tl(cpu_cc_dst
, cpu_cc_dst
, 1);
1887 tcg_gen_shri_tl(cpu_cc_src2
, s
->T0
, mask
);
1888 tcg_gen_andi_tl(cpu_cc_dst
, s
->T0
, 1);
1890 tcg_gen_andi_tl(cpu_cc_src2
, cpu_cc_src2
, 1);
1891 tcg_gen_xor_tl(cpu_cc_src2
, cpu_cc_src2
, cpu_cc_dst
);
1892 set_cc_op(s
, CC_OP_ADCOX
);
1896 /* XXX: add faster immediate = 1 case */
1897 static void gen_rotc_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1900 gen_compute_eflags(s
);
1901 assert(s
->cc_op
== CC_OP_EFLAGS
);
1905 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1907 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1912 gen_helper_rcrb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1915 gen_helper_rcrw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1918 gen_helper_rcrl(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1920 #ifdef TARGET_X86_64
1922 gen_helper_rcrq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1931 gen_helper_rclb(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1934 gen_helper_rclw(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1937 gen_helper_rcll(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1939 #ifdef TARGET_X86_64
1941 gen_helper_rclq(s
->T0
, cpu_env
, s
->T0
, s
->T1
);
1949 gen_op_st_rm_T0_A0(s
, ot
, op1
);
1952 /* XXX: add faster immediate case */
1953 static void gen_shiftd_rm_T1(DisasContext
*s
, MemOp ot
, int op1
,
1954 bool is_right
, TCGv count_in
)
1956 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1960 if (op1
== OR_TMP0
) {
1961 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1963 gen_op_mov_v_reg(s
, ot
, s
->T0
, op1
);
1966 count
= tcg_temp_new();
1967 tcg_gen_andi_tl(count
, count_in
, mask
);
1971 /* Note: we implement the Intel behaviour for shift count > 16.
1972 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1973 portion by constructing it as a 32-bit value. */
1975 tcg_gen_deposit_tl(s
->tmp0
, s
->T0
, s
->T1
, 16, 16);
1976 tcg_gen_mov_tl(s
->T1
, s
->T0
);
1977 tcg_gen_mov_tl(s
->T0
, s
->tmp0
);
1979 tcg_gen_deposit_tl(s
->T1
, s
->T0
, s
->T1
, 16, 16);
1982 * If TARGET_X86_64 defined then fall through into MO_32 case,
1983 * otherwise fall through default case.
1986 #ifdef TARGET_X86_64
1987 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1988 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1990 tcg_gen_concat_tl_i64(s
->T0
, s
->T0
, s
->T1
);
1991 tcg_gen_shr_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1992 tcg_gen_shr_i64(s
->T0
, s
->T0
, count
);
1994 tcg_gen_concat_tl_i64(s
->T0
, s
->T1
, s
->T0
);
1995 tcg_gen_shl_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1996 tcg_gen_shl_i64(s
->T0
, s
->T0
, count
);
1997 tcg_gen_shri_i64(s
->tmp0
, s
->tmp0
, 32);
1998 tcg_gen_shri_i64(s
->T0
, s
->T0
, 32);
2003 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
2005 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
2007 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
2008 tcg_gen_shr_tl(s
->T0
, s
->T0
, count
);
2009 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->tmp4
);
2011 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
2013 /* Only needed if count > 16, for Intel behaviour. */
2014 tcg_gen_subfi_tl(s
->tmp4
, 33, count
);
2015 tcg_gen_shr_tl(s
->tmp4
, s
->T1
, s
->tmp4
);
2016 tcg_gen_or_tl(s
->tmp0
, s
->tmp0
, s
->tmp4
);
2019 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
2020 tcg_gen_shl_tl(s
->T0
, s
->T0
, count
);
2021 tcg_gen_shr_tl(s
->T1
, s
->T1
, s
->tmp4
);
2023 tcg_gen_movi_tl(s
->tmp4
, 0);
2024 tcg_gen_movcond_tl(TCG_COND_EQ
, s
->T1
, count
, s
->tmp4
,
2026 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
2031 gen_op_st_rm_T0_A0(s
, ot
, op1
);
2033 gen_shift_flags(s
, ot
, s
->T0
, s
->tmp0
, count
, is_right
);
2034 tcg_temp_free(count
);
2037 static void gen_shift(DisasContext
*s1
, int op
, MemOp ot
, int d
, int s
)
2040 gen_op_mov_v_reg(s1
, ot
, s1
->T1
, s
);
2043 gen_rot_rm_T1(s1
, ot
, d
, 0);
2046 gen_rot_rm_T1(s1
, ot
, d
, 1);
2050 gen_shift_rm_T1(s1
, ot
, d
, 0, 0);
2053 gen_shift_rm_T1(s1
, ot
, d
, 1, 0);
2056 gen_shift_rm_T1(s1
, ot
, d
, 1, 1);
2059 gen_rotc_rm_T1(s1
, ot
, d
, 0);
2062 gen_rotc_rm_T1(s1
, ot
, d
, 1);
2067 static void gen_shifti(DisasContext
*s1
, int op
, MemOp ot
, int d
, int c
)
2071 gen_rot_rm_im(s1
, ot
, d
, c
, 0);
2074 gen_rot_rm_im(s1
, ot
, d
, c
, 1);
2078 gen_shift_rm_im(s1
, ot
, d
, c
, 0, 0);
2081 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 0);
2084 gen_shift_rm_im(s1
, ot
, d
, c
, 1, 1);
2087 /* currently not optimized */
2088 tcg_gen_movi_tl(s1
->T1
, c
);
2089 gen_shift(s1
, op
, ot
, d
, OR_TMP1
);
2094 #define X86_MAX_INSN_LENGTH 15
2096 static uint64_t advance_pc(CPUX86State
*env
, DisasContext
*s
, int num_bytes
)
2098 uint64_t pc
= s
->pc
;
2100 /* This is a subsequent insn that crosses a page boundary. */
2101 if (s
->base
.num_insns
> 1 &&
2102 !is_same_page(&s
->base
, s
->pc
+ num_bytes
- 1)) {
2103 siglongjmp(s
->jmpbuf
, 2);
2107 if (unlikely(cur_insn_len(s
) > X86_MAX_INSN_LENGTH
)) {
2108 /* If the instruction's 16th byte is on a different page than the 1st, a
2109 * page fault on the second page wins over the general protection fault
2110 * caused by the instruction being too long.
2111 * This can happen even if the operand is only one byte long!
2113 if (((s
->pc
- 1) ^ (pc
- 1)) & TARGET_PAGE_MASK
) {
2114 volatile uint8_t unused
=
2115 cpu_ldub_code(env
, (s
->pc
- 1) & TARGET_PAGE_MASK
);
2118 siglongjmp(s
->jmpbuf
, 1);
2124 static inline uint8_t x86_ldub_code(CPUX86State
*env
, DisasContext
*s
)
2126 return translator_ldub(env
, &s
->base
, advance_pc(env
, s
, 1));
2129 static inline int16_t x86_ldsw_code(CPUX86State
*env
, DisasContext
*s
)
2131 return translator_lduw(env
, &s
->base
, advance_pc(env
, s
, 2));
2134 static inline uint16_t x86_lduw_code(CPUX86State
*env
, DisasContext
*s
)
2136 return translator_lduw(env
, &s
->base
, advance_pc(env
, s
, 2));
2139 static inline uint32_t x86_ldl_code(CPUX86State
*env
, DisasContext
*s
)
2141 return translator_ldl(env
, &s
->base
, advance_pc(env
, s
, 4));
2144 #ifdef TARGET_X86_64
2145 static inline uint64_t x86_ldq_code(CPUX86State
*env
, DisasContext
*s
)
2147 return translator_ldq(env
, &s
->base
, advance_pc(env
, s
, 8));
2151 /* Decompose an address. */
2153 typedef struct AddressParts
{
2161 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
2164 int def_seg
, base
, index
, scale
, mod
, rm
;
2173 mod
= (modrm
>> 6) & 3;
2175 base
= rm
| REX_B(s
);
2178 /* Normally filtered out earlier, but including this path
2179 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
2188 int code
= x86_ldub_code(env
, s
);
2189 scale
= (code
>> 6) & 3;
2190 index
= ((code
>> 3) & 7) | REX_X(s
);
2192 index
= -1; /* no index */
2194 base
= (code
& 7) | REX_B(s
);
2200 if ((base
& 7) == 5) {
2202 disp
= (int32_t)x86_ldl_code(env
, s
);
2203 if (CODE64(s
) && !havesib
) {
2205 disp
+= s
->pc
+ s
->rip_offset
;
2210 disp
= (int8_t)x86_ldub_code(env
, s
);
2214 disp
= (int32_t)x86_ldl_code(env
, s
);
2218 /* For correct popl handling with esp. */
2219 if (base
== R_ESP
&& s
->popl_esp_hack
) {
2220 disp
+= s
->popl_esp_hack
;
2222 if (base
== R_EBP
|| base
== R_ESP
) {
2231 disp
= x86_lduw_code(env
, s
);
2234 } else if (mod
== 1) {
2235 disp
= (int8_t)x86_ldub_code(env
, s
);
2237 disp
= (int16_t)x86_lduw_code(env
, s
);
2281 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
2284 /* Compute the address, with a minimum number of TCG ops. */
2285 static TCGv
gen_lea_modrm_1(DisasContext
*s
, AddressParts a
, bool is_vsib
)
2289 if (a
.index
>= 0 && !is_vsib
) {
2291 ea
= cpu_regs
[a
.index
];
2293 tcg_gen_shli_tl(s
->A0
, cpu_regs
[a
.index
], a
.scale
);
2297 tcg_gen_add_tl(s
->A0
, ea
, cpu_regs
[a
.base
]);
2300 } else if (a
.base
>= 0) {
2301 ea
= cpu_regs
[a
.base
];
2304 if (TARGET_TB_PCREL
&& a
.base
== -2) {
2305 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
2306 tcg_gen_addi_tl(s
->A0
, cpu_eip
, a
.disp
- s
->pc_save
);
2308 tcg_gen_movi_tl(s
->A0
, a
.disp
);
2311 } else if (a
.disp
!= 0) {
2312 tcg_gen_addi_tl(s
->A0
, ea
, a
.disp
);
2319 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2321 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2322 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2323 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
2326 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
2328 (void)gen_lea_modrm_0(env
, s
, modrm
);
2331 /* Used for BNDCL, BNDCU, BNDCN. */
2332 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2333 TCGCond cond
, TCGv_i64 bndv
)
2335 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
2336 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
2338 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
2340 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
2342 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
2343 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
2344 gen_helper_bndck(cpu_env
, s
->tmp2_i32
);
2347 /* used for LEA and MOV AX, mem */
2348 static void gen_add_A0_ds_seg(DisasContext
*s
)
2350 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, R_DS
, s
->override
);
2353 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2355 static void gen_ldst_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
,
2356 MemOp ot
, int reg
, int is_store
)
2360 mod
= (modrm
>> 6) & 3;
2361 rm
= (modrm
& 7) | REX_B(s
);
2365 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2366 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
2368 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
2370 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2373 gen_lea_modrm(env
, s
, modrm
);
2376 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
2377 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
2379 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
2381 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2386 static target_ulong
insn_get_addr(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2392 ret
= x86_ldub_code(env
, s
);
2395 ret
= x86_lduw_code(env
, s
);
2398 ret
= x86_ldl_code(env
, s
);
2400 #ifdef TARGET_X86_64
2402 ret
= x86_ldq_code(env
, s
);
2406 g_assert_not_reached();
2411 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2417 ret
= x86_ldub_code(env
, s
);
2420 ret
= x86_lduw_code(env
, s
);
2423 #ifdef TARGET_X86_64
2426 ret
= x86_ldl_code(env
, s
);
2434 static target_long
insn_get_signed(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
2440 ret
= (int8_t) x86_ldub_code(env
, s
);
2443 ret
= (int16_t) x86_lduw_code(env
, s
);
2446 ret
= (int32_t) x86_ldl_code(env
, s
);
2448 #ifdef TARGET_X86_64
2450 ret
= x86_ldq_code(env
, s
);
2454 g_assert_not_reached();
2459 static inline int insn_const_size(MemOp ot
)
2468 static void gen_jcc(DisasContext
*s
, int b
, int diff
)
2470 TCGLabel
*l1
= gen_new_label();
2473 gen_jmp_rel_csize(s
, 0, 1);
2475 gen_jmp_rel(s
, s
->dflag
, diff
, 0);
2478 static void gen_cmovcc1(CPUX86State
*env
, DisasContext
*s
, MemOp ot
, int b
,
2483 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
2485 cc
= gen_prepare_cc(s
, b
, s
->T1
);
2486 if (cc
.mask
!= -1) {
2487 TCGv t0
= tcg_temp_new();
2488 tcg_gen_andi_tl(t0
, cc
.reg
, cc
.mask
);
2492 cc
.reg2
= tcg_const_tl(cc
.imm
);
2495 tcg_gen_movcond_tl(cc
.cond
, s
->T0
, cc
.reg
, cc
.reg2
,
2496 s
->T0
, cpu_regs
[reg
]);
2497 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
2499 if (cc
.mask
!= -1) {
2500 tcg_temp_free(cc
.reg
);
2503 tcg_temp_free(cc
.reg2
);
2507 static inline void gen_op_movl_T0_seg(DisasContext
*s
, X86Seg seg_reg
)
2509 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
2510 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2513 static inline void gen_op_movl_seg_T0_vm(DisasContext
*s
, X86Seg seg_reg
)
2515 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
2516 tcg_gen_st32_tl(s
->T0
, cpu_env
,
2517 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
2518 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], s
->T0
, 4);
2521 /* move T0 to seg_reg and compute if the CPU state may change. Never
2522 call this function with seg_reg == R_CS */
2523 static void gen_movl_seg_T0(DisasContext
*s
, X86Seg seg_reg
)
2525 if (PE(s
) && !VM86(s
)) {
2526 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
2527 gen_helper_load_seg(cpu_env
, tcg_const_i32(seg_reg
), s
->tmp2_i32
);
2528 /* abort translation because the addseg value may change or
2529 because ss32 may change. For R_SS, translation must always
2530 stop as a special handling must be done to disable hardware
2531 interrupts for the next instruction */
2532 if (seg_reg
== R_SS
) {
2533 s
->base
.is_jmp
= DISAS_EOB_INHIBIT_IRQ
;
2534 } else if (CODE32(s
) && seg_reg
< R_FS
) {
2535 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
2538 gen_op_movl_seg_T0_vm(s
, seg_reg
);
2539 if (seg_reg
== R_SS
) {
2540 s
->base
.is_jmp
= DISAS_EOB_INHIBIT_IRQ
;
2545 static void gen_svm_check_intercept(DisasContext
*s
, uint32_t type
)
2547 /* no SVM activated; fast case */
2548 if (likely(!GUEST(s
))) {
2551 gen_helper_svm_check_intercept(cpu_env
, tcg_constant_i32(type
));
2554 static inline void gen_stack_update(DisasContext
*s
, int addend
)
2556 gen_op_add_reg_im(s
, mo_stacksize(s
), R_ESP
, addend
);
2559 /* Generate a push. It depends on ss32, addseg and dflag. */
2560 static void gen_push_v(DisasContext
*s
, TCGv val
)
2562 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2563 MemOp a_ot
= mo_stacksize(s
);
2564 int size
= 1 << d_ot
;
2565 TCGv new_esp
= s
->A0
;
2567 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_ESP
], size
);
2572 tcg_gen_mov_tl(new_esp
, s
->A0
);
2574 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2577 gen_op_st_v(s
, d_ot
, val
, s
->A0
);
2578 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, new_esp
);
2581 /* two step pop is necessary for precise exceptions */
2582 static MemOp
gen_pop_T0(DisasContext
*s
)
2584 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2586 gen_lea_v_seg(s
, mo_stacksize(s
), cpu_regs
[R_ESP
], R_SS
, -1);
2587 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2592 static inline void gen_pop_update(DisasContext
*s
, MemOp ot
)
2594 gen_stack_update(s
, 1 << ot
);
2597 static inline void gen_stack_A0(DisasContext
*s
)
2599 gen_lea_v_seg(s
, SS32(s
) ? MO_32
: MO_16
, cpu_regs
[R_ESP
], R_SS
, -1);
2602 static void gen_pusha(DisasContext
*s
)
2604 MemOp s_ot
= SS32(s
) ? MO_32
: MO_16
;
2605 MemOp d_ot
= s
->dflag
;
2606 int size
= 1 << d_ot
;
2609 for (i
= 0; i
< 8; i
++) {
2610 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
2611 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2612 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], s
->A0
);
2615 gen_stack_update(s
, -8 * size
);
2618 static void gen_popa(DisasContext
*s
)
2620 MemOp s_ot
= SS32(s
) ? MO_32
: MO_16
;
2621 MemOp d_ot
= s
->dflag
;
2622 int size
= 1 << d_ot
;
2625 for (i
= 0; i
< 8; i
++) {
2626 /* ESP is not reloaded */
2627 if (7 - i
== R_ESP
) {
2630 tcg_gen_addi_tl(s
->A0
, cpu_regs
[R_ESP
], i
* size
);
2631 gen_lea_v_seg(s
, s_ot
, s
->A0
, R_SS
, -1);
2632 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2633 gen_op_mov_reg_v(s
, d_ot
, 7 - i
, s
->T0
);
2636 gen_stack_update(s
, 8 * size
);
2639 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2641 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2642 MemOp a_ot
= CODE64(s
) ? MO_64
: SS32(s
) ? MO_32
: MO_16
;
2643 int size
= 1 << d_ot
;
2645 /* Push BP; compute FrameTemp into T1. */
2646 tcg_gen_subi_tl(s
->T1
, cpu_regs
[R_ESP
], size
);
2647 gen_lea_v_seg(s
, a_ot
, s
->T1
, R_SS
, -1);
2648 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], s
->A0
);
2654 /* Copy level-1 pointers from the previous frame. */
2655 for (i
= 1; i
< level
; ++i
) {
2656 tcg_gen_subi_tl(s
->A0
, cpu_regs
[R_EBP
], size
* i
);
2657 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2658 gen_op_ld_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2660 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* i
);
2661 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2662 gen_op_st_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2665 /* Push the current FrameTemp as the last level. */
2666 tcg_gen_subi_tl(s
->A0
, s
->T1
, size
* level
);
2667 gen_lea_v_seg(s
, a_ot
, s
->A0
, R_SS
, -1);
2668 gen_op_st_v(s
, d_ot
, s
->T1
, s
->A0
);
2671 /* Copy the FrameTemp value to EBP. */
2672 gen_op_mov_reg_v(s
, a_ot
, R_EBP
, s
->T1
);
2674 /* Compute the final value of ESP. */
2675 tcg_gen_subi_tl(s
->T1
, s
->T1
, esp_addend
+ size
* level
);
2676 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2679 static void gen_leave(DisasContext
*s
)
2681 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2682 MemOp a_ot
= mo_stacksize(s
);
2684 gen_lea_v_seg(s
, a_ot
, cpu_regs
[R_EBP
], R_SS
, -1);
2685 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2687 tcg_gen_addi_tl(s
->T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2689 gen_op_mov_reg_v(s
, d_ot
, R_EBP
, s
->T0
);
2690 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2693 /* Similarly, except that the assumption here is that we don't decode
2694 the instruction at all -- either a missing opcode, an unimplemented
2695 feature, or just a bogus instruction stream. */
2696 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2698 gen_illegal_opcode(s
);
2700 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2701 FILE *logfile
= qemu_log_trylock();
2703 target_ulong pc
= s
->base
.pc_next
, end
= s
->pc
;
2705 fprintf(logfile
, "ILLOPC: " TARGET_FMT_lx
":", pc
);
2706 for (; pc
< end
; ++pc
) {
2707 fprintf(logfile
, " %02x", cpu_ldub_code(env
, pc
));
2709 fprintf(logfile
, "\n");
2710 qemu_log_unlock(logfile
);
2715 /* an interrupt is different from an exception because of the
2717 static void gen_interrupt(DisasContext
*s
, int intno
)
2719 gen_update_cc_op(s
);
2720 gen_update_eip_cur(s
);
2721 gen_helper_raise_interrupt(cpu_env
, tcg_constant_i32(intno
),
2722 cur_insn_len_i32(s
));
2723 s
->base
.is_jmp
= DISAS_NORETURN
;
2726 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2728 if ((s
->flags
& mask
) == 0) {
2729 TCGv_i32 t
= tcg_temp_new_i32();
2730 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2731 tcg_gen_ori_i32(t
, t
, mask
);
2732 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2733 tcg_temp_free_i32(t
);
2738 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2740 if (s
->flags
& mask
) {
2741 TCGv_i32 t
= tcg_temp_new_i32();
2742 tcg_gen_ld_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2743 tcg_gen_andi_i32(t
, t
, ~mask
);
2744 tcg_gen_st_i32(t
, cpu_env
, offsetof(CPUX86State
, hflags
));
2745 tcg_temp_free_i32(t
);
2750 /* Clear BND registers during legacy branches. */
2751 static void gen_bnd_jmp(DisasContext
*s
)
2753 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2754 and if the BNDREGs are known to be in use (non-zero) already.
2755 The helper itself will check BNDPRESERVE at runtime. */
2756 if ((s
->prefix
& PREFIX_REPNZ
) == 0
2757 && (s
->flags
& HF_MPX_EN_MASK
) != 0
2758 && (s
->flags
& HF_MPX_IU_MASK
) != 0) {
2759 gen_helper_bnd_jmp(cpu_env
);
2763 /* Generate an end of block. Trace exception is also generated if needed.
2764 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2765 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2766 S->TF. This is used by the syscall/sysret insns. */
2768 do_gen_eob_worker(DisasContext
*s
, bool inhibit
, bool recheck_tf
, bool jr
)
2770 gen_update_cc_op(s
);
2772 /* If several instructions disable interrupts, only the first does it. */
2773 if (inhibit
&& !(s
->flags
& HF_INHIBIT_IRQ_MASK
)) {
2774 gen_set_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2776 gen_reset_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2779 if (s
->base
.tb
->flags
& HF_RF_MASK
) {
2780 gen_helper_reset_rf(cpu_env
);
2783 gen_helper_rechecking_single_step(cpu_env
);
2784 tcg_gen_exit_tb(NULL
, 0);
2785 } else if (s
->flags
& HF_TF_MASK
) {
2786 gen_helper_single_step(cpu_env
);
2788 tcg_gen_lookup_and_goto_ptr();
2790 tcg_gen_exit_tb(NULL
, 0);
2792 s
->base
.is_jmp
= DISAS_NORETURN
;
2796 gen_eob_worker(DisasContext
*s
, bool inhibit
, bool recheck_tf
)
2798 do_gen_eob_worker(s
, inhibit
, recheck_tf
, false);
2802 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2803 static void gen_eob_inhibit_irq(DisasContext
*s
, bool inhibit
)
2805 gen_eob_worker(s
, inhibit
, false);
2808 /* End of block, resetting the inhibit irq flag. */
2809 static void gen_eob(DisasContext
*s
)
2811 gen_eob_worker(s
, false, false);
2814 /* Jump to register */
2815 static void gen_jr(DisasContext
*s
)
2817 do_gen_eob_worker(s
, false, false, true);
2820 /* Jump to eip+diff, truncating the result to OT. */
2821 static void gen_jmp_rel(DisasContext
*s
, MemOp ot
, int diff
, int tb_num
)
2823 bool use_goto_tb
= s
->jmp_opt
;
2824 target_ulong mask
= -1;
2825 target_ulong new_pc
= s
->pc
+ diff
;
2826 target_ulong new_eip
= new_pc
- s
->cs_base
;
2828 /* In 64-bit mode, operand size is fixed at 64 bits. */
2832 if (TARGET_TB_PCREL
&& CODE32(s
)) {
2833 use_goto_tb
= false;
2841 gen_update_cc_op(s
);
2842 set_cc_op(s
, CC_OP_DYNAMIC
);
2844 if (TARGET_TB_PCREL
) {
2845 tcg_gen_addi_tl(cpu_eip
, cpu_eip
, new_pc
- s
->pc_save
);
2847 * If we can prove the branch does not leave the page and we have
2848 * no extra masking to apply (data16 branch in code32, see above),
2849 * then we have also proven that the addition does not wrap.
2851 if (!use_goto_tb
|| !is_same_page(&s
->base
, new_pc
)) {
2852 tcg_gen_andi_tl(cpu_eip
, cpu_eip
, mask
);
2853 use_goto_tb
= false;
2858 translator_use_goto_tb(&s
->base
, new_eip
+ s
->cs_base
)) {
2859 /* jump to same page: we can use a direct jump */
2860 tcg_gen_goto_tb(tb_num
);
2861 if (!TARGET_TB_PCREL
) {
2862 tcg_gen_movi_tl(cpu_eip
, new_eip
);
2864 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
2865 s
->base
.is_jmp
= DISAS_NORETURN
;
2867 if (!TARGET_TB_PCREL
) {
2868 tcg_gen_movi_tl(cpu_eip
, new_eip
);
2871 gen_jr(s
); /* jump to another page */
2873 gen_eob(s
); /* exit to main loop */
2878 /* Jump to eip+diff, truncating to the current code size. */
2879 static void gen_jmp_rel_csize(DisasContext
*s
, int diff
, int tb_num
)
2881 /* CODE64 ignores the OT argument, so we need not consider it. */
2882 gen_jmp_rel(s
, CODE32(s
) ? MO_32
: MO_16
, diff
, tb_num
);
2885 static inline void gen_ldq_env_A0(DisasContext
*s
, int offset
)
2887 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEUQ
);
2888 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
);
2891 static inline void gen_stq_env_A0(DisasContext
*s
, int offset
)
2893 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
);
2894 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEUQ
);
2897 static inline void gen_ldo_env_A0(DisasContext
*s
, int offset
, bool align
)
2899 int mem_index
= s
->mem_index
;
2900 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2901 MO_LEUQ
| (align
? MO_ALIGN_16
: 0));
2902 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2903 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2904 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2905 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2908 static inline void gen_sto_env_A0(DisasContext
*s
, int offset
, bool align
)
2910 int mem_index
= s
->mem_index
;
2911 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(0)));
2912 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2913 MO_LEUQ
| (align
? MO_ALIGN_16
: 0));
2914 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2915 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(XMMReg
, XMM_Q(1)));
2916 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2919 static void gen_ldy_env_A0(DisasContext
*s
, int offset
, bool align
)
2921 int mem_index
= s
->mem_index
;
2922 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, mem_index
,
2923 MO_LEUQ
| (align
? MO_ALIGN_32
: 0));
2924 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(0)));
2925 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 8);
2926 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2927 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(1)));
2929 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 16);
2930 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2931 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(2)));
2932 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 24);
2933 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->tmp0
, mem_index
, MO_LEUQ
);
2934 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, offset
+ offsetof(YMMReg
, YMM_Q(3)));
2937 static inline void gen_op_movo(DisasContext
*s
, int d_offset
, int s_offset
)
2939 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, s_offset
+ offsetof(XMMReg
, XMM_Q(0)));
2940 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, d_offset
+ offsetof(XMMReg
, XMM_Q(0)));
2941 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, s_offset
+ offsetof(XMMReg
, XMM_Q(1)));
2942 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, d_offset
+ offsetof(XMMReg
, XMM_Q(1)));
2945 static inline void gen_op_movq(DisasContext
*s
, int d_offset
, int s_offset
)
2947 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
, s_offset
);
2948 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, d_offset
);
2951 static inline void gen_op_movl(DisasContext
*s
, int d_offset
, int s_offset
)
2953 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
, s_offset
);
2954 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, d_offset
);
2957 static inline void gen_op_movq_env_0(DisasContext
*s
, int d_offset
)
2959 tcg_gen_movi_i64(s
->tmp1_i64
, 0);
2960 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
, d_offset
);
2963 #define ZMM_OFFSET(reg) offsetof(CPUX86State, xmm_regs[reg])
2964 #define XMM_OFFSET(reg) offsetof(CPUX86State, xmm_regs[reg].ZMM_X(0))
2966 typedef void (*SSEFunc_i_ep
)(TCGv_i32 val
, TCGv_ptr env
, TCGv_ptr reg
);
2967 typedef void (*SSEFunc_l_ep
)(TCGv_i64 val
, TCGv_ptr env
, TCGv_ptr reg
);
2968 typedef void (*SSEFunc_0_epi
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i32 val
);
2969 typedef void (*SSEFunc_0_epl
)(TCGv_ptr env
, TCGv_ptr reg
, TCGv_i64 val
);
2970 typedef void (*SSEFunc_0_epp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
);
2971 typedef void (*SSEFunc_0_eppp
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2973 typedef void (*SSEFunc_0_eppi
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
2975 typedef void (*SSEFunc_0_ppi
)(TCGv_ptr reg_a
, TCGv_ptr reg_b
, TCGv_i32 val
);
2976 typedef void (*SSEFunc_0_eppt
)(TCGv_ptr env
, TCGv_ptr reg_a
, TCGv_ptr reg_b
,
/* NOTE(review): 'first' and 'limit' are not referenced anywhere in the
 * visible portion of this file and do not appear in upstream QEMU —
 * they look like leftover debug/instrumentation state.  Confirm against
 * the rest of the file and remove if truly unused. */
static bool first = true; static unsigned long limit;
2980 #include "decode-new.h"
2981 #include "emit.c.inc"
2982 #include "decode-new.c.inc"
2984 #define SSE_OPF_CMP (1 << 1) /* does not write for first operand */
2985 #define SSE_OPF_SPECIAL (1 << 3) /* magic */
2986 #define SSE_OPF_3DNOW (1 << 4) /* 3DNow! instruction */
2987 #define SSE_OPF_MMX (1 << 5) /* MMX/integer/AVX2 instruction */
2988 #define SSE_OPF_SCALAR (1 << 6) /* Has SSE scalar variants */
2989 #define SSE_OPF_SHUF (1 << 9) /* pshufx/shufpx */
2991 #define OP(op, flags, a, b, c, d) \
2992 {flags, {{.op = a}, {.op = b}, {.op = c}, {.op = d} } }
2994 #define MMX_OP(x) OP(op1, SSE_OPF_MMX, \
2995 gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm, NULL, NULL)
2997 #define SSE_FOP(name) OP(op1, SSE_OPF_SCALAR, \
2998 gen_helper_##name##ps##_xmm, gen_helper_##name##pd##_xmm, \
2999 gen_helper_##name##ss, gen_helper_##name##sd)
3000 #define SSE_OP(sname, dname, op, flags) OP(op, flags, \
3001 gen_helper_##sname##_xmm, gen_helper_##dname##_xmm, NULL, NULL)
3003 typedef union SSEFuncs
{
3006 SSEFunc_0_eppt op1t
;
3009 struct SSEOpHelper_table1
{
3014 #define SSE_3DNOW { SSE_OPF_3DNOW }
3015 #define SSE_SPECIAL { SSE_OPF_SPECIAL }
3017 static const struct SSEOpHelper_table1 sse_op_table1
[256] = {
3018 /* 3DNow! extensions */
3019 [0x0e] = SSE_SPECIAL
, /* femms */
3020 [0x0f] = SSE_3DNOW
, /* pf... (sse_op_table5) */
3021 /* pure SSE operations */
3022 [0x10] = SSE_SPECIAL
, /* movups, movupd, movss, movsd */
3023 [0x11] = SSE_SPECIAL
, /* movups, movupd, movss, movsd */
3024 [0x12] = SSE_SPECIAL
, /* movlps, movlpd, movsldup, movddup */
3025 [0x13] = SSE_SPECIAL
, /* movlps, movlpd */
3026 [0x14] = SSE_OP(punpckldq
, punpcklqdq
, op1
, 0), /* unpcklps, unpcklpd */
3027 [0x15] = SSE_OP(punpckhdq
, punpckhqdq
, op1
, 0), /* unpckhps, unpckhpd */
3028 [0x16] = SSE_SPECIAL
, /* movhps, movhpd, movshdup */
3029 [0x17] = SSE_SPECIAL
, /* movhps, movhpd */
3031 [0x28] = SSE_SPECIAL
, /* movaps, movapd */
3032 [0x29] = SSE_SPECIAL
, /* movaps, movapd */
3033 [0x2a] = SSE_SPECIAL
, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
3034 [0x2b] = SSE_SPECIAL
, /* movntps, movntpd, movntss, movntsd */
3035 [0x2c] = SSE_SPECIAL
, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
3036 [0x2d] = SSE_SPECIAL
, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
3037 [0x2e] = OP(op1
, SSE_OPF_CMP
| SSE_OPF_SCALAR
,
3038 gen_helper_ucomiss
, gen_helper_ucomisd
, NULL
, NULL
),
3039 [0x2f] = OP(op1
, SSE_OPF_CMP
| SSE_OPF_SCALAR
,
3040 gen_helper_comiss
, gen_helper_comisd
, NULL
, NULL
),
3041 [0x50] = SSE_SPECIAL
, /* movmskps, movmskpd */
3042 [0x51] = OP(op1
, SSE_OPF_SCALAR
,
3043 gen_helper_sqrtps_xmm
, gen_helper_sqrtpd_xmm
,
3044 gen_helper_sqrtss
, gen_helper_sqrtsd
),
3045 [0x52] = OP(op1
, SSE_OPF_SCALAR
,
3046 gen_helper_rsqrtps_xmm
, NULL
, gen_helper_rsqrtss
, NULL
),
3047 [0x53] = OP(op1
, SSE_OPF_SCALAR
,
3048 gen_helper_rcpps_xmm
, NULL
, gen_helper_rcpss
, NULL
),
3049 [0x54] = SSE_OP(pand
, pand
, op1
, 0), /* andps, andpd */
3050 [0x55] = SSE_OP(pandn
, pandn
, op1
, 0), /* andnps, andnpd */
3051 [0x56] = SSE_OP(por
, por
, op1
, 0), /* orps, orpd */
3052 [0x57] = SSE_OP(pxor
, pxor
, op1
, 0), /* xorps, xorpd */
3053 [0x58] = SSE_FOP(add
),
3054 [0x59] = SSE_FOP(mul
),
3055 [0x5a] = OP(op1
, SSE_OPF_SCALAR
,
3056 gen_helper_cvtps2pd_xmm
, gen_helper_cvtpd2ps_xmm
,
3057 gen_helper_cvtss2sd
, gen_helper_cvtsd2ss
),
3059 gen_helper_cvtdq2ps_xmm
, gen_helper_cvtps2dq_xmm
,
3060 gen_helper_cvttps2dq_xmm
, NULL
),
3061 [0x5c] = SSE_FOP(sub
),
3062 [0x5d] = SSE_FOP(min
),
3063 [0x5e] = SSE_FOP(div
),
3064 [0x5f] = SSE_FOP(max
),
3066 [0xc2] = SSE_FOP(cmpeq
), /* sse_op_table4 */
3067 [0xc6] = SSE_OP(shufps
, shufpd
, op1i
, SSE_OPF_SHUF
),
3069 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
3070 [0x38] = SSE_SPECIAL
,
3071 [0x3a] = SSE_SPECIAL
,
3073 /* MMX ops and their SSE extensions */
3074 [0x60] = MMX_OP(punpcklbw
),
3075 [0x61] = MMX_OP(punpcklwd
),
3076 [0x62] = MMX_OP(punpckldq
),
3077 [0x63] = MMX_OP(packsswb
),
3078 [0x64] = MMX_OP(pcmpgtb
),
3079 [0x65] = MMX_OP(pcmpgtw
),
3080 [0x66] = MMX_OP(pcmpgtl
),
3081 [0x67] = MMX_OP(packuswb
),
3082 [0x68] = MMX_OP(punpckhbw
),
3083 [0x69] = MMX_OP(punpckhwd
),
3084 [0x6a] = MMX_OP(punpckhdq
),
3085 [0x6b] = MMX_OP(packssdw
),
3086 [0x6c] = OP(op1
, SSE_OPF_MMX
,
3087 NULL
, gen_helper_punpcklqdq_xmm
, NULL
, NULL
),
3088 [0x6d] = OP(op1
, SSE_OPF_MMX
,
3089 NULL
, gen_helper_punpckhqdq_xmm
, NULL
, NULL
),
3090 [0x6e] = SSE_SPECIAL
, /* movd mm, ea */
3091 [0x6f] = SSE_SPECIAL
, /* movq, movdqa, , movqdu */
3092 [0x70] = OP(op1i
, SSE_OPF_SHUF
| SSE_OPF_MMX
,
3093 gen_helper_pshufw_mmx
, gen_helper_pshufd_xmm
,
3094 gen_helper_pshufhw_xmm
, gen_helper_pshuflw_xmm
),
3095 [0x71] = SSE_SPECIAL
, /* shiftw */
3096 [0x72] = SSE_SPECIAL
, /* shiftd */
3097 [0x73] = SSE_SPECIAL
, /* shiftq */
3098 [0x74] = MMX_OP(pcmpeqb
),
3099 [0x75] = MMX_OP(pcmpeqw
),
3100 [0x76] = MMX_OP(pcmpeql
),
3101 [0x77] = SSE_SPECIAL
, /* emms */
3102 [0x78] = SSE_SPECIAL
, /* extrq_i, insertq_i (sse4a) */
3104 NULL
, gen_helper_extrq_r
, NULL
, gen_helper_insertq_r
),
3106 NULL
, gen_helper_haddpd_xmm
, NULL
, gen_helper_haddps_xmm
),
3108 NULL
, gen_helper_hsubpd_xmm
, NULL
, gen_helper_hsubps_xmm
),
3109 [0x7e] = SSE_SPECIAL
, /* movd, movd, , movq */
3110 [0x7f] = SSE_SPECIAL
, /* movq, movdqa, movdqu */
3111 [0xc4] = SSE_SPECIAL
, /* pinsrw */
3112 [0xc5] = SSE_SPECIAL
, /* pextrw */
3114 NULL
, gen_helper_addsubpd_xmm
, NULL
, gen_helper_addsubps_xmm
),
3115 [0xd1] = MMX_OP(psrlw
),
3116 [0xd2] = MMX_OP(psrld
),
3117 [0xd3] = MMX_OP(psrlq
),
3118 [0xd4] = MMX_OP(paddq
),
3119 [0xd5] = MMX_OP(pmullw
),
3120 [0xd6] = SSE_SPECIAL
,
3121 [0xd7] = SSE_SPECIAL
, /* pmovmskb */
3122 [0xd8] = MMX_OP(psubusb
),
3123 [0xd9] = MMX_OP(psubusw
),
3124 [0xda] = MMX_OP(pminub
),
3125 [0xdb] = MMX_OP(pand
),
3126 [0xdc] = MMX_OP(paddusb
),
3127 [0xdd] = MMX_OP(paddusw
),
3128 [0xde] = MMX_OP(pmaxub
),
3129 [0xdf] = MMX_OP(pandn
),
3130 [0xe0] = MMX_OP(pavgb
),
3131 [0xe1] = MMX_OP(psraw
),
3132 [0xe2] = MMX_OP(psrad
),
3133 [0xe3] = MMX_OP(pavgw
),
3134 [0xe4] = MMX_OP(pmulhuw
),
3135 [0xe5] = MMX_OP(pmulhw
),
3137 NULL
, gen_helper_cvttpd2dq_xmm
,
3138 gen_helper_cvtdq2pd_xmm
, gen_helper_cvtpd2dq_xmm
),
3139 [0xe7] = SSE_SPECIAL
, /* movntq, movntq */
3140 [0xe8] = MMX_OP(psubsb
),
3141 [0xe9] = MMX_OP(psubsw
),
3142 [0xea] = MMX_OP(pminsw
),
3143 [0xeb] = MMX_OP(por
),
3144 [0xec] = MMX_OP(paddsb
),
3145 [0xed] = MMX_OP(paddsw
),
3146 [0xee] = MMX_OP(pmaxsw
),
3147 [0xef] = MMX_OP(pxor
),
3148 [0xf0] = SSE_SPECIAL
, /* lddqu */
3149 [0xf1] = MMX_OP(psllw
),
3150 [0xf2] = MMX_OP(pslld
),
3151 [0xf3] = MMX_OP(psllq
),
3152 [0xf4] = MMX_OP(pmuludq
),
3153 [0xf5] = MMX_OP(pmaddwd
),
3154 [0xf6] = MMX_OP(psadbw
),
3155 [0xf7] = OP(op1t
, SSE_OPF_MMX
,
3156 gen_helper_maskmov_mmx
, gen_helper_maskmov_xmm
, NULL
, NULL
),
3157 [0xf8] = MMX_OP(psubb
),
3158 [0xf9] = MMX_OP(psubw
),
3159 [0xfa] = MMX_OP(psubl
),
3160 [0xfb] = MMX_OP(psubq
),
3161 [0xfc] = MMX_OP(paddb
),
3162 [0xfd] = MMX_OP(paddw
),
3163 [0xfe] = MMX_OP(paddl
),
3171 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
3173 static const SSEFunc_0_epp sse_op_table2
[3 * 8][2] = {
3174 [0 + 2] = MMX_OP2(psrlw
),
3175 [0 + 4] = MMX_OP2(psraw
),
3176 [0 + 6] = MMX_OP2(psllw
),
3177 [8 + 2] = MMX_OP2(psrld
),
3178 [8 + 4] = MMX_OP2(psrad
),
3179 [8 + 6] = MMX_OP2(pslld
),
3180 [16 + 2] = MMX_OP2(psrlq
),
3181 [16 + 3] = { NULL
, gen_helper_psrldq_xmm
},
3182 [16 + 6] = MMX_OP2(psllq
),
3183 [16 + 7] = { NULL
, gen_helper_pslldq_xmm
},
3186 static const SSEFunc_0_epi sse_op_table3ai
[] = {
3187 gen_helper_cvtsi2ss
,
3191 #ifdef TARGET_X86_64
3192 static const SSEFunc_0_epl sse_op_table3aq
[] = {
3193 gen_helper_cvtsq2ss
,
3198 static const SSEFunc_i_ep sse_op_table3bi
[] = {
3199 gen_helper_cvttss2si
,
3200 gen_helper_cvtss2si
,
3201 gen_helper_cvttsd2si
,
3205 #ifdef TARGET_X86_64
3206 static const SSEFunc_l_ep sse_op_table3bq
[] = {
3207 gen_helper_cvttss2sq
,
3208 gen_helper_cvtss2sq
,
3209 gen_helper_cvttsd2sq
,
3214 #define SSE_CMP(x) { \
3215 gen_helper_ ## x ## ps ## _xmm, gen_helper_ ## x ## pd ## _xmm, \
3216 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd}
3217 static const SSEFunc_0_epp sse_op_table4
[8][4] = {
3229 static const SSEFunc_0_epp sse_op_table5
[256] = {
3230 [0x0c] = gen_helper_pi2fw
,
3231 [0x0d] = gen_helper_pi2fd
,
3232 [0x1c] = gen_helper_pf2iw
,
3233 [0x1d] = gen_helper_pf2id
,
3234 [0x8a] = gen_helper_pfnacc
,
3235 [0x8e] = gen_helper_pfpnacc
,
3236 [0x90] = gen_helper_pfcmpge
,
3237 [0x94] = gen_helper_pfmin
,
3238 [0x96] = gen_helper_pfrcp
,
3239 [0x97] = gen_helper_pfrsqrt
,
3240 [0x9a] = gen_helper_pfsub
,
3241 [0x9e] = gen_helper_pfadd
,
3242 [0xa0] = gen_helper_pfcmpgt
,
3243 [0xa4] = gen_helper_pfmax
,
3244 [0xa6] = gen_helper_movq
, /* pfrcpit1; no need to actually increase precision */
3245 [0xa7] = gen_helper_movq
, /* pfrsqit1 */
3246 [0xaa] = gen_helper_pfsubr
,
3247 [0xae] = gen_helper_pfacc
,
3248 [0xb0] = gen_helper_pfcmpeq
,
3249 [0xb4] = gen_helper_pfmul
,
3250 [0xb6] = gen_helper_movq
, /* pfrcpit2 */
3251 [0xb7] = gen_helper_pmulhrw_mmx
,
3252 [0xbb] = gen_helper_pswapd
,
3253 [0xbf] = gen_helper_pavgb_mmx
,
3256 struct SSEOpHelper_table6
{
3262 struct SSEOpHelper_table7
{
3270 #define gen_helper_special_xmm NULL
3272 #define OP(name, op, flags, ext, mmx_name) \
3273 {{{.op = mmx_name}, {.op = gen_helper_ ## name ## _xmm} }, \
3274 CPUID_EXT_ ## ext, flags}
3275 #define BINARY_OP_MMX(name, ext) \
3276 OP(name, op1, SSE_OPF_MMX, ext, gen_helper_ ## name ## _mmx)
3277 #define BINARY_OP(name, ext, flags) \
3278 OP(name, op1, flags, ext, NULL)
3279 #define UNARY_OP_MMX(name, ext) \
3280 OP(name, op1, SSE_OPF_MMX, ext, gen_helper_ ## name ## _mmx)
3281 #define UNARY_OP(name, ext, flags) \
3282 OP(name, op1, flags, ext, NULL)
3283 #define BLENDV_OP(name, ext, flags) OP(name, op1, 0, ext, NULL)
3284 #define CMP_OP(name, ext) OP(name, op1, SSE_OPF_CMP, ext, NULL)
3285 #define SPECIAL_OP(ext) OP(special, op1, SSE_OPF_SPECIAL, ext, NULL)
3287 /* prefix [66] 0f 38 */
3288 static const struct SSEOpHelper_table6 sse_op_table6
[256] = {
3289 [0x00] = BINARY_OP_MMX(pshufb
, SSSE3
),
3290 [0x01] = BINARY_OP_MMX(phaddw
, SSSE3
),
3291 [0x02] = BINARY_OP_MMX(phaddd
, SSSE3
),
3292 [0x03] = BINARY_OP_MMX(phaddsw
, SSSE3
),
3293 [0x04] = BINARY_OP_MMX(pmaddubsw
, SSSE3
),
3294 [0x05] = BINARY_OP_MMX(phsubw
, SSSE3
),
3295 [0x06] = BINARY_OP_MMX(phsubd
, SSSE3
),
3296 [0x07] = BINARY_OP_MMX(phsubsw
, SSSE3
),
3297 [0x08] = BINARY_OP_MMX(psignb
, SSSE3
),
3298 [0x09] = BINARY_OP_MMX(psignw
, SSSE3
),
3299 [0x0a] = BINARY_OP_MMX(psignd
, SSSE3
),
3300 [0x0b] = BINARY_OP_MMX(pmulhrsw
, SSSE3
),
3301 [0x10] = BLENDV_OP(pblendvb
, SSE41
, SSE_OPF_MMX
),
3302 [0x14] = BLENDV_OP(blendvps
, SSE41
, 0),
3303 [0x15] = BLENDV_OP(blendvpd
, SSE41
, 0),
3304 [0x17] = CMP_OP(ptest
, SSE41
),
3305 [0x1c] = UNARY_OP_MMX(pabsb
, SSSE3
),
3306 [0x1d] = UNARY_OP_MMX(pabsw
, SSSE3
),
3307 [0x1e] = UNARY_OP_MMX(pabsd
, SSSE3
),
3308 [0x20] = UNARY_OP(pmovsxbw
, SSE41
, SSE_OPF_MMX
),
3309 [0x21] = UNARY_OP(pmovsxbd
, SSE41
, SSE_OPF_MMX
),
3310 [0x22] = UNARY_OP(pmovsxbq
, SSE41
, SSE_OPF_MMX
),
3311 [0x23] = UNARY_OP(pmovsxwd
, SSE41
, SSE_OPF_MMX
),
3312 [0x24] = UNARY_OP(pmovsxwq
, SSE41
, SSE_OPF_MMX
),
3313 [0x25] = UNARY_OP(pmovsxdq
, SSE41
, SSE_OPF_MMX
),
3314 [0x28] = BINARY_OP(pmuldq
, SSE41
, SSE_OPF_MMX
),
3315 [0x29] = BINARY_OP(pcmpeqq
, SSE41
, SSE_OPF_MMX
),
3316 [0x2a] = SPECIAL_OP(SSE41
), /* movntdqa */
3317 [0x2b] = BINARY_OP(packusdw
, SSE41
, SSE_OPF_MMX
),
3318 [0x30] = UNARY_OP(pmovzxbw
, SSE41
, SSE_OPF_MMX
),
3319 [0x31] = UNARY_OP(pmovzxbd
, SSE41
, SSE_OPF_MMX
),
3320 [0x32] = UNARY_OP(pmovzxbq
, SSE41
, SSE_OPF_MMX
),
3321 [0x33] = UNARY_OP(pmovzxwd
, SSE41
, SSE_OPF_MMX
),
3322 [0x34] = UNARY_OP(pmovzxwq
, SSE41
, SSE_OPF_MMX
),
3323 [0x35] = UNARY_OP(pmovzxdq
, SSE41
, SSE_OPF_MMX
),
3324 [0x37] = BINARY_OP(pcmpgtq
, SSE41
, SSE_OPF_MMX
),
3325 [0x38] = BINARY_OP(pminsb
, SSE41
, SSE_OPF_MMX
),
3326 [0x39] = BINARY_OP(pminsd
, SSE41
, SSE_OPF_MMX
),
3327 [0x3a] = BINARY_OP(pminuw
, SSE41
, SSE_OPF_MMX
),
3328 [0x3b] = BINARY_OP(pminud
, SSE41
, SSE_OPF_MMX
),
3329 [0x3c] = BINARY_OP(pmaxsb
, SSE41
, SSE_OPF_MMX
),
3330 [0x3d] = BINARY_OP(pmaxsd
, SSE41
, SSE_OPF_MMX
),
3331 [0x3e] = BINARY_OP(pmaxuw
, SSE41
, SSE_OPF_MMX
),
3332 [0x3f] = BINARY_OP(pmaxud
, SSE41
, SSE_OPF_MMX
),
3333 [0x40] = BINARY_OP(pmulld
, SSE41
, SSE_OPF_MMX
),
3334 [0x41] = UNARY_OP(phminposuw
, SSE41
, 0),
3335 [0xdb] = UNARY_OP(aesimc
, AES
, 0),
3336 [0xdc] = BINARY_OP(aesenc
, AES
, 0),
3337 [0xdd] = BINARY_OP(aesenclast
, AES
, 0),
3338 [0xde] = BINARY_OP(aesdec
, AES
, 0),
3339 [0xdf] = BINARY_OP(aesdeclast
, AES
, 0),
3342 /* prefix [66] 0f 3a */
3343 static const struct SSEOpHelper_table7 sse_op_table7
[256] = {
3344 [0x08] = UNARY_OP(roundps
, SSE41
, 0),
3345 [0x09] = UNARY_OP(roundpd
, SSE41
, 0),
3346 [0x0a] = UNARY_OP(roundss
, SSE41
, SSE_OPF_SCALAR
),
3347 [0x0b] = UNARY_OP(roundsd
, SSE41
, SSE_OPF_SCALAR
),
3348 [0x0c] = BINARY_OP(blendps
, SSE41
, 0),
3349 [0x0d] = BINARY_OP(blendpd
, SSE41
, 0),
3350 [0x0e] = BINARY_OP(pblendw
, SSE41
, SSE_OPF_MMX
),
3351 [0x0f] = BINARY_OP_MMX(palignr
, SSSE3
),
3352 [0x14] = SPECIAL_OP(SSE41
), /* pextrb */
3353 [0x15] = SPECIAL_OP(SSE41
), /* pextrw */
3354 [0x16] = SPECIAL_OP(SSE41
), /* pextrd/pextrq */
3355 [0x17] = SPECIAL_OP(SSE41
), /* extractps */
3356 [0x20] = SPECIAL_OP(SSE41
), /* pinsrb */
3357 [0x21] = SPECIAL_OP(SSE41
), /* insertps */
3358 [0x22] = SPECIAL_OP(SSE41
), /* pinsrd/pinsrq */
3359 [0x40] = BINARY_OP(dpps
, SSE41
, 0),
3360 [0x41] = BINARY_OP(dppd
, SSE41
, 0),
3361 [0x42] = BINARY_OP(mpsadbw
, SSE41
, SSE_OPF_MMX
),
3362 [0x44] = BINARY_OP(pclmulqdq
, PCLMULQDQ
, 0),
3363 [0x60] = CMP_OP(pcmpestrm
, SSE42
),
3364 [0x61] = CMP_OP(pcmpestri
, SSE42
),
3365 [0x62] = CMP_OP(pcmpistrm
, SSE42
),
3366 [0x63] = CMP_OP(pcmpistri
, SSE42
),
3367 [0xdf] = UNARY_OP(aeskeygenassist
, AES
, 0),
3371 #undef BINARY_OP_MMX
3378 /* VEX prefix not allowed */
3379 #define CHECK_NO_VEX(s) do { \
3380 if (s->prefix & PREFIX_VEX) \
3384 static void gen_sse(CPUX86State
*env
, DisasContext
*s
, int b
)
3386 int b1
, op1_offset
, op2_offset
, is_xmm
, val
;
3387 int modrm
, mod
, rm
, reg
;
3390 const struct SSEOpHelper_table6
*op6
;
3391 const struct SSEOpHelper_table7
*op7
;
3395 if (s
->prefix
& PREFIX_DATA
)
3397 else if (s
->prefix
& PREFIX_REPZ
)
3399 else if (s
->prefix
& PREFIX_REPNZ
)
3403 sse_op_flags
= sse_op_table1
[b
].flags
;
3404 sse_op_fn
= sse_op_table1
[b
].fn
[b1
];
3405 if ((sse_op_flags
& (SSE_OPF_SPECIAL
| SSE_OPF_3DNOW
)) == 0
3406 && !sse_op_fn
.op1
) {
3409 if ((b
<= 0x5f && b
>= 0x10) || b
== 0xc6 || b
== 0xc2) {
3419 if (sse_op_flags
& SSE_OPF_3DNOW
) {
3420 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
3424 /* simple MMX/SSE operation */
3425 if (s
->flags
& HF_TS_MASK
) {
3426 gen_exception(s
, EXCP07_PREX
);
3429 if (s
->flags
& HF_EM_MASK
) {
3431 gen_illegal_opcode(s
);
3435 && !(s
->flags
& HF_OSFXSR_MASK
)
3436 && (b
!= 0x38 && b
!= 0x3a)) {
3440 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_3DNOW
)) {
3441 /* If we were fully decoding this we might use illegal_op. */
3445 gen_helper_emms(cpu_env
);
3450 gen_helper_emms(cpu_env
);
3453 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3454 the static cpu state) */
3456 gen_helper_enter_mmx(cpu_env
);
3459 modrm
= x86_ldub_code(env
, s
);
3460 reg
= ((modrm
>> 3) & 7);
3464 mod
= (modrm
>> 6) & 3;
3465 if (sse_op_flags
& SSE_OPF_SPECIAL
) {
3468 case 0x0e7: /* movntq */
3473 gen_lea_modrm(env
, s
, modrm
);
3474 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3476 case 0x1e7: /* movntdq */
3477 case 0x02b: /* movntps */
3478 case 0x12b: /* movntpd */
3481 gen_lea_modrm(env
, s
, modrm
);
3482 gen_sto_env_A0(s
, XMM_OFFSET(reg
), true);
3484 case 0x3f0: /* lddqu */
3487 gen_lea_modrm(env
, s
, modrm
);
3488 gen_ldo_env_A0(s
, XMM_OFFSET(reg
), true);
3490 case 0x22b: /* movntss */
3491 case 0x32b: /* movntsd */
3494 gen_lea_modrm(env
, s
, modrm
);
3496 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3497 xmm_regs
[reg
].ZMM_Q(0)));
3499 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
3500 xmm_regs
[reg
].ZMM_L(0)));
3501 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
3504 case 0x6e: /* movd mm, ea */
3506 #ifdef TARGET_X86_64
3507 if (s
->dflag
== MO_64
) {
3508 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3509 tcg_gen_st_tl(s
->T0
, cpu_env
,
3510 offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3514 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3515 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
3516 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3517 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3518 gen_helper_movl_mm_T0_mmx(s
->ptr0
, s
->tmp2_i32
);
3521 case 0x16e: /* movd xmm, ea */
3522 #ifdef TARGET_X86_64
3523 if (s
->dflag
== MO_64
) {
3524 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 0);
3525 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, ZMM_OFFSET(reg
));
3526 gen_helper_movq_mm_T0_xmm(s
->ptr0
, s
->T0
);
3530 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 0);
3531 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, ZMM_OFFSET(reg
));
3532 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3533 gen_helper_movl_mm_T0_xmm(s
->ptr0
, s
->tmp2_i32
);
3536 case 0x6f: /* movq mm, ea */
3539 gen_lea_modrm(env
, s
, modrm
);
3540 gen_ldq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3543 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
,
3544 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
3545 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
,
3546 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3549 case 0x010: /* movups */
3550 case 0x110: /* movupd */
3551 case 0x028: /* movaps */
3552 case 0x128: /* movapd */
3553 case 0x16f: /* movdqa xmm, ea */
3554 case 0x26f: /* movdqu xmm, ea */
3556 gen_lea_modrm(env
, s
, modrm
);
3557 gen_ldo_env_A0(s
, XMM_OFFSET(reg
),
3558 /* movaps, movapd, movdqa */
3559 b
== 0x028 || b
== 0x128 || b
== 0x16f);
3561 rm
= (modrm
& 7) | REX_B(s
);
3562 gen_op_movo(s
, XMM_OFFSET(reg
), XMM_OFFSET(rm
));
3565 case 0x210: /* movss xmm, ea */
3567 gen_lea_modrm(env
, s
, modrm
);
3568 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
3569 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3570 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3571 tcg_gen_movi_tl(s
->T0
, 0);
3572 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3573 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)));
3574 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3575 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)));
3576 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3577 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)));
3579 rm
= (modrm
& 7) | REX_B(s
);
3580 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
3581 offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_L(0)));
3582 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
3583 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3586 case 0x310: /* movsd xmm, ea */
3588 gen_lea_modrm(env
, s
, modrm
);
3589 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3590 xmm_regs
[reg
].ZMM_Q(0)));
3591 tcg_gen_movi_tl(s
->T0
, 0);
3592 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3593 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)));
3594 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3595 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)));
3597 rm
= (modrm
& 7) | REX_B(s
);
3598 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3599 offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)));
3602 case 0x012: /* movlps */
3603 case 0x112: /* movlpd */
3605 gen_lea_modrm(env
, s
, modrm
);
3606 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3607 xmm_regs
[reg
].ZMM_Q(0)));
3610 rm
= (modrm
& 7) | REX_B(s
);
3611 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3612 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(1)));
3615 case 0x212: /* movsldup */
3617 gen_lea_modrm(env
, s
, modrm
);
3618 gen_ldo_env_A0(s
, XMM_OFFSET(reg
), true);
3620 rm
= (modrm
& 7) | REX_B(s
);
3621 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3622 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(0)));
3623 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)),
3624 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(2)));
3626 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)),
3627 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3628 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)),
3629 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(2)));
3631 case 0x312: /* movddup */
3633 gen_lea_modrm(env
, s
, modrm
);
3634 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3635 xmm_regs
[reg
].ZMM_Q(0)));
3637 rm
= (modrm
& 7) | REX_B(s
);
3638 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3639 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3641 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)),
3642 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3644 case 0x016: /* movhps */
3645 case 0x116: /* movhpd */
3647 gen_lea_modrm(env
, s
, modrm
);
3648 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3649 xmm_regs
[reg
].ZMM_Q(1)));
3652 rm
= (modrm
& 7) | REX_B(s
);
3653 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)),
3654 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3657 case 0x216: /* movshdup */
3659 gen_lea_modrm(env
, s
, modrm
);
3660 gen_ldo_env_A0(s
, XMM_OFFSET(reg
), true);
3662 rm
= (modrm
& 7) | REX_B(s
);
3663 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(1)),
3664 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(1)));
3665 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(3)),
3666 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_L(3)));
3668 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)),
3669 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(1)));
3670 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(2)),
3671 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(3)));
3677 int bit_index
, field_length
;
3679 if (b1
== 1 && reg
!= 0)
3681 field_length
= x86_ldub_code(env
, s
) & 0x3F;
3682 bit_index
= x86_ldub_code(env
, s
) & 0x3F;
3683 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, ZMM_OFFSET(reg
));
3685 gen_helper_extrq_i(cpu_env
, s
->ptr0
,
3686 tcg_const_i32(bit_index
),
3687 tcg_const_i32(field_length
));
3690 gen_lea_modrm(env
, s
, modrm
);
3691 op2_offset
= offsetof(CPUX86State
, xmm_t0
);
3692 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_D(0)));
3694 rm
= (modrm
& 7) | REX_B(s
);
3695 op2_offset
= ZMM_OFFSET(rm
);
3697 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3698 gen_helper_insertq_i(cpu_env
, s
->ptr0
, s
->ptr1
,
3699 tcg_const_i32(bit_index
),
3700 tcg_const_i32(field_length
));
3704 case 0x7e: /* movd ea, mm */
3706 #ifdef TARGET_X86_64
3707 if (s
->dflag
== MO_64
) {
3708 tcg_gen_ld_i64(s
->T0
, cpu_env
,
3709 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3710 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3714 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3715 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_L(0)));
3716 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3719 case 0x17e: /* movd ea, xmm */
3720 #ifdef TARGET_X86_64
3721 if (s
->dflag
== MO_64
) {
3722 tcg_gen_ld_i64(s
->T0
, cpu_env
,
3723 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3724 gen_ldst_modrm(env
, s
, modrm
, MO_64
, OR_TMP0
, 1);
3728 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3729 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3730 gen_ldst_modrm(env
, s
, modrm
, MO_32
, OR_TMP0
, 1);
3733 case 0x27e: /* movq xmm, ea */
3735 gen_lea_modrm(env
, s
, modrm
);
3736 gen_ldq_env_A0(s
, offsetof(CPUX86State
,
3737 xmm_regs
[reg
].ZMM_Q(0)));
3739 rm
= (modrm
& 7) | REX_B(s
);
3740 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
3741 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
3743 gen_op_movq_env_0(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)));
3745 case 0x7f: /* movq ea, mm */
3748 gen_lea_modrm(env
, s
, modrm
);
3749 gen_stq_env_A0(s
, offsetof(CPUX86State
, fpregs
[reg
].mmx
));
3752 gen_op_movq(s
, offsetof(CPUX86State
, fpregs
[rm
].mmx
),
3753 offsetof(CPUX86State
,fpregs
[reg
].mmx
));
3756 case 0x011: /* movups */
3757 case 0x111: /* movupd */
3758 case 0x029: /* movaps */
3759 case 0x129: /* movapd */
3760 case 0x17f: /* movdqa ea, xmm */
3761 case 0x27f: /* movdqu ea, xmm */
3763 gen_lea_modrm(env
, s
, modrm
);
3764 gen_sto_env_A0(s
, XMM_OFFSET(reg
),
3765 /* movaps, movapd, movdqa */
3766 b
== 0x029 || b
== 0x129 || b
== 0x17f);
3768 rm
= (modrm
& 7) | REX_B(s
);
3769 gen_op_movo(s
, XMM_OFFSET(rm
), XMM_OFFSET(reg
));
3772 case 0x211: /* movss ea, xmm */
3774 gen_lea_modrm(env
, s
, modrm
);
3775 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
3776 offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_L(0)));
3777 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
3779 rm
= (modrm
& 7) | REX_B(s
);
3780 gen_op_movl(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_L(0)),
3781 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_L(0)));
3784 case 0x311: /* movsd ea, xmm */
3786 gen_lea_modrm(env
, s
, modrm
);
3787 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3788 xmm_regs
[reg
].ZMM_Q(0)));
3790 rm
= (modrm
& 7) | REX_B(s
);
3791 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)),
3792 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
3795 case 0x013: /* movlps */
3796 case 0x113: /* movlpd */
3798 gen_lea_modrm(env
, s
, modrm
);
3799 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3800 xmm_regs
[reg
].ZMM_Q(0)));
3805 case 0x017: /* movhps */
3806 case 0x117: /* movhpd */
3808 gen_lea_modrm(env
, s
, modrm
);
3809 gen_stq_env_A0(s
, offsetof(CPUX86State
,
3810 xmm_regs
[reg
].ZMM_Q(1)));
3815 case 0x71: /* shift mm, im */
3818 case 0x171: /* shift xmm, im */
3821 val
= x86_ldub_code(env
, s
);
3823 tcg_gen_movi_tl(s
->T0
, val
);
3824 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3825 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
3826 tcg_gen_movi_tl(s
->T0
, 0);
3827 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3828 offsetof(CPUX86State
, xmm_t0
.ZMM_L(1)));
3829 op1_offset
= offsetof(CPUX86State
,xmm_t0
);
3832 tcg_gen_movi_tl(s
->T0
, val
);
3833 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3834 offsetof(CPUX86State
, mmx_t0
.MMX_L(0)));
3835 tcg_gen_movi_tl(s
->T0
, 0);
3836 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3837 offsetof(CPUX86State
, mmx_t0
.MMX_L(1)));
3838 op1_offset
= offsetof(CPUX86State
,mmx_t0
);
3841 SSEFunc_0_epp fn
= sse_op_table2
[((b
- 1) & 3) * 8 +
3842 (((modrm
>> 3)) & 7)][b1
];
3847 rm
= (modrm
& 7) | REX_B(s
);
3848 op2_offset
= ZMM_OFFSET(rm
);
3851 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3853 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op2_offset
);
3854 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op1_offset
);
3855 fn(cpu_env
, s
->ptr0
, s
->ptr1
);
3857 case 0x050: /* movmskps */
3858 rm
= (modrm
& 7) | REX_B(s
);
3859 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, ZMM_OFFSET(rm
));
3860 gen_helper_movmskps_xmm(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3861 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3863 case 0x150: /* movmskpd */
3864 rm
= (modrm
& 7) | REX_B(s
);
3865 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, ZMM_OFFSET(rm
));
3866 gen_helper_movmskpd_xmm(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3867 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
3869 case 0x02a: /* cvtpi2ps */
3870 case 0x12a: /* cvtpi2pd */
3872 gen_helper_enter_mmx(cpu_env
);
3874 gen_lea_modrm(env
, s
, modrm
);
3875 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
3876 gen_ldq_env_A0(s
, op2_offset
);
3879 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
3881 op1_offset
= ZMM_OFFSET(reg
);
3882 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3883 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3886 gen_helper_cvtpi2ps(cpu_env
, s
->ptr0
, s
->ptr1
);
3890 gen_helper_cvtpi2pd(cpu_env
, s
->ptr0
, s
->ptr1
);
3894 case 0x22a: /* cvtsi2ss */
3895 case 0x32a: /* cvtsi2sd */
3896 ot
= mo_64_32(s
->dflag
);
3897 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
3898 op1_offset
= ZMM_OFFSET(reg
);
3899 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3901 SSEFunc_0_epi sse_fn_epi
= sse_op_table3ai
[(b
>> 8) & 1];
3902 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
3903 sse_fn_epi(cpu_env
, s
->ptr0
, s
->tmp2_i32
);
3905 #ifdef TARGET_X86_64
3906 SSEFunc_0_epl sse_fn_epl
= sse_op_table3aq
[(b
>> 8) & 1];
3907 sse_fn_epl(cpu_env
, s
->ptr0
, s
->T0
);
3913 case 0x02c: /* cvttps2pi */
3914 case 0x12c: /* cvttpd2pi */
3915 case 0x02d: /* cvtps2pi */
3916 case 0x12d: /* cvtpd2pi */
3918 gen_helper_enter_mmx(cpu_env
);
3920 gen_lea_modrm(env
, s
, modrm
);
3921 op2_offset
= offsetof(CPUX86State
, xmm_t0
.ZMM_X(0));
3922 /* FIXME: should be 64-bit access if b1 == 0. */
3923 gen_ldo_env_A0(s
, op2_offset
, !!b1
);
3925 rm
= (modrm
& 7) | REX_B(s
);
3926 op2_offset
= ZMM_OFFSET(rm
);
3928 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
& 7].mmx
);
3929 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
3930 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
3933 gen_helper_cvttps2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3936 gen_helper_cvttpd2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3939 gen_helper_cvtps2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3942 gen_helper_cvtpd2pi(cpu_env
, s
->ptr0
, s
->ptr1
);
3946 case 0x22c: /* cvttss2si */
3947 case 0x32c: /* cvttsd2si */
3948 case 0x22d: /* cvtss2si */
3949 case 0x32d: /* cvtsd2si */
3950 ot
= mo_64_32(s
->dflag
);
3952 gen_lea_modrm(env
, s
, modrm
);
3954 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_Q(0)));
3956 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
3957 tcg_gen_st32_tl(s
->T0
, cpu_env
,
3958 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
3960 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
3962 rm
= (modrm
& 7) | REX_B(s
);
3963 op2_offset
= ZMM_OFFSET(rm
);
3965 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op2_offset
);
3967 SSEFunc_i_ep sse_fn_i_ep
=
3968 sse_op_table3bi
[((b
>> 7) & 2) | (b
& 1)];
3969 sse_fn_i_ep(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
3970 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
3972 #ifdef TARGET_X86_64
3973 SSEFunc_l_ep sse_fn_l_ep
=
3974 sse_op_table3bq
[((b
>> 7) & 2) | (b
& 1)];
3975 sse_fn_l_ep(s
->T0
, cpu_env
, s
->ptr0
);
3980 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
3982 case 0xc4: /* pinsrw */
3985 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
3986 val
= x86_ldub_code(env
, s
);
3989 tcg_gen_st16_tl(s
->T0
, cpu_env
,
3990 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_W(val
)));
3994 tcg_gen_st16_tl(s
->T0
, cpu_env
,
3995 offsetof(CPUX86State
,fpregs
[reg
].mmx
.MMX_W(val
)));
3998 case 0xc5: /* pextrw */
4002 ot
= mo_64_32(s
->dflag
);
4003 val
= x86_ldub_code(env
, s
);
4006 rm
= (modrm
& 7) | REX_B(s
);
4007 tcg_gen_ld16u_tl(s
->T0
, cpu_env
,
4008 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_W(val
)));
4012 tcg_gen_ld16u_tl(s
->T0
, cpu_env
,
4013 offsetof(CPUX86State
,fpregs
[rm
].mmx
.MMX_W(val
)));
4015 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4016 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4018 case 0x1d6: /* movq ea, xmm */
4020 gen_lea_modrm(env
, s
, modrm
);
4021 gen_stq_env_A0(s
, offsetof(CPUX86State
,
4022 xmm_regs
[reg
].ZMM_Q(0)));
4024 rm
= (modrm
& 7) | REX_B(s
);
4025 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(0)),
4026 offsetof(CPUX86State
,xmm_regs
[reg
].ZMM_Q(0)));
4027 gen_op_movq_env_0(s
,
4028 offsetof(CPUX86State
, xmm_regs
[rm
].ZMM_Q(1)));
4031 case 0x2d6: /* movq2dq */
4033 gen_helper_enter_mmx(cpu_env
);
4035 gen_op_movq(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(0)),
4036 offsetof(CPUX86State
,fpregs
[rm
].mmx
));
4037 gen_op_movq_env_0(s
, offsetof(CPUX86State
, xmm_regs
[reg
].ZMM_Q(1)));
4039 case 0x3d6: /* movdq2q */
4041 gen_helper_enter_mmx(cpu_env
);
4042 rm
= (modrm
& 7) | REX_B(s
);
4043 gen_op_movq(s
, offsetof(CPUX86State
, fpregs
[reg
& 7].mmx
),
4044 offsetof(CPUX86State
,xmm_regs
[rm
].ZMM_Q(0)));
4046 case 0xd7: /* pmovmskb */
4051 rm
= (modrm
& 7) | REX_B(s
);
4052 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, ZMM_OFFSET(rm
));
4053 gen_helper_pmovmskb_xmm(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
4057 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
,
4058 offsetof(CPUX86State
, fpregs
[rm
].mmx
));
4059 gen_helper_pmovmskb_mmx(s
->tmp2_i32
, cpu_env
, s
->ptr0
);
4061 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4062 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
4068 if ((b
& 0xf0) == 0xf0) {
4071 modrm
= x86_ldub_code(env
, s
);
4073 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4074 mod
= (modrm
>> 6) & 3;
4077 op6
= &sse_op_table6
[b
];
4078 if (op6
->ext_mask
== 0) {
4081 if (!(s
->cpuid_ext_features
& op6
->ext_mask
)) {
4086 op1_offset
= ZMM_OFFSET(reg
);
4088 op2_offset
= ZMM_OFFSET(rm
| REX_B(s
));
4090 op2_offset
= offsetof(CPUX86State
,xmm_t0
);
4091 gen_lea_modrm(env
, s
, modrm
);
4093 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
4094 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
4095 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
4096 gen_ldq_env_A0(s
, op2_offset
+
4097 offsetof(ZMMReg
, ZMM_Q(0)));
4099 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
4100 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
4101 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4102 s
->mem_index
, MO_LEUL
);
4103 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, op2_offset
+
4104 offsetof(ZMMReg
, ZMM_L(0)));
4106 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
4107 tcg_gen_qemu_ld_tl(s
->tmp0
, s
->A0
,
4108 s
->mem_index
, MO_LEUW
);
4109 tcg_gen_st16_tl(s
->tmp0
, cpu_env
, op2_offset
+
4110 offsetof(ZMMReg
, ZMM_W(0)));
4112 case 0x2a: /* movntdqa */
4113 gen_ldo_env_A0(s
, op1_offset
+ offsetof(ZMMReg
, ZMM_X(0)), true);
4116 gen_ldo_env_A0(s
, op2_offset
+ offsetof(ZMMReg
, ZMM_X(0)), true);
4119 if (!op6
->fn
[b1
].op1
) {
4122 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4123 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4124 op6
->fn
[b1
].op1(cpu_env
, s
->ptr0
, s
->ptr1
);
4127 if ((op6
->flags
& SSE_OPF_MMX
) == 0) {
4130 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4132 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4134 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4135 gen_lea_modrm(env
, s
, modrm
);
4136 gen_ldq_env_A0(s
, op2_offset
);
4138 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4139 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4140 op6
->fn
[0].op1(cpu_env
, s
->ptr0
, s
->ptr1
);
4143 if (op6
->flags
& SSE_OPF_CMP
) {
4144 set_cc_op(s
, CC_OP_EFLAGS
);
4151 /* Various integer extensions at 0f 38 f[0-f]. */
4152 b
= modrm
| (b1
<< 8);
4153 modrm
= x86_ldub_code(env
, s
);
4154 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4157 case 0x3f0: /* crc32 Gd,Eb */
4158 case 0x3f1: /* crc32 Gd,Ey */
4161 if (!(s
->cpuid_ext_features
& CPUID_EXT_SSE42
)) {
4164 if ((b
& 0xff) == 0xf0) {
4166 } else if (s
->dflag
!= MO_64
) {
4167 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
4172 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[reg
]);
4173 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4174 gen_helper_crc32(s
->T0
, s
->tmp2_i32
,
4175 s
->T0
, tcg_const_i32(8 << ot
));
4177 ot
= mo_64_32(s
->dflag
);
4178 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4181 case 0x1f0: /* crc32 or movbe */
4184 /* For these insns, the f3 prefix is supposed to have priority
4185 over the 66 prefix, but that's not what we implement above
4187 if (s
->prefix
& PREFIX_REPNZ
) {
4191 case 0x0f0: /* movbe Gy,My */
4192 case 0x0f1: /* movbe My,Gy */
4194 if (!(s
->cpuid_ext_features
& CPUID_EXT_MOVBE
)) {
4197 if (s
->dflag
!= MO_64
) {
4198 ot
= (s
->prefix
& PREFIX_DATA
? MO_16
: MO_32
);
4203 gen_lea_modrm(env
, s
, modrm
);
4205 tcg_gen_qemu_ld_tl(s
->T0
, s
->A0
,
4206 s
->mem_index
, ot
| MO_BE
);
4207 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4209 tcg_gen_qemu_st_tl(cpu_regs
[reg
], s
->A0
,
4210 s
->mem_index
, ot
| MO_BE
);
4214 case 0x0f2: /* andn Gy, By, Ey */
4215 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4216 || !(s
->prefix
& PREFIX_VEX
)
4220 ot
= mo_64_32(s
->dflag
);
4221 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4222 tcg_gen_andc_tl(s
->T0
, s
->T0
, cpu_regs
[s
->vex_v
]);
4223 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4224 gen_op_update1_cc(s
);
4225 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4228 case 0x0f7: /* bextr Gy, Ey, By */
4229 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4230 || !(s
->prefix
& PREFIX_VEX
)
4234 ot
= mo_64_32(s
->dflag
);
4238 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4239 /* Extract START, and shift the operand.
4240 Shifts larger than operand size get zeros. */
4241 tcg_gen_ext8u_tl(s
->A0
, cpu_regs
[s
->vex_v
]);
4242 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->A0
);
4244 bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
4245 zero
= tcg_const_tl(0);
4246 tcg_gen_movcond_tl(TCG_COND_LEU
, s
->T0
, s
->A0
, bound
,
4248 tcg_temp_free(zero
);
4250 /* Extract the LEN into a mask. Lengths larger than
4251 operand size get all ones. */
4252 tcg_gen_extract_tl(s
->A0
, cpu_regs
[s
->vex_v
], 8, 8);
4253 tcg_gen_movcond_tl(TCG_COND_LEU
, s
->A0
, s
->A0
, bound
,
4255 tcg_temp_free(bound
);
4256 tcg_gen_movi_tl(s
->T1
, 1);
4257 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->A0
);
4258 tcg_gen_subi_tl(s
->T1
, s
->T1
, 1);
4259 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4261 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4262 gen_op_update1_cc(s
);
4263 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
4267 case 0x0f5: /* bzhi Gy, Ey, By */
4268 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4269 || !(s
->prefix
& PREFIX_VEX
)
4273 ot
= mo_64_32(s
->dflag
);
4274 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4275 tcg_gen_ext8u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4277 TCGv bound
= tcg_const_tl(ot
== MO_64
? 63 : 31);
4278 /* Note that since we're using BMILG (in order to get O
4279 cleared) we need to store the inverse into C. */
4280 tcg_gen_setcond_tl(TCG_COND_LT
, cpu_cc_src
,
4282 tcg_gen_movcond_tl(TCG_COND_GT
, s
->T1
, s
->T1
,
4283 bound
, bound
, s
->T1
);
4284 tcg_temp_free(bound
);
4286 tcg_gen_movi_tl(s
->A0
, -1);
4287 tcg_gen_shl_tl(s
->A0
, s
->A0
, s
->T1
);
4288 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->A0
);
4289 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4290 gen_op_update1_cc(s
);
4291 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4294 case 0x3f6: /* mulx By, Gy, rdx, Ey */
4295 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4296 || !(s
->prefix
& PREFIX_VEX
)
4300 ot
= mo_64_32(s
->dflag
);
4301 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4304 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
4305 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EDX
]);
4306 tcg_gen_mulu2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
4307 s
->tmp2_i32
, s
->tmp3_i32
);
4308 tcg_gen_extu_i32_tl(cpu_regs
[s
->vex_v
], s
->tmp2_i32
);
4309 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp3_i32
);
4311 #ifdef TARGET_X86_64
4313 tcg_gen_mulu2_i64(s
->T0
, s
->T1
,
4314 s
->T0
, cpu_regs
[R_EDX
]);
4315 tcg_gen_mov_i64(cpu_regs
[s
->vex_v
], s
->T0
);
4316 tcg_gen_mov_i64(cpu_regs
[reg
], s
->T1
);
4322 case 0x3f5: /* pdep Gy, By, Ey */
4323 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4324 || !(s
->prefix
& PREFIX_VEX
)
4328 ot
= mo_64_32(s
->dflag
);
4329 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4330 /* Note that by zero-extending the source operand, we
4331 automatically handle zero-extending the result. */
4333 tcg_gen_mov_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4335 tcg_gen_ext32u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4337 gen_helper_pdep(cpu_regs
[reg
], s
->T1
, s
->T0
);
4340 case 0x2f5: /* pext Gy, By, Ey */
4341 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4342 || !(s
->prefix
& PREFIX_VEX
)
4346 ot
= mo_64_32(s
->dflag
);
4347 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4348 /* Note that by zero-extending the source operand, we
4349 automatically handle zero-extending the result. */
4351 tcg_gen_mov_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4353 tcg_gen_ext32u_tl(s
->T1
, cpu_regs
[s
->vex_v
]);
4355 gen_helper_pext(cpu_regs
[reg
], s
->T1
, s
->T0
);
4358 case 0x1f6: /* adcx Gy, Ey */
4359 case 0x2f6: /* adox Gy, Ey */
4361 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_ADX
)) {
4364 TCGv carry_in
, carry_out
, zero
;
4367 ot
= mo_64_32(s
->dflag
);
4368 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4370 /* Re-use the carry-out from a previous round. */
4372 carry_out
= (b
== 0x1f6 ? cpu_cc_dst
: cpu_cc_src2
);
4376 carry_in
= cpu_cc_dst
;
4377 end_op
= CC_OP_ADCX
;
4379 end_op
= CC_OP_ADCOX
;
4384 end_op
= CC_OP_ADCOX
;
4386 carry_in
= cpu_cc_src2
;
4387 end_op
= CC_OP_ADOX
;
4391 end_op
= CC_OP_ADCOX
;
4392 carry_in
= carry_out
;
4395 end_op
= (b
== 0x1f6 ? CC_OP_ADCX
: CC_OP_ADOX
);
4398 /* If we can't reuse carry-out, get it out of EFLAGS. */
4400 if (s
->cc_op
!= CC_OP_ADCX
&& s
->cc_op
!= CC_OP_ADOX
) {
4401 gen_compute_eflags(s
);
4404 tcg_gen_extract_tl(carry_in
, cpu_cc_src
,
4405 ctz32(b
== 0x1f6 ? CC_C
: CC_O
), 1);
4409 #ifdef TARGET_X86_64
4411 /* If we know TL is 64-bit, and we want a 32-bit
4412 result, just do everything in 64-bit arithmetic. */
4413 tcg_gen_ext32u_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
4414 tcg_gen_ext32u_i64(s
->T0
, s
->T0
);
4415 tcg_gen_add_i64(s
->T0
, s
->T0
, cpu_regs
[reg
]);
4416 tcg_gen_add_i64(s
->T0
, s
->T0
, carry_in
);
4417 tcg_gen_ext32u_i64(cpu_regs
[reg
], s
->T0
);
4418 tcg_gen_shri_i64(carry_out
, s
->T0
, 32);
4422 /* Otherwise compute the carry-out in two steps. */
4423 zero
= tcg_const_tl(0);
4424 tcg_gen_add2_tl(s
->T0
, carry_out
,
4427 tcg_gen_add2_tl(cpu_regs
[reg
], carry_out
,
4428 cpu_regs
[reg
], carry_out
,
4430 tcg_temp_free(zero
);
4433 set_cc_op(s
, end_op
);
4437 case 0x1f7: /* shlx Gy, Ey, By */
4438 case 0x2f7: /* sarx Gy, Ey, By */
4439 case 0x3f7: /* shrx Gy, Ey, By */
4440 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4441 || !(s
->prefix
& PREFIX_VEX
)
4445 ot
= mo_64_32(s
->dflag
);
4446 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4448 tcg_gen_andi_tl(s
->T1
, cpu_regs
[s
->vex_v
], 63);
4450 tcg_gen_andi_tl(s
->T1
, cpu_regs
[s
->vex_v
], 31);
4453 tcg_gen_shl_tl(s
->T0
, s
->T0
, s
->T1
);
4454 } else if (b
== 0x2f7) {
4456 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
4458 tcg_gen_sar_tl(s
->T0
, s
->T0
, s
->T1
);
4461 tcg_gen_ext32u_tl(s
->T0
, s
->T0
);
4463 tcg_gen_shr_tl(s
->T0
, s
->T0
, s
->T1
);
4465 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4471 case 0x3f3: /* Group 17 */
4472 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)
4473 || !(s
->prefix
& PREFIX_VEX
)
4477 ot
= mo_64_32(s
->dflag
);
4478 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4480 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
4482 case 1: /* blsr By,Ey */
4483 tcg_gen_subi_tl(s
->T1
, s
->T0
, 1);
4484 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4486 case 2: /* blsmsk By,Ey */
4487 tcg_gen_subi_tl(s
->T1
, s
->T0
, 1);
4488 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->T1
);
4490 case 3: /* blsi By, Ey */
4491 tcg_gen_neg_tl(s
->T1
, s
->T0
);
4492 tcg_gen_and_tl(s
->T0
, s
->T0
, s
->T1
);
4497 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
4498 gen_op_mov_reg_v(s
, ot
, s
->vex_v
, s
->T0
);
4499 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
4510 modrm
= x86_ldub_code(env
, s
);
4512 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4513 mod
= (modrm
>> 6) & 3;
4516 op7
= &sse_op_table7
[b
];
4517 if (op7
->ext_mask
== 0) {
4520 if (!(s
->cpuid_ext_features
& op7
->ext_mask
)) {
4526 if (op7
->flags
& SSE_OPF_SPECIAL
) {
4527 /* None of the "special" ops are valid on mmx registers */
4531 ot
= mo_64_32(s
->dflag
);
4532 rm
= (modrm
& 7) | REX_B(s
);
4534 gen_lea_modrm(env
, s
, modrm
);
4535 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4536 val
= x86_ldub_code(env
, s
);
4538 case 0x14: /* pextrb */
4539 tcg_gen_ld8u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4540 xmm_regs
[reg
].ZMM_B(val
& 15)));
4542 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4544 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4545 s
->mem_index
, MO_UB
);
4548 case 0x15: /* pextrw */
4549 tcg_gen_ld16u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4550 xmm_regs
[reg
].ZMM_W(val
& 7)));
4552 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4554 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4555 s
->mem_index
, MO_LEUW
);
4559 if (ot
== MO_32
) { /* pextrd */
4560 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4561 offsetof(CPUX86State
,
4562 xmm_regs
[reg
].ZMM_L(val
& 3)));
4564 tcg_gen_extu_i32_tl(cpu_regs
[rm
], s
->tmp2_i32
);
4566 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
4567 s
->mem_index
, MO_LEUL
);
4569 } else { /* pextrq */
4570 #ifdef TARGET_X86_64
4571 tcg_gen_ld_i64(s
->tmp1_i64
, cpu_env
,
4572 offsetof(CPUX86State
,
4573 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4575 tcg_gen_mov_i64(cpu_regs
[rm
], s
->tmp1_i64
);
4577 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
4578 s
->mem_index
, MO_LEUQ
);
4585 case 0x17: /* extractps */
4586 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4587 xmm_regs
[reg
].ZMM_L(val
& 3)));
4589 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
4591 tcg_gen_qemu_st_tl(s
->T0
, s
->A0
,
4592 s
->mem_index
, MO_LEUL
);
4595 case 0x20: /* pinsrb */
4597 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
4599 tcg_gen_qemu_ld_tl(s
->T0
, s
->A0
,
4600 s
->mem_index
, MO_UB
);
4602 tcg_gen_st8_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
,
4603 xmm_regs
[reg
].ZMM_B(val
& 15)));
4605 case 0x21: /* insertps */
4607 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
4608 offsetof(CPUX86State
,xmm_regs
[rm
]
4609 .ZMM_L((val
>> 6) & 3)));
4611 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4612 s
->mem_index
, MO_LEUL
);
4614 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
4615 offsetof(CPUX86State
,xmm_regs
[reg
]
4616 .ZMM_L((val
>> 4) & 3)));
4618 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4619 cpu_env
, offsetof(CPUX86State
,
4620 xmm_regs
[reg
].ZMM_L(0)));
4622 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4623 cpu_env
, offsetof(CPUX86State
,
4624 xmm_regs
[reg
].ZMM_L(1)));
4626 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4627 cpu_env
, offsetof(CPUX86State
,
4628 xmm_regs
[reg
].ZMM_L(2)));
4630 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4631 cpu_env
, offsetof(CPUX86State
,
4632 xmm_regs
[reg
].ZMM_L(3)));
4635 if (ot
== MO_32
) { /* pinsrd */
4637 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[rm
]);
4639 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
4640 s
->mem_index
, MO_LEUL
);
4642 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
,
4643 offsetof(CPUX86State
,
4644 xmm_regs
[reg
].ZMM_L(val
& 3)));
4645 } else { /* pinsrq */
4646 #ifdef TARGET_X86_64
4648 gen_op_mov_v_reg(s
, ot
, s
->tmp1_i64
, rm
);
4650 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
4651 s
->mem_index
, MO_LEUQ
);
4653 tcg_gen_st_i64(s
->tmp1_i64
, cpu_env
,
4654 offsetof(CPUX86State
,
4655 xmm_regs
[reg
].ZMM_Q(val
& 1)));
4668 if ((op7
->flags
& SSE_OPF_MMX
) == 0) {
4671 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4673 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4675 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4676 gen_lea_modrm(env
, s
, modrm
);
4677 gen_ldq_env_A0(s
, op2_offset
);
4679 val
= x86_ldub_code(env
, s
);
4680 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4681 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4683 /* We only actually have one MMX instruction (palignr) */
4686 op7
->fn
[0].op1(cpu_env
, s
->ptr0
, s
->ptr1
,
4687 tcg_const_i32(val
));
4692 op1_offset
= ZMM_OFFSET(reg
);
4694 op2_offset
= ZMM_OFFSET(rm
| REX_B(s
));
4696 op2_offset
= offsetof(CPUX86State
, xmm_t0
);
4697 gen_lea_modrm(env
, s
, modrm
);
4698 gen_ldo_env_A0(s
, op2_offset
+ offsetof(ZMMReg
, ZMM_X(0)), true);
4701 val
= x86_ldub_code(env
, s
);
4702 if ((b
& 0xfc) == 0x60) { /* pcmpXstrX */
4703 set_cc_op(s
, CC_OP_EFLAGS
);
4705 if (s
->dflag
== MO_64
) {
4706 /* The helper must use entire 64-bit gp registers */
4711 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4712 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4713 op7
->fn
[b1
].op1(cpu_env
, s
->ptr0
, s
->ptr1
, tcg_const_i32(val
));
4714 if (op7
->flags
& SSE_OPF_CMP
) {
4715 set_cc_op(s
, CC_OP_EFLAGS
);
4720 /* Various integer extensions at 0f 3a f[0-f]. */
4721 b
= modrm
| (b1
<< 8);
4722 modrm
= x86_ldub_code(env
, s
);
4723 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
4726 case 0x3f0: /* rorx Gy,Ey, Ib */
4727 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI2
)
4728 || !(s
->prefix
& PREFIX_VEX
)
4732 ot
= mo_64_32(s
->dflag
);
4733 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
4734 b
= x86_ldub_code(env
, s
);
4736 tcg_gen_rotri_tl(s
->T0
, s
->T0
, b
& 63);
4738 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
4739 tcg_gen_rotri_i32(s
->tmp2_i32
, s
->tmp2_i32
, b
& 31);
4740 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
4742 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
4752 gen_unknown_opcode(env
, s
);
4756 /* generic MMX or SSE operation */
4758 case 0x70: /* pshufx insn */
4759 case 0xc6: /* pshufx insn */
4760 case 0xc2: /* compare insns */
4767 op1_offset
= ZMM_OFFSET(reg
);
4771 gen_lea_modrm(env
, s
, modrm
);
4772 op2_offset
= offsetof(CPUX86State
, xmm_t0
);
4774 if (sse_op_flags
& SSE_OPF_SCALAR
) {
4775 if (sse_op_flags
& SSE_OPF_CMP
) {
4776 /* ucomis[sd], comis[sd] */
4783 /* Most sse scalar operations. */
4786 } else if (b1
== 3) {
4795 gen_op_ld_v(s
, MO_32
, s
->T0
, s
->A0
);
4796 tcg_gen_st32_tl(s
->T0
, cpu_env
,
4797 offsetof(CPUX86State
, xmm_t0
.ZMM_L(0)));
4801 gen_ldq_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_D(0)));
4804 /* 128 bit access */
4805 gen_ldo_env_A0(s
, offsetof(CPUX86State
, xmm_t0
.ZMM_X(0)), true);
4809 rm
= (modrm
& 7) | REX_B(s
);
4810 op2_offset
= ZMM_OFFSET(rm
);
4814 op1_offset
= offsetof(CPUX86State
,fpregs
[reg
].mmx
);
4816 gen_lea_modrm(env
, s
, modrm
);
4817 op2_offset
= offsetof(CPUX86State
,mmx_t0
);
4818 gen_ldq_env_A0(s
, op2_offset
);
4821 op2_offset
= offsetof(CPUX86State
,fpregs
[rm
].mmx
);
4823 if (sse_op_flags
& SSE_OPF_3DNOW
) {
4824 /* 3DNow! data insns */
4825 val
= x86_ldub_code(env
, s
);
4826 SSEFunc_0_epp op_3dnow
= sse_op_table5
[val
];
4830 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4831 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4832 op_3dnow(cpu_env
, s
->ptr0
, s
->ptr1
);
4836 tcg_gen_addi_ptr(s
->ptr0
, cpu_env
, op1_offset
);
4837 tcg_gen_addi_ptr(s
->ptr1
, cpu_env
, op2_offset
);
4838 if (sse_op_flags
& SSE_OPF_SHUF
) {
4839 val
= x86_ldub_code(env
, s
);
4840 sse_op_fn
.op1i(s
->ptr0
, s
->ptr1
, tcg_const_i32(val
));
4841 } else if (b
== 0xf7) {
4842 /* maskmov : we must prepare A0 */
4846 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EDI
]);
4847 gen_extu(s
->aflag
, s
->A0
);
4848 gen_add_A0_ds_seg(s
);
4849 sse_op_fn
.op1t(cpu_env
, s
->ptr0
, s
->ptr1
, s
->A0
);
4850 } else if (b
== 0xc2) {
4851 /* compare insns, bits 7:3 (7:5 for AVX) are ignored */
4852 val
= x86_ldub_code(env
, s
) & 7;
4853 sse_op_table4
[val
][b1
](cpu_env
, s
->ptr0
, s
->ptr1
);
4855 sse_op_fn
.op1(cpu_env
, s
->ptr0
, s
->ptr1
);
4858 if (sse_op_flags
& SSE_OPF_CMP
) {
4859 set_cc_op(s
, CC_OP_EFLAGS
);
4864 /* convert one instruction. s->base.is_jmp is set if the translation must
4865 be stopped. NOTE(review): the function now returns bool, not "the next pc value". */
4866 static bool disas_insn(DisasContext
*s
, CPUState
*cpu
)
4868 CPUX86State
*env
= cpu
->env_ptr
;
4871 MemOp ot
, aflag
, dflag
;
4872 int modrm
, reg
, rm
, mod
, op
, opreg
, val
;
4873 bool orig_cc_op_dirty
= s
->cc_op_dirty
;
4874 CCOp orig_cc_op
= s
->cc_op
;
4875 target_ulong orig_pc_save
= s
->pc_save
;
4877 s
->pc
= s
->base
.pc_next
;
4879 #ifdef TARGET_X86_64
4884 s
->rip_offset
= 0; /* for relative ip address */
4888 switch (sigsetjmp(s
->jmpbuf
, 0)) {
4892 gen_exception_gpf(s
);
4895 /* Restore state that may affect the next instruction. */
4896 s
->pc
= s
->base
.pc_next
;
4898 * TODO: These save/restore can be removed after the table-based
4899 * decoder is complete; we will be decoding the insn completely
4900 * before any code generation that might affect these variables.
4902 s
->cc_op_dirty
= orig_cc_op_dirty
;
4903 s
->cc_op
= orig_cc_op
;
4904 s
->pc_save
= orig_pc_save
;
4906 s
->base
.num_insns
--;
4907 tcg_remove_ops_after(s
->prev_insn_end
);
4908 s
->base
.is_jmp
= DISAS_TOO_MANY
;
4911 g_assert_not_reached();
4916 if (first
) first
= false, limit
= getenv("LIMIT") ? atol(getenv("LIMIT")) : -1;
4917 bool use_new
= true;
4918 #ifdef CONFIG_USER_ONLY
4919 use_new
&= limit
> 0;
4922 s
->prefix
= prefixes
;
4923 b
= x86_ldub_code(env
, s
);
4924 /* Collect prefixes. */
4927 #ifndef CONFIG_USER_ONLY
4928 use_new
&= b
<= limit
;
4931 disas_insn_new(s
, cpu
, b
);
4936 b
= x86_ldub_code(env
, s
) + 0x100;
4937 #ifndef CONFIG_USER_ONLY
4938 use_new
&= b
<= limit
;
4941 disas_insn_new(s
, cpu
, b
+ 0x100);
4946 prefixes
|= PREFIX_REPZ
;
4947 prefixes
&= ~PREFIX_REPNZ
;
4950 prefixes
|= PREFIX_REPNZ
;
4951 prefixes
&= ~PREFIX_REPZ
;
4954 prefixes
|= PREFIX_LOCK
;
4975 prefixes
|= PREFIX_DATA
;
4978 prefixes
|= PREFIX_ADR
;
4980 #ifdef TARGET_X86_64
4984 prefixes
|= PREFIX_REX
;
4985 s
->vex_w
= (b
>> 3) & 1;
4986 s
->rex_r
= (b
& 0x4) << 1;
4987 s
->rex_x
= (b
& 0x2) << 2;
4988 s
->rex_b
= (b
& 0x1) << 3;
4993 case 0xc5: /* 2-byte VEX */
4994 case 0xc4: /* 3-byte VEX */
4996 /* VEX prefixes cannot be used except in 32-bit mode.
4997 Otherwise the instruction is LES or LDS. */
4998 if (CODE32(s
) && !VM86(s
)) {
4999 static const int pp_prefix
[4] = {
5000 0, PREFIX_DATA
, PREFIX_REPZ
, PREFIX_REPNZ
5002 int vex3
, vex2
= x86_ldub_code(env
, s
);
5004 if (!CODE64(s
) && (vex2
& 0xc0) != 0xc0) {
5005 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
5006 otherwise the instruction is LES or LDS. */
5007 s
->pc
--; /* rewind the advance_pc() x86_ldub_code() did */
5011 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
5012 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
5013 | PREFIX_LOCK
| PREFIX_DATA
| PREFIX_REX
)) {
5016 #ifdef TARGET_X86_64
5017 s
->rex_r
= (~vex2
>> 4) & 8;
5020 /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */
5022 b
= x86_ldub_code(env
, s
) | 0x100;
5024 /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */
5025 vex3
= x86_ldub_code(env
, s
);
5026 #ifdef TARGET_X86_64
5027 s
->rex_x
= (~vex2
>> 3) & 8;
5028 s
->rex_b
= (~vex2
>> 2) & 8;
5030 s
->vex_w
= (vex3
>> 7) & 1;
5031 switch (vex2
& 0x1f) {
5032 case 0x01: /* Implied 0f leading opcode bytes. */
5033 b
= x86_ldub_code(env
, s
) | 0x100;
5035 case 0x02: /* Implied 0f 38 leading opcode bytes. */
5038 case 0x03: /* Implied 0f 3a leading opcode bytes. */
5041 default: /* Reserved for future use. */
5045 s
->vex_v
= (~vex3
>> 3) & 0xf;
5046 s
->vex_l
= (vex3
>> 2) & 1;
5047 prefixes
|= pp_prefix
[vex3
& 3] | PREFIX_VEX
;
5052 /* Post-process prefixes. */
5054 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
5055 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
5056 over 0x66 if both are present. */
5057 dflag
= (REX_W(s
) ? MO_64
: prefixes
& PREFIX_DATA
? MO_16
: MO_32
);
5058 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
5059 aflag
= (prefixes
& PREFIX_ADR
? MO_32
: MO_64
);
5061 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
5062 if (CODE32(s
) ^ ((prefixes
& PREFIX_DATA
) != 0)) {
5067 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
5068 if (CODE32(s
) ^ ((prefixes
& PREFIX_ADR
) != 0)) {
5075 s
->prefix
= prefixes
;
5079 /* now check op code */
5081 /**************************/
5096 ot
= mo_b_d(b
, dflag
);
5099 case 0: /* OP Ev, Gv */
5100 modrm
= x86_ldub_code(env
, s
);
5101 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5102 mod
= (modrm
>> 6) & 3;
5103 rm
= (modrm
& 7) | REX_B(s
);
5105 gen_lea_modrm(env
, s
, modrm
);
5107 } else if (op
== OP_XORL
&& rm
== reg
) {
5109 /* xor reg, reg optimisation */
5110 set_cc_op(s
, CC_OP_CLR
);
5111 tcg_gen_movi_tl(s
->T0
, 0);
5112 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5117 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5118 gen_op(s
, op
, ot
, opreg
);
5120 case 1: /* OP Gv, Ev */
5121 modrm
= x86_ldub_code(env
, s
);
5122 mod
= (modrm
>> 6) & 3;
5123 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5124 rm
= (modrm
& 7) | REX_B(s
);
5126 gen_lea_modrm(env
, s
, modrm
);
5127 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5128 } else if (op
== OP_XORL
&& rm
== reg
) {
5131 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
5133 gen_op(s
, op
, ot
, reg
);
5135 case 2: /* OP A, Iv */
5136 val
= insn_get(env
, s
, ot
);
5137 tcg_gen_movi_tl(s
->T1
, val
);
5138 gen_op(s
, op
, ot
, OR_EAX
);
5148 case 0x80: /* GRP1 */
5154 ot
= mo_b_d(b
, dflag
);
5156 modrm
= x86_ldub_code(env
, s
);
5157 mod
= (modrm
>> 6) & 3;
5158 rm
= (modrm
& 7) | REX_B(s
);
5159 op
= (modrm
>> 3) & 7;
5165 s
->rip_offset
= insn_const_size(ot
);
5166 gen_lea_modrm(env
, s
, modrm
);
5177 val
= insn_get(env
, s
, ot
);
5180 val
= (int8_t)insn_get(env
, s
, MO_8
);
5183 tcg_gen_movi_tl(s
->T1
, val
);
5184 gen_op(s
, op
, ot
, opreg
);
5188 /**************************/
5189 /* inc, dec, and other misc arith */
5190 case 0x40 ... 0x47: /* inc Gv */
5192 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), 1);
5194 case 0x48 ... 0x4f: /* dec Gv */
5196 gen_inc(s
, ot
, OR_EAX
+ (b
& 7), -1);
5198 case 0xf6: /* GRP3 */
5200 ot
= mo_b_d(b
, dflag
);
5202 modrm
= x86_ldub_code(env
, s
);
5203 mod
= (modrm
>> 6) & 3;
5204 rm
= (modrm
& 7) | REX_B(s
);
5205 op
= (modrm
>> 3) & 7;
5208 s
->rip_offset
= insn_const_size(ot
);
5210 gen_lea_modrm(env
, s
, modrm
);
5211 /* For those below that handle locked memory, don't load here. */
5212 if (!(s
->prefix
& PREFIX_LOCK
)
5214 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5217 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5222 val
= insn_get(env
, s
, ot
);
5223 tcg_gen_movi_tl(s
->T1
, val
);
5224 gen_op_testl_T0_T1_cc(s
);
5225 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5228 if (s
->prefix
& PREFIX_LOCK
) {
5232 tcg_gen_movi_tl(s
->T0
, ~0);
5233 tcg_gen_atomic_xor_fetch_tl(s
->T0
, s
->A0
, s
->T0
,
5234 s
->mem_index
, ot
| MO_LE
);
5236 tcg_gen_not_tl(s
->T0
, s
->T0
);
5238 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5240 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5245 if (s
->prefix
& PREFIX_LOCK
) {
5247 TCGv a0
, t0
, t1
, t2
;
5252 a0
= tcg_temp_local_new();
5253 t0
= tcg_temp_local_new();
5254 label1
= gen_new_label();
5256 tcg_gen_mov_tl(a0
, s
->A0
);
5257 tcg_gen_mov_tl(t0
, s
->T0
);
5259 gen_set_label(label1
);
5260 t1
= tcg_temp_new();
5261 t2
= tcg_temp_new();
5262 tcg_gen_mov_tl(t2
, t0
);
5263 tcg_gen_neg_tl(t1
, t0
);
5264 tcg_gen_atomic_cmpxchg_tl(t0
, a0
, t0
, t1
,
5265 s
->mem_index
, ot
| MO_LE
);
5267 tcg_gen_brcond_tl(TCG_COND_NE
, t0
, t2
, label1
);
5271 tcg_gen_mov_tl(s
->T0
, t0
);
5274 tcg_gen_neg_tl(s
->T0
, s
->T0
);
5276 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5278 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5281 gen_op_update_neg_cc(s
);
5282 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5287 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
5288 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
5289 tcg_gen_ext8u_tl(s
->T1
, s
->T1
);
5290 /* XXX: use 32 bit mul which could be faster */
5291 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
5292 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
5293 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5294 tcg_gen_andi_tl(cpu_cc_src
, s
->T0
, 0xff00);
5295 set_cc_op(s
, CC_OP_MULB
);
5298 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
5299 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5300 tcg_gen_ext16u_tl(s
->T1
, s
->T1
);
5301 /* XXX: use 32 bit mul which could be faster */
5302 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
5303 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
5304 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5305 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
5306 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
5307 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
5308 set_cc_op(s
, CC_OP_MULW
);
5312 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5313 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
5314 tcg_gen_mulu2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
5315 s
->tmp2_i32
, s
->tmp3_i32
);
5316 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
5317 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
5318 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5319 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
5320 set_cc_op(s
, CC_OP_MULL
);
5322 #ifdef TARGET_X86_64
5324 tcg_gen_mulu2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
5325 s
->T0
, cpu_regs
[R_EAX
]);
5326 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5327 tcg_gen_mov_tl(cpu_cc_src
, cpu_regs
[R_EDX
]);
5328 set_cc_op(s
, CC_OP_MULQ
);
5336 gen_op_mov_v_reg(s
, MO_8
, s
->T1
, R_EAX
);
5337 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
5338 tcg_gen_ext8s_tl(s
->T1
, s
->T1
);
5339 /* XXX: use 32 bit mul which could be faster */
5340 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
5341 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
5342 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5343 tcg_gen_ext8s_tl(s
->tmp0
, s
->T0
);
5344 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
5345 set_cc_op(s
, CC_OP_MULB
);
5348 gen_op_mov_v_reg(s
, MO_16
, s
->T1
, R_EAX
);
5349 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5350 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
5351 /* XXX: use 32 bit mul which could be faster */
5352 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
5353 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
5354 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5355 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
5356 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
5357 tcg_gen_shri_tl(s
->T0
, s
->T0
, 16);
5358 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
5359 set_cc_op(s
, CC_OP_MULW
);
5363 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5364 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, cpu_regs
[R_EAX
]);
5365 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
5366 s
->tmp2_i32
, s
->tmp3_i32
);
5367 tcg_gen_extu_i32_tl(cpu_regs
[R_EAX
], s
->tmp2_i32
);
5368 tcg_gen_extu_i32_tl(cpu_regs
[R_EDX
], s
->tmp3_i32
);
5369 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
5370 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5371 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
5372 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
5373 set_cc_op(s
, CC_OP_MULL
);
5375 #ifdef TARGET_X86_64
5377 tcg_gen_muls2_i64(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
],
5378 s
->T0
, cpu_regs
[R_EAX
]);
5379 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[R_EAX
]);
5380 tcg_gen_sari_tl(cpu_cc_src
, cpu_regs
[R_EAX
], 63);
5381 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, cpu_regs
[R_EDX
]);
5382 set_cc_op(s
, CC_OP_MULQ
);
5390 gen_helper_divb_AL(cpu_env
, s
->T0
);
5393 gen_helper_divw_AX(cpu_env
, s
->T0
);
5397 gen_helper_divl_EAX(cpu_env
, s
->T0
);
5399 #ifdef TARGET_X86_64
5401 gen_helper_divq_EAX(cpu_env
, s
->T0
);
5409 gen_helper_idivb_AL(cpu_env
, s
->T0
);
5412 gen_helper_idivw_AX(cpu_env
, s
->T0
);
5416 gen_helper_idivl_EAX(cpu_env
, s
->T0
);
5418 #ifdef TARGET_X86_64
5420 gen_helper_idivq_EAX(cpu_env
, s
->T0
);
5430 case 0xfe: /* GRP4 */
5431 case 0xff: /* GRP5 */
5432 ot
= mo_b_d(b
, dflag
);
5434 modrm
= x86_ldub_code(env
, s
);
5435 mod
= (modrm
>> 6) & 3;
5436 rm
= (modrm
& 7) | REX_B(s
);
5437 op
= (modrm
>> 3) & 7;
5438 if (op
>= 2 && b
== 0xfe) {
5442 if (op
== 2 || op
== 4) {
5443 /* operand size for jumps is 64 bit */
5445 } else if (op
== 3 || op
== 5) {
5446 ot
= dflag
!= MO_16
? MO_32
+ REX_W(s
) : MO_16
;
5447 } else if (op
== 6) {
5448 /* default push size is 64 bit */
5449 ot
= mo_pushpop(s
, dflag
);
5453 gen_lea_modrm(env
, s
, modrm
);
5454 if (op
>= 2 && op
!= 3 && op
!= 5)
5455 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
5457 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5461 case 0: /* inc Ev */
5466 gen_inc(s
, ot
, opreg
, 1);
5468 case 1: /* dec Ev */
5473 gen_inc(s
, ot
, opreg
, -1);
5475 case 2: /* call Ev */
5476 /* XXX: optimize if memory (no 'and' is necessary) */
5477 if (dflag
== MO_16
) {
5478 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5480 gen_push_v(s
, eip_next_tl(s
));
5481 gen_op_jmp_v(s
, s
->T0
);
5483 s
->base
.is_jmp
= DISAS_JUMP
;
5485 case 3: /* lcall Ev */
5489 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5490 gen_add_A0_im(s
, 1 << ot
);
5491 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
5493 if (PE(s
) && !VM86(s
)) {
5494 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5495 gen_helper_lcall_protected(cpu_env
, s
->tmp2_i32
, s
->T1
,
5496 tcg_constant_i32(dflag
- 1),
5499 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5500 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5501 gen_helper_lcall_real(cpu_env
, s
->tmp2_i32
, s
->tmp3_i32
,
5502 tcg_constant_i32(dflag
- 1),
5505 s
->base
.is_jmp
= DISAS_JUMP
;
5507 case 4: /* jmp Ev */
5508 if (dflag
== MO_16
) {
5509 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5511 gen_op_jmp_v(s
, s
->T0
);
5513 s
->base
.is_jmp
= DISAS_JUMP
;
5515 case 5: /* ljmp Ev */
5519 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5520 gen_add_A0_im(s
, 1 << ot
);
5521 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
5523 if (PE(s
) && !VM86(s
)) {
5524 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5525 gen_helper_ljmp_protected(cpu_env
, s
->tmp2_i32
, s
->T1
,
5528 gen_op_movl_seg_T0_vm(s
, R_CS
);
5529 gen_op_jmp_v(s
, s
->T1
);
5531 s
->base
.is_jmp
= DISAS_JUMP
;
5533 case 6: /* push Ev */
5534 gen_push_v(s
, s
->T0
);
5541 case 0x84: /* test Ev, Gv */
5543 ot
= mo_b_d(b
, dflag
);
5545 modrm
= x86_ldub_code(env
, s
);
5546 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5548 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5549 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5550 gen_op_testl_T0_T1_cc(s
);
5551 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5554 case 0xa8: /* test eAX, Iv */
5556 ot
= mo_b_d(b
, dflag
);
5557 val
= insn_get(env
, s
, ot
);
5559 gen_op_mov_v_reg(s
, ot
, s
->T0
, OR_EAX
);
5560 tcg_gen_movi_tl(s
->T1
, val
);
5561 gen_op_testl_T0_T1_cc(s
);
5562 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
5565 case 0x98: /* CWDE/CBW */
5567 #ifdef TARGET_X86_64
5569 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
5570 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
5571 gen_op_mov_reg_v(s
, MO_64
, R_EAX
, s
->T0
);
5575 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
5576 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5577 gen_op_mov_reg_v(s
, MO_32
, R_EAX
, s
->T0
);
5580 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_EAX
);
5581 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
5582 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
5588 case 0x99: /* CDQ/CWD */
5590 #ifdef TARGET_X86_64
5592 gen_op_mov_v_reg(s
, MO_64
, s
->T0
, R_EAX
);
5593 tcg_gen_sari_tl(s
->T0
, s
->T0
, 63);
5594 gen_op_mov_reg_v(s
, MO_64
, R_EDX
, s
->T0
);
5598 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, R_EAX
);
5599 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
5600 tcg_gen_sari_tl(s
->T0
, s
->T0
, 31);
5601 gen_op_mov_reg_v(s
, MO_32
, R_EDX
, s
->T0
);
5604 gen_op_mov_v_reg(s
, MO_16
, s
->T0
, R_EAX
);
5605 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5606 tcg_gen_sari_tl(s
->T0
, s
->T0
, 15);
5607 gen_op_mov_reg_v(s
, MO_16
, R_EDX
, s
->T0
);
5613 case 0x1af: /* imul Gv, Ev */
5614 case 0x69: /* imul Gv, Ev, I */
5617 modrm
= x86_ldub_code(env
, s
);
5618 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5620 s
->rip_offset
= insn_const_size(ot
);
5623 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5625 val
= insn_get(env
, s
, ot
);
5626 tcg_gen_movi_tl(s
->T1
, val
);
5627 } else if (b
== 0x6b) {
5628 val
= (int8_t)insn_get(env
, s
, MO_8
);
5629 tcg_gen_movi_tl(s
->T1
, val
);
5631 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
5634 #ifdef TARGET_X86_64
5636 tcg_gen_muls2_i64(cpu_regs
[reg
], s
->T1
, s
->T0
, s
->T1
);
5637 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5638 tcg_gen_sari_tl(cpu_cc_src
, cpu_cc_dst
, 63);
5639 tcg_gen_sub_tl(cpu_cc_src
, cpu_cc_src
, s
->T1
);
5643 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
5644 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
5645 tcg_gen_muls2_i32(s
->tmp2_i32
, s
->tmp3_i32
,
5646 s
->tmp2_i32
, s
->tmp3_i32
);
5647 tcg_gen_extu_i32_tl(cpu_regs
[reg
], s
->tmp2_i32
);
5648 tcg_gen_sari_i32(s
->tmp2_i32
, s
->tmp2_i32
, 31);
5649 tcg_gen_mov_tl(cpu_cc_dst
, cpu_regs
[reg
]);
5650 tcg_gen_sub_i32(s
->tmp2_i32
, s
->tmp2_i32
, s
->tmp3_i32
);
5651 tcg_gen_extu_i32_tl(cpu_cc_src
, s
->tmp2_i32
);
5654 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5655 tcg_gen_ext16s_tl(s
->T1
, s
->T1
);
5656 /* XXX: use 32 bit mul which could be faster */
5657 tcg_gen_mul_tl(s
->T0
, s
->T0
, s
->T1
);
5658 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
5659 tcg_gen_ext16s_tl(s
->tmp0
, s
->T0
);
5660 tcg_gen_sub_tl(cpu_cc_src
, s
->T0
, s
->tmp0
);
5661 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5664 set_cc_op(s
, CC_OP_MULB
+ ot
);
5667 case 0x1c1: /* xadd Ev, Gv */
5668 ot
= mo_b_d(b
, dflag
);
5669 modrm
= x86_ldub_code(env
, s
);
5670 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5671 mod
= (modrm
>> 6) & 3;
5672 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
5674 rm
= (modrm
& 7) | REX_B(s
);
5675 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
5676 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
5677 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5678 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5680 gen_lea_modrm(env
, s
, modrm
);
5681 if (s
->prefix
& PREFIX_LOCK
) {
5682 tcg_gen_atomic_fetch_add_tl(s
->T1
, s
->A0
, s
->T0
,
5683 s
->mem_index
, ot
| MO_LE
);
5684 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
5686 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
5687 tcg_gen_add_tl(s
->T0
, s
->T0
, s
->T1
);
5688 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5690 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
5692 gen_op_update2_cc(s
);
5693 set_cc_op(s
, CC_OP_ADDB
+ ot
);
5696 case 0x1b1: /* cmpxchg Ev, Gv */
5698 TCGv oldv
, newv
, cmpv
;
5700 ot
= mo_b_d(b
, dflag
);
5701 modrm
= x86_ldub_code(env
, s
);
5702 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5703 mod
= (modrm
>> 6) & 3;
5704 oldv
= tcg_temp_new();
5705 newv
= tcg_temp_new();
5706 cmpv
= tcg_temp_new();
5707 gen_op_mov_v_reg(s
, ot
, newv
, reg
);
5708 tcg_gen_mov_tl(cmpv
, cpu_regs
[R_EAX
]);
5710 if (s
->prefix
& PREFIX_LOCK
) {
5714 gen_lea_modrm(env
, s
, modrm
);
5715 tcg_gen_atomic_cmpxchg_tl(oldv
, s
->A0
, cmpv
, newv
,
5716 s
->mem_index
, ot
| MO_LE
);
5717 gen_op_mov_reg_v(s
, ot
, R_EAX
, oldv
);
5720 rm
= (modrm
& 7) | REX_B(s
);
5721 gen_op_mov_v_reg(s
, ot
, oldv
, rm
);
5723 gen_lea_modrm(env
, s
, modrm
);
5724 gen_op_ld_v(s
, ot
, oldv
, s
->A0
);
5725 rm
= 0; /* avoid warning */
5729 /* store value = (old == cmp ? new : old); */
5730 tcg_gen_movcond_tl(TCG_COND_EQ
, newv
, oldv
, cmpv
, newv
, oldv
);
5732 gen_op_mov_reg_v(s
, ot
, R_EAX
, oldv
);
5733 gen_op_mov_reg_v(s
, ot
, rm
, newv
);
5735 /* Perform an unconditional store cycle like physical cpu;
5736 must be before changing accumulator to ensure
5737 idempotency if the store faults and the instruction
5739 gen_op_st_v(s
, ot
, newv
, s
->A0
);
5740 gen_op_mov_reg_v(s
, ot
, R_EAX
, oldv
);
5743 tcg_gen_mov_tl(cpu_cc_src
, oldv
);
5744 tcg_gen_mov_tl(s
->cc_srcT
, cmpv
);
5745 tcg_gen_sub_tl(cpu_cc_dst
, cmpv
, oldv
);
5746 set_cc_op(s
, CC_OP_SUBB
+ ot
);
5747 tcg_temp_free(oldv
);
5748 tcg_temp_free(newv
);
5749 tcg_temp_free(cmpv
);
5752 case 0x1c7: /* cmpxchg8b */
5753 modrm
= x86_ldub_code(env
, s
);
5754 mod
= (modrm
>> 6) & 3;
5755 switch ((modrm
>> 3) & 7) {
5756 case 1: /* CMPXCHG8, CMPXCHG16 */
5760 #ifdef TARGET_X86_64
5761 if (dflag
== MO_64
) {
5762 if (!(s
->cpuid_ext_features
& CPUID_EXT_CX16
)) {
5765 gen_lea_modrm(env
, s
, modrm
);
5766 if ((s
->prefix
& PREFIX_LOCK
) &&
5767 (tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
5768 gen_helper_cmpxchg16b(cpu_env
, s
->A0
);
5770 gen_helper_cmpxchg16b_unlocked(cpu_env
, s
->A0
);
5772 set_cc_op(s
, CC_OP_EFLAGS
);
5776 if (!(s
->cpuid_features
& CPUID_CX8
)) {
5779 gen_lea_modrm(env
, s
, modrm
);
5780 if ((s
->prefix
& PREFIX_LOCK
) &&
5781 (tb_cflags(s
->base
.tb
) & CF_PARALLEL
)) {
5782 gen_helper_cmpxchg8b(cpu_env
, s
->A0
);
5784 gen_helper_cmpxchg8b_unlocked(cpu_env
, s
->A0
);
5786 set_cc_op(s
, CC_OP_EFLAGS
);
5789 case 7: /* RDSEED */
5790 case 6: /* RDRAND */
5792 (s
->prefix
& (PREFIX_LOCK
| PREFIX_REPZ
| PREFIX_REPNZ
)) ||
5793 !(s
->cpuid_ext_features
& CPUID_EXT_RDRAND
)) {
5796 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
5798 s
->base
.is_jmp
= DISAS_TOO_MANY
;
5800 gen_helper_rdrand(s
->T0
, cpu_env
);
5801 rm
= (modrm
& 7) | REX_B(s
);
5802 gen_op_mov_reg_v(s
, dflag
, rm
, s
->T0
);
5803 set_cc_op(s
, CC_OP_EFLAGS
);
5811 /**************************/
5813 case 0x50 ... 0x57: /* push */
5814 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, (b
& 7) | REX_B(s
));
5815 gen_push_v(s
, s
->T0
);
5817 case 0x58 ... 0x5f: /* pop */
5819 /* NOTE: order is important for pop %sp */
5820 gen_pop_update(s
, ot
);
5821 gen_op_mov_reg_v(s
, ot
, (b
& 7) | REX_B(s
), s
->T0
);
5823 case 0x60: /* pusha */
5828 case 0x61: /* popa */
5833 case 0x68: /* push Iv */
5835 ot
= mo_pushpop(s
, dflag
);
5837 val
= insn_get(env
, s
, ot
);
5839 val
= (int8_t)insn_get(env
, s
, MO_8
);
5840 tcg_gen_movi_tl(s
->T0
, val
);
5841 gen_push_v(s
, s
->T0
);
5843 case 0x8f: /* pop Ev */
5844 modrm
= x86_ldub_code(env
, s
);
5845 mod
= (modrm
>> 6) & 3;
5848 /* NOTE: order is important for pop %sp */
5849 gen_pop_update(s
, ot
);
5850 rm
= (modrm
& 7) | REX_B(s
);
5851 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
5853 /* NOTE: order is important too for MMU exceptions */
5854 s
->popl_esp_hack
= 1 << ot
;
5855 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5856 s
->popl_esp_hack
= 0;
5857 gen_pop_update(s
, ot
);
5860 case 0xc8: /* enter */
5863 val
= x86_lduw_code(env
, s
);
5864 level
= x86_ldub_code(env
, s
);
5865 gen_enter(s
, val
, level
);
5868 case 0xc9: /* leave */
5871 case 0x06: /* push es */
5872 case 0x0e: /* push cs */
5873 case 0x16: /* push ss */
5874 case 0x1e: /* push ds */
5877 gen_op_movl_T0_seg(s
, b
>> 3);
5878 gen_push_v(s
, s
->T0
);
5880 case 0x1a0: /* push fs */
5881 case 0x1a8: /* push gs */
5882 gen_op_movl_T0_seg(s
, (b
>> 3) & 7);
5883 gen_push_v(s
, s
->T0
);
5885 case 0x07: /* pop es */
5886 case 0x17: /* pop ss */
5887 case 0x1f: /* pop ds */
5892 gen_movl_seg_T0(s
, reg
);
5893 gen_pop_update(s
, ot
);
5895 case 0x1a1: /* pop fs */
5896 case 0x1a9: /* pop gs */
5898 gen_movl_seg_T0(s
, (b
>> 3) & 7);
5899 gen_pop_update(s
, ot
);
5902 /**************************/
5905 case 0x89: /* mov Gv, Ev */
5906 ot
= mo_b_d(b
, dflag
);
5907 modrm
= x86_ldub_code(env
, s
);
5908 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5910 /* generate a generic store */
5911 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
5914 case 0xc7: /* mov Ev, Iv */
5915 ot
= mo_b_d(b
, dflag
);
5916 modrm
= x86_ldub_code(env
, s
);
5917 mod
= (modrm
>> 6) & 3;
5919 s
->rip_offset
= insn_const_size(ot
);
5920 gen_lea_modrm(env
, s
, modrm
);
5922 val
= insn_get(env
, s
, ot
);
5923 tcg_gen_movi_tl(s
->T0
, val
);
5925 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
5927 gen_op_mov_reg_v(s
, ot
, (modrm
& 7) | REX_B(s
), s
->T0
);
5931 case 0x8b: /* mov Ev, Gv */
5932 ot
= mo_b_d(b
, dflag
);
5933 modrm
= x86_ldub_code(env
, s
);
5934 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5936 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
5937 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
5939 case 0x8e: /* mov seg, Gv */
5940 modrm
= x86_ldub_code(env
, s
);
5941 reg
= (modrm
>> 3) & 7;
5942 if (reg
>= 6 || reg
== R_CS
)
5944 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
5945 gen_movl_seg_T0(s
, reg
);
5947 case 0x8c: /* mov Gv, seg */
5948 modrm
= x86_ldub_code(env
, s
);
5949 reg
= (modrm
>> 3) & 7;
5950 mod
= (modrm
>> 6) & 3;
5953 gen_op_movl_T0_seg(s
, reg
);
5954 ot
= mod
== 3 ? dflag
: MO_16
;
5955 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
5958 case 0x1b6: /* movzbS Gv, Eb */
5959 case 0x1b7: /* movzwS Gv, Eb */
5960 case 0x1be: /* movsbS Gv, Eb */
5961 case 0x1bf: /* movswS Gv, Eb */
5966 /* d_ot is the size of destination */
5968 /* ot is the size of source */
5969 ot
= (b
& 1) + MO_8
;
5970 /* s_ot is the sign+size of source */
5971 s_ot
= b
& 8 ? MO_SIGN
| ot
: ot
;
5973 modrm
= x86_ldub_code(env
, s
);
5974 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
5975 mod
= (modrm
>> 6) & 3;
5976 rm
= (modrm
& 7) | REX_B(s
);
5979 if (s_ot
== MO_SB
&& byte_reg_is_xH(s
, rm
)) {
5980 tcg_gen_sextract_tl(s
->T0
, cpu_regs
[rm
- 4], 8, 8);
5982 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
5985 tcg_gen_ext8u_tl(s
->T0
, s
->T0
);
5988 tcg_gen_ext8s_tl(s
->T0
, s
->T0
);
5991 tcg_gen_ext16u_tl(s
->T0
, s
->T0
);
5995 tcg_gen_ext16s_tl(s
->T0
, s
->T0
);
5999 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
6001 gen_lea_modrm(env
, s
, modrm
);
6002 gen_op_ld_v(s
, s_ot
, s
->T0
, s
->A0
);
6003 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
6008 case 0x8d: /* lea */
6009 modrm
= x86_ldub_code(env
, s
);
6010 mod
= (modrm
>> 6) & 3;
6013 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6015 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6016 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
6017 gen_lea_v_seg(s
, s
->aflag
, ea
, -1, -1);
6018 gen_op_mov_reg_v(s
, dflag
, reg
, s
->A0
);
6022 case 0xa0: /* mov EAX, Ov */
6024 case 0xa2: /* mov Ov, EAX */
6027 target_ulong offset_addr
;
6029 ot
= mo_b_d(b
, dflag
);
6030 offset_addr
= insn_get_addr(env
, s
, s
->aflag
);
6031 tcg_gen_movi_tl(s
->A0
, offset_addr
);
6032 gen_add_A0_ds_seg(s
);
6034 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
6035 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T0
);
6037 gen_op_mov_v_reg(s
, ot
, s
->T0
, R_EAX
);
6038 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
6042 case 0xd7: /* xlat */
6043 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EBX
]);
6044 tcg_gen_ext8u_tl(s
->T0
, cpu_regs
[R_EAX
]);
6045 tcg_gen_add_tl(s
->A0
, s
->A0
, s
->T0
);
6046 gen_extu(s
->aflag
, s
->A0
);
6047 gen_add_A0_ds_seg(s
);
6048 gen_op_ld_v(s
, MO_8
, s
->T0
, s
->A0
);
6049 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
6051 case 0xb0 ... 0xb7: /* mov R, Ib */
6052 val
= insn_get(env
, s
, MO_8
);
6053 tcg_gen_movi_tl(s
->T0
, val
);
6054 gen_op_mov_reg_v(s
, MO_8
, (b
& 7) | REX_B(s
), s
->T0
);
6056 case 0xb8 ... 0xbf: /* mov R, Iv */
6057 #ifdef TARGET_X86_64
6058 if (dflag
== MO_64
) {
6061 tmp
= x86_ldq_code(env
, s
);
6062 reg
= (b
& 7) | REX_B(s
);
6063 tcg_gen_movi_tl(s
->T0
, tmp
);
6064 gen_op_mov_reg_v(s
, MO_64
, reg
, s
->T0
);
6069 val
= insn_get(env
, s
, ot
);
6070 reg
= (b
& 7) | REX_B(s
);
6071 tcg_gen_movi_tl(s
->T0
, val
);
6072 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
6076 case 0x91 ... 0x97: /* xchg R, EAX */
6079 reg
= (b
& 7) | REX_B(s
);
6083 case 0x87: /* xchg Ev, Gv */
6084 ot
= mo_b_d(b
, dflag
);
6085 modrm
= x86_ldub_code(env
, s
);
6086 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6087 mod
= (modrm
>> 6) & 3;
6089 rm
= (modrm
& 7) | REX_B(s
);
6091 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
6092 gen_op_mov_v_reg(s
, ot
, s
->T1
, rm
);
6093 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
6094 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
6096 gen_lea_modrm(env
, s
, modrm
);
6097 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
6098 /* for xchg, lock is implicit */
6099 tcg_gen_atomic_xchg_tl(s
->T1
, s
->A0
, s
->T0
,
6100 s
->mem_index
, ot
| MO_LE
);
6101 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
6104 case 0xc4: /* les Gv */
6105 /* In CODE64 this is VEX3; see above. */
6108 case 0xc5: /* lds Gv */
6109 /* In CODE64 this is VEX2; see above. */
6112 case 0x1b2: /* lss Gv */
6115 case 0x1b4: /* lfs Gv */
6118 case 0x1b5: /* lgs Gv */
6121 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
6122 modrm
= x86_ldub_code(env
, s
);
6123 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6124 mod
= (modrm
>> 6) & 3;
6127 gen_lea_modrm(env
, s
, modrm
);
6128 gen_op_ld_v(s
, ot
, s
->T1
, s
->A0
);
6129 gen_add_A0_im(s
, 1 << ot
);
6130 /* load the segment first to handle exceptions properly */
6131 gen_op_ld_v(s
, MO_16
, s
->T0
, s
->A0
);
6132 gen_movl_seg_T0(s
, op
);
6133 /* then put the data */
6134 gen_op_mov_reg_v(s
, ot
, reg
, s
->T1
);
6137 /************************/
6145 ot
= mo_b_d(b
, dflag
);
6146 modrm
= x86_ldub_code(env
, s
);
6147 mod
= (modrm
>> 6) & 3;
6148 op
= (modrm
>> 3) & 7;
6154 gen_lea_modrm(env
, s
, modrm
);
6157 opreg
= (modrm
& 7) | REX_B(s
);
6162 gen_shift(s
, op
, ot
, opreg
, OR_ECX
);
6165 shift
= x86_ldub_code(env
, s
);
6167 gen_shifti(s
, op
, ot
, opreg
, shift
);
6182 case 0x1a4: /* shld imm */
6186 case 0x1a5: /* shld cl */
6190 case 0x1ac: /* shrd imm */
6194 case 0x1ad: /* shrd cl */
6199 modrm
= x86_ldub_code(env
, s
);
6200 mod
= (modrm
>> 6) & 3;
6201 rm
= (modrm
& 7) | REX_B(s
);
6202 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
6204 gen_lea_modrm(env
, s
, modrm
);
6209 gen_op_mov_v_reg(s
, ot
, s
->T1
, reg
);
6212 TCGv imm
= tcg_const_tl(x86_ldub_code(env
, s
));
6213 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, imm
);
6216 gen_shiftd_rm_T1(s
, ot
, opreg
, op
, cpu_regs
[R_ECX
]);
6220 /************************/
6224 bool update_fip
= true;
6226 if (s
->flags
& (HF_EM_MASK
| HF_TS_MASK
)) {
6227 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
6228 /* XXX: what to do if illegal op ? */
6229 gen_exception(s
, EXCP07_PREX
);
6232 modrm
= x86_ldub_code(env
, s
);
6233 mod
= (modrm
>> 6) & 3;
6235 op
= ((b
& 7) << 3) | ((modrm
>> 3) & 7);
6238 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
6239 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
6240 TCGv last_addr
= tcg_temp_new();
6241 bool update_fdp
= true;
6243 tcg_gen_mov_tl(last_addr
, ea
);
6244 gen_lea_v_seg(s
, s
->aflag
, ea
, a
.def_seg
, s
->override
);
6247 case 0x00 ... 0x07: /* fxxxs */
6248 case 0x10 ... 0x17: /* fixxxl */
6249 case 0x20 ... 0x27: /* fxxxl */
6250 case 0x30 ... 0x37: /* fixxx */
6257 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
6258 s
->mem_index
, MO_LEUL
);
6259 gen_helper_flds_FT0(cpu_env
, s
->tmp2_i32
);
6262 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
6263 s
->mem_index
, MO_LEUL
);
6264 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
6267 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
6268 s
->mem_index
, MO_LEUQ
);
6269 gen_helper_fldl_FT0(cpu_env
, s
->tmp1_i64
);
6273 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
6274 s
->mem_index
, MO_LESW
);
6275 gen_helper_fildl_FT0(cpu_env
, s
->tmp2_i32
);
6279 gen_helper_fp_arith_ST0_FT0(op1
);
6281 /* fcomp needs pop */
6282 gen_helper_fpop(cpu_env
);
6286 case 0x08: /* flds */
6287 case 0x0a: /* fsts */
6288 case 0x0b: /* fstps */
6289 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
6290 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
6291 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
6296 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
6297 s
->mem_index
, MO_LEUL
);
6298 gen_helper_flds_ST0(cpu_env
, s
->tmp2_i32
);
6301 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
6302 s
->mem_index
, MO_LEUL
);
6303 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
6306 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
6307 s
->mem_index
, MO_LEUQ
);
6308 gen_helper_fldl_ST0(cpu_env
, s
->tmp1_i64
);
6312 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
6313 s
->mem_index
, MO_LESW
);
6314 gen_helper_fildl_ST0(cpu_env
, s
->tmp2_i32
);
6319 /* XXX: the corresponding CPUID bit must be tested ! */
6322 gen_helper_fisttl_ST0(s
->tmp2_i32
, cpu_env
);
6323 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6324 s
->mem_index
, MO_LEUL
);
6327 gen_helper_fisttll_ST0(s
->tmp1_i64
, cpu_env
);
6328 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
6329 s
->mem_index
, MO_LEUQ
);
6333 gen_helper_fistt_ST0(s
->tmp2_i32
, cpu_env
);
6334 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6335 s
->mem_index
, MO_LEUW
);
6338 gen_helper_fpop(cpu_env
);
6343 gen_helper_fsts_ST0(s
->tmp2_i32
, cpu_env
);
6344 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6345 s
->mem_index
, MO_LEUL
);
6348 gen_helper_fistl_ST0(s
->tmp2_i32
, cpu_env
);
6349 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6350 s
->mem_index
, MO_LEUL
);
6353 gen_helper_fstl_ST0(s
->tmp1_i64
, cpu_env
);
6354 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
6355 s
->mem_index
, MO_LEUQ
);
6359 gen_helper_fist_ST0(s
->tmp2_i32
, cpu_env
);
6360 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6361 s
->mem_index
, MO_LEUW
);
6364 if ((op
& 7) == 3) {
6365 gen_helper_fpop(cpu_env
);
6370 case 0x0c: /* fldenv mem */
6371 gen_helper_fldenv(cpu_env
, s
->A0
,
6372 tcg_const_i32(dflag
- 1));
6373 update_fip
= update_fdp
= false;
6375 case 0x0d: /* fldcw mem */
6376 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
,
6377 s
->mem_index
, MO_LEUW
);
6378 gen_helper_fldcw(cpu_env
, s
->tmp2_i32
);
6379 update_fip
= update_fdp
= false;
6381 case 0x0e: /* fnstenv mem */
6382 gen_helper_fstenv(cpu_env
, s
->A0
,
6383 tcg_const_i32(dflag
- 1));
6384 update_fip
= update_fdp
= false;
6386 case 0x0f: /* fnstcw mem */
6387 gen_helper_fnstcw(s
->tmp2_i32
, cpu_env
);
6388 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6389 s
->mem_index
, MO_LEUW
);
6390 update_fip
= update_fdp
= false;
6392 case 0x1d: /* fldt mem */
6393 gen_helper_fldt_ST0(cpu_env
, s
->A0
);
6395 case 0x1f: /* fstpt mem */
6396 gen_helper_fstt_ST0(cpu_env
, s
->A0
);
6397 gen_helper_fpop(cpu_env
);
6399 case 0x2c: /* frstor mem */
6400 gen_helper_frstor(cpu_env
, s
->A0
,
6401 tcg_const_i32(dflag
- 1));
6402 update_fip
= update_fdp
= false;
6404 case 0x2e: /* fnsave mem */
6405 gen_helper_fsave(cpu_env
, s
->A0
,
6406 tcg_const_i32(dflag
- 1));
6407 update_fip
= update_fdp
= false;
6409 case 0x2f: /* fnstsw mem */
6410 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
6411 tcg_gen_qemu_st_i32(s
->tmp2_i32
, s
->A0
,
6412 s
->mem_index
, MO_LEUW
);
6413 update_fip
= update_fdp
= false;
6415 case 0x3c: /* fbld */
6416 gen_helper_fbld_ST0(cpu_env
, s
->A0
);
6418 case 0x3e: /* fbstp */
6419 gen_helper_fbst_ST0(cpu_env
, s
->A0
);
6420 gen_helper_fpop(cpu_env
);
6422 case 0x3d: /* fildll */
6423 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
,
6424 s
->mem_index
, MO_LEUQ
);
6425 gen_helper_fildll_ST0(cpu_env
, s
->tmp1_i64
);
6427 case 0x3f: /* fistpll */
6428 gen_helper_fistll_ST0(s
->tmp1_i64
, cpu_env
);
6429 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
,
6430 s
->mem_index
, MO_LEUQ
);
6431 gen_helper_fpop(cpu_env
);
6438 int last_seg
= s
->override
>= 0 ? s
->override
: a
.def_seg
;
6440 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
6441 offsetof(CPUX86State
,
6442 segs
[last_seg
].selector
));
6443 tcg_gen_st16_i32(s
->tmp2_i32
, cpu_env
,
6444 offsetof(CPUX86State
, fpds
));
6445 tcg_gen_st_tl(last_addr
, cpu_env
,
6446 offsetof(CPUX86State
, fpdp
));
6448 tcg_temp_free(last_addr
);
6450 /* register float ops */
6454 case 0x08: /* fld sti */
6455 gen_helper_fpush(cpu_env
);
6456 gen_helper_fmov_ST0_STN(cpu_env
,
6457 tcg_const_i32((opreg
+ 1) & 7));
6459 case 0x09: /* fxchg sti */
6460 case 0x29: /* fxchg4 sti, undocumented op */
6461 case 0x39: /* fxchg7 sti, undocumented op */
6462 gen_helper_fxchg_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6464 case 0x0a: /* grp d9/2 */
6467 /* check exceptions (FreeBSD FPU probe) */
6468 gen_helper_fwait(cpu_env
);
6475 case 0x0c: /* grp d9/4 */
6478 gen_helper_fchs_ST0(cpu_env
);
6481 gen_helper_fabs_ST0(cpu_env
);
6484 gen_helper_fldz_FT0(cpu_env
);
6485 gen_helper_fcom_ST0_FT0(cpu_env
);
6488 gen_helper_fxam_ST0(cpu_env
);
6494 case 0x0d: /* grp d9/5 */
6498 gen_helper_fpush(cpu_env
);
6499 gen_helper_fld1_ST0(cpu_env
);
6502 gen_helper_fpush(cpu_env
);
6503 gen_helper_fldl2t_ST0(cpu_env
);
6506 gen_helper_fpush(cpu_env
);
6507 gen_helper_fldl2e_ST0(cpu_env
);
6510 gen_helper_fpush(cpu_env
);
6511 gen_helper_fldpi_ST0(cpu_env
);
6514 gen_helper_fpush(cpu_env
);
6515 gen_helper_fldlg2_ST0(cpu_env
);
6518 gen_helper_fpush(cpu_env
);
6519 gen_helper_fldln2_ST0(cpu_env
);
6522 gen_helper_fpush(cpu_env
);
6523 gen_helper_fldz_ST0(cpu_env
);
6530 case 0x0e: /* grp d9/6 */
6533 gen_helper_f2xm1(cpu_env
);
6536 gen_helper_fyl2x(cpu_env
);
6539 gen_helper_fptan(cpu_env
);
6541 case 3: /* fpatan */
6542 gen_helper_fpatan(cpu_env
);
6544 case 4: /* fxtract */
6545 gen_helper_fxtract(cpu_env
);
6547 case 5: /* fprem1 */
6548 gen_helper_fprem1(cpu_env
);
6550 case 6: /* fdecstp */
6551 gen_helper_fdecstp(cpu_env
);
6554 case 7: /* fincstp */
6555 gen_helper_fincstp(cpu_env
);
6559 case 0x0f: /* grp d9/7 */
6562 gen_helper_fprem(cpu_env
);
6564 case 1: /* fyl2xp1 */
6565 gen_helper_fyl2xp1(cpu_env
);
6568 gen_helper_fsqrt(cpu_env
);
6570 case 3: /* fsincos */
6571 gen_helper_fsincos(cpu_env
);
6573 case 5: /* fscale */
6574 gen_helper_fscale(cpu_env
);
6576 case 4: /* frndint */
6577 gen_helper_frndint(cpu_env
);
6580 gen_helper_fsin(cpu_env
);
6584 gen_helper_fcos(cpu_env
);
6588 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6589 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6590 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6596 gen_helper_fp_arith_STN_ST0(op1
, opreg
);
6598 gen_helper_fpop(cpu_env
);
6601 gen_helper_fmov_FT0_STN(cpu_env
,
6602 tcg_const_i32(opreg
));
6603 gen_helper_fp_arith_ST0_FT0(op1
);
6607 case 0x02: /* fcom */
6608 case 0x22: /* fcom2, undocumented op */
6609 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6610 gen_helper_fcom_ST0_FT0(cpu_env
);
6612 case 0x03: /* fcomp */
6613 case 0x23: /* fcomp3, undocumented op */
6614 case 0x32: /* fcomp5, undocumented op */
6615 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6616 gen_helper_fcom_ST0_FT0(cpu_env
);
6617 gen_helper_fpop(cpu_env
);
6619 case 0x15: /* da/5 */
6621 case 1: /* fucompp */
6622 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6623 gen_helper_fucom_ST0_FT0(cpu_env
);
6624 gen_helper_fpop(cpu_env
);
6625 gen_helper_fpop(cpu_env
);
6633 case 0: /* feni (287 only, just do nop here) */
6635 case 1: /* fdisi (287 only, just do nop here) */
6638 gen_helper_fclex(cpu_env
);
6641 case 3: /* fninit */
6642 gen_helper_fninit(cpu_env
);
6645 case 4: /* fsetpm (287 only, just do nop here) */
6651 case 0x1d: /* fucomi */
6652 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6655 gen_update_cc_op(s
);
6656 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6657 gen_helper_fucomi_ST0_FT0(cpu_env
);
6658 set_cc_op(s
, CC_OP_EFLAGS
);
6660 case 0x1e: /* fcomi */
6661 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6664 gen_update_cc_op(s
);
6665 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6666 gen_helper_fcomi_ST0_FT0(cpu_env
);
6667 set_cc_op(s
, CC_OP_EFLAGS
);
6669 case 0x28: /* ffree sti */
6670 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6672 case 0x2a: /* fst sti */
6673 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6675 case 0x2b: /* fstp sti */
6676 case 0x0b: /* fstp1 sti, undocumented op */
6677 case 0x3a: /* fstp8 sti, undocumented op */
6678 case 0x3b: /* fstp9 sti, undocumented op */
6679 gen_helper_fmov_STN_ST0(cpu_env
, tcg_const_i32(opreg
));
6680 gen_helper_fpop(cpu_env
);
6682 case 0x2c: /* fucom st(i) */
6683 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6684 gen_helper_fucom_ST0_FT0(cpu_env
);
6686 case 0x2d: /* fucomp st(i) */
6687 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6688 gen_helper_fucom_ST0_FT0(cpu_env
);
6689 gen_helper_fpop(cpu_env
);
6691 case 0x33: /* de/3 */
6693 case 1: /* fcompp */
6694 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(1));
6695 gen_helper_fcom_ST0_FT0(cpu_env
);
6696 gen_helper_fpop(cpu_env
);
6697 gen_helper_fpop(cpu_env
);
6703 case 0x38: /* ffreep sti, undocumented op */
6704 gen_helper_ffree_STN(cpu_env
, tcg_const_i32(opreg
));
6705 gen_helper_fpop(cpu_env
);
6707 case 0x3c: /* df/4 */
6710 gen_helper_fnstsw(s
->tmp2_i32
, cpu_env
);
6711 tcg_gen_extu_i32_tl(s
->T0
, s
->tmp2_i32
);
6712 gen_op_mov_reg_v(s
, MO_16
, R_EAX
, s
->T0
);
6718 case 0x3d: /* fucomip */
6719 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6722 gen_update_cc_op(s
);
6723 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6724 gen_helper_fucomi_ST0_FT0(cpu_env
);
6725 gen_helper_fpop(cpu_env
);
6726 set_cc_op(s
, CC_OP_EFLAGS
);
6728 case 0x3e: /* fcomip */
6729 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6732 gen_update_cc_op(s
);
6733 gen_helper_fmov_FT0_STN(cpu_env
, tcg_const_i32(opreg
));
6734 gen_helper_fcomi_ST0_FT0(cpu_env
);
6735 gen_helper_fpop(cpu_env
);
6736 set_cc_op(s
, CC_OP_EFLAGS
);
6738 case 0x10 ... 0x13: /* fcmovxx */
6743 static const uint8_t fcmov_cc
[8] = {
6750 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
6753 op1
= fcmov_cc
[op
& 3] | (((op
>> 3) & 1) ^ 1);
6754 l1
= gen_new_label();
6755 gen_jcc1_noeob(s
, op1
, l1
);
6756 gen_helper_fmov_ST0_STN(cpu_env
, tcg_const_i32(opreg
));
6766 tcg_gen_ld_i32(s
->tmp2_i32
, cpu_env
,
6767 offsetof(CPUX86State
, segs
[R_CS
].selector
));
6768 tcg_gen_st16_i32(s
->tmp2_i32
, cpu_env
,
6769 offsetof(CPUX86State
, fpcs
));
6770 tcg_gen_st_tl(eip_cur_tl(s
),
6771 cpu_env
, offsetof(CPUX86State
, fpip
));
6775 /************************/
6778 case 0xa4: /* movsS */
6780 ot
= mo_b_d(b
, dflag
);
6781 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6782 gen_repz_movs(s
, ot
);
6788 case 0xaa: /* stosS */
6790 ot
= mo_b_d(b
, dflag
);
6791 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6792 gen_repz_stos(s
, ot
);
6797 case 0xac: /* lodsS */
6799 ot
= mo_b_d(b
, dflag
);
6800 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6801 gen_repz_lods(s
, ot
);
6806 case 0xae: /* scasS */
6808 ot
= mo_b_d(b
, dflag
);
6809 if (prefixes
& PREFIX_REPNZ
) {
6810 gen_repz_scas(s
, ot
, 1);
6811 } else if (prefixes
& PREFIX_REPZ
) {
6812 gen_repz_scas(s
, ot
, 0);
6818 case 0xa6: /* cmpsS */
6820 ot
= mo_b_d(b
, dflag
);
6821 if (prefixes
& PREFIX_REPNZ
) {
6822 gen_repz_cmps(s
, ot
, 1);
6823 } else if (prefixes
& PREFIX_REPZ
) {
6824 gen_repz_cmps(s
, ot
, 0);
6829 case 0x6c: /* insS */
6831 ot
= mo_b_d32(b
, dflag
);
6832 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
6833 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
6834 if (!gen_check_io(s
, ot
, s
->tmp2_i32
,
6835 SVM_IOIO_TYPE_MASK
| SVM_IOIO_STR_MASK
)) {
6838 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6840 s
->base
.is_jmp
= DISAS_TOO_MANY
;
6842 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6843 gen_repz_ins(s
, ot
);
6848 case 0x6e: /* outsS */
6850 ot
= mo_b_d32(b
, dflag
);
6851 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
6852 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
6853 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_STR_MASK
)) {
6856 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6858 s
->base
.is_jmp
= DISAS_TOO_MANY
;
6860 if (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
6861 gen_repz_outs(s
, ot
);
6867 /************************/
6872 ot
= mo_b_d32(b
, dflag
);
6873 val
= x86_ldub_code(env
, s
);
6874 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
6875 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_TYPE_MASK
)) {
6878 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6880 s
->base
.is_jmp
= DISAS_TOO_MANY
;
6882 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
6883 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
6884 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6888 ot
= mo_b_d32(b
, dflag
);
6889 val
= x86_ldub_code(env
, s
);
6890 tcg_gen_movi_i32(s
->tmp2_i32
, val
);
6891 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, 0)) {
6894 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6896 s
->base
.is_jmp
= DISAS_TOO_MANY
;
6898 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
6899 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
6900 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
6901 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6905 ot
= mo_b_d32(b
, dflag
);
6906 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
6907 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
6908 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, SVM_IOIO_TYPE_MASK
)) {
6911 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6913 s
->base
.is_jmp
= DISAS_TOO_MANY
;
6915 gen_helper_in_func(ot
, s
->T1
, s
->tmp2_i32
);
6916 gen_op_mov_reg_v(s
, ot
, R_EAX
, s
->T1
);
6917 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6921 ot
= mo_b_d32(b
, dflag
);
6922 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_EDX
]);
6923 tcg_gen_ext16u_i32(s
->tmp2_i32
, s
->tmp2_i32
);
6924 if (!gen_check_io(s
, ot
, s
->tmp2_i32
, 0)) {
6927 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
6929 s
->base
.is_jmp
= DISAS_TOO_MANY
;
6931 gen_op_mov_v_reg(s
, ot
, s
->T1
, R_EAX
);
6932 tcg_gen_trunc_tl_i32(s
->tmp3_i32
, s
->T1
);
6933 gen_helper_out_func(ot
, s
->tmp2_i32
, s
->tmp3_i32
);
6934 gen_bpt_io(s
, s
->tmp2_i32
, ot
);
6937 /************************/
6939 case 0xc2: /* ret im */
6940 val
= x86_ldsw_code(env
, s
);
6942 gen_stack_update(s
, val
+ (1 << ot
));
6943 /* Note that gen_pop_T0 uses a zero-extending load. */
6944 gen_op_jmp_v(s
, s
->T0
);
6946 s
->base
.is_jmp
= DISAS_JUMP
;
6948 case 0xc3: /* ret */
6950 gen_pop_update(s
, ot
);
6951 /* Note that gen_pop_T0 uses a zero-extending load. */
6952 gen_op_jmp_v(s
, s
->T0
);
6954 s
->base
.is_jmp
= DISAS_JUMP
;
6956 case 0xca: /* lret im */
6957 val
= x86_ldsw_code(env
, s
);
6959 if (PE(s
) && !VM86(s
)) {
6960 gen_update_cc_op(s
);
6961 gen_update_eip_cur(s
);
6962 gen_helper_lret_protected(cpu_env
, tcg_const_i32(dflag
- 1),
6963 tcg_const_i32(val
));
6967 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
6968 /* NOTE: keeping EIP updated is not a problem in case of
6970 gen_op_jmp_v(s
, s
->T0
);
6972 gen_add_A0_im(s
, 1 << dflag
);
6973 gen_op_ld_v(s
, dflag
, s
->T0
, s
->A0
);
6974 gen_op_movl_seg_T0_vm(s
, R_CS
);
6975 /* add stack offset */
6976 gen_stack_update(s
, val
+ (2 << dflag
));
6978 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
6980 case 0xcb: /* lret */
6983 case 0xcf: /* iret */
6984 gen_svm_check_intercept(s
, SVM_EXIT_IRET
);
6985 if (!PE(s
) || VM86(s
)) {
6986 /* real mode or vm86 mode */
6987 if (!check_vm86_iopl(s
)) {
6990 gen_helper_iret_real(cpu_env
, tcg_const_i32(dflag
- 1));
6992 gen_helper_iret_protected(cpu_env
, tcg_constant_i32(dflag
- 1),
6995 set_cc_op(s
, CC_OP_EFLAGS
);
6996 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
6998 case 0xe8: /* call im */
7000 int diff
= (dflag
!= MO_16
7001 ? (int32_t)insn_get(env
, s
, MO_32
)
7002 : (int16_t)insn_get(env
, s
, MO_16
));
7003 gen_push_v(s
, eip_next_tl(s
));
7005 gen_jmp_rel(s
, dflag
, diff
, 0);
7008 case 0x9a: /* lcall im */
7010 unsigned int selector
, offset
;
7015 offset
= insn_get(env
, s
, ot
);
7016 selector
= insn_get(env
, s
, MO_16
);
7018 tcg_gen_movi_tl(s
->T0
, selector
);
7019 tcg_gen_movi_tl(s
->T1
, offset
);
7022 case 0xe9: /* jmp im */
7024 int diff
= (dflag
!= MO_16
7025 ? (int32_t)insn_get(env
, s
, MO_32
)
7026 : (int16_t)insn_get(env
, s
, MO_16
));
7028 gen_jmp_rel(s
, dflag
, diff
, 0);
7031 case 0xea: /* ljmp im */
7033 unsigned int selector
, offset
;
7038 offset
= insn_get(env
, s
, ot
);
7039 selector
= insn_get(env
, s
, MO_16
);
7041 tcg_gen_movi_tl(s
->T0
, selector
);
7042 tcg_gen_movi_tl(s
->T1
, offset
);
7045 case 0xeb: /* jmp Jb */
7047 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
7048 gen_jmp_rel(s
, dflag
, diff
, 0);
7051 case 0x70 ... 0x7f: /* jcc Jb */
7053 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
7055 gen_jcc(s
, b
, diff
);
7058 case 0x180 ... 0x18f: /* jcc Jv */
7060 int diff
= (dflag
!= MO_16
7061 ? (int32_t)insn_get(env
, s
, MO_32
)
7062 : (int16_t)insn_get(env
, s
, MO_16
));
7064 gen_jcc(s
, b
, diff
);
7068 case 0x190 ... 0x19f: /* setcc Gv */
7069 modrm
= x86_ldub_code(env
, s
);
7070 gen_setcc1(s
, b
, s
->T0
);
7071 gen_ldst_modrm(env
, s
, modrm
, MO_8
, OR_TMP0
, 1);
7073 case 0x140 ... 0x14f: /* cmov Gv, Ev */
7074 if (!(s
->cpuid_features
& CPUID_CMOV
)) {
7078 modrm
= x86_ldub_code(env
, s
);
7079 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
7080 gen_cmovcc1(env
, s
, ot
, b
, modrm
, reg
);
7083 /************************/
7085 case 0x9c: /* pushf */
7086 gen_svm_check_intercept(s
, SVM_EXIT_PUSHF
);
7087 if (check_vm86_iopl(s
)) {
7088 gen_update_cc_op(s
);
7089 gen_helper_read_eflags(s
->T0
, cpu_env
);
7090 gen_push_v(s
, s
->T0
);
7093 case 0x9d: /* popf */
7094 gen_svm_check_intercept(s
, SVM_EXIT_POPF
);
7095 if (check_vm86_iopl(s
)) {
7098 if (dflag
!= MO_16
) {
7099 gen_helper_write_eflags(cpu_env
, s
->T0
,
7100 tcg_const_i32((TF_MASK
| AC_MASK
|
7105 gen_helper_write_eflags(cpu_env
, s
->T0
,
7106 tcg_const_i32((TF_MASK
| AC_MASK
|
7108 IF_MASK
| IOPL_MASK
)
7112 if (CPL(s
) <= IOPL(s
)) {
7113 if (dflag
!= MO_16
) {
7114 gen_helper_write_eflags(cpu_env
, s
->T0
,
7115 tcg_const_i32((TF_MASK
|
7121 gen_helper_write_eflags(cpu_env
, s
->T0
,
7122 tcg_const_i32((TF_MASK
|
7130 if (dflag
!= MO_16
) {
7131 gen_helper_write_eflags(cpu_env
, s
->T0
,
7132 tcg_const_i32((TF_MASK
| AC_MASK
|
7133 ID_MASK
| NT_MASK
)));
7135 gen_helper_write_eflags(cpu_env
, s
->T0
,
7136 tcg_const_i32((TF_MASK
| AC_MASK
|
7142 gen_pop_update(s
, ot
);
7143 set_cc_op(s
, CC_OP_EFLAGS
);
7144 /* abort translation because TF/AC flag may change */
7145 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
7148 case 0x9e: /* sahf */
7149 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
7151 gen_op_mov_v_reg(s
, MO_8
, s
->T0
, R_AH
);
7152 gen_compute_eflags(s
);
7153 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, CC_O
);
7154 tcg_gen_andi_tl(s
->T0
, s
->T0
, CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
);
7155 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, s
->T0
);
7157 case 0x9f: /* lahf */
7158 if (CODE64(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_LAHF_LM
))
7160 gen_compute_eflags(s
);
7161 /* Note: gen_compute_eflags() only gives the condition codes */
7162 tcg_gen_ori_tl(s
->T0
, cpu_cc_src
, 0x02);
7163 gen_op_mov_reg_v(s
, MO_8
, R_AH
, s
->T0
);
7165 case 0xf5: /* cmc */
7166 gen_compute_eflags(s
);
7167 tcg_gen_xori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
7169 case 0xf8: /* clc */
7170 gen_compute_eflags(s
);
7171 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_C
);
7173 case 0xf9: /* stc */
7174 gen_compute_eflags(s
);
7175 tcg_gen_ori_tl(cpu_cc_src
, cpu_cc_src
, CC_C
);
7177 case 0xfc: /* cld */
7178 tcg_gen_movi_i32(s
->tmp2_i32
, 1);
7179 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
7181 case 0xfd: /* std */
7182 tcg_gen_movi_i32(s
->tmp2_i32
, -1);
7183 tcg_gen_st_i32(s
->tmp2_i32
, cpu_env
, offsetof(CPUX86State
, df
));
7186 /************************/
7187 /* bit operations */
7188 case 0x1ba: /* bt/bts/btr/btc Gv, im */
7190 modrm
= x86_ldub_code(env
, s
);
7191 op
= (modrm
>> 3) & 7;
7192 mod
= (modrm
>> 6) & 3;
7193 rm
= (modrm
& 7) | REX_B(s
);
7196 gen_lea_modrm(env
, s
, modrm
);
7197 if (!(s
->prefix
& PREFIX_LOCK
)) {
7198 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
7201 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
7204 val
= x86_ldub_code(env
, s
);
7205 tcg_gen_movi_tl(s
->T1
, val
);
7210 case 0x1a3: /* bt Gv, Ev */
7213 case 0x1ab: /* bts */
7216 case 0x1b3: /* btr */
7219 case 0x1bb: /* btc */
7223 modrm
= x86_ldub_code(env
, s
);
7224 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
7225 mod
= (modrm
>> 6) & 3;
7226 rm
= (modrm
& 7) | REX_B(s
);
7227 gen_op_mov_v_reg(s
, MO_32
, s
->T1
, reg
);
7229 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
7230 /* specific case: we need to add a displacement */
7231 gen_exts(ot
, s
->T1
);
7232 tcg_gen_sari_tl(s
->tmp0
, s
->T1
, 3 + ot
);
7233 tcg_gen_shli_tl(s
->tmp0
, s
->tmp0
, ot
);
7234 tcg_gen_add_tl(s
->A0
, gen_lea_modrm_1(s
, a
, false), s
->tmp0
);
7235 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
7236 if (!(s
->prefix
& PREFIX_LOCK
)) {
7237 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
7240 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
7243 tcg_gen_andi_tl(s
->T1
, s
->T1
, (1 << (3 + ot
)) - 1);
7244 tcg_gen_movi_tl(s
->tmp0
, 1);
7245 tcg_gen_shl_tl(s
->tmp0
, s
->tmp0
, s
->T1
);
7246 if (s
->prefix
& PREFIX_LOCK
) {
7249 /* Needs no atomic ops; we surpressed the normal
7250 memory load for LOCK above so do it now. */
7251 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
7254 tcg_gen_atomic_fetch_or_tl(s
->T0
, s
->A0
, s
->tmp0
,
7255 s
->mem_index
, ot
| MO_LE
);
7258 tcg_gen_not_tl(s
->tmp0
, s
->tmp0
);
7259 tcg_gen_atomic_fetch_and_tl(s
->T0
, s
->A0
, s
->tmp0
,
7260 s
->mem_index
, ot
| MO_LE
);
7264 tcg_gen_atomic_fetch_xor_tl(s
->T0
, s
->A0
, s
->tmp0
,
7265 s
->mem_index
, ot
| MO_LE
);
7268 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
7270 tcg_gen_shr_tl(s
->tmp4
, s
->T0
, s
->T1
);
7273 /* Data already loaded; nothing to do. */
7276 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->tmp0
);
7279 tcg_gen_andc_tl(s
->T0
, s
->T0
, s
->tmp0
);
7283 tcg_gen_xor_tl(s
->T0
, s
->T0
, s
->tmp0
);
7288 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
7290 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
7295 /* Delay all CC updates until after the store above. Note that
7296 C is the result of the test, Z is unchanged, and the others
7297 are all undefined. */
7299 case CC_OP_MULB
... CC_OP_MULQ
:
7300 case CC_OP_ADDB
... CC_OP_ADDQ
:
7301 case CC_OP_ADCB
... CC_OP_ADCQ
:
7302 case CC_OP_SUBB
... CC_OP_SUBQ
:
7303 case CC_OP_SBBB
... CC_OP_SBBQ
:
7304 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
7305 case CC_OP_INCB
... CC_OP_INCQ
:
7306 case CC_OP_DECB
... CC_OP_DECQ
:
7307 case CC_OP_SHLB
... CC_OP_SHLQ
:
7308 case CC_OP_SARB
... CC_OP_SARQ
:
7309 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
7310 /* Z was going to be computed from the non-zero status of CC_DST.
7311 We can get that same Z value (and the new C value) by leaving
7312 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
7314 tcg_gen_mov_tl(cpu_cc_src
, s
->tmp4
);
7315 set_cc_op(s
, ((s
->cc_op
- CC_OP_MULB
) & 3) + CC_OP_SARB
);
7318 /* Otherwise, generate EFLAGS and replace the C bit. */
7319 gen_compute_eflags(s
);
7320 tcg_gen_deposit_tl(cpu_cc_src
, cpu_cc_src
, s
->tmp4
,
7325 case 0x1bc: /* bsf / tzcnt */
7326 case 0x1bd: /* bsr / lzcnt */
7328 modrm
= x86_ldub_code(env
, s
);
7329 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
7330 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
7331 gen_extu(ot
, s
->T0
);
7333 /* Note that lzcnt and tzcnt are in different extensions. */
7334 if ((prefixes
& PREFIX_REPZ
)
7336 ? s
->cpuid_ext3_features
& CPUID_EXT3_ABM
7337 : s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_BMI1
)) {
7339 /* For lzcnt/tzcnt, C bit is defined related to the input. */
7340 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
7342 /* For lzcnt, reduce the target_ulong result by the
7343 number of zeros that we expect to find at the top. */
7344 tcg_gen_clzi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
);
7345 tcg_gen_subi_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- size
);
7347 /* For tzcnt, a zero input must return the operand size. */
7348 tcg_gen_ctzi_tl(s
->T0
, s
->T0
, size
);
7350 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
7351 gen_op_update1_cc(s
);
7352 set_cc_op(s
, CC_OP_BMILGB
+ ot
);
7354 /* For bsr/bsf, only the Z bit is defined and it is related
7355 to the input and not the result. */
7356 tcg_gen_mov_tl(cpu_cc_dst
, s
->T0
);
7357 set_cc_op(s
, CC_OP_LOGICB
+ ot
);
7359 /* ??? The manual says that the output is undefined when the
7360 input is zero, but real hardware leaves it unchanged, and
7361 real programs appear to depend on that. Accomplish this
7362 by passing the output as the value to return upon zero. */
7364 /* For bsr, return the bit index of the first 1 bit,
7365 not the count of leading zeros. */
7366 tcg_gen_xori_tl(s
->T1
, cpu_regs
[reg
], TARGET_LONG_BITS
- 1);
7367 tcg_gen_clz_tl(s
->T0
, s
->T0
, s
->T1
);
7368 tcg_gen_xori_tl(s
->T0
, s
->T0
, TARGET_LONG_BITS
- 1);
7370 tcg_gen_ctz_tl(s
->T0
, s
->T0
, cpu_regs
[reg
]);
7373 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
7375 /************************/
7377 case 0x27: /* daa */
7380 gen_update_cc_op(s
);
7381 gen_helper_daa(cpu_env
);
7382 set_cc_op(s
, CC_OP_EFLAGS
);
7384 case 0x2f: /* das */
7387 gen_update_cc_op(s
);
7388 gen_helper_das(cpu_env
);
7389 set_cc_op(s
, CC_OP_EFLAGS
);
7391 case 0x37: /* aaa */
7394 gen_update_cc_op(s
);
7395 gen_helper_aaa(cpu_env
);
7396 set_cc_op(s
, CC_OP_EFLAGS
);
7398 case 0x3f: /* aas */
7401 gen_update_cc_op(s
);
7402 gen_helper_aas(cpu_env
);
7403 set_cc_op(s
, CC_OP_EFLAGS
);
7405 case 0xd4: /* aam */
7408 val
= x86_ldub_code(env
, s
);
7410 gen_exception(s
, EXCP00_DIVZ
);
7412 gen_helper_aam(cpu_env
, tcg_const_i32(val
));
7413 set_cc_op(s
, CC_OP_LOGICB
);
7416 case 0xd5: /* aad */
7419 val
= x86_ldub_code(env
, s
);
7420 gen_helper_aad(cpu_env
, tcg_const_i32(val
));
7421 set_cc_op(s
, CC_OP_LOGICB
);
7423 /************************/
7425 case 0x90: /* nop */
7426 /* XXX: correct lock test for all insn */
7427 if (prefixes
& PREFIX_LOCK
) {
7430 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7432 goto do_xchg_reg_eax
;
7434 if (prefixes
& PREFIX_REPZ
) {
7435 gen_update_cc_op(s
);
7436 gen_update_eip_cur(s
);
7437 gen_helper_pause(cpu_env
, cur_insn_len_i32(s
));
7438 s
->base
.is_jmp
= DISAS_NORETURN
;
7441 case 0x9b: /* fwait */
7442 if ((s
->flags
& (HF_MP_MASK
| HF_TS_MASK
)) ==
7443 (HF_MP_MASK
| HF_TS_MASK
)) {
7444 gen_exception(s
, EXCP07_PREX
);
7446 gen_helper_fwait(cpu_env
);
7449 case 0xcc: /* int3 */
7450 gen_interrupt(s
, EXCP03_INT3
);
7452 case 0xcd: /* int N */
7453 val
= x86_ldub_code(env
, s
);
7454 if (check_vm86_iopl(s
)) {
7455 gen_interrupt(s
, val
);
7458 case 0xce: /* into */
7461 gen_update_cc_op(s
);
7462 gen_update_eip_cur(s
);
7463 gen_helper_into(cpu_env
, cur_insn_len_i32(s
));
7466 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7467 gen_svm_check_intercept(s
, SVM_EXIT_ICEBP
);
7471 case 0xfa: /* cli */
7472 if (check_iopl(s
)) {
7473 gen_helper_cli(cpu_env
);
7476 case 0xfb: /* sti */
7477 if (check_iopl(s
)) {
7478 gen_helper_sti(cpu_env
);
7479 /* interruptions are enabled only the first insn after sti */
7480 gen_update_eip_next(s
);
7481 gen_eob_inhibit_irq(s
, true);
7484 case 0x62: /* bound */
7488 modrm
= x86_ldub_code(env
, s
);
7489 reg
= (modrm
>> 3) & 7;
7490 mod
= (modrm
>> 6) & 3;
7493 gen_op_mov_v_reg(s
, ot
, s
->T0
, reg
);
7494 gen_lea_modrm(env
, s
, modrm
);
7495 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
7497 gen_helper_boundw(cpu_env
, s
->A0
, s
->tmp2_i32
);
7499 gen_helper_boundl(cpu_env
, s
->A0
, s
->tmp2_i32
);
7502 case 0x1c8 ... 0x1cf: /* bswap reg */
7503 reg
= (b
& 7) | REX_B(s
);
7504 #ifdef TARGET_X86_64
7505 if (dflag
== MO_64
) {
7506 tcg_gen_bswap64_i64(cpu_regs
[reg
], cpu_regs
[reg
]);
7510 tcg_gen_bswap32_tl(cpu_regs
[reg
], cpu_regs
[reg
], TCG_BSWAP_OZ
);
7512 case 0xd6: /* salc */
7515 gen_compute_eflags_c(s
, s
->T0
);
7516 tcg_gen_neg_tl(s
->T0
, s
->T0
);
7517 gen_op_mov_reg_v(s
, MO_8
, R_EAX
, s
->T0
);
7519 case 0xe0: /* loopnz */
7520 case 0xe1: /* loopz */
7521 case 0xe2: /* loop */
7522 case 0xe3: /* jecxz */
7525 int diff
= (int8_t)insn_get(env
, s
, MO_8
);
7527 l1
= gen_new_label();
7528 l2
= gen_new_label();
7529 gen_update_cc_op(s
);
7532 case 0: /* loopnz */
7534 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
7535 gen_op_jz_ecx(s
, l2
);
7536 gen_jcc1(s
, (JCC_Z
<< 1) | (b
^ 1), l1
);
7539 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
7540 gen_op_jnz_ecx(s
, l1
);
7544 gen_op_jz_ecx(s
, l1
);
7549 gen_jmp_rel_csize(s
, 0, 1);
7552 gen_jmp_rel(s
, dflag
, diff
, 0);
7555 case 0x130: /* wrmsr */
7556 case 0x132: /* rdmsr */
7557 if (check_cpl0(s
)) {
7558 gen_update_cc_op(s
);
7559 gen_update_eip_cur(s
);
7561 gen_helper_rdmsr(cpu_env
);
7563 gen_helper_wrmsr(cpu_env
);
7564 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
7568 case 0x131: /* rdtsc */
7569 gen_update_cc_op(s
);
7570 gen_update_eip_cur(s
);
7571 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
7573 s
->base
.is_jmp
= DISAS_TOO_MANY
;
7575 gen_helper_rdtsc(cpu_env
);
7577 case 0x133: /* rdpmc */
7578 gen_update_cc_op(s
);
7579 gen_update_eip_cur(s
);
7580 gen_helper_rdpmc(cpu_env
);
7581 s
->base
.is_jmp
= DISAS_NORETURN
;
7583 case 0x134: /* sysenter */
7584 /* For Intel SYSENTER is valid on 64-bit */
7585 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7588 gen_exception_gpf(s
);
7590 gen_helper_sysenter(cpu_env
);
7591 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
7594 case 0x135: /* sysexit */
7595 /* For Intel SYSEXIT is valid on 64-bit */
7596 if (CODE64(s
) && env
->cpuid_vendor1
!= CPUID_VENDOR_INTEL_1
)
7599 gen_exception_gpf(s
);
7601 gen_helper_sysexit(cpu_env
, tcg_const_i32(dflag
- 1));
7602 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
7605 #ifdef TARGET_X86_64
7606 case 0x105: /* syscall */
7607 /* XXX: is it usable in real mode ? */
7608 gen_update_cc_op(s
);
7609 gen_update_eip_cur(s
);
7610 gen_helper_syscall(cpu_env
, cur_insn_len_i32(s
));
7611 /* TF handling for the syscall insn is different. The TF bit is checked
7612 after the syscall insn completes. This allows #DB to not be
7613 generated after one has entered CPL0 if TF is set in FMASK. */
7614 gen_eob_worker(s
, false, true);
7616 case 0x107: /* sysret */
7618 gen_exception_gpf(s
);
7620 gen_helper_sysret(cpu_env
, tcg_const_i32(dflag
- 1));
7621 /* condition codes are modified only in long mode */
7623 set_cc_op(s
, CC_OP_EFLAGS
);
7625 /* TF handling for the sysret insn is different. The TF bit is
7626 checked after the sysret insn completes. This allows #DB to be
7627 generated "as if" the syscall insn in userspace has just
7629 gen_eob_worker(s
, false, true);
7633 case 0x1a2: /* cpuid */
7634 gen_update_cc_op(s
);
7635 gen_update_eip_cur(s
);
7636 gen_helper_cpuid(cpu_env
);
7638 case 0xf4: /* hlt */
7639 if (check_cpl0(s
)) {
7640 gen_update_cc_op(s
);
7641 gen_update_eip_cur(s
);
7642 gen_helper_hlt(cpu_env
, cur_insn_len_i32(s
));
7643 s
->base
.is_jmp
= DISAS_NORETURN
;
7647 modrm
= x86_ldub_code(env
, s
);
7648 mod
= (modrm
>> 6) & 3;
7649 op
= (modrm
>> 3) & 7;
7652 if (!PE(s
) || VM86(s
))
7654 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
7657 gen_svm_check_intercept(s
, SVM_EXIT_LDTR_READ
);
7658 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
7659 offsetof(CPUX86State
, ldt
.selector
));
7660 ot
= mod
== 3 ? dflag
: MO_16
;
7661 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7664 if (!PE(s
) || VM86(s
))
7666 if (check_cpl0(s
)) {
7667 gen_svm_check_intercept(s
, SVM_EXIT_LDTR_WRITE
);
7668 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7669 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
7670 gen_helper_lldt(cpu_env
, s
->tmp2_i32
);
7674 if (!PE(s
) || VM86(s
))
7676 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
7679 gen_svm_check_intercept(s
, SVM_EXIT_TR_READ
);
7680 tcg_gen_ld32u_tl(s
->T0
, cpu_env
,
7681 offsetof(CPUX86State
, tr
.selector
));
7682 ot
= mod
== 3 ? dflag
: MO_16
;
7683 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7686 if (!PE(s
) || VM86(s
))
7688 if (check_cpl0(s
)) {
7689 gen_svm_check_intercept(s
, SVM_EXIT_TR_WRITE
);
7690 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7691 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, s
->T0
);
7692 gen_helper_ltr(cpu_env
, s
->tmp2_i32
);
7697 if (!PE(s
) || VM86(s
))
7699 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7700 gen_update_cc_op(s
);
7702 gen_helper_verr(cpu_env
, s
->T0
);
7704 gen_helper_verw(cpu_env
, s
->T0
);
7706 set_cc_op(s
, CC_OP_EFLAGS
);
7714 modrm
= x86_ldub_code(env
, s
);
7716 CASE_MODRM_MEM_OP(0): /* sgdt */
7717 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
7720 gen_svm_check_intercept(s
, SVM_EXIT_GDTR_READ
);
7721 gen_lea_modrm(env
, s
, modrm
);
7722 tcg_gen_ld32u_tl(s
->T0
,
7723 cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7724 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
7725 gen_add_A0_im(s
, 2);
7726 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7727 if (dflag
== MO_16
) {
7728 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
7730 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
7733 case 0xc8: /* monitor */
7734 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
7737 gen_update_cc_op(s
);
7738 gen_update_eip_cur(s
);
7739 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
7740 gen_extu(s
->aflag
, s
->A0
);
7741 gen_add_A0_ds_seg(s
);
7742 gen_helper_monitor(cpu_env
, s
->A0
);
7745 case 0xc9: /* mwait */
7746 if (!(s
->cpuid_ext_features
& CPUID_EXT_MONITOR
) || CPL(s
) != 0) {
7749 gen_update_cc_op(s
);
7750 gen_update_eip_cur(s
);
7751 gen_helper_mwait(cpu_env
, cur_insn_len_i32(s
));
7752 s
->base
.is_jmp
= DISAS_NORETURN
;
7755 case 0xca: /* clac */
7756 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
7760 gen_helper_clac(cpu_env
);
7761 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
7764 case 0xcb: /* stac */
7765 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_SMAP
)
7769 gen_helper_stac(cpu_env
);
7770 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
7773 CASE_MODRM_MEM_OP(1): /* sidt */
7774 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
7777 gen_svm_check_intercept(s
, SVM_EXIT_IDTR_READ
);
7778 gen_lea_modrm(env
, s
, modrm
);
7779 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7780 gen_op_st_v(s
, MO_16
, s
->T0
, s
->A0
);
7781 gen_add_A0_im(s
, 2);
7782 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.base
));
7783 if (dflag
== MO_16
) {
7784 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
7786 gen_op_st_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
7789 case 0xd0: /* xgetbv */
7790 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
7791 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
7792 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
7795 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
7796 gen_helper_xgetbv(s
->tmp1_i64
, cpu_env
, s
->tmp2_i32
);
7797 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
7800 case 0xd1: /* xsetbv */
7801 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
7802 || (s
->prefix
& (PREFIX_LOCK
| PREFIX_DATA
7803 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
7806 if (!check_cpl0(s
)) {
7809 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
7811 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
7812 gen_helper_xsetbv(cpu_env
, s
->tmp2_i32
, s
->tmp1_i64
);
7813 /* End TB because translation flags may change. */
7814 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
7817 case 0xd8: /* VMRUN */
7818 if (!SVME(s
) || !PE(s
)) {
7821 if (!check_cpl0(s
)) {
7824 gen_update_cc_op(s
);
7825 gen_update_eip_cur(s
);
7826 gen_helper_vmrun(cpu_env
, tcg_const_i32(s
->aflag
- 1),
7827 cur_insn_len_i32(s
));
7828 tcg_gen_exit_tb(NULL
, 0);
7829 s
->base
.is_jmp
= DISAS_NORETURN
;
7832 case 0xd9: /* VMMCALL */
7836 gen_update_cc_op(s
);
7837 gen_update_eip_cur(s
);
7838 gen_helper_vmmcall(cpu_env
);
7841 case 0xda: /* VMLOAD */
7842 if (!SVME(s
) || !PE(s
)) {
7845 if (!check_cpl0(s
)) {
7848 gen_update_cc_op(s
);
7849 gen_update_eip_cur(s
);
7850 gen_helper_vmload(cpu_env
, tcg_const_i32(s
->aflag
- 1));
7853 case 0xdb: /* VMSAVE */
7854 if (!SVME(s
) || !PE(s
)) {
7857 if (!check_cpl0(s
)) {
7860 gen_update_cc_op(s
);
7861 gen_update_eip_cur(s
);
7862 gen_helper_vmsave(cpu_env
, tcg_const_i32(s
->aflag
- 1));
7865 case 0xdc: /* STGI */
7866 if ((!SVME(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
7870 if (!check_cpl0(s
)) {
7873 gen_update_cc_op(s
);
7874 gen_helper_stgi(cpu_env
);
7875 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
7878 case 0xdd: /* CLGI */
7879 if (!SVME(s
) || !PE(s
)) {
7882 if (!check_cpl0(s
)) {
7885 gen_update_cc_op(s
);
7886 gen_update_eip_cur(s
);
7887 gen_helper_clgi(cpu_env
);
7890 case 0xde: /* SKINIT */
7891 if ((!SVME(s
) && !(s
->cpuid_ext3_features
& CPUID_EXT3_SKINIT
))
7895 gen_svm_check_intercept(s
, SVM_EXIT_SKINIT
);
7896 /* If not intercepted, not implemented -- raise #UD. */
7899 case 0xdf: /* INVLPGA */
7900 if (!SVME(s
) || !PE(s
)) {
7903 if (!check_cpl0(s
)) {
7906 gen_svm_check_intercept(s
, SVM_EXIT_INVLPGA
);
7907 if (s
->aflag
== MO_64
) {
7908 tcg_gen_mov_tl(s
->A0
, cpu_regs
[R_EAX
]);
7910 tcg_gen_ext32u_tl(s
->A0
, cpu_regs
[R_EAX
]);
7912 gen_helper_flush_page(cpu_env
, s
->A0
);
7913 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
7916 CASE_MODRM_MEM_OP(2): /* lgdt */
7917 if (!check_cpl0(s
)) {
7920 gen_svm_check_intercept(s
, SVM_EXIT_GDTR_WRITE
);
7921 gen_lea_modrm(env
, s
, modrm
);
7922 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
7923 gen_add_A0_im(s
, 2);
7924 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
7925 if (dflag
== MO_16
) {
7926 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
7928 tcg_gen_st_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, gdt
.base
));
7929 tcg_gen_st32_tl(s
->T1
, cpu_env
, offsetof(CPUX86State
, gdt
.limit
));
7932 CASE_MODRM_MEM_OP(3): /* lidt */
7933 if (!check_cpl0(s
)) {
7936 gen_svm_check_intercept(s
, SVM_EXIT_IDTR_WRITE
);
7937 gen_lea_modrm(env
, s
, modrm
);
7938 gen_op_ld_v(s
, MO_16
, s
->T1
, s
->A0
);
7939 gen_add_A0_im(s
, 2);
7940 gen_op_ld_v(s
, CODE64(s
) + MO_32
, s
->T0
, s
->A0
);
7941 if (dflag
== MO_16
) {
7942 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xffffff);
7944 tcg_gen_st_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, idt
.base
));
7945 tcg_gen_st32_tl(s
->T1
, cpu_env
, offsetof(CPUX86State
, idt
.limit
));
7948 CASE_MODRM_OP(4): /* smsw */
7949 if (s
->flags
& HF_UMIP_MASK
&& !check_cpl0(s
)) {
7952 gen_svm_check_intercept(s
, SVM_EXIT_READ_CR0
);
7953 tcg_gen_ld_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, cr
[0]));
7955 * In 32-bit mode, the higher 16 bits of the destination
7956 * register are undefined. In practice CR0[31:0] is stored
7957 * just like in 64-bit mode.
7959 mod
= (modrm
>> 6) & 3;
7960 ot
= (mod
!= 3 ? MO_16
: s
->dflag
);
7961 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 1);
7963 case 0xee: /* rdpkru */
7964 if (prefixes
& PREFIX_LOCK
) {
7967 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
7968 gen_helper_rdpkru(s
->tmp1_i64
, cpu_env
, s
->tmp2_i32
);
7969 tcg_gen_extr_i64_tl(cpu_regs
[R_EAX
], cpu_regs
[R_EDX
], s
->tmp1_i64
);
7971 case 0xef: /* wrpkru */
7972 if (prefixes
& PREFIX_LOCK
) {
7975 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
7977 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, cpu_regs
[R_ECX
]);
7978 gen_helper_wrpkru(cpu_env
, s
->tmp2_i32
, s
->tmp1_i64
);
7981 CASE_MODRM_OP(6): /* lmsw */
7982 if (!check_cpl0(s
)) {
7985 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
);
7986 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
7988 * Only the 4 lower bits of CR0 are modified.
7989 * PE cannot be set to zero if already set to one.
7991 tcg_gen_ld_tl(s
->T1
, cpu_env
, offsetof(CPUX86State
, cr
[0]));
7992 tcg_gen_andi_tl(s
->T0
, s
->T0
, 0xf);
7993 tcg_gen_andi_tl(s
->T1
, s
->T1
, ~0xe);
7994 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
7995 gen_helper_write_crN(cpu_env
, tcg_constant_i32(0), s
->T0
);
7996 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
7999 CASE_MODRM_MEM_OP(7): /* invlpg */
8000 if (!check_cpl0(s
)) {
8003 gen_svm_check_intercept(s
, SVM_EXIT_INVLPG
);
8004 gen_lea_modrm(env
, s
, modrm
);
8005 gen_helper_flush_page(cpu_env
, s
->A0
);
8006 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
8009 case 0xf8: /* swapgs */
8010 #ifdef TARGET_X86_64
8012 if (check_cpl0(s
)) {
8013 tcg_gen_mov_tl(s
->T0
, cpu_seg_base
[R_GS
]);
8014 tcg_gen_ld_tl(cpu_seg_base
[R_GS
], cpu_env
,
8015 offsetof(CPUX86State
, kernelgsbase
));
8016 tcg_gen_st_tl(s
->T0
, cpu_env
,
8017 offsetof(CPUX86State
, kernelgsbase
));
8024 case 0xf9: /* rdtscp */
8025 if (!(s
->cpuid_ext2_features
& CPUID_EXT2_RDTSCP
)) {
8028 gen_update_cc_op(s
);
8029 gen_update_eip_cur(s
);
8030 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
8032 s
->base
.is_jmp
= DISAS_TOO_MANY
;
8034 gen_helper_rdtscp(cpu_env
);
8042 case 0x108: /* invd */
8043 case 0x109: /* wbinvd */
8044 if (check_cpl0(s
)) {
8045 gen_svm_check_intercept(s
, (b
& 2) ? SVM_EXIT_INVD
: SVM_EXIT_WBINVD
);
8049 case 0x63: /* arpl or movslS (x86_64) */
8050 #ifdef TARGET_X86_64
8053 /* d_ot is the size of destination */
8056 modrm
= x86_ldub_code(env
, s
);
8057 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8058 mod
= (modrm
>> 6) & 3;
8059 rm
= (modrm
& 7) | REX_B(s
);
8062 gen_op_mov_v_reg(s
, MO_32
, s
->T0
, rm
);
8064 if (d_ot
== MO_64
) {
8065 tcg_gen_ext32s_tl(s
->T0
, s
->T0
);
8067 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
8069 gen_lea_modrm(env
, s
, modrm
);
8070 gen_op_ld_v(s
, MO_32
| MO_SIGN
, s
->T0
, s
->A0
);
8071 gen_op_mov_reg_v(s
, d_ot
, reg
, s
->T0
);
8077 TCGv t0
, t1
, t2
, a0
;
8079 if (!PE(s
) || VM86(s
))
8081 t0
= tcg_temp_local_new();
8082 t1
= tcg_temp_local_new();
8083 t2
= tcg_temp_local_new();
8085 modrm
= x86_ldub_code(env
, s
);
8086 reg
= (modrm
>> 3) & 7;
8087 mod
= (modrm
>> 6) & 3;
8090 gen_lea_modrm(env
, s
, modrm
);
8091 gen_op_ld_v(s
, ot
, t0
, s
->A0
);
8092 a0
= tcg_temp_local_new();
8093 tcg_gen_mov_tl(a0
, s
->A0
);
8095 gen_op_mov_v_reg(s
, ot
, t0
, rm
);
8098 gen_op_mov_v_reg(s
, ot
, t1
, reg
);
8099 tcg_gen_andi_tl(s
->tmp0
, t0
, 3);
8100 tcg_gen_andi_tl(t1
, t1
, 3);
8101 tcg_gen_movi_tl(t2
, 0);
8102 label1
= gen_new_label();
8103 tcg_gen_brcond_tl(TCG_COND_GE
, s
->tmp0
, t1
, label1
);
8104 tcg_gen_andi_tl(t0
, t0
, ~3);
8105 tcg_gen_or_tl(t0
, t0
, t1
);
8106 tcg_gen_movi_tl(t2
, CC_Z
);
8107 gen_set_label(label1
);
8109 gen_op_st_v(s
, ot
, t0
, a0
);
8112 gen_op_mov_reg_v(s
, ot
, rm
, t0
);
8114 gen_compute_eflags(s
);
8115 tcg_gen_andi_tl(cpu_cc_src
, cpu_cc_src
, ~CC_Z
);
8116 tcg_gen_or_tl(cpu_cc_src
, cpu_cc_src
, t2
);
8122 case 0x102: /* lar */
8123 case 0x103: /* lsl */
8127 if (!PE(s
) || VM86(s
))
8129 ot
= dflag
!= MO_16
? MO_32
: MO_16
;
8130 modrm
= x86_ldub_code(env
, s
);
8131 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8132 gen_ldst_modrm(env
, s
, modrm
, MO_16
, OR_TMP0
, 0);
8133 t0
= tcg_temp_local_new();
8134 gen_update_cc_op(s
);
8136 gen_helper_lar(t0
, cpu_env
, s
->T0
);
8138 gen_helper_lsl(t0
, cpu_env
, s
->T0
);
8140 tcg_gen_andi_tl(s
->tmp0
, cpu_cc_src
, CC_Z
);
8141 label1
= gen_new_label();
8142 tcg_gen_brcondi_tl(TCG_COND_EQ
, s
->tmp0
, 0, label1
);
8143 gen_op_mov_reg_v(s
, ot
, reg
, t0
);
8144 gen_set_label(label1
);
8145 set_cc_op(s
, CC_OP_EFLAGS
);
8150 modrm
= x86_ldub_code(env
, s
);
8151 mod
= (modrm
>> 6) & 3;
8152 op
= (modrm
>> 3) & 7;
8154 case 0: /* prefetchnta */
8155 case 1: /* prefetchnt0 */
8156 case 2: /* prefetchnt0 */
8157 case 3: /* prefetchnt0 */
8160 gen_nop_modrm(env
, s
, modrm
);
8161 /* nothing more to do */
8163 default: /* nop (multi byte) */
8164 gen_nop_modrm(env
, s
, modrm
);
8169 modrm
= x86_ldub_code(env
, s
);
8170 if (s
->flags
& HF_MPX_EN_MASK
) {
8171 mod
= (modrm
>> 6) & 3;
8172 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8173 if (prefixes
& PREFIX_REPZ
) {
8176 || (prefixes
& PREFIX_LOCK
)
8177 || s
->aflag
== MO_16
) {
8180 gen_bndck(env
, s
, modrm
, TCG_COND_LTU
, cpu_bndl
[reg
]);
8181 } else if (prefixes
& PREFIX_REPNZ
) {
8184 || (prefixes
& PREFIX_LOCK
)
8185 || s
->aflag
== MO_16
) {
8188 TCGv_i64 notu
= tcg_temp_new_i64();
8189 tcg_gen_not_i64(notu
, cpu_bndu
[reg
]);
8190 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, notu
);
8191 tcg_temp_free_i64(notu
);
8192 } else if (prefixes
& PREFIX_DATA
) {
8193 /* bndmov -- from reg/mem */
8194 if (reg
>= 4 || s
->aflag
== MO_16
) {
8198 int reg2
= (modrm
& 7) | REX_B(s
);
8199 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
8202 if (s
->flags
& HF_MPX_IU_MASK
) {
8203 tcg_gen_mov_i64(cpu_bndl
[reg
], cpu_bndl
[reg2
]);
8204 tcg_gen_mov_i64(cpu_bndu
[reg
], cpu_bndu
[reg2
]);
8207 gen_lea_modrm(env
, s
, modrm
);
8209 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
8210 s
->mem_index
, MO_LEUQ
);
8211 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
8212 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
8213 s
->mem_index
, MO_LEUQ
);
8215 tcg_gen_qemu_ld_i64(cpu_bndl
[reg
], s
->A0
,
8216 s
->mem_index
, MO_LEUL
);
8217 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
8218 tcg_gen_qemu_ld_i64(cpu_bndu
[reg
], s
->A0
,
8219 s
->mem_index
, MO_LEUL
);
8221 /* bnd registers are now in-use */
8222 gen_set_hflag(s
, HF_MPX_IU_MASK
);
8224 } else if (mod
!= 3) {
8226 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
8228 || (prefixes
& PREFIX_LOCK
)
8229 || s
->aflag
== MO_16
8234 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
8236 tcg_gen_movi_tl(s
->A0
, 0);
8238 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
8240 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
8242 tcg_gen_movi_tl(s
->T0
, 0);
8245 gen_helper_bndldx64(cpu_bndl
[reg
], cpu_env
, s
->A0
, s
->T0
);
8246 tcg_gen_ld_i64(cpu_bndu
[reg
], cpu_env
,
8247 offsetof(CPUX86State
, mmx_t0
.MMX_Q(0)));
8249 gen_helper_bndldx32(cpu_bndu
[reg
], cpu_env
, s
->A0
, s
->T0
);
8250 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndu
[reg
]);
8251 tcg_gen_shri_i64(cpu_bndu
[reg
], cpu_bndu
[reg
], 32);
8253 gen_set_hflag(s
, HF_MPX_IU_MASK
);
8256 gen_nop_modrm(env
, s
, modrm
);
8259 modrm
= x86_ldub_code(env
, s
);
8260 if (s
->flags
& HF_MPX_EN_MASK
) {
8261 mod
= (modrm
>> 6) & 3;
8262 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8263 if (mod
!= 3 && (prefixes
& PREFIX_REPZ
)) {
8266 || (prefixes
& PREFIX_LOCK
)
8267 || s
->aflag
== MO_16
) {
8270 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
8272 tcg_gen_extu_tl_i64(cpu_bndl
[reg
], cpu_regs
[a
.base
]);
8274 tcg_gen_ext32u_i64(cpu_bndl
[reg
], cpu_bndl
[reg
]);
8276 } else if (a
.base
== -1) {
8277 /* no base register has lower bound of 0 */
8278 tcg_gen_movi_i64(cpu_bndl
[reg
], 0);
8280 /* rip-relative generates #ud */
8283 tcg_gen_not_tl(s
->A0
, gen_lea_modrm_1(s
, a
, false));
8285 tcg_gen_ext32u_tl(s
->A0
, s
->A0
);
8287 tcg_gen_extu_tl_i64(cpu_bndu
[reg
], s
->A0
);
8288 /* bnd registers are now in-use */
8289 gen_set_hflag(s
, HF_MPX_IU_MASK
);
8291 } else if (prefixes
& PREFIX_REPNZ
) {
8294 || (prefixes
& PREFIX_LOCK
)
8295 || s
->aflag
== MO_16
) {
8298 gen_bndck(env
, s
, modrm
, TCG_COND_GTU
, cpu_bndu
[reg
]);
8299 } else if (prefixes
& PREFIX_DATA
) {
8300 /* bndmov -- to reg/mem */
8301 if (reg
>= 4 || s
->aflag
== MO_16
) {
8305 int reg2
= (modrm
& 7) | REX_B(s
);
8306 if (reg2
>= 4 || (prefixes
& PREFIX_LOCK
)) {
8309 if (s
->flags
& HF_MPX_IU_MASK
) {
8310 tcg_gen_mov_i64(cpu_bndl
[reg2
], cpu_bndl
[reg
]);
8311 tcg_gen_mov_i64(cpu_bndu
[reg2
], cpu_bndu
[reg
]);
8314 gen_lea_modrm(env
, s
, modrm
);
8316 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
8317 s
->mem_index
, MO_LEUQ
);
8318 tcg_gen_addi_tl(s
->A0
, s
->A0
, 8);
8319 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
8320 s
->mem_index
, MO_LEUQ
);
8322 tcg_gen_qemu_st_i64(cpu_bndl
[reg
], s
->A0
,
8323 s
->mem_index
, MO_LEUL
);
8324 tcg_gen_addi_tl(s
->A0
, s
->A0
, 4);
8325 tcg_gen_qemu_st_i64(cpu_bndu
[reg
], s
->A0
,
8326 s
->mem_index
, MO_LEUL
);
8329 } else if (mod
!= 3) {
8331 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
);
8333 || (prefixes
& PREFIX_LOCK
)
8334 || s
->aflag
== MO_16
8339 tcg_gen_addi_tl(s
->A0
, cpu_regs
[a
.base
], a
.disp
);
8341 tcg_gen_movi_tl(s
->A0
, 0);
8343 gen_lea_v_seg(s
, s
->aflag
, s
->A0
, a
.def_seg
, s
->override
);
8345 tcg_gen_mov_tl(s
->T0
, cpu_regs
[a
.index
]);
8347 tcg_gen_movi_tl(s
->T0
, 0);
8350 gen_helper_bndstx64(cpu_env
, s
->A0
, s
->T0
,
8351 cpu_bndl
[reg
], cpu_bndu
[reg
]);
8353 gen_helper_bndstx32(cpu_env
, s
->A0
, s
->T0
,
8354 cpu_bndl
[reg
], cpu_bndu
[reg
]);
8358 gen_nop_modrm(env
, s
, modrm
);
8360 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
8361 modrm
= x86_ldub_code(env
, s
);
8362 gen_nop_modrm(env
, s
, modrm
);
8365 case 0x120: /* mov reg, crN */
8366 case 0x122: /* mov crN, reg */
8367 if (!check_cpl0(s
)) {
8370 modrm
= x86_ldub_code(env
, s
);
8372 * Ignore the mod bits (assume (modrm&0xc0)==0xc0).
8373 * AMD documentation (24594.pdf) and testing of Intel 386 and 486
8374 * processors all show that the mod bits are assumed to be 1's,
8375 * regardless of actual values.
8377 rm
= (modrm
& 7) | REX_B(s
);
8378 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8381 if ((prefixes
& PREFIX_LOCK
) &&
8382 (s
->cpuid_ext3_features
& CPUID_EXT3_CR8LEG
)) {
8394 ot
= (CODE64(s
) ? MO_64
: MO_32
);
8396 if (tb_cflags(s
->base
.tb
) & CF_USE_ICOUNT
) {
8398 s
->base
.is_jmp
= DISAS_TOO_MANY
;
8401 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
+ reg
);
8402 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
8403 gen_helper_write_crN(cpu_env
, tcg_constant_i32(reg
), s
->T0
);
8404 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
8406 gen_svm_check_intercept(s
, SVM_EXIT_READ_CR0
+ reg
);
8407 gen_helper_read_crN(s
->T0
, cpu_env
, tcg_constant_i32(reg
));
8408 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
8412 case 0x121: /* mov reg, drN */
8413 case 0x123: /* mov drN, reg */
8414 if (check_cpl0(s
)) {
8415 modrm
= x86_ldub_code(env
, s
);
8416 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
8417 * AMD documentation (24594.pdf) and testing of
8418 * intel 386 and 486 processors all show that the mod bits
8419 * are assumed to be 1's, regardless of actual values.
8421 rm
= (modrm
& 7) | REX_B(s
);
8422 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8431 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_DR0
+ reg
);
8432 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
8433 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
8434 gen_helper_set_dr(cpu_env
, s
->tmp2_i32
, s
->T0
);
8435 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
8437 gen_svm_check_intercept(s
, SVM_EXIT_READ_DR0
+ reg
);
8438 tcg_gen_movi_i32(s
->tmp2_i32
, reg
);
8439 gen_helper_get_dr(s
->T0
, cpu_env
, s
->tmp2_i32
);
8440 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
8444 case 0x106: /* clts */
8445 if (check_cpl0(s
)) {
8446 gen_svm_check_intercept(s
, SVM_EXIT_WRITE_CR0
);
8447 gen_helper_clts(cpu_env
);
8448 /* abort block because static cpu state changed */
8449 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
8452 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
8453 case 0x1c3: /* MOVNTI reg, mem */
8454 if (!(s
->cpuid_features
& CPUID_SSE2
))
8456 ot
= mo_64_32(dflag
);
8457 modrm
= x86_ldub_code(env
, s
);
8458 mod
= (modrm
>> 6) & 3;
8461 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8462 /* generate a generic store */
8463 gen_ldst_modrm(env
, s
, modrm
, ot
, reg
, 1);
8466 modrm
= x86_ldub_code(env
, s
);
8468 CASE_MODRM_MEM_OP(0): /* fxsave */
8469 if (!(s
->cpuid_features
& CPUID_FXSR
)
8470 || (prefixes
& PREFIX_LOCK
)) {
8473 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8474 gen_exception(s
, EXCP07_PREX
);
8477 gen_lea_modrm(env
, s
, modrm
);
8478 gen_helper_fxsave(cpu_env
, s
->A0
);
8481 CASE_MODRM_MEM_OP(1): /* fxrstor */
8482 if (!(s
->cpuid_features
& CPUID_FXSR
)
8483 || (prefixes
& PREFIX_LOCK
)) {
8486 if ((s
->flags
& HF_EM_MASK
) || (s
->flags
& HF_TS_MASK
)) {
8487 gen_exception(s
, EXCP07_PREX
);
8490 gen_lea_modrm(env
, s
, modrm
);
8491 gen_helper_fxrstor(cpu_env
, s
->A0
);
8494 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
8495 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
8498 if (s
->flags
& HF_TS_MASK
) {
8499 gen_exception(s
, EXCP07_PREX
);
8502 gen_lea_modrm(env
, s
, modrm
);
8503 tcg_gen_qemu_ld_i32(s
->tmp2_i32
, s
->A0
, s
->mem_index
, MO_LEUL
);
8504 gen_helper_ldmxcsr(cpu_env
, s
->tmp2_i32
);
8507 CASE_MODRM_MEM_OP(3): /* stmxcsr */
8508 if ((s
->flags
& HF_EM_MASK
) || !(s
->flags
& HF_OSFXSR_MASK
)) {
8511 if (s
->flags
& HF_TS_MASK
) {
8512 gen_exception(s
, EXCP07_PREX
);
8515 gen_helper_update_mxcsr(cpu_env
);
8516 gen_lea_modrm(env
, s
, modrm
);
8517 tcg_gen_ld32u_tl(s
->T0
, cpu_env
, offsetof(CPUX86State
, mxcsr
));
8518 gen_op_st_v(s
, MO_32
, s
->T0
, s
->A0
);
8521 CASE_MODRM_MEM_OP(4): /* xsave */
8522 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
8523 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
8524 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
8527 gen_lea_modrm(env
, s
, modrm
);
8528 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
8530 gen_helper_xsave(cpu_env
, s
->A0
, s
->tmp1_i64
);
8533 CASE_MODRM_MEM_OP(5): /* xrstor */
8534 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
8535 || (prefixes
& (PREFIX_LOCK
| PREFIX_DATA
8536 | PREFIX_REPZ
| PREFIX_REPNZ
))) {
8539 gen_lea_modrm(env
, s
, modrm
);
8540 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
8542 gen_helper_xrstor(cpu_env
, s
->A0
, s
->tmp1_i64
);
8543 /* XRSTOR is how MPX is enabled, which changes how
8544 we translate. Thus we need to end the TB. */
8545 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
8548 CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
8549 if (prefixes
& PREFIX_LOCK
) {
8552 if (prefixes
& PREFIX_DATA
) {
8554 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLWB
)) {
8557 gen_nop_modrm(env
, s
, modrm
);
8560 if ((s
->cpuid_ext_features
& CPUID_EXT_XSAVE
) == 0
8561 || (s
->cpuid_xsave_features
& CPUID_XSAVE_XSAVEOPT
) == 0
8562 || (prefixes
& (PREFIX_REPZ
| PREFIX_REPNZ
))) {
8565 gen_lea_modrm(env
, s
, modrm
);
8566 tcg_gen_concat_tl_i64(s
->tmp1_i64
, cpu_regs
[R_EAX
],
8568 gen_helper_xsaveopt(cpu_env
, s
->A0
, s
->tmp1_i64
);
8572 CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
8573 if (prefixes
& PREFIX_LOCK
) {
8576 if (prefixes
& PREFIX_DATA
) {
8578 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_CLFLUSHOPT
)) {
8583 if ((s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
))
8584 || !(s
->cpuid_features
& CPUID_CLFLUSH
)) {
8588 gen_nop_modrm(env
, s
, modrm
);
8591 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
8592 case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
8593 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
8594 case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
8596 && (prefixes
& PREFIX_REPZ
)
8597 && !(prefixes
& PREFIX_LOCK
)
8598 && (s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_FSGSBASE
)) {
8599 TCGv base
, treg
, src
, dst
;
8601 /* Preserve hflags bits by testing CR4 at runtime. */
8602 tcg_gen_movi_i32(s
->tmp2_i32
, CR4_FSGSBASE_MASK
);
8603 gen_helper_cr4_testbit(cpu_env
, s
->tmp2_i32
);
8605 base
= cpu_seg_base
[modrm
& 8 ? R_GS
: R_FS
];
8606 treg
= cpu_regs
[(modrm
& 7) | REX_B(s
)];
8610 dst
= base
, src
= treg
;
8613 dst
= treg
, src
= base
;
8616 if (s
->dflag
== MO_32
) {
8617 tcg_gen_ext32u_tl(dst
, src
);
8619 tcg_gen_mov_tl(dst
, src
);
8625 case 0xf8: /* sfence / pcommit */
8626 if (prefixes
& PREFIX_DATA
) {
8628 if (!(s
->cpuid_7_0_ebx_features
& CPUID_7_0_EBX_PCOMMIT
)
8629 || (prefixes
& PREFIX_LOCK
)) {
8635 case 0xf9 ... 0xff: /* sfence */
8636 if (!(s
->cpuid_features
& CPUID_SSE
)
8637 || (prefixes
& PREFIX_LOCK
)) {
8640 tcg_gen_mb(TCG_MO_ST_ST
| TCG_BAR_SC
);
8642 case 0xe8 ... 0xef: /* lfence */
8643 if (!(s
->cpuid_features
& CPUID_SSE
)
8644 || (prefixes
& PREFIX_LOCK
)) {
8647 tcg_gen_mb(TCG_MO_LD_LD
| TCG_BAR_SC
);
8649 case 0xf0 ... 0xf7: /* mfence */
8650 if (!(s
->cpuid_features
& CPUID_SSE2
)
8651 || (prefixes
& PREFIX_LOCK
)) {
8654 tcg_gen_mb(TCG_MO_ALL
| TCG_BAR_SC
);
8662 case 0x10d: /* 3DNow! prefetch(w) */
8663 modrm
= x86_ldub_code(env
, s
);
8664 mod
= (modrm
>> 6) & 3;
8667 gen_nop_modrm(env
, s
, modrm
);
8669 case 0x1aa: /* rsm */
8670 gen_svm_check_intercept(s
, SVM_EXIT_RSM
);
8671 if (!(s
->flags
& HF_SMM_MASK
))
8673 #ifdef CONFIG_USER_ONLY
8674 /* we should not be in SMM mode */
8675 g_assert_not_reached();
8677 gen_update_cc_op(s
);
8678 gen_update_eip_next(s
);
8679 gen_helper_rsm(cpu_env
);
8680 #endif /* CONFIG_USER_ONLY */
8681 s
->base
.is_jmp
= DISAS_EOB_ONLY
;
8683 case 0x1b8: /* SSE4.2 popcnt */
8684 if ((prefixes
& (PREFIX_REPZ
| PREFIX_LOCK
| PREFIX_REPNZ
)) !=
8687 if (!(s
->cpuid_ext_features
& CPUID_EXT_POPCNT
))
8690 modrm
= x86_ldub_code(env
, s
);
8691 reg
= ((modrm
>> 3) & 7) | REX_R(s
);
8693 if (s
->prefix
& PREFIX_DATA
) {
8696 ot
= mo_64_32(dflag
);
8699 gen_ldst_modrm(env
, s
, modrm
, ot
, OR_TMP0
, 0);
8700 gen_extu(ot
, s
->T0
);
8701 tcg_gen_mov_tl(cpu_cc_src
, s
->T0
);
8702 tcg_gen_ctpop_tl(s
->T0
, s
->T0
);
8703 gen_op_mov_reg_v(s
, ot
, reg
, s
->T0
);
8705 set_cc_op(s
, CC_OP_POPCNT
);
8707 case 0x10e ... 0x10f:
8708 /* 3DNow! instructions, ignore prefixes */
8709 s
->prefix
&= ~(PREFIX_REPZ
| PREFIX_REPNZ
| PREFIX_DATA
);
8711 case 0x110 ... 0x117:
8712 case 0x128 ... 0x12f:
8713 case 0x138 ... 0x13a:
8714 case 0x150 ... 0x179:
8715 case 0x17c ... 0x17f:
8717 case 0x1c4 ... 0x1c6:
8718 case 0x1d0 ... 0x1fe:
8726 gen_illegal_opcode(s
);
8729 gen_unknown_opcode(env
, s
);
8733 void tcg_x86_init(void)
8735 static const char reg_names
[CPU_NB_REGS
][4] = {
8736 #ifdef TARGET_X86_64
8764 static const char eip_name
[] = {
8765 #ifdef TARGET_X86_64
8771 static const char seg_base_names
[6][8] = {
8779 static const char bnd_regl_names
[4][8] = {
8780 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
8782 static const char bnd_regu_names
[4][8] = {
8783 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
8787 cpu_cc_op
= tcg_global_mem_new_i32(cpu_env
,
8788 offsetof(CPUX86State
, cc_op
), "cc_op");
8789 cpu_cc_dst
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_dst
),
8791 cpu_cc_src
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_src
),
8793 cpu_cc_src2
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, cc_src2
),
8795 cpu_eip
= tcg_global_mem_new(cpu_env
, offsetof(CPUX86State
, eip
), eip_name
);
8797 for (i
= 0; i
< CPU_NB_REGS
; ++i
) {
8798 cpu_regs
[i
] = tcg_global_mem_new(cpu_env
,
8799 offsetof(CPUX86State
, regs
[i
]),
8803 for (i
= 0; i
< 6; ++i
) {
8805 = tcg_global_mem_new(cpu_env
,
8806 offsetof(CPUX86State
, segs
[i
].base
),
8810 for (i
= 0; i
< 4; ++i
) {
8812 = tcg_global_mem_new_i64(cpu_env
,
8813 offsetof(CPUX86State
, bnd_regs
[i
].lb
),
8816 = tcg_global_mem_new_i64(cpu_env
,
8817 offsetof(CPUX86State
, bnd_regs
[i
].ub
),
8822 static void i386_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cpu
)
8824 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8825 CPUX86State
*env
= cpu
->env_ptr
;
8826 uint32_t flags
= dc
->base
.tb
->flags
;
8827 uint32_t cflags
= tb_cflags(dc
->base
.tb
);
8828 int cpl
= (flags
>> HF_CPL_SHIFT
) & 3;
8829 int iopl
= (flags
>> IOPL_SHIFT
) & 3;
8831 dc
->cs_base
= dc
->base
.tb
->cs_base
;
8832 dc
->pc_save
= dc
->base
.pc_next
;
8834 #ifndef CONFIG_USER_ONLY
8839 /* We make some simplifying assumptions; validate they're correct. */
8840 g_assert(PE(dc
) == ((flags
& HF_PE_MASK
) != 0));
8841 g_assert(CPL(dc
) == cpl
);
8842 g_assert(IOPL(dc
) == iopl
);
8843 g_assert(VM86(dc
) == ((flags
& HF_VM_MASK
) != 0));
8844 g_assert(CODE32(dc
) == ((flags
& HF_CS32_MASK
) != 0));
8845 g_assert(CODE64(dc
) == ((flags
& HF_CS64_MASK
) != 0));
8846 g_assert(SS32(dc
) == ((flags
& HF_SS32_MASK
) != 0));
8847 g_assert(LMA(dc
) == ((flags
& HF_LMA_MASK
) != 0));
8848 g_assert(ADDSEG(dc
) == ((flags
& HF_ADDSEG_MASK
) != 0));
8849 g_assert(SVME(dc
) == ((flags
& HF_SVME_MASK
) != 0));
8850 g_assert(GUEST(dc
) == ((flags
& HF_GUEST_MASK
) != 0));
8852 dc
->cc_op
= CC_OP_DYNAMIC
;
8853 dc
->cc_op_dirty
= false;
8854 dc
->popl_esp_hack
= 0;
8855 /* select memory access functions */
8857 #ifdef CONFIG_SOFTMMU
8858 dc
->mem_index
= cpu_mmu_index(env
, false);
8860 dc
->cpuid_features
= env
->features
[FEAT_1_EDX
];
8861 dc
->cpuid_ext_features
= env
->features
[FEAT_1_ECX
];
8862 dc
->cpuid_ext2_features
= env
->features
[FEAT_8000_0001_EDX
];
8863 dc
->cpuid_ext3_features
= env
->features
[FEAT_8000_0001_ECX
];
8864 dc
->cpuid_7_0_ebx_features
= env
->features
[FEAT_7_0_EBX
];
8865 dc
->cpuid_7_0_ecx_features
= env
->features
[FEAT_7_0_ECX
];
8866 dc
->cpuid_xsave_features
= env
->features
[FEAT_XSAVE
];
8867 dc
->jmp_opt
= !((cflags
& CF_NO_GOTO_TB
) ||
8868 (flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)));
8870 * If jmp_opt, we want to handle each string instruction individually.
8871 * For icount also disable repz optimization so that each iteration
8872 * is accounted separately.
8874 dc
->repz_opt
= !dc
->jmp_opt
&& !(cflags
& CF_USE_ICOUNT
);
8876 dc
->T0
= tcg_temp_new();
8877 dc
->T1
= tcg_temp_new();
8878 dc
->A0
= tcg_temp_new();
8880 dc
->tmp0
= tcg_temp_new();
8881 dc
->tmp1_i64
= tcg_temp_new_i64();
8882 dc
->tmp2_i32
= tcg_temp_new_i32();
8883 dc
->tmp3_i32
= tcg_temp_new_i32();
8884 dc
->tmp4
= tcg_temp_new();
8885 dc
->ptr0
= tcg_temp_new_ptr();
8886 dc
->ptr1
= tcg_temp_new_ptr();
8887 dc
->cc_srcT
= tcg_temp_local_new();
8890 static void i386_tr_tb_start(DisasContextBase
*db
, CPUState
*cpu
)
8894 static void i386_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cpu
)
8896 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8897 target_ulong pc_arg
= dc
->base
.pc_next
;
8899 dc
->prev_insn_end
= tcg_last_op();
8900 if (TARGET_TB_PCREL
) {
8901 pc_arg
-= dc
->cs_base
;
8902 pc_arg
&= ~TARGET_PAGE_MASK
;
8904 tcg_gen_insn_start(pc_arg
, dc
->cc_op
);
8907 static void i386_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cpu
)
8909 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8911 #ifdef TARGET_VSYSCALL_PAGE
8913 * Detect entry into the vsyscall page and invoke the syscall.
8915 if ((dc
->base
.pc_next
& TARGET_PAGE_MASK
) == TARGET_VSYSCALL_PAGE
) {
8916 gen_exception(dc
, EXCP_VSYSCALL
);
8917 dc
->base
.pc_next
= dc
->pc
+ 1;
8922 if (disas_insn(dc
, cpu
)) {
8923 target_ulong pc_next
= dc
->pc
;
8924 dc
->base
.pc_next
= pc_next
;
8926 if (dc
->base
.is_jmp
== DISAS_NEXT
) {
8927 if (dc
->flags
& (HF_TF_MASK
| HF_INHIBIT_IRQ_MASK
)) {
8929 * If single step mode, we generate only one instruction and
8930 * generate an exception.
8931 * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8932 * the flag and abort the translation to give the irqs a
8935 dc
->base
.is_jmp
= DISAS_EOB_NEXT
;
8936 } else if (!is_same_page(&dc
->base
, pc_next
)) {
8937 dc
->base
.is_jmp
= DISAS_TOO_MANY
;
8943 static void i386_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cpu
)
8945 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8947 switch (dc
->base
.is_jmp
) {
8948 case DISAS_NORETURN
:
8950 case DISAS_TOO_MANY
:
8951 gen_update_cc_op(dc
);
8952 gen_jmp_rel_csize(dc
, 0, 0);
8954 case DISAS_EOB_NEXT
:
8955 gen_update_cc_op(dc
);
8956 gen_update_eip_cur(dc
);
8958 case DISAS_EOB_ONLY
:
8961 case DISAS_EOB_INHIBIT_IRQ
:
8962 gen_update_cc_op(dc
);
8963 gen_update_eip_cur(dc
);
8964 gen_eob_inhibit_irq(dc
, true);
8970 g_assert_not_reached();
8974 static void i386_tr_disas_log(const DisasContextBase
*dcbase
,
8975 CPUState
*cpu
, FILE *logfile
)
8977 DisasContext
*dc
= container_of(dcbase
, DisasContext
, base
);
8979 fprintf(logfile
, "IN: %s\n", lookup_symbol(dc
->base
.pc_first
));
8980 target_disas(logfile
, cpu
, dc
->base
.pc_first
, dc
->base
.tb
->size
);
8983 static const TranslatorOps i386_tr_ops
= {
8984 .init_disas_context
= i386_tr_init_disas_context
,
8985 .tb_start
= i386_tr_tb_start
,
8986 .insn_start
= i386_tr_insn_start
,
8987 .translate_insn
= i386_tr_translate_insn
,
8988 .tb_stop
= i386_tr_tb_stop
,
8989 .disas_log
= i386_tr_disas_log
,
8992 /* generate intermediate code for basic block 'tb'. */
8993 void gen_intermediate_code(CPUState
*cpu
, TranslationBlock
*tb
, int max_insns
,
8994 target_ulong pc
, void *host_pc
)
8998 translator_loop(cpu
, tb
, max_insns
, pc
, host_pc
, &i386_tr_ops
, &dc
.base
);
9001 void restore_state_to_opc(CPUX86State
*env
, TranslationBlock
*tb
,
9004 int cc_op
= data
[1];
9006 if (TARGET_TB_PCREL
) {
9007 env
->eip
= (env
->eip
& TARGET_PAGE_MASK
) | data
[0];
9009 env
->eip
= data
[0] - tb
->cs_base
;
9011 if (cc_op
!= CC_OP_DYNAMIC
) {