/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#include "exec/log.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7

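/* Example (illustrative): for OP = 1, CASE_MODRM_MEM_OP(1) covers the
   modrm bytes 0x08..0x0f, 0x48..0x4f and 0x88..0x8f, i.e. every mod != 3
   encoding whose reg/opcode field is 1; CASE_MODRM_OP(1) additionally
   covers the register forms 0xc8..0xcf.  */
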
//#define MACRO_TEST   1

/* global register indexes */
static TCGv_env cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
/* local temps */
static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    TCGMemOp aflag;
    TCGMemOp dflag;
    target_ulong pc_start;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_xsave_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jr(DisasContext *s, TCGv dest);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
    [CC_OP_POPCNT] = USES_CC_SRC,
};

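/* Illustration of the lazy flags scheme driven by this table: after an
   ADD, only the result (cc_dst) and one operand (cc_src) are recorded,
   with CC_OP set to CC_OP_ADDB + size; ZF is later derived roughly as
   (cc_dst == 0) and CF as an unsigned compare, so flags that no
   subsequent instruction reads are never materialized at all.  */
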
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

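/* Illustration: these are the byte offsets of the 8/16/32-bit
   sub-registers within the target_ulong backing a guest register.
   On a little-endian host, for example, AH lives at byte offset 1
   (REG_H_OFFSET) of the slot holding EAX; on a big-endian host the
   offsets count down from the most significant end instead.  */
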
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}

/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select the size of the stack pointer.  */
static inline TCGMemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

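/* Illustration: this mirrors the x86 encoding rule that opcode pairs
   differ only in bit 0.  E.g. 0x88 (MOV Eb,Gb) yields MO_8 from
   mo_b_d(0x88, ot), while 0x89 (MOV Ev,Gv) yields the current operand
   size; the IN/OUT decoders use mo_b_d32 because port data is at most
   32 bits wide.  */
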
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}

static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
    }
}

static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T0);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}

/* Compute SEG:REG into A0.  SEG is selected from the override segment
   (OVR_SEG) and the default segment (DEF_SEG).  OVR_SEG may be -1 to
   indicate no override.  */
static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
                          int def_seg, int ovr_seg)
{
    switch (aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (ovr_seg < 0) {
            tcg_gen_mov_tl(cpu_A0, a0);
            return;
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (ovr_seg < 0 && s->addseg) {
            ovr_seg = def_seg;
        }
        if (ovr_seg < 0) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            return;
        }
        break;
    case MO_16:
        /* 16 bit address */
        tcg_gen_ext16u_tl(cpu_A0, a0);
        a0 = cpu_A0;
        if (ovr_seg < 0) {
            if (s->addseg) {
                ovr_seg = def_seg;
            } else {
                return;
            }
        }
        break;
    default:
        tcg_abort();
    }

    if (ovr_seg >= 0) {
        TCGv seg = cpu_seg_base[ovr_seg];

        if (aflag == MO_64) {
            tcg_gen_add_tl(cpu_A0, a0, seg);
        } else if (CODE64(s)) {
            tcg_gen_ext32u_tl(cpu_A0, a0);
            tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
        } else {
            tcg_gen_add_tl(cpu_A0, a0, seg);
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
    }
}

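/* Illustration: a 16-bit "mov ax,[bx]" with addseg set takes the MO_16
   arm above; A0 becomes the zero-extended BX, ovr_seg falls back to the
   DS default, and the final address is roughly (A0 + ds.base) truncated
   to 32 bits, since CODE64(s) is false.  */
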
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

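/* Illustration: a CCPrepare describes a condition without forcing its
   evaluation.  After a SUB, for instance, the carry test comes back
   roughly as { .cond = TCG_COND_LTU, .reg = srcT, .reg2 = src,
   .use_reg2 = true }, which callers can then lower to either a setcond
   or a brcond as needed.  */
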
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    case CC_OP_POPCNT:
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = cpu_cc_src,
                             .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    TCGMemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

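/* Illustration: 'b' is the low nibble of a Jcc/SETcc opcode, so for
   b = 0x5 (JNZ) we get jcc_op = JCC_Z with inv = 1, and the prepared
   condition is simply inverted at the end instead of generating
   separate code for each negated form.  */
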
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        cc.reg = cpu_T0;
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
        cc.reg = cpu_T0;
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
        TCGv_i32 t_size = tcg_const_i32(1 << ot);
        TCGv t_next = tcg_const_tl(s->pc - s->cs_base);

        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
        tcg_temp_free_i32(t_size);
        tcg_temp_free(t_next);
    }
}

static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T0, 0);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T0, cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T0, cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

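/* Illustration: each instantiation above stamps out one helper, e.g.
   GEN_REPZ(movs) defines gen_repz_movs(s, ot, cur_eip, next_eip); the
   GEN_REPZ2 variants take an extra 'nz' argument so SCAS/CMPS can
   distinguish REPZ from REPNZ termination.  */
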
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T0, d);
    } else if (!(s1->prefix & PREFIX_LOCK)) {
        gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(cpu_T0, cpu_tmp4, cpu_T1);
            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_add_tl(cpu_T0, cpu_T1, cpu_tmp4);
            tcg_gen_neg_tl(cpu_T0, cpu_T0);
            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_neg_tl(cpu_T0, cpu_T1);
            tcg_gen_atomic_fetch_add_tl(cpu_cc_srcT, cpu_A0, cpu_T0,
                                        s1->mem_index, ot | MO_LE);
            tcg_gen_sub_tl(cpu_T0, cpu_cc_srcT, cpu_T1);
        } else {
            tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
            tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_and_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_or_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
                                       s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        if (s1->prefix & PREFIX_LOCK) {
            tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T1,
                                        s1->mem_index, ot | MO_LE);
        } else {
            tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
            gen_op_st_rm_T0_A0(s1, ot, d);
        }
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

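/* Illustration: gen_op is the common back end for the two-operand ALU
   group.  Decoding "add [mem], reg" typically ends up here as
   gen_op(s, OP_ADDL, ot, OR_TMP0) with the register operand already in
   T1 and the address in A0; with a LOCK prefix the same path emits the
   tcg_gen_atomic_* form instead of a load/op/store sequence.  */
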
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (s1->prefix & PREFIX_LOCK) {
        tcg_gen_movi_tl(cpu_T0, c > 0 ? 1 : -1);
        tcg_gen_atomic_add_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
                                    s1->mem_index, ot | MO_LE);
    } else {
        if (d != OR_TMP0) {
            gen_op_mov_v_reg(ot, cpu_T0, d);
        } else {
            gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
        }
        tcg_gen_addi_tl(cpu_T0, cpu_T0, (c > 0 ? 1 : -1));
        gen_op_st_rm_T0_A0(s1, ot, d);
    }

    gen_compute_eflags_c(s1, cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
    set_cc_op(s1, (c > 0 ? CC_OP_INCB : CC_OP_DECB) + ot);
}

static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T0);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
        } else {
            gen_extu(ot, cpu_T0);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
        tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
}

static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T0);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
                tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
            } else {
                gen_extu(ot, cpu_T0);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
                tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
            tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}

static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
        tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
        } else {
            tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which has the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T1);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
            } else {
                tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T0);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
            tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
            tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T0, op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#endif
        default:
            tcg_abort();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
            break;
#endif
        default:
            tcg_abort();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
            tcg_gen_mov_tl(cpu_T1, cpu_T0);
            tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case MO_32:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
            tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
            tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
            tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
                           cpu_tmp4, cpu_T1);
        tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}

static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_v_reg(ot, cpu_T1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(cpu_T1, c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}

/* Decompose an address.  */

typedef struct AddressParts {
    int def_seg;
    int base;
    int index;
    int scale;
    target_long disp;
} AddressParts;

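/* Illustration: the 32-bit operand "0x10(%ebx,%esi,2)" decodes to
   { .def_seg = R_DS, .base = R_EBX, .index = R_ESI, .scale = 1,
     .disp = 0x10 }; scale holds the raw 2-bit SIB field, i.e. the
   shift amount applied by gen_lea_modrm_1 below.  */
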
static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
                                    int modrm)
{
    int def_seg, base, index, scale, mod, rm;
    target_long disp;
    bool havesib;

    def_seg = R_DS;
    index = -1;
    scale = 0;
    disp = 0;

    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    base = rm | REX_B(s);

    if (mod == 3) {
        /* Normally filtered out earlier, but including this path
           simplifies multi-byte nop, as well as bndcl, bndcu, bndcn.  */
        goto done;
    }

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        if (rm == 4) {
            int code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7) | REX_B(s);
            havesib = 1;
        }

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    base = -2;
                    disp += s->pc + s->rip_offset;
                }
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }
        if (base == R_EBP || base == R_ESP) {
            def_seg = R_SS;
        }
        break;

    case MO_16:
        if (mod == 0) {
            if (rm == 6) {
                base = -1;
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                break;
            }
        } else if (mod == 1) {
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
        } else {
            disp = (int16_t)cpu_lduw_code(env, s->pc);
            s->pc += 2;
        }

        switch (rm) {
        case 0:
            base = R_EBX;
            index = R_ESI;
            break;
        case 1:
            base = R_EBX;
            index = R_EDI;
            break;
        case 2:
            base = R_EBP;
            index = R_ESI;
            def_seg = R_SS;
            break;
        case 3:
            base = R_EBP;
            index = R_EDI;
            def_seg = R_SS;
            break;
        case 4:
            base = R_ESI;
            break;
        case 5:
            base = R_EDI;
            break;
        case 6:
            base = R_EBP;
            def_seg = R_SS;
            break;
        default:
        case 7:
            base = R_EBX;
            break;
        }
        break;

    default:
        tcg_abort();
    }

 done:
    return (AddressParts){ def_seg, base, index, scale, disp };
}

/* Compute the address, with a minimum number of TCG ops.  */
static TCGv gen_lea_modrm_1(AddressParts a)
{
    TCGv ea;

    TCGV_UNUSED(ea);
    if (a.index >= 0) {
        if (a.scale == 0) {
            ea = cpu_regs[a.index];
        } else {
            tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
            ea = cpu_A0;
        }
        if (a.base >= 0) {
            tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
            ea = cpu_A0;
        }
    } else if (a.base >= 0) {
        ea = cpu_regs[a.base];
    }
    if (TCGV_IS_UNUSED(ea)) {
        tcg_gen_movi_tl(cpu_A0, a.disp);
        ea = cpu_A0;
    } else if (a.disp != 0) {
        tcg_gen_addi_tl(cpu_A0, ea, a.disp);
        ea = cpu_A0;
    }

    return ea;
}

static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    AddressParts a = gen_lea_modrm_0(env, s, modrm);
    TCGv ea = gen_lea_modrm_1(a);
    gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
}

static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    (void)gen_lea_modrm_0(env, s, modrm);
}

/* Used for BNDCL, BNDCU, BNDCN.  */
static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
                      TCGCond cond, TCGv_i64 bndv)
{
    TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));

    tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
    if (!CODE64(s)) {
        tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
    }
    tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
    tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
    gen_helper_bndck(cpu_env, cpu_tmp2_i32);
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
}

/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           TCGMemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_mov_reg_v(ot, rm, cpu_T0);
        } else {
            gen_op_mov_v_reg(ot, cpu_T0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T0);
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T0, reg);
            gen_op_st_v(s, ot, cpu_T0, cpu_A0);
        } else {
            gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T0);
        }
    }
}

static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = cpu_ldub_code(env, s->pc);
        s->pc++;
        break;
    case MO_16:
        ret = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = cpu_ldl_code(env, s->pc);
        s->pc += 4;
        break;
    default:
        tcg_abort();
    }
    return ret;
}

static inline int insn_const_size(TCGMemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}

static inline bool use_goto_tb(DisasContext *s, target_ulong pc)
{
#ifndef CONFIG_USER_ONLY
    return (pc & TARGET_PAGE_MASK) == (s->tb->pc & TARGET_PAGE_MASK) ||
           (pc & TARGET_PAGE_MASK) == (s->pc_start & TARGET_PAGE_MASK);
#else
    return true;
#endif
}

static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    target_ulong pc = s->cs_base + eip;

    if (use_goto_tb(s, pc)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((uintptr_t)s->tb + tb_num);
    } else {
        /* jump to another page */
        gen_jmp_im(eip);
        gen_jr(s, cpu_tmp0);
    }
}

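/* Illustration: tb_num selects one of the two goto_tb slots of the
   current TB, so a conditional branch can chain both successors
   directly (slot 0 for the fall-through path, slot 1 for the taken
   path in gen_jcc below) when they stay on the same guest page.  */
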
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    TCGLabel *l1, *l2;

    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}

static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
                        int modrm, int reg)
{
    CCPrepare cc;

    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

    cc = gen_prepare_cc(s, b, cpu_T1);
    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_const_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
                       cpu_T0, cpu_regs[reg]);
    gen_op_mov_reg_v(ot, reg, cpu_T0);

    if (cc.mask != -1) {
        tcg_temp_free(cc.reg);
    }
    if (!cc.use_reg2) {
        tcg_temp_free(cc.reg2);
    }
}

2221 static inline void gen_op_movl_T0_seg(int seg_reg)
2223 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
2224 offsetof(CPUX86State,segs[seg_reg].selector));
2227 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2229 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
2230 tcg_gen_st32_tl(cpu_T0, cpu_env,
2231 offsetof(CPUX86State,segs[seg_reg].selector));
2232 tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
2235 /* move T0 to seg_reg and compute if the CPU state may change. Never
2236 call this function with seg_reg == R_CS */
2237 static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2239 if (s->pe && !s->vm86) {
2240 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2241 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2242 /* abort translation because the addseg value may change or
2243 because ss32 may change. For R_SS, translation must always
2244 stop as a special handling must be done to disable hardware
2245 interrupts for the next instruction */
2246 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2247 s->is_jmp = DISAS_TB_JUMP;
2248 } else {
2249 gen_op_movl_seg_T0_vm(seg_reg);
2250 if (seg_reg == R_SS)
2251 s->is_jmp = DISAS_TB_JUMP;
2255 static inline int svm_is_rep(int prefixes)
2257 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2260 static inline void
2261 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2262 uint32_t type, uint64_t param)
2264 /* no SVM activated; fast case */
2265 if (likely(!(s->flags & HF_SVMI_MASK)))
2266 return;
2267 gen_update_cc_op(s);
2268 gen_jmp_im(pc_start - s->cs_base);
2269 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2270 tcg_const_i64(param));
2273 static inline void
2274 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2276 gen_svm_check_intercept_param(s, pc_start, type, 0);
2279 static inline void gen_stack_update(DisasContext *s, int addend)
2281 gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
2284 /* Generate a push. It depends on ss32, addseg and dflag. */
2285 static void gen_push_v(DisasContext *s, TCGv val)
2287 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2288 TCGMemOp a_ot = mo_stacksize(s);
2289 int size = 1 << d_ot;
2290 TCGv new_esp = cpu_A0;
2292 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2294 if (!CODE64(s)) {
2295 if (s->addseg) {
2296 new_esp = cpu_tmp4;
2297 tcg_gen_mov_tl(new_esp, cpu_A0);
2299 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2302 gen_op_st_v(s, d_ot, val, cpu_A0);
2303 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
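/* ESP is committed only after the store has succeeded, so a push that
   faults leaves the stack pointer unchanged (precise exceptions);
   new_esp lives in a scratch register whenever cpu_A0 had to hold a
   segmented address. */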
2306 /* Two-step pop (load first, update ESP afterwards) is necessary for
precise exceptions: a faulting load must leave ESP unchanged. */
2307 static TCGMemOp gen_pop_T0(DisasContext *s)
2309 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2311 gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2312 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2314 return d_ot;
2317 static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
2319 gen_stack_update(s, 1 << ot);
2322 static inline void gen_stack_A0(DisasContext *s)
2324 gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
2327 static void gen_pusha(DisasContext *s)
2329 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2330 TCGMemOp d_ot = s->dflag;
2331 int size = 1 << d_ot;
2332 int i;
2334 for (i = 0; i < 8; i++) {
2335 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
2336 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2337 gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
2340 gen_stack_update(s, -8 * size);
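/* cpu_regs[7 - i] walks EDI down to EAX while the store addresses walk
   upwards, so EAX lands at the highest address, i.e. is pushed first,
   as the architecture requires; the stored ESP is the pre-PUSHA value
   since R_ESP is only updated afterwards. */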
2343 static void gen_popa(DisasContext *s)
2345 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2346 TCGMemOp d_ot = s->dflag;
2347 int size = 1 << d_ot;
2348 int i;
2350 for (i = 0; i < 8; i++) {
2351 /* ESP is not reloaded */
2352 if (7 - i == R_ESP) {
2353 continue;
2355 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
2356 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2357 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2358 gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
2361 gen_stack_update(s, 8 * size);
2364 static void gen_enter(DisasContext *s, int esp_addend, int level)
2366 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2367 TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
2368 int size = 1 << d_ot;
2370 /* Push BP; compute FrameTemp into T1. */
2371 tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
2372 gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
2373 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
2375 level &= 31;
2376 if (level != 0) {
2377 int i;
2379 /* Copy level-1 pointers from the previous frame. */
2380 for (i = 1; i < level; ++i) {
2381 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
2382 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2383 gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
2385 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
2386 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2387 gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
2390 /* Push the current FrameTemp as the last level. */
2391 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
2392 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2393 gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
2396 /* Copy the FrameTemp value to EBP. */
2397 gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
2399 /* Compute the final value of ESP. */
2400 tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
2401 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
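/* Worked example: ENTER 8, 2 with a 32-bit stack pushes EBP (FrameTemp
   = old ESP - 4), copies one saved frame pointer from the enclosing
   frame, pushes FrameTemp itself, then sets EBP = FrameTemp and
   ESP = FrameTemp - (8 + 4 * 2). */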
2404 static void gen_leave(DisasContext *s)
2406 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2407 TCGMemOp a_ot = mo_stacksize(s);
2409 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2410 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2412 tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
2414 gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
2415 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
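/* LEAVE undoes ENTER: EBP is reloaded from the stack slot it points
   at, and ESP ends up just above that slot (old EBP + operand size). */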
2418 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2420 gen_update_cc_op(s);
2421 gen_jmp_im(cur_eip);
2422 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2423 s->is_jmp = DISAS_TB_JUMP;
2426 /* Generate #UD for the current instruction. The assumption here is that
2427 the instruction is known, but it isn't allowed in the current cpu mode. */
2428 static void gen_illegal_opcode(DisasContext *s)
2430 gen_exception(s, EXCP06_ILLOP, s->pc_start - s->cs_base);
2433 /* Similarly, except that the assumption here is that we don't decode
2434 the instruction at all -- either a missing opcode, an unimplemented
2435 feature, or just a bogus instruction stream. */
2436 static void gen_unknown_opcode(CPUX86State *env, DisasContext *s)
2438 gen_illegal_opcode(s);
2440 if (qemu_loglevel_mask(LOG_UNIMP)) {
2441 target_ulong pc = s->pc_start, end = s->pc;
2442 qemu_log_lock();
2443 qemu_log("ILLOPC: " TARGET_FMT_lx ":", pc);
2444 for (; pc < end; ++pc) {
2445 qemu_log(" %02x", cpu_ldub_code(env, pc));
2447 qemu_log("\n");
2448 qemu_log_unlock();
2452 /* an interrupt is different from an exception because of the
2453 privilege checks */
2454 static void gen_interrupt(DisasContext *s, int intno,
2455 target_ulong cur_eip, target_ulong next_eip)
2457 gen_update_cc_op(s);
2458 gen_jmp_im(cur_eip);
2459 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2460 tcg_const_i32(next_eip - cur_eip));
2461 s->is_jmp = DISAS_TB_JUMP;
2464 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2466 gen_update_cc_op(s);
2467 gen_jmp_im(cur_eip);
2468 gen_helper_debug(cpu_env);
2469 s->is_jmp = DISAS_TB_JUMP;
2472 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2474 if ((s->flags & mask) == 0) {
2475 TCGv_i32 t = tcg_temp_new_i32();
2476 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2477 tcg_gen_ori_i32(t, t, mask);
2478 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2479 tcg_temp_free_i32(t);
2480 s->flags |= mask;
2484 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2486 if (s->flags & mask) {
2487 TCGv_i32 t = tcg_temp_new_i32();
2488 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2489 tcg_gen_andi_i32(t, t, ~mask);
2490 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2491 tcg_temp_free_i32(t);
2492 s->flags &= ~mask;
2496 /* Clear BND registers during legacy branches. */
2497 static void gen_bnd_jmp(DisasContext *s)
2499 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2500 and if the BNDREGs are known to be in use (non-zero) already.
2501 The helper itself will check BNDPRESERVE at runtime. */
2502 if ((s->prefix & PREFIX_REPNZ) == 0
2503 && (s->flags & HF_MPX_EN_MASK) != 0
2504 && (s->flags & HF_MPX_IU_MASK) != 0) {
2505 gen_helper_bnd_jmp(cpu_env);
2509 /* Generate an end of block. Trace exception is also generated if needed.
2510 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set.
2511 If RECHECK_TF, emit a rechecking helper for #DB, ignoring the state of
2512 S->TF. This is used by the syscall/sysret insns. */
2513 static void
2514 do_gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf, TCGv jr)
2516 gen_update_cc_op(s);
2518 /* If several instructions disable interrupts, only the first does it. */
2519 if (inhibit && !(s->flags & HF_INHIBIT_IRQ_MASK)) {
2520 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
2521 } else {
2522 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2525 if (s->tb->flags & HF_RF_MASK) {
2526 gen_helper_reset_rf(cpu_env);
2528 if (s->singlestep_enabled) {
2529 gen_helper_debug(cpu_env);
2530 } else if (recheck_tf) {
2531 gen_helper_rechecking_single_step(cpu_env);
2532 tcg_gen_exit_tb(0);
2533 } else if (s->tf) {
2534 gen_helper_single_step(cpu_env);
2535 } else if (!TCGV_IS_UNUSED(jr)) {
2536 TCGv vaddr = tcg_temp_new();
2538 tcg_gen_add_tl(vaddr, jr, cpu_seg_base[R_CS]);
2539 tcg_gen_lookup_and_goto_ptr(vaddr);
2540 tcg_temp_free(vaddr);
2541 } else {
2542 tcg_gen_exit_tb(0);
2544 s->is_jmp = DISAS_TB_JUMP;
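/* For computed jumps, jr holds the new EIP; adding the CS base forms
   the linear address that tcg_gen_lookup_and_goto_ptr() uses to find
   an existing TB without leaving generated code. */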
2547 static inline void
2548 gen_eob_worker(DisasContext *s, bool inhibit, bool recheck_tf)
2550 TCGv unused;
2552 TCGV_UNUSED(unused);
2553 do_gen_eob_worker(s, inhibit, recheck_tf, unused);
2556 /* End of block.
2557 If INHIBIT, set HF_INHIBIT_IRQ_MASK if it isn't already set. */
2558 static void gen_eob_inhibit_irq(DisasContext *s, bool inhibit)
2560 gen_eob_worker(s, inhibit, false);
2563 /* End of block, resetting the inhibit irq flag. */
2564 static void gen_eob(DisasContext *s)
2566 gen_eob_worker(s, false, false);
2569 /* Jump to register */
2570 static void gen_jr(DisasContext *s, TCGv dest)
2572 do_gen_eob_worker(s, false, false, dest);
2575 /* Generate a jump to eip. No segment change may happen before this,
2576 since a direct jump to the next block may occur. */
2577 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2579 gen_update_cc_op(s);
2580 set_cc_op(s, CC_OP_DYNAMIC);
2581 if (s->jmp_opt) {
2582 gen_goto_tb(s, tb_num, eip);
2583 s->is_jmp = DISAS_TB_JUMP;
2584 } else {
2585 gen_jmp_im(eip);
2586 gen_eob(s);
2590 static void gen_jmp(DisasContext *s, target_ulong eip)
2592 gen_jmp_tb(s, eip, 0);
2595 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2597 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2598 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2601 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2603 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2604 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2607 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2609 int mem_index = s->mem_index;
2610 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2611 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2612 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2613 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2614 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2617 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2619 int mem_index = s->mem_index;
2620 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2621 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2622 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2623 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2624 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2627 static inline void gen_op_movo(int d_offset, int s_offset)
2629 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
2630 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
2631 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
2632 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
2635 static inline void gen_op_movq(int d_offset, int s_offset)
2637 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2638 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2641 static inline void gen_op_movl(int d_offset, int s_offset)
2643 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2644 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2647 static inline void gen_op_movq_env_0(int d_offset)
2649 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2650 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2653 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2654 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2655 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2656 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2657 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2658 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2659 TCGv_i32 val);
2660 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2661 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2662 TCGv val);
2664 #define SSE_SPECIAL ((void *)1)
2665 #define SSE_DUMMY ((void *)2)
2667 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2668 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2669 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2671 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2672 /* 3DNow! extensions */
2673 [0x0e] = { SSE_DUMMY }, /* femms */
2674 [0x0f] = { SSE_DUMMY }, /* pf... */
2675 /* pure SSE operations */
2676 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2677 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2678 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2679 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2680 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2681 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2682 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2683 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2685 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2686 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2687 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2688 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2689 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2690 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2691 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2692 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2693 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2694 [0x51] = SSE_FOP(sqrt),
2695 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2696 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2697 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2698 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2699 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2700 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2701 [0x58] = SSE_FOP(add),
2702 [0x59] = SSE_FOP(mul),
2703 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2704 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2705 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2706 [0x5c] = SSE_FOP(sub),
2707 [0x5d] = SSE_FOP(min),
2708 [0x5e] = SSE_FOP(div),
2709 [0x5f] = SSE_FOP(max),
2711 [0xc2] = SSE_FOP(cmpeq),
2712 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2713 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2715 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2716 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2717 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2719 /* MMX ops and their SSE extensions */
2720 [0x60] = MMX_OP2(punpcklbw),
2721 [0x61] = MMX_OP2(punpcklwd),
2722 [0x62] = MMX_OP2(punpckldq),
2723 [0x63] = MMX_OP2(packsswb),
2724 [0x64] = MMX_OP2(pcmpgtb),
2725 [0x65] = MMX_OP2(pcmpgtw),
2726 [0x66] = MMX_OP2(pcmpgtl),
2727 [0x67] = MMX_OP2(packuswb),
2728 [0x68] = MMX_OP2(punpckhbw),
2729 [0x69] = MMX_OP2(punpckhwd),
2730 [0x6a] = MMX_OP2(punpckhdq),
2731 [0x6b] = MMX_OP2(packssdw),
2732 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2733 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2734 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2735 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2736 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2737 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2738 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2739 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2740 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2741 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2742 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2743 [0x74] = MMX_OP2(pcmpeqb),
2744 [0x75] = MMX_OP2(pcmpeqw),
2745 [0x76] = MMX_OP2(pcmpeql),
2746 [0x77] = { SSE_DUMMY }, /* emms */
2747 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2748 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2749 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2750 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2751 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2752 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2753 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2754 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2755 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2756 [0xd1] = MMX_OP2(psrlw),
2757 [0xd2] = MMX_OP2(psrld),
2758 [0xd3] = MMX_OP2(psrlq),
2759 [0xd4] = MMX_OP2(paddq),
2760 [0xd5] = MMX_OP2(pmullw),
2761 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2762 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2763 [0xd8] = MMX_OP2(psubusb),
2764 [0xd9] = MMX_OP2(psubusw),
2765 [0xda] = MMX_OP2(pminub),
2766 [0xdb] = MMX_OP2(pand),
2767 [0xdc] = MMX_OP2(paddusb),
2768 [0xdd] = MMX_OP2(paddusw),
2769 [0xde] = MMX_OP2(pmaxub),
2770 [0xdf] = MMX_OP2(pandn),
2771 [0xe0] = MMX_OP2(pavgb),
2772 [0xe1] = MMX_OP2(psraw),
2773 [0xe2] = MMX_OP2(psrad),
2774 [0xe3] = MMX_OP2(pavgw),
2775 [0xe4] = MMX_OP2(pmulhuw),
2776 [0xe5] = MMX_OP2(pmulhw),
2777 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2778 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2779 [0xe8] = MMX_OP2(psubsb),
2780 [0xe9] = MMX_OP2(psubsw),
2781 [0xea] = MMX_OP2(pminsw),
2782 [0xeb] = MMX_OP2(por),
2783 [0xec] = MMX_OP2(paddsb),
2784 [0xed] = MMX_OP2(paddsw),
2785 [0xee] = MMX_OP2(pmaxsw),
2786 [0xef] = MMX_OP2(pxor),
2787 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2788 [0xf1] = MMX_OP2(psllw),
2789 [0xf2] = MMX_OP2(pslld),
2790 [0xf3] = MMX_OP2(psllq),
2791 [0xf4] = MMX_OP2(pmuludq),
2792 [0xf5] = MMX_OP2(pmaddwd),
2793 [0xf6] = MMX_OP2(psadbw),
2794 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2795 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2796 [0xf8] = MMX_OP2(psubb),
2797 [0xf9] = MMX_OP2(psubw),
2798 [0xfa] = MMX_OP2(psubl),
2799 [0xfb] = MMX_OP2(psubq),
2800 [0xfc] = MMX_OP2(paddb),
2801 [0xfd] = MMX_OP2(paddw),
2802 [0xfe] = MMX_OP2(paddl),
2805 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2806 [0 + 2] = MMX_OP2(psrlw),
2807 [0 + 4] = MMX_OP2(psraw),
2808 [0 + 6] = MMX_OP2(psllw),
2809 [8 + 2] = MMX_OP2(psrld),
2810 [8 + 4] = MMX_OP2(psrad),
2811 [8 + 6] = MMX_OP2(pslld),
2812 [16 + 2] = MMX_OP2(psrlq),
2813 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2814 [16 + 6] = MMX_OP2(psllq),
2815 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
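/* Indexed as [group * 8 + reg][b1]: groups 0/1/2 are the w/d/q forms
   of opcodes 0x71/0x72/0x73, and the modrm reg field selects srl (2),
   sra (4) or sll (6), plus the xmm-only psrldq (3) and pslldq (7). */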
2818 static const SSEFunc_0_epi sse_op_table3ai[] = {
2819 gen_helper_cvtsi2ss,
2820 gen_helper_cvtsi2sd
2823 #ifdef TARGET_X86_64
2824 static const SSEFunc_0_epl sse_op_table3aq[] = {
2825 gen_helper_cvtsq2ss,
2826 gen_helper_cvtsq2sd
2828 #endif
2830 static const SSEFunc_i_ep sse_op_table3bi[] = {
2831 gen_helper_cvttss2si,
2832 gen_helper_cvtss2si,
2833 gen_helper_cvttsd2si,
2834 gen_helper_cvtsd2si
2837 #ifdef TARGET_X86_64
2838 static const SSEFunc_l_ep sse_op_table3bq[] = {
2839 gen_helper_cvttss2sq,
2840 gen_helper_cvtss2sq,
2841 gen_helper_cvttsd2sq,
2842 gen_helper_cvtsd2sq
2844 #endif
2846 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2847 SSE_FOP(cmpeq),
2848 SSE_FOP(cmplt),
2849 SSE_FOP(cmple),
2850 SSE_FOP(cmpunord),
2851 SSE_FOP(cmpneq),
2852 SSE_FOP(cmpnlt),
2853 SSE_FOP(cmpnle),
2854 SSE_FOP(cmpord),
2857 static const SSEFunc_0_epp sse_op_table5[256] = {
2858 [0x0c] = gen_helper_pi2fw,
2859 [0x0d] = gen_helper_pi2fd,
2860 [0x1c] = gen_helper_pf2iw,
2861 [0x1d] = gen_helper_pf2id,
2862 [0x8a] = gen_helper_pfnacc,
2863 [0x8e] = gen_helper_pfpnacc,
2864 [0x90] = gen_helper_pfcmpge,
2865 [0x94] = gen_helper_pfmin,
2866 [0x96] = gen_helper_pfrcp,
2867 [0x97] = gen_helper_pfrsqrt,
2868 [0x9a] = gen_helper_pfsub,
2869 [0x9e] = gen_helper_pfadd,
2870 [0xa0] = gen_helper_pfcmpgt,
2871 [0xa4] = gen_helper_pfmax,
2872 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2873 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2874 [0xaa] = gen_helper_pfsubr,
2875 [0xae] = gen_helper_pfacc,
2876 [0xb0] = gen_helper_pfcmpeq,
2877 [0xb4] = gen_helper_pfmul,
2878 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2879 [0xb7] = gen_helper_pmulhrw_mmx,
2880 [0xbb] = gen_helper_pswapd,
2881 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
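/* 3DNow! table: these opcodes are encoded as 0f 0f /r imm8, and the
   trailing imm8 suffix indexes this table directly. */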
2884 struct SSEOpHelper_epp {
2885 SSEFunc_0_epp op[2];
2886 uint32_t ext_mask;
2889 struct SSEOpHelper_eppi {
2890 SSEFunc_0_eppi op[2];
2891 uint32_t ext_mask;
2894 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2895 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2896 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2897 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2898 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2899 CPUID_EXT_PCLMULQDQ }
2900 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
2902 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2903 [0x00] = SSSE3_OP(pshufb),
2904 [0x01] = SSSE3_OP(phaddw),
2905 [0x02] = SSSE3_OP(phaddd),
2906 [0x03] = SSSE3_OP(phaddsw),
2907 [0x04] = SSSE3_OP(pmaddubsw),
2908 [0x05] = SSSE3_OP(phsubw),
2909 [0x06] = SSSE3_OP(phsubd),
2910 [0x07] = SSSE3_OP(phsubsw),
2911 [0x08] = SSSE3_OP(psignb),
2912 [0x09] = SSSE3_OP(psignw),
2913 [0x0a] = SSSE3_OP(psignd),
2914 [0x0b] = SSSE3_OP(pmulhrsw),
2915 [0x10] = SSE41_OP(pblendvb),
2916 [0x14] = SSE41_OP(blendvps),
2917 [0x15] = SSE41_OP(blendvpd),
2918 [0x17] = SSE41_OP(ptest),
2919 [0x1c] = SSSE3_OP(pabsb),
2920 [0x1d] = SSSE3_OP(pabsw),
2921 [0x1e] = SSSE3_OP(pabsd),
2922 [0x20] = SSE41_OP(pmovsxbw),
2923 [0x21] = SSE41_OP(pmovsxbd),
2924 [0x22] = SSE41_OP(pmovsxbq),
2925 [0x23] = SSE41_OP(pmovsxwd),
2926 [0x24] = SSE41_OP(pmovsxwq),
2927 [0x25] = SSE41_OP(pmovsxdq),
2928 [0x28] = SSE41_OP(pmuldq),
2929 [0x29] = SSE41_OP(pcmpeqq),
2930 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2931 [0x2b] = SSE41_OP(packusdw),
2932 [0x30] = SSE41_OP(pmovzxbw),
2933 [0x31] = SSE41_OP(pmovzxbd),
2934 [0x32] = SSE41_OP(pmovzxbq),
2935 [0x33] = SSE41_OP(pmovzxwd),
2936 [0x34] = SSE41_OP(pmovzxwq),
2937 [0x35] = SSE41_OP(pmovzxdq),
2938 [0x37] = SSE42_OP(pcmpgtq),
2939 [0x38] = SSE41_OP(pminsb),
2940 [0x39] = SSE41_OP(pminsd),
2941 [0x3a] = SSE41_OP(pminuw),
2942 [0x3b] = SSE41_OP(pminud),
2943 [0x3c] = SSE41_OP(pmaxsb),
2944 [0x3d] = SSE41_OP(pmaxsd),
2945 [0x3e] = SSE41_OP(pmaxuw),
2946 [0x3f] = SSE41_OP(pmaxud),
2947 [0x40] = SSE41_OP(pmulld),
2948 [0x41] = SSE41_OP(phminposuw),
2949 [0xdb] = AESNI_OP(aesimc),
2950 [0xdc] = AESNI_OP(aesenc),
2951 [0xdd] = AESNI_OP(aesenclast),
2952 [0xde] = AESNI_OP(aesdec),
2953 [0xdf] = AESNI_OP(aesdeclast),
2956 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2957 [0x08] = SSE41_OP(roundps),
2958 [0x09] = SSE41_OP(roundpd),
2959 [0x0a] = SSE41_OP(roundss),
2960 [0x0b] = SSE41_OP(roundsd),
2961 [0x0c] = SSE41_OP(blendps),
2962 [0x0d] = SSE41_OP(blendpd),
2963 [0x0e] = SSE41_OP(pblendw),
2964 [0x0f] = SSSE3_OP(palignr),
2965 [0x14] = SSE41_SPECIAL, /* pextrb */
2966 [0x15] = SSE41_SPECIAL, /* pextrw */
2967 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2968 [0x17] = SSE41_SPECIAL, /* extractps */
2969 [0x20] = SSE41_SPECIAL, /* pinsrb */
2970 [0x21] = SSE41_SPECIAL, /* insertps */
2971 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2972 [0x40] = SSE41_OP(dpps),
2973 [0x41] = SSE41_OP(dppd),
2974 [0x42] = SSE41_OP(mpsadbw),
2975 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2976 [0x60] = SSE42_OP(pcmpestrm),
2977 [0x61] = SSE42_OP(pcmpestri),
2978 [0x62] = SSE42_OP(pcmpistrm),
2979 [0x63] = SSE42_OP(pcmpistri),
2980 [0xdf] = AESNI_OP(aeskeygenassist),
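/* Tables 6 and 7 cover the three-byte 0f 38 and 0f 3a opcode maps;
   each entry's ext_mask is checked against CPUID before the helper is
   used. */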
2983 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2984 target_ulong pc_start, int rex_r)
2986 int b1, op1_offset, op2_offset, is_xmm, val;
2987 int modrm, mod, rm, reg;
2988 SSEFunc_0_epp sse_fn_epp;
2989 SSEFunc_0_eppi sse_fn_eppi;
2990 SSEFunc_0_ppi sse_fn_ppi;
2991 SSEFunc_0_eppt sse_fn_eppt;
2992 TCGMemOp ot;
2994 b &= 0xff;
2995 if (s->prefix & PREFIX_DATA)
2996 b1 = 1;
2997 else if (s->prefix & PREFIX_REPZ)
2998 b1 = 2;
2999 else if (s->prefix & PREFIX_REPNZ)
3000 b1 = 3;
3001 else
3002 b1 = 0;
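/* b1 encodes the mandatory prefix (0 = none, 1 = 66, 2 = f3, 3 = f2)
   and selects the column in the sse_op_table* arrays. */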
3003 sse_fn_epp = sse_op_table1[b][b1];
3004 if (!sse_fn_epp) {
3005 goto unknown_op;
3007 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3008 is_xmm = 1;
3009 } else {
3010 if (b1 == 0) {
3011 /* MMX case */
3012 is_xmm = 0;
3013 } else {
3014 is_xmm = 1;
3017 /* simple MMX/SSE operation */
3018 if (s->flags & HF_TS_MASK) {
3019 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3020 return;
3022 if (s->flags & HF_EM_MASK) {
3023 illegal_op:
3024 gen_illegal_opcode(s);
3025 return;
3027 if (is_xmm
3028 && !(s->flags & HF_OSFXSR_MASK)
3029 && ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))) {
3030 goto unknown_op;
3032 if (b == 0x0e) {
3033 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
3034 /* If we were fully decoding this we might use illegal_op. */
3035 goto unknown_op;
3037 /* femms */
3038 gen_helper_emms(cpu_env);
3039 return;
3041 if (b == 0x77) {
3042 /* emms */
3043 gen_helper_emms(cpu_env);
3044 return;
3046 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3047 the static cpu state) */
3048 if (!is_xmm) {
3049 gen_helper_enter_mmx(cpu_env);
3052 modrm = cpu_ldub_code(env, s->pc++);
3053 reg = ((modrm >> 3) & 7);
3054 if (is_xmm)
3055 reg |= rex_r;
3056 mod = (modrm >> 6) & 3;
3057 if (sse_fn_epp == SSE_SPECIAL) {
3058 b |= (b1 << 8);
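/* Fold the prefix into bits 8-9 of b so every case below names one
   prefixed opcode, e.g. 0x210 is f3 0f 10 (movss) and 0x110 is
   66 0f 10 (movupd). */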
3059 switch(b) {
3060 case 0x0e7: /* movntq */
3061 if (mod == 3) {
3062 goto illegal_op;
3064 gen_lea_modrm(env, s, modrm);
3065 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3066 break;
3067 case 0x1e7: /* movntdq */
3068 case 0x02b: /* movntps */
3069 case 0x12b: /* movntpd */
3070 if (mod == 3)
3071 goto illegal_op;
3072 gen_lea_modrm(env, s, modrm);
3073 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3074 break;
3075 case 0x3f0: /* lddqu */
3076 if (mod == 3)
3077 goto illegal_op;
3078 gen_lea_modrm(env, s, modrm);
3079 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3080 break;
3081 case 0x22b: /* movntss */
3082 case 0x32b: /* movntsd */
3083 if (mod == 3)
3084 goto illegal_op;
3085 gen_lea_modrm(env, s, modrm);
3086 if (b1 & 1) {
3087 gen_stq_env_A0(s, offsetof(CPUX86State,
3088 xmm_regs[reg].ZMM_Q(0)));
3089 } else {
3090 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
3091 xmm_regs[reg].ZMM_L(0)));
3092 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
3094 break;
3095 case 0x6e: /* movd mm, ea */
3096 #ifdef TARGET_X86_64
3097 if (s->dflag == MO_64) {
3098 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3099 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3100 } else
3101 #endif
3103 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3104 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3105 offsetof(CPUX86State,fpregs[reg].mmx));
3106 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3107 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3109 break;
3110 case 0x16e: /* movd xmm, ea */
3111 #ifdef TARGET_X86_64
3112 if (s->dflag == MO_64) {
3113 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3114 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3115 offsetof(CPUX86State,xmm_regs[reg]));
3116 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
3117 } else
3118 #endif
3120 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3121 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3122 offsetof(CPUX86State,xmm_regs[reg]));
3123 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3124 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3126 break;
3127 case 0x6f: /* movq mm, ea */
3128 if (mod != 3) {
3129 gen_lea_modrm(env, s, modrm);
3130 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3131 } else {
3132 rm = (modrm & 7);
3133 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3134 offsetof(CPUX86State,fpregs[rm].mmx));
3135 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3136 offsetof(CPUX86State,fpregs[reg].mmx));
3138 break;
3139 case 0x010: /* movups */
3140 case 0x110: /* movupd */
3141 case 0x028: /* movaps */
3142 case 0x128: /* movapd */
3143 case 0x16f: /* movdqa xmm, ea */
3144 case 0x26f: /* movdqu xmm, ea */
3145 if (mod != 3) {
3146 gen_lea_modrm(env, s, modrm);
3147 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3148 } else {
3149 rm = (modrm & 7) | REX_B(s);
3150 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3151 offsetof(CPUX86State,xmm_regs[rm]));
3153 break;
3154 case 0x210: /* movss xmm, ea */
3155 if (mod != 3) {
3156 gen_lea_modrm(env, s, modrm);
3157 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3158 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3159 tcg_gen_movi_tl(cpu_T0, 0);
3160 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3161 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3162 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3163 } else {
3164 rm = (modrm & 7) | REX_B(s);
3165 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3166 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3168 break;
3169 case 0x310: /* movsd xmm, ea */
3170 if (mod != 3) {
3171 gen_lea_modrm(env, s, modrm);
3172 gen_ldq_env_A0(s, offsetof(CPUX86State,
3173 xmm_regs[reg].ZMM_Q(0)));
3174 tcg_gen_movi_tl(cpu_T0, 0);
3175 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3176 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3177 } else {
3178 rm = (modrm & 7) | REX_B(s);
3179 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3180 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3182 break;
3183 case 0x012: /* movlps */
3184 case 0x112: /* movlpd */
3185 if (mod != 3) {
3186 gen_lea_modrm(env, s, modrm);
3187 gen_ldq_env_A0(s, offsetof(CPUX86State,
3188 xmm_regs[reg].ZMM_Q(0)));
3189 } else {
3190 /* movhlps */
3191 rm = (modrm & 7) | REX_B(s);
3192 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3193 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3195 break;
3196 case 0x212: /* movsldup */
3197 if (mod != 3) {
3198 gen_lea_modrm(env, s, modrm);
3199 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3200 } else {
3201 rm = (modrm & 7) | REX_B(s);
3202 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3203 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3204 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3205 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
3207 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3208 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3209 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3210 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3211 break;
3212 case 0x312: /* movddup */
3213 if (mod != 3) {
3214 gen_lea_modrm(env, s, modrm);
3215 gen_ldq_env_A0(s, offsetof(CPUX86State,
3216 xmm_regs[reg].ZMM_Q(0)));
3217 } else {
3218 rm = (modrm & 7) | REX_B(s);
3219 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3220 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3222 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3223 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3224 break;
3225 case 0x016: /* movhps */
3226 case 0x116: /* movhpd */
3227 if (mod != 3) {
3228 gen_lea_modrm(env, s, modrm);
3229 gen_ldq_env_A0(s, offsetof(CPUX86State,
3230 xmm_regs[reg].ZMM_Q(1)));
3231 } else {
3232 /* movlhps */
3233 rm = (modrm & 7) | REX_B(s);
3234 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3235 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3237 break;
3238 case 0x216: /* movshdup */
3239 if (mod != 3) {
3240 gen_lea_modrm(env, s, modrm);
3241 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3242 } else {
3243 rm = (modrm & 7) | REX_B(s);
3244 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3245 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
3246 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3247 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
3249 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3250 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3251 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3252 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3253 break;
3254 case 0x178: /* extrq_i */
3255 case 0x378: /* insertq_i */
3257 int bit_index, field_length;
3259 if (b1 == 1 && reg != 0)
3260 goto illegal_op;
3261 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3262 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3263 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3264 offsetof(CPUX86State,xmm_regs[reg]));
3265 if (b1 == 1)
3266 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3267 tcg_const_i32(bit_index),
3268 tcg_const_i32(field_length));
3269 else
3270 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3271 tcg_const_i32(bit_index),
3272 tcg_const_i32(field_length));
3274 break;
3275 case 0x7e: /* movd ea, mm */
3276 #ifdef TARGET_X86_64
3277 if (s->dflag == MO_64) {
3278 tcg_gen_ld_i64(cpu_T0, cpu_env,
3279 offsetof(CPUX86State,fpregs[reg].mmx));
3280 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3281 } else
3282 #endif
3284 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3285 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3286 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3288 break;
3289 case 0x17e: /* movd ea, xmm */
3290 #ifdef TARGET_X86_64
3291 if (s->dflag == MO_64) {
3292 tcg_gen_ld_i64(cpu_T0, cpu_env,
3293 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3294 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3295 } else
3296 #endif
3298 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3299 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3300 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3302 break;
3303 case 0x27e: /* movq xmm, ea */
3304 if (mod != 3) {
3305 gen_lea_modrm(env, s, modrm);
3306 gen_ldq_env_A0(s, offsetof(CPUX86State,
3307 xmm_regs[reg].ZMM_Q(0)));
3308 } else {
3309 rm = (modrm & 7) | REX_B(s);
3310 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3311 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3313 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3314 break;
3315 case 0x7f: /* movq ea, mm */
3316 if (mod != 3) {
3317 gen_lea_modrm(env, s, modrm);
3318 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3319 } else {
3320 rm = (modrm & 7);
3321 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3322 offsetof(CPUX86State,fpregs[reg].mmx));
3324 break;
3325 case 0x011: /* movups */
3326 case 0x111: /* movupd */
3327 case 0x029: /* movaps */
3328 case 0x129: /* movapd */
3329 case 0x17f: /* movdqa ea, xmm */
3330 case 0x27f: /* movdqu ea, xmm */
3331 if (mod != 3) {
3332 gen_lea_modrm(env, s, modrm);
3333 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3334 } else {
3335 rm = (modrm & 7) | REX_B(s);
3336 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3337 offsetof(CPUX86State,xmm_regs[reg]));
3339 break;
3340 case 0x211: /* movss ea, xmm */
3341 if (mod != 3) {
3342 gen_lea_modrm(env, s, modrm);
3343 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3344 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
3345 } else {
3346 rm = (modrm & 7) | REX_B(s);
3347 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
3348 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3350 break;
3351 case 0x311: /* movsd ea, xmm */
3352 if (mod != 3) {
3353 gen_lea_modrm(env, s, modrm);
3354 gen_stq_env_A0(s, offsetof(CPUX86State,
3355 xmm_regs[reg].ZMM_Q(0)));
3356 } else {
3357 rm = (modrm & 7) | REX_B(s);
3358 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3359 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3361 break;
3362 case 0x013: /* movlps */
3363 case 0x113: /* movlpd */
3364 if (mod != 3) {
3365 gen_lea_modrm(env, s, modrm);
3366 gen_stq_env_A0(s, offsetof(CPUX86State,
3367 xmm_regs[reg].ZMM_Q(0)));
3368 } else {
3369 goto illegal_op;
3371 break;
3372 case 0x017: /* movhps */
3373 case 0x117: /* movhpd */
3374 if (mod != 3) {
3375 gen_lea_modrm(env, s, modrm);
3376 gen_stq_env_A0(s, offsetof(CPUX86State,
3377 xmm_regs[reg].ZMM_Q(1)));
3378 } else {
3379 goto illegal_op;
3381 break;
3382 case 0x71: /* shift mm, im */
3383 case 0x72:
3384 case 0x73:
3385 case 0x171: /* shift xmm, im */
3386 case 0x172:
3387 case 0x173:
3388 if (b1 >= 2) {
3389 goto unknown_op;
3391 val = cpu_ldub_code(env, s->pc++);
3392 if (is_xmm) {
3393 tcg_gen_movi_tl(cpu_T0, val);
3394 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3395 tcg_gen_movi_tl(cpu_T0, 0);
3396 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
3397 op1_offset = offsetof(CPUX86State,xmm_t0);
3398 } else {
3399 tcg_gen_movi_tl(cpu_T0, val);
3400 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3401 tcg_gen_movi_tl(cpu_T0, 0);
3402 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3403 op1_offset = offsetof(CPUX86State,mmx_t0);
3405 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3406 ((modrm >> 3) & 7)][b1];
3407 if (!sse_fn_epp) {
3408 goto unknown_op;
3410 if (is_xmm) {
3411 rm = (modrm & 7) | REX_B(s);
3412 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3413 } else {
3414 rm = (modrm & 7);
3415 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3417 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3418 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3419 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3420 break;
3421 case 0x050: /* movmskps */
3422 rm = (modrm & 7) | REX_B(s);
3423 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3424 offsetof(CPUX86State,xmm_regs[rm]));
3425 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3426 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3427 break;
3428 case 0x150: /* movmskpd */
3429 rm = (modrm & 7) | REX_B(s);
3430 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3431 offsetof(CPUX86State,xmm_regs[rm]));
3432 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3433 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3434 break;
3435 case 0x02a: /* cvtpi2ps */
3436 case 0x12a: /* cvtpi2pd */
3437 gen_helper_enter_mmx(cpu_env);
3438 if (mod != 3) {
3439 gen_lea_modrm(env, s, modrm);
3440 op2_offset = offsetof(CPUX86State,mmx_t0);
3441 gen_ldq_env_A0(s, op2_offset);
3442 } else {
3443 rm = (modrm & 7);
3444 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3446 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3447 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3448 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3449 switch(b >> 8) {
3450 case 0x0:
3451 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3452 break;
3453 default:
3454 case 0x1:
3455 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3456 break;
3458 break;
3459 case 0x22a: /* cvtsi2ss */
3460 case 0x32a: /* cvtsi2sd */
3461 ot = mo_64_32(s->dflag);
3462 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3463 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3464 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3465 if (ot == MO_32) {
3466 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3467 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3468 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3469 } else {
3470 #ifdef TARGET_X86_64
3471 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3472 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
3473 #else
3474 goto illegal_op;
3475 #endif
3477 break;
3478 case 0x02c: /* cvttps2pi */
3479 case 0x12c: /* cvttpd2pi */
3480 case 0x02d: /* cvtps2pi */
3481 case 0x12d: /* cvtpd2pi */
3482 gen_helper_enter_mmx(cpu_env);
3483 if (mod != 3) {
3484 gen_lea_modrm(env, s, modrm);
3485 op2_offset = offsetof(CPUX86State,xmm_t0);
3486 gen_ldo_env_A0(s, op2_offset);
3487 } else {
3488 rm = (modrm & 7) | REX_B(s);
3489 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3491 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3492 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3493 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3494 switch(b) {
3495 case 0x02c:
3496 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3497 break;
3498 case 0x12c:
3499 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3500 break;
3501 case 0x02d:
3502 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3503 break;
3504 case 0x12d:
3505 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3506 break;
3508 break;
3509 case 0x22c: /* cvttss2si */
3510 case 0x32c: /* cvttsd2si */
3511 case 0x22d: /* cvtss2si */
3512 case 0x32d: /* cvtsd2si */
3513 ot = mo_64_32(s->dflag);
3514 if (mod != 3) {
3515 gen_lea_modrm(env, s, modrm);
3516 if ((b >> 8) & 1) {
3517 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
3518 } else {
3519 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3520 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3522 op2_offset = offsetof(CPUX86State,xmm_t0);
3523 } else {
3524 rm = (modrm & 7) | REX_B(s);
3525 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3527 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3528 if (ot == MO_32) {
3529 SSEFunc_i_ep sse_fn_i_ep =
3530 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3531 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3532 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
3533 } else {
3534 #ifdef TARGET_X86_64
3535 SSEFunc_l_ep sse_fn_l_ep =
3536 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3537 sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
3538 #else
3539 goto illegal_op;
3540 #endif
3542 gen_op_mov_reg_v(ot, reg, cpu_T0);
3543 break;
3544 case 0xc4: /* pinsrw */
3545 case 0x1c4:
3546 s->rip_offset = 1;
3547 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3548 val = cpu_ldub_code(env, s->pc++);
3549 if (b1) {
3550 val &= 7;
3551 tcg_gen_st16_tl(cpu_T0, cpu_env,
3552 offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
3553 } else {
3554 val &= 3;
3555 tcg_gen_st16_tl(cpu_T0, cpu_env,
3556 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3558 break;
3559 case 0xc5: /* pextrw */
3560 case 0x1c5:
3561 if (mod != 3)
3562 goto illegal_op;
3563 ot = mo_64_32(s->dflag);
3564 val = cpu_ldub_code(env, s->pc++);
3565 if (b1) {
3566 val &= 7;
3567 rm = (modrm & 7) | REX_B(s);
3568 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
3569 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
3570 } else {
3571 val &= 3;
3572 rm = (modrm & 7);
3573 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
3574 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3576 reg = ((modrm >> 3) & 7) | rex_r;
3577 gen_op_mov_reg_v(ot, reg, cpu_T0);
3578 break;
3579 case 0x1d6: /* movq ea, xmm */
3580 if (mod != 3) {
3581 gen_lea_modrm(env, s, modrm);
3582 gen_stq_env_A0(s, offsetof(CPUX86State,
3583 xmm_regs[reg].ZMM_Q(0)));
3584 } else {
3585 rm = (modrm & 7) | REX_B(s);
3586 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3587 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3588 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3590 break;
3591 case 0x2d6: /* movq2dq */
3592 gen_helper_enter_mmx(cpu_env);
3593 rm = (modrm & 7);
3594 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3595 offsetof(CPUX86State,fpregs[rm].mmx));
3596 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3597 break;
3598 case 0x3d6: /* movdq2q */
3599 gen_helper_enter_mmx(cpu_env);
3600 rm = (modrm & 7) | REX_B(s);
3601 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3602 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3603 break;
3604 case 0xd7: /* pmovmskb */
3605 case 0x1d7:
3606 if (mod != 3)
3607 goto illegal_op;
3608 if (b1) {
3609 rm = (modrm & 7) | REX_B(s);
3610 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3611 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3612 } else {
3613 rm = (modrm & 7);
3614 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3615 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3617 reg = ((modrm >> 3) & 7) | rex_r;
3618 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3619 break;
3621 case 0x138:
3622 case 0x038:
3623 b = modrm;
3624 if ((b & 0xf0) == 0xf0) {
3625 goto do_0f_38_fx;
3627 modrm = cpu_ldub_code(env, s->pc++);
3628 rm = modrm & 7;
3629 reg = ((modrm >> 3) & 7) | rex_r;
3630 mod = (modrm >> 6) & 3;
3631 if (b1 >= 2) {
3632 goto unknown_op;
3635 sse_fn_epp = sse_op_table6[b].op[b1];
3636 if (!sse_fn_epp) {
3637 goto unknown_op;
3639 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3640 goto illegal_op;
3642 if (b1) {
3643 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3644 if (mod == 3) {
3645 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3646 } else {
3647 op2_offset = offsetof(CPUX86State,xmm_t0);
3648 gen_lea_modrm(env, s, modrm);
3649 switch (b) {
3650 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3651 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3652 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3653 gen_ldq_env_A0(s, op2_offset +
3654 offsetof(ZMMReg, ZMM_Q(0)));
3655 break;
3656 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3657 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3658 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3659 s->mem_index, MO_LEUL);
3660 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3661 offsetof(ZMMReg, ZMM_L(0)));
3662 break;
3663 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3664 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3665 s->mem_index, MO_LEUW);
3666 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3667 offsetof(ZMMReg, ZMM_W(0)));
3668 break;
3669 case 0x2a: /* movntdqa */
3670 gen_ldo_env_A0(s, op1_offset);
3671 return;
3672 default:
3673 gen_ldo_env_A0(s, op2_offset);
3676 } else {
3677 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3678 if (mod == 3) {
3679 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3680 } else {
3681 op2_offset = offsetof(CPUX86State,mmx_t0);
3682 gen_lea_modrm(env, s, modrm);
3683 gen_ldq_env_A0(s, op2_offset);
3686 if (sse_fn_epp == SSE_SPECIAL) {
3687 goto unknown_op;
3690 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3691 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3692 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3694 if (b == 0x17) {
3695 set_cc_op(s, CC_OP_EFLAGS);
3697 break;
3699 case 0x238:
3700 case 0x338:
3701 do_0f_38_fx:
3702 /* Various integer extensions at 0f 38 f[0-f]. */
3703 b = modrm | (b1 << 8);
3704 modrm = cpu_ldub_code(env, s->pc++);
3705 reg = ((modrm >> 3) & 7) | rex_r;
3707 switch (b) {
3708 case 0x3f0: /* crc32 Gd,Eb */
3709 case 0x3f1: /* crc32 Gd,Ey */
3710 do_crc32:
3711 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3712 goto illegal_op;
3714 if ((b & 0xff) == 0xf0) {
3715 ot = MO_8;
3716 } else if (s->dflag != MO_64) {
3717 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3718 } else {
3719 ot = MO_64;
3722 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3723 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3724 gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
3725 cpu_T0, tcg_const_i32(8 << ot));
3727 ot = mo_64_32(s->dflag);
3728 gen_op_mov_reg_v(ot, reg, cpu_T0);
3729 break;
3731 case 0x1f0: /* crc32 or movbe */
3732 case 0x1f1:
3733 /* For these insns, the f3 prefix is supposed to have priority
3734 over the 66 prefix, but that is not how b1 was computed
3735 above. */
3736 if (s->prefix & PREFIX_REPNZ) {
3737 goto do_crc32;
3739 /* FALLTHRU */
3740 case 0x0f0: /* movbe Gy,My */
3741 case 0x0f1: /* movbe My,Gy */
3742 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3743 goto illegal_op;
3745 if (s->dflag != MO_64) {
3746 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3747 } else {
3748 ot = MO_64;
3751 gen_lea_modrm(env, s, modrm);
3752 if ((b & 1) == 0) {
3753 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
3754 s->mem_index, ot | MO_BE);
3755 gen_op_mov_reg_v(ot, reg, cpu_T0);
3756 } else {
3757 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3758 s->mem_index, ot | MO_BE);
3760 break;
3762 case 0x0f2: /* andn Gy, By, Ey */
3763 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3764 || !(s->prefix & PREFIX_VEX)
3765 || s->vex_l != 0) {
3766 goto illegal_op;
3768 ot = mo_64_32(s->dflag);
3769 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3770 tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
3771 gen_op_mov_reg_v(ot, reg, cpu_T0);
3772 gen_op_update1_cc();
3773 set_cc_op(s, CC_OP_LOGICB + ot);
3774 break;
3776 case 0x0f7: /* bextr Gy, Ey, By */
3777 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3778 || !(s->prefix & PREFIX_VEX)
3779 || s->vex_l != 0) {
3780 goto illegal_op;
3782 ot = mo_64_32(s->dflag);
3784 TCGv bound, zero;
3786 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3787 /* Extract START, and shift the operand.
3788 Shifts larger than operand size get zeros. */
3789 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3790 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
3792 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3793 zero = tcg_const_tl(0);
3794 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
3795 cpu_T0, zero);
3796 tcg_temp_free(zero);
3798 /* Extract the LEN into a mask. Lengths larger than
3799 operand size get all ones. */
3800 tcg_gen_extract_tl(cpu_A0, cpu_regs[s->vex_v], 8, 8);
3801 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3802 cpu_A0, bound);
3803 tcg_temp_free(bound);
3804 tcg_gen_movi_tl(cpu_T1, 1);
3805 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
3806 tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
3807 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
3809 gen_op_mov_reg_v(ot, reg, cpu_T0);
3810 gen_op_update1_cc();
3811 set_cc_op(s, CC_OP_LOGICB + ot);
3813 break;
3815 case 0x0f5: /* bzhi Gy, Ey, By */
3816 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3817 || !(s->prefix & PREFIX_VEX)
3818 || s->vex_l != 0) {
3819 goto illegal_op;
3821 ot = mo_64_32(s->dflag);
3822 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3823 tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
3825 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3826 /* Note that since we're using BMILG (in order to get O
3827 cleared) we need to store the inverse into C. */
3828 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3829 cpu_T1, bound);
3830 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
3831 bound, bound, cpu_T1);
3832 tcg_temp_free(bound);
3834 tcg_gen_movi_tl(cpu_A0, -1);
3835 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
3836 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
3837 gen_op_mov_reg_v(ot, reg, cpu_T0);
3838 gen_op_update1_cc();
3839 set_cc_op(s, CC_OP_BMILGB + ot);
3840 break;
3842 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3843 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3844 || !(s->prefix & PREFIX_VEX)
3845 || s->vex_l != 0) {
3846 goto illegal_op;
3848 ot = mo_64_32(s->dflag);
3849 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3850 switch (ot) {
3851 default:
3852 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3853 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3854 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3855 cpu_tmp2_i32, cpu_tmp3_i32);
3856 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3857 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3858 break;
3859 #ifdef TARGET_X86_64
3860 case MO_64:
3861 tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
3862 cpu_T0, cpu_regs[R_EDX]);
3863 tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
3864 tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
3865 break;
3866 #endif
3868 break;
3870 case 0x3f5: /* pdep Gy, By, Ey */
3871 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3872 || !(s->prefix & PREFIX_VEX)
3873 || s->vex_l != 0) {
3874 goto illegal_op;
3876 ot = mo_64_32(s->dflag);
3877 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3878 /* Note that by zero-extending the mask operand, we
3879 automatically handle zero-extending the result. */
3880 if (ot == MO_64) {
3881 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
3882 } else {
3883 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
3885 gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
3886 break;
3888 case 0x2f5: /* pext Gy, By, Ey */
3889 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3890 || !(s->prefix & PREFIX_VEX)
3891 || s->vex_l != 0) {
3892 goto illegal_op;
3894 ot = mo_64_32(s->dflag);
3895 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3896 /* Note that by zero-extending the mask operand, we
3897 automatically handle zero-extending the result. */
3898 if (ot == MO_64) {
3899 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
3900 } else {
3901 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
3903 gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
3904 break;
3906 case 0x1f6: /* adcx Gy, Ey */
3907 case 0x2f6: /* adox Gy, Ey */
3908 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3909 goto illegal_op;
3910 } else {
3911 TCGv carry_in, carry_out, zero;
3912 int end_op;
3914 ot = mo_64_32(s->dflag);
3915 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3917 /* Re-use the carry-out from a previous round. */
3918 TCGV_UNUSED(carry_in);
3919 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3920 switch (s->cc_op) {
3921 case CC_OP_ADCX:
3922 if (b == 0x1f6) {
3923 carry_in = cpu_cc_dst;
3924 end_op = CC_OP_ADCX;
3925 } else {
3926 end_op = CC_OP_ADCOX;
3928 break;
3929 case CC_OP_ADOX:
3930 if (b == 0x1f6) {
3931 end_op = CC_OP_ADCOX;
3932 } else {
3933 carry_in = cpu_cc_src2;
3934 end_op = CC_OP_ADOX;
3936 break;
3937 case CC_OP_ADCOX:
3938 end_op = CC_OP_ADCOX;
3939 carry_in = carry_out;
3940 break;
3941 default:
3942 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3943 break;
3945 /* If we can't reuse carry-out, get it out of EFLAGS. */
3946 if (TCGV_IS_UNUSED(carry_in)) {
3947 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3948 gen_compute_eflags(s);
3950 carry_in = cpu_tmp0;
3951 tcg_gen_extract_tl(carry_in, cpu_cc_src,
3952 ctz32(b == 0x1f6 ? CC_C : CC_O), 1);
3955 switch (ot) {
3956 #ifdef TARGET_X86_64
3957 case MO_32:
3958 /* If we know TL is 64-bit, and we want a 32-bit
3959 result, just do everything in 64-bit arithmetic. */
3960 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3961 tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
3962 tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
3963 tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
3964 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
3965 tcg_gen_shri_i64(carry_out, cpu_T0, 32);
3966 break;
3967 #endif
3968 default:
3969 /* Otherwise compute the carry-out in two steps. */
3970 zero = tcg_const_tl(0);
3971 tcg_gen_add2_tl(cpu_T0, carry_out,
3972 cpu_T0, zero,
3973 carry_in, zero);
3974 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3975 cpu_regs[reg], carry_out,
3976 cpu_T0, zero);
3977 tcg_temp_free(zero);
3978 break;
3980 set_cc_op(s, end_op);
3982 break;
3984 case 0x1f7: /* shlx Gy, Ey, By */
3985 case 0x2f7: /* sarx Gy, Ey, By */
3986 case 0x3f7: /* shrx Gy, Ey, By */
3987 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3988 || !(s->prefix & PREFIX_VEX)
3989 || s->vex_l != 0) {
3990 goto illegal_op;
3992 ot = mo_64_32(s->dflag);
3993 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3994 if (ot == MO_64) {
3995 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
3996 } else {
3997 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
3999 if (b == 0x1f7) {
4000 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
4001 } else if (b == 0x2f7) {
4002 if (ot != MO_64) {
4003 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4005 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
4006 } else {
4007 if (ot != MO_64) {
4008 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
4010 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
4012 gen_op_mov_reg_v(ot, reg, cpu_T0);
4013 break;
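/* Unlike the legacy shift forms, the BMI2 shifts take their count from
   a third register (VEX.vvvv) and leave EFLAGS untouched, which is why
   there is no set_cc_op() call here.  The "& 63" / "& 31" masking
   mirrors the hardware reduction of the count mod 64 or mod 32. */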
4015 case 0x0f3:
4016 case 0x1f3:
4017 case 0x2f3:
4018 case 0x3f3: /* Group 17 */
4019 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4020 || !(s->prefix & PREFIX_VEX)
4021 || s->vex_l != 0) {
4022 goto illegal_op;
4024 ot = mo_64_32(s->dflag);
4025 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4027 switch (reg & 7) {
4028 case 1: /* blsr By,Ey: dst = src & (src - 1), clear lowest set bit */
4029 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4030 tcg_gen_subi_tl(cpu_T1, cpu_T0, 1);
4031 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
4032 gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
4033 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4034 set_cc_op(s, CC_OP_BMILGB + ot);
4035 break;
4036 case 2: /* blsmsk By,Ey: dst = src ^ (src - 1), mask through lowest set bit */
4037 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4038 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
4039 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
4040 gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
4041 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4042 set_cc_op(s, CC_OP_BMILGB + ot);
4043 break;
4044 case 3: /* blsi By,Ey: dst = src & -src, isolate lowest set bit */
4045 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4046 tcg_gen_neg_tl(cpu_T1, cpu_T0);
4047 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
4048 gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
4049 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4050 set_cc_op(s, CC_OP_BMILGB + ot);
4051 break;
4052 default:
4053 goto unknown_op;
4055 break;
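/* All three group-17 ops exploit the fact that the borrow in "x - 1"
   flips exactly the trailing zeros and the lowest set bit.  Worked
   example with x = 0b01011000: x-1 = 0b01010111 and -x = 0b10101000,
   so blsr(x) = 0b01010000, blsmsk(x) = 0b00001111 and
   blsi(x) = 0b00001000.  As usual the EFLAGS update is deferred via
   the lazy CC_OP_BMILGB + ot state. */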
4057 default:
4058 goto unknown_op;
4060 break;
4062 case 0x03a:
4063 case 0x13a:
4064 b = modrm;
4065 modrm = cpu_ldub_code(env, s->pc++);
4066 rm = modrm & 7;
4067 reg = ((modrm >> 3) & 7) | rex_r;
4068 mod = (modrm >> 6) & 3;
4069 if (b1 >= 2) {
4070 goto unknown_op;
4073 sse_fn_eppi = sse_op_table7[b].op[b1];
4074 if (!sse_fn_eppi) {
4075 goto unknown_op;
4077 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4078 goto illegal_op;
4080 if (sse_fn_eppi == SSE_SPECIAL) {
4081 ot = mo_64_32(s->dflag);
4082 rm = (modrm & 7) | REX_B(s);
4083 if (mod != 3)
4084 gen_lea_modrm(env, s, modrm);
4085 reg = ((modrm >> 3) & 7) | rex_r;
4086 val = cpu_ldub_code(env, s->pc++);
4087 switch (b) {
4088 case 0x14: /* pextrb */
4089 tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4090 xmm_regs[reg].ZMM_B(val & 15)));
4091 if (mod == 3) {
4092 gen_op_mov_reg_v(ot, rm, cpu_T0);
4093 } else {
4094 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4095 s->mem_index, MO_UB);
4097 break;
4098 case 0x15: /* pextrw */
4099 tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4100 xmm_regs[reg].ZMM_W(val & 7)));
4101 if (mod == 3) {
4102 gen_op_mov_reg_v(ot, rm, cpu_T0);
4103 } else {
4104 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4105 s->mem_index, MO_LEUW);
4107 break;
4108 case 0x16:
4109 if (ot == MO_32) { /* pextrd */
4110 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4111 offsetof(CPUX86State,
4112 xmm_regs[reg].ZMM_L(val & 3)));
4113 if (mod == 3) {
4114 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4115 } else {
4116 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4117 s->mem_index, MO_LEUL);
4119 } else { /* pextrq */
4120 #ifdef TARGET_X86_64
4121 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4122 offsetof(CPUX86State,
4123 xmm_regs[reg].ZMM_Q(val & 1)));
4124 if (mod == 3) {
4125 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4126 } else {
4127 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4128 s->mem_index, MO_LEQ);
4130 #else
4131 goto illegal_op;
4132 #endif
4134 break;
4135 case 0x17: /* extractps */
4136 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4137 xmm_regs[reg].ZMM_L(val & 3)));
4138 if (mod == 3) {
4139 gen_op_mov_reg_v(ot, rm, cpu_T0);
4140 } else {
4141 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4142 s->mem_index, MO_LEUL);
4144 break;
4145 case 0x20: /* pinsrb */
4146 if (mod == 3) {
4147 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
4148 } else {
4149 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
4150 s->mem_index, MO_UB);
4152 tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4153 xmm_regs[reg].ZMM_B(val & 15)));
4154 break;
4155 case 0x21: /* insertps */
4156 if (mod == 3) {
4157 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4158 offsetof(CPUX86State,xmm_regs[rm]
4159 .ZMM_L((val >> 6) & 3)));
4160 } else {
4161 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4162 s->mem_index, MO_LEUL);
4164 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4165 offsetof(CPUX86State,xmm_regs[reg]
4166 .ZMM_L((val >> 4) & 3)));
4167 if ((val >> 0) & 1)
4168 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4169 cpu_env, offsetof(CPUX86State,
4170 xmm_regs[reg].ZMM_L(0)));
4171 if ((val >> 1) & 1)
4172 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4173 cpu_env, offsetof(CPUX86State,
4174 xmm_regs[reg].ZMM_L(1)));
4175 if ((val >> 2) & 1)
4176 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4177 cpu_env, offsetof(CPUX86State,
4178 xmm_regs[reg].ZMM_L(2)));
4179 if ((val >> 3) & 1)
4180 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4181 cpu_env, offsetof(CPUX86State,
4182 xmm_regs[reg].ZMM_L(3)));
4183 break;
4184 case 0x22:
4185 if (ot == MO_32) { /* pinsrd */
4186 if (mod == 3) {
4187 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4188 } else {
4189 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4190 s->mem_index, MO_LEUL);
4192 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4193 offsetof(CPUX86State,
4194 xmm_regs[reg].ZMM_L(val & 3)));
4195 } else { /* pinsrq */
4196 #ifdef TARGET_X86_64
4197 if (mod == 3) {
4198 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4199 } else {
4200 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4201 s->mem_index, MO_LEQ);
4203 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4204 offsetof(CPUX86State,
4205 xmm_regs[reg].ZMM_Q(val & 1)));
4206 #else
4207 goto illegal_op;
4208 #endif
4210 break;
4212 return;
4215 if (b1) {
4216 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4217 if (mod == 3) {
4218 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4219 } else {
4220 op2_offset = offsetof(CPUX86State,xmm_t0);
4221 gen_lea_modrm(env, s, modrm);
4222 gen_ldo_env_A0(s, op2_offset);
4224 } else {
4225 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4226 if (mod == 3) {
4227 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4228 } else {
4229 op2_offset = offsetof(CPUX86State,mmx_t0);
4230 gen_lea_modrm(env, s, modrm);
4231 gen_ldq_env_A0(s, op2_offset);
4234 val = cpu_ldub_code(env, s->pc++);
4236 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4237 set_cc_op(s, CC_OP_EFLAGS);
4239 if (s->dflag == MO_64) {
4240 /* The helper must use entire 64-bit gp registers */
4241 val |= 1 << 8;
4245 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4246 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4247 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4248 break;
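/* The pcmpestr* variants read explicit string lengths from rAX/rDX.
   Widening the immediate with bit 8 above is a private contract with
   the helpers: it tells them whether those lengths come from the full
   64-bit registers (REX.W form) or only their low 32 bits. */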
4250 case 0x33a:
4251 /* Various integer extensions at 0f 3a f[0-f]. */
4252 b = modrm | (b1 << 8);
4253 modrm = cpu_ldub_code(env, s->pc++);
4254 reg = ((modrm >> 3) & 7) | rex_r;
4256 switch (b) {
4257 case 0x3f0: /* rorx Gy,Ey, Ib */
4258 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4259 || !(s->prefix & PREFIX_VEX)
4260 || s->vex_l != 0) {
4261 goto illegal_op;
4263 ot = mo_64_32(s->dflag);
4264 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4265 b = cpu_ldub_code(env, s->pc++);
4266 if (ot == MO_64) {
4267 tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
4268 } else {
4269 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4270 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4271 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
4273 gen_op_mov_reg_v(ot, reg, cpu_T0);
4274 break;
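/* RORX is the flag-less rotate: the result goes to a separate
   destination register and EFLAGS is untouched, so again no
   set_cc_op().  The 32-bit form rotates in i32 and then zero-extends,
   matching the hardware clearing of the upper half of a 64-bit
   destination. */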
4276 default:
4277 goto unknown_op;
4279 break;
4281 default:
4282 unknown_op:
4283 gen_unknown_opcode(env, s);
4284 return;
4286 } else {
4287 /* generic MMX or SSE operation */
4288 switch(b) {
4289 case 0x70: /* pshufx insn */
4290 case 0xc6: /* pshufx insn */
4291 case 0xc2: /* compare insns */
4292 s->rip_offset = 1;
4293 break;
4294 default:
4295 break;
4297 if (is_xmm) {
4298 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4299 if (mod != 3) {
4300 int sz = 4;
4302 gen_lea_modrm(env, s, modrm);
4303 op2_offset = offsetof(CPUX86State,xmm_t0);
4305 switch (b) {
4306 case 0x50 ... 0x5a:
4307 case 0x5c ... 0x5f:
4308 case 0xc2:
4309 /* Most sse scalar operations. */
4310 if (b1 == 2) {
4311 sz = 2;
4312 } else if (b1 == 3) {
4313 sz = 3;
4315 break;
4317 case 0x2e: /* ucomis[sd] */
4318 case 0x2f: /* comis[sd] */
4319 if (b1 == 0) {
4320 sz = 2;
4321 } else {
4322 sz = 3;
4324 break;
4327 switch (sz) {
4328 case 2:
4329 /* 32 bit access */
4330 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
4331 tcg_gen_st32_tl(cpu_T0, cpu_env,
4332 offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
4333 break;
4334 case 3:
4335 /* 64 bit access */
4336 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
4337 break;
4338 default:
4339 /* 128 bit access */
4340 gen_ldo_env_A0(s, op2_offset);
4341 break;
4343 } else {
4344 rm = (modrm & 7) | REX_B(s);
4345 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4347 } else {
4348 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4349 if (mod != 3) {
4350 gen_lea_modrm(env, s, modrm);
4351 op2_offset = offsetof(CPUX86State,mmx_t0);
4352 gen_ldq_env_A0(s, op2_offset);
4353 } else {
4354 rm = (modrm & 7);
4355 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4358 switch(b) {
4359 case 0x0f: /* 3DNow! data insns */
4360 val = cpu_ldub_code(env, s->pc++);
4361 sse_fn_epp = sse_op_table5[val];
4362 if (!sse_fn_epp) {
4363 goto unknown_op;
4365 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW)) {
4366 goto illegal_op;
4368 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4369 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4370 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4371 break;
4372 case 0x70: /* pshufx insn */
4373 case 0xc6: /* pshufx insn */
4374 val = cpu_ldub_code(env, s->pc++);
4375 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4376 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4377 /* XXX: introduce a new table? */
4378 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4379 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4380 break;
4381 case 0xc2:
4382 /* compare insns */
4383 val = cpu_ldub_code(env, s->pc++);
4384 if (val >= 8)
4385 goto unknown_op;
4386 sse_fn_epp = sse_op_table4[val][b1];
4388 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4389 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4390 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4391 break;
4392 case 0xf7:
4393 /* maskmov : we must prepare A0 */
4394 if (mod != 3)
4395 goto illegal_op;
4396 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4397 gen_extu(s->aflag, cpu_A0);
4398 gen_add_A0_ds_seg(s);
4400 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4401 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4402 /* XXX: introduce a new table? */
4403 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4404 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4405 break;
4406 default:
4407 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4408 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4409 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4410 break;
4412 if (b == 0x2e || b == 0x2f) {
4413 set_cc_op(s, CC_OP_EFLAGS);
4418 /* convert one instruction. s->is_jmp is set if the translation must
4419 be stopped. Return the next pc value */
4420 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4421 target_ulong pc_start)
4423 int b, prefixes;
4424 int shift;
4425 TCGMemOp ot, aflag, dflag;
4426 int modrm, reg, rm, mod, op, opreg, val;
4427 target_ulong next_eip, tval;
4428 int rex_w, rex_r;
4430 s->pc_start = s->pc = pc_start;
4431 prefixes = 0;
4432 s->override = -1;
4433 rex_w = -1;
4434 rex_r = 0;
4435 #ifdef TARGET_X86_64
4436 s->rex_x = 0;
4437 s->rex_b = 0;
4438 x86_64_hregs = 0;
4439 #endif
4440 s->rip_offset = 0; /* for relative ip address */
4441 s->vex_l = 0;
4442 s->vex_v = 0;
4443 next_byte:
4444 /* x86 has an upper limit of 15 bytes for an instruction. Since we
4445 * do not want to decode and generate IR for an illegal
4446 * instruction, the following check limits the instruction size to
4447 * 25 bytes: 14 prefix + 1 opc + 6 (modrm+sib+ofs) + 4 imm */
4448 if (s->pc - pc_start > 14) {
4449 goto illegal_op;
4451 b = cpu_ldub_code(env, s->pc);
4452 s->pc++;
4453 /* Collect prefixes. */
4454 switch (b) {
4455 case 0xf3:
4456 prefixes |= PREFIX_REPZ;
4457 goto next_byte;
4458 case 0xf2:
4459 prefixes |= PREFIX_REPNZ;
4460 goto next_byte;
4461 case 0xf0:
4462 prefixes |= PREFIX_LOCK;
4463 goto next_byte;
4464 case 0x2e:
4465 s->override = R_CS;
4466 goto next_byte;
4467 case 0x36:
4468 s->override = R_SS;
4469 goto next_byte;
4470 case 0x3e:
4471 s->override = R_DS;
4472 goto next_byte;
4473 case 0x26:
4474 s->override = R_ES;
4475 goto next_byte;
4476 case 0x64:
4477 s->override = R_FS;
4478 goto next_byte;
4479 case 0x65:
4480 s->override = R_GS;
4481 goto next_byte;
4482 case 0x66:
4483 prefixes |= PREFIX_DATA;
4484 goto next_byte;
4485 case 0x67:
4486 prefixes |= PREFIX_ADR;
4487 goto next_byte;
4488 #ifdef TARGET_X86_64
4489 case 0x40 ... 0x4f:
4490 if (CODE64(s)) {
4491 /* REX prefix */
4492 rex_w = (b >> 3) & 1;
4493 rex_r = (b & 0x4) << 1;
4494 s->rex_x = (b & 0x2) << 2;
4495 REX_B(s) = (b & 0x1) << 3;
4496 x86_64_hregs = 1; /* select uniform byte register addressing */
4497 goto next_byte;
4499 break;
4500 #endif
4501 case 0xc5: /* 2-byte VEX */
4502 case 0xc4: /* 3-byte VEX */
4503 /* VEX prefixes cannot be used except in 32-bit mode.
4504 Otherwise the instruction is LES or LDS. */
4505 if (s->code32 && !s->vm86) {
4506 static const int pp_prefix[4] = {
4507 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4509 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4511 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4512 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4513 otherwise the instruction is LES or LDS. */
4514 break;
4516 s->pc++;
4518 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4519 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4520 | PREFIX_LOCK | PREFIX_DATA)) {
4521 goto illegal_op;
4523 #ifdef TARGET_X86_64
4524 if (x86_64_hregs) {
4525 goto illegal_op;
4527 #endif
4528 rex_r = (~vex2 >> 4) & 8;
4529 if (b == 0xc5) {
4530 vex3 = vex2;
4531 b = cpu_ldub_code(env, s->pc++) | 0x100; /* 2-byte VEX implies 0f */
4532 } else {
4533 #ifdef TARGET_X86_64
4534 s->rex_x = (~vex2 >> 3) & 8;
4535 s->rex_b = (~vex2 >> 2) & 8;
4536 #endif
4537 vex3 = cpu_ldub_code(env, s->pc++);
4538 rex_w = (vex3 >> 7) & 1;
4539 switch (vex2 & 0x1f) {
4540 case 0x01: /* Implied 0f leading opcode bytes. */
4541 b = cpu_ldub_code(env, s->pc++) | 0x100;
4542 break;
4543 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4544 b = 0x138;
4545 break;
4546 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4547 b = 0x13a;
4548 break;
4549 default: /* Reserved for future use. */
4550 goto unknown_op;
4553 s->vex_v = (~vex3 >> 3) & 0xf;
4554 s->vex_l = (vex3 >> 2) & 1;
4555 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4557 break;
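/* VEX byte layout, for reference (register fields are stored inverted,
   hence the ~vex extractions above):
     c5 (2-byte): bit 7 = ~R, bits 6:3 = ~vvvv, bit 2 = L, bits 1:0 = pp
     c4 (3-byte): byte 1: bit 7 = ~R, bit 6 = ~X, bit 5 = ~B,
                          bits 4:0 = m-mmmm (opcode map)
                  byte 2: bit 7 = W, bits 6:3 = ~vvvv, bit 2 = L,
                          bits 1:0 = pp
   pp encodes an implied 66/f3/f2 prefix and m-mmmm selects the
   0f / 0f 38 / 0f 3a opcode map, exactly as decoded above. */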
4560 /* Post-process prefixes. */
4561 if (CODE64(s)) {
4562 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4563 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4564 over 0x66 if both are present. */
4565 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4566 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4567 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4568 } else {
4569 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4570 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4571 dflag = MO_32;
4572 } else {
4573 dflag = MO_16;
4575 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4576 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4577 aflag = MO_32;
4578 } else {
4579 aflag = MO_16;
4583 s->prefix = prefixes;
4584 s->aflag = aflag;
4585 s->dflag = dflag;
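/* Size selection summary for 64-bit mode: no prefix gives 32-bit data
   and 64-bit addresses; REX.W forces 64-bit data and wins over 66;
   66 alone gives 16-bit data; 67 gives 32-bit addresses.  In 16/32-bit
   mode, 66 and 67 simply select the opposite of the code segment's
   default, as computed above. */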
4587 /* now check op code */
4588 reswitch:
4589 switch(b) {
4590 case 0x0f:
4591 /**************************/
4592 /* extended op code */
4593 b = cpu_ldub_code(env, s->pc++) | 0x100;
4594 goto reswitch;
4596 /**************************/
4597 /* arith & logic */
4598 case 0x00 ... 0x05:
4599 case 0x08 ... 0x0d:
4600 case 0x10 ... 0x15:
4601 case 0x18 ... 0x1d:
4602 case 0x20 ... 0x25:
4603 case 0x28 ... 0x2d:
4604 case 0x30 ... 0x35:
4605 case 0x38 ... 0x3d:
4607 int op, f, val;
4608 op = (b >> 3) & 7;
4609 f = (b >> 1) & 3;
4611 ot = mo_b_d(b, dflag);
4613 switch(f) {
4614 case 0: /* OP Ev, Gv */
4615 modrm = cpu_ldub_code(env, s->pc++);
4616 reg = ((modrm >> 3) & 7) | rex_r;
4617 mod = (modrm >> 6) & 3;
4618 rm = (modrm & 7) | REX_B(s);
4619 if (mod != 3) {
4620 gen_lea_modrm(env, s, modrm);
4621 opreg = OR_TMP0;
4622 } else if (op == OP_XORL && rm == reg) {
4623 xor_zero:
4624 /* xor reg, reg optimisation */
4625 set_cc_op(s, CC_OP_CLR);
4626 tcg_gen_movi_tl(cpu_T0, 0);
4627 gen_op_mov_reg_v(ot, reg, cpu_T0);
4628 break;
4629 } else {
4630 opreg = rm;
4632 gen_op_mov_v_reg(ot, cpu_T1, reg);
4633 gen_op(s, op, ot, opreg);
4634 break;
4635 case 1: /* OP Gv, Ev */
4636 modrm = cpu_ldub_code(env, s->pc++);
4637 mod = (modrm >> 6) & 3;
4638 reg = ((modrm >> 3) & 7) | rex_r;
4639 rm = (modrm & 7) | REX_B(s);
4640 if (mod != 3) {
4641 gen_lea_modrm(env, s, modrm);
4642 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
4643 } else if (op == OP_XORL && rm == reg) {
4644 goto xor_zero;
4645 } else {
4646 gen_op_mov_v_reg(ot, cpu_T1, rm);
4648 gen_op(s, op, ot, reg);
4649 break;
4650 case 2: /* OP A, Iv */
4651 val = insn_get(env, s, ot);
4652 tcg_gen_movi_tl(cpu_T1, val);
4653 gen_op(s, op, ot, OR_EAX);
4654 break;
4657 break;
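/* Classic ALU opcode pattern: bits 5:3 select the operation
   (add/or/adc/sbb/and/sub/xor/cmp) and the low bits select the form,
   so e.g. 0x29 decodes as op = 5 (SUB), f = 0: "sub r/m, reg" at the
   current operand size.  The xor_zero path recognizes the idiomatic
   "xor reg,reg" register clear and emits a plain move of zero with the
   cheap CC_OP_CLR flags state. */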
4659 case 0x82:
4660 if (CODE64(s))
4661 goto illegal_op;
4662 case 0x80: /* GRP1 */
4663 case 0x81:
4664 case 0x83:
4666 int val;
4668 ot = mo_b_d(b, dflag);
4670 modrm = cpu_ldub_code(env, s->pc++);
4671 mod = (modrm >> 6) & 3;
4672 rm = (modrm & 7) | REX_B(s);
4673 op = (modrm >> 3) & 7;
4675 if (mod != 3) {
4676 if (b == 0x83)
4677 s->rip_offset = 1;
4678 else
4679 s->rip_offset = insn_const_size(ot);
4680 gen_lea_modrm(env, s, modrm);
4681 opreg = OR_TMP0;
4682 } else {
4683 opreg = rm;
4686 switch(b) {
4687 default:
4688 case 0x80:
4689 case 0x81:
4690 case 0x82:
4691 val = insn_get(env, s, ot);
4692 break;
4693 case 0x83:
4694 val = (int8_t)insn_get(env, s, MO_8);
4695 break;
4697 tcg_gen_movi_tl(cpu_T1, val);
4698 gen_op(s, op, ot, opreg);
4700 break;
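/* Of the GRP1 forms, only 0x83 takes a sign-extended 8-bit immediate;
   0x80/0x81 carry a full-width one.  That makes "83 c8 ff" a 3-byte
   encoding of "or eax, -1", where the 0x81 form would spend 4 bytes on
   the immediate alone. */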
4702 /**************************/
4703 /* inc, dec, and other misc arith */
4704 case 0x40 ... 0x47: /* inc Gv */
4705 ot = dflag;
4706 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4707 break;
4708 case 0x48 ... 0x4f: /* dec Gv */
4709 ot = dflag;
4710 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4711 break;
4712 case 0xf6: /* GRP3 */
4713 case 0xf7:
4714 ot = mo_b_d(b, dflag);
4716 modrm = cpu_ldub_code(env, s->pc++);
4717 mod = (modrm >> 6) & 3;
4718 rm = (modrm & 7) | REX_B(s);
4719 op = (modrm >> 3) & 7;
4720 if (mod != 3) {
4721 if (op == 0) {
4722 s->rip_offset = insn_const_size(ot);
4724 gen_lea_modrm(env, s, modrm);
4725 /* For those below that handle locked memory, don't load here. */
4726 if (!(s->prefix & PREFIX_LOCK)
4727 || op != 2) {
4728 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
4730 } else {
4731 gen_op_mov_v_reg(ot, cpu_T0, rm);
4734 switch(op) {
4735 case 0: /* test */
4736 val = insn_get(env, s, ot);
4737 tcg_gen_movi_tl(cpu_T1, val);
4738 gen_op_testl_T0_T1_cc();
4739 set_cc_op(s, CC_OP_LOGICB + ot);
4740 break;
4741 case 2: /* not */
4742 if (s->prefix & PREFIX_LOCK) {
4743 if (mod == 3) {
4744 goto illegal_op;
4746 tcg_gen_movi_tl(cpu_T0, ~0);
4747 tcg_gen_atomic_xor_fetch_tl(cpu_T0, cpu_A0, cpu_T0,
4748 s->mem_index, ot | MO_LE);
4749 } else {
4750 tcg_gen_not_tl(cpu_T0, cpu_T0);
4751 if (mod != 3) {
4752 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
4753 } else {
4754 gen_op_mov_reg_v(ot, rm, cpu_T0);
4757 break;
4758 case 3: /* neg */
4759 if (s->prefix & PREFIX_LOCK) {
4760 TCGLabel *label1;
4761 TCGv a0, t0, t1, t2;
4763 if (mod == 3) {
4764 goto illegal_op;
4766 a0 = tcg_temp_local_new();
4767 t0 = tcg_temp_local_new();
4768 label1 = gen_new_label();
4770 tcg_gen_mov_tl(a0, cpu_A0);
4771 tcg_gen_mov_tl(t0, cpu_T0);
4773 gen_set_label(label1);
4774 t1 = tcg_temp_new();
4775 t2 = tcg_temp_new();
4776 tcg_gen_mov_tl(t2, t0);
4777 tcg_gen_neg_tl(t1, t0);
4778 tcg_gen_atomic_cmpxchg_tl(t0, a0, t0, t1,
4779 s->mem_index, ot | MO_LE);
4780 tcg_temp_free(t1);
4781 tcg_gen_brcond_tl(TCG_COND_NE, t0, t2, label1);
4783 tcg_temp_free(t2);
4784 tcg_temp_free(a0);
4785 tcg_gen_mov_tl(cpu_T0, t0);
4786 tcg_temp_free(t0);
4787 } else {
4788 tcg_gen_neg_tl(cpu_T0, cpu_T0);
4789 if (mod != 3) {
4790 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
4791 } else {
4792 gen_op_mov_reg_v(ot, rm, cpu_T0);
4795 gen_op_update_neg_cc();
4796 set_cc_op(s, CC_OP_SUBB + ot);
4797 break;
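/* There is no atomic-negate primitive, so the locked NEG is built as a
   compare-and-swap loop: take the last observed value, try to replace
   it with its negation via tcg_gen_atomic_cmpxchg_tl, and loop back to
   label1 if another CPU modified the memory word in the meantime. */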
4798 case 4: /* mul */
4799 switch(ot) {
4800 case MO_8:
4801 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4802 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
4803 tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
4804 /* XXX: use 32 bit mul which could be faster */
4805 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4806 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4807 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4808 tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
4809 set_cc_op(s, CC_OP_MULB);
4810 break;
4811 case MO_16:
4812 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4813 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4814 tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
4815 /* XXX: use 32 bit mul which could be faster */
4816 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4817 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4818 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4819 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4820 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4821 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4822 set_cc_op(s, CC_OP_MULW);
4823 break;
4824 default:
4825 case MO_32:
4826 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4827 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4828 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4829 cpu_tmp2_i32, cpu_tmp3_i32);
4830 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4831 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4832 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4833 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4834 set_cc_op(s, CC_OP_MULL);
4835 break;
4836 #ifdef TARGET_X86_64
4837 case MO_64:
4838 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4839 cpu_T0, cpu_regs[R_EAX]);
4840 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4841 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4842 set_cc_op(s, CC_OP_MULQ);
4843 break;
4844 #endif
4846 break;
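/* MUL is the unsigned widening multiply into the rDX:rAX pair.  The
   lazy flags store the low half in cc_dst and the high half in cc_src,
   from which CF = OF = (high half != 0) is derived on demand; the byte
   variant keeps the whole product in AX and uses its high byte for the
   same test. */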
4847 case 5: /* imul */
4848 switch(ot) {
4849 case MO_8:
4850 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4851 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4852 tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
4853 /* XXX: use 32 bit mul which could be faster */
4854 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4855 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4856 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4857 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
4858 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4859 set_cc_op(s, CC_OP_MULB);
4860 break;
4861 case MO_16:
4862 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4863 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4864 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
4865 /* XXX: use 32 bit mul which could be faster */
4866 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4867 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4868 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4869 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
4870 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4871 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4872 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4873 set_cc_op(s, CC_OP_MULW);
4874 break;
4875 default:
4876 case MO_32:
4877 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4878 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4879 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4880 cpu_tmp2_i32, cpu_tmp3_i32);
4881 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4882 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4883 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4884 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4885 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4886 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4887 set_cc_op(s, CC_OP_MULL);
4888 break;
4889 #ifdef TARGET_X86_64
4890 case MO_64:
4891 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4892 cpu_T0, cpu_regs[R_EAX]);
4893 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4894 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4895 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4896 set_cc_op(s, CC_OP_MULQ);
4897 break;
4898 #endif
4900 break;
4901 case 6: /* div */
4902 switch(ot) {
4903 case MO_8:
4904 gen_helper_divb_AL(cpu_env, cpu_T0);
4905 break;
4906 case MO_16:
4907 gen_helper_divw_AX(cpu_env, cpu_T0);
4908 break;
4909 default:
4910 case MO_32:
4911 gen_helper_divl_EAX(cpu_env, cpu_T0);
4912 break;
4913 #ifdef TARGET_X86_64
4914 case MO_64:
4915 gen_helper_divq_EAX(cpu_env, cpu_T0);
4916 break;
4917 #endif
4919 break;
4920 case 7: /* idiv */
4921 switch(ot) {
4922 case MO_8:
4923 gen_helper_idivb_AL(cpu_env, cpu_T0);
4924 break;
4925 case MO_16:
4926 gen_helper_idivw_AX(cpu_env, cpu_T0);
4927 break;
4928 default:
4929 case MO_32:
4930 gen_helper_idivl_EAX(cpu_env, cpu_T0);
4931 break;
4932 #ifdef TARGET_X86_64
4933 case MO_64:
4934 gen_helper_idivq_EAX(cpu_env, cpu_T0);
4935 break;
4936 #endif
4938 break;
4939 default:
4940 goto unknown_op;
4942 break;
4944 case 0xfe: /* GRP4 */
4945 case 0xff: /* GRP5 */
4946 ot = mo_b_d(b, dflag);
4948 modrm = cpu_ldub_code(env, s->pc++);
4949 mod = (modrm >> 6) & 3;
4950 rm = (modrm & 7) | REX_B(s);
4951 op = (modrm >> 3) & 7;
4952 if (op >= 2 && b == 0xfe) {
4953 goto unknown_op;
4955 if (CODE64(s)) {
4956 if (op == 2 || op == 4) {
4957 /* operand size for jumps is 64 bit */
4958 ot = MO_64;
4959 } else if (op == 3 || op == 5) {
4960 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4961 } else if (op == 6) {
4962 /* default push size is 64 bit */
4963 ot = mo_pushpop(s, dflag);
4966 if (mod != 3) {
4967 gen_lea_modrm(env, s, modrm);
4968 if (op >= 2 && op != 3 && op != 5)
4969 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
4970 } else {
4971 gen_op_mov_v_reg(ot, cpu_T0, rm);
4974 switch(op) {
4975 case 0: /* inc Ev */
4976 if (mod != 3)
4977 opreg = OR_TMP0;
4978 else
4979 opreg = rm;
4980 gen_inc(s, ot, opreg, 1);
4981 break;
4982 case 1: /* dec Ev */
4983 if (mod != 3)
4984 opreg = OR_TMP0;
4985 else
4986 opreg = rm;
4987 gen_inc(s, ot, opreg, -1);
4988 break;
4989 case 2: /* call Ev */
4990 /* XXX: optimize if memory (no 'and' is necessary) */
4991 if (dflag == MO_16) {
4992 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4994 next_eip = s->pc - s->cs_base;
4995 tcg_gen_movi_tl(cpu_T1, next_eip);
4996 gen_push_v(s, cpu_T1);
4997 gen_op_jmp_v(cpu_T0);
4998 gen_bnd_jmp(s);
4999 gen_jr(s, cpu_T0);
5000 break;
5001 case 3: /* lcall Ev */
5002 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5003 gen_add_A0_im(s, 1 << ot);
5004 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
5005 do_lcall:
5006 if (s->pe && !s->vm86) {
5007 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5008 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
5009 tcg_const_i32(dflag - 1),
5010 tcg_const_tl(s->pc - s->cs_base));
5011 } else {
5012 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5013 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
5014 tcg_const_i32(dflag - 1),
5015 tcg_const_i32(s->pc - s->cs_base));
5017 tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
5018 gen_jr(s, cpu_tmp4);
5019 break;
5020 case 4: /* jmp Ev */
5021 if (dflag == MO_16) {
5022 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
5024 gen_op_jmp_v(cpu_T0);
5025 gen_bnd_jmp(s);
5026 gen_jr(s, cpu_T0);
5027 break;
5028 case 5: /* ljmp Ev */
5029 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5030 gen_add_A0_im(s, 1 << ot);
5031 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
5032 do_ljmp:
5033 if (s->pe && !s->vm86) {
5034 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5035 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
5036 tcg_const_tl(s->pc - s->cs_base));
5037 } else {
5038 gen_op_movl_seg_T0_vm(R_CS);
5039 gen_op_jmp_v(cpu_T1);
5041 tcg_gen_ld_tl(cpu_tmp4, cpu_env, offsetof(CPUX86State, eip));
5042 gen_jr(s, cpu_tmp4);
5043 break;
5044 case 6: /* push Ev */
5045 gen_push_v(s, cpu_T0);
5046 break;
5047 default:
5048 goto unknown_op;
5050 break;
5052 case 0x84: /* test Ev, Gv */
5053 case 0x85:
5054 ot = mo_b_d(b, dflag);
5056 modrm = cpu_ldub_code(env, s->pc++);
5057 reg = ((modrm >> 3) & 7) | rex_r;
5059 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5060 gen_op_mov_v_reg(ot, cpu_T1, reg);
5061 gen_op_testl_T0_T1_cc();
5062 set_cc_op(s, CC_OP_LOGICB + ot);
5063 break;
5065 case 0xa8: /* test eAX, Iv */
5066 case 0xa9:
5067 ot = mo_b_d(b, dflag);
5068 val = insn_get(env, s, ot);
5070 gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
5071 tcg_gen_movi_tl(cpu_T1, val);
5072 gen_op_testl_T0_T1_cc();
5073 set_cc_op(s, CC_OP_LOGICB + ot);
5074 break;
5076 case 0x98: /* CWDE/CBW */
5077 switch (dflag) {
5078 #ifdef TARGET_X86_64
5079 case MO_64:
5080 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
5081 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
5082 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
5083 break;
5084 #endif
5085 case MO_32:
5086 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
5087 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5088 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
5089 break;
5090 case MO_16:
5091 gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
5092 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
5093 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
5094 break;
5095 default:
5096 tcg_abort();
5098 break;
5099 case 0x99: /* CDQ/CWD */
5100 switch (dflag) {
5101 #ifdef TARGET_X86_64
5102 case MO_64:
5103 gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
5104 tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
5105 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
5106 break;
5107 #endif
5108 case MO_32:
5109 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
5110 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
5111 tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
5112 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
5113 break;
5114 case MO_16:
5115 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
5116 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5117 tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
5118 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
5119 break;
5120 default:
5121 tcg_abort();
5123 break;
5124 case 0x1af: /* imul Gv, Ev */
5125 case 0x69: /* imul Gv, Ev, I */
5126 case 0x6b:
5127 ot = dflag;
5128 modrm = cpu_ldub_code(env, s->pc++);
5129 reg = ((modrm >> 3) & 7) | rex_r;
5130 if (b == 0x69)
5131 s->rip_offset = insn_const_size(ot);
5132 else if (b == 0x6b)
5133 s->rip_offset = 1;
5134 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5135 if (b == 0x69) {
5136 val = insn_get(env, s, ot);
5137 tcg_gen_movi_tl(cpu_T1, val);
5138 } else if (b == 0x6b) {
5139 val = (int8_t)insn_get(env, s, MO_8);
5140 tcg_gen_movi_tl(cpu_T1, val);
5141 } else {
5142 gen_op_mov_v_reg(ot, cpu_T1, reg);
5144 switch (ot) {
5145 #ifdef TARGET_X86_64
5146 case MO_64:
5147 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
5148 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5149 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5150 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
5151 break;
5152 #endif
5153 case MO_32:
5154 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
5155 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
5156 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5157 cpu_tmp2_i32, cpu_tmp3_i32);
5158 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5159 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5160 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5161 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5162 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5163 break;
5164 default:
5165 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5166 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
5167 /* XXX: use 32 bit mul which could be faster */
5168 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
5169 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
5170 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
5171 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
5172 gen_op_mov_reg_v(ot, reg, cpu_T0);
5173 break;
5175 set_cc_op(s, CC_OP_MULB + ot);
5176 break;
5177 case 0x1c0:
5178 case 0x1c1: /* xadd Ev, Gv */
5179 ot = mo_b_d(b, dflag);
5180 modrm = cpu_ldub_code(env, s->pc++);
5181 reg = ((modrm >> 3) & 7) | rex_r;
5182 mod = (modrm >> 6) & 3;
5183 gen_op_mov_v_reg(ot, cpu_T0, reg);
5184 if (mod == 3) {
5185 rm = (modrm & 7) | REX_B(s);
5186 gen_op_mov_v_reg(ot, cpu_T1, rm);
5187 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5188 gen_op_mov_reg_v(ot, reg, cpu_T1);
5189 gen_op_mov_reg_v(ot, rm, cpu_T0);
5190 } else {
5191 gen_lea_modrm(env, s, modrm);
5192 if (s->prefix & PREFIX_LOCK) {
5193 tcg_gen_atomic_fetch_add_tl(cpu_T1, cpu_A0, cpu_T0,
5194 s->mem_index, ot | MO_LE);
5195 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5196 } else {
5197 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5198 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5199 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5201 gen_op_mov_reg_v(ot, reg, cpu_T1);
5203 gen_op_update2_cc();
5204 set_cc_op(s, CC_OP_ADDB + ot);
5205 break;
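/* XADD exchanges before it adds: the destination ends up with the sum
   while the source register receives the destination's old value.
   With a LOCK prefix the whole read-modify-write collapses into a
   single tcg_gen_atomic_fetch_add_tl, which returns the old memory
   value for the exchange half. */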
5206 case 0x1b0:
5207 case 0x1b1: /* cmpxchg Ev, Gv */
5209 TCGv oldv, newv, cmpv;
5211 ot = mo_b_d(b, dflag);
5212 modrm = cpu_ldub_code(env, s->pc++);
5213 reg = ((modrm >> 3) & 7) | rex_r;
5214 mod = (modrm >> 6) & 3;
5215 oldv = tcg_temp_new();
5216 newv = tcg_temp_new();
5217 cmpv = tcg_temp_new();
5218 gen_op_mov_v_reg(ot, newv, reg);
5219 tcg_gen_mov_tl(cmpv, cpu_regs[R_EAX]);
5221 if (s->prefix & PREFIX_LOCK) {
5222 if (mod == 3) {
5223 goto illegal_op;
5225 gen_lea_modrm(env, s, modrm);
5226 tcg_gen_atomic_cmpxchg_tl(oldv, cpu_A0, cmpv, newv,
5227 s->mem_index, ot | MO_LE);
5228 gen_op_mov_reg_v(ot, R_EAX, oldv);
5229 } else {
5230 if (mod == 3) {
5231 rm = (modrm & 7) | REX_B(s);
5232 gen_op_mov_v_reg(ot, oldv, rm);
5233 } else {
5234 gen_lea_modrm(env, s, modrm);
5235 gen_op_ld_v(s, ot, oldv, cpu_A0);
5236 rm = 0; /* avoid warning */
5238 gen_extu(ot, oldv);
5239 gen_extu(ot, cmpv);
5240 /* store value = (old == cmp ? new : old); */
5241 tcg_gen_movcond_tl(TCG_COND_EQ, newv, oldv, cmpv, newv, oldv);
5242 if (mod == 3) {
5243 gen_op_mov_reg_v(ot, R_EAX, oldv);
5244 gen_op_mov_reg_v(ot, rm, newv);
5245 } else {
5246 /* Perform an unconditional store cycle like physical cpu;
5247 must be before changing accumulator to ensure
5248 idempotency if the store faults and the instruction
5249 is restarted */
5250 gen_op_st_v(s, ot, newv, cpu_A0);
5251 gen_op_mov_reg_v(ot, R_EAX, oldv);
5254 tcg_gen_mov_tl(cpu_cc_src, oldv);
5255 tcg_gen_mov_tl(cpu_cc_srcT, cmpv);
5256 tcg_gen_sub_tl(cpu_cc_dst, cmpv, oldv);
5257 set_cc_op(s, CC_OP_SUBB + ot);
5258 tcg_temp_free(oldv);
5259 tcg_temp_free(newv);
5260 tcg_temp_free(cmpv);
5262 break;
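/* CMPXCHG sets the flags exactly as "cmp rAX, dest" would, hence the
   CC_OP_SUBB state fed with cmpv - oldv.  Note that on the non-LOCK
   memory path a store happens even when the comparison fails (it just
   rewrites the old value), mirroring the unconditional store cycle of
   the physical CPU described above. */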
5263 case 0x1c7: /* cmpxchg8b */
5264 modrm = cpu_ldub_code(env, s->pc++);
5265 mod = (modrm >> 6) & 3;
5266 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5267 goto illegal_op;
5268 #ifdef TARGET_X86_64
5269 if (dflag == MO_64) {
5270 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5271 goto illegal_op;
5272 gen_lea_modrm(env, s, modrm);
5273 if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
5274 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5275 } else {
5276 gen_helper_cmpxchg16b_unlocked(cpu_env, cpu_A0);
5278 } else
5279 #endif
5281 if (!(s->cpuid_features & CPUID_CX8))
5282 goto illegal_op;
5283 gen_lea_modrm(env, s, modrm);
5284 if ((s->prefix & PREFIX_LOCK) && parallel_cpus) {
5285 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5286 } else {
5287 gen_helper_cmpxchg8b_unlocked(cpu_env, cpu_A0);
5290 set_cc_op(s, CC_OP_EFLAGS);
5291 break;
5293 /**************************/
5294 /* push/pop */
5295 case 0x50 ... 0x57: /* push */
5296 gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
5297 gen_push_v(s, cpu_T0);
5298 break;
5299 case 0x58 ... 0x5f: /* pop */
5300 ot = gen_pop_T0(s);
5301 /* NOTE: order is important for pop %sp */
5302 gen_pop_update(s, ot);
5303 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
5304 break;
5305 case 0x60: /* pusha */
5306 if (CODE64(s))
5307 goto illegal_op;
5308 gen_pusha(s);
5309 break;
5310 case 0x61: /* popa */
5311 if (CODE64(s))
5312 goto illegal_op;
5313 gen_popa(s);
5314 break;
5315 case 0x68: /* push Iv */
5316 case 0x6a:
5317 ot = mo_pushpop(s, dflag);
5318 if (b == 0x68)
5319 val = insn_get(env, s, ot);
5320 else
5321 val = (int8_t)insn_get(env, s, MO_8);
5322 tcg_gen_movi_tl(cpu_T0, val);
5323 gen_push_v(s, cpu_T0);
5324 break;
5325 case 0x8f: /* pop Ev */
5326 modrm = cpu_ldub_code(env, s->pc++);
5327 mod = (modrm >> 6) & 3;
5328 ot = gen_pop_T0(s);
5329 if (mod == 3) {
5330 /* NOTE: order is important for pop %sp */
5331 gen_pop_update(s, ot);
5332 rm = (modrm & 7) | REX_B(s);
5333 gen_op_mov_reg_v(ot, rm, cpu_T0);
5334 } else {
5335 /* NOTE: order is important too for MMU exceptions */
5336 s->popl_esp_hack = 1 << ot;
5337 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5338 s->popl_esp_hack = 0;
5339 gen_pop_update(s, ot);
5341 break;
5342 case 0xc8: /* enter */
5344 int level;
5345 val = cpu_lduw_code(env, s->pc);
5346 s->pc += 2;
5347 level = cpu_ldub_code(env, s->pc++);
5348 gen_enter(s, val, level);
5350 break;
5351 case 0xc9: /* leave */
5352 gen_leave(s);
5353 break;
5354 case 0x06: /* push es */
5355 case 0x0e: /* push cs */
5356 case 0x16: /* push ss */
5357 case 0x1e: /* push ds */
5358 if (CODE64(s))
5359 goto illegal_op;
5360 gen_op_movl_T0_seg(b >> 3);
5361 gen_push_v(s, cpu_T0);
5362 break;
5363 case 0x1a0: /* push fs */
5364 case 0x1a8: /* push gs */
5365 gen_op_movl_T0_seg((b >> 3) & 7);
5366 gen_push_v(s, cpu_T0);
5367 break;
5368 case 0x07: /* pop es */
5369 case 0x17: /* pop ss */
5370 case 0x1f: /* pop ds */
5371 if (CODE64(s))
5372 goto illegal_op;
5373 reg = b >> 3;
5374 ot = gen_pop_T0(s);
5375 gen_movl_seg_T0(s, reg);
5376 gen_pop_update(s, ot);
5377 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5378 if (s->is_jmp) {
5379 gen_jmp_im(s->pc - s->cs_base);
5380 if (reg == R_SS) {
5381 s->tf = 0;
5382 gen_eob_inhibit_irq(s, true);
5383 } else {
5384 gen_eob(s);
5387 break;
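/* Loading SS inhibits interrupts and traps until after the next
   instruction, so that a following stack-pointer update executes
   atomically with it; gen_eob_inhibit_irq models that one-instruction
   window, and the single-step trap is suppressed for the same
   reason. */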
5388 case 0x1a1: /* pop fs */
5389 case 0x1a9: /* pop gs */
5390 ot = gen_pop_T0(s);
5391 gen_movl_seg_T0(s, (b >> 3) & 7);
5392 gen_pop_update(s, ot);
5393 if (s->is_jmp) {
5394 gen_jmp_im(s->pc - s->cs_base);
5395 gen_eob(s);
5397 break;
5399 /**************************/
5400 /* mov */
5401 case 0x88:
5402 case 0x89: /* mov Gv, Ev */
5403 ot = mo_b_d(b, dflag);
5404 modrm = cpu_ldub_code(env, s->pc++);
5405 reg = ((modrm >> 3) & 7) | rex_r;
5407 /* generate a generic store */
5408 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5409 break;
5410 case 0xc6:
5411 case 0xc7: /* mov Ev, Iv */
5412 ot = mo_b_d(b, dflag);
5413 modrm = cpu_ldub_code(env, s->pc++);
5414 mod = (modrm >> 6) & 3;
5415 if (mod != 3) {
5416 s->rip_offset = insn_const_size(ot);
5417 gen_lea_modrm(env, s, modrm);
5419 val = insn_get(env, s, ot);
5420 tcg_gen_movi_tl(cpu_T0, val);
5421 if (mod != 3) {
5422 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5423 } else {
5424 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
5426 break;
5427 case 0x8a:
5428 case 0x8b: /* mov Ev, Gv */
5429 ot = mo_b_d(b, dflag);
5430 modrm = cpu_ldub_code(env, s->pc++);
5431 reg = ((modrm >> 3) & 7) | rex_r;
5433 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5434 gen_op_mov_reg_v(ot, reg, cpu_T0);
5435 break;
5436 case 0x8e: /* mov seg, Gv */
5437 modrm = cpu_ldub_code(env, s->pc++);
5438 reg = (modrm >> 3) & 7;
5439 if (reg >= 6 || reg == R_CS)
5440 goto illegal_op;
5441 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5442 gen_movl_seg_T0(s, reg);
5443 /* Note that reg == R_SS in gen_movl_seg_T0 always sets is_jmp. */
5444 if (s->is_jmp) {
5445 gen_jmp_im(s->pc - s->cs_base);
5446 if (reg == R_SS) {
5447 s->tf = 0;
5448 gen_eob_inhibit_irq(s, true);
5449 } else {
5450 gen_eob(s);
5453 break;
5454 case 0x8c: /* mov Gv, seg */
5455 modrm = cpu_ldub_code(env, s->pc++);
5456 reg = (modrm >> 3) & 7;
5457 mod = (modrm >> 6) & 3;
5458 if (reg >= 6)
5459 goto illegal_op;
5460 gen_op_movl_T0_seg(reg);
5461 ot = mod == 3 ? dflag : MO_16;
5462 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5463 break;
5465 case 0x1b6: /* movzbS Gv, Eb */
5466 case 0x1b7: /* movzwS Gv, Eb */
5467 case 0x1be: /* movsbS Gv, Eb */
5468 case 0x1bf: /* movswS Gv, Eb */
5470 TCGMemOp d_ot;
5471 TCGMemOp s_ot;
5473 /* d_ot is the size of destination */
5474 d_ot = dflag;
5475 /* ot is the size of source */
5476 ot = (b & 1) + MO_8;
5477 /* s_ot is the sign+size of source */
5478 s_ot = b & 8 ? MO_SIGN | ot : ot;
5480 modrm = cpu_ldub_code(env, s->pc++);
5481 reg = ((modrm >> 3) & 7) | rex_r;
5482 mod = (modrm >> 6) & 3;
5483 rm = (modrm & 7) | REX_B(s);
5485 if (mod == 3) {
5486 if (s_ot == MO_SB && byte_reg_is_xH(rm)) {
5487 tcg_gen_sextract_tl(cpu_T0, cpu_regs[rm - 4], 8, 8);
5488 } else {
5489 gen_op_mov_v_reg(ot, cpu_T0, rm);
5490 switch (s_ot) {
5491 case MO_UB:
5492 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
5493 break;
5494 case MO_SB:
5495 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
5496 break;
5497 case MO_UW:
5498 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
5499 break;
5500 default:
5501 case MO_SW:
5502 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5503 break;
5506 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
5507 } else {
5508 gen_lea_modrm(env, s, modrm);
5509 gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
5510 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
5513 break;
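/* The movzx/movsx opcode bits carry the whole story: bit 0 selects a
   byte or word source and bit 3 selects sign versus zero extension,
   which is how s_ot is assembled above.  byte_reg_is_xH covers the
   legacy AH/CH/DH/BH registers, which live in bits 15:8 of their
   parent register and are read with a single sextract. */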
5515 case 0x8d: /* lea */
5516 modrm = cpu_ldub_code(env, s->pc++);
5517 mod = (modrm >> 6) & 3;
5518 if (mod == 3)
5519 goto illegal_op;
5520 reg = ((modrm >> 3) & 7) | rex_r;
5522 AddressParts a = gen_lea_modrm_0(env, s, modrm);
5523 TCGv ea = gen_lea_modrm_1(a);
5524 gen_lea_v_seg(s, s->aflag, ea, -1, -1);
5525 gen_op_mov_reg_v(dflag, reg, cpu_A0);
5527 break;
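/* LEA reuses the full ModRM address computation but with the segment
   base suppressed (the -1 arguments to gen_lea_v_seg) and performs no
   memory access; a register operand (mod == 3) has no address to
   compute, hence the #UD above. */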
5529 case 0xa0: /* mov EAX, Ov */
5530 case 0xa1:
5531 case 0xa2: /* mov Ov, EAX */
5532 case 0xa3:
5534 target_ulong offset_addr;
5536 ot = mo_b_d(b, dflag);
5537 switch (s->aflag) {
5538 #ifdef TARGET_X86_64
5539 case MO_64:
5540 offset_addr = cpu_ldq_code(env, s->pc);
5541 s->pc += 8;
5542 break;
5543 #endif
5544 default:
5545 offset_addr = insn_get(env, s, s->aflag);
5546 break;
5548 tcg_gen_movi_tl(cpu_A0, offset_addr);
5549 gen_add_A0_ds_seg(s);
5550 if ((b & 2) == 0) {
5551 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
5552 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
5553 } else {
5554 gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
5555 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5558 break;
5559 case 0xd7: /* xlat */
5560 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5561 tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
5562 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
5563 gen_extu(s->aflag, cpu_A0);
5564 gen_add_A0_ds_seg(s);
5565 gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
5566 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
5567 break;
5568 case 0xb0 ... 0xb7: /* mov R, Ib */
5569 val = insn_get(env, s, MO_8);
5570 tcg_gen_movi_tl(cpu_T0, val);
5571 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
5572 break;
5573 case 0xb8 ... 0xbf: /* mov R, Iv */
5574 #ifdef TARGET_X86_64
5575 if (dflag == MO_64) {
5576 uint64_t tmp;
5577 /* 64 bit case */
5578 tmp = cpu_ldq_code(env, s->pc);
5579 s->pc += 8;
5580 reg = (b & 7) | REX_B(s);
5581 tcg_gen_movi_tl(cpu_T0, tmp);
5582 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
5583 } else
5584 #endif
5586 ot = dflag;
5587 val = insn_get(env, s, ot);
5588 reg = (b & 7) | REX_B(s);
5589 tcg_gen_movi_tl(cpu_T0, val);
5590 gen_op_mov_reg_v(ot, reg, cpu_T0);
5592 break;
5594 case 0x91 ... 0x97: /* xchg R, EAX */
5595 do_xchg_reg_eax:
5596 ot = dflag;
5597 reg = (b & 7) | REX_B(s);
5598 rm = R_EAX;
5599 goto do_xchg_reg;
5600 case 0x86:
5601 case 0x87: /* xchg Ev, Gv */
5602 ot = mo_b_d(b, dflag);
5603 modrm = cpu_ldub_code(env, s->pc++);
5604 reg = ((modrm >> 3) & 7) | rex_r;
5605 mod = (modrm >> 6) & 3;
5606 if (mod == 3) {
5607 rm = (modrm & 7) | REX_B(s);
5608 do_xchg_reg:
5609 gen_op_mov_v_reg(ot, cpu_T0, reg);
5610 gen_op_mov_v_reg(ot, cpu_T1, rm);
5611 gen_op_mov_reg_v(ot, rm, cpu_T0);
5612 gen_op_mov_reg_v(ot, reg, cpu_T1);
5613 } else {
5614 gen_lea_modrm(env, s, modrm);
5615 gen_op_mov_v_reg(ot, cpu_T0, reg);
5616 /* for xchg, lock is implicit */
5617 tcg_gen_atomic_xchg_tl(cpu_T1, cpu_A0, cpu_T0,
5618 s->mem_index, ot | MO_LE);
5619 gen_op_mov_reg_v(ot, reg, cpu_T1);
5621 break;
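/* A memory-form XCHG is locked by the architecture whether or not a
   LOCK prefix is present, so the load/store pair is emitted as one
   tcg_gen_atomic_xchg_tl unconditionally. */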
5622 case 0xc4: /* les Gv */
5623 /* In CODE64 this is VEX3; see above. */
5624 op = R_ES;
5625 goto do_lxx;
5626 case 0xc5: /* lds Gv */
5627 /* In CODE64 this is VEX2; see above. */
5628 op = R_DS;
5629 goto do_lxx;
5630 case 0x1b2: /* lss Gv */
5631 op = R_SS;
5632 goto do_lxx;
5633 case 0x1b4: /* lfs Gv */
5634 op = R_FS;
5635 goto do_lxx;
5636 case 0x1b5: /* lgs Gv */
5637 op = R_GS;
5638 do_lxx:
5639 ot = dflag != MO_16 ? MO_32 : MO_16;
5640 modrm = cpu_ldub_code(env, s->pc++);
5641 reg = ((modrm >> 3) & 7) | rex_r;
5642 mod = (modrm >> 6) & 3;
5643 if (mod == 3)
5644 goto illegal_op;
5645 gen_lea_modrm(env, s, modrm);
5646 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5647 gen_add_A0_im(s, 1 << ot);
5648 /* load the segment first to handle exceptions properly */
5649 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
5650 gen_movl_seg_T0(s, op);
5651 /* then put the data */
5652 gen_op_mov_reg_v(ot, reg, cpu_T1);
5653 if (s->is_jmp) {
5654 gen_jmp_im(s->pc - s->cs_base);
5655 gen_eob(s);
5657 break;
5659 /************************/
5660 /* shifts */
5661 case 0xc0:
5662 case 0xc1:
5663 /* shift Ev,Ib */
5664 shift = 2;
5665 grp2:
5667 ot = mo_b_d(b, dflag);
5668 modrm = cpu_ldub_code(env, s->pc++);
5669 mod = (modrm >> 6) & 3;
5670 op = (modrm >> 3) & 7;
5672 if (mod != 3) {
5673 if (shift == 2) {
5674 s->rip_offset = 1;
5676 gen_lea_modrm(env, s, modrm);
5677 opreg = OR_TMP0;
5678 } else {
5679 opreg = (modrm & 7) | REX_B(s);
5682 /* simpler op */
5683 if (shift == 0) {
5684 gen_shift(s, op, ot, opreg, OR_ECX);
5685 } else {
5686 if (shift == 2) {
5687 shift = cpu_ldub_code(env, s->pc++);
5689 gen_shifti(s, op, ot, opreg, shift);
5692 break;
5693 case 0xd0:
5694 case 0xd1:
5695 /* shift Ev,1 */
5696 shift = 1;
5697 goto grp2;
5698 case 0xd2:
5699 case 0xd3:
5700 /* shift Ev,cl */
5701 shift = 0;
5702 goto grp2;
5704 case 0x1a4: /* shld imm */
5705 op = 0;
5706 shift = 1;
5707 goto do_shiftd;
5708 case 0x1a5: /* shld cl */
5709 op = 0;
5710 shift = 0;
5711 goto do_shiftd;
5712 case 0x1ac: /* shrd imm */
5713 op = 1;
5714 shift = 1;
5715 goto do_shiftd;
5716 case 0x1ad: /* shrd cl */
5717 op = 1;
5718 shift = 0;
5719 do_shiftd:
5720 ot = dflag;
5721 modrm = cpu_ldub_code(env, s->pc++);
5722 mod = (modrm >> 6) & 3;
5723 rm = (modrm & 7) | REX_B(s);
5724 reg = ((modrm >> 3) & 7) | rex_r;
5725 if (mod != 3) {
5726 gen_lea_modrm(env, s, modrm);
5727 opreg = OR_TMP0;
5728 } else {
5729 opreg = rm;
5731 gen_op_mov_v_reg(ot, cpu_T1, reg);
5733 if (shift) {
5734 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5735 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5736 tcg_temp_free(imm);
5737 } else {
5738 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5740 break;
5742 /************************/
5743 /* floats */
5744 case 0xd8 ... 0xdf:
5745 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5746 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5747 /* XXX: what to do if illegal op ? */
5748 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5749 break;
5751 modrm = cpu_ldub_code(env, s->pc++);
5752 mod = (modrm >> 6) & 3;
5753 rm = modrm & 7;
5754 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5755 if (mod != 3) {
5756 /* memory op */
5757 gen_lea_modrm(env, s, modrm);
5758 switch(op) {
5759 case 0x00 ... 0x07: /* fxxxs */
5760 case 0x10 ... 0x17: /* fixxxl */
5761 case 0x20 ... 0x27: /* fxxxl */
5762 case 0x30 ... 0x37: /* fixxx */
5764 int op1;
5765 op1 = op & 7;
5767 switch(op >> 4) {
5768 case 0:
5769 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5770 s->mem_index, MO_LEUL);
5771 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5772 break;
5773 case 1:
5774 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5775 s->mem_index, MO_LEUL);
5776 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5777 break;
5778 case 2:
5779 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5780 s->mem_index, MO_LEQ);
5781 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5782 break;
5783 case 3:
5784 default:
5785 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5786 s->mem_index, MO_LESW);
5787 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5788 break;
5791 gen_helper_fp_arith_ST0_FT0(op1);
5792 if (op1 == 3) {
5793 /* fcomp needs pop */
5794 gen_helper_fpop(cpu_env);
5797 break;
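/* The 6-bit op packs the low three opcode bits (d8..df) with the /r
   field: op >> 4 selects the memory operand format (f32, i32, f64,
   i16) and op1 = op & 7 the arithmetic, in the order fadd, fmul, fcom,
   fcomp, fsub, fsubr, fdiv, fdivr, with fcomp additionally popping the
   stack as handled above. */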
5798 case 0x08: /* flds */
5799 case 0x0a: /* fsts */
5800 case 0x0b: /* fstps */
5801 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5802 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5803 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5804 switch(op & 7) {
5805 case 0:
5806 switch(op >> 4) {
5807 case 0:
5808 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5809 s->mem_index, MO_LEUL);
5810 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5811 break;
5812 case 1:
5813 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5814 s->mem_index, MO_LEUL);
5815 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5816 break;
5817 case 2:
5818 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5819 s->mem_index, MO_LEQ);
5820 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5821 break;
5822 case 3:
5823 default:
5824 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5825 s->mem_index, MO_LESW);
5826 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5827 break;
5829 break;
5830 case 1:
5831 /* XXX: the corresponding CPUID bit must be tested ! */
5832 switch(op >> 4) {
5833 case 1:
5834 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5835 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5836 s->mem_index, MO_LEUL);
5837 break;
5838 case 2:
5839 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5840 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5841 s->mem_index, MO_LEQ);
5842 break;
5843 case 3:
5844 default:
5845 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5846 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5847 s->mem_index, MO_LEUW);
5848 break;
5850 gen_helper_fpop(cpu_env);
5851 break;
5852 default:
5853 switch(op >> 4) {
5854 case 0:
5855 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5856 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5857 s->mem_index, MO_LEUL);
5858 break;
5859 case 1:
5860 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5861 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5862 s->mem_index, MO_LEUL);
5863 break;
5864 case 2:
5865 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5866 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5867 s->mem_index, MO_LEQ);
5868 break;
5869 case 3:
5870 default:
5871 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5872 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5873 s->mem_index, MO_LEUW);
5874 break;
5876 if ((op & 7) == 3)
5877 gen_helper_fpop(cpu_env);
5878 break;
5880 break;
5881 case 0x0c: /* fldenv mem */
5882 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5883 break;
5884 case 0x0d: /* fldcw mem */
5885 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5886 s->mem_index, MO_LEUW);
5887 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5888 break;
5889 case 0x0e: /* fnstenv mem */
5890 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5891 break;
5892 case 0x0f: /* fnstcw mem */
5893 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5894 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5895 s->mem_index, MO_LEUW);
5896 break;
5897 case 0x1d: /* fldt mem */
5898 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5899 break;
5900 case 0x1f: /* fstpt mem */
5901 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5902 gen_helper_fpop(cpu_env);
5903 break;
5904 case 0x2c: /* frstor mem */
5905 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5906 break;
5907 case 0x2e: /* fnsave mem */
5908 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5909 break;
5910 case 0x2f: /* fnstsw mem */
5911 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5912 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5913 s->mem_index, MO_LEUW);
5914 break;
5915 case 0x3c: /* fbld */
5916 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5917 break;
5918 case 0x3e: /* fbstp */
5919 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5920 gen_helper_fpop(cpu_env);
5921 break;
5922 case 0x3d: /* fildll */
5923 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5924 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5925 break;
5926 case 0x3f: /* fistpll */
5927 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5928 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5929 gen_helper_fpop(cpu_env);
5930 break;
5931 default:
5932 goto unknown_op;
5934 } else {
5935 /* register float ops */
5936 opreg = rm;
5938 switch(op) {
5939 case 0x08: /* fld sti */
5940 gen_helper_fpush(cpu_env);
5941 gen_helper_fmov_ST0_STN(cpu_env,
5942 tcg_const_i32((opreg + 1) & 7));
5943 break;
5944 case 0x09: /* fxchg sti */
5945 case 0x29: /* fxchg4 sti, undocumented op */
5946 case 0x39: /* fxchg7 sti, undocumented op */
5947 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5948 break;
5949 case 0x0a: /* grp d9/2 */
5950 switch(rm) {
5951 case 0: /* fnop */
5952 /* check exceptions (FreeBSD FPU probe) */
5953 gen_helper_fwait(cpu_env);
5954 break;
5955 default:
5956 goto unknown_op;
5958 break;
5959 case 0x0c: /* grp d9/4 */
5960 switch(rm) {
5961 case 0: /* fchs */
5962 gen_helper_fchs_ST0(cpu_env);
5963 break;
5964 case 1: /* fabs */
5965 gen_helper_fabs_ST0(cpu_env);
5966 break;
5967 case 4: /* ftst */
5968 gen_helper_fldz_FT0(cpu_env);
5969 gen_helper_fcom_ST0_FT0(cpu_env);
5970 break;
5971 case 5: /* fxam */
5972 gen_helper_fxam_ST0(cpu_env);
5973 break;
5974 default:
5975 goto unknown_op;
5977 break;
5978 case 0x0d: /* grp d9/5 */
5980 switch(rm) {
5981 case 0:
5982 gen_helper_fpush(cpu_env);
5983 gen_helper_fld1_ST0(cpu_env);
5984 break;
5985 case 1:
5986 gen_helper_fpush(cpu_env);
5987 gen_helper_fldl2t_ST0(cpu_env);
5988 break;
5989 case 2:
5990 gen_helper_fpush(cpu_env);
5991 gen_helper_fldl2e_ST0(cpu_env);
5992 break;
5993 case 3:
5994 gen_helper_fpush(cpu_env);
5995 gen_helper_fldpi_ST0(cpu_env);
5996 break;
5997 case 4:
5998 gen_helper_fpush(cpu_env);
5999 gen_helper_fldlg2_ST0(cpu_env);
6000 break;
6001 case 5:
6002 gen_helper_fpush(cpu_env);
6003 gen_helper_fldln2_ST0(cpu_env);
6004 break;
6005 case 6:
6006 gen_helper_fpush(cpu_env);
6007 gen_helper_fldz_ST0(cpu_env);
6008 break;
6009 default:
6010 goto unknown_op;
6013 break;
6014 case 0x0e: /* grp d9/6 */
6015 switch(rm) {
6016 case 0: /* f2xm1 */
6017 gen_helper_f2xm1(cpu_env);
6018 break;
6019 case 1: /* fyl2x */
6020 gen_helper_fyl2x(cpu_env);
6021 break;
6022 case 2: /* fptan */
6023 gen_helper_fptan(cpu_env);
6024 break;
6025 case 3: /* fpatan */
6026 gen_helper_fpatan(cpu_env);
6027 break;
6028 case 4: /* fxtract */
6029 gen_helper_fxtract(cpu_env);
6030 break;
6031 case 5: /* fprem1 */
6032 gen_helper_fprem1(cpu_env);
6033 break;
6034 case 6: /* fdecstp */
6035 gen_helper_fdecstp(cpu_env);
6036 break;
6037 default:
6038 case 7: /* fincstp */
6039 gen_helper_fincstp(cpu_env);
6040 break;
6042 break;
6043 case 0x0f: /* grp d9/7 */
6044 switch(rm) {
6045 case 0: /* fprem */
6046 gen_helper_fprem(cpu_env);
6047 break;
6048 case 1: /* fyl2xp1 */
6049 gen_helper_fyl2xp1(cpu_env);
6050 break;
6051 case 2: /* fsqrt */
6052 gen_helper_fsqrt(cpu_env);
6053 break;
6054 case 3: /* fsincos */
6055 gen_helper_fsincos(cpu_env);
6056 break;
6057 case 5: /* fscale */
6058 gen_helper_fscale(cpu_env);
6059 break;
6060 case 4: /* frndint */
6061 gen_helper_frndint(cpu_env);
6062 break;
6063 case 6: /* fsin */
6064 gen_helper_fsin(cpu_env);
6065 break;
6066 default:
6067 case 7: /* fcos */
6068 gen_helper_fcos(cpu_env);
6069 break;
6071 break;
6072 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6073 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6074 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6076 int op1;
6078 op1 = op & 7;
6079 if (op >= 0x20) {
6080 gen_helper_fp_arith_STN_ST0(op1, opreg);
6081 if (op >= 0x30)
6082 gen_helper_fpop(cpu_env);
6083 } else {
6084 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6085 gen_helper_fp_arith_ST0_FT0(op1);
6088 break;
6089 case 0x02: /* fcom */
6090 case 0x22: /* fcom2, undocumented op */
6091 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6092 gen_helper_fcom_ST0_FT0(cpu_env);
6093 break;
6094 case 0x03: /* fcomp */
6095 case 0x23: /* fcomp3, undocumented op */
6096 case 0x32: /* fcomp5, undocumented op */
6097 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6098 gen_helper_fcom_ST0_FT0(cpu_env);
6099 gen_helper_fpop(cpu_env);
6100 break;
6101 case 0x15: /* da/5 */
6102 switch(rm) {
6103 case 1: /* fucompp */
6104 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6105 gen_helper_fucom_ST0_FT0(cpu_env);
6106 gen_helper_fpop(cpu_env);
6107 gen_helper_fpop(cpu_env);
6108 break;
6109 default:
6110 goto unknown_op;
6112 break;
6113 case 0x1c:
6114 switch(rm) {
6115 case 0: /* feni (287 only, just do nop here) */
6116 break;
6117 case 1: /* fdisi (287 only, just do nop here) */
6118 break;
6119 case 2: /* fclex */
6120 gen_helper_fclex(cpu_env);
6121 break;
6122 case 3: /* fninit */
6123 gen_helper_fninit(cpu_env);
6124 break;
6125 case 4: /* fsetpm (287 only, just do nop here) */
6126 break;
6127 default:
6128 goto unknown_op;
6130 break;
6131 case 0x1d: /* fucomi */
6132 if (!(s->cpuid_features & CPUID_CMOV)) {
6133 goto illegal_op;
6135 gen_update_cc_op(s);
6136 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6137 gen_helper_fucomi_ST0_FT0(cpu_env);
6138 set_cc_op(s, CC_OP_EFLAGS);
6139 break;
6140 case 0x1e: /* fcomi */
6141 if (!(s->cpuid_features & CPUID_CMOV)) {
6142 goto illegal_op;
6144 gen_update_cc_op(s);
6145 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6146 gen_helper_fcomi_ST0_FT0(cpu_env);
6147 set_cc_op(s, CC_OP_EFLAGS);
6148 break;
6149 case 0x28: /* ffree sti */
6150 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6151 break;
6152 case 0x2a: /* fst sti */
6153 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6154 break;
6155 case 0x2b: /* fstp sti */
6156 case 0x0b: /* fstp1 sti, undocumented op */
6157 case 0x3a: /* fstp8 sti, undocumented op */
6158 case 0x3b: /* fstp9 sti, undocumented op */
6159 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6160 gen_helper_fpop(cpu_env);
6161 break;
6162 case 0x2c: /* fucom st(i) */
6163 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6164 gen_helper_fucom_ST0_FT0(cpu_env);
6165 break;
6166 case 0x2d: /* fucomp st(i) */
6167 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6168 gen_helper_fucom_ST0_FT0(cpu_env);
6169 gen_helper_fpop(cpu_env);
6170 break;
6171 case 0x33: /* de/3 */
6172 switch(rm) {
6173 case 1: /* fcompp */
6174 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6175 gen_helper_fcom_ST0_FT0(cpu_env);
6176 gen_helper_fpop(cpu_env);
6177 gen_helper_fpop(cpu_env);
6178 break;
6179 default:
6180 goto unknown_op;
6182 break;
6183 case 0x38: /* ffreep sti, undocumented op */
6184 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6185 gen_helper_fpop(cpu_env);
6186 break;
6187 case 0x3c: /* df/4 */
6188 switch(rm) {
6189 case 0:
6190 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6191 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
6192 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
6193 break;
6194 default:
6195 goto unknown_op;
6197 break;
6198 case 0x3d: /* fucomip */
6199 if (!(s->cpuid_features & CPUID_CMOV)) {
6200 goto illegal_op;
6202 gen_update_cc_op(s);
6203 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6204 gen_helper_fucomi_ST0_FT0(cpu_env);
6205 gen_helper_fpop(cpu_env);
6206 set_cc_op(s, CC_OP_EFLAGS);
6207 break;
6208 case 0x3e: /* fcomip */
6209 if (!(s->cpuid_features & CPUID_CMOV)) {
6210 goto illegal_op;
6212 gen_update_cc_op(s);
6213 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6214 gen_helper_fcomi_ST0_FT0(cpu_env);
6215 gen_helper_fpop(cpu_env);
6216 set_cc_op(s, CC_OP_EFLAGS);
6217 break;
6218 case 0x10 ... 0x13: /* fcmovxx */
6219 case 0x18 ... 0x1b:
6221 int op1;
6222 TCGLabel *l1;
6223 static const uint8_t fcmov_cc[8] = {
6224 (JCC_B << 1),
6225 (JCC_Z << 1),
6226 (JCC_BE << 1),
6227 (JCC_P << 1),
6228 };
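/* fcmov_cc holds the base condition (below, equal, below-or-equal,
   unordered).  gen_jcc1_noeob below branches *over* the fmov when
   its condition holds, so op1 must be the negation of the move
   condition: bit 0 negates the table entry for the 0x10-0x13
   forms, while the 0x18-0x1b fcmovn* forms use it directly. */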
6230 if (!(s->cpuid_features & CPUID_CMOV)) {
6231 goto illegal_op;
6233 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6234 l1 = gen_new_label();
6235 gen_jcc1_noeob(s, op1, l1);
6236 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6237 gen_set_label(l1);
6239 break;
6240 default:
6241 goto unknown_op;
6244 break;
6245 /************************/
6246 /* string ops */
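/* Roughly, with a REP/REPZ/REPNZ prefix the gen_repz_* expanders
   wrap the single-iteration helpers in a generated loop: test
   (E)CX for zero, do one iteration, adjust (E)CX, and jump back to
   the start of the insn -- hence both the current and the next EIP
   are passed in. */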
6248 case 0xa4: /* movsS */
6249 case 0xa5:
6250 ot = mo_b_d(b, dflag);
6251 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6252 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6253 } else {
6254 gen_movs(s, ot);
6256 break;
6258 case 0xaa: /* stosS */
6259 case 0xab:
6260 ot = mo_b_d(b, dflag);
6261 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6262 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6263 } else {
6264 gen_stos(s, ot);
6266 break;
6267 case 0xac: /* lodsS */
6268 case 0xad:
6269 ot = mo_b_d(b, dflag);
6270 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6271 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6272 } else {
6273 gen_lods(s, ot);
6275 break;
6276 case 0xae: /* scasS */
6277 case 0xaf:
6278 ot = mo_b_d(b, dflag);
6279 if (prefixes & PREFIX_REPNZ) {
6280 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6281 } else if (prefixes & PREFIX_REPZ) {
6282 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6283 } else {
6284 gen_scas(s, ot);
6286 break;
6288 case 0xa6: /* cmpsS */
6289 case 0xa7:
6290 ot = mo_b_d(b, dflag);
6291 if (prefixes & PREFIX_REPNZ) {
6292 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6293 } else if (prefixes & PREFIX_REPZ) {
6294 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6295 } else {
6296 gen_cmps(s, ot);
6298 break;
6299 case 0x6c: /* insS */
6300 case 0x6d:
6301 ot = mo_b_d32(b, dflag);
6302 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6303 gen_check_io(s, ot, pc_start - s->cs_base,
6304 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6305 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6306 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6307 } else {
6308 gen_ins(s, ot);
6309 if (s->tb->cflags & CF_USE_ICOUNT) {
6310 gen_jmp(s, s->pc - s->cs_base);
6313 break;
6314 case 0x6e: /* outsS */
6315 case 0x6f:
6316 ot = mo_b_d32(b, dflag);
6317 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6318 gen_check_io(s, ot, pc_start - s->cs_base,
6319 svm_is_rep(prefixes) | 4);
6320 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6321 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6322 } else {
6323 gen_outs(s, ot);
6324 if (s->tb->cflags & CF_USE_ICOUNT) {
6325 gen_jmp(s, s->pc - s->cs_base);
6328 break;
6330 /************************/
6331 /* port I/O */
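/* Convention for the in/out cases below: the port number is put
   into cpu_T0 first because gen_check_io uses it for the TSS
   I/O-permission-bitmap and SVM intercept checks.  With icount
   enabled, the access itself is bracketed by gen_io_start/
   gen_io_end and the TB is ended, since port I/O may touch the
   virtual clock. */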
6333 case 0xe4:
6334 case 0xe5:
6335 ot = mo_b_d32(b, dflag);
6336 val = cpu_ldub_code(env, s->pc++);
6337 tcg_gen_movi_tl(cpu_T0, val);
6338 gen_check_io(s, ot, pc_start - s->cs_base,
6339 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6340 if (s->tb->cflags & CF_USE_ICOUNT) {
6341 gen_io_start();
6343 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6344 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6345 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
6346 gen_bpt_io(s, cpu_tmp2_i32, ot);
6347 if (s->tb->cflags & CF_USE_ICOUNT) {
6348 gen_io_end();
6349 gen_jmp(s, s->pc - s->cs_base);
6351 break;
6352 case 0xe6:
6353 case 0xe7:
6354 ot = mo_b_d32(b, dflag);
6355 val = cpu_ldub_code(env, s->pc++);
6356 tcg_gen_movi_tl(cpu_T0, val);
6357 gen_check_io(s, ot, pc_start - s->cs_base,
6358 svm_is_rep(prefixes));
6359 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
6361 if (s->tb->cflags & CF_USE_ICOUNT) {
6362 gen_io_start();
6364 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6365 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
6366 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6367 gen_bpt_io(s, cpu_tmp2_i32, ot);
6368 if (s->tb->cflags & CF_USE_ICOUNT) {
6369 gen_io_end();
6370 gen_jmp(s, s->pc - s->cs_base);
6372 break;
6373 case 0xec:
6374 case 0xed:
6375 ot = mo_b_d32(b, dflag);
6376 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6377 gen_check_io(s, ot, pc_start - s->cs_base,
6378 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6379 if (s->tb->cflags & CF_USE_ICOUNT) {
6380 gen_io_start();
6382 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6383 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6384 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
6385 gen_bpt_io(s, cpu_tmp2_i32, ot);
6386 if (s->tb->cflags & CF_USE_ICOUNT) {
6387 gen_io_end();
6388 gen_jmp(s, s->pc - s->cs_base);
6390 break;
6391 case 0xee:
6392 case 0xef:
6393 ot = mo_b_d32(b, dflag);
6394 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6395 gen_check_io(s, ot, pc_start - s->cs_base,
6396 svm_is_rep(prefixes));
6397 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
6399 if (s->tb->cflags & CF_USE_ICOUNT) {
6400 gen_io_start();
6402 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6403 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
6404 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6405 gen_bpt_io(s, cpu_tmp2_i32, ot);
6406 if (s->tb->cflags & CF_USE_ICOUNT) {
6407 gen_io_end();
6408 gen_jmp(s, s->pc - s->cs_base);
6410 break;
6412 /************************/
6413 /* control */
6414 case 0xc2: /* ret im */
6415 val = cpu_ldsw_code(env, s->pc);
6416 s->pc += 2;
6417 ot = gen_pop_T0(s);
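/* ESP is adjusted in a single step: the size of the popped return
   address (1 << ot bytes) plus the immediate operand. */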
6418 gen_stack_update(s, val + (1 << ot));
6419 /* Note that gen_pop_T0 uses a zero-extending load. */
6420 gen_op_jmp_v(cpu_T0);
6421 gen_bnd_jmp(s);
6422 gen_jr(s, cpu_T0);
6423 break;
6424 case 0xc3: /* ret */
6425 ot = gen_pop_T0(s);
6426 gen_pop_update(s, ot);
6427 /* Note that gen_pop_T0 uses a zero-extending load. */
6428 gen_op_jmp_v(cpu_T0);
6429 gen_bnd_jmp(s);
6430 gen_jr(s, cpu_T0);
6431 break;
6432 case 0xca: /* lret im */
6433 val = cpu_ldsw_code(env, s->pc);
6434 s->pc += 2;
6435 do_lret:
6436 if (s->pe && !s->vm86) {
6437 gen_update_cc_op(s);
6438 gen_jmp_im(pc_start - s->cs_base);
6439 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6440 tcg_const_i32(val));
6441 } else {
6442 gen_stack_A0(s);
6443 /* pop offset */
6444 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
6445 /* NOTE: keeping EIP updated is not a problem in case of
6446 exception */
6447 gen_op_jmp_v(cpu_T0);
6448 /* pop selector */
6449 gen_add_A0_im(s, 1 << dflag);
6450 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
6451 gen_op_movl_seg_T0_vm(R_CS);
6452 /* add stack offset */
6453 gen_stack_update(s, val + (2 << dflag));
6455 gen_eob(s);
6456 break;
6457 case 0xcb: /* lret */
6458 val = 0;
6459 goto do_lret;
6460 case 0xcf: /* iret */
6461 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6462 if (!s->pe) {
6463 /* real mode */
6464 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6465 set_cc_op(s, CC_OP_EFLAGS);
6466 } else if (s->vm86) {
6467 if (s->iopl != 3) {
6468 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6469 } else {
6470 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6471 set_cc_op(s, CC_OP_EFLAGS);
6473 } else {
6474 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6475 tcg_const_i32(s->pc - s->cs_base));
6476 set_cc_op(s, CC_OP_EFLAGS);
6478 gen_eob(s);
6479 break;
6480 case 0xe8: /* call im */
6482 if (dflag != MO_16) {
6483 tval = (int32_t)insn_get(env, s, MO_32);
6484 } else {
6485 tval = (int16_t)insn_get(env, s, MO_16);
6487 next_eip = s->pc - s->cs_base;
6488 tval += next_eip;
6489 if (dflag == MO_16) {
6490 tval &= 0xffff;
6491 } else if (!CODE64(s)) {
6492 tval &= 0xffffffff;
6494 tcg_gen_movi_tl(cpu_T0, next_eip);
6495 gen_push_v(s, cpu_T0);
6496 gen_bnd_jmp(s);
6497 gen_jmp(s, tval);
6499 break;
6500 case 0x9a: /* lcall im */
6502 unsigned int selector, offset;
6504 if (CODE64(s))
6505 goto illegal_op;
6506 ot = dflag;
6507 offset = insn_get(env, s, ot);
6508 selector = insn_get(env, s, MO_16);
6510 tcg_gen_movi_tl(cpu_T0, selector);
6511 tcg_gen_movi_tl(cpu_T1, offset);
6513 goto do_lcall;
6514 case 0xe9: /* jmp im */
6515 if (dflag != MO_16) {
6516 tval = (int32_t)insn_get(env, s, MO_32);
6517 } else {
6518 tval = (int16_t)insn_get(env, s, MO_16);
6520 tval += s->pc - s->cs_base;
6521 if (dflag == MO_16) {
6522 tval &= 0xffff;
6523 } else if (!CODE64(s)) {
6524 tval &= 0xffffffff;
6526 gen_bnd_jmp(s);
6527 gen_jmp(s, tval);
6528 break;
6529 case 0xea: /* ljmp im */
6531 unsigned int selector, offset;
6533 if (CODE64(s))
6534 goto illegal_op;
6535 ot = dflag;
6536 offset = insn_get(env, s, ot);
6537 selector = insn_get(env, s, MO_16);
6539 tcg_gen_movi_tl(cpu_T0, selector);
6540 tcg_gen_movi_tl(cpu_T1, offset);
6542 goto do_ljmp;
6543 case 0xeb: /* jmp Jb */
6544 tval = (int8_t)insn_get(env, s, MO_8);
6545 tval += s->pc - s->cs_base;
6546 if (dflag == MO_16) {
6547 tval &= 0xffff;
6549 gen_jmp(s, tval);
6550 break;
6551 case 0x70 ... 0x7f: /* jcc Jb */
6552 tval = (int8_t)insn_get(env, s, MO_8);
6553 goto do_jcc;
6554 case 0x180 ... 0x18f: /* jcc Jv */
6555 if (dflag != MO_16) {
6556 tval = (int32_t)insn_get(env, s, MO_32);
6557 } else {
6558 tval = (int16_t)insn_get(env, s, MO_16);
6560 do_jcc:
6561 next_eip = s->pc - s->cs_base;
6562 tval += next_eip;
6563 if (dflag == MO_16) {
6564 tval &= 0xffff;
6566 gen_bnd_jmp(s);
6567 gen_jcc(s, b, tval, next_eip);
6568 break;
6570 case 0x190 ... 0x19f: /* setcc Gv */
6571 modrm = cpu_ldub_code(env, s->pc++);
6572 gen_setcc1(s, b, cpu_T0);
6573 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6574 break;
6575 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6576 if (!(s->cpuid_features & CPUID_CMOV)) {
6577 goto illegal_op;
6579 ot = dflag;
6580 modrm = cpu_ldub_code(env, s->pc++);
6581 reg = ((modrm >> 3) & 7) | rex_r;
6582 gen_cmovcc1(env, s, ot, b, modrm, reg);
6583 break;
6585 /************************/
6586 /* flags */
6587 case 0x9c: /* pushf */
6588 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6589 if (s->vm86 && s->iopl != 3) {
6590 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6591 } else {
6592 gen_update_cc_op(s);
6593 gen_helper_read_eflags(cpu_T0, cpu_env);
6594 gen_push_v(s, cpu_T0);
6596 break;
6597 case 0x9d: /* popf */
6598 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6599 if (s->vm86 && s->iopl != 3) {
6600 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6601 } else {
6602 ot = gen_pop_T0(s);
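/* Which eflags bits popf may modify depends on privilege: CPL 0
   may change IF and IOPL; 0 < CPL <= IOPL may change IF but not
   IOPL; CPL > IOPL may change neither.  TF, AC, ID and NT are
   writable in all three cases, and a 16-bit operand size only
   affects the low word. */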
6603 if (s->cpl == 0) {
6604 if (dflag != MO_16) {
6605 gen_helper_write_eflags(cpu_env, cpu_T0,
6606 tcg_const_i32((TF_MASK | AC_MASK |
6607 ID_MASK | NT_MASK |
6608 IF_MASK |
6609 IOPL_MASK)));
6610 } else {
6611 gen_helper_write_eflags(cpu_env, cpu_T0,
6612 tcg_const_i32((TF_MASK | AC_MASK |
6613 ID_MASK | NT_MASK |
6614 IF_MASK | IOPL_MASK)
6615 & 0xffff));
6617 } else {
6618 if (s->cpl <= s->iopl) {
6619 if (dflag != MO_16) {
6620 gen_helper_write_eflags(cpu_env, cpu_T0,
6621 tcg_const_i32((TF_MASK |
6622 AC_MASK |
6623 ID_MASK |
6624 NT_MASK |
6625 IF_MASK)));
6626 } else {
6627 gen_helper_write_eflags(cpu_env, cpu_T0,
6628 tcg_const_i32((TF_MASK |
6629 AC_MASK |
6630 ID_MASK |
6631 NT_MASK |
6632 IF_MASK)
6633 & 0xffff));
6635 } else {
6636 if (dflag != MO_16) {
6637 gen_helper_write_eflags(cpu_env, cpu_T0,
6638 tcg_const_i32((TF_MASK | AC_MASK |
6639 ID_MASK | NT_MASK)));
6640 } else {
6641 gen_helper_write_eflags(cpu_env, cpu_T0,
6642 tcg_const_i32((TF_MASK | AC_MASK |
6643 ID_MASK | NT_MASK)
6644 & 0xffff));
6648 gen_pop_update(s, ot);
6649 set_cc_op(s, CC_OP_EFLAGS);
6650 /* abort translation because TF/AC flag may change */
6651 gen_jmp_im(s->pc - s->cs_base);
6652 gen_eob(s);
6654 break;
6655 case 0x9e: /* sahf */
6656 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6657 goto illegal_op;
6658 gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
6659 gen_compute_eflags(s);
6660 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6661 tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
6662 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
6663 break;
6664 case 0x9f: /* lahf */
6665 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6666 goto illegal_op;
6667 gen_compute_eflags(s);
6668 /* Note: gen_compute_eflags() only gives the condition codes */
6669 tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
6670 gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
6671 break;
6672 case 0xf5: /* cmc */
6673 gen_compute_eflags(s);
6674 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6675 break;
6676 case 0xf8: /* clc */
6677 gen_compute_eflags(s);
6678 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6679 break;
6680 case 0xf9: /* stc */
6681 gen_compute_eflags(s);
6682 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6683 break;
6684 case 0xfc: /* cld */
6685 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6686 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6687 break;
6688 case 0xfd: /* std */
6689 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6690 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6691 break;
6693 /************************/
6694 /* bit operations */
6695 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6696 ot = dflag;
6697 modrm = cpu_ldub_code(env, s->pc++);
6698 op = (modrm >> 3) & 7;
6699 mod = (modrm >> 6) & 3;
6700 rm = (modrm & 7) | REX_B(s);
6701 if (mod != 3) {
6702 s->rip_offset = 1;
6703 gen_lea_modrm(env, s, modrm);
6704 if (!(s->prefix & PREFIX_LOCK)) {
6705 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6707 } else {
6708 gen_op_mov_v_reg(ot, cpu_T0, rm);
6710 /* load shift */
6711 val = cpu_ldub_code(env, s->pc++);
6712 tcg_gen_movi_tl(cpu_T1, val);
6713 if (op < 4)
6714 goto unknown_op;
6715 op -= 4;
6716 goto bt_op;
6717 case 0x1a3: /* bt Gv, Ev */
6718 op = 0;
6719 goto do_btx;
6720 case 0x1ab: /* bts */
6721 op = 1;
6722 goto do_btx;
6723 case 0x1b3: /* btr */
6724 op = 2;
6725 goto do_btx;
6726 case 0x1bb: /* btc */
6727 op = 3;
6728 do_btx:
6729 ot = dflag;
6730 modrm = cpu_ldub_code(env, s->pc++);
6731 reg = ((modrm >> 3) & 7) | rex_r;
6732 mod = (modrm >> 6) & 3;
6733 rm = (modrm & 7) | REX_B(s);
6734 gen_op_mov_v_reg(MO_32, cpu_T1, reg);
6735 if (mod != 3) {
6736 AddressParts a = gen_lea_modrm_0(env, s, modrm);
6737 /* specific case: we need to add a displacement */
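/* The bit offset in cpu_T1 is sign-extended and may address bits
   outside the operand: the word actually accessed is at
   EA + ((bitoff >> (3 + ot)) << ot), and bt_op below masks the
   offset down to a bit index within that word.  E.g. a 32-bit bt
   with bit offset 100 touches the dword at EA + 12, bit 4. */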
6738 gen_exts(ot, cpu_T1);
6739 tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
6740 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6741 tcg_gen_add_tl(cpu_A0, gen_lea_modrm_1(a), cpu_tmp0);
6742 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
6743 if (!(s->prefix & PREFIX_LOCK)) {
6744 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6746 } else {
6747 gen_op_mov_v_reg(ot, cpu_T0, rm);
6749 bt_op:
6750 tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
6751 tcg_gen_movi_tl(cpu_tmp0, 1);
6752 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6753 if (s->prefix & PREFIX_LOCK) {
6754 switch (op) {
6755 case 0: /* bt */
6756 /* Needs no atomic ops; we suppressed the normal
6757 memory load for LOCK above so do it now. */
6758 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6759 break;
6760 case 1: /* bts */
6761 tcg_gen_atomic_fetch_or_tl(cpu_T0, cpu_A0, cpu_tmp0,
6762 s->mem_index, ot | MO_LE);
6763 break;
6764 case 2: /* btr */
6765 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6766 tcg_gen_atomic_fetch_and_tl(cpu_T0, cpu_A0, cpu_tmp0,
6767 s->mem_index, ot | MO_LE);
6768 break;
6769 default:
6770 case 3: /* btc */
6771 tcg_gen_atomic_fetch_xor_tl(cpu_T0, cpu_A0, cpu_tmp0,
6772 s->mem_index, ot | MO_LE);
6773 break;
6775 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
6776 } else {
6777 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
6778 switch (op) {
6779 case 0: /* bt */
6780 /* Data already loaded; nothing to do. */
6781 break;
6782 case 1: /* bts */
6783 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
6784 break;
6785 case 2: /* btr */
6786 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
6787 break;
6788 default:
6789 case 3: /* btc */
6790 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
6791 break;
6793 if (op != 0) {
6794 if (mod != 3) {
6795 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
6796 } else {
6797 gen_op_mov_reg_v(ot, rm, cpu_T0);
6802 /* Delay all CC updates until after the store above. Note that
6803 C is the result of the test, Z is unchanged, and the others
6804 are all undefined. */
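/* cpu_tmp4 holds the pre-modification value shifted right by the
   bit index, so its bit 0 is the tested bit.  That matches the
   CC_OP_SAR* lazy-flags convention used below: C is CC_SRC & 1,
   while Z (and S/P) still come from the untouched CC_DST. */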
6805 switch (s->cc_op) {
6806 case CC_OP_MULB ... CC_OP_MULQ:
6807 case CC_OP_ADDB ... CC_OP_ADDQ:
6808 case CC_OP_ADCB ... CC_OP_ADCQ:
6809 case CC_OP_SUBB ... CC_OP_SUBQ:
6810 case CC_OP_SBBB ... CC_OP_SBBQ:
6811 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6812 case CC_OP_INCB ... CC_OP_INCQ:
6813 case CC_OP_DECB ... CC_OP_DECQ:
6814 case CC_OP_SHLB ... CC_OP_SHLQ:
6815 case CC_OP_SARB ... CC_OP_SARQ:
6816 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6817 /* Z was going to be computed from the non-zero status of CC_DST.
6818 We can get that same Z value (and the new C value) by leaving
6819 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6820 same width. */
6821 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6822 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6823 break;
6824 default:
6825 /* Otherwise, generate EFLAGS and replace the C bit. */
6826 gen_compute_eflags(s);
6827 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6828 ctz32(CC_C), 1);
6829 break;
6831 break;
6832 case 0x1bc: /* bsf / tzcnt */
6833 case 0x1bd: /* bsr / lzcnt */
6834 ot = dflag;
6835 modrm = cpu_ldub_code(env, s->pc++);
6836 reg = ((modrm >> 3) & 7) | rex_r;
6837 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6838 gen_extu(ot, cpu_T0);
6840 /* Note that lzcnt and tzcnt are in different extensions. */
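/* lzcnt is advertised by CPUID.80000001H:ECX (ABM) and tzcnt by
   CPUID.07H:EBX (BMI1).  If the F3 prefix is present but the
   feature bit is not, we fall into the else branch below, which
   matches real CPUs: the encoding degrades to plain bsr/bsf. */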
6841 if ((prefixes & PREFIX_REPZ)
6842 && (b & 1
6843 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6844 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6845 int size = 8 << ot;
6846 /* For lzcnt/tzcnt, C bit is defined related to the input. */
6847 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
6848 if (b & 1) {
6849 /* For lzcnt, reduce the target_ulong result by the
6850 number of zeros that we expect to find at the top. */
6851 tcg_gen_clzi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS);
6852 tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
6853 } else {
6854 /* For tzcnt, a zero input must return the operand size. */
6855 tcg_gen_ctzi_tl(cpu_T0, cpu_T0, size);
6857 /* For lzcnt/tzcnt, Z bit is defined related to the result. */
6858 gen_op_update1_cc();
6859 set_cc_op(s, CC_OP_BMILGB + ot);
6860 } else {
6861 /* For bsr/bsf, only the Z bit is defined and it is related
6862 to the input and not the result. */
6863 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
6864 set_cc_op(s, CC_OP_LOGICB + ot);
6866 /* ??? The manual says that the output is undefined when the
6867 input is zero, but real hardware leaves it unchanged, and
6868 real programs appear to depend on that. Accomplish this
6869 by passing the output as the value to return upon zero. */
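/* tcg_gen_clz/ctz return their second source operand when the
   first is zero.  For bsf the old register value is passed
   through unchanged; for bsr it is pre-XORed with
   TARGET_LONG_BITS - 1 so that the final XOR restores it. */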
6870 if (b & 1) {
6871 /* For bsr, return the bit index of the first 1 bit,
6872 not the count of leading zeros. */
6873 tcg_gen_xori_tl(cpu_T1, cpu_regs[reg], TARGET_LONG_BITS - 1);
6874 tcg_gen_clz_tl(cpu_T0, cpu_T0, cpu_T1);
6875 tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
6876 } else {
6877 tcg_gen_ctz_tl(cpu_T0, cpu_T0, cpu_regs[reg]);
6880 gen_op_mov_reg_v(ot, reg, cpu_T0);
6881 break;
6882 /************************/
6883 /* bcd */
6884 case 0x27: /* daa */
6885 if (CODE64(s))
6886 goto illegal_op;
6887 gen_update_cc_op(s);
6888 gen_helper_daa(cpu_env);
6889 set_cc_op(s, CC_OP_EFLAGS);
6890 break;
6891 case 0x2f: /* das */
6892 if (CODE64(s))
6893 goto illegal_op;
6894 gen_update_cc_op(s);
6895 gen_helper_das(cpu_env);
6896 set_cc_op(s, CC_OP_EFLAGS);
6897 break;
6898 case 0x37: /* aaa */
6899 if (CODE64(s))
6900 goto illegal_op;
6901 gen_update_cc_op(s);
6902 gen_helper_aaa(cpu_env);
6903 set_cc_op(s, CC_OP_EFLAGS);
6904 break;
6905 case 0x3f: /* aas */
6906 if (CODE64(s))
6907 goto illegal_op;
6908 gen_update_cc_op(s);
6909 gen_helper_aas(cpu_env);
6910 set_cc_op(s, CC_OP_EFLAGS);
6911 break;
6912 case 0xd4: /* aam */
6913 if (CODE64(s))
6914 goto illegal_op;
6915 val = cpu_ldub_code(env, s->pc++);
6916 if (val == 0) {
6917 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6918 } else {
6919 gen_helper_aam(cpu_env, tcg_const_i32(val));
6920 set_cc_op(s, CC_OP_LOGICB);
6922 break;
6923 case 0xd5: /* aad */
6924 if (CODE64(s))
6925 goto illegal_op;
6926 val = cpu_ldub_code(env, s->pc++);
6927 gen_helper_aad(cpu_env, tcg_const_i32(val));
6928 set_cc_op(s, CC_OP_LOGICB);
6929 break;
6930 /************************/
6931 /* misc */
6932 case 0x90: /* nop */
6933 /* XXX: correct lock test for all insn */
6934 if (prefixes & PREFIX_LOCK) {
6935 goto illegal_op;
6937 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6938 if (REX_B(s)) {
6939 goto do_xchg_reg_eax;
6941 if (prefixes & PREFIX_REPZ) {
6942 gen_update_cc_op(s);
6943 gen_jmp_im(pc_start - s->cs_base);
6944 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6945 s->is_jmp = DISAS_TB_JUMP;
6947 break;
6948 case 0x9b: /* fwait */
6949 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6950 (HF_MP_MASK | HF_TS_MASK)) {
6951 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6952 } else {
6953 gen_helper_fwait(cpu_env);
6955 break;
6956 case 0xcc: /* int3 */
6957 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6958 break;
6959 case 0xcd: /* int N */
6960 val = cpu_ldub_code(env, s->pc++);
6961 if (s->vm86 && s->iopl != 3) {
6962 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6963 } else {
6964 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6966 break;
6967 case 0xce: /* into */
6968 if (CODE64(s))
6969 goto illegal_op;
6970 gen_update_cc_op(s);
6971 gen_jmp_im(pc_start - s->cs_base);
6972 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6973 break;
6974 #ifdef WANT_ICEBP
6975 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6976 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6977 #if 1
6978 gen_debug(s, pc_start - s->cs_base);
6979 #else
6980 /* start debug */
6981 tb_flush(CPU(x86_env_get_cpu(env)));
6982 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6983 #endif
6984 break;
6985 #endif
6986 case 0xfa: /* cli */
6987 if (!s->vm86) {
6988 if (s->cpl <= s->iopl) {
6989 gen_helper_cli(cpu_env);
6990 } else {
6991 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6993 } else {
6994 if (s->iopl == 3) {
6995 gen_helper_cli(cpu_env);
6996 } else {
6997 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7000 break;
7001 case 0xfb: /* sti */
7002 if (s->vm86 ? s->iopl == 3 : s->cpl <= s->iopl) {
7003 gen_helper_sti(cpu_env);
7004 /* interrupts are recognized only after the insn following sti */
7005 gen_jmp_im(s->pc - s->cs_base);
7006 gen_eob_inhibit_irq(s, true);
7007 } else {
7008 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7010 break;
7011 case 0x62: /* bound */
7012 if (CODE64(s))
7013 goto illegal_op;
7014 ot = dflag;
7015 modrm = cpu_ldub_code(env, s->pc++);
7016 reg = (modrm >> 3) & 7;
7017 mod = (modrm >> 6) & 3;
7018 if (mod == 3)
7019 goto illegal_op;
7020 gen_op_mov_v_reg(ot, cpu_T0, reg);
7021 gen_lea_modrm(env, s, modrm);
7022 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7023 if (ot == MO_16) {
7024 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7025 } else {
7026 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7028 break;
7029 case 0x1c8 ... 0x1cf: /* bswap reg */
7030 reg = (b & 7) | REX_B(s);
7031 #ifdef TARGET_X86_64
7032 if (dflag == MO_64) {
7033 gen_op_mov_v_reg(MO_64, cpu_T0, reg);
7034 tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
7035 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
7036 } else
7037 #endif
7039 gen_op_mov_v_reg(MO_32, cpu_T0, reg);
7040 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
7041 tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
7042 gen_op_mov_reg_v(MO_32, reg, cpu_T0);
7044 break;
7045 case 0xd6: /* salc */
7046 if (CODE64(s))
7047 goto illegal_op;
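/* Undocumented salc: AL = CF ? 0xff : 0x00.  The computed carry
   is 0 or 1, so negating it yields all-zeros or all-ones. */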
7048 gen_compute_eflags_c(s, cpu_T0);
7049 tcg_gen_neg_tl(cpu_T0, cpu_T0);
7050 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
7051 break;
7052 case 0xe0: /* loopnz */
7053 case 0xe1: /* loopz */
7054 case 0xe2: /* loop */
7055 case 0xe3: /* jecxz */
7057 TCGLabel *l1, *l2, *l3;
7059 tval = (int8_t)insn_get(env, s, MO_8);
7060 next_eip = s->pc - s->cs_base;
7061 tval += next_eip;
7062 if (dflag == MO_16) {
7063 tval &= 0xffff;
7066 l1 = gen_new_label();
7067 l2 = gen_new_label();
7068 l3 = gen_new_label();
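/* l1: branch taken, EIP = target; l3: branch not taken, EIP =
   next insn; l2: common exit once the new EIP is in place. */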
7069 b &= 3;
7070 switch(b) {
7071 case 0: /* loopnz */
7072 case 1: /* loopz */
7073 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7074 gen_op_jz_ecx(s->aflag, l3);
7075 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7076 break;
7077 case 2: /* loop */
7078 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7079 gen_op_jnz_ecx(s->aflag, l1);
7080 break;
7081 default:
7082 case 3: /* jcxz */
7083 gen_op_jz_ecx(s->aflag, l1);
7084 break;
7087 gen_set_label(l3);
7088 gen_jmp_im(next_eip);
7089 tcg_gen_br(l2);
7091 gen_set_label(l1);
7092 gen_jmp_im(tval);
7093 gen_set_label(l2);
7094 gen_eob(s);
7096 break;
7097 case 0x130: /* wrmsr */
7098 case 0x132: /* rdmsr */
7099 if (s->cpl != 0) {
7100 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7101 } else {
7102 gen_update_cc_op(s);
7103 gen_jmp_im(pc_start - s->cs_base);
7104 if (b & 2) {
7105 gen_helper_rdmsr(cpu_env);
7106 } else {
7107 gen_helper_wrmsr(cpu_env);
7110 break;
7111 case 0x131: /* rdtsc */
7112 gen_update_cc_op(s);
7113 gen_jmp_im(pc_start - s->cs_base);
7114 if (s->tb->cflags & CF_USE_ICOUNT) {
7115 gen_io_start();
7117 gen_helper_rdtsc(cpu_env);
7118 if (s->tb->cflags & CF_USE_ICOUNT) {
7119 gen_io_end();
7120 gen_jmp(s, s->pc - s->cs_base);
7122 break;
7123 case 0x133: /* rdpmc */
7124 gen_update_cc_op(s);
7125 gen_jmp_im(pc_start - s->cs_base);
7126 gen_helper_rdpmc(cpu_env);
7127 break;
7128 case 0x134: /* sysenter */
7129 /* On Intel CPUs, SYSENTER is valid even in 64-bit mode */
7130 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7131 goto illegal_op;
7132 if (!s->pe) {
7133 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7134 } else {
7135 gen_helper_sysenter(cpu_env);
7136 gen_eob(s);
7138 break;
7139 case 0x135: /* sysexit */
7140 /* On Intel CPUs, SYSEXIT is valid even in 64-bit mode */
7141 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7142 goto illegal_op;
7143 if (!s->pe) {
7144 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7145 } else {
7146 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7147 gen_eob(s);
7149 break;
7150 #ifdef TARGET_X86_64
7151 case 0x105: /* syscall */
7152 /* XXX: is it usable in real mode? */
7153 gen_update_cc_op(s);
7154 gen_jmp_im(pc_start - s->cs_base);
7155 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7156 /* TF handling for the syscall insn is different. The TF bit is checked
7157 after the syscall insn completes. This allows #DB to not be
7158 generated after one has entered CPL0 if TF is set in FMASK. */
7159 gen_eob_worker(s, false, true);
7160 break;
7161 case 0x107: /* sysret */
7162 if (!s->pe) {
7163 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7164 } else {
7165 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7166 /* condition codes are modified only in long mode */
7167 if (s->lma) {
7168 set_cc_op(s, CC_OP_EFLAGS);
7170 /* TF handling for the sysret insn is different. The TF bit is
7171 checked after the sysret insn completes. This allows #DB to be
7172 generated "as if" the syscall insn in userspace has just
7173 completed. */
7174 gen_eob_worker(s, false, true);
7176 break;
7177 #endif
7178 case 0x1a2: /* cpuid */
7179 gen_update_cc_op(s);
7180 gen_jmp_im(pc_start - s->cs_base);
7181 gen_helper_cpuid(cpu_env);
7182 break;
7183 case 0xf4: /* hlt */
7184 if (s->cpl != 0) {
7185 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7186 } else {
7187 gen_update_cc_op(s);
7188 gen_jmp_im(pc_start - s->cs_base);
7189 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7190 s->is_jmp = DISAS_TB_JUMP;
7192 break;
7193 case 0x100:
7194 modrm = cpu_ldub_code(env, s->pc++);
7195 mod = (modrm >> 6) & 3;
7196 op = (modrm >> 3) & 7;
7197 switch(op) {
7198 case 0: /* sldt */
7199 if (!s->pe || s->vm86)
7200 goto illegal_op;
7201 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7202 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7203 offsetof(CPUX86State, ldt.selector));
7204 ot = mod == 3 ? dflag : MO_16;
7205 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7206 break;
7207 case 2: /* lldt */
7208 if (!s->pe || s->vm86)
7209 goto illegal_op;
7210 if (s->cpl != 0) {
7211 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7212 } else {
7213 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7214 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7215 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7216 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7218 break;
7219 case 1: /* str */
7220 if (!s->pe || s->vm86)
7221 goto illegal_op;
7222 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7223 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7224 offsetof(CPUX86State, tr.selector));
7225 ot = mod == 3 ? dflag : MO_16;
7226 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7227 break;
7228 case 3: /* ltr */
7229 if (!s->pe || s->vm86)
7230 goto illegal_op;
7231 if (s->cpl != 0) {
7232 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7233 } else {
7234 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7235 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7236 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7237 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7239 break;
7240 case 4: /* verr */
7241 case 5: /* verw */
7242 if (!s->pe || s->vm86)
7243 goto illegal_op;
7244 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7245 gen_update_cc_op(s);
7246 if (op == 4) {
7247 gen_helper_verr(cpu_env, cpu_T0);
7248 } else {
7249 gen_helper_verw(cpu_env, cpu_T0);
7251 set_cc_op(s, CC_OP_EFLAGS);
7252 break;
7253 default:
7254 goto unknown_op;
7256 break;
7258 case 0x101:
7259 modrm = cpu_ldub_code(env, s->pc++);
7260 switch (modrm) {
7261 CASE_MODRM_MEM_OP(0): /* sgdt */
7262 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
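/* The stored descriptor-table image is a 16-bit limit followed by
   the base.  CODE64(s) + MO_32 evaluates to MO_64 in long mode,
   so 8 base bytes are written there and 4 elsewhere; a 16-bit
   operand size keeps only 24 bits of the base (0xffffff mask). */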
7263 gen_lea_modrm(env, s, modrm);
7264 tcg_gen_ld32u_tl(cpu_T0,
7265 cpu_env, offsetof(CPUX86State, gdt.limit));
7266 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7267 gen_add_A0_im(s, 2);
7268 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7269 if (dflag == MO_16) {
7270 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7272 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7273 break;
7275 case 0xc8: /* monitor */
7276 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7277 goto illegal_op;
7279 gen_update_cc_op(s);
7280 gen_jmp_im(pc_start - s->cs_base);
7281 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7282 gen_extu(s->aflag, cpu_A0);
7283 gen_add_A0_ds_seg(s);
7284 gen_helper_monitor(cpu_env, cpu_A0);
7285 break;
7287 case 0xc9: /* mwait */
7288 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7289 goto illegal_op;
7291 gen_update_cc_op(s);
7292 gen_jmp_im(pc_start - s->cs_base);
7293 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7294 gen_eob(s);
7295 break;
7297 case 0xca: /* clac */
7298 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7299 || s->cpl != 0) {
7300 goto illegal_op;
7302 gen_helper_clac(cpu_env);
7303 gen_jmp_im(s->pc - s->cs_base);
7304 gen_eob(s);
7305 break;
7307 case 0xcb: /* stac */
7308 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7309 || s->cpl != 0) {
7310 goto illegal_op;
7312 gen_helper_stac(cpu_env);
7313 gen_jmp_im(s->pc - s->cs_base);
7314 gen_eob(s);
7315 break;
7317 CASE_MODRM_MEM_OP(1): /* sidt */
7318 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7319 gen_lea_modrm(env, s, modrm);
7320 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
7321 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7322 gen_add_A0_im(s, 2);
7323 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7324 if (dflag == MO_16) {
7325 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7327 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7328 break;
7330 case 0xd0: /* xgetbv */
7331 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7332 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7333 | PREFIX_REPZ | PREFIX_REPNZ))) {
7334 goto illegal_op;
7336 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7337 gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7338 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
7339 break;
7341 case 0xd1: /* xsetbv */
7342 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7343 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7344 | PREFIX_REPZ | PREFIX_REPNZ))) {
7345 goto illegal_op;
7347 if (s->cpl != 0) {
7348 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7349 break;
7351 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7352 cpu_regs[R_EDX]);
7353 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7354 gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7355 /* End TB because translation flags may change. */
7356 gen_jmp_im(s->pc - s->cs_base);
7357 gen_eob(s);
7358 break;
7360 case 0xd8: /* VMRUN */
7361 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7362 goto illegal_op;
7364 if (s->cpl != 0) {
7365 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7366 break;
7368 gen_update_cc_op(s);
7369 gen_jmp_im(pc_start - s->cs_base);
7370 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7371 tcg_const_i32(s->pc - pc_start));
7372 tcg_gen_exit_tb(0);
7373 s->is_jmp = DISAS_TB_JUMP;
7374 break;
7376 case 0xd9: /* VMMCALL */
7377 if (!(s->flags & HF_SVME_MASK)) {
7378 goto illegal_op;
7380 gen_update_cc_op(s);
7381 gen_jmp_im(pc_start - s->cs_base);
7382 gen_helper_vmmcall(cpu_env);
7383 break;
7385 case 0xda: /* VMLOAD */
7386 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7387 goto illegal_op;
7389 if (s->cpl != 0) {
7390 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7391 break;
7393 gen_update_cc_op(s);
7394 gen_jmp_im(pc_start - s->cs_base);
7395 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7396 break;
7398 case 0xdb: /* VMSAVE */
7399 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7400 goto illegal_op;
7402 if (s->cpl != 0) {
7403 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7404 break;
7406 gen_update_cc_op(s);
7407 gen_jmp_im(pc_start - s->cs_base);
7408 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7409 break;
7411 case 0xdc: /* STGI */
7412 if ((!(s->flags & HF_SVME_MASK)
7413 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7414 || !s->pe) {
7415 goto illegal_op;
7417 if (s->cpl != 0) {
7418 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7419 break;
7421 gen_update_cc_op(s);
7422 gen_jmp_im(pc_start - s->cs_base);
7423 gen_helper_stgi(cpu_env);
7424 break;
7426 case 0xdd: /* CLGI */
7427 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7428 goto illegal_op;
7430 if (s->cpl != 0) {
7431 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7432 break;
7434 gen_update_cc_op(s);
7435 gen_jmp_im(pc_start - s->cs_base);
7436 gen_helper_clgi(cpu_env);
7437 break;
7439 case 0xde: /* SKINIT */
7440 if ((!(s->flags & HF_SVME_MASK)
7441 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7442 || !s->pe) {
7443 goto illegal_op;
7445 gen_update_cc_op(s);
7446 gen_jmp_im(pc_start - s->cs_base);
7447 gen_helper_skinit(cpu_env);
7448 break;
7450 case 0xdf: /* INVLPGA */
7451 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7452 goto illegal_op;
7454 if (s->cpl != 0) {
7455 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7456 break;
7458 gen_update_cc_op(s);
7459 gen_jmp_im(pc_start - s->cs_base);
7460 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
7461 break;
7463 CASE_MODRM_MEM_OP(2): /* lgdt */
7464 if (s->cpl != 0) {
7465 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7466 break;
7468 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
7469 gen_lea_modrm(env, s, modrm);
7470 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7471 gen_add_A0_im(s, 2);
7472 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7473 if (dflag == MO_16) {
7474 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7476 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7477 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
7478 break;
7480 CASE_MODRM_MEM_OP(3): /* lidt */
7481 if (s->cpl != 0) {
7482 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7483 break;
7485 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
7486 gen_lea_modrm(env, s, modrm);
7487 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7488 gen_add_A0_im(s, 2);
7489 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7490 if (dflag == MO_16) {
7491 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7493 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7494 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
7495 break;
7497 CASE_MODRM_OP(4): /* smsw */
7498 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7499 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
7500 if (CODE64(s)) {
7501 mod = (modrm >> 6) & 3;
7502 ot = (mod != 3 ? MO_16 : s->dflag);
7503 } else {
7504 ot = MO_16;
7506 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7507 break;
7508 case 0xee: /* rdpkru */
7509 if (prefixes & PREFIX_LOCK) {
7510 goto illegal_op;
7512 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7513 gen_helper_rdpkru(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7514 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
7515 break;
7516 case 0xef: /* wrpkru */
7517 if (prefixes & PREFIX_LOCK) {
7518 goto illegal_op;
7520 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7521 cpu_regs[R_EDX]);
7522 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7523 gen_helper_wrpkru(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7524 break;
7525 CASE_MODRM_OP(6): /* lmsw */
7526 if (s->cpl != 0) {
7527 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7528 break;
7530 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7531 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7532 gen_helper_lmsw(cpu_env, cpu_T0);
7533 gen_jmp_im(s->pc - s->cs_base);
7534 gen_eob(s);
7535 break;
7537 CASE_MODRM_MEM_OP(7): /* invlpg */
7538 if (s->cpl != 0) {
7539 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7540 break;
7542 gen_update_cc_op(s);
7543 gen_jmp_im(pc_start - s->cs_base);
7544 gen_lea_modrm(env, s, modrm);
7545 gen_helper_invlpg(cpu_env, cpu_A0);
7546 gen_jmp_im(s->pc - s->cs_base);
7547 gen_eob(s);
7548 break;
7550 case 0xf8: /* swapgs */
7551 #ifdef TARGET_X86_64
7552 if (CODE64(s)) {
7553 if (s->cpl != 0) {
7554 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7555 } else {
7556 tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
7557 tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
7558 offsetof(CPUX86State, kernelgsbase));
7559 tcg_gen_st_tl(cpu_T0, cpu_env,
7560 offsetof(CPUX86State, kernelgsbase));
7562 break;
7564 #endif
7565 goto illegal_op;
7567 case 0xf9: /* rdtscp */
7568 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
7569 goto illegal_op;
7571 gen_update_cc_op(s);
7572 gen_jmp_im(pc_start - s->cs_base);
7573 if (s->tb->cflags & CF_USE_ICOUNT) {
7574 gen_io_start();
7576 gen_helper_rdtscp(cpu_env);
7577 if (s->tb->cflags & CF_USE_ICOUNT) {
7578 gen_io_end();
7579 gen_jmp(s, s->pc - s->cs_base);
7581 break;
7583 default:
7584 goto unknown_op;
7586 break;
7588 case 0x108: /* invd */
7589 case 0x109: /* wbinvd */
7590 if (s->cpl != 0) {
7591 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7592 } else {
7593 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7594 /* nothing to do */
7596 break;
7597 case 0x63: /* arpl or movslS (x86_64) */
7598 #ifdef TARGET_X86_64
7599 if (CODE64(s)) {
7600 int d_ot;
7601 /* d_ot is the size of the destination */
7602 d_ot = dflag;
7604 modrm = cpu_ldub_code(env, s->pc++);
7605 reg = ((modrm >> 3) & 7) | rex_r;
7606 mod = (modrm >> 6) & 3;
7607 rm = (modrm & 7) | REX_B(s);
7609 if (mod == 3) {
7610 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
7611 /* sign extend */
7612 if (d_ot == MO_64) {
7613 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
7615 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
7616 } else {
7617 gen_lea_modrm(env, s, modrm);
7618 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
7619 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
7621 } else
7622 #endif
7624 TCGLabel *label1;
7625 TCGv t0, t1, t2, a0;
7627 if (!s->pe || s->vm86)
7628 goto illegal_op;
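/* arpl dst, src: if the RPL field (low two bits) of the
   destination selector is below that of the source, raise the
   destination RPL to match and set ZF, else clear ZF.  t2
   accumulates the new Z bit and is merged into cc_src below. */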
7629 t0 = tcg_temp_local_new();
7630 t1 = tcg_temp_local_new();
7631 t2 = tcg_temp_local_new();
7632 ot = MO_16;
7633 modrm = cpu_ldub_code(env, s->pc++);
7634 reg = (modrm >> 3) & 7;
7635 mod = (modrm >> 6) & 3;
7636 rm = modrm & 7;
7637 if (mod != 3) {
7638 gen_lea_modrm(env, s, modrm);
7639 gen_op_ld_v(s, ot, t0, cpu_A0);
7640 a0 = tcg_temp_local_new();
7641 tcg_gen_mov_tl(a0, cpu_A0);
7642 } else {
7643 gen_op_mov_v_reg(ot, t0, rm);
7644 TCGV_UNUSED(a0);
7646 gen_op_mov_v_reg(ot, t1, reg);
7647 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7648 tcg_gen_andi_tl(t1, t1, 3);
7649 tcg_gen_movi_tl(t2, 0);
7650 label1 = gen_new_label();
7651 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7652 tcg_gen_andi_tl(t0, t0, ~3);
7653 tcg_gen_or_tl(t0, t0, t1);
7654 tcg_gen_movi_tl(t2, CC_Z);
7655 gen_set_label(label1);
7656 if (mod != 3) {
7657 gen_op_st_v(s, ot, t0, a0);
7658 tcg_temp_free(a0);
7659 } else {
7660 gen_op_mov_reg_v(ot, rm, t0);
7662 gen_compute_eflags(s);
7663 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7664 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7665 tcg_temp_free(t0);
7666 tcg_temp_free(t1);
7667 tcg_temp_free(t2);
7669 break;
7670 case 0x102: /* lar */
7671 case 0x103: /* lsl */
7673 TCGLabel *label1;
7674 TCGv t0;
7675 if (!s->pe || s->vm86)
7676 goto illegal_op;
7677 ot = dflag != MO_16 ? MO_32 : MO_16;
7678 modrm = cpu_ldub_code(env, s->pc++);
7679 reg = ((modrm >> 3) & 7) | rex_r;
7680 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7681 t0 = tcg_temp_local_new();
7682 gen_update_cc_op(s);
7683 if (b == 0x102) {
7684 gen_helper_lar(t0, cpu_env, cpu_T0);
7685 } else {
7686 gen_helper_lsl(t0, cpu_env, cpu_T0);
7688 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7689 label1 = gen_new_label();
7690 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7691 gen_op_mov_reg_v(ot, reg, t0);
7692 gen_set_label(label1);
7693 set_cc_op(s, CC_OP_EFLAGS);
7694 tcg_temp_free(t0);
7696 break;
7697 case 0x118:
7698 modrm = cpu_ldub_code(env, s->pc++);
7699 mod = (modrm >> 6) & 3;
7700 op = (modrm >> 3) & 7;
7701 switch(op) {
7702 case 0: /* prefetchnta */
7703 case 1: /* prefetcht0 */
7704 case 2: /* prefetcht1 */
7705 case 3: /* prefetcht2 */
7706 if (mod == 3)
7707 goto illegal_op;
7708 gen_nop_modrm(env, s, modrm);
7709 /* nothing more to do */
7710 break;
7711 default: /* nop (multi byte) */
7712 gen_nop_modrm(env, s, modrm);
7713 break;
7715 break;
7716 case 0x11a:
7717 modrm = cpu_ldub_code(env, s->pc++);
7718 if (s->flags & HF_MPX_EN_MASK) {
7719 mod = (modrm >> 6) & 3;
7720 reg = ((modrm >> 3) & 7) | rex_r;
7721 if (prefixes & PREFIX_REPZ) {
7722 /* bndcl */
7723 if (reg >= 4
7724 || (prefixes & PREFIX_LOCK)
7725 || s->aflag == MO_16) {
7726 goto illegal_op;
7728 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
7729 } else if (prefixes & PREFIX_REPNZ) {
7730 /* bndcu */
7731 if (reg >= 4
7732 || (prefixes & PREFIX_LOCK)
7733 || s->aflag == MO_16) {
7734 goto illegal_op;
7736 TCGv_i64 notu = tcg_temp_new_i64();
7737 tcg_gen_not_i64(notu, cpu_bndu[reg]);
7738 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
7739 tcg_temp_free_i64(notu);
7740 } else if (prefixes & PREFIX_DATA) {
7741 /* bndmov -- from reg/mem */
7742 if (reg >= 4 || s->aflag == MO_16) {
7743 goto illegal_op;
7745 if (mod == 3) {
7746 int reg2 = (modrm & 7) | REX_B(s);
7747 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7748 goto illegal_op;
7750 if (s->flags & HF_MPX_IU_MASK) {
7751 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
7752 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
7754 } else {
7755 gen_lea_modrm(env, s, modrm);
7756 if (CODE64(s)) {
7757 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7758 s->mem_index, MO_LEQ);
7759 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7760 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7761 s->mem_index, MO_LEQ);
7762 } else {
7763 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7764 s->mem_index, MO_LEUL);
7765 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7766 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7767 s->mem_index, MO_LEUL);
7769 /* bnd registers are now in use */
7770 gen_set_hflag(s, HF_MPX_IU_MASK);
7772 } else if (mod != 3) {
7773 /* bndldx */
7774 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7775 if (reg >= 4
7776 || (prefixes & PREFIX_LOCK)
7777 || s->aflag == MO_16
7778 || a.base < -1) {
7779 goto illegal_op;
7781 if (a.base >= 0) {
7782 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7783 } else {
7784 tcg_gen_movi_tl(cpu_A0, 0);
7786 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7787 if (a.index >= 0) {
7788 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7789 } else {
7790 tcg_gen_movi_tl(cpu_T0, 0);
7792 if (CODE64(s)) {
7793 gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
7794 tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
7795 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
7796 } else {
7797 gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
7798 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
7799 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
7801 gen_set_hflag(s, HF_MPX_IU_MASK);
7804 gen_nop_modrm(env, s, modrm);
7805 break;
7806 case 0x11b:
7807 modrm = cpu_ldub_code(env, s->pc++);
7808 if (s->flags & HF_MPX_EN_MASK) {
7809 mod = (modrm >> 6) & 3;
7810 reg = ((modrm >> 3) & 7) | rex_r;
7811 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
7812 /* bndmk */
7813 if (reg >= 4
7814 || (prefixes & PREFIX_LOCK)
7815 || s->aflag == MO_16) {
7816 goto illegal_op;
7818 AddressParts a = gen_lea_modrm_0(env, s, modrm);
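/* bndmk: the lower bound is the base register (or 0 when there is
   no base) and the upper bound is the one's complement of the
   full effective address, matching the inverted-upper-bound form
   MPX uses (see the bndcu case above).  rip-relative addressing
   raises #UD. */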
7819 if (a.base >= 0) {
7820 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
7821 if (!CODE64(s)) {
7822 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
7824 } else if (a.base == -1) {
7825 /* no base register: the lower bound is 0 */
7826 tcg_gen_movi_i64(cpu_bndl[reg], 0);
7827 } else {
7828 /* rip-relative generates #ud */
7829 goto illegal_op;
7831 tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
7832 if (!CODE64(s)) {
7833 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
7835 tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
7836 /* bnd registers are now in use */
7837 gen_set_hflag(s, HF_MPX_IU_MASK);
7838 break;
7839 } else if (prefixes & PREFIX_REPNZ) {
7840 /* bndcn */
7841 if (reg >= 4
7842 || (prefixes & PREFIX_LOCK)
7843 || s->aflag == MO_16) {
7844 goto illegal_op;
7846 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
7847 } else if (prefixes & PREFIX_DATA) {
7848 /* bndmov -- to reg/mem */
7849 if (reg >= 4 || s->aflag == MO_16) {
7850 goto illegal_op;
7852 if (mod == 3) {
7853 int reg2 = (modrm & 7) | REX_B(s);
7854 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7855 goto illegal_op;
7857 if (s->flags & HF_MPX_IU_MASK) {
7858 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
7859 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
7861 } else {
7862 gen_lea_modrm(env, s, modrm);
7863 if (CODE64(s)) {
7864 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7865 s->mem_index, MO_LEQ);
7866 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7867 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7868 s->mem_index, MO_LEQ);
7869 } else {
7870 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7871 s->mem_index, MO_LEUL);
7872 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7873 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7874 s->mem_index, MO_LEUL);
7877 } else if (mod != 3) {
7878 /* bndstx */
7879 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7880 if (reg >= 4
7881 || (prefixes & PREFIX_LOCK)
7882 || s->aflag == MO_16
7883 || a.base < -1) {
7884 goto illegal_op;
7886 if (a.base >= 0) {
7887 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7888 } else {
7889 tcg_gen_movi_tl(cpu_A0, 0);
7891 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7892 if (a.index >= 0) {
7893 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7894 } else {
7895 tcg_gen_movi_tl(cpu_T0, 0);
7897 if (CODE64(s)) {
7898 gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
7899 cpu_bndl[reg], cpu_bndu[reg]);
7900 } else {
7901 gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
7902 cpu_bndl[reg], cpu_bndu[reg]);
7906 gen_nop_modrm(env, s, modrm);
7907 break;
7908 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
7909 modrm = cpu_ldub_code(env, s->pc++);
7910 gen_nop_modrm(env, s, modrm);
7911 break;
7912 case 0x120: /* mov reg, crN */
7913 case 0x122: /* mov crN, reg */
7914 if (s->cpl != 0) {
7915 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7916 } else {
7917 modrm = cpu_ldub_code(env, s->pc++);
7918 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7919 * AMD documentation (24594.pdf) and testing of
7920 * intel 386 and 486 processors all show that the mod bits
7921 * are assumed to be 1's, regardless of actual values.
7922 */
7923 rm = (modrm & 7) | REX_B(s);
7924 reg = ((modrm >> 3) & 7) | rex_r;
7925 if (CODE64(s))
7926 ot = MO_64;
7927 else
7928 ot = MO_32;
7929 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7930 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7931 reg = 8;
7933 switch(reg) {
7934 case 0:
7935 case 2:
7936 case 3:
7937 case 4:
7938 case 8:
7939 gen_update_cc_op(s);
7940 gen_jmp_im(pc_start - s->cs_base);
7941 if (b & 2) {
7942 if (s->tb->cflags & CF_USE_ICOUNT) {
7943 gen_io_start();
7945 gen_op_mov_v_reg(ot, cpu_T0, rm);
7946 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7947 cpu_T0);
7948 if (s->tb->cflags & CF_USE_ICOUNT) {
7949 gen_io_end();
7951 gen_jmp_im(s->pc - s->cs_base);
7952 gen_eob(s);
7953 } else {
7954 if (s->tb->cflags & CF_USE_ICOUNT) {
7955 gen_io_start();
7957 gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
7958 gen_op_mov_reg_v(ot, rm, cpu_T0);
7959 if (s->tb->cflags & CF_USE_ICOUNT) {
7960 gen_io_end();
7963 break;
7964 default:
7965 goto unknown_op;
7968 break;
7969 case 0x121: /* mov reg, drN */
7970 case 0x123: /* mov drN, reg */
7971 if (s->cpl != 0) {
7972 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7973 } else {
7974 modrm = cpu_ldub_code(env, s->pc++);
7975 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7976 * AMD documentation (24594.pdf) and testing of
7977 * intel 386 and 486 processors all show that the mod bits
7978 * are assumed to be 1's, regardless of actual values.
7979 */
7980 rm = (modrm & 7) | REX_B(s);
7981 reg = ((modrm >> 3) & 7) | rex_r;
7982 if (CODE64(s))
7983 ot = MO_64;
7984 else
7985 ot = MO_32;
7986 if (reg >= 8) {
7987 goto illegal_op;
7989 if (b & 2) {
7990 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7991 gen_op_mov_v_reg(ot, cpu_T0, rm);
7992 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7993 gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
7994 gen_jmp_im(s->pc - s->cs_base);
7995 gen_eob(s);
7996 } else {
7997 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7998 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7999 gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
8000 gen_op_mov_reg_v(ot, rm, cpu_T0);
8003 break;
8004 case 0x106: /* clts */
8005 if (s->cpl != 0) {
8006 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
8007 } else {
8008 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
8009 gen_helper_clts(cpu_env);
8010 /* abort block because static cpu state changed */
8011 gen_jmp_im(s->pc - s->cs_base);
8012 gen_eob(s);
8014 break;
8015 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
8016 case 0x1c3: /* MOVNTI reg, mem */
8017 if (!(s->cpuid_features & CPUID_SSE2))
8018 goto illegal_op;
8019 ot = mo_64_32(dflag);
8020 modrm = cpu_ldub_code(env, s->pc++);
8021 mod = (modrm >> 6) & 3;
8022 if (mod == 3)
8023 goto illegal_op;
8024 reg = ((modrm >> 3) & 7) | rex_r;
8025 /* generate a generic store */
8026 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
8027 break;
8028 case 0x1ae:
8029 modrm = cpu_ldub_code(env, s->pc++);
8030 switch (modrm) {
8031 CASE_MODRM_MEM_OP(0): /* fxsave */
8032 if (!(s->cpuid_features & CPUID_FXSR)
8033 || (prefixes & PREFIX_LOCK)) {
8034 goto illegal_op;
8036 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
8037 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8038 break;
8040 gen_lea_modrm(env, s, modrm);
8041 gen_helper_fxsave(cpu_env, cpu_A0);
8042 break;
8044 CASE_MODRM_MEM_OP(1): /* fxrstor */
8045 if (!(s->cpuid_features & CPUID_FXSR)
8046 || (prefixes & PREFIX_LOCK)) {
8047 goto illegal_op;
8049 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
8050 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8051 break;
8053 gen_lea_modrm(env, s, modrm);
8054 gen_helper_fxrstor(cpu_env, cpu_A0);
8055 break;
8057 CASE_MODRM_MEM_OP(2): /* ldmxcsr */
8058 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
8059 goto illegal_op;
8061 if (s->flags & HF_TS_MASK) {
8062 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8063 break;
8065 gen_lea_modrm(env, s, modrm);
8066 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
8067 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
8068 break;
8070 CASE_MODRM_MEM_OP(3): /* stmxcsr */
8071 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
8072 goto illegal_op;
8074 if (s->flags & HF_TS_MASK) {
8075 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8076 break;
8078 gen_lea_modrm(env, s, modrm);
8079 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
8080 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
8081 break;
8083 CASE_MODRM_MEM_OP(4): /* xsave */
8084 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
8085 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
8086 | PREFIX_REPZ | PREFIX_REPNZ))) {
8087 goto illegal_op;
8089 gen_lea_modrm(env, s, modrm);
8090 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
8091 cpu_regs[R_EDX]);
8092 gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
8093 break;
8095 CASE_MODRM_MEM_OP(5): /* xrstor */
8096 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
8097 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
8098 | PREFIX_REPZ | PREFIX_REPNZ))) {
8099 goto illegal_op;
8101 gen_lea_modrm(env, s, modrm);
8102 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
8103 cpu_regs[R_EDX]);
8104 gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
8105 /* XRSTOR is how MPX is enabled, which changes how
8106 we translate. Thus we need to end the TB. */
8107 gen_update_cc_op(s);
8108 gen_jmp_im(s->pc - s->cs_base);
8109 gen_eob(s);
8110 break;

        CASE_MODRM_MEM_OP(6): /* xsaveopt / clwb */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clwb */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
                    goto illegal_op;
                }
                gen_nop_modrm(env, s, modrm);
            } else {
                /* xsaveopt */
                if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                    || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
                    || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
                    goto illegal_op;
                }
                gen_lea_modrm(env, s, modrm);
                tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
                                      cpu_regs[R_EDX]);
                gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
            }
            break;

        CASE_MODRM_MEM_OP(7): /* clflush / clflushopt */
            if (prefixes & PREFIX_LOCK) {
                goto illegal_op;
            }
            if (prefixes & PREFIX_DATA) {
                /* clflushopt */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
                    goto illegal_op;
                }
            } else {
                /* clflush */
                if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
                    || !(s->cpuid_features & CPUID_CLFLUSH)) {
                    goto illegal_op;
                }
            }
            gen_nop_modrm(env, s, modrm);
            break;
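
        /* With mod == 3, /0../3 of group 15 encode rdfsbase, rdgsbase,
           wrfsbase and wrgsbase.  Each /n spans eight modrm values, one
           per register, so /1 covers 0xc8-0xcf and /3 covers 0xd8-0xdf. */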
        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
            if (CODE64(s)
                && (prefixes & PREFIX_REPZ)
                && !(prefixes & PREFIX_LOCK)
                && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
                TCGv base, treg, src, dst;

                /* Preserve hflags bits by testing CR4 at runtime.  */
                tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
                gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);

                base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
                treg = cpu_regs[(modrm & 7) | REX_B(s)];

                if (modrm & 0x10) {
                    /* wr*base */
                    dst = base, src = treg;
                } else {
                    /* rd*base */
                    dst = treg, src = base;
                }

                if (s->dflag == MO_32) {
                    tcg_gen_ext32u_tl(dst, src);
                } else {
                    tcg_gen_mov_tl(dst, src);
                }
                break;
            }
            goto unknown_op;
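
        /* The x86 fences map directly onto TCG memory barriers below:
           sfence orders stores (TCG_MO_ST_ST), lfence orders loads
           (TCG_MO_LD_LD) and mfence orders both (TCG_MO_ALL). */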
        case 0xf8: /* sfence / pcommit */
            if (prefixes & PREFIX_DATA) {
                /* pcommit */
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
                    || (prefixes & PREFIX_LOCK)) {
                    goto illegal_op;
                }
                break;
            }
            /* fallthru */
        case 0xf9 ... 0xff: /* sfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ST_ST | TCG_BAR_SC);
            break;
        case 0xe8 ... 0xef: /* lfence */
            if (!(s->cpuid_features & CPUID_SSE)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_LD_LD | TCG_BAR_SC);
            break;
        case 0xf0 ... 0xf7: /* mfence */
            if (!(s->cpuid_features & CPUID_SSE2)
                || (prefixes & PREFIX_LOCK)) {
                goto illegal_op;
            }
            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
            break;

        default:
            goto unknown_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = cpu_ldub_code(env, s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        gen_update_cc_op(s);
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm(cpu_env);
        gen_eob(s);
        break;
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = cpu_ldub_code(env, s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;

        if (s->prefix & PREFIX_DATA) {
            ot = MO_16;
        } else {
            ot = mo_64_32(dflag);
        }

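        /* Zero-extend the operand and keep a copy in cc_src: under
           CC_OP_POPCNT the flags are computed lazily from it (ZF is set
           iff the source operand was zero). */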
        gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
        gen_extu(ot, cpu_T0);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
        tcg_gen_ctpop_tl(cpu_T0, cpu_T0);
        gen_op_mov_reg_v(ot, reg, cpu_T0);

        set_cc_op(s, CC_OP_POPCNT);
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
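        /* fall through to the common SSE dispatch */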
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto unknown_op;
    }
    return s->pc;
 illegal_op:
    gen_illegal_opcode(s);
    return s->pc;
 unknown_op:
    gen_unknown_opcode(env, s);
    return s->pc;
}
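
/* Register the fixed TCG globals used throughout translation: the env
   pointer, the lazy condition-code state, the guest GPRs, the segment
   bases and the MPX bound registers.  Runs once, guarded by
   'initialized'. */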
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
        [R_EAX] = "rax",
        [R_EBX] = "rbx",
        [R_ECX] = "rcx",
        [R_EDX] = "rdx",
        [R_ESI] = "rsi",
        [R_EDI] = "rdi",
        [R_EBP] = "rbp",
        [R_ESP] = "rsp",
        [8] = "r8",
        [9] = "r9",
        [10] = "r10",
        [11] = "r11",
        [12] = "r12",
        [13] = "r13",
        [14] = "r14",
        [15] = "r15",
#else
        [R_EAX] = "eax",
        [R_EBX] = "ebx",
        [R_ECX] = "ecx",
        [R_EDX] = "edx",
        [R_ESI] = "esi",
        [R_EDI] = "edi",
        [R_EBP] = "ebp",
        [R_ESP] = "esp",
#endif
    };
    static const char seg_base_names[6][8] = {
        [R_CS] = "cs_base",
        [R_DS] = "ds_base",
        [R_ES] = "es_base",
        [R_FS] = "fs_base",
        [R_GS] = "gs_base",
        [R_SS] = "ss_base",
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;
    static bool initialized;

    if (initialized) {
        return;
    }
    initialized = true;

    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    tcg_ctx.tcg_env = cpu_env;
    cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(cpu_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(cpu_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}

/* generate intermediate code for basic block 'tb'.  */
void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint32_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
#ifdef CONFIG_SOFTMMU
    dc->mem_index = cpu_mmu_index(env, false);
#endif
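    /* Cache the guest's CPUID feature words in the DisasContext so the
       decoder can test instruction availability without reaching back
       into env on every insn. */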
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK));
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed with different paths
       in !repz_opt and repz_opt modes.  The former is used in all
       cases except single step mode.  This setting disables the
       jump optimization, making the control paths equivalent in
       run and single step modes.
       As a result there is no jump optimization for repz in
       record/replay modes, and there will always be an additional
       step for ecx=0 when icount is enabled.
     */
    dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
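    /* Main translation loop: decode one guest instruction per iteration
       until something (a jump, an exception, a page or size limit)
       forces the TB to end. */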
    for(;;) {
        tcg_gen_insn_start(pc_ptr, dc->cc_op);
        num_insns++;

        /* If RF is set, suppress an internally generated breakpoint.  */
        if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
                                         tb->flags & HF_RF_MASK
                                         ? BP_GDB : BP_ANY))) {
            gen_debug(dc, pc_ptr - dc->cs_base);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be properly
               cleared -- thus we increment the PC here so that the logic
               setting tb->size below does the right thing.  */
            pc_ptr += 1;
            goto done_generating;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        pc_ptr = disas_insn(env, dc, pc_ptr);
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if in single step mode, we generate only one instruction and
           generate an exception */
        /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* Do not cross a page boundary in icount mode, since that can
           raise an exception.  End the TB when the next instruction might
           cross the boundary, so that a boundary is only ever crossed by
           the first instruction of a block; if the current instruction
           already crossed it, that is fine, because no exception has
           stopped this code.
         */
        if ((tb->cflags & CF_USE_ICOUNT)
            && ((pc_ptr & TARGET_PAGE_MASK)
                != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
                || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if the translation grows too long, stop generation too */
        if (tcg_op_buf_full() ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
 done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(pc_start)) {
        int disas_flags;
        qemu_log_lock();
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
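        /* Flags for the target disassembler: 2 selects 64-bit, 1 selects
           16-bit (i8086) and 0 selects 32-bit decoding. */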
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
        qemu_log_unlock();
    }
#endif

    tb->size = pc_ptr - pc_start;
    tb->icount = num_insns;
}
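
/* Called when unwinding after an exception: data[] holds the values
   recorded by tcg_gen_insn_start() above, i.e. data[0] is the pc
   (eip + cs_base) and data[1] the cc_op at the start of the faulting
   instruction. */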
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}