qemu/ar7.git: target-i386/translate.c
1 /*
2 * i386 translation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "tcg-op.h"
25 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
34 #define PREFIX_REPZ 0x01
35 #define PREFIX_REPNZ 0x02
36 #define PREFIX_LOCK 0x04
37 #define PREFIX_DATA 0x08
38 #define PREFIX_ADR 0x10
39 #define PREFIX_VEX 0x20
41 #ifdef TARGET_X86_64
42 #define CODE64(s) ((s)->code64)
43 #define REX_X(s) ((s)->rex_x)
44 #define REX_B(s) ((s)->rex_b)
45 #else
46 #define CODE64(s) 0
47 #define REX_X(s) 0
48 #define REX_B(s) 0
49 #endif
51 #ifdef TARGET_X86_64
52 # define ctztl ctz64
53 # define clztl clz64
54 #else
55 # define ctztl ctz32
56 # define clztl clz32
57 #endif
59 /* For a switch indexed by MODRM, match all memory operands for a given OP. */
60 #define CASE_MEM_OP(OP) \
61 case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
62 case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
63 case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
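/* e.g. in a "switch (modrm)" decoder, "CASE_MEM_OP(3):" matches every modrm
   byte whose reg field is 3 and whose mod field is 0, 1 or 2, i.e. all
   memory (non-register) forms of that opcode extension. */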
65 //#define MACRO_TEST 1
67 /* global register indexes */
68 static TCGv_ptr cpu_env;
69 static TCGv cpu_A0;
70 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
71 static TCGv_i32 cpu_cc_op;
72 static TCGv cpu_regs[CPU_NB_REGS];
73 static TCGv cpu_seg_base[6];
74 static TCGv_i64 cpu_bndl[4];
75 static TCGv_i64 cpu_bndu[4];
76 /* local temps */
77 static TCGv cpu_T0, cpu_T1;
78 /* local register indexes (only used inside old micro ops) */
79 static TCGv cpu_tmp0, cpu_tmp4;
80 static TCGv_ptr cpu_ptr0, cpu_ptr1;
81 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
82 static TCGv_i64 cpu_tmp1_i64;
84 #include "exec/gen-icount.h"
86 #ifdef TARGET_X86_64
87 static int x86_64_hregs;
88 #endif
90 typedef struct DisasContext {
91 /* current insn context */
92 int override; /* -1 if no override */
93 int prefix;
94 TCGMemOp aflag;
95 TCGMemOp dflag;
96 target_ulong pc; /* pc = eip + cs_base */
97 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
98 static state change (stop translation) */
99 /* current block context */
100 target_ulong cs_base; /* base of CS segment */
101 int pe; /* protected mode */
102 int code32; /* 32 bit code segment */
103 #ifdef TARGET_X86_64
104 int lma; /* long mode active */
105 int code64; /* 64 bit code segment */
106 int rex_x, rex_b;
107 #endif
108 int vex_l; /* vex vector length */
109 int vex_v; /* vex vvvv register, without 1's complement. */
110 int ss32; /* 32 bit stack segment */
111 CCOp cc_op; /* current CC operation */
112 bool cc_op_dirty;
113 int addseg; /* non-zero if any of DS/ES/SS has a non-zero base */
114 int f_st; /* currently unused */
115 int vm86; /* vm86 mode */
116 int cpl;
117 int iopl;
118 int tf; /* TF cpu flag */
119 int singlestep_enabled; /* "hardware" single step enabled */
120 int jmp_opt; /* use direct block chaining for direct jumps */
121 int repz_opt; /* optimize jumps within repz instructions */
122 int mem_index; /* select memory access functions */
123 uint64_t flags; /* all execution flags */
124 struct TranslationBlock *tb;
125 int popl_esp_hack; /* for correct popl with esp base handling */
126 int rip_offset; /* only used in x86_64, but left for simplicity */
127 int cpuid_features;
128 int cpuid_ext_features;
129 int cpuid_ext2_features;
130 int cpuid_ext3_features;
131 int cpuid_7_0_ebx_features;
132 int cpuid_xsave_features;
133 } DisasContext;
135 static void gen_eob(DisasContext *s);
136 static void gen_jmp(DisasContext *s, target_ulong eip);
137 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
138 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
140 /* i386 arith/logic operations */
141 enum {
142 OP_ADDL,
143 OP_ORL,
144 OP_ADCL,
145 OP_SBBL,
146 OP_ANDL,
147 OP_SUBL,
148 OP_XORL,
149 OP_CMPL,
152 /* i386 shift ops */
153 enum {
154 OP_ROL,
155 OP_ROR,
156 OP_RCL,
157 OP_RCR,
158 OP_SHL,
159 OP_SHR,
160 OP_SHL1, /* undocumented */
161 OP_SAR = 7,
164 enum {
165 JCC_O,
166 JCC_B,
167 JCC_Z,
168 JCC_BE,
169 JCC_S,
170 JCC_P,
171 JCC_L,
172 JCC_LE,
175 enum {
176 /* I386 int registers */
177 OR_EAX, /* MUST be even numbered */
178 OR_ECX,
179 OR_EDX,
180 OR_EBX,
181 OR_ESP,
182 OR_EBP,
183 OR_ESI,
184 OR_EDI,
186 OR_TMP0 = 16, /* temporary operand register */
187 OR_TMP1,
188 OR_A0, /* temporary register used when doing address evaluation */
191 enum {
192 USES_CC_DST = 1,
193 USES_CC_SRC = 2,
194 USES_CC_SRC2 = 4,
195 USES_CC_SRCT = 8,
198 /* Bit set if the global variable is live after setting CC_OP to X. */
199 static const uint8_t cc_op_live[CC_OP_NB] = {
200 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
201 [CC_OP_EFLAGS] = USES_CC_SRC,
202 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
203 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
204 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
205 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
206 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
207 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
208 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
209 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
210 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
211 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
212 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
213 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
214 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
215 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
216 [CC_OP_CLR] = 0,
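/* Switch the lazily-tracked flags state to OP, discarding whichever of the
   CC_* globals the new encoding no longer uses. */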
219 static void set_cc_op(DisasContext *s, CCOp op)
221 int dead;
223 if (s->cc_op == op) {
224 return;
227 /* Discard CC computation that will no longer be used. */
228 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
229 if (dead & USES_CC_DST) {
230 tcg_gen_discard_tl(cpu_cc_dst);
232 if (dead & USES_CC_SRC) {
233 tcg_gen_discard_tl(cpu_cc_src);
235 if (dead & USES_CC_SRC2) {
236 tcg_gen_discard_tl(cpu_cc_src2);
238 if (dead & USES_CC_SRCT) {
239 tcg_gen_discard_tl(cpu_cc_srcT);
242 if (op == CC_OP_DYNAMIC) {
243 /* The DYNAMIC setting is translator only, and should never be
244 stored. Thus we always consider it clean. */
245 s->cc_op_dirty = false;
246 } else {
247 /* Discard any computed CC_OP value (see shifts). */
248 if (s->cc_op == CC_OP_DYNAMIC) {
249 tcg_gen_discard_i32(cpu_cc_op);
251 s->cc_op_dirty = true;
253 s->cc_op = op;
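/* Flush the tracked cc_op value to the cpu_cc_op global if it has not been
   written back since the last set_cc_op(). */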
256 static void gen_update_cc_op(DisasContext *s)
258 if (s->cc_op_dirty) {
259 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
260 s->cc_op_dirty = false;
264 #ifdef TARGET_X86_64
266 #define NB_OP_SIZES 4
268 #else /* !TARGET_X86_64 */
270 #define NB_OP_SIZES 3
272 #endif /* !TARGET_X86_64 */
274 #if defined(HOST_WORDS_BIGENDIAN)
275 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
276 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
277 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
278 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
279 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
280 #else
281 #define REG_B_OFFSET 0
282 #define REG_H_OFFSET 1
283 #define REG_W_OFFSET 0
284 #define REG_L_OFFSET 0
285 #define REG_LH_OFFSET 4
286 #endif
288 /* In instruction encodings for byte register accesses the
289 * register number usually indicates "low 8 bits of register N";
290 * however there are some special cases where N 4..7 indicates
291 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
292 * true for this special case, false otherwise.
294 static inline bool byte_reg_is_xH(int reg)
296 if (reg < 4) {
297 return false;
299 #ifdef TARGET_X86_64
300 if (reg >= 8 || x86_64_hregs) {
301 return false;
303 #endif
304 return true;
307 /* Select the size of a push/pop operation. */
308 static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
310 if (CODE64(s)) {
311 return ot == MO_16 ? MO_16 : MO_64;
312 } else {
313 return ot;
317 /* Select the size of the stack pointer. */
318 static inline TCGMemOp mo_stacksize(DisasContext *s)
320 return CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
323 /* Select only size 64 else 32. Used for SSE operand sizes. */
324 static inline TCGMemOp mo_64_32(TCGMemOp ot)
326 #ifdef TARGET_X86_64
327 return ot == MO_64 ? MO_64 : MO_32;
328 #else
329 return MO_32;
330 #endif
333 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
334 byte vs word opcodes. */
335 static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
337 return b & 1 ? ot : MO_8;
340 /* Select size 8 if lsb of B is clear, else OT capped at 32.
341 Used for decoding operand size of port opcodes. */
342 static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
344 return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
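/* Store t0 into general register reg with operand size ot, honouring the
   AH/CH/DH/BH encoding for byte registers 4..7. */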
347 static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
349 switch(ot) {
350 case MO_8:
351 if (!byte_reg_is_xH(reg)) {
352 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
353 } else {
354 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
356 break;
357 case MO_16:
358 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
359 break;
360 case MO_32:
361 /* For x86_64, this sets the higher half of register to zero.
362 For i386, this is equivalent to a mov. */
363 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
364 break;
365 #ifdef TARGET_X86_64
366 case MO_64:
367 tcg_gen_mov_tl(cpu_regs[reg], t0);
368 break;
369 #endif
370 default:
371 tcg_abort();
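/* Load general register reg into t0 with operand size ot. */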
375 static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
377 if (ot == MO_8 && byte_reg_is_xH(reg)) {
378 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
379 tcg_gen_ext8u_tl(t0, t0);
380 } else {
381 tcg_gen_mov_tl(t0, cpu_regs[reg]);
385 static void gen_add_A0_im(DisasContext *s, int val)
387 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
388 if (!CODE64(s)) {
389 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
393 static inline void gen_op_jmp_v(TCGv dest)
395 tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
398 static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
400 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
401 gen_op_mov_reg_v(size, reg, cpu_tmp0);
404 static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
406 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T0);
407 gen_op_mov_reg_v(size, reg, cpu_tmp0);
410 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
412 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
415 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
417 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
420 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
422 if (d == OR_TMP0) {
423 gen_op_st_v(s, idx, cpu_T0, cpu_A0);
424 } else {
425 gen_op_mov_reg_v(idx, d, cpu_T0);
429 static inline void gen_jmp_im(target_ulong pc)
431 tcg_gen_movi_tl(cpu_tmp0, pc);
432 gen_op_jmp_v(cpu_tmp0);
435 /* Compute SEG:REG into A0. SEG is selected from the override segment
436 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
437 indicate no override. */
438 static void gen_lea_v_seg(DisasContext *s, TCGMemOp aflag, TCGv a0,
439 int def_seg, int ovr_seg)
441 switch (aflag) {
442 #ifdef TARGET_X86_64
443 case MO_64:
444 if (ovr_seg < 0) {
445 tcg_gen_mov_tl(cpu_A0, a0);
446 return;
448 break;
449 #endif
450 case MO_32:
451 /* 32 bit address */
452 if (ovr_seg < 0) {
453 if (s->addseg) {
454 ovr_seg = def_seg;
455 } else {
456 tcg_gen_ext32u_tl(cpu_A0, a0);
457 return;
460 break;
461 case MO_16:
462 /* 16 bit address */
463 if (ovr_seg < 0) {
464 ovr_seg = def_seg;
466 tcg_gen_ext16u_tl(cpu_A0, a0);
467 /* ADDSEG will only be false in 16-bit mode for LEA. */
468 if (!s->addseg) {
469 return;
471 a0 = cpu_A0;
472 break;
473 default:
474 tcg_abort();
477 if (ovr_seg >= 0) {
478 TCGv seg = cpu_seg_base[ovr_seg];
480 if (aflag == MO_64) {
481 tcg_gen_add_tl(cpu_A0, a0, seg);
482 } else if (CODE64(s)) {
483 tcg_gen_ext32u_tl(cpu_A0, a0);
484 tcg_gen_add_tl(cpu_A0, cpu_A0, seg);
485 } else {
486 tcg_gen_add_tl(cpu_A0, a0, seg);
487 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
492 static inline void gen_string_movl_A0_ESI(DisasContext *s)
494 gen_lea_v_seg(s, s->aflag, cpu_regs[R_ESI], R_DS, s->override);
497 static inline void gen_string_movl_A0_EDI(DisasContext *s)
499 gen_lea_v_seg(s, s->aflag, cpu_regs[R_EDI], R_ES, -1);
502 static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
504 tcg_gen_ld32s_tl(cpu_T0, cpu_env, offsetof(CPUX86State, df));
505 tcg_gen_shli_tl(cpu_T0, cpu_T0, ot);
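/* Sign- or zero-extend src into dst according to size and return the result;
   when no extension is needed, src itself is returned. */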
508 static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
510 switch (size) {
511 case MO_8:
512 if (sign) {
513 tcg_gen_ext8s_tl(dst, src);
514 } else {
515 tcg_gen_ext8u_tl(dst, src);
517 return dst;
518 case MO_16:
519 if (sign) {
520 tcg_gen_ext16s_tl(dst, src);
521 } else {
522 tcg_gen_ext16u_tl(dst, src);
524 return dst;
525 #ifdef TARGET_X86_64
526 case MO_32:
527 if (sign) {
528 tcg_gen_ext32s_tl(dst, src);
529 } else {
530 tcg_gen_ext32u_tl(dst, src);
532 return dst;
533 #endif
534 default:
535 return src;
539 static void gen_extu(TCGMemOp ot, TCGv reg)
541 gen_ext_tl(reg, reg, ot, false);
544 static void gen_exts(TCGMemOp ot, TCGv reg)
546 gen_ext_tl(reg, reg, ot, true);
549 static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
551 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
552 gen_extu(size, cpu_tmp0);
553 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
556 static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
558 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
559 gen_extu(size, cpu_tmp0);
560 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
563 static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
565 switch (ot) {
566 case MO_8:
567 gen_helper_inb(v, cpu_env, n);
568 break;
569 case MO_16:
570 gen_helper_inw(v, cpu_env, n);
571 break;
572 case MO_32:
573 gen_helper_inl(v, cpu_env, n);
574 break;
575 default:
576 tcg_abort();
580 static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
582 switch (ot) {
583 case MO_8:
584 gen_helper_outb(cpu_env, v, n);
585 break;
586 case MO_16:
587 gen_helper_outw(cpu_env, v, n);
588 break;
589 case MO_32:
590 gen_helper_outl(cpu_env, v, n);
591 break;
592 default:
593 tcg_abort();
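/* Check that an IN/OUT of size ot on the port in T0 is allowed: consult the
   TSS I/O permission bitmap when running in protected mode with CPL > IOPL or
   in vm86 mode, and report the access to the SVM I/O intercept helper when
   SVM is active. */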
597 static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
598 uint32_t svm_flags)
600 target_ulong next_eip;
602 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
603 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
604 switch (ot) {
605 case MO_8:
606 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
607 break;
608 case MO_16:
609 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
610 break;
611 case MO_32:
612 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
613 break;
614 default:
615 tcg_abort();
618 if(s->flags & HF_SVMI_MASK) {
619 gen_update_cc_op(s);
620 gen_jmp_im(cur_eip);
621 svm_flags |= (1 << (4 + ot));
622 next_eip = s->pc - s->cs_base;
623 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
624 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
625 tcg_const_i32(svm_flags),
626 tcg_const_i32(next_eip - cur_eip));
630 static inline void gen_movs(DisasContext *s, TCGMemOp ot)
632 gen_string_movl_A0_ESI(s);
633 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
634 gen_string_movl_A0_EDI(s);
635 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
636 gen_op_movl_T0_Dshift(ot);
637 gen_op_add_reg_T0(s->aflag, R_ESI);
638 gen_op_add_reg_T0(s->aflag, R_EDI);
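/* The helpers below latch operands and results into the lazy condition-code
   globals (cc_dst/cc_src/cc_src2/cc_srcT) for the various CC_OP encodings. */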
641 static void gen_op_update1_cc(void)
643 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
646 static void gen_op_update2_cc(void)
648 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
649 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
652 static void gen_op_update3_cc(TCGv reg)
654 tcg_gen_mov_tl(cpu_cc_src2, reg);
655 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
656 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
659 static inline void gen_op_testl_T0_T1_cc(void)
661 tcg_gen_and_tl(cpu_cc_dst, cpu_T0, cpu_T1);
664 static void gen_op_update_neg_cc(void)
666 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
667 tcg_gen_neg_tl(cpu_cc_src, cpu_T0);
668 tcg_gen_movi_tl(cpu_cc_srcT, 0);
671 /* compute all eflags to cc_src */
672 static void gen_compute_eflags(DisasContext *s)
674 TCGv zero, dst, src1, src2;
675 int live, dead;
677 if (s->cc_op == CC_OP_EFLAGS) {
678 return;
680 if (s->cc_op == CC_OP_CLR) {
681 tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
682 set_cc_op(s, CC_OP_EFLAGS);
683 return;
686 TCGV_UNUSED(zero);
687 dst = cpu_cc_dst;
688 src1 = cpu_cc_src;
689 src2 = cpu_cc_src2;
691 /* Take care to not read values that are not live. */
692 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
693 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
694 if (dead) {
695 zero = tcg_const_tl(0);
696 if (dead & USES_CC_DST) {
697 dst = zero;
699 if (dead & USES_CC_SRC) {
700 src1 = zero;
702 if (dead & USES_CC_SRC2) {
703 src2 = zero;
707 gen_update_cc_op(s);
708 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
709 set_cc_op(s, CC_OP_EFLAGS);
711 if (dead) {
712 tcg_temp_free(zero);
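/* Describes how to evaluate a condition: test (reg & mask) against reg2 or
   the immediate imm with cond; no_setcond means reg already holds the value
   and only an optional inversion is needed. */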
716 typedef struct CCPrepare {
717 TCGCond cond;
718 TCGv reg;
719 TCGv reg2;
720 target_ulong imm;
721 target_ulong mask;
722 bool use_reg2;
723 bool no_setcond;
724 } CCPrepare;
726 /* compute eflags.C to reg */
727 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
729 TCGv t0, t1;
730 int size, shift;
732 switch (s->cc_op) {
733 case CC_OP_SUBB ... CC_OP_SUBQ:
734 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
735 size = s->cc_op - CC_OP_SUBB;
736 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
737 /* If no temporary was used, be careful not to alias t1 and t0. */
738 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
739 tcg_gen_mov_tl(t0, cpu_cc_srcT);
740 gen_extu(size, t0);
741 goto add_sub;
743 case CC_OP_ADDB ... CC_OP_ADDQ:
744 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
745 size = s->cc_op - CC_OP_ADDB;
746 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
747 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
748 add_sub:
749 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
750 .reg2 = t1, .mask = -1, .use_reg2 = true };
752 case CC_OP_LOGICB ... CC_OP_LOGICQ:
753 case CC_OP_CLR:
754 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
756 case CC_OP_INCB ... CC_OP_INCQ:
757 case CC_OP_DECB ... CC_OP_DECQ:
758 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
759 .mask = -1, .no_setcond = true };
761 case CC_OP_SHLB ... CC_OP_SHLQ:
762 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
763 size = s->cc_op - CC_OP_SHLB;
764 shift = (8 << size) - 1;
765 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
766 .mask = (target_ulong)1 << shift };
768 case CC_OP_MULB ... CC_OP_MULQ:
769 return (CCPrepare) { .cond = TCG_COND_NE,
770 .reg = cpu_cc_src, .mask = -1 };
772 case CC_OP_BMILGB ... CC_OP_BMILGQ:
773 size = s->cc_op - CC_OP_BMILGB;
774 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
775 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
777 case CC_OP_ADCX:
778 case CC_OP_ADCOX:
779 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
780 .mask = -1, .no_setcond = true };
782 case CC_OP_EFLAGS:
783 case CC_OP_SARB ... CC_OP_SARQ:
784 /* CC_SRC & 1 */
785 return (CCPrepare) { .cond = TCG_COND_NE,
786 .reg = cpu_cc_src, .mask = CC_C };
788 default:
789 /* The need to compute only C from CC_OP_DYNAMIC is important
790 in efficiently implementing e.g. INC at the start of a TB. */
791 gen_update_cc_op(s);
792 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
793 cpu_cc_src2, cpu_cc_op);
794 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
795 .mask = -1, .no_setcond = true };
799 /* compute eflags.P to reg */
800 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
802 gen_compute_eflags(s);
803 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
804 .mask = CC_P };
807 /* compute eflags.S to reg */
808 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
810 switch (s->cc_op) {
811 case CC_OP_DYNAMIC:
812 gen_compute_eflags(s);
813 /* FALLTHRU */
814 case CC_OP_EFLAGS:
815 case CC_OP_ADCX:
816 case CC_OP_ADOX:
817 case CC_OP_ADCOX:
818 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
819 .mask = CC_S };
820 case CC_OP_CLR:
821 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
822 default:
824 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
825 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
826 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
831 /* compute eflags.O to reg */
832 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
834 switch (s->cc_op) {
835 case CC_OP_ADOX:
836 case CC_OP_ADCOX:
837 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
838 .mask = -1, .no_setcond = true };
839 case CC_OP_CLR:
840 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
841 default:
842 gen_compute_eflags(s);
843 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
844 .mask = CC_O };
848 /* compute eflags.Z to reg */
849 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
851 switch (s->cc_op) {
852 case CC_OP_DYNAMIC:
853 gen_compute_eflags(s);
854 /* FALLTHRU */
855 case CC_OP_EFLAGS:
856 case CC_OP_ADCX:
857 case CC_OP_ADOX:
858 case CC_OP_ADCOX:
859 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
860 .mask = CC_Z };
861 case CC_OP_CLR:
862 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
863 default:
865 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
866 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
867 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
872 /* perform a conditional store into register 'reg' according to jump opcode
873 value 'b'. In the fast case, T0 is guaranteed not to be used. */
874 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
876 int inv, jcc_op, cond;
877 TCGMemOp size;
878 CCPrepare cc;
879 TCGv t0;
881 inv = b & 1;
882 jcc_op = (b >> 1) & 7;
884 switch (s->cc_op) {
885 case CC_OP_SUBB ... CC_OP_SUBQ:
886 /* We optimize relational operators for the cmp/jcc case. */
887 size = s->cc_op - CC_OP_SUBB;
888 switch (jcc_op) {
889 case JCC_BE:
890 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
891 gen_extu(size, cpu_tmp4);
892 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
893 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
894 .reg2 = t0, .mask = -1, .use_reg2 = true };
895 break;
897 case JCC_L:
898 cond = TCG_COND_LT;
899 goto fast_jcc_l;
900 case JCC_LE:
901 cond = TCG_COND_LE;
902 fast_jcc_l:
903 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
904 gen_exts(size, cpu_tmp4);
905 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
906 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
907 .reg2 = t0, .mask = -1, .use_reg2 = true };
908 break;
910 default:
911 goto slow_jcc;
913 break;
915 default:
916 slow_jcc:
917 /* This actually generates good code for JC, JZ and JS. */
918 switch (jcc_op) {
919 case JCC_O:
920 cc = gen_prepare_eflags_o(s, reg);
921 break;
922 case JCC_B:
923 cc = gen_prepare_eflags_c(s, reg);
924 break;
925 case JCC_Z:
926 cc = gen_prepare_eflags_z(s, reg);
927 break;
928 case JCC_BE:
929 gen_compute_eflags(s);
930 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
931 .mask = CC_Z | CC_C };
932 break;
933 case JCC_S:
934 cc = gen_prepare_eflags_s(s, reg);
935 break;
936 case JCC_P:
937 cc = gen_prepare_eflags_p(s, reg);
938 break;
939 case JCC_L:
940 gen_compute_eflags(s);
941 if (TCGV_EQUAL(reg, cpu_cc_src)) {
942 reg = cpu_tmp0;
944 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
945 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
946 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
947 .mask = CC_S };
948 break;
949 default:
950 case JCC_LE:
951 gen_compute_eflags(s);
952 if (TCGV_EQUAL(reg, cpu_cc_src)) {
953 reg = cpu_tmp0;
955 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
956 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
957 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
958 .mask = CC_S | CC_Z };
959 break;
961 break;
964 if (inv) {
965 cc.cond = tcg_invert_cond(cc.cond);
967 return cc;
970 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
972 CCPrepare cc = gen_prepare_cc(s, b, reg);
974 if (cc.no_setcond) {
975 if (cc.cond == TCG_COND_EQ) {
976 tcg_gen_xori_tl(reg, cc.reg, 1);
977 } else {
978 tcg_gen_mov_tl(reg, cc.reg);
980 return;
983 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
984 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
985 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
986 tcg_gen_andi_tl(reg, reg, 1);
987 return;
989 if (cc.mask != -1) {
990 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
991 cc.reg = reg;
993 if (cc.use_reg2) {
994 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
995 } else {
996 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1000 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1002 gen_setcc1(s, JCC_B << 1, reg);
1005 /* generate a conditional jump to label 'l1' according to jump opcode
1006 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1007 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1009 CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
1011 if (cc.mask != -1) {
1012 tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
1013 cc.reg = cpu_T0;
1015 if (cc.use_reg2) {
1016 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1017 } else {
1018 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1022 /* Generate a conditional jump to label 'l1' according to jump opcode
1023 value 'b'. In the fast case, T0 is guaranteed not to be used.
1024 A translation block must end soon. */
1025 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1027 CCPrepare cc = gen_prepare_cc(s, b, cpu_T0);
1029 gen_update_cc_op(s);
1030 if (cc.mask != -1) {
1031 tcg_gen_andi_tl(cpu_T0, cc.reg, cc.mask);
1032 cc.reg = cpu_T0;
1034 set_cc_op(s, CC_OP_DYNAMIC);
1035 if (cc.use_reg2) {
1036 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1037 } else {
1038 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1042 /* XXX: does not work with gdbstub "ice" single step - not a
1043 serious problem */
1044 static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1046 TCGLabel *l1 = gen_new_label();
1047 TCGLabel *l2 = gen_new_label();
1048 gen_op_jnz_ecx(s->aflag, l1);
1049 gen_set_label(l2);
1050 gen_jmp_tb(s, next_eip, 1);
1051 gen_set_label(l1);
1052 return l2;
1055 static inline void gen_stos(DisasContext *s, TCGMemOp ot)
1057 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
1058 gen_string_movl_A0_EDI(s);
1059 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1060 gen_op_movl_T0_Dshift(ot);
1061 gen_op_add_reg_T0(s->aflag, R_EDI);
1064 static inline void gen_lods(DisasContext *s, TCGMemOp ot)
1066 gen_string_movl_A0_ESI(s);
1067 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1068 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
1069 gen_op_movl_T0_Dshift(ot);
1070 gen_op_add_reg_T0(s->aflag, R_ESI);
1073 static inline void gen_scas(DisasContext *s, TCGMemOp ot)
1075 gen_string_movl_A0_EDI(s);
1076 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
1077 gen_op(s, OP_CMPL, ot, R_EAX);
1078 gen_op_movl_T0_Dshift(ot);
1079 gen_op_add_reg_T0(s->aflag, R_EDI);
1082 static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
1084 gen_string_movl_A0_EDI(s);
1085 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
1086 gen_string_movl_A0_ESI(s);
1087 gen_op(s, OP_CMPL, ot, OR_TMP0);
1088 gen_op_movl_T0_Dshift(ot);
1089 gen_op_add_reg_T0(s->aflag, R_ESI);
1090 gen_op_add_reg_T0(s->aflag, R_EDI);
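/* After an IN/OUT, call the I/O breakpoint helper so that a debug breakpoint
   on this port can fire; only emitted when I/O breakpoints are in use. */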
1093 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1095 if (s->flags & HF_IOBPT_MASK) {
1096 TCGv_i32 t_size = tcg_const_i32(1 << ot);
1097 TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
1099 gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
1100 tcg_temp_free_i32(t_size);
1101 tcg_temp_free(t_next);
1106 static inline void gen_ins(DisasContext *s, TCGMemOp ot)
1108 if (s->tb->cflags & CF_USE_ICOUNT) {
1109 gen_io_start();
1111 gen_string_movl_A0_EDI(s);
1112 /* Note: we must do this dummy write first to be restartable in
1113 case of page fault. */
1114 tcg_gen_movi_tl(cpu_T0, 0);
1115 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1116 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1117 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1118 gen_helper_in_func(ot, cpu_T0, cpu_tmp2_i32);
1119 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
1120 gen_op_movl_T0_Dshift(ot);
1121 gen_op_add_reg_T0(s->aflag, R_EDI);
1122 gen_bpt_io(s, cpu_tmp2_i32, ot);
1123 if (s->tb->cflags & CF_USE_ICOUNT) {
1124 gen_io_end();
1128 static inline void gen_outs(DisasContext *s, TCGMemOp ot)
1130 if (s->tb->cflags & CF_USE_ICOUNT) {
1131 gen_io_start();
1133 gen_string_movl_A0_ESI(s);
1134 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1136 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1137 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1138 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T0);
1139 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1140 gen_op_movl_T0_Dshift(ot);
1141 gen_op_add_reg_T0(s->aflag, R_ESI);
1142 gen_bpt_io(s, cpu_tmp2_i32, ot);
1143 if (s->tb->cflags & CF_USE_ICOUNT) {
1144 gen_io_end();
1148 /* same method as Valgrind: we generate jumps to the current or next
1149 instruction */
1150 #define GEN_REPZ(op) \
1151 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1152 target_ulong cur_eip, target_ulong next_eip) \
1154 TCGLabel *l2; \
1155 gen_update_cc_op(s); \
1156 l2 = gen_jz_ecx_string(s, next_eip); \
1157 gen_ ## op(s, ot); \
1158 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1159 /* a loop would cause two single step exceptions if ECX = 1 \
1160 before rep string_insn */ \
1161 if (s->repz_opt) \
1162 gen_op_jz_ecx(s->aflag, l2); \
1163 gen_jmp(s, cur_eip); \
1166 #define GEN_REPZ2(op) \
1167 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1168 target_ulong cur_eip, \
1169 target_ulong next_eip, \
1170 int nz) \
1172 TCGLabel *l2; \
1173 gen_update_cc_op(s); \
1174 l2 = gen_jz_ecx_string(s, next_eip); \
1175 gen_ ## op(s, ot); \
1176 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1177 gen_update_cc_op(s); \
1178 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1179 if (s->repz_opt) \
1180 gen_op_jz_ecx(s->aflag, l2); \
1181 gen_jmp(s, cur_eip); \
1184 GEN_REPZ(movs)
1185 GEN_REPZ(stos)
1186 GEN_REPZ(lods)
1187 GEN_REPZ(ins)
1188 GEN_REPZ(outs)
1189 GEN_REPZ2(scas)
1190 GEN_REPZ2(cmps)
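/* Dispatch the x87 arithmetic group (opcode extension 0..7) acting on ST0
   and FT0; note that values 2 and 3 both emit fcom here. */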
1192 static void gen_helper_fp_arith_ST0_FT0(int op)
1194 switch (op) {
1195 case 0:
1196 gen_helper_fadd_ST0_FT0(cpu_env);
1197 break;
1198 case 1:
1199 gen_helper_fmul_ST0_FT0(cpu_env);
1200 break;
1201 case 2:
1202 gen_helper_fcom_ST0_FT0(cpu_env);
1203 break;
1204 case 3:
1205 gen_helper_fcom_ST0_FT0(cpu_env);
1206 break;
1207 case 4:
1208 gen_helper_fsub_ST0_FT0(cpu_env);
1209 break;
1210 case 5:
1211 gen_helper_fsubr_ST0_FT0(cpu_env);
1212 break;
1213 case 6:
1214 gen_helper_fdiv_ST0_FT0(cpu_env);
1215 break;
1216 case 7:
1217 gen_helper_fdivr_ST0_FT0(cpu_env);
1218 break;
1222 /* NOTE the exception in "r" op ordering */
1223 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1225 TCGv_i32 tmp = tcg_const_i32(opreg);
1226 switch (op) {
1227 case 0:
1228 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1229 break;
1230 case 1:
1231 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1232 break;
1233 case 4:
1234 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1235 break;
1236 case 5:
1237 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1238 break;
1239 case 6:
1240 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1241 break;
1242 case 7:
1243 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1244 break;
1248 /* if d == OR_TMP0, it means memory operand (address in A0) */
1249 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
1251 if (d != OR_TMP0) {
1252 gen_op_mov_v_reg(ot, cpu_T0, d);
1253 } else {
1254 gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
1256 switch(op) {
1257 case OP_ADCL:
1258 gen_compute_eflags_c(s1, cpu_tmp4);
1259 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
1260 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_tmp4);
1261 gen_op_st_rm_T0_A0(s1, ot, d);
1262 gen_op_update3_cc(cpu_tmp4);
1263 set_cc_op(s1, CC_OP_ADCB + ot);
1264 break;
1265 case OP_SBBL:
1266 gen_compute_eflags_c(s1, cpu_tmp4);
1267 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
1268 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_tmp4);
1269 gen_op_st_rm_T0_A0(s1, ot, d);
1270 gen_op_update3_cc(cpu_tmp4);
1271 set_cc_op(s1, CC_OP_SBBB + ot);
1272 break;
1273 case OP_ADDL:
1274 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
1275 gen_op_st_rm_T0_A0(s1, ot, d);
1276 gen_op_update2_cc();
1277 set_cc_op(s1, CC_OP_ADDB + ot);
1278 break;
1279 case OP_SUBL:
1280 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
1281 tcg_gen_sub_tl(cpu_T0, cpu_T0, cpu_T1);
1282 gen_op_st_rm_T0_A0(s1, ot, d);
1283 gen_op_update2_cc();
1284 set_cc_op(s1, CC_OP_SUBB + ot);
1285 break;
1286 default:
1287 case OP_ANDL:
1288 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
1289 gen_op_st_rm_T0_A0(s1, ot, d);
1290 gen_op_update1_cc();
1291 set_cc_op(s1, CC_OP_LOGICB + ot);
1292 break;
1293 case OP_ORL:
1294 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
1295 gen_op_st_rm_T0_A0(s1, ot, d);
1296 gen_op_update1_cc();
1297 set_cc_op(s1, CC_OP_LOGICB + ot);
1298 break;
1299 case OP_XORL:
1300 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_T1);
1301 gen_op_st_rm_T0_A0(s1, ot, d);
1302 gen_op_update1_cc();
1303 set_cc_op(s1, CC_OP_LOGICB + ot);
1304 break;
1305 case OP_CMPL:
1306 tcg_gen_mov_tl(cpu_cc_src, cpu_T1);
1307 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T0);
1308 tcg_gen_sub_tl(cpu_cc_dst, cpu_T0, cpu_T1);
1309 set_cc_op(s1, CC_OP_SUBB + ot);
1310 break;
1314 /* if d == OR_TMP0, it means memory operand (address in A0) */
1315 static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
1317 if (d != OR_TMP0) {
1318 gen_op_mov_v_reg(ot, cpu_T0, d);
1319 } else {
1320 gen_op_ld_v(s1, ot, cpu_T0, cpu_A0);
1322 gen_compute_eflags_c(s1, cpu_cc_src);
1323 if (c > 0) {
1324 tcg_gen_addi_tl(cpu_T0, cpu_T0, 1);
1325 set_cc_op(s1, CC_OP_INCB + ot);
1326 } else {
1327 tcg_gen_addi_tl(cpu_T0, cpu_T0, -1);
1328 set_cc_op(s1, CC_OP_DECB + ot);
1330 gen_op_st_rm_T0_A0(s1, ot, d);
1331 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
1334 static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
1335 TCGv shm1, TCGv count, bool is_right)
1337 TCGv_i32 z32, s32, oldop;
1338 TCGv z_tl;
1340 /* Store the results into the CC variables. If we know that the
1341 variable must be dead, store unconditionally. Otherwise we'll
1342 need to not disrupt the current contents. */
1343 z_tl = tcg_const_tl(0);
1344 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1345 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1346 result, cpu_cc_dst);
1347 } else {
1348 tcg_gen_mov_tl(cpu_cc_dst, result);
1350 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1351 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1352 shm1, cpu_cc_src);
1353 } else {
1354 tcg_gen_mov_tl(cpu_cc_src, shm1);
1356 tcg_temp_free(z_tl);
1358 /* Get the two potential CC_OP values into temporaries. */
1359 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1360 if (s->cc_op == CC_OP_DYNAMIC) {
1361 oldop = cpu_cc_op;
1362 } else {
1363 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1364 oldop = cpu_tmp3_i32;
1367 /* Conditionally store the CC_OP value. */
1368 z32 = tcg_const_i32(0);
1369 s32 = tcg_temp_new_i32();
1370 tcg_gen_trunc_tl_i32(s32, count);
1371 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1372 tcg_temp_free_i32(z32);
1373 tcg_temp_free_i32(s32);
1375 /* The CC_OP value is no longer predictable. */
1376 set_cc_op(s, CC_OP_DYNAMIC);
1379 static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1380 int is_right, int is_arith)
1382 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1384 /* load */
1385 if (op1 == OR_TMP0) {
1386 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1387 } else {
1388 gen_op_mov_v_reg(ot, cpu_T0, op1);
1391 tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
1392 tcg_gen_subi_tl(cpu_tmp0, cpu_T1, 1);
1394 if (is_right) {
1395 if (is_arith) {
1396 gen_exts(ot, cpu_T0);
1397 tcg_gen_sar_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1398 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
1399 } else {
1400 gen_extu(ot, cpu_T0);
1401 tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1402 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
1404 } else {
1405 tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1406 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
1409 /* store */
1410 gen_op_st_rm_T0_A0(s, ot, op1);
1412 gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, cpu_T1, is_right);
1415 static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1416 int is_right, int is_arith)
1418 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1420 /* load */
1421 if (op1 == OR_TMP0)
1422 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1423 else
1424 gen_op_mov_v_reg(ot, cpu_T0, op1);
1426 op2 &= mask;
1427 if (op2 != 0) {
1428 if (is_right) {
1429 if (is_arith) {
1430 gen_exts(ot, cpu_T0);
1431 tcg_gen_sari_tl(cpu_tmp4, cpu_T0, op2 - 1);
1432 tcg_gen_sari_tl(cpu_T0, cpu_T0, op2);
1433 } else {
1434 gen_extu(ot, cpu_T0);
1435 tcg_gen_shri_tl(cpu_tmp4, cpu_T0, op2 - 1);
1436 tcg_gen_shri_tl(cpu_T0, cpu_T0, op2);
1438 } else {
1439 tcg_gen_shli_tl(cpu_tmp4, cpu_T0, op2 - 1);
1440 tcg_gen_shli_tl(cpu_T0, cpu_T0, op2);
1444 /* store */
1445 gen_op_st_rm_T0_A0(s, ot, op1);
1447 /* update eflags if non-zero shift */
1448 if (op2 != 0) {
1449 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1450 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
1451 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1455 static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
1457 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1458 TCGv_i32 t0, t1;
1460 /* load */
1461 if (op1 == OR_TMP0) {
1462 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1463 } else {
1464 gen_op_mov_v_reg(ot, cpu_T0, op1);
1467 tcg_gen_andi_tl(cpu_T1, cpu_T1, mask);
1469 switch (ot) {
1470 case MO_8:
1471 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1472 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
1473 tcg_gen_muli_tl(cpu_T0, cpu_T0, 0x01010101);
1474 goto do_long;
1475 case MO_16:
1476 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1477 tcg_gen_deposit_tl(cpu_T0, cpu_T0, cpu_T0, 16, 16);
1478 goto do_long;
1479 do_long:
1480 #ifdef TARGET_X86_64
1481 case MO_32:
1482 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
1483 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
1484 if (is_right) {
1485 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1486 } else {
1487 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1489 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
1490 break;
1491 #endif
1492 default:
1493 if (is_right) {
1494 tcg_gen_rotr_tl(cpu_T0, cpu_T0, cpu_T1);
1495 } else {
1496 tcg_gen_rotl_tl(cpu_T0, cpu_T0, cpu_T1);
1498 break;
1501 /* store */
1502 gen_op_st_rm_T0_A0(s, ot, op1);
1504 /* We'll need the flags computed into CC_SRC. */
1505 gen_compute_eflags(s);
1507 /* The value that was "rotated out" is now present at the other end
1508 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1509 since we've computed the flags into CC_SRC, these variables are
1510 currently dead. */
1511 if (is_right) {
1512 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
1513 tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
1514 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1515 } else {
1516 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
1517 tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
1519 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1520 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1522 /* Now conditionally store the new CC_OP value. If the shift count
1523 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1524 Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1525 exactly as we computed above. */
1526 t0 = tcg_const_i32(0);
1527 t1 = tcg_temp_new_i32();
1528 tcg_gen_trunc_tl_i32(t1, cpu_T1);
1529 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1530 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1531 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1532 cpu_tmp2_i32, cpu_tmp3_i32);
1533 tcg_temp_free_i32(t0);
1534 tcg_temp_free_i32(t1);
1536 /* The CC_OP value is no longer predictable. */
1537 set_cc_op(s, CC_OP_DYNAMIC);
1540 static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1541 int is_right)
1543 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1544 int shift;
1546 /* load */
1547 if (op1 == OR_TMP0) {
1548 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1549 } else {
1550 gen_op_mov_v_reg(ot, cpu_T0, op1);
1553 op2 &= mask;
1554 if (op2 != 0) {
1555 switch (ot) {
1556 #ifdef TARGET_X86_64
1557 case MO_32:
1558 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
1559 if (is_right) {
1560 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1561 } else {
1562 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1564 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
1565 break;
1566 #endif
1567 default:
1568 if (is_right) {
1569 tcg_gen_rotri_tl(cpu_T0, cpu_T0, op2);
1570 } else {
1571 tcg_gen_rotli_tl(cpu_T0, cpu_T0, op2);
1573 break;
1574 case MO_8:
1575 mask = 7;
1576 goto do_shifts;
1577 case MO_16:
1578 mask = 15;
1579 do_shifts:
1580 shift = op2 & mask;
1581 if (is_right) {
1582 shift = mask + 1 - shift;
1584 gen_extu(ot, cpu_T0);
1585 tcg_gen_shli_tl(cpu_tmp0, cpu_T0, shift);
1586 tcg_gen_shri_tl(cpu_T0, cpu_T0, mask + 1 - shift);
1587 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
1588 break;
1592 /* store */
1593 gen_op_st_rm_T0_A0(s, ot, op1);
1595 if (op2 != 0) {
1596 /* Compute the flags into CC_SRC. */
1597 gen_compute_eflags(s);
1599 /* The value that was "rotated out" is now present at the other end
1600 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1601 since we've computed the flags into CC_SRC, these variables are
1602 currently dead. */
1603 if (is_right) {
1604 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask - 1);
1605 tcg_gen_shri_tl(cpu_cc_dst, cpu_T0, mask);
1606 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1607 } else {
1608 tcg_gen_shri_tl(cpu_cc_src2, cpu_T0, mask);
1609 tcg_gen_andi_tl(cpu_cc_dst, cpu_T0, 1);
1611 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1612 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1613 set_cc_op(s, CC_OP_ADCOX);
1617 /* XXX: add faster immediate = 1 case */
1618 static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1619 int is_right)
1621 gen_compute_eflags(s);
1622 assert(s->cc_op == CC_OP_EFLAGS);
1624 /* load */
1625 if (op1 == OR_TMP0)
1626 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1627 else
1628 gen_op_mov_v_reg(ot, cpu_T0, op1);
1630 if (is_right) {
1631 switch (ot) {
1632 case MO_8:
1633 gen_helper_rcrb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1634 break;
1635 case MO_16:
1636 gen_helper_rcrw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1637 break;
1638 case MO_32:
1639 gen_helper_rcrl(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1640 break;
1641 #ifdef TARGET_X86_64
1642 case MO_64:
1643 gen_helper_rcrq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1644 break;
1645 #endif
1646 default:
1647 tcg_abort();
1649 } else {
1650 switch (ot) {
1651 case MO_8:
1652 gen_helper_rclb(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1653 break;
1654 case MO_16:
1655 gen_helper_rclw(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1656 break;
1657 case MO_32:
1658 gen_helper_rcll(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1659 break;
1660 #ifdef TARGET_X86_64
1661 case MO_64:
1662 gen_helper_rclq(cpu_T0, cpu_env, cpu_T0, cpu_T1);
1663 break;
1664 #endif
1665 default:
1666 tcg_abort();
1669 /* store */
1670 gen_op_st_rm_T0_A0(s, ot, op1);
1673 /* XXX: add faster immediate case */
1674 static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1675 bool is_right, TCGv count_in)
1677 target_ulong mask = (ot == MO_64 ? 63 : 31);
1678 TCGv count;
1680 /* load */
1681 if (op1 == OR_TMP0) {
1682 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
1683 } else {
1684 gen_op_mov_v_reg(ot, cpu_T0, op1);
1687 count = tcg_temp_new();
1688 tcg_gen_andi_tl(count, count_in, mask);
1690 switch (ot) {
1691 case MO_16:
1692 /* Note: we implement the Intel behaviour for shift count > 16.
1693 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1694 portion by constructing it as a 32-bit value. */
1695 if (is_right) {
1696 tcg_gen_deposit_tl(cpu_tmp0, cpu_T0, cpu_T1, 16, 16);
1697 tcg_gen_mov_tl(cpu_T1, cpu_T0);
1698 tcg_gen_mov_tl(cpu_T0, cpu_tmp0);
1699 } else {
1700 tcg_gen_deposit_tl(cpu_T1, cpu_T0, cpu_T1, 16, 16);
1702 /* FALLTHRU */
1703 #ifdef TARGET_X86_64
1704 case MO_32:
1705 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1706 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1707 if (is_right) {
1708 tcg_gen_concat_tl_i64(cpu_T0, cpu_T0, cpu_T1);
1709 tcg_gen_shr_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
1710 tcg_gen_shr_i64(cpu_T0, cpu_T0, count);
1711 } else {
1712 tcg_gen_concat_tl_i64(cpu_T0, cpu_T1, cpu_T0);
1713 tcg_gen_shl_i64(cpu_tmp0, cpu_T0, cpu_tmp0);
1714 tcg_gen_shl_i64(cpu_T0, cpu_T0, count);
1715 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1716 tcg_gen_shri_i64(cpu_T0, cpu_T0, 32);
1718 break;
1719 #endif
1720 default:
1721 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1722 if (is_right) {
1723 tcg_gen_shr_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1725 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1726 tcg_gen_shr_tl(cpu_T0, cpu_T0, count);
1727 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_tmp4);
1728 } else {
1729 tcg_gen_shl_tl(cpu_tmp0, cpu_T0, cpu_tmp0);
1730 if (ot == MO_16) {
1731 /* Only needed if count > 16, for Intel behaviour. */
1732 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1733 tcg_gen_shr_tl(cpu_tmp4, cpu_T1, cpu_tmp4);
1734 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1737 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1738 tcg_gen_shl_tl(cpu_T0, cpu_T0, count);
1739 tcg_gen_shr_tl(cpu_T1, cpu_T1, cpu_tmp4);
1741 tcg_gen_movi_tl(cpu_tmp4, 0);
1742 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T1, count, cpu_tmp4,
1743 cpu_tmp4, cpu_T1);
1744 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_T1);
1745 break;
1748 /* store */
1749 gen_op_st_rm_T0_A0(s, ot, op1);
1751 gen_shift_flags(s, ot, cpu_T0, cpu_tmp0, count, is_right);
1752 tcg_temp_free(count);
1755 static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
1757 if (s != OR_TMP1)
1758 gen_op_mov_v_reg(ot, cpu_T1, s);
1759 switch(op) {
1760 case OP_ROL:
1761 gen_rot_rm_T1(s1, ot, d, 0);
1762 break;
1763 case OP_ROR:
1764 gen_rot_rm_T1(s1, ot, d, 1);
1765 break;
1766 case OP_SHL:
1767 case OP_SHL1:
1768 gen_shift_rm_T1(s1, ot, d, 0, 0);
1769 break;
1770 case OP_SHR:
1771 gen_shift_rm_T1(s1, ot, d, 1, 0);
1772 break;
1773 case OP_SAR:
1774 gen_shift_rm_T1(s1, ot, d, 1, 1);
1775 break;
1776 case OP_RCL:
1777 gen_rotc_rm_T1(s1, ot, d, 0);
1778 break;
1779 case OP_RCR:
1780 gen_rotc_rm_T1(s1, ot, d, 1);
1781 break;
1785 static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
1787 switch(op) {
1788 case OP_ROL:
1789 gen_rot_rm_im(s1, ot, d, c, 0);
1790 break;
1791 case OP_ROR:
1792 gen_rot_rm_im(s1, ot, d, c, 1);
1793 break;
1794 case OP_SHL:
1795 case OP_SHL1:
1796 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1797 break;
1798 case OP_SHR:
1799 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1800 break;
1801 case OP_SAR:
1802 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1803 break;
1804 default:
1805 /* currently not optimized */
1806 tcg_gen_movi_tl(cpu_T1, c);
1807 gen_shift(s1, op, ot, d, OR_TMP1);
1808 break;
1812 /* Decompose an address. */
1814 typedef struct AddressParts {
1815 int def_seg;
1816 int base;
1817 int index;
1818 int scale;
1819 target_long disp;
1820 } AddressParts;
1822 static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
1823 int modrm)
1825 int def_seg, base, index, scale, mod, rm;
1826 target_long disp;
1827 bool havesib;
1829 def_seg = R_DS;
1830 index = -1;
1831 scale = 0;
1832 disp = 0;
1834 mod = (modrm >> 6) & 3;
1835 rm = modrm & 7;
1836 base = rm | REX_B(s);
1838 if (mod == 3) {
1839 /* Normally filtered out earlier, but including this path
1840 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1841 goto done;
1844 switch (s->aflag) {
1845 case MO_64:
1846 case MO_32:
1847 havesib = 0;
1848 if (rm == 4) {
1849 int code = cpu_ldub_code(env, s->pc++);
1850 scale = (code >> 6) & 3;
1851 index = ((code >> 3) & 7) | REX_X(s);
1852 if (index == 4) {
1853 index = -1; /* no index */
1855 base = (code & 7) | REX_B(s);
1856 havesib = 1;
1859 switch (mod) {
1860 case 0:
1861 if ((base & 7) == 5) {
1862 base = -1;
1863 disp = (int32_t)cpu_ldl_code(env, s->pc);
1864 s->pc += 4;
1865 if (CODE64(s) && !havesib) {
1866 base = -2;
1867 disp += s->pc + s->rip_offset;
1870 break;
1871 case 1:
1872 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1873 break;
1874 default:
1875 case 2:
1876 disp = (int32_t)cpu_ldl_code(env, s->pc);
1877 s->pc += 4;
1878 break;
1881 /* For correct popl handling with esp. */
1882 if (base == R_ESP && s->popl_esp_hack) {
1883 disp += s->popl_esp_hack;
1885 if (base == R_EBP || base == R_ESP) {
1886 def_seg = R_SS;
1888 break;
1890 case MO_16:
1891 if (mod == 0) {
1892 if (rm == 6) {
1893 base = -1;
1894 disp = cpu_lduw_code(env, s->pc);
1895 s->pc += 2;
1896 break;
1898 } else if (mod == 1) {
1899 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1900 } else {
1901 disp = (int16_t)cpu_lduw_code(env, s->pc);
1902 s->pc += 2;
1905 switch (rm) {
1906 case 0:
1907 base = R_EBX;
1908 index = R_ESI;
1909 break;
1910 case 1:
1911 base = R_EBX;
1912 index = R_EDI;
1913 break;
1914 case 2:
1915 base = R_EBP;
1916 index = R_ESI;
1917 def_seg = R_SS;
1918 break;
1919 case 3:
1920 base = R_EBP;
1921 index = R_EDI;
1922 def_seg = R_SS;
1923 break;
1924 case 4:
1925 base = R_ESI;
1926 break;
1927 case 5:
1928 base = R_EDI;
1929 break;
1930 case 6:
1931 base = R_EBP;
1932 def_seg = R_SS;
1933 break;
1934 default:
1935 case 7:
1936 base = R_EBX;
1937 break;
1939 break;
1941 default:
1942 tcg_abort();
1945 done:
1946 return (AddressParts){ def_seg, base, index, scale, disp };
1949 /* Compute the address, with a minimum number of TCG ops. */
1950 static TCGv gen_lea_modrm_1(AddressParts a)
1952 TCGv ea;
1954 TCGV_UNUSED(ea);
1955 if (a.index >= 0) {
1956 if (a.scale == 0) {
1957 ea = cpu_regs[a.index];
1958 } else {
1959 tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
1960 ea = cpu_A0;
1962 if (a.base >= 0) {
1963 tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
1964 ea = cpu_A0;
1966 } else if (a.base >= 0) {
1967 ea = cpu_regs[a.base];
1969 if (TCGV_IS_UNUSED(ea)) {
1970 tcg_gen_movi_tl(cpu_A0, a.disp);
1971 ea = cpu_A0;
1972 } else if (a.disp != 0) {
1973 tcg_gen_addi_tl(cpu_A0, ea, a.disp);
1974 ea = cpu_A0;
1977 return ea;
1980 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1982 AddressParts a = gen_lea_modrm_0(env, s, modrm);
1983 TCGv ea = gen_lea_modrm_1(a);
1984 gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
1987 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
1989 (void)gen_lea_modrm_0(env, s, modrm);
1992 /* Used for BNDCL, BNDCU, BNDCN. */
1993 static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
1994 TCGCond cond, TCGv_i64 bndv)
1996 TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));
1998 tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
1999 if (!CODE64(s)) {
2000 tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
2002 tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
2003 tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
2004 gen_helper_bndck(cpu_env, cpu_tmp2_i32);
2007 /* used for LEA and MOV AX, mem */
2008 static void gen_add_A0_ds_seg(DisasContext *s)
2010 gen_lea_v_seg(s, s->aflag, cpu_A0, R_DS, s->override);
2013 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2014 OR_TMP0 */
2015 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2016 TCGMemOp ot, int reg, int is_store)
2018 int mod, rm;
2020 mod = (modrm >> 6) & 3;
2021 rm = (modrm & 7) | REX_B(s);
2022 if (mod == 3) {
2023 if (is_store) {
2024 if (reg != OR_TMP0)
2025 gen_op_mov_v_reg(ot, cpu_T0, reg);
2026 gen_op_mov_reg_v(ot, rm, cpu_T0);
2027 } else {
2028 gen_op_mov_v_reg(ot, cpu_T0, rm);
2029 if (reg != OR_TMP0)
2030 gen_op_mov_reg_v(ot, reg, cpu_T0);
2032 } else {
2033 gen_lea_modrm(env, s, modrm);
2034 if (is_store) {
2035 if (reg != OR_TMP0)
2036 gen_op_mov_v_reg(ot, cpu_T0, reg);
2037 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
2038 } else {
2039 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
2040 if (reg != OR_TMP0)
2041 gen_op_mov_reg_v(ot, reg, cpu_T0);
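/* Fetch an immediate operand of size ot from the instruction stream and
   advance s->pc past it. */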
2046 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2048 uint32_t ret;
2050 switch (ot) {
2051 case MO_8:
2052 ret = cpu_ldub_code(env, s->pc);
2053 s->pc++;
2054 break;
2055 case MO_16:
2056 ret = cpu_lduw_code(env, s->pc);
2057 s->pc += 2;
2058 break;
2059 case MO_32:
2060 #ifdef TARGET_X86_64
2061 case MO_64:
2062 #endif
2063 ret = cpu_ldl_code(env, s->pc);
2064 s->pc += 4;
2065 break;
2066 default:
2067 tcg_abort();
2069 return ret;
2072 static inline int insn_const_size(TCGMemOp ot)
2074 if (ot <= MO_32) {
2075 return 1 << ot;
2076 } else {
2077 return 4;
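/* Jump to eip, chaining directly to the target TB when it lies within the
   same guest page(s) as the current block; otherwise just end the TB. */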
2081 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2083 TranslationBlock *tb;
2084 target_ulong pc;
2086 pc = s->cs_base + eip;
2087 tb = s->tb;
2088 /* NOTE: we handle the case where the TB spans two pages here */
2089 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2090 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2091 /* jump to same page: we can use a direct jump */
2092 tcg_gen_goto_tb(tb_num);
2093 gen_jmp_im(eip);
2094 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
2095 } else {
2096 /* jump to another page: currently not optimized */
2097 gen_jmp_im(eip);
2098 gen_eob(s);
2102 static inline void gen_jcc(DisasContext *s, int b,
2103 target_ulong val, target_ulong next_eip)
2105 TCGLabel *l1, *l2;
2107 if (s->jmp_opt) {
2108 l1 = gen_new_label();
2109 gen_jcc1(s, b, l1);
2111 gen_goto_tb(s, 0, next_eip);
2113 gen_set_label(l1);
2114 gen_goto_tb(s, 1, val);
2115 s->is_jmp = DISAS_TB_JUMP;
2116 } else {
2117 l1 = gen_new_label();
2118 l2 = gen_new_label();
2119 gen_jcc1(s, b, l1);
2121 gen_jmp_im(next_eip);
2122 tcg_gen_br(l2);
2124 gen_set_label(l1);
2125 gen_jmp_im(val);
2126 gen_set_label(l2);
2127 gen_eob(s);
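/* CMOVcc: conditionally copy the modrm operand into reg according to
   condition code b, using a movcond so that no branch is generated. */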
2131 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
2132 int modrm, int reg)
2134 CCPrepare cc;
2136 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2138 cc = gen_prepare_cc(s, b, cpu_T1);
2139 if (cc.mask != -1) {
2140 TCGv t0 = tcg_temp_new();
2141 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2142 cc.reg = t0;
2144 if (!cc.use_reg2) {
2145 cc.reg2 = tcg_const_tl(cc.imm);
2148 tcg_gen_movcond_tl(cc.cond, cpu_T0, cc.reg, cc.reg2,
2149 cpu_T0, cpu_regs[reg]);
2150 gen_op_mov_reg_v(ot, reg, cpu_T0);
2152 if (cc.mask != -1) {
2153 tcg_temp_free(cc.reg);
2155 if (!cc.use_reg2) {
2156 tcg_temp_free(cc.reg2);
2160 static inline void gen_op_movl_T0_seg(int seg_reg)
2162 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
2163 offsetof(CPUX86State,segs[seg_reg].selector));
2166 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2168 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
2169 tcg_gen_st32_tl(cpu_T0, cpu_env,
2170 offsetof(CPUX86State,segs[seg_reg].selector));
2171 tcg_gen_shli_tl(cpu_seg_base[seg_reg], cpu_T0, 4);
2174 /* move T0 to seg_reg and compute if the CPU state may change. Never
2175 call this function with seg_reg == R_CS */
2176 static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2178 if (s->pe && !s->vm86) {
2179 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2180 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2181 /* abort translation because the addseg value may change or
2182 because ss32 may change. For R_SS, translation must always
2183 stop, as special handling is needed to disable hardware
2184 interrupts for the next instruction */
2185 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2186 s->is_jmp = DISAS_TB_JUMP;
2187 } else {
2188 gen_op_movl_seg_T0_vm(seg_reg);
2189 if (seg_reg == R_SS)
2190 s->is_jmp = DISAS_TB_JUMP;
2194 static inline int svm_is_rep(int prefixes)
2196 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2199 static inline void
2200 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2201 uint32_t type, uint64_t param)
2203 /* no SVM activated; fast case */
2204 if (likely(!(s->flags & HF_SVMI_MASK)))
2205 return;
2206 gen_update_cc_op(s);
2207 gen_jmp_im(pc_start - s->cs_base);
2208 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2209 tcg_const_i64(param));
2212 static inline void
2213 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2215 gen_svm_check_intercept_param(s, pc_start, type, 0);
2218 static inline void gen_stack_update(DisasContext *s, int addend)
2220 gen_op_add_reg_im(mo_stacksize(s), R_ESP, addend);
2223 /* Generate a push. It depends on ss32, addseg and dflag. */
2224 static void gen_push_v(DisasContext *s, TCGv val)
2226 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2227 TCGMemOp a_ot = mo_stacksize(s);
2228 int size = 1 << d_ot;
2229 TCGv new_esp = cpu_A0;
2231 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2233 if (!CODE64(s)) {
2234 if (s->addseg) {
2235 new_esp = cpu_tmp4;
2236 tcg_gen_mov_tl(new_esp, cpu_A0);
2238 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2241 gen_op_st_v(s, d_ot, val, cpu_A0);
2242 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
2245 /* two step pop is necessary for precise exceptions */
2246 static TCGMemOp gen_pop_T0(DisasContext *s)
2248 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2250 gen_lea_v_seg(s, mo_stacksize(s), cpu_regs[R_ESP], R_SS, -1);
2251 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2253 return d_ot;
2256 static inline void gen_pop_update(DisasContext *s, TCGMemOp ot)
2258 gen_stack_update(s, 1 << ot);
2261 static inline void gen_stack_A0(DisasContext *s)
2263 gen_lea_v_seg(s, s->ss32 ? MO_32 : MO_16, cpu_regs[R_ESP], R_SS, -1);
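/* PUSHA/PUSHAD: store EAX..EDI (including the original ESP value) below
   the stack pointer, then drop ESP by eight slots in a single update. */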
2266 static void gen_pusha(DisasContext *s)
2268 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2269 TCGMemOp d_ot = s->dflag;
2270 int size = 1 << d_ot;
2271 int i;
2273 for (i = 0; i < 8; i++) {
2274 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], (i - 8) * size);
2275 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2276 gen_op_st_v(s, d_ot, cpu_regs[7 - i], cpu_A0);
2279 gen_stack_update(s, -8 * size);
2282 static void gen_popa(DisasContext *s)
2284 TCGMemOp s_ot = s->ss32 ? MO_32 : MO_16;
2285 TCGMemOp d_ot = s->dflag;
2286 int size = 1 << d_ot;
2287 int i;
2289 for (i = 0; i < 8; i++) {
2290 /* ESP is not reloaded */
2291 if (7 - i == R_ESP) {
2292 continue;
2294 tcg_gen_addi_tl(cpu_A0, cpu_regs[R_ESP], i * size);
2295 gen_lea_v_seg(s, s_ot, cpu_A0, R_SS, -1);
2296 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2297 gen_op_mov_reg_v(d_ot, 7 - i, cpu_T0);
2300 gen_stack_update(s, 8 * size);
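/* ENTER: push EBP; for a nonzero LEVEL, copy LEVEL-1 saved frame pointers
   from the old frame and push the new frame pointer as the last one; then
   load EBP with the new frame and reserve ESP_ADDEND bytes of locals. */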
2303 static void gen_enter(DisasContext *s, int esp_addend, int level)
2305 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2306 TCGMemOp a_ot = CODE64(s) ? MO_64 : s->ss32 ? MO_32 : MO_16;
2307 int size = 1 << d_ot;
2309 /* Push BP; compute FrameTemp into T1. */
2310 tcg_gen_subi_tl(cpu_T1, cpu_regs[R_ESP], size);
2311 gen_lea_v_seg(s, a_ot, cpu_T1, R_SS, -1);
2312 gen_op_st_v(s, d_ot, cpu_regs[R_EBP], cpu_A0);
2314 level &= 31;
2315 if (level != 0) {
2316 int i;
2318 /* Copy level-1 pointers from the previous frame. */
2319 for (i = 1; i < level; ++i) {
2320 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_EBP], size * i);
2321 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2322 gen_op_ld_v(s, d_ot, cpu_tmp0, cpu_A0);
2324 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * i);
2325 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2326 gen_op_st_v(s, d_ot, cpu_tmp0, cpu_A0);
2329 /* Push the current FrameTemp as the last level. */
2330 tcg_gen_subi_tl(cpu_A0, cpu_T1, size * level);
2331 gen_lea_v_seg(s, a_ot, cpu_A0, R_SS, -1);
2332 gen_op_st_v(s, d_ot, cpu_T1, cpu_A0);
2335 /* Copy the FrameTemp value to EBP. */
2336 gen_op_mov_reg_v(a_ot, R_EBP, cpu_T1);
2338 /* Compute the final value of ESP. */
2339 tcg_gen_subi_tl(cpu_T1, cpu_T1, esp_addend + size * level);
2340 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
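/* LEAVE: reload the saved EBP from the current frame and point ESP just
   above the slot it was loaded from. */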
2343 static void gen_leave(DisasContext *s)
2345 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2346 TCGMemOp a_ot = mo_stacksize(s);
2348 gen_lea_v_seg(s, a_ot, cpu_regs[R_EBP], R_SS, -1);
2349 gen_op_ld_v(s, d_ot, cpu_T0, cpu_A0);
2351 tcg_gen_addi_tl(cpu_T1, cpu_regs[R_EBP], 1 << d_ot);
2353 gen_op_mov_reg_v(d_ot, R_EBP, cpu_T0);
2354 gen_op_mov_reg_v(a_ot, R_ESP, cpu_T1);
2357 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2359 gen_update_cc_op(s);
2360 gen_jmp_im(cur_eip);
2361 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2362 s->is_jmp = DISAS_TB_JUMP;
2365 /* an interrupt is different from an exception because of the
2366 privilege checks */
2367 static void gen_interrupt(DisasContext *s, int intno,
2368 target_ulong cur_eip, target_ulong next_eip)
2370 gen_update_cc_op(s);
2371 gen_jmp_im(cur_eip);
2372 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2373 tcg_const_i32(next_eip - cur_eip));
2374 s->is_jmp = DISAS_TB_JUMP;
2377 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2379 gen_update_cc_op(s);
2380 gen_jmp_im(cur_eip);
2381 gen_helper_debug(cpu_env);
2382 s->is_jmp = DISAS_TB_JUMP;
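/* Set bits in env->hflags at run time and mirror the change in s->flags,
   so the remainder of the block is translated with the new value.
   gen_reset_hflag() below is the clearing counterpart. */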
2385 static void gen_set_hflag(DisasContext *s, uint32_t mask)
2387 if ((s->flags & mask) == 0) {
2388 TCGv_i32 t = tcg_temp_new_i32();
2389 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2390 tcg_gen_ori_i32(t, t, mask);
2391 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2392 tcg_temp_free_i32(t);
2393 s->flags |= mask;
2397 static void gen_reset_hflag(DisasContext *s, uint32_t mask)
2399 if (s->flags & mask) {
2400 TCGv_i32 t = tcg_temp_new_i32();
2401 tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2402 tcg_gen_andi_i32(t, t, ~mask);
2403 tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
2404 tcg_temp_free_i32(t);
2405 s->flags &= ~mask;
2409 /* Clear BND registers during legacy branches. */
2410 static void gen_bnd_jmp(DisasContext *s)
2412 /* Do nothing if BND prefix present, MPX is disabled, or if the
2413 BNDREGs are known to be in INIT state already. The helper
2414 itself will check BNDPRESERVE at runtime. */
2415 if ((s->prefix & PREFIX_REPNZ) == 0
2416 && (s->flags & HF_MPX_EN_MASK) == 0
2417 && (s->flags & HF_MPX_IU_MASK) == 0) {
2418 gen_helper_bnd_jmp(cpu_env);
2422 /* generate a generic end of block. Trace exception is also generated
2423 if needed */
2424 static void gen_eob(DisasContext *s)
2426 gen_update_cc_op(s);
2427 gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
2428 if (s->tb->flags & HF_RF_MASK) {
2429 gen_helper_reset_rf(cpu_env);
2431 if (s->singlestep_enabled) {
2432 gen_helper_debug(cpu_env);
2433 } else if (s->tf) {
2434 gen_helper_single_step(cpu_env);
2435 } else {
2436 tcg_gen_exit_tb(0);
2438 s->is_jmp = DISAS_TB_JUMP;
2441 /* generate a jump to eip.  No segment change must happen before this,
2442 as a direct call to the next block may occur */
2443 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2445 gen_update_cc_op(s);
2446 set_cc_op(s, CC_OP_DYNAMIC);
2447 if (s->jmp_opt) {
2448 gen_goto_tb(s, tb_num, eip);
2449 s->is_jmp = DISAS_TB_JUMP;
2450 } else {
2451 gen_jmp_im(eip);
2452 gen_eob(s);
2456 static void gen_jmp(DisasContext *s, target_ulong eip)
2458 gen_jmp_tb(s, eip, 0);
2461 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2463 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2464 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2467 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2469 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2470 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
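/* Load/store a whole 128-bit XMM value at the address in A0, done as two
   little-endian 64-bit memory accesses. */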
2473 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2475 int mem_index = s->mem_index;
2476 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2477 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2478 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2479 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2480 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2483 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2485 int mem_index = s->mem_index;
2486 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2487 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2488 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2489 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2490 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2493 static inline void gen_op_movo(int d_offset, int s_offset)
2495 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
2496 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
2497 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
2498 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
2501 static inline void gen_op_movq(int d_offset, int s_offset)
2503 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2504 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2507 static inline void gen_op_movl(int d_offset, int s_offset)
2509 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2510 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2513 static inline void gen_op_movq_env_0(int d_offset)
2515 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2516 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2519 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2520 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2521 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2522 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2523 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2524 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2525 TCGv_i32 val);
2526 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2527 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2528 TCGv val);
2530 #define SSE_SPECIAL ((void *)1)
2531 #define SSE_DUMMY ((void *)2)
2533 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2534 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2535 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
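/* Main 0F xx MMX/SSE dispatch table, indexed by [opcode][b1] where b1
   encodes the mandatory prefix: 0 = none, 1 = 66, 2 = F3, 3 = F2.
   SSE_SPECIAL entries are decoded by hand in gen_sse(); SSE_DUMMY is a
   placeholder for emms/femms and the 3DNow! opcode map, which take a
   separate path. */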
2537 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2538 /* 3DNow! extensions */
2539 [0x0e] = { SSE_DUMMY }, /* femms */
2540 [0x0f] = { SSE_DUMMY }, /* pf... */
2541 /* pure SSE operations */
2542 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2543 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2544 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2545 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2546 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2547 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2548 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2549 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2551 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2552 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2553 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2554 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2555 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2556 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2557 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2558 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2559 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2560 [0x51] = SSE_FOP(sqrt),
2561 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2562 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2563 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2564 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2565 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2566 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2567 [0x58] = SSE_FOP(add),
2568 [0x59] = SSE_FOP(mul),
2569 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2570 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2571 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2572 [0x5c] = SSE_FOP(sub),
2573 [0x5d] = SSE_FOP(min),
2574 [0x5e] = SSE_FOP(div),
2575 [0x5f] = SSE_FOP(max),
2577 [0xc2] = SSE_FOP(cmpeq),
2578 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2579 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2581 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2582 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2583 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2585 /* MMX ops and their SSE extensions */
2586 [0x60] = MMX_OP2(punpcklbw),
2587 [0x61] = MMX_OP2(punpcklwd),
2588 [0x62] = MMX_OP2(punpckldq),
2589 [0x63] = MMX_OP2(packsswb),
2590 [0x64] = MMX_OP2(pcmpgtb),
2591 [0x65] = MMX_OP2(pcmpgtw),
2592 [0x66] = MMX_OP2(pcmpgtl),
2593 [0x67] = MMX_OP2(packuswb),
2594 [0x68] = MMX_OP2(punpckhbw),
2595 [0x69] = MMX_OP2(punpckhwd),
2596 [0x6a] = MMX_OP2(punpckhdq),
2597 [0x6b] = MMX_OP2(packssdw),
2598 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2599 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2600 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2601 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2602 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2603 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2604 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2605 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2606 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2607 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2608 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2609 [0x74] = MMX_OP2(pcmpeqb),
2610 [0x75] = MMX_OP2(pcmpeqw),
2611 [0x76] = MMX_OP2(pcmpeql),
2612 [0x77] = { SSE_DUMMY }, /* emms */
2613 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2614 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2615 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2616 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2617 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2618 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2619 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2620 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2621 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2622 [0xd1] = MMX_OP2(psrlw),
2623 [0xd2] = MMX_OP2(psrld),
2624 [0xd3] = MMX_OP2(psrlq),
2625 [0xd4] = MMX_OP2(paddq),
2626 [0xd5] = MMX_OP2(pmullw),
2627 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2628 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2629 [0xd8] = MMX_OP2(psubusb),
2630 [0xd9] = MMX_OP2(psubusw),
2631 [0xda] = MMX_OP2(pminub),
2632 [0xdb] = MMX_OP2(pand),
2633 [0xdc] = MMX_OP2(paddusb),
2634 [0xdd] = MMX_OP2(paddusw),
2635 [0xde] = MMX_OP2(pmaxub),
2636 [0xdf] = MMX_OP2(pandn),
2637 [0xe0] = MMX_OP2(pavgb),
2638 [0xe1] = MMX_OP2(psraw),
2639 [0xe2] = MMX_OP2(psrad),
2640 [0xe3] = MMX_OP2(pavgw),
2641 [0xe4] = MMX_OP2(pmulhuw),
2642 [0xe5] = MMX_OP2(pmulhw),
2643 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2644 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2645 [0xe8] = MMX_OP2(psubsb),
2646 [0xe9] = MMX_OP2(psubsw),
2647 [0xea] = MMX_OP2(pminsw),
2648 [0xeb] = MMX_OP2(por),
2649 [0xec] = MMX_OP2(paddsb),
2650 [0xed] = MMX_OP2(paddsw),
2651 [0xee] = MMX_OP2(pmaxsw),
2652 [0xef] = MMX_OP2(pxor),
2653 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2654 [0xf1] = MMX_OP2(psllw),
2655 [0xf2] = MMX_OP2(pslld),
2656 [0xf3] = MMX_OP2(psllq),
2657 [0xf4] = MMX_OP2(pmuludq),
2658 [0xf5] = MMX_OP2(pmaddwd),
2659 [0xf6] = MMX_OP2(psadbw),
2660 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2661 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2662 [0xf8] = MMX_OP2(psubb),
2663 [0xf9] = MMX_OP2(psubw),
2664 [0xfa] = MMX_OP2(psubl),
2665 [0xfb] = MMX_OP2(psubq),
2666 [0xfc] = MMX_OP2(paddb),
2667 [0xfd] = MMX_OP2(paddw),
2668 [0xfe] = MMX_OP2(paddl),
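/* Shift-by-immediate group (0F 71/72/73), indexed by a width row
   (0 = words, 8 = dwords, 16 = qwords) plus the modrm reg field
   (2 = srl, 4 = sra, 6 = sll; 3 and 7 are the xmm-only byte shifts). */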
2671 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2672 [0 + 2] = MMX_OP2(psrlw),
2673 [0 + 4] = MMX_OP2(psraw),
2674 [0 + 6] = MMX_OP2(psllw),
2675 [8 + 2] = MMX_OP2(psrld),
2676 [8 + 4] = MMX_OP2(psrad),
2677 [8 + 6] = MMX_OP2(pslld),
2678 [16 + 2] = MMX_OP2(psrlq),
2679 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2680 [16 + 6] = MMX_OP2(psllq),
2681 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
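/* Scalar integer <-> float conversions: table3a converts a 32/64-bit GPR
   to ss/sd, table3b converts ss/sd to a GPR with truncating and rounding
   variants. */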
2684 static const SSEFunc_0_epi sse_op_table3ai[] = {
2685 gen_helper_cvtsi2ss,
2686 gen_helper_cvtsi2sd
2689 #ifdef TARGET_X86_64
2690 static const SSEFunc_0_epl sse_op_table3aq[] = {
2691 gen_helper_cvtsq2ss,
2692 gen_helper_cvtsq2sd
2694 #endif
2696 static const SSEFunc_i_ep sse_op_table3bi[] = {
2697 gen_helper_cvttss2si,
2698 gen_helper_cvtss2si,
2699 gen_helper_cvttsd2si,
2700 gen_helper_cvtsd2si
2703 #ifdef TARGET_X86_64
2704 static const SSEFunc_l_ep sse_op_table3bq[] = {
2705 gen_helper_cvttss2sq,
2706 gen_helper_cvtss2sq,
2707 gen_helper_cvttsd2sq,
2708 gen_helper_cvtsd2sq
2710 #endif
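/* CMPPS/CMPPD/CMPSS/CMPSD, indexed by the imm8 comparison predicate (0..7). */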
2712 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2713 SSE_FOP(cmpeq),
2714 SSE_FOP(cmplt),
2715 SSE_FOP(cmple),
2716 SSE_FOP(cmpunord),
2717 SSE_FOP(cmpneq),
2718 SSE_FOP(cmpnlt),
2719 SSE_FOP(cmpnle),
2720 SSE_FOP(cmpord),
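/* 3DNow! operations, indexed by the opcode suffix byte that follows the
   operands of 0F 0F. */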
2723 static const SSEFunc_0_epp sse_op_table5[256] = {
2724 [0x0c] = gen_helper_pi2fw,
2725 [0x0d] = gen_helper_pi2fd,
2726 [0x1c] = gen_helper_pf2iw,
2727 [0x1d] = gen_helper_pf2id,
2728 [0x8a] = gen_helper_pfnacc,
2729 [0x8e] = gen_helper_pfpnacc,
2730 [0x90] = gen_helper_pfcmpge,
2731 [0x94] = gen_helper_pfmin,
2732 [0x96] = gen_helper_pfrcp,
2733 [0x97] = gen_helper_pfrsqrt,
2734 [0x9a] = gen_helper_pfsub,
2735 [0x9e] = gen_helper_pfadd,
2736 [0xa0] = gen_helper_pfcmpgt,
2737 [0xa4] = gen_helper_pfmax,
2738 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2739 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2740 [0xaa] = gen_helper_pfsubr,
2741 [0xae] = gen_helper_pfacc,
2742 [0xb0] = gen_helper_pfcmpeq,
2743 [0xb4] = gen_helper_pfmul,
2744 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2745 [0xb7] = gen_helper_pmulhrw_mmx,
2746 [0xbb] = gen_helper_pswapd,
2747 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2750 struct SSEOpHelper_epp {
2751 SSEFunc_0_epp op[2];
2752 uint32_t ext_mask;
2755 struct SSEOpHelper_eppi {
2756 SSEFunc_0_eppi op[2];
2757 uint32_t ext_mask;
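/* Helpers for the three-byte 0F 38 (sse_op_table6) and 0F 3A
   (sse_op_table7, with an extra imm8) opcode maps.  Each entry also
   records the CPUID feature bit that must be set for the insn to be
   accepted. */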
2760 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2761 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2762 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2763 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2764 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2765 CPUID_EXT_PCLMULQDQ }
2766 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
2768 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2769 [0x00] = SSSE3_OP(pshufb),
2770 [0x01] = SSSE3_OP(phaddw),
2771 [0x02] = SSSE3_OP(phaddd),
2772 [0x03] = SSSE3_OP(phaddsw),
2773 [0x04] = SSSE3_OP(pmaddubsw),
2774 [0x05] = SSSE3_OP(phsubw),
2775 [0x06] = SSSE3_OP(phsubd),
2776 [0x07] = SSSE3_OP(phsubsw),
2777 [0x08] = SSSE3_OP(psignb),
2778 [0x09] = SSSE3_OP(psignw),
2779 [0x0a] = SSSE3_OP(psignd),
2780 [0x0b] = SSSE3_OP(pmulhrsw),
2781 [0x10] = SSE41_OP(pblendvb),
2782 [0x14] = SSE41_OP(blendvps),
2783 [0x15] = SSE41_OP(blendvpd),
2784 [0x17] = SSE41_OP(ptest),
2785 [0x1c] = SSSE3_OP(pabsb),
2786 [0x1d] = SSSE3_OP(pabsw),
2787 [0x1e] = SSSE3_OP(pabsd),
2788 [0x20] = SSE41_OP(pmovsxbw),
2789 [0x21] = SSE41_OP(pmovsxbd),
2790 [0x22] = SSE41_OP(pmovsxbq),
2791 [0x23] = SSE41_OP(pmovsxwd),
2792 [0x24] = SSE41_OP(pmovsxwq),
2793 [0x25] = SSE41_OP(pmovsxdq),
2794 [0x28] = SSE41_OP(pmuldq),
2795 [0x29] = SSE41_OP(pcmpeqq),
2796 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2797 [0x2b] = SSE41_OP(packusdw),
2798 [0x30] = SSE41_OP(pmovzxbw),
2799 [0x31] = SSE41_OP(pmovzxbd),
2800 [0x32] = SSE41_OP(pmovzxbq),
2801 [0x33] = SSE41_OP(pmovzxwd),
2802 [0x34] = SSE41_OP(pmovzxwq),
2803 [0x35] = SSE41_OP(pmovzxdq),
2804 [0x37] = SSE42_OP(pcmpgtq),
2805 [0x38] = SSE41_OP(pminsb),
2806 [0x39] = SSE41_OP(pminsd),
2807 [0x3a] = SSE41_OP(pminuw),
2808 [0x3b] = SSE41_OP(pminud),
2809 [0x3c] = SSE41_OP(pmaxsb),
2810 [0x3d] = SSE41_OP(pmaxsd),
2811 [0x3e] = SSE41_OP(pmaxuw),
2812 [0x3f] = SSE41_OP(pmaxud),
2813 [0x40] = SSE41_OP(pmulld),
2814 [0x41] = SSE41_OP(phminposuw),
2815 [0xdb] = AESNI_OP(aesimc),
2816 [0xdc] = AESNI_OP(aesenc),
2817 [0xdd] = AESNI_OP(aesenclast),
2818 [0xde] = AESNI_OP(aesdec),
2819 [0xdf] = AESNI_OP(aesdeclast),
2822 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2823 [0x08] = SSE41_OP(roundps),
2824 [0x09] = SSE41_OP(roundpd),
2825 [0x0a] = SSE41_OP(roundss),
2826 [0x0b] = SSE41_OP(roundsd),
2827 [0x0c] = SSE41_OP(blendps),
2828 [0x0d] = SSE41_OP(blendpd),
2829 [0x0e] = SSE41_OP(pblendw),
2830 [0x0f] = SSSE3_OP(palignr),
2831 [0x14] = SSE41_SPECIAL, /* pextrb */
2832 [0x15] = SSE41_SPECIAL, /* pextrw */
2833 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2834 [0x17] = SSE41_SPECIAL, /* extractps */
2835 [0x20] = SSE41_SPECIAL, /* pinsrb */
2836 [0x21] = SSE41_SPECIAL, /* insertps */
2837 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2838 [0x40] = SSE41_OP(dpps),
2839 [0x41] = SSE41_OP(dppd),
2840 [0x42] = SSE41_OP(mpsadbw),
2841 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2842 [0x60] = SSE42_OP(pcmpestrm),
2843 [0x61] = SSE42_OP(pcmpestri),
2844 [0x62] = SSE42_OP(pcmpistrm),
2845 [0x63] = SSE42_OP(pcmpistri),
2846 [0xdf] = AESNI_OP(aeskeygenassist),
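/* Decode and translate one MMX/SSE instruction.  b is the opcode byte
   following 0F; the mandatory prefix is folded into b1 and, for the
   special-cased opcodes, into bits 8-9 of b. */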
2849 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2850 target_ulong pc_start, int rex_r)
2852 int b1, op1_offset, op2_offset, is_xmm, val;
2853 int modrm, mod, rm, reg;
2854 SSEFunc_0_epp sse_fn_epp;
2855 SSEFunc_0_eppi sse_fn_eppi;
2856 SSEFunc_0_ppi sse_fn_ppi;
2857 SSEFunc_0_eppt sse_fn_eppt;
2858 TCGMemOp ot;
2860 b &= 0xff;
2861 if (s->prefix & PREFIX_DATA)
2862 b1 = 1;
2863 else if (s->prefix & PREFIX_REPZ)
2864 b1 = 2;
2865 else if (s->prefix & PREFIX_REPNZ)
2866 b1 = 3;
2867 else
2868 b1 = 0;
2869 sse_fn_epp = sse_op_table1[b][b1];
2870 if (!sse_fn_epp) {
2871 goto illegal_op;
2873 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
2874 is_xmm = 1;
2875 } else {
2876 if (b1 == 0) {
2877 /* MMX case */
2878 is_xmm = 0;
2879 } else {
2880 is_xmm = 1;
2883 /* simple MMX/SSE operation */
2884 if (s->flags & HF_TS_MASK) {
2885 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
2886 return;
2888 if (s->flags & HF_EM_MASK) {
2889 illegal_op:
2890 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
2891 return;
2893 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
2894 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
2895 goto illegal_op;
2896 if (b == 0x0e) {
2897 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
2898 goto illegal_op;
2899 /* femms */
2900 gen_helper_emms(cpu_env);
2901 return;
2903 if (b == 0x77) {
2904 /* emms */
2905 gen_helper_emms(cpu_env);
2906 return;
2908 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
2909 the static cpu state) */
2910 if (!is_xmm) {
2911 gen_helper_enter_mmx(cpu_env);
2914 modrm = cpu_ldub_code(env, s->pc++);
2915 reg = ((modrm >> 3) & 7);
2916 if (is_xmm)
2917 reg |= rex_r;
2918 mod = (modrm >> 6) & 3;
2919 if (sse_fn_epp == SSE_SPECIAL) {
2920 b |= (b1 << 8);
2921 switch(b) {
2922 case 0x0e7: /* movntq */
2923 if (mod == 3)
2924 goto illegal_op;
2925 gen_lea_modrm(env, s, modrm);
2926 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
2927 break;
2928 case 0x1e7: /* movntdq */
2929 case 0x02b: /* movntps */
2930 case 0x12b: /* movntpd */
2931 if (mod == 3)
2932 goto illegal_op;
2933 gen_lea_modrm(env, s, modrm);
2934 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
2935 break;
2936 case 0x3f0: /* lddqu */
2937 if (mod == 3)
2938 goto illegal_op;
2939 gen_lea_modrm(env, s, modrm);
2940 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
2941 break;
2942 case 0x22b: /* movntss */
2943 case 0x32b: /* movntsd */
2944 if (mod == 3)
2945 goto illegal_op;
2946 gen_lea_modrm(env, s, modrm);
2947 if (b1 & 1) {
2948 gen_stq_env_A0(s, offsetof(CPUX86State,
2949 xmm_regs[reg].ZMM_Q(0)));
2950 } else {
2951 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
2952 xmm_regs[reg].ZMM_L(0)));
2953 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
2955 break;
2956 case 0x6e: /* movd mm, ea */
2957 #ifdef TARGET_X86_64
2958 if (s->dflag == MO_64) {
2959 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
2960 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
2961 } else
2962 #endif
2964 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
2965 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
2966 offsetof(CPUX86State,fpregs[reg].mmx));
2967 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2968 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
2970 break;
2971 case 0x16e: /* movd xmm, ea */
2972 #ifdef TARGET_X86_64
2973 if (s->dflag == MO_64) {
2974 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
2975 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
2976 offsetof(CPUX86State,xmm_regs[reg]));
2977 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T0);
2978 } else
2979 #endif
2981 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
2982 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
2983 offsetof(CPUX86State,xmm_regs[reg]));
2984 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
2985 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
2987 break;
2988 case 0x6f: /* movq mm, ea */
2989 if (mod != 3) {
2990 gen_lea_modrm(env, s, modrm);
2991 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
2992 } else {
2993 rm = (modrm & 7);
2994 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
2995 offsetof(CPUX86State,fpregs[rm].mmx));
2996 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
2997 offsetof(CPUX86State,fpregs[reg].mmx));
2999 break;
3000 case 0x010: /* movups */
3001 case 0x110: /* movupd */
3002 case 0x028: /* movaps */
3003 case 0x128: /* movapd */
3004 case 0x16f: /* movdqa xmm, ea */
3005 case 0x26f: /* movdqu xmm, ea */
3006 if (mod != 3) {
3007 gen_lea_modrm(env, s, modrm);
3008 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3009 } else {
3010 rm = (modrm & 7) | REX_B(s);
3011 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3012 offsetof(CPUX86State,xmm_regs[rm]));
3014 break;
3015 case 0x210: /* movss xmm, ea */
3016 if (mod != 3) {
3017 gen_lea_modrm(env, s, modrm);
3018 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3019 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3020 tcg_gen_movi_tl(cpu_T0, 0);
3021 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3022 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3023 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3024 } else {
3025 rm = (modrm & 7) | REX_B(s);
3026 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3027 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3029 break;
3030 case 0x310: /* movsd xmm, ea */
3031 if (mod != 3) {
3032 gen_lea_modrm(env, s, modrm);
3033 gen_ldq_env_A0(s, offsetof(CPUX86State,
3034 xmm_regs[reg].ZMM_Q(0)));
3035 tcg_gen_movi_tl(cpu_T0, 0);
3036 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3037 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3038 } else {
3039 rm = (modrm & 7) | REX_B(s);
3040 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3041 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3043 break;
3044 case 0x012: /* movlps */
3045 case 0x112: /* movlpd */
3046 if (mod != 3) {
3047 gen_lea_modrm(env, s, modrm);
3048 gen_ldq_env_A0(s, offsetof(CPUX86State,
3049 xmm_regs[reg].ZMM_Q(0)));
3050 } else {
3051 /* movhlps */
3052 rm = (modrm & 7) | REX_B(s);
3053 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3054 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3056 break;
3057 case 0x212: /* movsldup */
3058 if (mod != 3) {
3059 gen_lea_modrm(env, s, modrm);
3060 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3061 } else {
3062 rm = (modrm & 7) | REX_B(s);
3063 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3064 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3065 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3066 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
3068 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3069 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3070 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3071 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3072 break;
3073 case 0x312: /* movddup */
3074 if (mod != 3) {
3075 gen_lea_modrm(env, s, modrm);
3076 gen_ldq_env_A0(s, offsetof(CPUX86State,
3077 xmm_regs[reg].ZMM_Q(0)));
3078 } else {
3079 rm = (modrm & 7) | REX_B(s);
3080 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3081 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3083 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3084 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3085 break;
3086 case 0x016: /* movhps */
3087 case 0x116: /* movhpd */
3088 if (mod != 3) {
3089 gen_lea_modrm(env, s, modrm);
3090 gen_ldq_env_A0(s, offsetof(CPUX86State,
3091 xmm_regs[reg].ZMM_Q(1)));
3092 } else {
3093 /* movlhps */
3094 rm = (modrm & 7) | REX_B(s);
3095 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3096 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3098 break;
3099 case 0x216: /* movshdup */
3100 if (mod != 3) {
3101 gen_lea_modrm(env, s, modrm);
3102 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3103 } else {
3104 rm = (modrm & 7) | REX_B(s);
3105 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3106 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
3107 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3108 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
3110 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3111 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3112 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3113 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3114 break;
3115 case 0x178:
3116 case 0x378:
3118 int bit_index, field_length;
3120 if (b1 == 1 && reg != 0)
3121 goto illegal_op;
3122 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3123 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3124 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3125 offsetof(CPUX86State,xmm_regs[reg]));
3126 if (b1 == 1)
3127 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3128 tcg_const_i32(bit_index),
3129 tcg_const_i32(field_length));
3130 else
3131 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3132 tcg_const_i32(bit_index),
3133 tcg_const_i32(field_length));
3135 break;
3136 case 0x7e: /* movd ea, mm */
3137 #ifdef TARGET_X86_64
3138 if (s->dflag == MO_64) {
3139 tcg_gen_ld_i64(cpu_T0, cpu_env,
3140 offsetof(CPUX86State,fpregs[reg].mmx));
3141 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3142 } else
3143 #endif
3145 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3146 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3147 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3149 break;
3150 case 0x17e: /* movd ea, xmm */
3151 #ifdef TARGET_X86_64
3152 if (s->dflag == MO_64) {
3153 tcg_gen_ld_i64(cpu_T0, cpu_env,
3154 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3155 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3156 } else
3157 #endif
3159 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
3160 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3161 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3163 break;
3164 case 0x27e: /* movq xmm, ea */
3165 if (mod != 3) {
3166 gen_lea_modrm(env, s, modrm);
3167 gen_ldq_env_A0(s, offsetof(CPUX86State,
3168 xmm_regs[reg].ZMM_Q(0)));
3169 } else {
3170 rm = (modrm & 7) | REX_B(s);
3171 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3172 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3174 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3175 break;
3176 case 0x7f: /* movq ea, mm */
3177 if (mod != 3) {
3178 gen_lea_modrm(env, s, modrm);
3179 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3180 } else {
3181 rm = (modrm & 7);
3182 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3183 offsetof(CPUX86State,fpregs[reg].mmx));
3185 break;
3186 case 0x011: /* movups */
3187 case 0x111: /* movupd */
3188 case 0x029: /* movaps */
3189 case 0x129: /* movapd */
3190 case 0x17f: /* movdqa ea, xmm */
3191 case 0x27f: /* movdqu ea, xmm */
3192 if (mod != 3) {
3193 gen_lea_modrm(env, s, modrm);
3194 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3195 } else {
3196 rm = (modrm & 7) | REX_B(s);
3197 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3198 offsetof(CPUX86State,xmm_regs[reg]));
3200 break;
3201 case 0x211: /* movss ea, xmm */
3202 if (mod != 3) {
3203 gen_lea_modrm(env, s, modrm);
3204 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3205 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
3206 } else {
3207 rm = (modrm & 7) | REX_B(s);
3208 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
3209 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3211 break;
3212 case 0x311: /* movsd ea, xmm */
3213 if (mod != 3) {
3214 gen_lea_modrm(env, s, modrm);
3215 gen_stq_env_A0(s, offsetof(CPUX86State,
3216 xmm_regs[reg].ZMM_Q(0)));
3217 } else {
3218 rm = (modrm & 7) | REX_B(s);
3219 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3220 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3222 break;
3223 case 0x013: /* movlps */
3224 case 0x113: /* movlpd */
3225 if (mod != 3) {
3226 gen_lea_modrm(env, s, modrm);
3227 gen_stq_env_A0(s, offsetof(CPUX86State,
3228 xmm_regs[reg].ZMM_Q(0)));
3229 } else {
3230 goto illegal_op;
3232 break;
3233 case 0x017: /* movhps */
3234 case 0x117: /* movhpd */
3235 if (mod != 3) {
3236 gen_lea_modrm(env, s, modrm);
3237 gen_stq_env_A0(s, offsetof(CPUX86State,
3238 xmm_regs[reg].ZMM_Q(1)));
3239 } else {
3240 goto illegal_op;
3242 break;
3243 case 0x71: /* shift mm, im */
3244 case 0x72:
3245 case 0x73:
3246 case 0x171: /* shift xmm, im */
3247 case 0x172:
3248 case 0x173:
3249 if (b1 >= 2) {
3250 goto illegal_op;
3252 val = cpu_ldub_code(env, s->pc++);
3253 if (is_xmm) {
3254 tcg_gen_movi_tl(cpu_T0, val);
3255 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3256 tcg_gen_movi_tl(cpu_T0, 0);
3257 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
3258 op1_offset = offsetof(CPUX86State,xmm_t0);
3259 } else {
3260 tcg_gen_movi_tl(cpu_T0, val);
3261 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3262 tcg_gen_movi_tl(cpu_T0, 0);
3263 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3264 op1_offset = offsetof(CPUX86State,mmx_t0);
3266 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3267 (((modrm >> 3)) & 7)][b1];
3268 if (!sse_fn_epp) {
3269 goto illegal_op;
3271 if (is_xmm) {
3272 rm = (modrm & 7) | REX_B(s);
3273 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3274 } else {
3275 rm = (modrm & 7);
3276 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3278 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3279 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3280 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3281 break;
3282 case 0x050: /* movmskps */
3283 rm = (modrm & 7) | REX_B(s);
3284 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3285 offsetof(CPUX86State,xmm_regs[rm]));
3286 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3287 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3288 break;
3289 case 0x150: /* movmskpd */
3290 rm = (modrm & 7) | REX_B(s);
3291 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3292 offsetof(CPUX86State,xmm_regs[rm]));
3293 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3294 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3295 break;
3296 case 0x02a: /* cvtpi2ps */
3297 case 0x12a: /* cvtpi2pd */
3298 gen_helper_enter_mmx(cpu_env);
3299 if (mod != 3) {
3300 gen_lea_modrm(env, s, modrm);
3301 op2_offset = offsetof(CPUX86State,mmx_t0);
3302 gen_ldq_env_A0(s, op2_offset);
3303 } else {
3304 rm = (modrm & 7);
3305 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3307 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3308 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3309 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3310 switch(b >> 8) {
3311 case 0x0:
3312 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3313 break;
3314 default:
3315 case 0x1:
3316 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3317 break;
3319 break;
3320 case 0x22a: /* cvtsi2ss */
3321 case 0x32a: /* cvtsi2sd */
3322 ot = mo_64_32(s->dflag);
3323 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3324 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3325 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3326 if (ot == MO_32) {
3327 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3328 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3329 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3330 } else {
3331 #ifdef TARGET_X86_64
3332 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3333 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T0);
3334 #else
3335 goto illegal_op;
3336 #endif
3338 break;
3339 case 0x02c: /* cvttps2pi */
3340 case 0x12c: /* cvttpd2pi */
3341 case 0x02d: /* cvtps2pi */
3342 case 0x12d: /* cvtpd2pi */
3343 gen_helper_enter_mmx(cpu_env);
3344 if (mod != 3) {
3345 gen_lea_modrm(env, s, modrm);
3346 op2_offset = offsetof(CPUX86State,xmm_t0);
3347 gen_ldo_env_A0(s, op2_offset);
3348 } else {
3349 rm = (modrm & 7) | REX_B(s);
3350 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3352 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3353 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3354 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3355 switch(b) {
3356 case 0x02c:
3357 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3358 break;
3359 case 0x12c:
3360 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3361 break;
3362 case 0x02d:
3363 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3364 break;
3365 case 0x12d:
3366 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3367 break;
3369 break;
3370 case 0x22c: /* cvttss2si */
3371 case 0x32c: /* cvttsd2si */
3372 case 0x22d: /* cvtss2si */
3373 case 0x32d: /* cvtsd2si */
3374 ot = mo_64_32(s->dflag);
3375 if (mod != 3) {
3376 gen_lea_modrm(env, s, modrm);
3377 if ((b >> 8) & 1) {
3378 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
3379 } else {
3380 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
3381 tcg_gen_st32_tl(cpu_T0, cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3383 op2_offset = offsetof(CPUX86State,xmm_t0);
3384 } else {
3385 rm = (modrm & 7) | REX_B(s);
3386 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3388 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3389 if (ot == MO_32) {
3390 SSEFunc_i_ep sse_fn_i_ep =
3391 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3392 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3393 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
3394 } else {
3395 #ifdef TARGET_X86_64
3396 SSEFunc_l_ep sse_fn_l_ep =
3397 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3398 sse_fn_l_ep(cpu_T0, cpu_env, cpu_ptr0);
3399 #else
3400 goto illegal_op;
3401 #endif
3403 gen_op_mov_reg_v(ot, reg, cpu_T0);
3404 break;
3405 case 0xc4: /* pinsrw */
3406 case 0x1c4:
3407 s->rip_offset = 1;
3408 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3409 val = cpu_ldub_code(env, s->pc++);
3410 if (b1) {
3411 val &= 7;
3412 tcg_gen_st16_tl(cpu_T0, cpu_env,
3413 offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
3414 } else {
3415 val &= 3;
3416 tcg_gen_st16_tl(cpu_T0, cpu_env,
3417 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3419 break;
3420 case 0xc5: /* pextrw */
3421 case 0x1c5:
3422 if (mod != 3)
3423 goto illegal_op;
3424 ot = mo_64_32(s->dflag);
3425 val = cpu_ldub_code(env, s->pc++);
3426 if (b1) {
3427 val &= 7;
3428 rm = (modrm & 7) | REX_B(s);
3429 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
3430 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
3431 } else {
3432 val &= 3;
3433 rm = (modrm & 7);
3434 tcg_gen_ld16u_tl(cpu_T0, cpu_env,
3435 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3437 reg = ((modrm >> 3) & 7) | rex_r;
3438 gen_op_mov_reg_v(ot, reg, cpu_T0);
3439 break;
3440 case 0x1d6: /* movq ea, xmm */
3441 if (mod != 3) {
3442 gen_lea_modrm(env, s, modrm);
3443 gen_stq_env_A0(s, offsetof(CPUX86State,
3444 xmm_regs[reg].ZMM_Q(0)));
3445 } else {
3446 rm = (modrm & 7) | REX_B(s);
3447 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3448 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3449 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3451 break;
3452 case 0x2d6: /* movq2dq */
3453 gen_helper_enter_mmx(cpu_env);
3454 rm = (modrm & 7);
3455 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3456 offsetof(CPUX86State,fpregs[rm].mmx));
3457 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3458 break;
3459 case 0x3d6: /* movdq2q */
3460 gen_helper_enter_mmx(cpu_env);
3461 rm = (modrm & 7) | REX_B(s);
3462 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3463 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3464 break;
3465 case 0xd7: /* pmovmskb */
3466 case 0x1d7:
3467 if (mod != 3)
3468 goto illegal_op;
3469 if (b1) {
3470 rm = (modrm & 7) | REX_B(s);
3471 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3472 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3473 } else {
3474 rm = (modrm & 7);
3475 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3476 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3478 reg = ((modrm >> 3) & 7) | rex_r;
3479 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3480 break;
3482 case 0x138:
3483 case 0x038:
3484 b = modrm;
3485 if ((b & 0xf0) == 0xf0) {
3486 goto do_0f_38_fx;
3488 modrm = cpu_ldub_code(env, s->pc++);
3489 rm = modrm & 7;
3490 reg = ((modrm >> 3) & 7) | rex_r;
3491 mod = (modrm >> 6) & 3;
3492 if (b1 >= 2) {
3493 goto illegal_op;
3496 sse_fn_epp = sse_op_table6[b].op[b1];
3497 if (!sse_fn_epp) {
3498 goto illegal_op;
3500 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3501 goto illegal_op;
3503 if (b1) {
3504 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3505 if (mod == 3) {
3506 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3507 } else {
3508 op2_offset = offsetof(CPUX86State,xmm_t0);
3509 gen_lea_modrm(env, s, modrm);
3510 switch (b) {
3511 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3512 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3513 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3514 gen_ldq_env_A0(s, op2_offset +
3515 offsetof(ZMMReg, ZMM_Q(0)));
3516 break;
3517 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3518 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3519 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3520 s->mem_index, MO_LEUL);
3521 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3522 offsetof(ZMMReg, ZMM_L(0)));
3523 break;
3524 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3525 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3526 s->mem_index, MO_LEUW);
3527 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3528 offsetof(ZMMReg, ZMM_W(0)));
3529 break;
3530 case 0x2a: /* movntdqa */
3531 gen_ldo_env_A0(s, op1_offset);
3532 return;
3533 default:
3534 gen_ldo_env_A0(s, op2_offset);
3537 } else {
3538 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3539 if (mod == 3) {
3540 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3541 } else {
3542 op2_offset = offsetof(CPUX86State,mmx_t0);
3543 gen_lea_modrm(env, s, modrm);
3544 gen_ldq_env_A0(s, op2_offset);
3547 if (sse_fn_epp == SSE_SPECIAL) {
3548 goto illegal_op;
3551 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3552 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3553 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3555 if (b == 0x17) {
3556 set_cc_op(s, CC_OP_EFLAGS);
3558 break;
3560 case 0x238:
3561 case 0x338:
3562 do_0f_38_fx:
3563 /* Various integer extensions at 0f 38 f[0-f]. */
3564 b = modrm | (b1 << 8);
3565 modrm = cpu_ldub_code(env, s->pc++);
3566 reg = ((modrm >> 3) & 7) | rex_r;
3568 switch (b) {
3569 case 0x3f0: /* crc32 Gd,Eb */
3570 case 0x3f1: /* crc32 Gd,Ey */
3571 do_crc32:
3572 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3573 goto illegal_op;
3575 if ((b & 0xff) == 0xf0) {
3576 ot = MO_8;
3577 } else if (s->dflag != MO_64) {
3578 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3579 } else {
3580 ot = MO_64;
3583 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3584 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3585 gen_helper_crc32(cpu_T0, cpu_tmp2_i32,
3586 cpu_T0, tcg_const_i32(8 << ot));
3588 ot = mo_64_32(s->dflag);
3589 gen_op_mov_reg_v(ot, reg, cpu_T0);
3590 break;
3592 case 0x1f0: /* crc32 or movbe */
3593 case 0x1f1:
3594 /* For these insns, the f3 prefix is supposed to have priority
3595 over the 66 prefix, but that's not what we implemented above
3596 when setting b1. */
3597 if (s->prefix & PREFIX_REPNZ) {
3598 goto do_crc32;
3600 /* FALLTHRU */
3601 case 0x0f0: /* movbe Gy,My */
3602 case 0x0f1: /* movbe My,Gy */
3603 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3604 goto illegal_op;
3606 if (s->dflag != MO_64) {
3607 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3608 } else {
3609 ot = MO_64;
3612 gen_lea_modrm(env, s, modrm);
3613 if ((b & 1) == 0) {
3614 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
3615 s->mem_index, ot | MO_BE);
3616 gen_op_mov_reg_v(ot, reg, cpu_T0);
3617 } else {
3618 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3619 s->mem_index, ot | MO_BE);
3621 break;
3623 case 0x0f2: /* andn Gy, By, Ey */
3624 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3625 || !(s->prefix & PREFIX_VEX)
3626 || s->vex_l != 0) {
3627 goto illegal_op;
3629 ot = mo_64_32(s->dflag);
3630 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3631 tcg_gen_andc_tl(cpu_T0, cpu_regs[s->vex_v], cpu_T0);
3632 gen_op_mov_reg_v(ot, reg, cpu_T0);
3633 gen_op_update1_cc();
3634 set_cc_op(s, CC_OP_LOGICB + ot);
3635 break;
3637 case 0x0f7: /* bextr Gy, Ey, By */
3638 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3639 || !(s->prefix & PREFIX_VEX)
3640 || s->vex_l != 0) {
3641 goto illegal_op;
3643 ot = mo_64_32(s->dflag);
3645 TCGv bound, zero;
3647 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3648 /* Extract START, and shift the operand.
3649 Shifts larger than operand size get zeros. */
3650 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3651 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_A0);
3653 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3654 zero = tcg_const_tl(0);
3655 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T0, cpu_A0, bound,
3656 cpu_T0, zero);
3657 tcg_temp_free(zero);
3659 /* Extract the LEN into a mask. Lengths larger than
3660 operand size get all ones. */
3661 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3662 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3663 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3664 cpu_A0, bound);
3665 tcg_temp_free(bound);
3666 tcg_gen_movi_tl(cpu_T1, 1);
3667 tcg_gen_shl_tl(cpu_T1, cpu_T1, cpu_A0);
3668 tcg_gen_subi_tl(cpu_T1, cpu_T1, 1);
3669 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
3671 gen_op_mov_reg_v(ot, reg, cpu_T0);
3672 gen_op_update1_cc();
3673 set_cc_op(s, CC_OP_LOGICB + ot);
3675 break;
3677 case 0x0f5: /* bzhi Gy, Ey, By */
3678 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3679 || !(s->prefix & PREFIX_VEX)
3680 || s->vex_l != 0) {
3681 goto illegal_op;
3683 ot = mo_64_32(s->dflag);
3684 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3685 tcg_gen_ext8u_tl(cpu_T1, cpu_regs[s->vex_v]);
3687 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3688 /* Note that since we're using BMILG (in order to get O
3689 cleared) we need to store the inverse into C. */
3690 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3691 cpu_T1, bound);
3692 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T1, cpu_T1,
3693 bound, bound, cpu_T1);
3694 tcg_temp_free(bound);
3696 tcg_gen_movi_tl(cpu_A0, -1);
3697 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T1);
3698 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_A0);
3699 gen_op_mov_reg_v(ot, reg, cpu_T0);
3700 gen_op_update1_cc();
3701 set_cc_op(s, CC_OP_BMILGB + ot);
3702 break;
3704 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3705 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3706 || !(s->prefix & PREFIX_VEX)
3707 || s->vex_l != 0) {
3708 goto illegal_op;
3710 ot = mo_64_32(s->dflag);
3711 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3712 switch (ot) {
3713 default:
3714 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
3715 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3716 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3717 cpu_tmp2_i32, cpu_tmp3_i32);
3718 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3719 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3720 break;
3721 #ifdef TARGET_X86_64
3722 case MO_64:
3723 tcg_gen_mulu2_i64(cpu_T0, cpu_T1,
3724 cpu_T0, cpu_regs[R_EDX]);
3725 tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T0);
3726 tcg_gen_mov_i64(cpu_regs[reg], cpu_T1);
3727 break;
3728 #endif
3730 break;
3732 case 0x3f5: /* pdep Gy, By, Ey */
3733 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3734 || !(s->prefix & PREFIX_VEX)
3735 || s->vex_l != 0) {
3736 goto illegal_op;
3738 ot = mo_64_32(s->dflag);
3739 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3740 /* Note that by zero-extending the mask operand, we
3741 automatically handle zero-extending the result. */
3742 if (ot == MO_64) {
3743 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
3744 } else {
3745 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
3747 gen_helper_pdep(cpu_regs[reg], cpu_T0, cpu_T1);
3748 break;
3750 case 0x2f5: /* pext Gy, By, Ey */
3751 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3752 || !(s->prefix & PREFIX_VEX)
3753 || s->vex_l != 0) {
3754 goto illegal_op;
3756 ot = mo_64_32(s->dflag);
3757 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3758 /* Note that by zero-extending the mask operand, we
3759 automatically handle zero-extending the result. */
3760 if (ot == MO_64) {
3761 tcg_gen_mov_tl(cpu_T1, cpu_regs[s->vex_v]);
3762 } else {
3763 tcg_gen_ext32u_tl(cpu_T1, cpu_regs[s->vex_v]);
3765 gen_helper_pext(cpu_regs[reg], cpu_T0, cpu_T1);
3766 break;
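/* ADCX and ADOX add with a carry-in taken from CF resp. OF and write
   back only that flag, so two interleaved carry chains can be tracked
   via the CC_OP_ADCX/ADOX/ADCOX states without materializing EFLAGS. */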
3768 case 0x1f6: /* adcx Gy, Ey */
3769 case 0x2f6: /* adox Gy, Ey */
3770 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3771 goto illegal_op;
3772 } else {
3773 TCGv carry_in, carry_out, zero;
3774 int end_op;
3776 ot = mo_64_32(s->dflag);
3777 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3779 /* Re-use the carry-out from a previous round. */
3780 TCGV_UNUSED(carry_in);
3781 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3782 switch (s->cc_op) {
3783 case CC_OP_ADCX:
3784 if (b == 0x1f6) {
3785 carry_in = cpu_cc_dst;
3786 end_op = CC_OP_ADCX;
3787 } else {
3788 end_op = CC_OP_ADCOX;
3790 break;
3791 case CC_OP_ADOX:
3792 if (b == 0x1f6) {
3793 end_op = CC_OP_ADCOX;
3794 } else {
3795 carry_in = cpu_cc_src2;
3796 end_op = CC_OP_ADOX;
3798 break;
3799 case CC_OP_ADCOX:
3800 end_op = CC_OP_ADCOX;
3801 carry_in = carry_out;
3802 break;
3803 default:
3804 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3805 break;
3807 /* If we can't reuse carry-out, get it out of EFLAGS. */
3808 if (TCGV_IS_UNUSED(carry_in)) {
3809 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3810 gen_compute_eflags(s);
3812 carry_in = cpu_tmp0;
3813 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3814 ctz32(b == 0x1f6 ? CC_C : CC_O));
3815 tcg_gen_andi_tl(carry_in, carry_in, 1);
3818 switch (ot) {
3819 #ifdef TARGET_X86_64
3820 case MO_32:
3821 /* If we know TL is 64-bit, and we want a 32-bit
3822 result, just do everything in 64-bit arithmetic. */
3823 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3824 tcg_gen_ext32u_i64(cpu_T0, cpu_T0);
3825 tcg_gen_add_i64(cpu_T0, cpu_T0, cpu_regs[reg]);
3826 tcg_gen_add_i64(cpu_T0, cpu_T0, carry_in);
3827 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T0);
3828 tcg_gen_shri_i64(carry_out, cpu_T0, 32);
3829 break;
3830 #endif
3831 default:
3832 /* Otherwise compute the carry-out in two steps. */
3833 zero = tcg_const_tl(0);
3834 tcg_gen_add2_tl(cpu_T0, carry_out,
3835 cpu_T0, zero,
3836 carry_in, zero);
3837 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3838 cpu_regs[reg], carry_out,
3839 cpu_T0, zero);
3840 tcg_temp_free(zero);
3841 break;
3843 set_cc_op(s, end_op);
3845 break;
3847 case 0x1f7: /* shlx Gy, Ey, By */
3848 case 0x2f7: /* sarx Gy, Ey, By */
3849 case 0x3f7: /* shrx Gy, Ey, By */
3850 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3851 || !(s->prefix & PREFIX_VEX)
3852 || s->vex_l != 0) {
3853 goto illegal_op;
3855 ot = mo_64_32(s->dflag);
3856 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3857 if (ot == MO_64) {
3858 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 63);
3859 } else {
3860 tcg_gen_andi_tl(cpu_T1, cpu_regs[s->vex_v], 31);
3862 if (b == 0x1f7) {
3863 tcg_gen_shl_tl(cpu_T0, cpu_T0, cpu_T1);
3864 } else if (b == 0x2f7) {
3865 if (ot != MO_64) {
3866 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
3868 tcg_gen_sar_tl(cpu_T0, cpu_T0, cpu_T1);
3869 } else {
3870 if (ot != MO_64) {
3871 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
3873 tcg_gen_shr_tl(cpu_T0, cpu_T0, cpu_T1);
3875 gen_op_mov_reg_v(ot, reg, cpu_T0);
3876 break;
3878 case 0x0f3:
3879 case 0x1f3:
3880 case 0x2f3:
3881 case 0x3f3: /* Group 17 */
3882 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3883 || !(s->prefix & PREFIX_VEX)
3884 || s->vex_l != 0) {
3885 goto illegal_op;
3887 ot = mo_64_32(s->dflag);
3888 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3890 switch (reg & 7) {
3891 case 1: /* blsr By,Ey */
3892 tcg_gen_neg_tl(cpu_T1, cpu_T0);
3893 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_T1);
3894 gen_op_mov_reg_v(ot, s->vex_v, cpu_T0);
3895 gen_op_update2_cc();
3896 set_cc_op(s, CC_OP_BMILGB + ot);
3897 break;
3899 case 2: /* blsmsk By,Ey */
3900 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
3901 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
3902 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_cc_src);
3903 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
3904 set_cc_op(s, CC_OP_BMILGB + ot);
3905 break;
3907 case 3: /* blsi By, Ey */
3908 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
3909 tcg_gen_subi_tl(cpu_T0, cpu_T0, 1);
3910 tcg_gen_and_tl(cpu_T0, cpu_T0, cpu_cc_src);
3911 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
3912 set_cc_op(s, CC_OP_BMILGB + ot);
3913 break;
3915 default:
3916 goto illegal_op;
3918 break;
3920 default:
3921 goto illegal_op;
3923 break;
3925 case 0x03a:
3926 case 0x13a:
3927 b = modrm;
3928 modrm = cpu_ldub_code(env, s->pc++);
3929 rm = modrm & 7;
3930 reg = ((modrm >> 3) & 7) | rex_r;
3931 mod = (modrm >> 6) & 3;
3932 if (b1 >= 2) {
3933 goto illegal_op;
3936 sse_fn_eppi = sse_op_table7[b].op[b1];
3937 if (!sse_fn_eppi) {
3938 goto illegal_op;
3940 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3941 goto illegal_op;
3943 if (sse_fn_eppi == SSE_SPECIAL) {
3944 ot = mo_64_32(s->dflag);
3945 rm = (modrm & 7) | REX_B(s);
3946 if (mod != 3)
3947 gen_lea_modrm(env, s, modrm);
3948 reg = ((modrm >> 3) & 7) | rex_r;
3949 val = cpu_ldub_code(env, s->pc++);
3950 switch (b) {
3951 case 0x14: /* pextrb */
3952 tcg_gen_ld8u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
3953 xmm_regs[reg].ZMM_B(val & 15)));
3954 if (mod == 3) {
3955 gen_op_mov_reg_v(ot, rm, cpu_T0);
3956 } else {
3957 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
3958 s->mem_index, MO_UB);
3960 break;
3961 case 0x15: /* pextrw */
3962 tcg_gen_ld16u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
3963 xmm_regs[reg].ZMM_W(val & 7)));
3964 if (mod == 3) {
3965 gen_op_mov_reg_v(ot, rm, cpu_T0);
3966 } else {
3967 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
3968 s->mem_index, MO_LEUW);
3970 break;
3971 case 0x16:
3972 if (ot == MO_32) { /* pextrd */
3973 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3974 offsetof(CPUX86State,
3975 xmm_regs[reg].ZMM_L(val & 3)));
3976 if (mod == 3) {
3977 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
3978 } else {
3979 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
3980 s->mem_index, MO_LEUL);
3982 } else { /* pextrq */
3983 #ifdef TARGET_X86_64
3984 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3985 offsetof(CPUX86State,
3986 xmm_regs[reg].ZMM_Q(val & 1)));
3987 if (mod == 3) {
3988 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
3989 } else {
3990 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
3991 s->mem_index, MO_LEQ);
3993 #else
3994 goto illegal_op;
3995 #endif
3997 break;
3998 case 0x17: /* extractps */
3999 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4000 xmm_regs[reg].ZMM_L(val & 3)));
4001 if (mod == 3) {
4002 gen_op_mov_reg_v(ot, rm, cpu_T0);
4003 } else {
4004 tcg_gen_qemu_st_tl(cpu_T0, cpu_A0,
4005 s->mem_index, MO_LEUL);
4007 break;
4008 case 0x20: /* pinsrb */
4009 if (mod == 3) {
4010 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
4011 } else {
4012 tcg_gen_qemu_ld_tl(cpu_T0, cpu_A0,
4013 s->mem_index, MO_UB);
4015 tcg_gen_st8_tl(cpu_T0, cpu_env, offsetof(CPUX86State,
4016 xmm_regs[reg].ZMM_B(val & 15)));
4017 break;
4018 case 0x21: /* insertps */
4019 if (mod == 3) {
4020 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4021 offsetof(CPUX86State,xmm_regs[rm]
4022 .ZMM_L((val >> 6) & 3)));
4023 } else {
4024 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4025 s->mem_index, MO_LEUL);
4027 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4028 offsetof(CPUX86State,xmm_regs[reg]
4029 .ZMM_L((val >> 4) & 3)));
4030 if ((val >> 0) & 1)
4031 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4032 cpu_env, offsetof(CPUX86State,
4033 xmm_regs[reg].ZMM_L(0)));
4034 if ((val >> 1) & 1)
4035 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4036 cpu_env, offsetof(CPUX86State,
4037 xmm_regs[reg].ZMM_L(1)));
4038 if ((val >> 2) & 1)
4039 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4040 cpu_env, offsetof(CPUX86State,
4041 xmm_regs[reg].ZMM_L(2)));
4042 if ((val >> 3) & 1)
4043 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4044 cpu_env, offsetof(CPUX86State,
4045 xmm_regs[reg].ZMM_L(3)));
4046 break;
4047 case 0x22:
4048 if (ot == MO_32) { /* pinsrd */
4049 if (mod == 3) {
4050 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4051 } else {
4052 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4053 s->mem_index, MO_LEUL);
4055 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4056 offsetof(CPUX86State,
4057 xmm_regs[reg].ZMM_L(val & 3)));
4058 } else { /* pinsrq */
4059 #ifdef TARGET_X86_64
4060 if (mod == 3) {
4061 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4062 } else {
4063 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4064 s->mem_index, MO_LEQ);
4066 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4067 offsetof(CPUX86State,
4068 xmm_regs[reg].ZMM_Q(val & 1)));
4069 #else
4070 goto illegal_op;
4071 #endif
4073 break;
4075 return;
4078 if (b1) {
4079 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4080 if (mod == 3) {
4081 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4082 } else {
4083 op2_offset = offsetof(CPUX86State,xmm_t0);
4084 gen_lea_modrm(env, s, modrm);
4085 gen_ldo_env_A0(s, op2_offset);
4087 } else {
4088 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4089 if (mod == 3) {
4090 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4091 } else {
4092 op2_offset = offsetof(CPUX86State,mmx_t0);
4093 gen_lea_modrm(env, s, modrm);
4094 gen_ldq_env_A0(s, op2_offset);
4097 val = cpu_ldub_code(env, s->pc++);
4099 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4100 set_cc_op(s, CC_OP_EFLAGS);
4102 if (s->dflag == MO_64) {
4103 /* The helper must use entire 64-bit gp registers */
4104 val |= 1 << 8;
4108 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4109 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4110 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4111 break;
4113 case 0x33a:
4114 /* Various integer extensions at 0f 3a f[0-f]. */
4115 b = modrm | (b1 << 8);
4116 modrm = cpu_ldub_code(env, s->pc++);
4117 reg = ((modrm >> 3) & 7) | rex_r;
4119 switch (b) {
4120 case 0x3f0: /* rorx Gy,Ey, Ib */
4121 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4122 || !(s->prefix & PREFIX_VEX)
4123 || s->vex_l != 0) {
4124 goto illegal_op;
4126 ot = mo_64_32(s->dflag);
4127 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4128 b = cpu_ldub_code(env, s->pc++);
4129 if (ot == MO_64) {
4130 tcg_gen_rotri_tl(cpu_T0, cpu_T0, b & 63);
4131 } else {
4132 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4133 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4134 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
4136 gen_op_mov_reg_v(ot, reg, cpu_T0);
4137 break;
4139 default:
4140 goto illegal_op;
4142 break;
4144 default:
4145 goto illegal_op;
4147 } else {
4148 /* generic MMX or SSE operation */
4149 switch(b) {
4150 case 0x70: /* pshufx insn */
4151 case 0xc6: /* pshufx insn */
4152 case 0xc2: /* compare insns */
4153 s->rip_offset = 1;
4154 break;
4155 default:
4156 break;
4158 if (is_xmm) {
4159 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4160 if (mod != 3) {
4161 int sz = 4;
4163 gen_lea_modrm(env, s, modrm);
4164 op2_offset = offsetof(CPUX86State,xmm_t0);
4166 switch (b) {
4167 case 0x50 ... 0x5a:
4168 case 0x5c ... 0x5f:
4169 case 0xc2:
4170 /* Most sse scalar operations. */
4171 if (b1 == 2) {
4172 sz = 2;
4173 } else if (b1 == 3) {
4174 sz = 3;
4176 break;
4178 case 0x2e: /* ucomis[sd] */
4179 case 0x2f: /* comis[sd] */
4180 if (b1 == 0) {
4181 sz = 2;
4182 } else {
4183 sz = 3;
4185 break;
4188 switch (sz) {
4189 case 2:
4190 /* 32 bit access */
4191 gen_op_ld_v(s, MO_32, cpu_T0, cpu_A0);
4192 tcg_gen_st32_tl(cpu_T0, cpu_env,
4193 offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
4194 break;
4195 case 3:
4196 /* 64 bit access */
4197 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
4198 break;
4199 default:
4200 /* 128 bit access */
4201 gen_ldo_env_A0(s, op2_offset);
4202 break;
4204 } else {
4205 rm = (modrm & 7) | REX_B(s);
4206 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4208 } else {
4209 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4210 if (mod != 3) {
4211 gen_lea_modrm(env, s, modrm);
4212 op2_offset = offsetof(CPUX86State,mmx_t0);
4213 gen_ldq_env_A0(s, op2_offset);
4214 } else {
4215 rm = (modrm & 7);
4216 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4219 switch(b) {
4220 case 0x0f: /* 3DNow! data insns */
4221 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4222 goto illegal_op;
4223 val = cpu_ldub_code(env, s->pc++);
4224 sse_fn_epp = sse_op_table5[val];
4225 if (!sse_fn_epp) {
4226 goto illegal_op;
4228 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4229 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4230 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4231 break;
4232 case 0x70: /* pshufx insn */
4233 case 0xc6: /* pshufx insn */
4234 val = cpu_ldub_code(env, s->pc++);
4235 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4236 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4237 /* XXX: introduce a new table? */
4238 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4239 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4240 break;
4241 case 0xc2:
4242 /* compare insns */
4243 val = cpu_ldub_code(env, s->pc++);
4244 if (val >= 8)
4245 goto illegal_op;
4246 sse_fn_epp = sse_op_table4[val][b1];
4248 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4249 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4250 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4251 break;
4252 case 0xf7:
4253 /* maskmov : we must prepare A0 */
4254 if (mod != 3)
4255 goto illegal_op;
4256 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4257 gen_extu(s->aflag, cpu_A0);
4258 gen_add_A0_ds_seg(s);
4260 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4261 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4262 /* XXX: introduce a new table? */
4263 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4264 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4265 break;
4266 default:
4267 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4268 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4269 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4270 break;
4272 if (b == 0x2e || b == 0x2f) {
4273 set_cc_op(s, CC_OP_EFLAGS);
4278 /* convert one instruction. s->is_jmp is set if the translation must
4279 be stopped. Return the next pc value */
4280 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4281 target_ulong pc_start)
4283 int b, prefixes;
4284 int shift;
4285 TCGMemOp ot, aflag, dflag;
4286 int modrm, reg, rm, mod, op, opreg, val;
4287 target_ulong next_eip, tval;
4288 int rex_w, rex_r;
4290 s->pc = pc_start;
4291 prefixes = 0;
4292 s->override = -1;
4293 rex_w = -1;
4294 rex_r = 0;
4295 #ifdef TARGET_X86_64
4296 s->rex_x = 0;
4297 s->rex_b = 0;
4298 x86_64_hregs = 0;
4299 #endif
4300 s->rip_offset = 0; /* for relative ip address */
4301 s->vex_l = 0;
4302 s->vex_v = 0;
4303 next_byte:
4304 b = cpu_ldub_code(env, s->pc);
4305 s->pc++;
4306 /* Collect prefixes. */
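/* Legacy prefixes may appear in any order and in any number; each one is
   simply OR'ed into 'prefixes' (or recorded in s->override) and decoding
   restarts at next_byte until a non-prefix byte is found.  For example,
   in a 32-bit code segment the bytes 66 01 d8 are collected here as
   PREFIX_DATA followed by opcode 0x01 (add Ev,Gv), i.e. "add ax,bx". */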
4307 switch (b) {
4308 case 0xf3:
4309 prefixes |= PREFIX_REPZ;
4310 goto next_byte;
4311 case 0xf2:
4312 prefixes |= PREFIX_REPNZ;
4313 goto next_byte;
4314 case 0xf0:
4315 prefixes |= PREFIX_LOCK;
4316 goto next_byte;
4317 case 0x2e:
4318 s->override = R_CS;
4319 goto next_byte;
4320 case 0x36:
4321 s->override = R_SS;
4322 goto next_byte;
4323 case 0x3e:
4324 s->override = R_DS;
4325 goto next_byte;
4326 case 0x26:
4327 s->override = R_ES;
4328 goto next_byte;
4329 case 0x64:
4330 s->override = R_FS;
4331 goto next_byte;
4332 case 0x65:
4333 s->override = R_GS;
4334 goto next_byte;
4335 case 0x66:
4336 prefixes |= PREFIX_DATA;
4337 goto next_byte;
4338 case 0x67:
4339 prefixes |= PREFIX_ADR;
4340 goto next_byte;
4341 #ifdef TARGET_X86_64
4342 case 0x40 ... 0x4f:
4343 if (CODE64(s)) {
4344 /* REX prefix */
4345 rex_w = (b >> 3) & 1;
4346 rex_r = (b & 0x4) << 1;
4347 s->rex_x = (b & 0x2) << 2;
4348 REX_B(s) = (b & 0x1) << 3;
4349 x86_64_hregs = 1; /* select uniform byte register addressing */
4350 goto next_byte;
4352 break;
4353 #endif
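/* The REX fields are pre-shifted so that R, X and B can be OR'ed directly
   into the reg, index and base numbers taken from the ModRM and SIB bytes,
   extending each 3-bit field to 4 bits.  For example, 48 89 c3 decodes as
   REX.W plus opcode 0x89 with ModRM 0xc3, i.e. "mov rbx,rax". */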
4354 case 0xc5: /* 2-byte VEX */
4355 case 0xc4: /* 3-byte VEX */
4356 /* VEX prefixes are only recognised in 32-bit and 64-bit code segments
4357 (never in vm86 mode); otherwise 0xc4/0xc5 decode as LES or LDS. */
4358 if (s->code32 && !s->vm86) {
4359 static const int pp_prefix[4] = {
4360 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4362 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4364 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4365 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4366 otherwise the instruction is LES or LDS. */
4367 break;
4369 s->pc++;
4371 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4372 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4373 | PREFIX_LOCK | PREFIX_DATA)) {
4374 goto illegal_op;
4376 #ifdef TARGET_X86_64
4377 if (x86_64_hregs) {
4378 goto illegal_op;
4380 #endif
4381 rex_r = (~vex2 >> 4) & 8;
4382 if (b == 0xc5) {
4383 vex3 = vex2;
4384 b = cpu_ldub_code(env, s->pc++) | 0x100;
4385 } else {
4386 #ifdef TARGET_X86_64
4387 s->rex_x = (~vex2 >> 3) & 8;
4388 s->rex_b = (~vex2 >> 2) & 8;
4389 #endif
4390 vex3 = cpu_ldub_code(env, s->pc++);
4391 rex_w = (vex3 >> 7) & 1;
4392 switch (vex2 & 0x1f) {
4393 case 0x01: /* Implied 0f leading opcode bytes. */
4394 b = cpu_ldub_code(env, s->pc++) | 0x100;
4395 break;
4396 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4397 b = 0x138;
4398 break;
4399 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4400 b = 0x13a;
4401 break;
4402 default: /* Reserved for future use. */
4403 goto illegal_op;
4406 s->vex_v = (~vex3 >> 3) & 0xf;
4407 s->vex_l = (vex3 >> 2) & 1;
4408 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4410 break;
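/* The VEX payload stores R/X/B and the vvvv operand specifier inverted,
   hence the '~' above when they are recovered.  As an example, c5 f1 ef c2
   is a 2-byte VEX prefix with vvvv=0001 (xmm1), L=0 and pp=01 (66-style),
   followed by opcode 0xef in the implied 0f map: "vpxor xmm0,xmm1,xmm2". */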
4413 /* Post-process prefixes. */
4414 if (CODE64(s)) {
4415 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4416 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4417 over 0x66 if both are present. */
4418 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4419 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4420 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4421 } else {
4422 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4423 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4424 dflag = MO_32;
4425 } else {
4426 dflag = MO_16;
4428 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4429 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4430 aflag = MO_32;
4431 } else {
4432 aflag = MO_16;
4436 s->prefix = prefixes;
4437 s->aflag = aflag;
4438 s->dflag = dflag;
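/* With the rules above, in a 64-bit code segment "66 05 imm16" is a 16-bit
   "add ax,imm16" and "48 05 imm32" is a 64-bit "add rax,imm32"; if both 66
   and REX.W are present, rex_w wins.  A 67 prefix in 64-bit mode only drops
   the address size from 64 to 32 bits. */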
4440 /* lock generation */
4441 if (prefixes & PREFIX_LOCK)
4442 gen_helper_lock();
4444 /* now check op code */
4445 reswitch:
4446 switch(b) {
4447 case 0x0f:
4448 /**************************/
4449 /* extended op code */
4450 b = cpu_ldub_code(env, s->pc++) | 0x100;
4451 goto reswitch;
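/* Two-byte (0f xx) opcodes are dispatched through this same switch with
   0x100 added to the opcode byte, so e.g. "0f af" (imul Gv,Ev) is handled
   below as case 0x1af. */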
4453 /**************************/
4454 /* arith & logic */
4455 case 0x00 ... 0x05:
4456 case 0x08 ... 0x0d:
4457 case 0x10 ... 0x15:
4458 case 0x18 ... 0x1d:
4459 case 0x20 ... 0x25:
4460 case 0x28 ... 0x2d:
4461 case 0x30 ... 0x35:
4462 case 0x38 ... 0x3d:
4464 int op, f, val;
4465 op = (b >> 3) & 7;
4466 f = (b >> 1) & 3;
4468 ot = mo_b_d(b, dflag);
4470 switch(f) {
4471 case 0: /* OP Ev, Gv */
4472 modrm = cpu_ldub_code(env, s->pc++);
4473 reg = ((modrm >> 3) & 7) | rex_r;
4474 mod = (modrm >> 6) & 3;
4475 rm = (modrm & 7) | REX_B(s);
4476 if (mod != 3) {
4477 gen_lea_modrm(env, s, modrm);
4478 opreg = OR_TMP0;
4479 } else if (op == OP_XORL && rm == reg) {
4480 xor_zero:
4481 /* xor reg, reg optimisation */
4482 set_cc_op(s, CC_OP_CLR);
4483 tcg_gen_movi_tl(cpu_T0, 0);
4484 gen_op_mov_reg_v(ot, reg, cpu_T0);
4485 break;
4486 } else {
4487 opreg = rm;
4489 gen_op_mov_v_reg(ot, cpu_T1, reg);
4490 gen_op(s, op, ot, opreg);
4491 break;
4492 case 1: /* OP Gv, Ev */
4493 modrm = cpu_ldub_code(env, s->pc++);
4494 mod = (modrm >> 6) & 3;
4495 reg = ((modrm >> 3) & 7) | rex_r;
4496 rm = (modrm & 7) | REX_B(s);
4497 if (mod != 3) {
4498 gen_lea_modrm(env, s, modrm);
4499 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
4500 } else if (op == OP_XORL && rm == reg) {
4501 goto xor_zero;
4502 } else {
4503 gen_op_mov_v_reg(ot, cpu_T1, rm);
4505 gen_op(s, op, ot, reg);
4506 break;
4507 case 2: /* OP A, Iv */
4508 val = insn_get(env, s, ot);
4509 tcg_gen_movi_tl(cpu_T1, val);
4510 gen_op(s, op, ot, OR_EAX);
4511 break;
4514 break;
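/* For these ALU opcodes, bits 5:3 select the operation (add, or, adc, sbb,
   and, sub, xor, cmp) and bits 2:1 select the form (Ev,Gv / Gv,Ev / A,Iv).
   E.g. 0x31 is "xor Ev,Gv"; with ModRM 0xc0 it takes the xor_zero fast
   path above, which just clears the register and sets CC_OP_CLR. */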
4516 case 0x82:
4517 if (CODE64(s))
4518 goto illegal_op;
4519 case 0x80: /* GRP1 */
4520 case 0x81:
4521 case 0x83:
4523 int val;
4525 ot = mo_b_d(b, dflag);
4527 modrm = cpu_ldub_code(env, s->pc++);
4528 mod = (modrm >> 6) & 3;
4529 rm = (modrm & 7) | REX_B(s);
4530 op = (modrm >> 3) & 7;
4532 if (mod != 3) {
4533 if (b == 0x83)
4534 s->rip_offset = 1;
4535 else
4536 s->rip_offset = insn_const_size(ot);
4537 gen_lea_modrm(env, s, modrm);
4538 opreg = OR_TMP0;
4539 } else {
4540 opreg = rm;
4543 switch(b) {
4544 default:
4545 case 0x80:
4546 case 0x81:
4547 case 0x82:
4548 val = insn_get(env, s, ot);
4549 break;
4550 case 0x83:
4551 val = (int8_t)insn_get(env, s, MO_8);
4552 break;
4554 tcg_gen_movi_tl(cpu_T1, val);
4555 gen_op(s, op, ot, opreg);
4557 break;
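/* Opcode 0x83 uses a sign-extended 8-bit immediate, which is why its
   rip_offset and insn_get handling differ from 0x81.  For instance,
   "83 c0 ff" is "add eax,-1", not "add eax,0xff". */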
4559 /**************************/
4560 /* inc, dec, and other misc arith */
4561 case 0x40 ... 0x47: /* inc Gv */
4562 ot = dflag;
4563 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4564 break;
4565 case 0x48 ... 0x4f: /* dec Gv */
4566 ot = dflag;
4567 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4568 break;
4569 case 0xf6: /* GRP3 */
4570 case 0xf7:
4571 ot = mo_b_d(b, dflag);
4573 modrm = cpu_ldub_code(env, s->pc++);
4574 mod = (modrm >> 6) & 3;
4575 rm = (modrm & 7) | REX_B(s);
4576 op = (modrm >> 3) & 7;
4577 if (mod != 3) {
4578 if (op == 0)
4579 s->rip_offset = insn_const_size(ot);
4580 gen_lea_modrm(env, s, modrm);
4581 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
4582 } else {
4583 gen_op_mov_v_reg(ot, cpu_T0, rm);
4586 switch(op) {
4587 case 0: /* test */
4588 val = insn_get(env, s, ot);
4589 tcg_gen_movi_tl(cpu_T1, val);
4590 gen_op_testl_T0_T1_cc();
4591 set_cc_op(s, CC_OP_LOGICB + ot);
4592 break;
4593 case 2: /* not */
4594 tcg_gen_not_tl(cpu_T0, cpu_T0);
4595 if (mod != 3) {
4596 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
4597 } else {
4598 gen_op_mov_reg_v(ot, rm, cpu_T0);
4600 break;
4601 case 3: /* neg */
4602 tcg_gen_neg_tl(cpu_T0, cpu_T0);
4603 if (mod != 3) {
4604 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
4605 } else {
4606 gen_op_mov_reg_v(ot, rm, cpu_T0);
4608 gen_op_update_neg_cc();
4609 set_cc_op(s, CC_OP_SUBB + ot);
4610 break;
4611 case 4: /* mul */
4612 switch(ot) {
4613 case MO_8:
4614 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4615 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
4616 tcg_gen_ext8u_tl(cpu_T1, cpu_T1);
4617 /* XXX: use 32 bit mul which could be faster */
4618 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4619 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4620 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4621 tcg_gen_andi_tl(cpu_cc_src, cpu_T0, 0xff00);
4622 set_cc_op(s, CC_OP_MULB);
4623 break;
4624 case MO_16:
4625 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4626 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4627 tcg_gen_ext16u_tl(cpu_T1, cpu_T1);
4628 /* XXX: use 32 bit mul which could be faster */
4629 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4630 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4631 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4632 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4633 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4634 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
4635 set_cc_op(s, CC_OP_MULW);
4636 break;
4637 default:
4638 case MO_32:
4639 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4640 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4641 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4642 cpu_tmp2_i32, cpu_tmp3_i32);
4643 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4644 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4645 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4646 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4647 set_cc_op(s, CC_OP_MULL);
4648 break;
4649 #ifdef TARGET_X86_64
4650 case MO_64:
4651 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4652 cpu_T0, cpu_regs[R_EAX]);
4653 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4654 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4655 set_cc_op(s, CC_OP_MULQ);
4656 break;
4657 #endif
4659 break;
4660 case 5: /* imul */
4661 switch(ot) {
4662 case MO_8:
4663 gen_op_mov_v_reg(MO_8, cpu_T1, R_EAX);
4664 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4665 tcg_gen_ext8s_tl(cpu_T1, cpu_T1);
4666 /* XXX: use 32 bit mul which could be faster */
4667 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4668 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4669 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4670 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T0);
4671 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4672 set_cc_op(s, CC_OP_MULB);
4673 break;
4674 case MO_16:
4675 gen_op_mov_v_reg(MO_16, cpu_T1, R_EAX);
4676 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4677 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
4678 /* XXX: use 32 bit mul which could be faster */
4679 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4680 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4681 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4682 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
4683 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4684 tcg_gen_shri_tl(cpu_T0, cpu_T0, 16);
4685 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4686 set_cc_op(s, CC_OP_MULW);
4687 break;
4688 default:
4689 case MO_32:
4690 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4691 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4692 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4693 cpu_tmp2_i32, cpu_tmp3_i32);
4694 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4695 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4696 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4697 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4698 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4699 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4700 set_cc_op(s, CC_OP_MULL);
4701 break;
4702 #ifdef TARGET_X86_64
4703 case MO_64:
4704 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4705 cpu_T0, cpu_regs[R_EAX]);
4706 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4707 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4708 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4709 set_cc_op(s, CC_OP_MULQ);
4710 break;
4711 #endif
4713 break;
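/* For both mul and imul the generated code leaves the overflow information
   in cc_src: the high half of the product for mul, and (full result minus
   the sign-extended truncated result) for imul.  The CC_OP_MULB..MULQ modes
   then report CF=OF set exactly when cc_src is non-zero, i.e. when the
   product does not fit in the destination width. */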
4714 case 6: /* div */
4715 switch(ot) {
4716 case MO_8:
4717 gen_helper_divb_AL(cpu_env, cpu_T0);
4718 break;
4719 case MO_16:
4720 gen_helper_divw_AX(cpu_env, cpu_T0);
4721 break;
4722 default:
4723 case MO_32:
4724 gen_helper_divl_EAX(cpu_env, cpu_T0);
4725 break;
4726 #ifdef TARGET_X86_64
4727 case MO_64:
4728 gen_helper_divq_EAX(cpu_env, cpu_T0);
4729 break;
4730 #endif
4732 break;
4733 case 7: /* idiv */
4734 switch(ot) {
4735 case MO_8:
4736 gen_helper_idivb_AL(cpu_env, cpu_T0);
4737 break;
4738 case MO_16:
4739 gen_helper_idivw_AX(cpu_env, cpu_T0);
4740 break;
4741 default:
4742 case MO_32:
4743 gen_helper_idivl_EAX(cpu_env, cpu_T0);
4744 break;
4745 #ifdef TARGET_X86_64
4746 case MO_64:
4747 gen_helper_idivq_EAX(cpu_env, cpu_T0);
4748 break;
4749 #endif
4751 break;
4752 default:
4753 goto illegal_op;
4755 break;
4757 case 0xfe: /* GRP4 */
4758 case 0xff: /* GRP5 */
4759 ot = mo_b_d(b, dflag);
4761 modrm = cpu_ldub_code(env, s->pc++);
4762 mod = (modrm >> 6) & 3;
4763 rm = (modrm & 7) | REX_B(s);
4764 op = (modrm >> 3) & 7;
4765 if (op >= 2 && b == 0xfe) {
4766 goto illegal_op;
4768 if (CODE64(s)) {
4769 if (op == 2 || op == 4) {
4770 /* operand size for jumps is 64 bit */
4771 ot = MO_64;
4772 } else if (op == 3 || op == 5) {
4773 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4774 } else if (op == 6) {
4775 /* default push size is 64 bit */
4776 ot = mo_pushpop(s, dflag);
4779 if (mod != 3) {
4780 gen_lea_modrm(env, s, modrm);
4781 if (op >= 2 && op != 3 && op != 5)
4782 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
4783 } else {
4784 gen_op_mov_v_reg(ot, cpu_T0, rm);
4787 switch(op) {
4788 case 0: /* inc Ev */
4789 if (mod != 3)
4790 opreg = OR_TMP0;
4791 else
4792 opreg = rm;
4793 gen_inc(s, ot, opreg, 1);
4794 break;
4795 case 1: /* dec Ev */
4796 if (mod != 3)
4797 opreg = OR_TMP0;
4798 else
4799 opreg = rm;
4800 gen_inc(s, ot, opreg, -1);
4801 break;
4802 case 2: /* call Ev */
4803 /* XXX: optimize if memory (no 'and' is necessary) */
4804 if (dflag == MO_16) {
4805 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4807 next_eip = s->pc - s->cs_base;
4808 tcg_gen_movi_tl(cpu_T1, next_eip);
4809 gen_push_v(s, cpu_T1);
4810 gen_op_jmp_v(cpu_T0);
4811 gen_bnd_jmp(s);
4812 gen_eob(s);
4813 break;
4814 case 3: /* lcall Ev */
4815 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
4816 gen_add_A0_im(s, 1 << ot);
4817 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
4818 do_lcall:
4819 if (s->pe && !s->vm86) {
4820 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4821 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
4822 tcg_const_i32(dflag - 1),
4823 tcg_const_tl(s->pc - s->cs_base));
4824 } else {
4825 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4826 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T1,
4827 tcg_const_i32(dflag - 1),
4828 tcg_const_i32(s->pc - s->cs_base));
4830 gen_eob(s);
4831 break;
4832 case 4: /* jmp Ev */
4833 if (dflag == MO_16) {
4834 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
4836 gen_op_jmp_v(cpu_T0);
4837 gen_bnd_jmp(s);
4838 gen_eob(s);
4839 break;
4840 case 5: /* ljmp Ev */
4841 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
4842 gen_add_A0_im(s, 1 << ot);
4843 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
4844 do_ljmp:
4845 if (s->pe && !s->vm86) {
4846 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4847 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T1,
4848 tcg_const_tl(s->pc - s->cs_base));
4849 } else {
4850 gen_op_movl_seg_T0_vm(R_CS);
4851 gen_op_jmp_v(cpu_T1);
4853 gen_eob(s);
4854 break;
4855 case 6: /* push Ev */
4856 gen_push_v(s, cpu_T0);
4857 break;
4858 default:
4859 goto illegal_op;
4861 break;
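/* The reg field of the ModRM byte selects the GRP4/GRP5 operation, e.g.
   "ff d0" is "call *%eax" (op=2), "ff e0" is "jmp *%eax" (op=4) and
   "ff 30" is "push dword ptr [eax]" (op=6). */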
4863 case 0x84: /* test Ev, Gv */
4864 case 0x85:
4865 ot = mo_b_d(b, dflag);
4867 modrm = cpu_ldub_code(env, s->pc++);
4868 reg = ((modrm >> 3) & 7) | rex_r;
4870 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4871 gen_op_mov_v_reg(ot, cpu_T1, reg);
4872 gen_op_testl_T0_T1_cc();
4873 set_cc_op(s, CC_OP_LOGICB + ot);
4874 break;
4876 case 0xa8: /* test eAX, Iv */
4877 case 0xa9:
4878 ot = mo_b_d(b, dflag);
4879 val = insn_get(env, s, ot);
4881 gen_op_mov_v_reg(ot, cpu_T0, OR_EAX);
4882 tcg_gen_movi_tl(cpu_T1, val);
4883 gen_op_testl_T0_T1_cc();
4884 set_cc_op(s, CC_OP_LOGICB + ot);
4885 break;
4887 case 0x98: /* CWDE/CBW */
4888 switch (dflag) {
4889 #ifdef TARGET_X86_64
4890 case MO_64:
4891 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
4892 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4893 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T0);
4894 break;
4895 #endif
4896 case MO_32:
4897 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
4898 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4899 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T0);
4900 break;
4901 case MO_16:
4902 gen_op_mov_v_reg(MO_8, cpu_T0, R_EAX);
4903 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
4904 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
4905 break;
4906 default:
4907 tcg_abort();
4909 break;
4910 case 0x99: /* CDQ/CWD */
4911 switch (dflag) {
4912 #ifdef TARGET_X86_64
4913 case MO_64:
4914 gen_op_mov_v_reg(MO_64, cpu_T0, R_EAX);
4915 tcg_gen_sari_tl(cpu_T0, cpu_T0, 63);
4916 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T0);
4917 break;
4918 #endif
4919 case MO_32:
4920 gen_op_mov_v_reg(MO_32, cpu_T0, R_EAX);
4921 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
4922 tcg_gen_sari_tl(cpu_T0, cpu_T0, 31);
4923 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T0);
4924 break;
4925 case MO_16:
4926 gen_op_mov_v_reg(MO_16, cpu_T0, R_EAX);
4927 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4928 tcg_gen_sari_tl(cpu_T0, cpu_T0, 15);
4929 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T0);
4930 break;
4931 default:
4932 tcg_abort();
4934 break;
4935 case 0x1af: /* imul Gv, Ev */
4936 case 0x69: /* imul Gv, Ev, I */
4937 case 0x6b:
4938 ot = dflag;
4939 modrm = cpu_ldub_code(env, s->pc++);
4940 reg = ((modrm >> 3) & 7) | rex_r;
4941 if (b == 0x69)
4942 s->rip_offset = insn_const_size(ot);
4943 else if (b == 0x6b)
4944 s->rip_offset = 1;
4945 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4946 if (b == 0x69) {
4947 val = insn_get(env, s, ot);
4948 tcg_gen_movi_tl(cpu_T1, val);
4949 } else if (b == 0x6b) {
4950 val = (int8_t)insn_get(env, s, MO_8);
4951 tcg_gen_movi_tl(cpu_T1, val);
4952 } else {
4953 gen_op_mov_v_reg(ot, cpu_T1, reg);
4955 switch (ot) {
4956 #ifdef TARGET_X86_64
4957 case MO_64:
4958 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T1, cpu_T0, cpu_T1);
4959 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
4960 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
4961 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T1);
4962 break;
4963 #endif
4964 case MO_32:
4965 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
4966 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
4967 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4968 cpu_tmp2_i32, cpu_tmp3_i32);
4969 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
4970 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4971 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
4972 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4973 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4974 break;
4975 default:
4976 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
4977 tcg_gen_ext16s_tl(cpu_T1, cpu_T1);
4978 /* XXX: use 32 bit mul which could be faster */
4979 tcg_gen_mul_tl(cpu_T0, cpu_T0, cpu_T1);
4980 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
4981 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T0);
4982 tcg_gen_sub_tl(cpu_cc_src, cpu_T0, cpu_tmp0);
4983 gen_op_mov_reg_v(ot, reg, cpu_T0);
4984 break;
4986 set_cc_op(s, CC_OP_MULB + ot);
4987 break;
4988 case 0x1c0:
4989 case 0x1c1: /* xadd Ev, Gv */
4990 ot = mo_b_d(b, dflag);
4991 modrm = cpu_ldub_code(env, s->pc++);
4992 reg = ((modrm >> 3) & 7) | rex_r;
4993 mod = (modrm >> 6) & 3;
4994 if (mod == 3) {
4995 rm = (modrm & 7) | REX_B(s);
4996 gen_op_mov_v_reg(ot, cpu_T0, reg);
4997 gen_op_mov_v_reg(ot, cpu_T1, rm);
4998 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
4999 gen_op_mov_reg_v(ot, reg, cpu_T1);
5000 gen_op_mov_reg_v(ot, rm, cpu_T0);
5001 } else {
5002 gen_lea_modrm(env, s, modrm);
5003 gen_op_mov_v_reg(ot, cpu_T0, reg);
5004 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5005 tcg_gen_add_tl(cpu_T0, cpu_T0, cpu_T1);
5006 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5007 gen_op_mov_reg_v(ot, reg, cpu_T1);
5009 gen_op_update2_cc();
5010 set_cc_op(s, CC_OP_ADDB + ot);
5011 break;
5012 case 0x1b0:
5013 case 0x1b1: /* cmpxchg Ev, Gv */
5015 TCGLabel *label1, *label2;
5016 TCGv t0, t1, t2, a0;
5018 ot = mo_b_d(b, dflag);
5019 modrm = cpu_ldub_code(env, s->pc++);
5020 reg = ((modrm >> 3) & 7) | rex_r;
5021 mod = (modrm >> 6) & 3;
5022 t0 = tcg_temp_local_new();
5023 t1 = tcg_temp_local_new();
5024 t2 = tcg_temp_local_new();
5025 a0 = tcg_temp_local_new();
5026 gen_op_mov_v_reg(ot, t1, reg);
5027 if (mod == 3) {
5028 rm = (modrm & 7) | REX_B(s);
5029 gen_op_mov_v_reg(ot, t0, rm);
5030 } else {
5031 gen_lea_modrm(env, s, modrm);
5032 tcg_gen_mov_tl(a0, cpu_A0);
5033 gen_op_ld_v(s, ot, t0, a0);
5034 rm = 0; /* avoid warning */
5036 label1 = gen_new_label();
5037 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5038 gen_extu(ot, t0);
5039 gen_extu(ot, t2);
5040 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5041 label2 = gen_new_label();
5042 if (mod == 3) {
5043 gen_op_mov_reg_v(ot, R_EAX, t0);
5044 tcg_gen_br(label2);
5045 gen_set_label(label1);
5046 gen_op_mov_reg_v(ot, rm, t1);
5047 } else {
5048 /* perform no-op store cycle like physical cpu; must be
5049 before changing accumulator to ensure idempotency if
5050 the store faults and the instruction is restarted */
5051 gen_op_st_v(s, ot, t0, a0);
5052 gen_op_mov_reg_v(ot, R_EAX, t0);
5053 tcg_gen_br(label2);
5054 gen_set_label(label1);
5055 gen_op_st_v(s, ot, t1, a0);
5057 gen_set_label(label2);
5058 tcg_gen_mov_tl(cpu_cc_src, t0);
5059 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5060 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5061 set_cc_op(s, CC_OP_SUBB + ot);
5062 tcg_temp_free(t0);
5063 tcg_temp_free(t1);
5064 tcg_temp_free(t2);
5065 tcg_temp_free(a0);
5067 break;
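/* cmpxchg: the accumulator is compared with the destination; on a match the
   source value (t1) is stored, otherwise the old destination value is loaded
   into EAX.  The memory form always performs a store (writing back the old
   value on failure) so that a faulting store leaves the accumulator
   untouched and the instruction can be restarted; the flags are set as for
   "cmp eAX,dest" via CC_OP_SUBB + ot. */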
5068 case 0x1c7: /* cmpxchg8b */
5069 modrm = cpu_ldub_code(env, s->pc++);
5070 mod = (modrm >> 6) & 3;
5071 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5072 goto illegal_op;
5073 #ifdef TARGET_X86_64
5074 if (dflag == MO_64) {
5075 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5076 goto illegal_op;
5077 gen_lea_modrm(env, s, modrm);
5078 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5079 } else
5080 #endif
5082 if (!(s->cpuid_features & CPUID_CX8))
5083 goto illegal_op;
5084 gen_lea_modrm(env, s, modrm);
5085 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5087 set_cc_op(s, CC_OP_EFLAGS);
5088 break;
5090 /**************************/
5091 /* push/pop */
5092 case 0x50 ... 0x57: /* push */
5093 gen_op_mov_v_reg(MO_32, cpu_T0, (b & 7) | REX_B(s));
5094 gen_push_v(s, cpu_T0);
5095 break;
5096 case 0x58 ... 0x5f: /* pop */
5097 ot = gen_pop_T0(s);
5098 /* NOTE: order is important for pop %sp */
5099 gen_pop_update(s, ot);
5100 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T0);
5101 break;
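/* The stack-pointer adjustment is done before the register write so that
   "pop %sp" / "pop %esp" ends up holding the popped value rather than the
   incremented stack pointer, matching hardware behaviour. */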
5102 case 0x60: /* pusha */
5103 if (CODE64(s))
5104 goto illegal_op;
5105 gen_pusha(s);
5106 break;
5107 case 0x61: /* popa */
5108 if (CODE64(s))
5109 goto illegal_op;
5110 gen_popa(s);
5111 break;
5112 case 0x68: /* push Iv */
5113 case 0x6a:
5114 ot = mo_pushpop(s, dflag);
5115 if (b == 0x68)
5116 val = insn_get(env, s, ot);
5117 else
5118 val = (int8_t)insn_get(env, s, MO_8);
5119 tcg_gen_movi_tl(cpu_T0, val);
5120 gen_push_v(s, cpu_T0);
5121 break;
5122 case 0x8f: /* pop Ev */
5123 modrm = cpu_ldub_code(env, s->pc++);
5124 mod = (modrm >> 6) & 3;
5125 ot = gen_pop_T0(s);
5126 if (mod == 3) {
5127 /* NOTE: order is important for pop %sp */
5128 gen_pop_update(s, ot);
5129 rm = (modrm & 7) | REX_B(s);
5130 gen_op_mov_reg_v(ot, rm, cpu_T0);
5131 } else {
5132 /* NOTE: order is important too for MMU exceptions */
5133 s->popl_esp_hack = 1 << ot;
5134 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5135 s->popl_esp_hack = 0;
5136 gen_pop_update(s, ot);
5138 break;
5139 case 0xc8: /* enter */
5141 int level;
5142 val = cpu_lduw_code(env, s->pc);
5143 s->pc += 2;
5144 level = cpu_ldub_code(env, s->pc++);
5145 gen_enter(s, val, level);
5147 break;
5148 case 0xc9: /* leave */
5149 gen_leave(s);
5150 break;
5151 case 0x06: /* push es */
5152 case 0x0e: /* push cs */
5153 case 0x16: /* push ss */
5154 case 0x1e: /* push ds */
5155 if (CODE64(s))
5156 goto illegal_op;
5157 gen_op_movl_T0_seg(b >> 3);
5158 gen_push_v(s, cpu_T0);
5159 break;
5160 case 0x1a0: /* push fs */
5161 case 0x1a8: /* push gs */
5162 gen_op_movl_T0_seg((b >> 3) & 7);
5163 gen_push_v(s, cpu_T0);
5164 break;
5165 case 0x07: /* pop es */
5166 case 0x17: /* pop ss */
5167 case 0x1f: /* pop ds */
5168 if (CODE64(s))
5169 goto illegal_op;
5170 reg = b >> 3;
5171 ot = gen_pop_T0(s);
5172 gen_movl_seg_T0(s, reg);
5173 gen_pop_update(s, ot);
5174 if (reg == R_SS) {
5175 /* if reg == SS, inhibit interrupts/trace. */
5176 /* If several instructions disable interrupts, only the
5177 _first_ does it */
5178 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
5179 s->tf = 0;
5181 if (s->is_jmp) {
5182 gen_jmp_im(s->pc - s->cs_base);
5183 gen_eob(s);
5185 break;
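/* Loading SS sets HF_INHIBIT_IRQ_MASK, the one-instruction interrupt
   shadow that makes sequences such as "pop ss; mov sp,bp" effectively
   atomic with respect to external interrupts and single-step traps. */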
5186 case 0x1a1: /* pop fs */
5187 case 0x1a9: /* pop gs */
5188 ot = gen_pop_T0(s);
5189 gen_movl_seg_T0(s, (b >> 3) & 7);
5190 gen_pop_update(s, ot);
5191 if (s->is_jmp) {
5192 gen_jmp_im(s->pc - s->cs_base);
5193 gen_eob(s);
5195 break;
5197 /**************************/
5198 /* mov */
5199 case 0x88:
5200 case 0x89: /* mov Gv, Ev */
5201 ot = mo_b_d(b, dflag);
5202 modrm = cpu_ldub_code(env, s->pc++);
5203 reg = ((modrm >> 3) & 7) | rex_r;
5205 /* generate a generic store */
5206 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5207 break;
5208 case 0xc6:
5209 case 0xc7: /* mov Ev, Iv */
5210 ot = mo_b_d(b, dflag);
5211 modrm = cpu_ldub_code(env, s->pc++);
5212 mod = (modrm >> 6) & 3;
5213 if (mod != 3) {
5214 s->rip_offset = insn_const_size(ot);
5215 gen_lea_modrm(env, s, modrm);
5217 val = insn_get(env, s, ot);
5218 tcg_gen_movi_tl(cpu_T0, val);
5219 if (mod != 3) {
5220 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5221 } else {
5222 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T0);
5224 break;
5225 case 0x8a:
5226 case 0x8b: /* mov Ev, Gv */
5227 ot = mo_b_d(b, dflag);
5228 modrm = cpu_ldub_code(env, s->pc++);
5229 reg = ((modrm >> 3) & 7) | rex_r;
5231 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5232 gen_op_mov_reg_v(ot, reg, cpu_T0);
5233 break;
5234 case 0x8e: /* mov seg, Gv */
5235 modrm = cpu_ldub_code(env, s->pc++);
5236 reg = (modrm >> 3) & 7;
5237 if (reg >= 6 || reg == R_CS)
5238 goto illegal_op;
5239 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5240 gen_movl_seg_T0(s, reg);
5241 if (reg == R_SS) {
5242 /* if reg == SS, inhibit interrupts/trace */
5243 /* If several instructions disable interrupts, only the
5244 _first_ does it */
5245 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
5246 s->tf = 0;
5248 if (s->is_jmp) {
5249 gen_jmp_im(s->pc - s->cs_base);
5250 gen_eob(s);
5252 break;
5253 case 0x8c: /* mov Gv, seg */
5254 modrm = cpu_ldub_code(env, s->pc++);
5255 reg = (modrm >> 3) & 7;
5256 mod = (modrm >> 6) & 3;
5257 if (reg >= 6)
5258 goto illegal_op;
5259 gen_op_movl_T0_seg(reg);
5260 ot = mod == 3 ? dflag : MO_16;
5261 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5262 break;
5264 case 0x1b6: /* movzbS Gv, Eb */
5265 case 0x1b7: /* movzwS Gv, Eb */
5266 case 0x1be: /* movsbS Gv, Eb */
5267 case 0x1bf: /* movswS Gv, Eb */
5269 TCGMemOp d_ot;
5270 TCGMemOp s_ot;
5272 /* d_ot is the size of destination */
5273 d_ot = dflag;
5274 /* ot is the size of source */
5275 ot = (b & 1) + MO_8;
5276 /* s_ot is the sign+size of source */
5277 s_ot = b & 8 ? MO_SIGN | ot : ot;
5279 modrm = cpu_ldub_code(env, s->pc++);
5280 reg = ((modrm >> 3) & 7) | rex_r;
5281 mod = (modrm >> 6) & 3;
5282 rm = (modrm & 7) | REX_B(s);
5284 if (mod == 3) {
5285 gen_op_mov_v_reg(ot, cpu_T0, rm);
5286 switch (s_ot) {
5287 case MO_UB:
5288 tcg_gen_ext8u_tl(cpu_T0, cpu_T0);
5289 break;
5290 case MO_SB:
5291 tcg_gen_ext8s_tl(cpu_T0, cpu_T0);
5292 break;
5293 case MO_UW:
5294 tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
5295 break;
5296 default:
5297 case MO_SW:
5298 tcg_gen_ext16s_tl(cpu_T0, cpu_T0);
5299 break;
5301 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
5302 } else {
5303 gen_lea_modrm(env, s, modrm);
5304 gen_op_ld_v(s, s_ot, cpu_T0, cpu_A0);
5305 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
5308 break;
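/* Bit 0 of the opcode selects the source width (byte/word) and bit 3
   selects sign extension: 0f b6 is movzb, 0f b7 movzw, 0f be movsb and
   0f bf movsw, each extended to the current operand size d_ot. */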
5310 case 0x8d: /* lea */
5311 modrm = cpu_ldub_code(env, s->pc++);
5312 mod = (modrm >> 6) & 3;
5313 if (mod == 3)
5314 goto illegal_op;
5315 reg = ((modrm >> 3) & 7) | rex_r;
5317 AddressParts a = gen_lea_modrm_0(env, s, modrm);
5318 TCGv ea = gen_lea_modrm_1(a);
5319 gen_op_mov_reg_v(dflag, reg, ea);
5321 break;
5323 case 0xa0: /* mov EAX, Ov */
5324 case 0xa1:
5325 case 0xa2: /* mov Ov, EAX */
5326 case 0xa3:
5328 target_ulong offset_addr;
5330 ot = mo_b_d(b, dflag);
5331 switch (s->aflag) {
5332 #ifdef TARGET_X86_64
5333 case MO_64:
5334 offset_addr = cpu_ldq_code(env, s->pc);
5335 s->pc += 8;
5336 break;
5337 #endif
5338 default:
5339 offset_addr = insn_get(env, s, s->aflag);
5340 break;
5342 tcg_gen_movi_tl(cpu_A0, offset_addr);
5343 gen_add_A0_ds_seg(s);
5344 if ((b & 2) == 0) {
5345 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
5346 gen_op_mov_reg_v(ot, R_EAX, cpu_T0);
5347 } else {
5348 gen_op_mov_v_reg(ot, cpu_T0, R_EAX);
5349 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5352 break;
5353 case 0xd7: /* xlat */
5354 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5355 tcg_gen_ext8u_tl(cpu_T0, cpu_regs[R_EAX]);
5356 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T0);
5357 gen_extu(s->aflag, cpu_A0);
5358 gen_add_A0_ds_seg(s);
5359 gen_op_ld_v(s, MO_8, cpu_T0, cpu_A0);
5360 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
5361 break;
5362 case 0xb0 ... 0xb7: /* mov R, Ib */
5363 val = insn_get(env, s, MO_8);
5364 tcg_gen_movi_tl(cpu_T0, val);
5365 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T0);
5366 break;
5367 case 0xb8 ... 0xbf: /* mov R, Iv */
5368 #ifdef TARGET_X86_64
5369 if (dflag == MO_64) {
5370 uint64_t tmp;
5371 /* 64 bit case */
5372 tmp = cpu_ldq_code(env, s->pc);
5373 s->pc += 8;
5374 reg = (b & 7) | REX_B(s);
5375 tcg_gen_movi_tl(cpu_T0, tmp);
5376 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
5377 } else
5378 #endif
5380 ot = dflag;
5381 val = insn_get(env, s, ot);
5382 reg = (b & 7) | REX_B(s);
5383 tcg_gen_movi_tl(cpu_T0, val);
5384 gen_op_mov_reg_v(ot, reg, cpu_T0);
5386 break;
5388 case 0x91 ... 0x97: /* xchg R, EAX */
5389 do_xchg_reg_eax:
5390 ot = dflag;
5391 reg = (b & 7) | REX_B(s);
5392 rm = R_EAX;
5393 goto do_xchg_reg;
5394 case 0x86:
5395 case 0x87: /* xchg Ev, Gv */
5396 ot = mo_b_d(b, dflag);
5397 modrm = cpu_ldub_code(env, s->pc++);
5398 reg = ((modrm >> 3) & 7) | rex_r;
5399 mod = (modrm >> 6) & 3;
5400 if (mod == 3) {
5401 rm = (modrm & 7) | REX_B(s);
5402 do_xchg_reg:
5403 gen_op_mov_v_reg(ot, cpu_T0, reg);
5404 gen_op_mov_v_reg(ot, cpu_T1, rm);
5405 gen_op_mov_reg_v(ot, rm, cpu_T0);
5406 gen_op_mov_reg_v(ot, reg, cpu_T1);
5407 } else {
5408 gen_lea_modrm(env, s, modrm);
5409 gen_op_mov_v_reg(ot, cpu_T0, reg);
5410 /* for xchg, lock is implicit */
5411 if (!(prefixes & PREFIX_LOCK))
5412 gen_helper_lock();
5413 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5414 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
5415 if (!(prefixes & PREFIX_LOCK))
5416 gen_helper_unlock();
5417 gen_op_mov_reg_v(ot, reg, cpu_T1);
5419 break;
5420 case 0xc4: /* les Gv */
5421 /* In CODE64 this is VEX3; see above. */
5422 op = R_ES;
5423 goto do_lxx;
5424 case 0xc5: /* lds Gv */
5425 /* In CODE64 this is VEX2; see above. */
5426 op = R_DS;
5427 goto do_lxx;
5428 case 0x1b2: /* lss Gv */
5429 op = R_SS;
5430 goto do_lxx;
5431 case 0x1b4: /* lfs Gv */
5432 op = R_FS;
5433 goto do_lxx;
5434 case 0x1b5: /* lgs Gv */
5435 op = R_GS;
5436 do_lxx:
5437 ot = dflag != MO_16 ? MO_32 : MO_16;
5438 modrm = cpu_ldub_code(env, s->pc++);
5439 reg = ((modrm >> 3) & 7) | rex_r;
5440 mod = (modrm >> 6) & 3;
5441 if (mod == 3)
5442 goto illegal_op;
5443 gen_lea_modrm(env, s, modrm);
5444 gen_op_ld_v(s, ot, cpu_T1, cpu_A0);
5445 gen_add_A0_im(s, 1 << ot);
5446 /* load the segment first to handle exceptions properly */
5447 gen_op_ld_v(s, MO_16, cpu_T0, cpu_A0);
5448 gen_movl_seg_T0(s, op);
5449 /* then put the data */
5450 gen_op_mov_reg_v(ot, reg, cpu_T1);
5451 if (s->is_jmp) {
5452 gen_jmp_im(s->pc - s->cs_base);
5453 gen_eob(s);
5455 break;
5457 /************************/
5458 /* shifts */
5459 case 0xc0:
5460 case 0xc1:
5461 /* shift Ev,Ib */
5462 shift = 2;
5463 grp2:
5465 ot = mo_b_d(b, dflag);
5466 modrm = cpu_ldub_code(env, s->pc++);
5467 mod = (modrm >> 6) & 3;
5468 op = (modrm >> 3) & 7;
5470 if (mod != 3) {
5471 if (shift == 2) {
5472 s->rip_offset = 1;
5474 gen_lea_modrm(env, s, modrm);
5475 opreg = OR_TMP0;
5476 } else {
5477 opreg = (modrm & 7) | REX_B(s);
5480 /* simpler op */
5481 if (shift == 0) {
5482 gen_shift(s, op, ot, opreg, OR_ECX);
5483 } else {
5484 if (shift == 2) {
5485 shift = cpu_ldub_code(env, s->pc++);
5487 gen_shifti(s, op, ot, opreg, shift);
5490 break;
5491 case 0xd0:
5492 case 0xd1:
5493 /* shift Ev,1 */
5494 shift = 1;
5495 goto grp2;
5496 case 0xd2:
5497 case 0xd3:
5498 /* shift Ev,cl */
5499 shift = 0;
5500 goto grp2;
5502 case 0x1a4: /* shld imm */
5503 op = 0;
5504 shift = 1;
5505 goto do_shiftd;
5506 case 0x1a5: /* shld cl */
5507 op = 0;
5508 shift = 0;
5509 goto do_shiftd;
5510 case 0x1ac: /* shrd imm */
5511 op = 1;
5512 shift = 1;
5513 goto do_shiftd;
5514 case 0x1ad: /* shrd cl */
5515 op = 1;
5516 shift = 0;
5517 do_shiftd:
5518 ot = dflag;
5519 modrm = cpu_ldub_code(env, s->pc++);
5520 mod = (modrm >> 6) & 3;
5521 rm = (modrm & 7) | REX_B(s);
5522 reg = ((modrm >> 3) & 7) | rex_r;
5523 if (mod != 3) {
5524 gen_lea_modrm(env, s, modrm);
5525 opreg = OR_TMP0;
5526 } else {
5527 opreg = rm;
5529 gen_op_mov_v_reg(ot, cpu_T1, reg);
5531 if (shift) {
5532 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5533 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5534 tcg_temp_free(imm);
5535 } else {
5536 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5538 break;
5540 /************************/
5541 /* floats */
5542 case 0xd8 ... 0xdf:
5543 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5544 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5545 /* XXX: what to do if illegal op ? */
5546 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5547 break;
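/* EXCP07_PREX is the #NM (device-not-available) fault; raising it when
   CR0.EM or CR0.TS is set lets the guest OS implement lazy FPU context
   switching or report that no coprocessor is present. */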
5549 modrm = cpu_ldub_code(env, s->pc++);
5550 mod = (modrm >> 6) & 3;
5551 rm = modrm & 7;
5552 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5553 if (mod != 3) {
5554 /* memory op */
5555 gen_lea_modrm(env, s, modrm);
5556 switch(op) {
5557 case 0x00 ... 0x07: /* fxxxs */
5558 case 0x10 ... 0x17: /* fixxxl */
5559 case 0x20 ... 0x27: /* fxxxl */
5560 case 0x30 ... 0x37: /* fixxx */
5562 int op1;
5563 op1 = op & 7;
5565 switch(op >> 4) {
5566 case 0:
5567 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5568 s->mem_index, MO_LEUL);
5569 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5570 break;
5571 case 1:
5572 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5573 s->mem_index, MO_LEUL);
5574 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5575 break;
5576 case 2:
5577 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5578 s->mem_index, MO_LEQ);
5579 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5580 break;
5581 case 3:
5582 default:
5583 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5584 s->mem_index, MO_LESW);
5585 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5586 break;
5589 gen_helper_fp_arith_ST0_FT0(op1);
5590 if (op1 == 3) {
5591 /* fcomp needs pop */
5592 gen_helper_fpop(cpu_env);
5595 break;
5596 case 0x08: /* flds */
5597 case 0x0a: /* fsts */
5598 case 0x0b: /* fstps */
5599 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5600 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5601 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5602 switch(op & 7) {
5603 case 0:
5604 switch(op >> 4) {
5605 case 0:
5606 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5607 s->mem_index, MO_LEUL);
5608 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5609 break;
5610 case 1:
5611 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5612 s->mem_index, MO_LEUL);
5613 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5614 break;
5615 case 2:
5616 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5617 s->mem_index, MO_LEQ);
5618 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5619 break;
5620 case 3:
5621 default:
5622 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5623 s->mem_index, MO_LESW);
5624 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5625 break;
5627 break;
5628 case 1:
5629 /* XXX: the corresponding CPUID bit must be tested ! */
5630 switch(op >> 4) {
5631 case 1:
5632 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5633 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5634 s->mem_index, MO_LEUL);
5635 break;
5636 case 2:
5637 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5638 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5639 s->mem_index, MO_LEQ);
5640 break;
5641 case 3:
5642 default:
5643 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5644 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5645 s->mem_index, MO_LEUW);
5646 break;
5648 gen_helper_fpop(cpu_env);
5649 break;
5650 default:
5651 switch(op >> 4) {
5652 case 0:
5653 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5654 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5655 s->mem_index, MO_LEUL);
5656 break;
5657 case 1:
5658 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5659 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5660 s->mem_index, MO_LEUL);
5661 break;
5662 case 2:
5663 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5664 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5665 s->mem_index, MO_LEQ);
5666 break;
5667 case 3:
5668 default:
5669 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5670 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5671 s->mem_index, MO_LEUW);
5672 break;
5674 if ((op & 7) == 3)
5675 gen_helper_fpop(cpu_env);
5676 break;
5678 break;
5679 case 0x0c: /* fldenv mem */
5680 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5681 break;
5682 case 0x0d: /* fldcw mem */
5683 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5684 s->mem_index, MO_LEUW);
5685 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5686 break;
5687 case 0x0e: /* fnstenv mem */
5688 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5689 break;
5690 case 0x0f: /* fnstcw mem */
5691 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5692 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5693 s->mem_index, MO_LEUW);
5694 break;
5695 case 0x1d: /* fldt mem */
5696 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5697 break;
5698 case 0x1f: /* fstpt mem */
5699 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5700 gen_helper_fpop(cpu_env);
5701 break;
5702 case 0x2c: /* frstor mem */
5703 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5704 break;
5705 case 0x2e: /* fnsave mem */
5706 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5707 break;
5708 case 0x2f: /* fnstsw mem */
5709 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5710 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5711 s->mem_index, MO_LEUW);
5712 break;
5713 case 0x3c: /* fbld */
5714 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5715 break;
5716 case 0x3e: /* fbstp */
5717 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5718 gen_helper_fpop(cpu_env);
5719 break;
5720 case 0x3d: /* fildll */
5721 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5722 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5723 break;
5724 case 0x3f: /* fistpll */
5725 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5726 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5727 gen_helper_fpop(cpu_env);
5728 break;
5729 default:
5730 goto illegal_op;
5732 } else {
5733 /* register float ops */
5734 opreg = rm;
5736 switch(op) {
5737 case 0x08: /* fld sti */
5738 gen_helper_fpush(cpu_env);
5739 gen_helper_fmov_ST0_STN(cpu_env,
5740 tcg_const_i32((opreg + 1) & 7));
5741 break;
5742 case 0x09: /* fxchg sti */
5743 case 0x29: /* fxchg4 sti, undocumented op */
5744 case 0x39: /* fxchg7 sti, undocumented op */
5745 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5746 break;
5747 case 0x0a: /* grp d9/2 */
5748 switch(rm) {
5749 case 0: /* fnop */
5750 /* check exceptions (FreeBSD FPU probe) */
5751 gen_helper_fwait(cpu_env);
5752 break;
5753 default:
5754 goto illegal_op;
5756 break;
5757 case 0x0c: /* grp d9/4 */
5758 switch(rm) {
5759 case 0: /* fchs */
5760 gen_helper_fchs_ST0(cpu_env);
5761 break;
5762 case 1: /* fabs */
5763 gen_helper_fabs_ST0(cpu_env);
5764 break;
5765 case 4: /* ftst */
5766 gen_helper_fldz_FT0(cpu_env);
5767 gen_helper_fcom_ST0_FT0(cpu_env);
5768 break;
5769 case 5: /* fxam */
5770 gen_helper_fxam_ST0(cpu_env);
5771 break;
5772 default:
5773 goto illegal_op;
5775 break;
5776 case 0x0d: /* grp d9/5 */
5778 switch(rm) {
5779 case 0:
5780 gen_helper_fpush(cpu_env);
5781 gen_helper_fld1_ST0(cpu_env);
5782 break;
5783 case 1:
5784 gen_helper_fpush(cpu_env);
5785 gen_helper_fldl2t_ST0(cpu_env);
5786 break;
5787 case 2:
5788 gen_helper_fpush(cpu_env);
5789 gen_helper_fldl2e_ST0(cpu_env);
5790 break;
5791 case 3:
5792 gen_helper_fpush(cpu_env);
5793 gen_helper_fldpi_ST0(cpu_env);
5794 break;
5795 case 4:
5796 gen_helper_fpush(cpu_env);
5797 gen_helper_fldlg2_ST0(cpu_env);
5798 break;
5799 case 5:
5800 gen_helper_fpush(cpu_env);
5801 gen_helper_fldln2_ST0(cpu_env);
5802 break;
5803 case 6:
5804 gen_helper_fpush(cpu_env);
5805 gen_helper_fldz_ST0(cpu_env);
5806 break;
5807 default:
5808 goto illegal_op;
5811 break;
5812 case 0x0e: /* grp d9/6 */
5813 switch(rm) {
5814 case 0: /* f2xm1 */
5815 gen_helper_f2xm1(cpu_env);
5816 break;
5817 case 1: /* fyl2x */
5818 gen_helper_fyl2x(cpu_env);
5819 break;
5820 case 2: /* fptan */
5821 gen_helper_fptan(cpu_env);
5822 break;
5823 case 3: /* fpatan */
5824 gen_helper_fpatan(cpu_env);
5825 break;
5826 case 4: /* fxtract */
5827 gen_helper_fxtract(cpu_env);
5828 break;
5829 case 5: /* fprem1 */
5830 gen_helper_fprem1(cpu_env);
5831 break;
5832 case 6: /* fdecstp */
5833 gen_helper_fdecstp(cpu_env);
5834 break;
5835 default:
5836 case 7: /* fincstp */
5837 gen_helper_fincstp(cpu_env);
5838 break;
5840 break;
5841 case 0x0f: /* grp d9/7 */
5842 switch(rm) {
5843 case 0: /* fprem */
5844 gen_helper_fprem(cpu_env);
5845 break;
5846 case 1: /* fyl2xp1 */
5847 gen_helper_fyl2xp1(cpu_env);
5848 break;
5849 case 2: /* fsqrt */
5850 gen_helper_fsqrt(cpu_env);
5851 break;
5852 case 3: /* fsincos */
5853 gen_helper_fsincos(cpu_env);
5854 break;
5855 case 5: /* fscale */
5856 gen_helper_fscale(cpu_env);
5857 break;
5858 case 4: /* frndint */
5859 gen_helper_frndint(cpu_env);
5860 break;
5861 case 6: /* fsin */
5862 gen_helper_fsin(cpu_env);
5863 break;
5864 default:
5865 case 7: /* fcos */
5866 gen_helper_fcos(cpu_env);
5867 break;
5869 break;
5870 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5871 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5872 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5874 int op1;
5876 op1 = op & 7;
5877 if (op >= 0x20) {
5878 gen_helper_fp_arith_STN_ST0(op1, opreg);
5879 if (op >= 0x30)
5880 gen_helper_fpop(cpu_env);
5881 } else {
5882 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5883 gen_helper_fp_arith_ST0_FT0(op1);
5886 break;
5887 case 0x02: /* fcom */
5888 case 0x22: /* fcom2, undocumented op */
5889 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5890 gen_helper_fcom_ST0_FT0(cpu_env);
5891 break;
5892 case 0x03: /* fcomp */
5893 case 0x23: /* fcomp3, undocumented op */
5894 case 0x32: /* fcomp5, undocumented op */
5895 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5896 gen_helper_fcom_ST0_FT0(cpu_env);
5897 gen_helper_fpop(cpu_env);
5898 break;
5899 case 0x15: /* da/5 */
5900 switch(rm) {
5901 case 1: /* fucompp */
5902 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5903 gen_helper_fucom_ST0_FT0(cpu_env);
5904 gen_helper_fpop(cpu_env);
5905 gen_helper_fpop(cpu_env);
5906 break;
5907 default:
5908 goto illegal_op;
5910 break;
5911 case 0x1c:
5912 switch(rm) {
5913 case 0: /* feni (287 only, just do nop here) */
5914 break;
5915 case 1: /* fdisi (287 only, just do nop here) */
5916 break;
5917 case 2: /* fclex */
5918 gen_helper_fclex(cpu_env);
5919 break;
5920 case 3: /* fninit */
5921 gen_helper_fninit(cpu_env);
5922 break;
5923 case 4: /* fsetpm (287 only, just do nop here) */
5924 break;
5925 default:
5926 goto illegal_op;
5928 break;
5929 case 0x1d: /* fucomi */
5930 if (!(s->cpuid_features & CPUID_CMOV)) {
5931 goto illegal_op;
5933 gen_update_cc_op(s);
5934 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5935 gen_helper_fucomi_ST0_FT0(cpu_env);
5936 set_cc_op(s, CC_OP_EFLAGS);
5937 break;
5938 case 0x1e: /* fcomi */
5939 if (!(s->cpuid_features & CPUID_CMOV)) {
5940 goto illegal_op;
5942 gen_update_cc_op(s);
5943 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5944 gen_helper_fcomi_ST0_FT0(cpu_env);
5945 set_cc_op(s, CC_OP_EFLAGS);
5946 break;
5947 case 0x28: /* ffree sti */
5948 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5949 break;
5950 case 0x2a: /* fst sti */
5951 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
5952 break;
5953 case 0x2b: /* fstp sti */
5954 case 0x0b: /* fstp1 sti, undocumented op */
5955 case 0x3a: /* fstp8 sti, undocumented op */
5956 case 0x3b: /* fstp9 sti, undocumented op */
5957 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
5958 gen_helper_fpop(cpu_env);
5959 break;
5960 case 0x2c: /* fucom st(i) */
5961 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5962 gen_helper_fucom_ST0_FT0(cpu_env);
5963 break;
5964 case 0x2d: /* fucomp st(i) */
5965 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5966 gen_helper_fucom_ST0_FT0(cpu_env);
5967 gen_helper_fpop(cpu_env);
5968 break;
5969 case 0x33: /* de/3 */
5970 switch(rm) {
5971 case 1: /* fcompp */
5972 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5973 gen_helper_fcom_ST0_FT0(cpu_env);
5974 gen_helper_fpop(cpu_env);
5975 gen_helper_fpop(cpu_env);
5976 break;
5977 default:
5978 goto illegal_op;
5980 break;
5981 case 0x38: /* ffreep sti, undocumented op */
5982 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5983 gen_helper_fpop(cpu_env);
5984 break;
5985 case 0x3c: /* df/4 */
5986 switch(rm) {
5987 case 0:
5988 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5989 tcg_gen_extu_i32_tl(cpu_T0, cpu_tmp2_i32);
5990 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T0);
5991 break;
5992 default:
5993 goto illegal_op;
5995 break;
5996 case 0x3d: /* fucomip */
5997 if (!(s->cpuid_features & CPUID_CMOV)) {
5998 goto illegal_op;
6000 gen_update_cc_op(s);
6001 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6002 gen_helper_fucomi_ST0_FT0(cpu_env);
6003 gen_helper_fpop(cpu_env);
6004 set_cc_op(s, CC_OP_EFLAGS);
6005 break;
6006 case 0x3e: /* fcomip */
6007 if (!(s->cpuid_features & CPUID_CMOV)) {
6008 goto illegal_op;
6010 gen_update_cc_op(s);
6011 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6012 gen_helper_fcomi_ST0_FT0(cpu_env);
6013 gen_helper_fpop(cpu_env);
6014 set_cc_op(s, CC_OP_EFLAGS);
6015 break;
6016 case 0x10 ... 0x13: /* fcmovxx */
6017 case 0x18 ... 0x1b:
6019 int op1;
6020 TCGLabel *l1;
6021 static const uint8_t fcmov_cc[8] = {
6022 (JCC_B << 1),
6023 (JCC_Z << 1),
6024 (JCC_BE << 1),
6025 (JCC_P << 1),
6028 if (!(s->cpuid_features & CPUID_CMOV)) {
6029 goto illegal_op;
6031 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6032 l1 = gen_new_label();
6033 gen_jcc1_noeob(s, op1, l1);
6034 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6035 gen_set_label(l1);
6037 break;
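/* fcmov_cc gives the base condition (B, Z, BE, P) for the four fcmov
   variants; bit 3 of the opcode group selects the negated forms.  The
   extra "^ 1" inverts the test because the generated branch skips the
   ST0 load when the jump condition holds, so the move happens exactly
   when the architectural condition is true. */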
6038 default:
6039 goto illegal_op;
6042 break;
6043 /************************/
6044 /* string ops */
6046 case 0xa4: /* movsS */
6047 case 0xa5:
6048 ot = mo_b_d(b, dflag);
6049 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6050 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6051 } else {
6052 gen_movs(s, ot);
6054 break;
6056 case 0xaa: /* stosS */
6057 case 0xab:
6058 ot = mo_b_d(b, dflag);
6059 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6060 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6061 } else {
6062 gen_stos(s, ot);
6064 break;
6065 case 0xac: /* lodsS */
6066 case 0xad:
6067 ot = mo_b_d(b, dflag);
6068 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6069 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6070 } else {
6071 gen_lods(s, ot);
6073 break;
6074 case 0xae: /* scasS */
6075 case 0xaf:
6076 ot = mo_b_d(b, dflag);
6077 if (prefixes & PREFIX_REPNZ) {
6078 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6079 } else if (prefixes & PREFIX_REPZ) {
6080 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6081 } else {
6082 gen_scas(s, ot);
6084 break;
6086 case 0xa6: /* cmpsS */
6087 case 0xa7:
6088 ot = mo_b_d(b, dflag);
6089 if (prefixes & PREFIX_REPNZ) {
6090 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6091 } else if (prefixes & PREFIX_REPZ) {
6092 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6093 } else {
6094 gen_cmps(s, ot);
6096 break;
6097 case 0x6c: /* insS */
6098 case 0x6d:
6099 ot = mo_b_d32(b, dflag);
6100 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6101 gen_check_io(s, ot, pc_start - s->cs_base,
6102 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6103 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6104 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6105 } else {
6106 gen_ins(s, ot);
6107 if (s->tb->cflags & CF_USE_ICOUNT) {
6108 gen_jmp(s, s->pc - s->cs_base);
6111 break;
6112 case 0x6e: /* outsS */
6113 case 0x6f:
6114 ot = mo_b_d32(b, dflag);
6115 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6116 gen_check_io(s, ot, pc_start - s->cs_base,
6117 svm_is_rep(prefixes) | 4);
6118 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6119 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6120 } else {
6121 gen_outs(s, ot);
6122 if (s->tb->cflags & CF_USE_ICOUNT) {
6123 gen_jmp(s, s->pc - s->cs_base);
6126 break;
6128 /************************/
6129 /* port I/O */
6131 case 0xe4:
6132 case 0xe5:
6133 ot = mo_b_d32(b, dflag);
6134 val = cpu_ldub_code(env, s->pc++);
6135 tcg_gen_movi_tl(cpu_T0, val);
6136 gen_check_io(s, ot, pc_start - s->cs_base,
6137 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6138 if (s->tb->cflags & CF_USE_ICOUNT) {
6139 gen_io_start();
6141 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6142 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6143 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
6144 gen_bpt_io(s, cpu_tmp2_i32, ot);
6145 if (s->tb->cflags & CF_USE_ICOUNT) {
6146 gen_io_end();
6147 gen_jmp(s, s->pc - s->cs_base);
6149 break;
6150 case 0xe6:
6151 case 0xe7:
6152 ot = mo_b_d32(b, dflag);
6153 val = cpu_ldub_code(env, s->pc++);
6154 tcg_gen_movi_tl(cpu_T0, val);
6155 gen_check_io(s, ot, pc_start - s->cs_base,
6156 svm_is_rep(prefixes));
6157 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
6159 if (s->tb->cflags & CF_USE_ICOUNT) {
6160 gen_io_start();
6162 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6163 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
6164 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6165 gen_bpt_io(s, cpu_tmp2_i32, ot);
6166 if (s->tb->cflags & CF_USE_ICOUNT) {
6167 gen_io_end();
6168 gen_jmp(s, s->pc - s->cs_base);
6170 break;
6171 case 0xec:
6172 case 0xed:
6173 ot = mo_b_d32(b, dflag);
6174 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6175 gen_check_io(s, ot, pc_start - s->cs_base,
6176 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6177 if (s->tb->cflags & CF_USE_ICOUNT) {
6178 gen_io_start();
6180 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6181 gen_helper_in_func(ot, cpu_T1, cpu_tmp2_i32);
6182 gen_op_mov_reg_v(ot, R_EAX, cpu_T1);
6183 gen_bpt_io(s, cpu_tmp2_i32, ot);
6184 if (s->tb->cflags & CF_USE_ICOUNT) {
6185 gen_io_end();
6186 gen_jmp(s, s->pc - s->cs_base);
6188 break;
6189 case 0xee:
6190 case 0xef:
6191 ot = mo_b_d32(b, dflag);
6192 tcg_gen_ext16u_tl(cpu_T0, cpu_regs[R_EDX]);
6193 gen_check_io(s, ot, pc_start - s->cs_base,
6194 svm_is_rep(prefixes));
6195 gen_op_mov_v_reg(ot, cpu_T1, R_EAX);
6197 if (s->tb->cflags & CF_USE_ICOUNT) {
6198 gen_io_start();
6200 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6201 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T1);
6202 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6203 gen_bpt_io(s, cpu_tmp2_i32, ot);
6204 if (s->tb->cflags & CF_USE_ICOUNT) {
6205 gen_io_end();
6206 gen_jmp(s, s->pc - s->cs_base);
6208 break;
6210 /************************/
6211 /* control */
6212 case 0xc2: /* ret im */
6213 val = cpu_ldsw_code(env, s->pc);
6214 s->pc += 2;
6215 ot = gen_pop_T0(s);
6216 gen_stack_update(s, val + (1 << ot));
6217 /* Note that gen_pop_T0 uses a zero-extending load. */
6218 gen_op_jmp_v(cpu_T0);
6219 gen_bnd_jmp(s);
6220 gen_eob(s);
6221 break;
6222 case 0xc3: /* ret */
6223 ot = gen_pop_T0(s);
6224 gen_pop_update(s, ot);
6225 /* Note that gen_pop_T0 uses a zero-extending load. */
6226 gen_op_jmp_v(cpu_T0);
6227 gen_bnd_jmp(s);
6228 gen_eob(s);
6229 break;
6230 case 0xca: /* lret im */
6231 val = cpu_ldsw_code(env, s->pc);
6232 s->pc += 2;
6233 do_lret:
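/* In protected mode the lret helper performs the full privilege and
   selector checks; otherwise the return offset and CS selector are popped
   inline and the stack pointer is adjusted by the immediate. */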
6234 if (s->pe && !s->vm86) {
6235 gen_update_cc_op(s);
6236 gen_jmp_im(pc_start - s->cs_base);
6237 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6238 tcg_const_i32(val));
6239 } else {
6240 gen_stack_A0(s);
6241 /* pop offset */
6242 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
6243 /* NOTE: keeping EIP updated is not a problem in case of
6244 exception */
6245 gen_op_jmp_v(cpu_T0);
6246 /* pop selector */
6247 gen_add_A0_im(s, 1 << dflag);
6248 gen_op_ld_v(s, dflag, cpu_T0, cpu_A0);
6249 gen_op_movl_seg_T0_vm(R_CS);
6250 /* add stack offset */
6251 gen_stack_update(s, val + (2 << dflag));
6253 gen_eob(s);
6254 break;
6255 case 0xcb: /* lret */
6256 val = 0;
6257 goto do_lret;
6258 case 0xcf: /* iret */
6259 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6260 if (!s->pe) {
6261 /* real mode */
6262 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6263 set_cc_op(s, CC_OP_EFLAGS);
6264 } else if (s->vm86) {
6265 if (s->iopl != 3) {
6266 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6267 } else {
6268 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6269 set_cc_op(s, CC_OP_EFLAGS);
6271 } else {
6272 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6273 tcg_const_i32(s->pc - s->cs_base));
6274 set_cc_op(s, CC_OP_EFLAGS);
6276 gen_eob(s);
6277 break;
6278 case 0xe8: /* call im */
6280 if (dflag != MO_16) {
6281 tval = (int32_t)insn_get(env, s, MO_32);
6282 } else {
6283 tval = (int16_t)insn_get(env, s, MO_16);
6285 next_eip = s->pc - s->cs_base;
6286 tval += next_eip;
6287 if (dflag == MO_16) {
6288 tval &= 0xffff;
6289 } else if (!CODE64(s)) {
6290 tval &= 0xffffffff;
6292 tcg_gen_movi_tl(cpu_T0, next_eip);
6293 gen_push_v(s, cpu_T0);
6294 gen_bnd_jmp(s);
6295 gen_jmp(s, tval);
6297 break;
6298 case 0x9a: /* lcall im */
6300 unsigned int selector, offset;
6302 if (CODE64(s))
6303 goto illegal_op;
6304 ot = dflag;
6305 offset = insn_get(env, s, ot);
6306 selector = insn_get(env, s, MO_16);
6308 tcg_gen_movi_tl(cpu_T0, selector);
6309 tcg_gen_movi_tl(cpu_T1, offset);
6311 goto do_lcall;
6312 case 0xe9: /* jmp im */
6313 if (dflag != MO_16) {
6314 tval = (int32_t)insn_get(env, s, MO_32);
6315 } else {
6316 tval = (int16_t)insn_get(env, s, MO_16);
6318 tval += s->pc - s->cs_base;
6319 if (dflag == MO_16) {
6320 tval &= 0xffff;
6321 } else if (!CODE64(s)) {
6322 tval &= 0xffffffff;
6324 gen_bnd_jmp(s);
6325 gen_jmp(s, tval);
6326 break;
6327 case 0xea: /* ljmp im */
6329 unsigned int selector, offset;
6331 if (CODE64(s))
6332 goto illegal_op;
6333 ot = dflag;
6334 offset = insn_get(env, s, ot);
6335 selector = insn_get(env, s, MO_16);
6337 tcg_gen_movi_tl(cpu_T0, selector);
6338 tcg_gen_movi_tl(cpu_T1, offset);
6340 goto do_ljmp;
6341 case 0xeb: /* jmp Jb */
6342 tval = (int8_t)insn_get(env, s, MO_8);
6343 tval += s->pc - s->cs_base;
6344 if (dflag == MO_16) {
6345 tval &= 0xffff;
6347 gen_jmp(s, tval);
6348 break;
6349 case 0x70 ... 0x7f: /* jcc Jb */
6350 tval = (int8_t)insn_get(env, s, MO_8);
6351 goto do_jcc;
6352 case 0x180 ... 0x18f: /* jcc Jv */
6353 if (dflag != MO_16) {
6354 tval = (int32_t)insn_get(env, s, MO_32);
6355 } else {
6356 tval = (int16_t)insn_get(env, s, MO_16);
6358 do_jcc:
6359 next_eip = s->pc - s->cs_base;
6360 tval += next_eip;
6361 if (dflag == MO_16) {
6362 tval &= 0xffff;
6364 gen_bnd_jmp(s);
6365 gen_jcc(s, b, tval, next_eip);
6366 break;
6368 case 0x190 ... 0x19f: /* setcc Gv */
6369 modrm = cpu_ldub_code(env, s->pc++);
6370 gen_setcc1(s, b, cpu_T0);
6371 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6372 break;
6373 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6374 if (!(s->cpuid_features & CPUID_CMOV)) {
6375 goto illegal_op;
6377 ot = dflag;
6378 modrm = cpu_ldub_code(env, s->pc++);
6379 reg = ((modrm >> 3) & 7) | rex_r;
6380 gen_cmovcc1(env, s, ot, b, modrm, reg);
6381 break;
6383 /************************/
6384 /* flags */
6385 case 0x9c: /* pushf */
6386 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6387 if (s->vm86 && s->iopl != 3) {
6388 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6389 } else {
6390 gen_update_cc_op(s);
6391 gen_helper_read_eflags(cpu_T0, cpu_env);
6392 gen_push_v(s, cpu_T0);
6394 break;
6395 case 0x9d: /* popf */
6396 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6397 if (s->vm86 && s->iopl != 3) {
6398 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6399 } else {
6400 ot = gen_pop_T0(s);
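/* The set of EFLAGS bits that may be written depends on privilege:
   CPL 0 may also change IF and IOPL, CPL <= IOPL may change IF but not
   IOPL, otherwise neither can be changed. A 16-bit operand size restricts
   the update to the low 16 bits. */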
6401 if (s->cpl == 0) {
6402 if (dflag != MO_16) {
6403 gen_helper_write_eflags(cpu_env, cpu_T0,
6404 tcg_const_i32((TF_MASK | AC_MASK |
6405 ID_MASK | NT_MASK |
6406 IF_MASK |
6407 IOPL_MASK)));
6408 } else {
6409 gen_helper_write_eflags(cpu_env, cpu_T0,
6410 tcg_const_i32((TF_MASK | AC_MASK |
6411 ID_MASK | NT_MASK |
6412 IF_MASK | IOPL_MASK)
6413 & 0xffff));
6415 } else {
6416 if (s->cpl <= s->iopl) {
6417 if (dflag != MO_16) {
6418 gen_helper_write_eflags(cpu_env, cpu_T0,
6419 tcg_const_i32((TF_MASK |
6420 AC_MASK |
6421 ID_MASK |
6422 NT_MASK |
6423 IF_MASK)));
6424 } else {
6425 gen_helper_write_eflags(cpu_env, cpu_T0,
6426 tcg_const_i32((TF_MASK |
6427 AC_MASK |
6428 ID_MASK |
6429 NT_MASK |
6430 IF_MASK)
6431 & 0xffff));
6433 } else {
6434 if (dflag != MO_16) {
6435 gen_helper_write_eflags(cpu_env, cpu_T0,
6436 tcg_const_i32((TF_MASK | AC_MASK |
6437 ID_MASK | NT_MASK)));
6438 } else {
6439 gen_helper_write_eflags(cpu_env, cpu_T0,
6440 tcg_const_i32((TF_MASK | AC_MASK |
6441 ID_MASK | NT_MASK)
6442 & 0xffff));
6446 gen_pop_update(s, ot);
6447 set_cc_op(s, CC_OP_EFLAGS);
6448 /* abort translation because TF/AC flag may change */
6449 gen_jmp_im(s->pc - s->cs_base);
6450 gen_eob(s);
6452 break;
6453 case 0x9e: /* sahf */
6454 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6455 goto illegal_op;
6456 gen_op_mov_v_reg(MO_8, cpu_T0, R_AH);
6457 gen_compute_eflags(s);
6458 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6459 tcg_gen_andi_tl(cpu_T0, cpu_T0, CC_S | CC_Z | CC_A | CC_P | CC_C);
6460 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T0);
6461 break;
6462 case 0x9f: /* lahf */
6463 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6464 goto illegal_op;
6465 gen_compute_eflags(s);
6466 /* Note: gen_compute_eflags() only gives the condition codes */
6467 tcg_gen_ori_tl(cpu_T0, cpu_cc_src, 0x02);
6468 gen_op_mov_reg_v(MO_8, R_AH, cpu_T0);
6469 break;
6470 case 0xf5: /* cmc */
6471 gen_compute_eflags(s);
6472 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6473 break;
6474 case 0xf8: /* clc */
6475 gen_compute_eflags(s);
6476 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6477 break;
6478 case 0xf9: /* stc */
6479 gen_compute_eflags(s);
6480 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6481 break;
6482 case 0xfc: /* cld */
6483 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6484 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6485 break;
6486 case 0xfd: /* std */
6487 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6488 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6489 break;
6491 /************************/
6492 /* bit operations */
6493 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6494 ot = dflag;
6495 modrm = cpu_ldub_code(env, s->pc++);
6496 op = (modrm >> 3) & 7;
6497 mod = (modrm >> 6) & 3;
6498 rm = (modrm & 7) | REX_B(s);
6499 if (mod != 3) {
6500 s->rip_offset = 1;
6501 gen_lea_modrm(env, s, modrm);
6502 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6503 } else {
6504 gen_op_mov_v_reg(ot, cpu_T0, rm);
6506 /* load shift */
6507 val = cpu_ldub_code(env, s->pc++);
6508 tcg_gen_movi_tl(cpu_T1, val);
6509 if (op < 4)
6510 goto illegal_op;
6511 op -= 4;
6512 goto bt_op;
6513 case 0x1a3: /* bt Gv, Ev */
6514 op = 0;
6515 goto do_btx;
6516 case 0x1ab: /* bts */
6517 op = 1;
6518 goto do_btx;
6519 case 0x1b3: /* btr */
6520 op = 2;
6521 goto do_btx;
6522 case 0x1bb: /* btc */
6523 op = 3;
6524 do_btx:
6525 ot = dflag;
6526 modrm = cpu_ldub_code(env, s->pc++);
6527 reg = ((modrm >> 3) & 7) | rex_r;
6528 mod = (modrm >> 6) & 3;
6529 rm = (modrm & 7) | REX_B(s);
6530 gen_op_mov_v_reg(MO_32, cpu_T1, reg);
6531 if (mod != 3) {
6532 gen_lea_modrm(env, s, modrm);
6533 /* specific case: the bit index may address memory outside the operand, so add the extra displacement ((bit_index / operand_bits) * operand_bytes) to the address */
6534 gen_exts(ot, cpu_T1);
6535 tcg_gen_sari_tl(cpu_tmp0, cpu_T1, 3 + ot);
6536 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6537 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6538 gen_op_ld_v(s, ot, cpu_T0, cpu_A0);
6539 } else {
6540 gen_op_mov_v_reg(ot, cpu_T0, rm);
6542 bt_op:
6543 tcg_gen_andi_tl(cpu_T1, cpu_T1, (1 << (3 + ot)) - 1);
6544 tcg_gen_shr_tl(cpu_tmp4, cpu_T0, cpu_T1);
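/* cpu_tmp4 now holds the operand shifted so that the tested bit is in
   bit 0; it provides the new CF once the result has been written back. */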
6545 switch(op) {
6546 case 0:
6547 break;
6548 case 1:
6549 tcg_gen_movi_tl(cpu_tmp0, 1);
6550 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6551 tcg_gen_or_tl(cpu_T0, cpu_T0, cpu_tmp0);
6552 break;
6553 case 2:
6554 tcg_gen_movi_tl(cpu_tmp0, 1);
6555 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6556 tcg_gen_andc_tl(cpu_T0, cpu_T0, cpu_tmp0);
6557 break;
6558 default:
6559 case 3:
6560 tcg_gen_movi_tl(cpu_tmp0, 1);
6561 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T1);
6562 tcg_gen_xor_tl(cpu_T0, cpu_T0, cpu_tmp0);
6563 break;
6565 if (op != 0) {
6566 if (mod != 3) {
6567 gen_op_st_v(s, ot, cpu_T0, cpu_A0);
6568 } else {
6569 gen_op_mov_reg_v(ot, rm, cpu_T0);
6573 /* Delay all CC updates until after the store above. Note that
6574 C is the result of the test, Z is unchanged, and the others
6575 are all undefined. */
6576 switch (s->cc_op) {
6577 case CC_OP_MULB ... CC_OP_MULQ:
6578 case CC_OP_ADDB ... CC_OP_ADDQ:
6579 case CC_OP_ADCB ... CC_OP_ADCQ:
6580 case CC_OP_SUBB ... CC_OP_SUBQ:
6581 case CC_OP_SBBB ... CC_OP_SBBQ:
6582 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6583 case CC_OP_INCB ... CC_OP_INCQ:
6584 case CC_OP_DECB ... CC_OP_DECQ:
6585 case CC_OP_SHLB ... CC_OP_SHLQ:
6586 case CC_OP_SARB ... CC_OP_SARQ:
6587 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6588 /* Z was going to be computed from the non-zero status of CC_DST.
6589 We can get that same Z value (and the new C value) by leaving
6590 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6591 same width. */
6592 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6593 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6594 break;
6595 default:
6596 /* Otherwise, generate EFLAGS and replace the C bit. */
6597 gen_compute_eflags(s);
6598 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6599 ctz32(CC_C), 1);
6600 break;
6602 break;
6603 case 0x1bc: /* bsf / tzcnt */
6604 case 0x1bd: /* bsr / lzcnt */
6605 ot = dflag;
6606 modrm = cpu_ldub_code(env, s->pc++);
6607 reg = ((modrm >> 3) & 7) | rex_r;
6608 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6609 gen_extu(ot, cpu_T0);
6611 /* Note that lzcnt and tzcnt are in different extensions. */
6612 if ((prefixes & PREFIX_REPZ)
6613 && (b & 1
6614 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6615 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6616 int size = 8 << ot;
6617 tcg_gen_mov_tl(cpu_cc_src, cpu_T0);
6618 if (b & 1) {
6619 /* For lzcnt, reduce the target_ulong result by the
6620 number of zeros that we expect to find at the top. */
6621 gen_helper_clz(cpu_T0, cpu_T0);
6622 tcg_gen_subi_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - size);
6623 } else {
6624 /* For tzcnt, a zero input must return the operand size:
6625 force all bits outside the operand size to 1. */
6626 target_ulong mask = (target_ulong)-2 << (size - 1);
6627 tcg_gen_ori_tl(cpu_T0, cpu_T0, mask);
6628 gen_helper_ctz(cpu_T0, cpu_T0);
6630 /* For lzcnt/tzcnt, C and Z bits are defined and are
6631 related to the result. */
6632 gen_op_update1_cc();
6633 set_cc_op(s, CC_OP_BMILGB + ot);
6634 } else {
6635 /* For bsr/bsf, only the Z bit is defined and it is related
6636 to the input and not the result. */
6637 tcg_gen_mov_tl(cpu_cc_dst, cpu_T0);
6638 set_cc_op(s, CC_OP_LOGICB + ot);
6639 if (b & 1) {
6640 /* For bsr, return the bit index of the first 1 bit,
6641 not the count of leading zeros. */
6642 gen_helper_clz(cpu_T0, cpu_T0);
6643 tcg_gen_xori_tl(cpu_T0, cpu_T0, TARGET_LONG_BITS - 1);
6644 } else {
6645 gen_helper_ctz(cpu_T0, cpu_T0);
6647 /* ??? The manual says that the output is undefined when the
6648 input is zero, but real hardware leaves it unchanged, and
6649 real programs appear to depend on that. */
6650 tcg_gen_movi_tl(cpu_tmp0, 0);
6651 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T0, cpu_cc_dst, cpu_tmp0,
6652 cpu_regs[reg], cpu_T0);
6654 gen_op_mov_reg_v(ot, reg, cpu_T0);
6655 break;
6656 /************************/
6657 /* bcd */
6658 case 0x27: /* daa */
6659 if (CODE64(s))
6660 goto illegal_op;
6661 gen_update_cc_op(s);
6662 gen_helper_daa(cpu_env);
6663 set_cc_op(s, CC_OP_EFLAGS);
6664 break;
6665 case 0x2f: /* das */
6666 if (CODE64(s))
6667 goto illegal_op;
6668 gen_update_cc_op(s);
6669 gen_helper_das(cpu_env);
6670 set_cc_op(s, CC_OP_EFLAGS);
6671 break;
6672 case 0x37: /* aaa */
6673 if (CODE64(s))
6674 goto illegal_op;
6675 gen_update_cc_op(s);
6676 gen_helper_aaa(cpu_env);
6677 set_cc_op(s, CC_OP_EFLAGS);
6678 break;
6679 case 0x3f: /* aas */
6680 if (CODE64(s))
6681 goto illegal_op;
6682 gen_update_cc_op(s);
6683 gen_helper_aas(cpu_env);
6684 set_cc_op(s, CC_OP_EFLAGS);
6685 break;
6686 case 0xd4: /* aam */
6687 if (CODE64(s))
6688 goto illegal_op;
6689 val = cpu_ldub_code(env, s->pc++);
6690 if (val == 0) {
6691 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6692 } else {
6693 gen_helper_aam(cpu_env, tcg_const_i32(val));
6694 set_cc_op(s, CC_OP_LOGICB);
6696 break;
6697 case 0xd5: /* aad */
6698 if (CODE64(s))
6699 goto illegal_op;
6700 val = cpu_ldub_code(env, s->pc++);
6701 gen_helper_aad(cpu_env, tcg_const_i32(val));
6702 set_cc_op(s, CC_OP_LOGICB);
6703 break;
6704 /************************/
6705 /* misc */
6706 case 0x90: /* nop */
6707 /* XXX: correct lock test for all insn */
6708 if (prefixes & PREFIX_LOCK) {
6709 goto illegal_op;
6711 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6712 if (REX_B(s)) {
6713 goto do_xchg_reg_eax;
6715 if (prefixes & PREFIX_REPZ) {
6716 gen_update_cc_op(s);
6717 gen_jmp_im(pc_start - s->cs_base);
6718 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6719 s->is_jmp = DISAS_TB_JUMP;
6721 break;
6722 case 0x9b: /* fwait */
6723 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6724 (HF_MP_MASK | HF_TS_MASK)) {
6725 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6726 } else {
6727 gen_helper_fwait(cpu_env);
6729 break;
6730 case 0xcc: /* int3 */
6731 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6732 break;
6733 case 0xcd: /* int N */
6734 val = cpu_ldub_code(env, s->pc++);
6735 if (s->vm86 && s->iopl != 3) {
6736 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6737 } else {
6738 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6740 break;
6741 case 0xce: /* into */
6742 if (CODE64(s))
6743 goto illegal_op;
6744 gen_update_cc_op(s);
6745 gen_jmp_im(pc_start - s->cs_base);
6746 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6747 break;
6748 #ifdef WANT_ICEBP
6749 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6750 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6751 #if 1
6752 gen_debug(s, pc_start - s->cs_base);
6753 #else
6754 /* start debug */
6755 tb_flush(CPU(x86_env_get_cpu(env)));
6756 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6757 #endif
6758 break;
6759 #endif
6760 case 0xfa: /* cli */
6761 if (!s->vm86) {
6762 if (s->cpl <= s->iopl) {
6763 gen_helper_cli(cpu_env);
6764 } else {
6765 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6767 } else {
6768 if (s->iopl == 3) {
6769 gen_helper_cli(cpu_env);
6770 } else {
6771 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6774 break;
6775 case 0xfb: /* sti */
6776 if (!s->vm86) {
6777 if (s->cpl <= s->iopl) {
6778 gen_sti:
6779 gen_helper_sti(cpu_env);
6780 /* interrupts are enabled only after the first insn following sti */
6781 /* If several consecutive instructions inhibit interrupts, only the
6782 _first_ one takes effect */
6783 gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
6784 /* give a chance to handle pending irqs */
6785 gen_jmp_im(s->pc - s->cs_base);
6786 gen_eob(s);
6787 } else {
6788 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6790 } else {
6791 if (s->iopl == 3) {
6792 goto gen_sti;
6793 } else {
6794 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6797 break;
6798 case 0x62: /* bound */
6799 if (CODE64(s))
6800 goto illegal_op;
6801 ot = dflag;
6802 modrm = cpu_ldub_code(env, s->pc++);
6803 reg = (modrm >> 3) & 7;
6804 mod = (modrm >> 6) & 3;
6805 if (mod == 3)
6806 goto illegal_op;
6807 gen_op_mov_v_reg(ot, cpu_T0, reg);
6808 gen_lea_modrm(env, s, modrm);
6809 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6810 if (ot == MO_16) {
6811 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6812 } else {
6813 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6815 break;
6816 case 0x1c8 ... 0x1cf: /* bswap reg */
6817 reg = (b & 7) | REX_B(s);
6818 #ifdef TARGET_X86_64
6819 if (dflag == MO_64) {
6820 gen_op_mov_v_reg(MO_64, cpu_T0, reg);
6821 tcg_gen_bswap64_i64(cpu_T0, cpu_T0);
6822 gen_op_mov_reg_v(MO_64, reg, cpu_T0);
6823 } else
6824 #endif
6826 gen_op_mov_v_reg(MO_32, cpu_T0, reg);
6827 tcg_gen_ext32u_tl(cpu_T0, cpu_T0);
6828 tcg_gen_bswap32_tl(cpu_T0, cpu_T0);
6829 gen_op_mov_reg_v(MO_32, reg, cpu_T0);
6831 break;
6832 case 0xd6: /* salc */
6833 if (CODE64(s))
6834 goto illegal_op;
6835 gen_compute_eflags_c(s, cpu_T0);
6836 tcg_gen_neg_tl(cpu_T0, cpu_T0);
6837 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T0);
6838 break;
6839 case 0xe0: /* loopnz */
6840 case 0xe1: /* loopz */
6841 case 0xe2: /* loop */
6842 case 0xe3: /* jecxz */
6844 TCGLabel *l1, *l2, *l3;
6846 tval = (int8_t)insn_get(env, s, MO_8);
6847 next_eip = s->pc - s->cs_base;
6848 tval += next_eip;
6849 if (dflag == MO_16) {
6850 tval &= 0xffff;
6853 l1 = gen_new_label();
6854 l2 = gen_new_label();
6855 l3 = gen_new_label();
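/* l1 is the branch-taken path (EIP = tval), l3 the fall-through path
   (EIP = next_eip), and l2 the common join before gen_eob(). */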
6856 b &= 3;
6857 switch(b) {
6858 case 0: /* loopnz */
6859 case 1: /* loopz */
6860 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6861 gen_op_jz_ecx(s->aflag, l3);
6862 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6863 break;
6864 case 2: /* loop */
6865 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6866 gen_op_jnz_ecx(s->aflag, l1);
6867 break;
6868 default:
6869 case 3: /* jcxz */
6870 gen_op_jz_ecx(s->aflag, l1);
6871 break;
6874 gen_set_label(l3);
6875 gen_jmp_im(next_eip);
6876 tcg_gen_br(l2);
6878 gen_set_label(l1);
6879 gen_jmp_im(tval);
6880 gen_set_label(l2);
6881 gen_eob(s);
6883 break;
6884 case 0x130: /* wrmsr */
6885 case 0x132: /* rdmsr */
6886 if (s->cpl != 0) {
6887 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6888 } else {
6889 gen_update_cc_op(s);
6890 gen_jmp_im(pc_start - s->cs_base);
6891 if (b & 2) {
6892 gen_helper_rdmsr(cpu_env);
6893 } else {
6894 gen_helper_wrmsr(cpu_env);
6897 break;
6898 case 0x131: /* rdtsc */
6899 gen_update_cc_op(s);
6900 gen_jmp_im(pc_start - s->cs_base);
6901 if (s->tb->cflags & CF_USE_ICOUNT) {
6902 gen_io_start();
6904 gen_helper_rdtsc(cpu_env);
6905 if (s->tb->cflags & CF_USE_ICOUNT) {
6906 gen_io_end();
6907 gen_jmp(s, s->pc - s->cs_base);
6909 break;
6910 case 0x133: /* rdpmc */
6911 gen_update_cc_op(s);
6912 gen_jmp_im(pc_start - s->cs_base);
6913 gen_helper_rdpmc(cpu_env);
6914 break;
6915 case 0x134: /* sysenter */
6916 /* On Intel CPUs, SYSENTER is also valid in 64-bit mode */
6917 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6918 goto illegal_op;
6919 if (!s->pe) {
6920 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6921 } else {
6922 gen_helper_sysenter(cpu_env);
6923 gen_eob(s);
6925 break;
6926 case 0x135: /* sysexit */
6927 /* On Intel CPUs, SYSEXIT is also valid in 64-bit mode */
6928 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6929 goto illegal_op;
6930 if (!s->pe) {
6931 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6932 } else {
6933 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
6934 gen_eob(s);
6936 break;
6937 #ifdef TARGET_X86_64
6938 case 0x105: /* syscall */
6939 /* XXX: is it usable in real mode ? */
6940 gen_update_cc_op(s);
6941 gen_jmp_im(pc_start - s->cs_base);
6942 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
6943 gen_eob(s);
6944 break;
6945 case 0x107: /* sysret */
6946 if (!s->pe) {
6947 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6948 } else {
6949 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
6950 /* condition codes are modified only in long mode */
6951 if (s->lma) {
6952 set_cc_op(s, CC_OP_EFLAGS);
6954 gen_eob(s);
6956 break;
6957 #endif
6958 case 0x1a2: /* cpuid */
6959 gen_update_cc_op(s);
6960 gen_jmp_im(pc_start - s->cs_base);
6961 gen_helper_cpuid(cpu_env);
6962 break;
6963 case 0xf4: /* hlt */
6964 if (s->cpl != 0) {
6965 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6966 } else {
6967 gen_update_cc_op(s);
6968 gen_jmp_im(pc_start - s->cs_base);
6969 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
6970 s->is_jmp = DISAS_TB_JUMP;
6972 break;
6973 case 0x100:
6974 modrm = cpu_ldub_code(env, s->pc++);
6975 mod = (modrm >> 6) & 3;
6976 op = (modrm >> 3) & 7;
6977 switch(op) {
6978 case 0: /* sldt */
6979 if (!s->pe || s->vm86)
6980 goto illegal_op;
6981 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
6982 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
6983 offsetof(CPUX86State, ldt.selector));
6984 ot = mod == 3 ? dflag : MO_16;
6985 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6986 break;
6987 case 2: /* lldt */
6988 if (!s->pe || s->vm86)
6989 goto illegal_op;
6990 if (s->cpl != 0) {
6991 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6992 } else {
6993 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
6994 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
6995 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
6996 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
6998 break;
6999 case 1: /* str */
7000 if (!s->pe || s->vm86)
7001 goto illegal_op;
7002 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7003 tcg_gen_ld32u_tl(cpu_T0, cpu_env,
7004 offsetof(CPUX86State, tr.selector));
7005 ot = mod == 3 ? dflag : MO_16;
7006 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7007 break;
7008 case 3: /* ltr */
7009 if (!s->pe || s->vm86)
7010 goto illegal_op;
7011 if (s->cpl != 0) {
7012 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7013 } else {
7014 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7015 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7016 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T0);
7017 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7019 break;
7020 case 4: /* verr */
7021 case 5: /* verw */
7022 if (!s->pe || s->vm86)
7023 goto illegal_op;
7024 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7025 gen_update_cc_op(s);
7026 if (op == 4) {
7027 gen_helper_verr(cpu_env, cpu_T0);
7028 } else {
7029 gen_helper_verw(cpu_env, cpu_T0);
7031 set_cc_op(s, CC_OP_EFLAGS);
7032 break;
7033 default:
7034 goto illegal_op;
7036 break;
7038 case 0x101:
7039 modrm = cpu_ldub_code(env, s->pc++);
7040 switch (modrm) {
7041 CASE_MEM_OP(0): /* sgdt */
7042 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7043 gen_lea_modrm(env, s, modrm);
7044 tcg_gen_ld32u_tl(cpu_T0,
7045 cpu_env, offsetof(CPUX86State, gdt.limit));
7046 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7047 gen_add_A0_im(s, 2);
7048 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7049 if (dflag == MO_16) {
7050 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7052 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7053 break;
7055 case 0xc8: /* monitor */
7056 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7057 goto illegal_op;
7059 gen_update_cc_op(s);
7060 gen_jmp_im(pc_start - s->cs_base);
7061 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7062 gen_extu(s->aflag, cpu_A0);
7063 gen_add_A0_ds_seg(s);
7064 gen_helper_monitor(cpu_env, cpu_A0);
7065 break;
7067 case 0xc9: /* mwait */
7068 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
7069 goto illegal_op;
7071 gen_update_cc_op(s);
7072 gen_jmp_im(pc_start - s->cs_base);
7073 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7074 gen_eob(s);
7075 break;
7077 case 0xca: /* clac */
7078 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7079 || s->cpl != 0) {
7080 goto illegal_op;
7082 gen_helper_clac(cpu_env);
7083 gen_jmp_im(s->pc - s->cs_base);
7084 gen_eob(s);
7085 break;
7087 case 0xcb: /* stac */
7088 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
7089 || s->cpl != 0) {
7090 goto illegal_op;
7092 gen_helper_stac(cpu_env);
7093 gen_jmp_im(s->pc - s->cs_base);
7094 gen_eob(s);
7095 break;
7097 CASE_MEM_OP(1): /* sidt */
7098 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7099 gen_lea_modrm(env, s, modrm);
7100 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
7101 gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
7102 gen_add_A0_im(s, 2);
7103 tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7104 if (dflag == MO_16) {
7105 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7107 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7108 break;
7110 case 0xd0: /* xgetbv */
7111 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7112 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7113 | PREFIX_REPZ | PREFIX_REPNZ))) {
7114 goto illegal_op;
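/* ECX selects the extended control register; the helper returns the
   64-bit value, which is split into EDX:EAX below. */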
7116 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7117 gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
7118 tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
7119 break;
7121 case 0xd1: /* xsetbv */
7122 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7123 || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
7124 | PREFIX_REPZ | PREFIX_REPNZ))) {
7125 goto illegal_op;
7127 if (s->cpl != 0) {
7128 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7129 break;
7131 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7132 cpu_regs[R_EDX]);
7133 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
7134 gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
7135 /* End TB because translation flags may change. */
7136 gen_jmp_im(s->pc - s->cs_base);
7137 gen_eob(s);
7138 break;
7140 case 0xd8: /* VMRUN */
7141 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7142 goto illegal_op;
7144 if (s->cpl != 0) {
7145 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7146 break;
7148 gen_update_cc_op(s);
7149 gen_jmp_im(pc_start - s->cs_base);
7150 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7151 tcg_const_i32(s->pc - pc_start));
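/* VMRUN switches to the nested guest context, so return to the main
   execution loop (exit_tb(0)) rather than chaining to another TB. */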
7152 tcg_gen_exit_tb(0);
7153 s->is_jmp = DISAS_TB_JUMP;
7154 break;
7156 case 0xd9: /* VMMCALL */
7157 if (!(s->flags & HF_SVME_MASK)) {
7158 goto illegal_op;
7160 gen_update_cc_op(s);
7161 gen_jmp_im(pc_start - s->cs_base);
7162 gen_helper_vmmcall(cpu_env);
7163 break;
7165 case 0xda: /* VMLOAD */
7166 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7167 goto illegal_op;
7169 if (s->cpl != 0) {
7170 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7171 break;
7173 gen_update_cc_op(s);
7174 gen_jmp_im(pc_start - s->cs_base);
7175 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7176 break;
7178 case 0xdb: /* VMSAVE */
7179 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7180 goto illegal_op;
7182 if (s->cpl != 0) {
7183 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7184 break;
7186 gen_update_cc_op(s);
7187 gen_jmp_im(pc_start - s->cs_base);
7188 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7189 break;
7191 case 0xdc: /* STGI */
7192 if ((!(s->flags & HF_SVME_MASK)
7193 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7194 || !s->pe) {
7195 goto illegal_op;
7197 if (s->cpl != 0) {
7198 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7199 break;
7201 gen_update_cc_op(s);
7202 gen_jmp_im(pc_start - s->cs_base);
7203 gen_helper_stgi(cpu_env);
7204 break;
7206 case 0xdd: /* CLGI */
7207 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7208 goto illegal_op;
7210 if (s->cpl != 0) {
7211 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7212 break;
7214 gen_update_cc_op(s);
7215 gen_jmp_im(pc_start - s->cs_base);
7216 gen_helper_clgi(cpu_env);
7217 break;
7219 case 0xde: /* SKINIT */
7220 if ((!(s->flags & HF_SVME_MASK)
7221 && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
7222 || !s->pe) {
7223 goto illegal_op;
7225 gen_update_cc_op(s);
7226 gen_jmp_im(pc_start - s->cs_base);
7227 gen_helper_skinit(cpu_env);
7228 break;
7230 case 0xdf: /* INVLPGA */
7231 if (!(s->flags & HF_SVME_MASK) || !s->pe) {
7232 goto illegal_op;
7234 if (s->cpl != 0) {
7235 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7236 break;
7238 gen_update_cc_op(s);
7239 gen_jmp_im(pc_start - s->cs_base);
7240 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
7241 break;
7243 CASE_MEM_OP(2): /* lgdt */
7244 if (s->cpl != 0) {
7245 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7246 break;
7248 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
7249 gen_lea_modrm(env, s, modrm);
7250 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7251 gen_add_A0_im(s, 2);
7252 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7253 if (dflag == MO_16) {
7254 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7256 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
7257 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
7258 break;
7260 CASE_MEM_OP(3): /* lidt */
7261 if (s->cpl != 0) {
7262 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7263 break;
7265 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
7266 gen_lea_modrm(env, s, modrm);
7267 gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
7268 gen_add_A0_im(s, 2);
7269 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
7270 if (dflag == MO_16) {
7271 tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
7273 tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
7274 tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
7275 break;
7277 CASE_MEM_OP(4): /* smsw */
7278 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7279 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7280 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]) + 4);
7281 #else
7282 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]));
7283 #endif
7284 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
7285 break;
7287 CASE_MEM_OP(6): /* lmsw */
7288 if (s->cpl != 0) {
7289 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7290 break;
7292 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7293 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7294 gen_helper_lmsw(cpu_env, cpu_T0);
7295 gen_jmp_im(s->pc - s->cs_base);
7296 gen_eob(s);
7297 break;
7299 CASE_MEM_OP(7): /* invlpg */
7300 if (s->cpl != 0) {
7301 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7302 break;
7304 gen_update_cc_op(s);
7305 gen_jmp_im(pc_start - s->cs_base);
7306 gen_lea_modrm(env, s, modrm);
7307 gen_helper_invlpg(cpu_env, cpu_A0);
7308 gen_jmp_im(s->pc - s->cs_base);
7309 gen_eob(s);
7310 break;
7312 case 0xf8: /* swapgs */
7313 #ifdef TARGET_X86_64
7314 if (CODE64(s)) {
7315 if (s->cpl != 0) {
7316 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7317 } else {
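/* Exchange the current GS base with the value saved in kernelgsbase
   (MSR_KERNEL_GS_BASE). */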
7318 tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
7319 tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
7320 offsetof(CPUX86State, kernelgsbase));
7321 tcg_gen_st_tl(cpu_T0, cpu_env,
7322 offsetof(CPUX86State, kernelgsbase));
7324 break;
7326 #endif
7327 goto illegal_op;
7329 case 0xf9: /* rdtscp */
7330 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
7331 goto illegal_op;
7333 gen_update_cc_op(s);
7334 gen_jmp_im(pc_start - s->cs_base);
7335 if (s->tb->cflags & CF_USE_ICOUNT) {
7336 gen_io_start();
7338 gen_helper_rdtscp(cpu_env);
7339 if (s->tb->cflags & CF_USE_ICOUNT) {
7340 gen_io_end();
7341 gen_jmp(s, s->pc - s->cs_base);
7343 break;
7345 default:
7346 goto illegal_op;
7348 break;
7350 case 0x108: /* invd */
7351 case 0x109: /* wbinvd */
7352 if (s->cpl != 0) {
7353 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7354 } else {
7355 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7356 /* nothing to do */
7358 break;
7359 case 0x63: /* arpl or movslS (x86_64) */
7360 #ifdef TARGET_X86_64
7361 if (CODE64(s)) {
7362 int d_ot;
7363 /* d_ot is the size of the destination operand */
7364 d_ot = dflag;
7366 modrm = cpu_ldub_code(env, s->pc++);
7367 reg = ((modrm >> 3) & 7) | rex_r;
7368 mod = (modrm >> 6) & 3;
7369 rm = (modrm & 7) | REX_B(s);
7371 if (mod == 3) {
7372 gen_op_mov_v_reg(MO_32, cpu_T0, rm);
7373 /* sign extend */
7374 if (d_ot == MO_64) {
7375 tcg_gen_ext32s_tl(cpu_T0, cpu_T0);
7377 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
7378 } else {
7379 gen_lea_modrm(env, s, modrm);
7380 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T0, cpu_A0);
7381 gen_op_mov_reg_v(d_ot, reg, cpu_T0);
7383 } else
7384 #endif
7386 TCGLabel *label1;
7387 TCGv t0, t1, t2, a0;
7389 if (!s->pe || s->vm86)
7390 goto illegal_op;
7391 t0 = tcg_temp_local_new();
7392 t1 = tcg_temp_local_new();
7393 t2 = tcg_temp_local_new();
7394 ot = MO_16;
7395 modrm = cpu_ldub_code(env, s->pc++);
7396 reg = (modrm >> 3) & 7;
7397 mod = (modrm >> 6) & 3;
7398 rm = modrm & 7;
7399 if (mod != 3) {
7400 gen_lea_modrm(env, s, modrm);
7401 gen_op_ld_v(s, ot, t0, cpu_A0);
7402 a0 = tcg_temp_local_new();
7403 tcg_gen_mov_tl(a0, cpu_A0);
7404 } else {
7405 gen_op_mov_v_reg(ot, t0, rm);
7406 TCGV_UNUSED(a0);
7408 gen_op_mov_v_reg(ot, t1, reg);
7409 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7410 tcg_gen_andi_tl(t1, t1, 3);
7411 tcg_gen_movi_tl(t2, 0);
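/* t2 holds the new ZF value: it stays 0 when the destination RPL is
   already >= the source RPL, and is set to CC_Z when the RPL is raised
   below. */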
7412 label1 = gen_new_label();
7413 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7414 tcg_gen_andi_tl(t0, t0, ~3);
7415 tcg_gen_or_tl(t0, t0, t1);
7416 tcg_gen_movi_tl(t2, CC_Z);
7417 gen_set_label(label1);
7418 if (mod != 3) {
7419 gen_op_st_v(s, ot, t0, a0);
7420 tcg_temp_free(a0);
7421 } else {
7422 gen_op_mov_reg_v(ot, rm, t0);
7424 gen_compute_eflags(s);
7425 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7426 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7427 tcg_temp_free(t0);
7428 tcg_temp_free(t1);
7429 tcg_temp_free(t2);
7431 break;
7432 case 0x102: /* lar */
7433 case 0x103: /* lsl */
7435 TCGLabel *label1;
7436 TCGv t0;
7437 if (!s->pe || s->vm86)
7438 goto illegal_op;
7439 ot = dflag != MO_16 ? MO_32 : MO_16;
7440 modrm = cpu_ldub_code(env, s->pc++);
7441 reg = ((modrm >> 3) & 7) | rex_r;
7442 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7443 t0 = tcg_temp_local_new();
7444 gen_update_cc_op(s);
7445 if (b == 0x102) {
7446 gen_helper_lar(t0, cpu_env, cpu_T0);
7447 } else {
7448 gen_helper_lsl(t0, cpu_env, cpu_T0);
7450 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7451 label1 = gen_new_label();
7452 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7453 gen_op_mov_reg_v(ot, reg, t0);
7454 gen_set_label(label1);
7455 set_cc_op(s, CC_OP_EFLAGS);
7456 tcg_temp_free(t0);
7458 break;
7459 case 0x118:
7460 modrm = cpu_ldub_code(env, s->pc++);
7461 mod = (modrm >> 6) & 3;
7462 op = (modrm >> 3) & 7;
7463 switch(op) {
7464 case 0: /* prefetchnta */
7465 case 1: /* prefetcht0 */
7466 case 2: /* prefetcht1 */
7467 case 3: /* prefetcht2 */
7468 if (mod == 3)
7469 goto illegal_op;
7470 gen_lea_modrm(env, s, modrm);
7471 /* nothing more to do */
7472 break;
7473 default: /* nop (multi byte) */
7474 gen_nop_modrm(env, s, modrm);
7475 break;
7477 break;
7478 case 0x11a:
7479 modrm = cpu_ldub_code(env, s->pc++);
7480 if (s->flags & HF_MPX_EN_MASK) {
7481 mod = (modrm >> 6) & 3;
7482 reg = ((modrm >> 3) & 7) | rex_r;
7483 if (prefixes & PREFIX_REPZ) {
7484 /* bndcl */
7485 if (reg >= 4
7486 || (prefixes & PREFIX_LOCK)
7487 || s->aflag == MO_16) {
7488 goto illegal_op;
7490 gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
7491 } else if (prefixes & PREFIX_REPNZ) {
7492 /* bndcu */
7493 if (reg >= 4
7494 || (prefixes & PREFIX_LOCK)
7495 || s->aflag == MO_16) {
7496 goto illegal_op;
7498 TCGv_i64 notu = tcg_temp_new_i64();
7499 tcg_gen_not_i64(notu, cpu_bndu[reg]);
7500 gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
7501 tcg_temp_free_i64(notu);
7502 } else if (prefixes & PREFIX_DATA) {
7503 /* bndmov -- from reg/mem */
7504 if (reg >= 4 || s->aflag == MO_16) {
7505 goto illegal_op;
7507 if (mod == 3) {
7508 int reg2 = (modrm & 7) | REX_B(s);
7509 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7510 goto illegal_op;
7512 if (s->flags & HF_MPX_IU_MASK) {
7513 tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
7514 tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
7516 } else {
7517 gen_lea_modrm(env, s, modrm);
7518 if (CODE64(s)) {
7519 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7520 s->mem_index, MO_LEQ);
7521 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7522 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7523 s->mem_index, MO_LEQ);
7524 } else {
7525 tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
7526 s->mem_index, MO_LEUL);
7527 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7528 tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
7529 s->mem_index, MO_LEUL);
7531 /* bnd registers are now in-use */
7532 gen_set_hflag(s, HF_MPX_IU_MASK);
7534 } else if (mod != 3) {
7535 /* bndldx */
7536 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7537 if (reg >= 4
7538 || (prefixes & PREFIX_LOCK)
7539 || s->aflag == MO_16
7540 || a.base < -1) {
7541 goto illegal_op;
7543 if (a.base >= 0) {
7544 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7545 } else {
7546 tcg_gen_movi_tl(cpu_A0, 0);
7548 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7549 if (a.index >= 0) {
7550 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7551 } else {
7552 tcg_gen_movi_tl(cpu_T0, 0);
7554 if (CODE64(s)) {
7555 gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
7556 tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
7557 offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
7558 } else {
7559 gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
7560 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
7561 tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
7563 gen_set_hflag(s, HF_MPX_IU_MASK);
7566 gen_nop_modrm(env, s, modrm);
7567 break;
7568 case 0x11b:
7569 modrm = cpu_ldub_code(env, s->pc++);
7570 if (s->flags & HF_MPX_EN_MASK) {
7571 mod = (modrm >> 6) & 3;
7572 reg = ((modrm >> 3) & 7) | rex_r;
7573 if (mod != 3 && (prefixes & PREFIX_REPZ)) {
7574 /* bndmk */
7575 if (reg >= 4
7576 || (prefixes & PREFIX_LOCK)
7577 || s->aflag == MO_16) {
7578 goto illegal_op;
7580 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7581 if (a.base >= 0) {
7582 tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
7583 if (!CODE64(s)) {
7584 tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
7586 } else if (a.base == -1) {
7587 /* no base register: the lower bound is 0 */
7588 tcg_gen_movi_i64(cpu_bndl[reg], 0);
7589 } else {
7590 /* rip-relative generates #ud */
7591 goto illegal_op;
7593 tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
7594 if (!CODE64(s)) {
7595 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
7597 tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
7598 /* bnd registers are now in-use */
7599 gen_set_hflag(s, HF_MPX_IU_MASK);
7600 break;
7601 } else if (prefixes & PREFIX_REPNZ) {
7602 /* bndcn */
7603 if (reg >= 4
7604 || (prefixes & PREFIX_LOCK)
7605 || s->aflag == MO_16) {
7606 goto illegal_op;
7608 gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
7609 } else if (prefixes & PREFIX_DATA) {
7610 /* bndmov -- to reg/mem */
7611 if (reg >= 4 || s->aflag == MO_16) {
7612 goto illegal_op;
7614 if (mod == 3) {
7615 int reg2 = (modrm & 7) | REX_B(s);
7616 if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
7617 goto illegal_op;
7619 if (s->flags & HF_MPX_IU_MASK) {
7620 tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
7621 tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
7623 } else {
7624 gen_lea_modrm(env, s, modrm);
7625 if (CODE64(s)) {
7626 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7627 s->mem_index, MO_LEQ);
7628 tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
7629 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7630 s->mem_index, MO_LEQ);
7631 } else {
7632 tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
7633 s->mem_index, MO_LEUL);
7634 tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
7635 tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
7636 s->mem_index, MO_LEUL);
7639 } else if (mod != 3) {
7640 /* bndstx */
7641 AddressParts a = gen_lea_modrm_0(env, s, modrm);
7642 if (reg >= 4
7643 || (prefixes & PREFIX_LOCK)
7644 || s->aflag == MO_16
7645 || a.base < -1) {
7646 goto illegal_op;
7648 if (a.base >= 0) {
7649 tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
7650 } else {
7651 tcg_gen_movi_tl(cpu_A0, 0);
7653 gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
7654 if (a.index >= 0) {
7655 tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
7656 } else {
7657 tcg_gen_movi_tl(cpu_T0, 0);
7659 if (CODE64(s)) {
7660 gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
7661 cpu_bndl[reg], cpu_bndu[reg]);
7662 } else {
7663 gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
7664 cpu_bndl[reg], cpu_bndu[reg]);
7668 gen_nop_modrm(env, s, modrm);
7669 break;
7670 case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
7671 modrm = cpu_ldub_code(env, s->pc++);
7672 gen_nop_modrm(env, s, modrm);
7673 break;
7674 case 0x120: /* mov reg, crN */
7675 case 0x122: /* mov crN, reg */
7676 if (s->cpl != 0) {
7677 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7678 } else {
7679 modrm = cpu_ldub_code(env, s->pc++);
7680 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7681 * AMD documentation (24594.pdf) and testing of
7682 * Intel 386 and 486 processors all show that the mod bits
7683 * are assumed to be 1's, regardless of actual values. */
7685 rm = (modrm & 7) | REX_B(s);
7686 reg = ((modrm >> 3) & 7) | rex_r;
7687 if (CODE64(s))
7688 ot = MO_64;
7689 else
7690 ot = MO_32;
7691 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7692 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7693 reg = 8;
7695 switch(reg) {
7696 case 0:
7697 case 2:
7698 case 3:
7699 case 4:
7700 case 8:
7701 gen_update_cc_op(s);
7702 gen_jmp_im(pc_start - s->cs_base);
7703 if (b & 2) {
7704 gen_op_mov_v_reg(ot, cpu_T0, rm);
7705 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7706 cpu_T0);
7707 gen_jmp_im(s->pc - s->cs_base);
7708 gen_eob(s);
7709 } else {
7710 gen_helper_read_crN(cpu_T0, cpu_env, tcg_const_i32(reg));
7711 gen_op_mov_reg_v(ot, rm, cpu_T0);
7713 break;
7714 default:
7715 goto illegal_op;
7718 break;
7719 case 0x121: /* mov reg, drN */
7720 case 0x123: /* mov drN, reg */
7721 if (s->cpl != 0) {
7722 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7723 } else {
7724 modrm = cpu_ldub_code(env, s->pc++);
7725 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7726 * AMD documentation (24594.pdf) and testing of
7727 * Intel 386 and 486 processors all show that the mod bits
7728 * are assumed to be 1's, regardless of actual values. */
7730 rm = (modrm & 7) | REX_B(s);
7731 reg = ((modrm >> 3) & 7) | rex_r;
7732 if (CODE64(s))
7733 ot = MO_64;
7734 else
7735 ot = MO_32;
7736 if (reg >= 8) {
7737 goto illegal_op;
7739 if (b & 2) {
7740 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7741 gen_op_mov_v_reg(ot, cpu_T0, rm);
7742 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7743 gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T0);
7744 gen_jmp_im(s->pc - s->cs_base);
7745 gen_eob(s);
7746 } else {
7747 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7748 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7749 gen_helper_get_dr(cpu_T0, cpu_env, cpu_tmp2_i32);
7750 gen_op_mov_reg_v(ot, rm, cpu_T0);
7753 break;
7754 case 0x106: /* clts */
7755 if (s->cpl != 0) {
7756 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7757 } else {
7758 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7759 gen_helper_clts(cpu_env);
7760 /* abort block because static cpu state changed */
7761 gen_jmp_im(s->pc - s->cs_base);
7762 gen_eob(s);
7764 break;
7765 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7766 case 0x1c3: /* MOVNTI reg, mem */
7767 if (!(s->cpuid_features & CPUID_SSE2))
7768 goto illegal_op;
7769 ot = mo_64_32(dflag);
7770 modrm = cpu_ldub_code(env, s->pc++);
7771 mod = (modrm >> 6) & 3;
7772 if (mod == 3)
7773 goto illegal_op;
7774 reg = ((modrm >> 3) & 7) | rex_r;
7775 /* generate a generic store */
7776 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7777 break;
7778 case 0x1ae:
7779 modrm = cpu_ldub_code(env, s->pc++);
7780 switch (modrm) {
7781 CASE_MEM_OP(0): /* fxsave */
7782 if (!(s->cpuid_features & CPUID_FXSR)
7783 || (prefixes & PREFIX_LOCK)) {
7784 goto illegal_op;
7786 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7787 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7788 break;
7790 gen_lea_modrm(env, s, modrm);
7791 gen_helper_fxsave(cpu_env, cpu_A0);
7792 break;
7794 CASE_MEM_OP(1): /* fxrstor */
7795 if (!(s->cpuid_features & CPUID_FXSR)
7796 || (prefixes & PREFIX_LOCK)) {
7797 goto illegal_op;
7799 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7800 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7801 break;
7803 gen_lea_modrm(env, s, modrm);
7804 gen_helper_fxrstor(cpu_env, cpu_A0);
7805 break;
7807 CASE_MEM_OP(2): /* ldmxcsr */
7808 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
7809 goto illegal_op;
7811 if (s->flags & HF_TS_MASK) {
7812 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7813 break;
7815 gen_lea_modrm(env, s, modrm);
7816 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
7817 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7818 break;
7820 CASE_MEM_OP(3): /* stmxcsr */
7821 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
7822 goto illegal_op;
7824 if (s->flags & HF_TS_MASK) {
7825 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7826 break;
7828 gen_lea_modrm(env, s, modrm);
7829 tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
7830 gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
7831 break;
7833 CASE_MEM_OP(4): /* xsave */
7834 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7835 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
7836 | PREFIX_REPZ | PREFIX_REPNZ))) {
7837 goto illegal_op;
7839 gen_lea_modrm(env, s, modrm);
7840 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7841 cpu_regs[R_EDX]);
7842 gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
7843 break;
7845 CASE_MEM_OP(5): /* xrstor */
7846 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7847 || (prefixes & (PREFIX_LOCK | PREFIX_DATA
7848 | PREFIX_REPZ | PREFIX_REPNZ))) {
7849 goto illegal_op;
7851 gen_lea_modrm(env, s, modrm);
7852 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7853 cpu_regs[R_EDX]);
7854 gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
7855 /* XRSTOR is how MPX is enabled, which changes how
7856 we translate. Thus we need to end the TB. */
7857 gen_update_cc_op(s);
7858 gen_jmp_im(s->pc - s->cs_base);
7859 gen_eob(s);
7860 break;
7862 CASE_MEM_OP(6): /* xsaveopt / clwb */
7863 if (prefixes & PREFIX_LOCK) {
7864 goto illegal_op;
7866 if (prefixes & PREFIX_DATA) {
7867 /* clwb */
7868 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
7869 goto illegal_op;
7871 gen_nop_modrm(env, s, modrm);
7872 } else {
7873 /* xsaveopt */
7874 if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
7875 || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
7876 || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
7877 goto illegal_op;
7879 gen_lea_modrm(env, s, modrm);
7880 tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
7881 cpu_regs[R_EDX]);
7882 gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
7884 break;
7886 CASE_MEM_OP(7): /* clflush / clflushopt */
7887 if (prefixes & PREFIX_LOCK) {
7888 goto illegal_op;
7890 if (prefixes & PREFIX_DATA) {
7891 /* clflushopt */
7892 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
7893 goto illegal_op;
7895 } else {
7896 /* clflush */
7897 if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
7898 || !(s->cpuid_features & CPUID_CLFLUSH)) {
7899 goto illegal_op;
7902 gen_nop_modrm(env, s, modrm);
7903 break;
7905 case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
7906 case 0xc8 ... 0xc8: /* rdgsbase (f3 0f ae /1) */
7907 case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
7908 case 0xd8 ... 0xd8: /* wrgsbase (f3 0f ae /3) */
7909 if (CODE64(s)
7910 && (prefixes & PREFIX_REPZ)
7911 && !(prefixes & PREFIX_LOCK)
7912 && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
7913 TCGv base, treg, src, dst;
7915 /* Preserve hflags bits by testing CR4 at runtime. */
7916 tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
7917 gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
7919 base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
7920 treg = cpu_regs[(modrm & 7) | REX_B(s)];
7922 if (modrm & 0x10) {
7923 /* wr*base */
7924 dst = base, src = treg;
7925 } else {
7926 /* rd*base */
7927 dst = treg, src = base;
7930 if (s->dflag == MO_32) {
7931 tcg_gen_ext32u_tl(dst, src);
7932 } else {
7933 tcg_gen_mov_tl(dst, src);
7935 break;
7937 goto illegal_op;
7939 case 0xf8: /* sfence / pcommit */
7940 if (prefixes & PREFIX_DATA) {
7941 /* pcommit */
7942 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
7943 || (prefixes & PREFIX_LOCK)) {
7944 goto illegal_op;
7946 break;
7948 /* fallthru */
7949 case 0xf9 ... 0xff: /* sfence */
7950 case 0xe8 ... 0xef: /* lfence */
7951 case 0xf0 ... 0xf7: /* mfence */
7952 if (!(s->cpuid_features & CPUID_SSE2)
7953 || (prefixes & PREFIX_LOCK)) {
7954 goto illegal_op;
7956 break;
7958 default:
7959 goto illegal_op;
7961 break;
7963 case 0x10d: /* 3DNow! prefetch(w) */
7964 modrm = cpu_ldub_code(env, s->pc++);
7965 mod = (modrm >> 6) & 3;
7966 if (mod == 3)
7967 goto illegal_op;
7968 gen_lea_modrm(env, s, modrm);
7969 /* ignore for now */
7970 break;
7971 case 0x1aa: /* rsm */
7972 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7973 if (!(s->flags & HF_SMM_MASK))
7974 goto illegal_op;
7975 gen_update_cc_op(s);
7976 gen_jmp_im(s->pc - s->cs_base);
7977 gen_helper_rsm(cpu_env);
7978 gen_eob(s);
7979 break;
7980 case 0x1b8: /* SSE4.2 popcnt */
7981 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7982 PREFIX_REPZ)
7983 goto illegal_op;
7984 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7985 goto illegal_op;
7987 modrm = cpu_ldub_code(env, s->pc++);
7988 reg = ((modrm >> 3) & 7) | rex_r;
7990 if (s->prefix & PREFIX_DATA) {
7991 ot = MO_16;
7992 } else {
7993 ot = mo_64_32(dflag);
7996 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7997 gen_helper_popcnt(cpu_T0, cpu_env, cpu_T0, tcg_const_i32(ot));
7998 gen_op_mov_reg_v(ot, reg, cpu_T0);
8000 set_cc_op(s, CC_OP_EFLAGS);
8001 break;
8002 case 0x10e ... 0x10f:
8003 /* 3DNow! instructions, ignore prefixes */
8004 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
8005 case 0x110 ... 0x117:
8006 case 0x128 ... 0x12f:
8007 case 0x138 ... 0x13a:
8008 case 0x150 ... 0x179:
8009 case 0x17c ... 0x17f:
8010 case 0x1c2:
8011 case 0x1c4 ... 0x1c6:
8012 case 0x1d0 ... 0x1fe:
8013 gen_sse(env, s, b, pc_start, rex_r);
8014 break;
8015 default:
8016 goto illegal_op;
8018 /* lock generation */
8019 if (s->prefix & PREFIX_LOCK)
8020 gen_helper_unlock();
8021 return s->pc;
8022 illegal_op:
8023 if (s->prefix & PREFIX_LOCK)
8024 gen_helper_unlock();
8025 /* XXX: ensure that no lock was generated */
8026 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
8027 return s->pc;
8030 void tcg_x86_init(void)
8032 static const char reg_names[CPU_NB_REGS][4] = {
8033 #ifdef TARGET_X86_64
8034 [R_EAX] = "rax",
8035 [R_EBX] = "rbx",
8036 [R_ECX] = "rcx",
8037 [R_EDX] = "rdx",
8038 [R_ESI] = "rsi",
8039 [R_EDI] = "rdi",
8040 [R_EBP] = "rbp",
8041 [R_ESP] = "rsp",
8042 [8] = "r8",
8043 [9] = "r9",
8044 [10] = "r10",
8045 [11] = "r11",
8046 [12] = "r12",
8047 [13] = "r13",
8048 [14] = "r14",
8049 [15] = "r15",
8050 #else
8051 [R_EAX] = "eax",
8052 [R_EBX] = "ebx",
8053 [R_ECX] = "ecx",
8054 [R_EDX] = "edx",
8055 [R_ESI] = "esi",
8056 [R_EDI] = "edi",
8057 [R_EBP] = "ebp",
8058 [R_ESP] = "esp",
8059 #endif
8061 static const char seg_base_names[6][8] = {
8062 [R_CS] = "cs_base",
8063 [R_DS] = "ds_base",
8064 [R_ES] = "es_base",
8065 [R_FS] = "fs_base",
8066 [R_GS] = "gs_base",
8067 [R_SS] = "ss_base",
8069 static const char bnd_regl_names[4][8] = {
8070 "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
8072 static const char bnd_regu_names[4][8] = {
8073 "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
8075 int i;
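/* Create TCG globals backed by the corresponding CPUX86State fields so
   that generated code can reference the guest registers, segment bases,
   and MPX bound registers directly. */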
8077 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
8078 cpu_cc_op = tcg_global_mem_new_i32(cpu_env,
8079 offsetof(CPUX86State, cc_op), "cc_op");
8080 cpu_cc_dst = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_dst),
8081 "cc_dst");
8082 cpu_cc_src = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src),
8083 "cc_src");
8084 cpu_cc_src2 = tcg_global_mem_new(cpu_env, offsetof(CPUX86State, cc_src2),
8085 "cc_src2");
8087 for (i = 0; i < CPU_NB_REGS; ++i) {
8088 cpu_regs[i] = tcg_global_mem_new(cpu_env,
8089 offsetof(CPUX86State, regs[i]),
8090 reg_names[i]);
8093 for (i = 0; i < 6; ++i) {
8094 cpu_seg_base[i]
8095 = tcg_global_mem_new(cpu_env,
8096 offsetof(CPUX86State, segs[i].base),
8097 seg_base_names[i]);
8100 for (i = 0; i < 4; ++i) {
8101 cpu_bndl[i]
8102 = tcg_global_mem_new_i64(cpu_env,
8103 offsetof(CPUX86State, bnd_regs[i].lb),
8104 bnd_regl_names[i]);
8105 cpu_bndu[i]
8106 = tcg_global_mem_new_i64(cpu_env,
8107 offsetof(CPUX86State, bnd_regs[i].ub),
8108 bnd_regu_names[i]);
8111 helper_lock_init();
8114 /* generate intermediate code for basic block 'tb'. */
8115 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8117 X86CPU *cpu = x86_env_get_cpu(env);
8118 CPUState *cs = CPU(cpu);
8119 DisasContext dc1, *dc = &dc1;
8120 target_ulong pc_ptr;
8121 uint64_t flags;
8122 target_ulong pc_start;
8123 target_ulong cs_base;
8124 int num_insns;
8125 int max_insns;
8127 /* generate intermediate code */
8128 pc_start = tb->pc;
8129 cs_base = tb->cs_base;
8130 flags = tb->flags;
8132 dc->pe = (flags >> HF_PE_SHIFT) & 1;
8133 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
8134 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
8135 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
8136 dc->f_st = 0;
8137 dc->vm86 = (flags >> VM_SHIFT) & 1;
8138 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
8139 dc->iopl = (flags >> IOPL_SHIFT) & 3;
8140 dc->tf = (flags >> TF_SHIFT) & 1;
8141 dc->singlestep_enabled = cs->singlestep_enabled;
8142 dc->cc_op = CC_OP_DYNAMIC;
8143 dc->cc_op_dirty = false;
8144 dc->cs_base = cs_base;
8145 dc->tb = tb;
8146 dc->popl_esp_hack = 0;
8147 /* select memory access functions */
8148 dc->mem_index = 0;
8149 if (flags & HF_SOFTMMU_MASK) {
8150 dc->mem_index = cpu_mmu_index(env, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
    /* Do not optimize repz jumps at all in icount mode, because
       rep movsS instructions are executed along different paths
       in the !repz_opt and repz_opt cases.  The optimized path was
       used everywhere except in single step mode, so disabling the
       jump optimization here makes the control paths identical in
       normal execution and in single step mode.
       As a result there is no jump optimization for repz in
       record/replay mode, and there is always an additional step
       for ecx=0 when icount is enabled.  */
    dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T0 = tcg_temp_new();
    cpu_T1 = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
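    /* cpu_cc_srcT is a local temp, so (unlike the plain temps above) it
       keeps its value across branches emitted within the TB. */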
    cpu_cc_srcT = tcg_temp_local_new();

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    gen_tb_start(tb);
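    /* Main translation loop: translate one guest instruction per iteration
       until the block must end (branch or exception, single stepping, an
       icount page-boundary stop, a full op buffer, or the instruction
       budget running out). */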
    for(;;) {
        tcg_gen_insn_start(pc_ptr, dc->cc_op);
        num_insns++;

        /* If RF is set, suppress an internally generated breakpoint.  */
        if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
                                         tb->flags & HF_RF_MASK
                                         ? BP_GDB : BP_ANY))) {
            gen_debug(dc, pc_ptr - dc->cs_base);
            /* The address covered by the breakpoint must be included in
               [tb->pc, tb->pc + tb->size) in order for it to be
               properly cleared -- thus we increment the PC here so that
               the logic setting tb->size below does the right thing.  */
            pc_ptr += 1;
            goto done_generating;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        pc_ptr = disas_insn(env, dc, pc_ptr);
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* In single step mode, we generate only one instruction and
           generate an exception.  */
        /* If irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen.  */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* Do not cross a page boundary in icount mode, since it can cause
           an exception.  Do this only when the boundary would be crossed
           by the first instruction in the block.  If the current
           instruction already crossed the boundary, that is ok, because
           an exception has not stopped this code.  */
        if ((tb->cflags & CF_USE_ICOUNT)
            && ((pc_ptr & TARGET_PAGE_MASK)
                != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
                || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* If the translation grows too long, stop generation as well.  */
        if (tcg_op_buf_full() ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
done_generating:
    gen_tb_end(tb, num_insns);
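
    /* With "-d in_asm" logging enabled, dump the guest code that was just
       translated. */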
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    tb->size = pc_ptr - pc_start;
    tb->icount = num_insns;
}
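
/* Recover eip (and, when it is not dynamic, cc_op) for a faulting
   instruction from the values recorded by tcg_gen_insn_start() above. */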
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    int cc_op = data[1];
    env->eip = data[0] - tb->cs_base;
    if (cc_op != CC_OP_DYNAMIC) {
        env->cc_op = cc_op;
    }
}