/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "tcg-op.h"
25 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

//#define MACRO_TEST   1
/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    TCGMemOp aflag;
    TCGMemOp dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
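
/* Worked example (illustrative, not part of the original file): EFLAGS
 * are computed lazily.  An arithmetic op only records its operands and
 * a CC_OP value, e.g. for a subtraction of operand size "ot":
 *
 *     tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
 *     tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
 *     set_cc_op(s, CC_OP_SUBB + ot);
 *
 * Only a later consumer (jcc, setcc, pushf, ...) forces the actual
 * flag computation via gen_compute_eflags() or a cc_compute helper;
 * gen_update_cc_op() spills the pending CC_OP value before such points.
 */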
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
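
/* Example (illustrative): with no REX prefix, reg = 4 in a byte-sized
 * operation selects AH (bits 15..8 of EAX), so byte_reg_is_xH(4) is
 * true.  With any REX prefix present, x86_64_hregs is set and reg 4..7
 * selects SPL/BPL/SIL/DIL instead, so the function returns false.
 */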
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
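
/* Example (illustrative): many opcode pairs encode width in the low
 * bit, e.g. mo_b_d(0xfe, MO_32) yields MO_8 (INC/DEC r/m8) while
 * mo_b_d(0xff, MO_32) yields MO_32; for port I/O, mo_b_d32(b, MO_64)
 * never yields MO_64, since there are no 64-bit IN/OUT operations.
 */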
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}

static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}
static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T[0]);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
        break;
    case MO_16:
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
        gen_op_addl_A0_seg(s, override);
        break;
    default:
        tcg_abort();
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        gen_op_movq_A0_reg(R_EDI);
        break;
#endif
    case MO_32:
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
        break;
    case MO_16:
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
        gen_op_addl_A0_seg(s, R_ES);
        break;
    default:
        tcg_abort();
    }
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}
static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
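
/* Illustrative note (not from the original source): a CCPrepare
 * describes the test
 *     cond(reg & mask, use_reg2 ? reg2 : imm)
 * without emitting it, so the same description can later become a
 * setcc, a cmov or a branch.  E.g. "carry set" under CC_OP_EFLAGS is
 * { .cond = TCG_COND_NE, .reg = cpu_cc_src, .mask = CC_C }.
 */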
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    TCGMemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
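
/* Worked example (illustrative): for JNE the condition code is 0x5,
 * so inv = b & 1 = 1 and jcc_op = (b >> 1) & 7 = JCC_Z.  The prepared
 * condition therefore tests ZF, and the final tcg_invert_cond() turns
 * "ZF set" into "ZF clear".
 */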
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
        TCGv_i32 t_size = tcg_const_i32(1 << ot);
        TCGv t_next = tcg_const_tl(s->pc - s->cs_base);

        gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
        tcg_temp_free_i32(t_size);
        tcg_temp_free(t_next);
    }
}
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T[0], 0);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_bpt_io(s, cpu_tmp2_i32, ot);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1              \
       before rep string_insn */                                              \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
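
/* Illustrative note (not from the original source): each GEN_REPZ(x)
 * above expands to a gen_repz_x() function, e.g. GEN_REPZ(movs)
 * defines gen_repz_movs().  It emits one iteration of the string op,
 * decrements ECX, and loops by jumping back to cur_eip, rather than
 * emitting a TCG-level loop; GEN_REPZ2 additionally re-tests ZF for
 * the REPZ/REPNZ forms of SCAS and CMPS.
 */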
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
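
/* Example (illustrative): for "add %ecx, (%eax)" the decoder loads the
 * operands into cpu_T[0]/cpu_T[1], computes the address into cpu_A0 and
 * calls gen_op(s, OP_ADDL, MO_32, OR_TMP0); the OR_TMP0 destination
 * makes gen_op_st_rm_T0_A0() store the result back through cpu_A0
 * instead of writing a register.
 */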
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
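
/* Illustrative note (not from the original source): the movcond pairs
 * above implement the x86 rule that a shift by a count of zero leaves
 * the flags untouched.  E.g. for "shl %cl, %eax" with CL = 0, count is
 * 0, so CC_DST, CC_SRC and CC_OP all keep their previous values and no
 * flag update is observable.
 */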
static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
}

static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T[0], op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
        tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which have the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
            tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T[0], op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        default:
            tcg_abort();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        default:
            tcg_abort();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
            tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
            tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case MO_32:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
                           cpu_tmp4, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}
static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_v_reg(ot, cpu_T[1], s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(cpu_T[1], c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int mod, rm, code, override, must_add_seg;
    TCGv sum;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        base = rm;
        index = -1;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }

        /* Compute the address, with a minimum number of TCG ops.  */
        TCGV_UNUSED(sum);
        if (index >= 0) {
            if (scale == 0) {
                sum = cpu_regs[index];
            } else {
                tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
                sum = cpu_A0;
            }
            if (base >= 0) {
                tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
                sum = cpu_A0;
            }
        } else if (base >= 0) {
            sum = cpu_regs[base];
        }
        if (TCGV_IS_UNUSED(sum)) {
            tcg_gen_movi_tl(cpu_A0, disp);
        } else {
            tcg_gen_addi_tl(cpu_A0, sum, disp);
        }

        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP) {
                    override = R_SS;
                } else {
                    override = R_DS;
                }
            }

            tcg_gen_ld_tl(cpu_tmp0, cpu_env,
                          offsetof(CPUX86State, segs[override].base));
            if (CODE64(s)) {
                if (s->aflag == MO_32) {
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                }
                tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
                return;
            }

            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        }

        if (s->aflag == MO_32) {
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
        break;

    case MO_16:
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_movi_tl(cpu_A0, disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int16_t)cpu_lduw_code(env, s->pc);
            s->pc += 2;
            break;
        }

        sum = cpu_A0;
        switch (rm) {
        case 0:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
            break;
        case 1:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
            break;
        case 2:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
            break;
        case 3:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
            break;
        case 4:
            sum = cpu_regs[R_ESI];
            break;
        case 5:
            sum = cpu_regs[R_EDI];
            break;
        case 6:
            sum = cpu_regs[R_EBP];
            break;
        default:
        case 7:
            sum = cpu_regs[R_EBX];
            break;
        }
        tcg_gen_addi_tl(cpu_A0, sum, disp);
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6) {
                    override = R_SS;
                } else {
                    override = R_DS;
                }
            }
            gen_op_addl_A0_seg(s, override);
        }
        break;

    default:
        tcg_abort();
    }
}
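
/* Worked example (illustrative, values hypothetical): in 32-bit mode,
 * modrm = 0x44 has mod = 1 and rm = 4, so a SIB byte follows; with
 * sib = 0x88 (scale = 2, index = ECX, base = EAX) and an 8-bit
 * displacement d8, the code above computes
 * cpu_A0 = EAX + (ECX << 2) + d8, then applies a segment base if
 * must_add_seg is set.
 */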
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        base = rm;

        if (base == 4) {
            code = cpu_ldub_code(env, s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
        break;

    case MO_16:
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
        break;

    default:
        tcg_abort();
    }
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(s, override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           TCGMemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[0], rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
    }
}
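
/* Example (illustrative): a "mov Ev, Gv" store is handled as
 * gen_ldst_modrm(env, s, modrm, ot, reg, 1), writing register "reg"
 * either directly to another register (mod == 3) or through cpu_A0 to
 * memory; with is_store == 0 the same helper performs the load
 * direction instead.
 */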
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = cpu_ldub_code(env, s->pc);
        s->pc++;
        break;
    case MO_16:
        ret = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = cpu_ldl_code(env, s->pc);
        s->pc += 4;
        break;
    default:
        tcg_abort();
    }
    return ret;
}

static inline int insn_const_size(TCGMemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
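
/* Illustrative note (not from the original source): the
 * tcg_gen_exit_tb((uintptr_t)tb + tb_num) call returns the TB pointer
 * with the jump slot index (0 or 1) encoded in its low bits, which
 * lets the execution loop patch the goto_tb branch so the two blocks
 * chain directly on subsequent executions.
 */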
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    TCGLabel *l1, *l2;

    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
2254 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
2255 int modrm, int reg)
2257 CCPrepare cc;
2259 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2261 cc = gen_prepare_cc(s, b, cpu_T[1]);
2262 if (cc.mask != -1) {
2263 TCGv t0 = tcg_temp_new();
2264 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2265 cc.reg = t0;
2267 if (!cc.use_reg2) {
2268 cc.reg2 = tcg_const_tl(cc.imm);
2271 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2272 cpu_T[0], cpu_regs[reg]);
2273 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2275 if (cc.mask != -1) {
2276 tcg_temp_free(cc.reg);
2278 if (!cc.use_reg2) {
2279 tcg_temp_free(cc.reg2);
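/* As on hardware, the source operand is read unconditionally (a memory
   source may fault even when the condition is false); only the final
   movcond makes the register write conditional. */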
2283 static inline void gen_op_movl_T0_seg(int seg_reg)
2285 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2286 offsetof(CPUX86State,segs[seg_reg].selector));
2289 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2291 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2292 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2293 offsetof(CPUX86State,segs[seg_reg].selector));
2294 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2295 tcg_gen_st_tl(cpu_T[0], cpu_env,
2296 offsetof(CPUX86State,segs[seg_reg].base));
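/* In real and vm86 mode the segment base is simply selector << 4, which
   is what the shift above computes; no descriptor table is consulted. */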
2299 /* move T0 to seg_reg and compute if the CPU state may change. Never
2300 call this function with seg_reg == R_CS */
2301 static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2303 if (s->pe && !s->vm86) {
2304 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2305 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2306 /* abort translation because the addseg value may change or
2307 because ss32 may change. For R_SS, translation must always
2308 stop as a special handling must be done to disable hardware
2309 interrupts for the next instruction */
2310 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2311 s->is_jmp = DISAS_TB_JUMP;
2312 } else {
2313 gen_op_movl_seg_T0_vm(seg_reg);
2314 if (seg_reg == R_SS)
2315 s->is_jmp = DISAS_TB_JUMP;
2319 static inline int svm_is_rep(int prefixes)
2321 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2324 static inline void
2325 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2326 uint32_t type, uint64_t param)
2328 /* no SVM activated; fast case */
2329 if (likely(!(s->flags & HF_SVMI_MASK)))
2330 return;
2331 gen_update_cc_op(s);
2332 gen_jmp_im(pc_start - s->cs_base);
2333 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2334 tcg_const_i64(param));
2337 static inline void
2338 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2340 gen_svm_check_intercept_param(s, pc_start, type, 0);
2343 static inline void gen_stack_update(DisasContext *s, int addend)
2345 #ifdef TARGET_X86_64
2346 if (CODE64(s)) {
2347 gen_op_add_reg_im(MO_64, R_ESP, addend);
2348 } else
2349 #endif
2350 if (s->ss32) {
2351 gen_op_add_reg_im(MO_32, R_ESP, addend);
2352 } else {
2353 gen_op_add_reg_im(MO_16, R_ESP, addend);
2357 /* Generate a push. It depends on ss32, addseg and dflag. */
2358 static void gen_push_v(DisasContext *s, TCGv val)
2360 TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
2361 int size = 1 << d_ot;
2362 TCGv new_esp = cpu_A0;
2364 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2366 if (CODE64(s)) {
2367 a_ot = MO_64;
2368 } else if (s->ss32) {
2369 a_ot = MO_32;
2370 if (s->addseg) {
2371 new_esp = cpu_tmp4;
2372 tcg_gen_mov_tl(new_esp, cpu_A0);
2373 gen_op_addl_A0_seg(s, R_SS);
2374 } else {
2375 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2377 } else {
2378 a_ot = MO_16;
2379 new_esp = cpu_tmp4;
2380 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2381 tcg_gen_mov_tl(new_esp, cpu_A0);
2382 gen_op_addl_A0_seg(s, R_SS);
2385 gen_op_st_v(s, d_ot, val, cpu_A0);
2386 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
2389 /* two step pop is necessary for precise exceptions */
2390 static TCGMemOp gen_pop_T0(DisasContext *s)
2392 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2393 TCGv addr = cpu_A0;
2395 if (CODE64(s)) {
2396 addr = cpu_regs[R_ESP];
2397 } else if (!s->ss32) {
2398 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
2399 gen_op_addl_A0_seg(s, R_SS);
2400 } else if (s->addseg) {
2401 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
2402 gen_op_addl_A0_seg(s, R_SS);
2403 } else {
2404 tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
2407 gen_op_ld_v(s, d_ot, cpu_T[0], addr);
2408 return d_ot;
2411 static void gen_pop_update(DisasContext *s, TCGMemOp ot)
2413 gen_stack_update(s, 1 << ot);
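/* The load in gen_pop_T0 happens before gen_pop_update adjusts ESP, so
   a faulting pop leaves ESP unmodified and the instruction can be
   restarted: this is the "two step pop" noted above. */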
2416 static void gen_stack_A0(DisasContext *s)
2418 gen_op_movl_A0_reg(R_ESP);
2419 if (!s->ss32)
2420 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2421 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2422 if (s->addseg)
2423 gen_op_addl_A0_seg(s, R_SS);
2426 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2427 static void gen_pusha(DisasContext *s)
2429 int i;
2430 gen_op_movl_A0_reg(R_ESP);
2431 gen_op_addl_A0_im(-(8 << s->dflag));
2432 if (!s->ss32)
2433 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2434 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2435 if (s->addseg)
2436 gen_op_addl_A0_seg(s, R_SS);
2437 for (i = 0; i < 8; i++) {
2438 gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
2439 gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
2440 gen_op_addl_A0_im(1 << s->dflag);
2442 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2445 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2446 static void gen_popa(DisasContext *s)
2448 int i;
2449 gen_op_movl_A0_reg(R_ESP);
2450 if (!s->ss32)
2451 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2452 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2453 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
2454 if (s->addseg)
2455 gen_op_addl_A0_seg(s, R_SS);
2456 for (i = 0; i < 8; i++) {
2457 /* ESP is not reloaded */
2458 if (i != 3) {
2459 gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
2460 gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
2462 gen_op_addl_A0_im(1 << s->dflag);
2464 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2467 static void gen_enter(DisasContext *s, int esp_addend, int level)
2469 TCGMemOp ot = mo_pushpop(s, s->dflag);
2470 int opsize = 1 << ot;
2472 level &= 0x1f;
2473 #ifdef TARGET_X86_64
2474 if (CODE64(s)) {
2475 gen_op_movl_A0_reg(R_ESP);
2476 gen_op_addq_A0_im(-opsize);
2477 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2479 /* push bp */
2480 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2481 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2482 if (level) {
2483 /* XXX: must save state */
2484 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2485 tcg_const_i32((ot == MO_64)),
2486 cpu_T[1]);
2488 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2489 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2490 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
2491 } else
2492 #endif
2494 gen_op_movl_A0_reg(R_ESP);
2495 gen_op_addl_A0_im(-opsize);
2496 if (!s->ss32)
2497 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2498 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2499 if (s->addseg)
2500 gen_op_addl_A0_seg(s, R_SS);
2501 /* push bp */
2502 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2503 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2504 if (level) {
2505 /* XXX: must save state */
2506 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2507 tcg_const_i32(s->dflag - 1),
2508 cpu_T[1]);
2510 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2511 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2512 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2516 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2518 gen_update_cc_op(s);
2519 gen_jmp_im(cur_eip);
2520 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2521 s->is_jmp = DISAS_TB_JUMP;
2524 /* an interrupt is different from an exception because of the
2525 privilege checks */
2526 static void gen_interrupt(DisasContext *s, int intno,
2527 target_ulong cur_eip, target_ulong next_eip)
2529 gen_update_cc_op(s);
2530 gen_jmp_im(cur_eip);
2531 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2532 tcg_const_i32(next_eip - cur_eip));
2533 s->is_jmp = DISAS_TB_JUMP;
2536 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2538 gen_update_cc_op(s);
2539 gen_jmp_im(cur_eip);
2540 gen_helper_debug(cpu_env);
2541 s->is_jmp = DISAS_TB_JUMP;
2544 /* generate a generic end of block. Trace exception is also generated
2545 if needed */
2546 static void gen_eob(DisasContext *s)
2548 gen_update_cc_op(s);
2549 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2550 gen_helper_reset_inhibit_irq(cpu_env);
2552 if (s->tb->flags & HF_RF_MASK) {
2553 gen_helper_reset_rf(cpu_env);
2555 if (s->singlestep_enabled) {
2556 gen_helper_debug(cpu_env);
2557 } else if (s->tf) {
2558 gen_helper_single_step(cpu_env);
2559 } else {
2560 tcg_gen_exit_tb(0);
2562 s->is_jmp = DISAS_TB_JUMP;
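/* After gen_eob the TB returns to the execution loop without chaining
   (tcg_gen_exit_tb(0)), so pending interrupts and single-step traps are
   re-checked before the next block runs. */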
2565 /* generate a jump to eip. No segment change may happen before this,
2566 as a direct jump to the next block may occur */
2567 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2569 gen_update_cc_op(s);
2570 set_cc_op(s, CC_OP_DYNAMIC);
2571 if (s->jmp_opt) {
2572 gen_goto_tb(s, tb_num, eip);
2573 s->is_jmp = DISAS_TB_JUMP;
2574 } else {
2575 gen_jmp_im(eip);
2576 gen_eob(s);
2580 static void gen_jmp(DisasContext *s, target_ulong eip)
2582 gen_jmp_tb(s, eip, 0);
2585 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2587 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2588 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2591 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2593 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2594 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2597 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2599 int mem_index = s->mem_index;
2600 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2601 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2602 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2603 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2604 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2607 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2609 int mem_index = s->mem_index;
2610 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2611 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2612 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2613 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2614 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2617 static inline void gen_op_movo(int d_offset, int s_offset)
2619 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
2620 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
2621 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
2622 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
2625 static inline void gen_op_movq(int d_offset, int s_offset)
2627 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2628 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2631 static inline void gen_op_movl(int d_offset, int s_offset)
2633 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2634 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2637 static inline void gen_op_movq_env_0(int d_offset)
2639 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2640 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
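/* SSE helper typedefs.  The suffix encodes the argument list: e = env
   pointer, p = register (ZMMReg/MMXReg) pointer, i = i32, l = i64,
   t = target_ulong; a leading 0 means the helper returns void. */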
2643 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2644 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2645 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2646 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2647 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2648 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2649 TCGv_i32 val);
2650 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2651 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2652 TCGv val);
2654 #define SSE_SPECIAL ((void *)1)
2655 #define SSE_DUMMY ((void *)2)
2657 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2658 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2659 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
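/* sse_op_table1 is indexed by the second opcode byte; each row holds up
   to four variants selected by the mandatory prefix: none (ps), 0x66
   (pd), 0xf3 (ss), 0xf2 (sd).  SSE_SPECIAL entries are decoded by hand
   in gen_sse; SSE_DUMMY marks opcodes that need no helper here. */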
2661 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2662 /* 3DNow! extensions */
2663 [0x0e] = { SSE_DUMMY }, /* femms */
2664 [0x0f] = { SSE_DUMMY }, /* pf... */
2665 /* pure SSE operations */
2666 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2667 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2668 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2669 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2670 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2671 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2672 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2673 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2675 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2676 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2677 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2678 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2679 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2680 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2681 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2682 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2683 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2684 [0x51] = SSE_FOP(sqrt),
2685 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2686 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2687 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2688 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2689 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2690 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2691 [0x58] = SSE_FOP(add),
2692 [0x59] = SSE_FOP(mul),
2693 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2694 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2695 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2696 [0x5c] = SSE_FOP(sub),
2697 [0x5d] = SSE_FOP(min),
2698 [0x5e] = SSE_FOP(div),
2699 [0x5f] = SSE_FOP(max),
2701 [0xc2] = SSE_FOP(cmpeq),
2702 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2703 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2705 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2706 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2707 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2709 /* MMX ops and their SSE extensions */
2710 [0x60] = MMX_OP2(punpcklbw),
2711 [0x61] = MMX_OP2(punpcklwd),
2712 [0x62] = MMX_OP2(punpckldq),
2713 [0x63] = MMX_OP2(packsswb),
2714 [0x64] = MMX_OP2(pcmpgtb),
2715 [0x65] = MMX_OP2(pcmpgtw),
2716 [0x66] = MMX_OP2(pcmpgtl),
2717 [0x67] = MMX_OP2(packuswb),
2718 [0x68] = MMX_OP2(punpckhbw),
2719 [0x69] = MMX_OP2(punpckhwd),
2720 [0x6a] = MMX_OP2(punpckhdq),
2721 [0x6b] = MMX_OP2(packssdw),
2722 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2723 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2724 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2725 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2726 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2727 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2728 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2729 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2730 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2731 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2732 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2733 [0x74] = MMX_OP2(pcmpeqb),
2734 [0x75] = MMX_OP2(pcmpeqw),
2735 [0x76] = MMX_OP2(pcmpeql),
2736 [0x77] = { SSE_DUMMY }, /* emms */
2737 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2738 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2739 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2740 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2741 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2742 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2743 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2744 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2745 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2746 [0xd1] = MMX_OP2(psrlw),
2747 [0xd2] = MMX_OP2(psrld),
2748 [0xd3] = MMX_OP2(psrlq),
2749 [0xd4] = MMX_OP2(paddq),
2750 [0xd5] = MMX_OP2(pmullw),
2751 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2752 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2753 [0xd8] = MMX_OP2(psubusb),
2754 [0xd9] = MMX_OP2(psubusw),
2755 [0xda] = MMX_OP2(pminub),
2756 [0xdb] = MMX_OP2(pand),
2757 [0xdc] = MMX_OP2(paddusb),
2758 [0xdd] = MMX_OP2(paddusw),
2759 [0xde] = MMX_OP2(pmaxub),
2760 [0xdf] = MMX_OP2(pandn),
2761 [0xe0] = MMX_OP2(pavgb),
2762 [0xe1] = MMX_OP2(psraw),
2763 [0xe2] = MMX_OP2(psrad),
2764 [0xe3] = MMX_OP2(pavgw),
2765 [0xe4] = MMX_OP2(pmulhuw),
2766 [0xe5] = MMX_OP2(pmulhw),
2767 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2768 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2769 [0xe8] = MMX_OP2(psubsb),
2770 [0xe9] = MMX_OP2(psubsw),
2771 [0xea] = MMX_OP2(pminsw),
2772 [0xeb] = MMX_OP2(por),
2773 [0xec] = MMX_OP2(paddsb),
2774 [0xed] = MMX_OP2(paddsw),
2775 [0xee] = MMX_OP2(pmaxsw),
2776 [0xef] = MMX_OP2(pxor),
2777 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2778 [0xf1] = MMX_OP2(psllw),
2779 [0xf2] = MMX_OP2(pslld),
2780 [0xf3] = MMX_OP2(psllq),
2781 [0xf4] = MMX_OP2(pmuludq),
2782 [0xf5] = MMX_OP2(pmaddwd),
2783 [0xf6] = MMX_OP2(psadbw),
2784 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2785 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2786 [0xf8] = MMX_OP2(psubb),
2787 [0xf9] = MMX_OP2(psubw),
2788 [0xfa] = MMX_OP2(psubl),
2789 [0xfb] = MMX_OP2(psubq),
2790 [0xfc] = MMX_OP2(paddb),
2791 [0xfd] = MMX_OP2(paddw),
2792 [0xfe] = MMX_OP2(paddl),
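/* sse_op_table2 serves the 0f 71/72/73 immediate-shift groups: the row
   is (shift group) * 8 plus the reg field of the ModRM byte, and the
   column again selects the MMX or 0x66-prefixed XMM variant. */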
2795 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2796 [0 + 2] = MMX_OP2(psrlw),
2797 [0 + 4] = MMX_OP2(psraw),
2798 [0 + 6] = MMX_OP2(psllw),
2799 [8 + 2] = MMX_OP2(psrld),
2800 [8 + 4] = MMX_OP2(psrad),
2801 [8 + 6] = MMX_OP2(pslld),
2802 [16 + 2] = MMX_OP2(psrlq),
2803 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2804 [16 + 6] = MMX_OP2(psllq),
2805 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
2808 static const SSEFunc_0_epi sse_op_table3ai[] = {
2809 gen_helper_cvtsi2ss,
2810 gen_helper_cvtsi2sd
2813 #ifdef TARGET_X86_64
2814 static const SSEFunc_0_epl sse_op_table3aq[] = {
2815 gen_helper_cvtsq2ss,
2816 gen_helper_cvtsq2sd
2818 #endif
2820 static const SSEFunc_i_ep sse_op_table3bi[] = {
2821 gen_helper_cvttss2si,
2822 gen_helper_cvtss2si,
2823 gen_helper_cvttsd2si,
2824 gen_helper_cvtsd2si
2827 #ifdef TARGET_X86_64
2828 static const SSEFunc_l_ep sse_op_table3bq[] = {
2829 gen_helper_cvttss2sq,
2830 gen_helper_cvtss2sq,
2831 gen_helper_cvttsd2sq,
2832 gen_helper_cvtsd2sq
2834 #endif
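/* sse_op_table4 holds the eight compare predicates of 0f c2
   (cmpps/cmppd/cmpss/cmpsd); the imm8 following the operands selects
   the row. */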
2836 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2837 SSE_FOP(cmpeq),
2838 SSE_FOP(cmplt),
2839 SSE_FOP(cmple),
2840 SSE_FOP(cmpunord),
2841 SSE_FOP(cmpneq),
2842 SSE_FOP(cmpnlt),
2843 SSE_FOP(cmpnle),
2844 SSE_FOP(cmpord),
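/* sse_op_table5 covers 3DNow!: those instructions are encoded as
   0f 0f <modrm> <imm8>, the trailing imm8 acting as the real opcode
   that indexes this table. */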
2847 static const SSEFunc_0_epp sse_op_table5[256] = {
2848 [0x0c] = gen_helper_pi2fw,
2849 [0x0d] = gen_helper_pi2fd,
2850 [0x1c] = gen_helper_pf2iw,
2851 [0x1d] = gen_helper_pf2id,
2852 [0x8a] = gen_helper_pfnacc,
2853 [0x8e] = gen_helper_pfpnacc,
2854 [0x90] = gen_helper_pfcmpge,
2855 [0x94] = gen_helper_pfmin,
2856 [0x96] = gen_helper_pfrcp,
2857 [0x97] = gen_helper_pfrsqrt,
2858 [0x9a] = gen_helper_pfsub,
2859 [0x9e] = gen_helper_pfadd,
2860 [0xa0] = gen_helper_pfcmpgt,
2861 [0xa4] = gen_helper_pfmax,
2862 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2863 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2864 [0xaa] = gen_helper_pfsubr,
2865 [0xae] = gen_helper_pfacc,
2866 [0xb0] = gen_helper_pfcmpeq,
2867 [0xb4] = gen_helper_pfmul,
2868 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2869 [0xb7] = gen_helper_pmulhrw_mmx,
2870 [0xbb] = gen_helper_pswapd,
2871 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2874 struct SSEOpHelper_epp {
2875 SSEFunc_0_epp op[2];
2876 uint32_t ext_mask;
2879 struct SSEOpHelper_eppi {
2880 SSEFunc_0_eppi op[2];
2881 uint32_t ext_mask;
2884 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2885 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2886 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2887 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2888 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2889 CPUID_EXT_PCLMULQDQ }
2890 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
2892 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2893 [0x00] = SSSE3_OP(pshufb),
2894 [0x01] = SSSE3_OP(phaddw),
2895 [0x02] = SSSE3_OP(phaddd),
2896 [0x03] = SSSE3_OP(phaddsw),
2897 [0x04] = SSSE3_OP(pmaddubsw),
2898 [0x05] = SSSE3_OP(phsubw),
2899 [0x06] = SSSE3_OP(phsubd),
2900 [0x07] = SSSE3_OP(phsubsw),
2901 [0x08] = SSSE3_OP(psignb),
2902 [0x09] = SSSE3_OP(psignw),
2903 [0x0a] = SSSE3_OP(psignd),
2904 [0x0b] = SSSE3_OP(pmulhrsw),
2905 [0x10] = SSE41_OP(pblendvb),
2906 [0x14] = SSE41_OP(blendvps),
2907 [0x15] = SSE41_OP(blendvpd),
2908 [0x17] = SSE41_OP(ptest),
2909 [0x1c] = SSSE3_OP(pabsb),
2910 [0x1d] = SSSE3_OP(pabsw),
2911 [0x1e] = SSSE3_OP(pabsd),
2912 [0x20] = SSE41_OP(pmovsxbw),
2913 [0x21] = SSE41_OP(pmovsxbd),
2914 [0x22] = SSE41_OP(pmovsxbq),
2915 [0x23] = SSE41_OP(pmovsxwd),
2916 [0x24] = SSE41_OP(pmovsxwq),
2917 [0x25] = SSE41_OP(pmovsxdq),
2918 [0x28] = SSE41_OP(pmuldq),
2919 [0x29] = SSE41_OP(pcmpeqq),
2920 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2921 [0x2b] = SSE41_OP(packusdw),
2922 [0x30] = SSE41_OP(pmovzxbw),
2923 [0x31] = SSE41_OP(pmovzxbd),
2924 [0x32] = SSE41_OP(pmovzxbq),
2925 [0x33] = SSE41_OP(pmovzxwd),
2926 [0x34] = SSE41_OP(pmovzxwq),
2927 [0x35] = SSE41_OP(pmovzxdq),
2928 [0x37] = SSE42_OP(pcmpgtq),
2929 [0x38] = SSE41_OP(pminsb),
2930 [0x39] = SSE41_OP(pminsd),
2931 [0x3a] = SSE41_OP(pminuw),
2932 [0x3b] = SSE41_OP(pminud),
2933 [0x3c] = SSE41_OP(pmaxsb),
2934 [0x3d] = SSE41_OP(pmaxsd),
2935 [0x3e] = SSE41_OP(pmaxuw),
2936 [0x3f] = SSE41_OP(pmaxud),
2937 [0x40] = SSE41_OP(pmulld),
2938 [0x41] = SSE41_OP(phminposuw),
2939 [0xdb] = AESNI_OP(aesimc),
2940 [0xdc] = AESNI_OP(aesenc),
2941 [0xdd] = AESNI_OP(aesenclast),
2942 [0xde] = AESNI_OP(aesdec),
2943 [0xdf] = AESNI_OP(aesdeclast),
2946 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2947 [0x08] = SSE41_OP(roundps),
2948 [0x09] = SSE41_OP(roundpd),
2949 [0x0a] = SSE41_OP(roundss),
2950 [0x0b] = SSE41_OP(roundsd),
2951 [0x0c] = SSE41_OP(blendps),
2952 [0x0d] = SSE41_OP(blendpd),
2953 [0x0e] = SSE41_OP(pblendw),
2954 [0x0f] = SSSE3_OP(palignr),
2955 [0x14] = SSE41_SPECIAL, /* pextrb */
2956 [0x15] = SSE41_SPECIAL, /* pextrw */
2957 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2958 [0x17] = SSE41_SPECIAL, /* extractps */
2959 [0x20] = SSE41_SPECIAL, /* pinsrb */
2960 [0x21] = SSE41_SPECIAL, /* insertps */
2961 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2962 [0x40] = SSE41_OP(dpps),
2963 [0x41] = SSE41_OP(dppd),
2964 [0x42] = SSE41_OP(mpsadbw),
2965 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2966 [0x60] = SSE42_OP(pcmpestrm),
2967 [0x61] = SSE42_OP(pcmpestri),
2968 [0x62] = SSE42_OP(pcmpistrm),
2969 [0x63] = SSE42_OP(pcmpistri),
2970 [0xdf] = AESNI_OP(aeskeygenassist),
2973 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2974 target_ulong pc_start, int rex_r)
2976 int b1, op1_offset, op2_offset, is_xmm, val;
2977 int modrm, mod, rm, reg;
2978 SSEFunc_0_epp sse_fn_epp;
2979 SSEFunc_0_eppi sse_fn_eppi;
2980 SSEFunc_0_ppi sse_fn_ppi;
2981 SSEFunc_0_eppt sse_fn_eppt;
2982 TCGMemOp ot;
2984 b &= 0xff;
2985 if (s->prefix & PREFIX_DATA)
2986 b1 = 1;
2987 else if (s->prefix & PREFIX_REPZ)
2988 b1 = 2;
2989 else if (s->prefix & PREFIX_REPNZ)
2990 b1 = 3;
2991 else
2992 b1 = 0;
2993 sse_fn_epp = sse_op_table1[b][b1];
2994 if (!sse_fn_epp) {
2995 goto illegal_op;
2997 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
2998 is_xmm = 1;
2999 } else {
3000 if (b1 == 0) {
3001 /* MMX case */
3002 is_xmm = 0;
3003 } else {
3004 is_xmm = 1;
3007 /* simple MMX/SSE operation */
3008 if (s->flags & HF_TS_MASK) {
3009 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3010 return;
3012 if (s->flags & HF_EM_MASK) {
3013 illegal_op:
3014 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3015 return;
3017 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3018 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3019 goto illegal_op;
3020 if (b == 0x0e) {
3021 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3022 goto illegal_op;
3023 /* femms */
3024 gen_helper_emms(cpu_env);
3025 return;
3027 if (b == 0x77) {
3028 /* emms */
3029 gen_helper_emms(cpu_env);
3030 return;
3032 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3033 the static cpu state) */
3034 if (!is_xmm) {
3035 gen_helper_enter_mmx(cpu_env);
3038 modrm = cpu_ldub_code(env, s->pc++);
3039 reg = ((modrm >> 3) & 7);
3040 if (is_xmm)
3041 reg |= rex_r;
3042 mod = (modrm >> 6) & 3;
3043 if (sse_fn_epp == SSE_SPECIAL) {
3044 b |= (b1 << 8);
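        /* From here on the case labels encode the mandatory prefix in
           bits 8-9 of b: 0x0XX = none, 0x1XX = 0x66, 0x2XX = 0xf3,
           0x3XX = 0xf2. */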
3045 switch(b) {
3046 case 0x0e7: /* movntq */
3047 if (mod == 3)
3048 goto illegal_op;
3049 gen_lea_modrm(env, s, modrm);
3050 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3051 break;
3052 case 0x1e7: /* movntdq */
3053 case 0x02b: /* movntps */
3054 case 0x12b: /* movntpd */
3055 if (mod == 3)
3056 goto illegal_op;
3057 gen_lea_modrm(env, s, modrm);
3058 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3059 break;
3060 case 0x3f0: /* lddqu */
3061 if (mod == 3)
3062 goto illegal_op;
3063 gen_lea_modrm(env, s, modrm);
3064 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3065 break;
3066 case 0x22b: /* movntss */
3067 case 0x32b: /* movntsd */
3068 if (mod == 3)
3069 goto illegal_op;
3070 gen_lea_modrm(env, s, modrm);
3071 if (b1 & 1) {
3072 gen_stq_env_A0(s, offsetof(CPUX86State,
3073 xmm_regs[reg].ZMM_Q(0)));
3074 } else {
3075 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3076 xmm_regs[reg].ZMM_L(0)));
3077 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3079 break;
3080 case 0x6e: /* movd mm, ea */
3081 #ifdef TARGET_X86_64
3082 if (s->dflag == MO_64) {
3083 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3084 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3085 } else
3086 #endif
3088 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3089 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3090 offsetof(CPUX86State,fpregs[reg].mmx));
3091 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3092 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3094 break;
3095 case 0x16e: /* movd xmm, ea */
3096 #ifdef TARGET_X86_64
3097 if (s->dflag == MO_64) {
3098 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3099 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3100 offsetof(CPUX86State,xmm_regs[reg]));
3101 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3102 } else
3103 #endif
3105 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3106 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3107 offsetof(CPUX86State,xmm_regs[reg]));
3108 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3109 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3111 break;
3112 case 0x6f: /* movq mm, ea */
3113 if (mod != 3) {
3114 gen_lea_modrm(env, s, modrm);
3115 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3116 } else {
3117 rm = (modrm & 7);
3118 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3119 offsetof(CPUX86State,fpregs[rm].mmx));
3120 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3121 offsetof(CPUX86State,fpregs[reg].mmx));
3123 break;
3124 case 0x010: /* movups */
3125 case 0x110: /* movupd */
3126 case 0x028: /* movaps */
3127 case 0x128: /* movapd */
3128 case 0x16f: /* movdqa xmm, ea */
3129 case 0x26f: /* movdqu xmm, ea */
3130 if (mod != 3) {
3131 gen_lea_modrm(env, s, modrm);
3132 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3133 } else {
3134 rm = (modrm & 7) | REX_B(s);
3135 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3136 offsetof(CPUX86State,xmm_regs[rm]));
3138 break;
3139 case 0x210: /* movss xmm, ea */
3140 if (mod != 3) {
3141 gen_lea_modrm(env, s, modrm);
3142 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3143 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3144 tcg_gen_movi_tl(cpu_T[0], 0);
3145 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3146 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3147 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3148 } else {
3149 rm = (modrm & 7) | REX_B(s);
3150 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3151 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3153 break;
3154 case 0x310: /* movsd xmm, ea */
3155 if (mod != 3) {
3156 gen_lea_modrm(env, s, modrm);
3157 gen_ldq_env_A0(s, offsetof(CPUX86State,
3158 xmm_regs[reg].ZMM_Q(0)));
3159 tcg_gen_movi_tl(cpu_T[0], 0);
3160 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3161 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3162 } else {
3163 rm = (modrm & 7) | REX_B(s);
3164 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3165 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3167 break;
3168 case 0x012: /* movlps */
3169 case 0x112: /* movlpd */
3170 if (mod != 3) {
3171 gen_lea_modrm(env, s, modrm);
3172 gen_ldq_env_A0(s, offsetof(CPUX86State,
3173 xmm_regs[reg].ZMM_Q(0)));
3174 } else {
3175 /* movhlps */
3176 rm = (modrm & 7) | REX_B(s);
3177 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3178 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3180 break;
3181 case 0x212: /* movsldup */
3182 if (mod != 3) {
3183 gen_lea_modrm(env, s, modrm);
3184 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3185 } else {
3186 rm = (modrm & 7) | REX_B(s);
3187 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3188 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3189 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3190 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
3192 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3193 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3194 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3195 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3196 break;
3197 case 0x312: /* movddup */
3198 if (mod != 3) {
3199 gen_lea_modrm(env, s, modrm);
3200 gen_ldq_env_A0(s, offsetof(CPUX86State,
3201 xmm_regs[reg].ZMM_Q(0)));
3202 } else {
3203 rm = (modrm & 7) | REX_B(s);
3204 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3205 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3207 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3208 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3209 break;
3210 case 0x016: /* movhps */
3211 case 0x116: /* movhpd */
3212 if (mod != 3) {
3213 gen_lea_modrm(env, s, modrm);
3214 gen_ldq_env_A0(s, offsetof(CPUX86State,
3215 xmm_regs[reg].ZMM_Q(1)));
3216 } else {
3217 /* movlhps */
3218 rm = (modrm & 7) | REX_B(s);
3219 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3220 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3222 break;
3223 case 0x216: /* movshdup */
3224 if (mod != 3) {
3225 gen_lea_modrm(env, s, modrm);
3226 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3227 } else {
3228 rm = (modrm & 7) | REX_B(s);
3229 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3230 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
3231 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3232 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
3234 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3235 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3236 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3237 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3238 break;
3239 case 0x178:
3240 case 0x378:
3242 int bit_index, field_length;
3244 if (b1 == 1 && reg != 0)
3245 goto illegal_op;
3246 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3247 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3248 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3249 offsetof(CPUX86State,xmm_regs[reg]));
3250 if (b1 == 1)
3251 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3252 tcg_const_i32(bit_index),
3253 tcg_const_i32(field_length));
3254 else
3255 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3256 tcg_const_i32(bit_index),
3257 tcg_const_i32(field_length));
3259 break;
3260 case 0x7e: /* movd ea, mm */
3261 #ifdef TARGET_X86_64
3262 if (s->dflag == MO_64) {
3263 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3264 offsetof(CPUX86State,fpregs[reg].mmx));
3265 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3266 } else
3267 #endif
3269 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3270 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3271 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3273 break;
3274 case 0x17e: /* movd ea, xmm */
3275 #ifdef TARGET_X86_64
3276 if (s->dflag == MO_64) {
3277 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3278 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3279 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3280 } else
3281 #endif
3283 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3284 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3285 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3287 break;
3288 case 0x27e: /* movq xmm, ea */
3289 if (mod != 3) {
3290 gen_lea_modrm(env, s, modrm);
3291 gen_ldq_env_A0(s, offsetof(CPUX86State,
3292 xmm_regs[reg].ZMM_Q(0)));
3293 } else {
3294 rm = (modrm & 7) | REX_B(s);
3295 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3296 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3298 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3299 break;
3300 case 0x7f: /* movq ea, mm */
3301 if (mod != 3) {
3302 gen_lea_modrm(env, s, modrm);
3303 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3304 } else {
3305 rm = (modrm & 7);
3306 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3307 offsetof(CPUX86State,fpregs[reg].mmx));
3309 break;
3310 case 0x011: /* movups */
3311 case 0x111: /* movupd */
3312 case 0x029: /* movaps */
3313 case 0x129: /* movapd */
3314 case 0x17f: /* movdqa ea, xmm */
3315 case 0x27f: /* movdqu ea, xmm */
3316 if (mod != 3) {
3317 gen_lea_modrm(env, s, modrm);
3318 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3319 } else {
3320 rm = (modrm & 7) | REX_B(s);
3321 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3322 offsetof(CPUX86State,xmm_regs[reg]));
3324 break;
3325 case 0x211: /* movss ea, xmm */
3326 if (mod != 3) {
3327 gen_lea_modrm(env, s, modrm);
3328 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3329 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3330 } else {
3331 rm = (modrm & 7) | REX_B(s);
3332 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
3333 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3335 break;
3336 case 0x311: /* movsd ea, xmm */
3337 if (mod != 3) {
3338 gen_lea_modrm(env, s, modrm);
3339 gen_stq_env_A0(s, offsetof(CPUX86State,
3340 xmm_regs[reg].ZMM_Q(0)));
3341 } else {
3342 rm = (modrm & 7) | REX_B(s);
3343 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3344 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3346 break;
3347 case 0x013: /* movlps */
3348 case 0x113: /* movlpd */
3349 if (mod != 3) {
3350 gen_lea_modrm(env, s, modrm);
3351 gen_stq_env_A0(s, offsetof(CPUX86State,
3352 xmm_regs[reg].ZMM_Q(0)));
3353 } else {
3354 goto illegal_op;
3356 break;
3357 case 0x017: /* movhps */
3358 case 0x117: /* movhpd */
3359 if (mod != 3) {
3360 gen_lea_modrm(env, s, modrm);
3361 gen_stq_env_A0(s, offsetof(CPUX86State,
3362 xmm_regs[reg].ZMM_Q(1)));
3363 } else {
3364 goto illegal_op;
3366 break;
3367 case 0x71: /* shift mm, im */
3368 case 0x72:
3369 case 0x73:
3370 case 0x171: /* shift xmm, im */
3371 case 0x172:
3372 case 0x173:
3373 if (b1 >= 2) {
3374 goto illegal_op;
3376 val = cpu_ldub_code(env, s->pc++);
3377 if (is_xmm) {
3378 tcg_gen_movi_tl(cpu_T[0], val);
3379 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3380 tcg_gen_movi_tl(cpu_T[0], 0);
3381 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
3382 op1_offset = offsetof(CPUX86State,xmm_t0);
3383 } else {
3384 tcg_gen_movi_tl(cpu_T[0], val);
3385 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3386 tcg_gen_movi_tl(cpu_T[0], 0);
3387 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3388 op1_offset = offsetof(CPUX86State,mmx_t0);
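            /* ((b - 1) & 3) * 8 maps opcode 0x71/0x72/0x73 to row
               0/8/16 of sse_op_table2; the ModRM reg field selects the
               shift within the group. */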
3390 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3391 (((modrm >> 3)) & 7)][b1];
3392 if (!sse_fn_epp) {
3393 goto illegal_op;
3395 if (is_xmm) {
3396 rm = (modrm & 7) | REX_B(s);
3397 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3398 } else {
3399 rm = (modrm & 7);
3400 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3402 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3403 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3404 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3405 break;
3406 case 0x050: /* movmskps */
3407 rm = (modrm & 7) | REX_B(s);
3408 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3409 offsetof(CPUX86State,xmm_regs[rm]));
3410 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3411 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3412 break;
3413 case 0x150: /* movmskpd */
3414 rm = (modrm & 7) | REX_B(s);
3415 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3416 offsetof(CPUX86State,xmm_regs[rm]));
3417 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3418 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3419 break;
3420 case 0x02a: /* cvtpi2ps */
3421 case 0x12a: /* cvtpi2pd */
3422 gen_helper_enter_mmx(cpu_env);
3423 if (mod != 3) {
3424 gen_lea_modrm(env, s, modrm);
3425 op2_offset = offsetof(CPUX86State,mmx_t0);
3426 gen_ldq_env_A0(s, op2_offset);
3427 } else {
3428 rm = (modrm & 7);
3429 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3431 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3432 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3433 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3434 switch(b >> 8) {
3435 case 0x0:
3436 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3437 break;
3438 default:
3439 case 0x1:
3440 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3441 break;
3443 break;
3444 case 0x22a: /* cvtsi2ss */
3445 case 0x32a: /* cvtsi2sd */
3446 ot = mo_64_32(s->dflag);
3447 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3448 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3449 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3450 if (ot == MO_32) {
3451 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3452 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3453 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3454 } else {
3455 #ifdef TARGET_X86_64
3456 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3457 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3458 #else
3459 goto illegal_op;
3460 #endif
3462 break;
3463 case 0x02c: /* cvttps2pi */
3464 case 0x12c: /* cvttpd2pi */
3465 case 0x02d: /* cvtps2pi */
3466 case 0x12d: /* cvtpd2pi */
3467 gen_helper_enter_mmx(cpu_env);
3468 if (mod != 3) {
3469 gen_lea_modrm(env, s, modrm);
3470 op2_offset = offsetof(CPUX86State,xmm_t0);
3471 gen_ldo_env_A0(s, op2_offset);
3472 } else {
3473 rm = (modrm & 7) | REX_B(s);
3474 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3476 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3477 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3478 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3479 switch(b) {
3480 case 0x02c:
3481 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3482 break;
3483 case 0x12c:
3484 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3485 break;
3486 case 0x02d:
3487 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3488 break;
3489 case 0x12d:
3490 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3491 break;
3493 break;
3494 case 0x22c: /* cvttss2si */
3495 case 0x32c: /* cvttsd2si */
3496 case 0x22d: /* cvtss2si */
3497 case 0x32d: /* cvtsd2si */
3498 ot = mo_64_32(s->dflag);
3499 if (mod != 3) {
3500 gen_lea_modrm(env, s, modrm);
3501 if ((b >> 8) & 1) {
3502 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
3503 } else {
3504 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3505 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3507 op2_offset = offsetof(CPUX86State,xmm_t0);
3508 } else {
3509 rm = (modrm & 7) | REX_B(s);
3510 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3512 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3513 if (ot == MO_32) {
3514 SSEFunc_i_ep sse_fn_i_ep =
3515 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3516 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3517 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3518 } else {
3519 #ifdef TARGET_X86_64
3520 SSEFunc_l_ep sse_fn_l_ep =
3521 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3522 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3523 #else
3524 goto illegal_op;
3525 #endif
3527 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3528 break;
3529 case 0xc4: /* pinsrw */
3530 case 0x1c4:
3531 s->rip_offset = 1;
3532 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3533 val = cpu_ldub_code(env, s->pc++);
3534 if (b1) {
3535 val &= 7;
3536 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3537 offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
3538 } else {
3539 val &= 3;
3540 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3541 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3543 break;
3544 case 0xc5: /* pextrw */
3545 case 0x1c5:
3546 if (mod != 3)
3547 goto illegal_op;
3548 ot = mo_64_32(s->dflag);
3549 val = cpu_ldub_code(env, s->pc++);
3550 if (b1) {
3551 val &= 7;
3552 rm = (modrm & 7) | REX_B(s);
3553 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3554 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
3555 } else {
3556 val &= 3;
3557 rm = (modrm & 7);
3558 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3559 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3561 reg = ((modrm >> 3) & 7) | rex_r;
3562 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3563 break;
3564 case 0x1d6: /* movq ea, xmm */
3565 if (mod != 3) {
3566 gen_lea_modrm(env, s, modrm);
3567 gen_stq_env_A0(s, offsetof(CPUX86State,
3568 xmm_regs[reg].ZMM_Q(0)));
3569 } else {
3570 rm = (modrm & 7) | REX_B(s);
3571 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3572 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3573 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3575 break;
3576 case 0x2d6: /* movq2dq */
3577 gen_helper_enter_mmx(cpu_env);
3578 rm = (modrm & 7);
3579 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3580 offsetof(CPUX86State,fpregs[rm].mmx));
3581 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3582 break;
3583 case 0x3d6: /* movdq2q */
3584 gen_helper_enter_mmx(cpu_env);
3585 rm = (modrm & 7) | REX_B(s);
3586 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3587 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3588 break;
3589 case 0xd7: /* pmovmskb */
3590 case 0x1d7:
3591 if (mod != 3)
3592 goto illegal_op;
3593 if (b1) {
3594 rm = (modrm & 7) | REX_B(s);
3595 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3596 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3597 } else {
3598 rm = (modrm & 7);
3599 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3600 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3602 reg = ((modrm >> 3) & 7) | rex_r;
3603 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3604 break;
3606 case 0x138:
3607 case 0x038:
3608 b = modrm;
3609 if ((b & 0xf0) == 0xf0) {
3610 goto do_0f_38_fx;
3612 modrm = cpu_ldub_code(env, s->pc++);
3613 rm = modrm & 7;
3614 reg = ((modrm >> 3) & 7) | rex_r;
3615 mod = (modrm >> 6) & 3;
3616 if (b1 >= 2) {
3617 goto illegal_op;
3620 sse_fn_epp = sse_op_table6[b].op[b1];
3621 if (!sse_fn_epp) {
3622 goto illegal_op;
3624 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3625 goto illegal_op;
3627 if (b1) {
3628 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3629 if (mod == 3) {
3630 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3631 } else {
3632 op2_offset = offsetof(CPUX86State,xmm_t0);
3633 gen_lea_modrm(env, s, modrm);
3634 switch (b) {
3635 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3636 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3637 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3638 gen_ldq_env_A0(s, op2_offset +
3639 offsetof(ZMMReg, ZMM_Q(0)));
3640 break;
3641 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3642 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3643 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3644 s->mem_index, MO_LEUL);
3645 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3646 offsetof(ZMMReg, ZMM_L(0)));
3647 break;
3648 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3649 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3650 s->mem_index, MO_LEUW);
3651 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3652 offsetof(ZMMReg, ZMM_W(0)));
3653 break;
3654 case 0x2a: /* movntdqa */
3655 gen_ldo_env_A0(s, op1_offset);
3656 return;
3657 default:
3658 gen_ldo_env_A0(s, op2_offset);
3661 } else {
3662 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3663 if (mod == 3) {
3664 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3665 } else {
3666 op2_offset = offsetof(CPUX86State,mmx_t0);
3667 gen_lea_modrm(env, s, modrm);
3668 gen_ldq_env_A0(s, op2_offset);
3671 if (sse_fn_epp == SSE_SPECIAL) {
3672 goto illegal_op;
3675 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3676 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3677 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3679 if (b == 0x17) {
3680 set_cc_op(s, CC_OP_EFLAGS);
3682 break;
3684 case 0x238:
3685 case 0x338:
3686 do_0f_38_fx:
3687 /* Various integer extensions at 0f 38 f[0-f]. */
3688 b = modrm | (b1 << 8);
3689 modrm = cpu_ldub_code(env, s->pc++);
3690 reg = ((modrm >> 3) & 7) | rex_r;
3692 switch (b) {
3693 case 0x3f0: /* crc32 Gd,Eb */
3694 case 0x3f1: /* crc32 Gd,Ey */
3695 do_crc32:
3696 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3697 goto illegal_op;
3699 if ((b & 0xff) == 0xf0) {
3700 ot = MO_8;
3701 } else if (s->dflag != MO_64) {
3702 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3703 } else {
3704 ot = MO_64;
3707 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3708 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3709 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3710 cpu_T[0], tcg_const_i32(8 << ot));
3712 ot = mo_64_32(s->dflag);
3713 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3714 break;
3716 case 0x1f0: /* crc32 or movbe */
3717 case 0x1f1:
3718 /* For these insns, the f2 prefix (crc32) is supposed to have
3719 priority over the 66 prefix, but that's not how b1 is computed
3720 above. */
3721 if (s->prefix & PREFIX_REPNZ) {
3722 goto do_crc32;
3724 /* FALLTHRU */
3725 case 0x0f0: /* movbe Gy,My */
3726 case 0x0f1: /* movbe My,Gy */
3727 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3728 goto illegal_op;
3730 if (s->dflag != MO_64) {
3731 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3732 } else {
3733 ot = MO_64;
3736 gen_lea_modrm(env, s, modrm);
3737 if ((b & 1) == 0) {
3738 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3739 s->mem_index, ot | MO_BE);
3740 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3741 } else {
3742 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3743 s->mem_index, ot | MO_BE);
3745 break;
3747 case 0x0f2: /* andn Gy, By, Ey */
3748 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3749 || !(s->prefix & PREFIX_VEX)
3750 || s->vex_l != 0) {
3751 goto illegal_op;
3753 ot = mo_64_32(s->dflag);
3754 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3755 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3756 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3757 gen_op_update1_cc();
3758 set_cc_op(s, CC_OP_LOGICB + ot);
3759 break;
3761 case 0x0f7: /* bextr Gy, Ey, By */
3762 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3763 || !(s->prefix & PREFIX_VEX)
3764 || s->vex_l != 0) {
3765 goto illegal_op;
3767 ot = mo_64_32(s->dflag);
3769 TCGv bound, zero;
3771 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3772 /* Extract START, and shift the operand.
3773 Shifts larger than operand size get zeros. */
3774 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3775 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3777 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3778 zero = tcg_const_tl(0);
3779 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3780 cpu_T[0], zero);
3781 tcg_temp_free(zero);
3783 /* Extract the LEN into a mask. Lengths larger than
3784 operand size get all ones. */
3785 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3786 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3787 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3788 cpu_A0, bound);
3789 tcg_temp_free(bound);
3790 tcg_gen_movi_tl(cpu_T[1], 1);
3791 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3792 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3793 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3795 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3796 gen_op_update1_cc();
3797 set_cc_op(s, CC_OP_LOGICB + ot);
3799 break;
3801 case 0x0f5: /* bzhi Gy, Ey, By */
3802 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3803 || !(s->prefix & PREFIX_VEX)
3804 || s->vex_l != 0) {
3805 goto illegal_op;
3807 ot = mo_64_32(s->dflag);
3808 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3809 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3811 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3812 /* Note that since we're using BMILG (in order to get O
3813 cleared) we need to store the inverse into C. */
3814 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3815 cpu_T[1], bound);
3816 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3817 bound, bound, cpu_T[1]);
3818 tcg_temp_free(bound);
3820 tcg_gen_movi_tl(cpu_A0, -1);
3821 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3822 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3823 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3824 gen_op_update1_cc();
3825 set_cc_op(s, CC_OP_BMILGB + ot);
3826 break;
3828 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3829 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3830 || !(s->prefix & PREFIX_VEX)
3831 || s->vex_l != 0) {
3832 goto illegal_op;
3834 ot = mo_64_32(s->dflag);
3835 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3836 switch (ot) {
3837 default:
3838 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3839 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3840 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3841 cpu_tmp2_i32, cpu_tmp3_i32);
3842 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3843 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3844 break;
3845 #ifdef TARGET_X86_64
3846 case MO_64:
3847 tcg_gen_mulu2_i64(cpu_T[0], cpu_T[1],
3848 cpu_T[0], cpu_regs[R_EDX]);
3849 tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T[0]);
3850 tcg_gen_mov_i64(cpu_regs[reg], cpu_T[1]);
3851 break;
3852 #endif
3854 break;
3856 case 0x3f5: /* pdep Gy, By, Ey */
3857 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3858 || !(s->prefix & PREFIX_VEX)
3859 || s->vex_l != 0) {
3860 goto illegal_op;
3862 ot = mo_64_32(s->dflag);
3863 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3864 /* Note that by zero-extending the mask operand, we
3865 automatically handle zero-extending the result. */
3866 if (ot == MO_64) {
3867 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3868 } else {
3869 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3871 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3872 break;
3874 case 0x2f5: /* pext Gy, By, Ey */
3875 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3876 || !(s->prefix & PREFIX_VEX)
3877 || s->vex_l != 0) {
3878 goto illegal_op;
3880 ot = mo_64_32(s->dflag);
3881 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3882 /* Note that by zero-extending the mask operand, we
3883 automatically handle zero-extending the result. */
3884 if (ot == MO_64) {
3885 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3886 } else {
3887 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3889 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3890 break;
3892 case 0x1f6: /* adcx Gy, Ey */
3893 case 0x2f6: /* adox Gy, Ey */
3894 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3895 goto illegal_op;
3896 } else {
3897 TCGv carry_in, carry_out, zero;
3898 int end_op;
3900 ot = mo_64_32(s->dflag);
3901 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3903 /* Re-use the carry-out from a previous round. */
3904 TCGV_UNUSED(carry_in);
3905 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3906 switch (s->cc_op) {
3907 case CC_OP_ADCX:
3908 if (b == 0x1f6) {
3909 carry_in = cpu_cc_dst;
3910 end_op = CC_OP_ADCX;
3911 } else {
3912 end_op = CC_OP_ADCOX;
3914 break;
3915 case CC_OP_ADOX:
3916 if (b == 0x1f6) {
3917 end_op = CC_OP_ADCOX;
3918 } else {
3919 carry_in = cpu_cc_src2;
3920 end_op = CC_OP_ADOX;
3922 break;
3923 case CC_OP_ADCOX:
3924 end_op = CC_OP_ADCOX;
3925 carry_in = carry_out;
3926 break;
3927 default:
3928 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3929 break;
3931 /* If we can't reuse carry-out, get it out of EFLAGS. */
3932 if (TCGV_IS_UNUSED(carry_in)) {
3933 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3934 gen_compute_eflags(s);
3936 carry_in = cpu_tmp0;
3937 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3938 ctz32(b == 0x1f6 ? CC_C : CC_O));
3939 tcg_gen_andi_tl(carry_in, carry_in, 1);
3942 switch (ot) {
3943 #ifdef TARGET_X86_64
3944 case MO_32:
3945 /* If we know TL is 64-bit, and we want a 32-bit
3946 result, just do everything in 64-bit arithmetic. */
3947 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3948 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
3949 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
3950 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
3951 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
3952 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
3953 break;
3954 #endif
3955 default:
3956 /* Otherwise compute the carry-out in two steps. */
3957 zero = tcg_const_tl(0);
3958 tcg_gen_add2_tl(cpu_T[0], carry_out,
3959 cpu_T[0], zero,
3960 carry_in, zero);
3961 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3962 cpu_regs[reg], carry_out,
3963 cpu_T[0], zero);
3964 tcg_temp_free(zero);
3965 break;
3967 set_cc_op(s, end_op);
3969 break;
3971 case 0x1f7: /* shlx Gy, Ey, By */
3972 case 0x2f7: /* sarx Gy, Ey, By */
3973 case 0x3f7: /* shrx Gy, Ey, By */
3974 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3975 || !(s->prefix & PREFIX_VEX)
3976 || s->vex_l != 0) {
3977 goto illegal_op;
3979 ot = mo_64_32(s->dflag);
3980 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3981 if (ot == MO_64) {
3982 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
3983 } else {
3984 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
3986 if (b == 0x1f7) {
3987 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3988 } else if (b == 0x2f7) {
3989 if (ot != MO_64) {
3990 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3992 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3993 } else {
3994 if (ot != MO_64) {
3995 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3997 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3999 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4000 break;
4002 case 0x0f3:
4003 case 0x1f3:
4004 case 0x2f3:
4005 case 0x3f3: /* Group 17 */
4006 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4007 || !(s->prefix & PREFIX_VEX)
4008 || s->vex_l != 0) {
4009 goto illegal_op;
4011 ot = mo_64_32(s->dflag);
4012 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4014 switch (reg & 7) {
4015 case 1: /* blsr By,Ey */
4016 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4017 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4018 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4019 gen_op_update2_cc();
4020 set_cc_op(s, CC_OP_BMILGB + ot);
4021 break;
4023 case 2: /* blsmsk By,Ey */
4024 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4025 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4026 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4027 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4028 set_cc_op(s, CC_OP_BMILGB + ot);
4029 break;
4031 case 3: /* blsi By, Ey */
4032 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4033 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4034 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4035 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4036 set_cc_op(s, CC_OP_BMILGB + ot);
4037 break;
4039 default:
4040 goto illegal_op;
4042 break;
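/* For reference, the architectural definitions of the group 17 ops
   (Intel SDM): each writes its result to the vvvv destination register
   and sets ZF/SF/CF from it.  A plain-C sketch with hypothetical names: */
#if 0 /* illustrative only */
static uint32_t blsr32(uint32_t x)   { return x & (x - 1); } /* clear lowest set bit */
static uint32_t blsmsk32(uint32_t x) { return x ^ (x - 1); } /* mask through lowest set bit */
static uint32_t blsi32(uint32_t x)   { return x & -x; }      /* isolate lowest set bit */
#endif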
4044 default:
4045 goto illegal_op;
4047 break;
4049 case 0x03a:
4050 case 0x13a:
4051 b = modrm;
4052 modrm = cpu_ldub_code(env, s->pc++);
4053 rm = modrm & 7;
4054 reg = ((modrm >> 3) & 7) | rex_r;
4055 mod = (modrm >> 6) & 3;
4056 if (b1 >= 2) {
4057 goto illegal_op;
4060 sse_fn_eppi = sse_op_table7[b].op[b1];
4061 if (!sse_fn_eppi) {
4062 goto illegal_op;
4064 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4065 goto illegal_op;
4067 if (sse_fn_eppi == SSE_SPECIAL) {
4068 ot = mo_64_32(s->dflag);
4069 rm = (modrm & 7) | REX_B(s);
4070 if (mod != 3)
4071 gen_lea_modrm(env, s, modrm);
4072 reg = ((modrm >> 3) & 7) | rex_r;
4073 val = cpu_ldub_code(env, s->pc++);
4074 switch (b) {
4075 case 0x14: /* pextrb */
4076 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4077 xmm_regs[reg].ZMM_B(val & 15)));
4078 if (mod == 3) {
4079 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4080 } else {
4081 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4082 s->mem_index, MO_UB);
4084 break;
4085 case 0x15: /* pextrw */
4086 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4087 xmm_regs[reg].ZMM_W(val & 7)));
4088 if (mod == 3) {
4089 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4090 } else {
4091 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4092 s->mem_index, MO_LEUW);
4094 break;
4095 case 0x16:
4096 if (ot == MO_32) { /* pextrd */
4097 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4098 offsetof(CPUX86State,
4099 xmm_regs[reg].ZMM_L(val & 3)));
4100 if (mod == 3) {
4101 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4102 } else {
4103 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4104 s->mem_index, MO_LEUL);
4106 } else { /* pextrq */
4107 #ifdef TARGET_X86_64
4108 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4109 offsetof(CPUX86State,
4110 xmm_regs[reg].ZMM_Q(val & 1)));
4111 if (mod == 3) {
4112 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4113 } else {
4114 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4115 s->mem_index, MO_LEQ);
4117 #else
4118 goto illegal_op;
4119 #endif
4121 break;
4122 case 0x17: /* extractps */
4123 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4124 xmm_regs[reg].ZMM_L(val & 3)));
4125 if (mod == 3) {
4126 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4127 } else {
4128 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4129 s->mem_index, MO_LEUL);
4131 break;
4132 case 0x20: /* pinsrb */
4133 if (mod == 3) {
4134 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
4135 } else {
4136 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4137 s->mem_index, MO_UB);
4139 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4140 xmm_regs[reg].ZMM_B(val & 15)));
4141 break;
4142 case 0x21: /* insertps */
4143 if (mod == 3) {
4144 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4145 offsetof(CPUX86State,xmm_regs[rm]
4146 .ZMM_L((val >> 6) & 3)));
4147 } else {
4148 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4149 s->mem_index, MO_LEUL);
4151 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4152 offsetof(CPUX86State,xmm_regs[reg]
4153 .ZMM_L((val >> 4) & 3)));
4154 if ((val >> 0) & 1)
4155 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4156 cpu_env, offsetof(CPUX86State,
4157 xmm_regs[reg].ZMM_L(0)));
4158 if ((val >> 1) & 1)
4159 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4160 cpu_env, offsetof(CPUX86State,
4161 xmm_regs[reg].ZMM_L(1)));
4162 if ((val >> 2) & 1)
4163 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4164 cpu_env, offsetof(CPUX86State,
4165 xmm_regs[reg].ZMM_L(2)));
4166 if ((val >> 3) & 1)
4167 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4168 cpu_env, offsetof(CPUX86State,
4169 xmm_regs[reg].ZMM_L(3)));
4170 break;
4171 case 0x22:
4172 if (ot == MO_32) { /* pinsrd */
4173 if (mod == 3) {
4174 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4175 } else {
4176 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4177 s->mem_index, MO_LEUL);
4179 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4180 offsetof(CPUX86State,
4181 xmm_regs[reg].ZMM_L(val & 3)));
4182 } else { /* pinsrq */
4183 #ifdef TARGET_X86_64
4184 if (mod == 3) {
4185 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4186 } else {
4187 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4188 s->mem_index, MO_LEQ);
4190 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4191 offsetof(CPUX86State,
4192 xmm_regs[reg].ZMM_Q(val & 1)));
4193 #else
4194 goto illegal_op;
4195 #endif
4197 break;
4199 return;
4202 if (b1) {
4203 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4204 if (mod == 3) {
4205 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4206 } else {
4207 op2_offset = offsetof(CPUX86State,xmm_t0);
4208 gen_lea_modrm(env, s, modrm);
4209 gen_ldo_env_A0(s, op2_offset);
4211 } else {
4212 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4213 if (mod == 3) {
4214 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4215 } else {
4216 op2_offset = offsetof(CPUX86State,mmx_t0);
4217 gen_lea_modrm(env, s, modrm);
4218 gen_ldq_env_A0(s, op2_offset);
4221 val = cpu_ldub_code(env, s->pc++);
4223 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4224 set_cc_op(s, CC_OP_EFLAGS);
4226 if (s->dflag == MO_64) {
4227 /* The helper must use the full 64-bit gp registers */
4228 val |= 1 << 8;
4232 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4233 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4234 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4235 break;
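/* The pextr/pinsr cases above index individual lanes of a 128-bit
   register: the immediate is masked to the lane count and the
   ZMM_B/W/L/Q macros turn it into a byte offset inside CPUX86State.
   A plain-C stand-in (the xmm_t union and pextrb name are hypothetical): */
#if 0 /* illustrative only */
typedef union {
    uint8_t  b[16];
    uint16_t w[8];
    uint32_t l[4];
    uint64_t q[2];
} xmm_t;

static uint8_t pextrb(const xmm_t *r, unsigned imm)
{
    return r->b[imm & 15];      /* imm masked to the byte-lane count */
}
#endif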
4237 case 0x33a:
4238 /* Various integer extensions at 0f 3a f[0-f]. */
4239 b = modrm | (b1 << 8);
4240 modrm = cpu_ldub_code(env, s->pc++);
4241 reg = ((modrm >> 3) & 7) | rex_r;
4243 switch (b) {
4244 case 0x3f0: /* rorx Gy,Ey, Ib */
4245 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4246 || !(s->prefix & PREFIX_VEX)
4247 || s->vex_l != 0) {
4248 goto illegal_op;
4250 ot = mo_64_32(s->dflag);
4251 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4252 b = cpu_ldub_code(env, s->pc++);
4253 if (ot == MO_64) {
4254 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4255 } else {
4256 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4257 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4258 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4260 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4261 break;
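/* rorx rotates right by an immediate without touching EFLAGS; the count
   is masked to the operand width minus one, as in the b & 63 / b & 31
   above.  A UB-free C rotate idiom (hypothetical helper name): */
#if 0 /* illustrative only */
static uint32_t rorx32(uint32_t x, unsigned imm)
{
    unsigned n = imm & 31;

    return (x >> n) | (x << (-n & 31));   /* well-defined even for n == 0 */
}
#endif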
4263 default:
4264 goto illegal_op;
4266 break;
4268 default:
4269 goto illegal_op;
4271 } else {
4272 /* generic MMX or SSE operation */
4273 switch(b) {
4274 case 0x70: /* pshufx insn */
4275 case 0xc6: /* shufps/shufpd insn */
4276 case 0xc2: /* compare insns */
4277 s->rip_offset = 1;
4278 break;
4279 default:
4280 break;
4282 if (is_xmm) {
4283 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4284 if (mod != 3) {
4285 int sz = 4;
4287 gen_lea_modrm(env, s, modrm);
4288 op2_offset = offsetof(CPUX86State,xmm_t0);
4290 switch (b) {
4291 case 0x50 ... 0x5a:
4292 case 0x5c ... 0x5f:
4293 case 0xc2:
4294 /* Most SSE scalar operations. */
4295 if (b1 == 2) {
4296 sz = 2;
4297 } else if (b1 == 3) {
4298 sz = 3;
4300 break;
4302 case 0x2e: /* ucomis[sd] */
4303 case 0x2f: /* comis[sd] */
4304 if (b1 == 0) {
4305 sz = 2;
4306 } else {
4307 sz = 3;
4309 break;
4312 switch (sz) {
4313 case 2:
4314 /* 32 bit access */
4315 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4316 tcg_gen_st32_tl(cpu_T[0], cpu_env,
4317 offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
4318 break;
4319 case 3:
4320 /* 64 bit access */
4321 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
4322 break;
4323 default:
4324 /* 128 bit access */
4325 gen_ldo_env_A0(s, op2_offset);
4326 break;
4328 } else {
4329 rm = (modrm & 7) | REX_B(s);
4330 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4332 } else {
4333 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4334 if (mod != 3) {
4335 gen_lea_modrm(env, s, modrm);
4336 op2_offset = offsetof(CPUX86State,mmx_t0);
4337 gen_ldq_env_A0(s, op2_offset);
4338 } else {
4339 rm = (modrm & 7);
4340 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4343 switch(b) {
4344 case 0x0f: /* 3DNow! data insns */
4345 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4346 goto illegal_op;
4347 val = cpu_ldub_code(env, s->pc++);
4348 sse_fn_epp = sse_op_table5[val];
4349 if (!sse_fn_epp) {
4350 goto illegal_op;
4352 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4353 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4354 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4355 break;
4356 case 0x70: /* pshufx insn */
4357 case 0xc6: /* shufps/shufpd insn */
4358 val = cpu_ldub_code(env, s->pc++);
4359 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4360 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4361 /* XXX: introduce a new table? */
4362 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4363 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4364 break;
4365 case 0xc2:
4366 /* compare insns */
4367 val = cpu_ldub_code(env, s->pc++);
4368 if (val >= 8)
4369 goto illegal_op;
4370 sse_fn_epp = sse_op_table4[val][b1];
4372 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4373 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4374 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4375 break;
4376 case 0xf7:
4377 /* maskmov: we must prepare A0 (implicit DS:(E)DI store address) */
4378 if (mod != 3)
4379 goto illegal_op;
4380 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4381 gen_extu(s->aflag, cpu_A0);
4382 gen_add_A0_ds_seg(s);
4384 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4385 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4386 /* XXX: introduce a new table? */
4387 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4388 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4389 break;
4390 default:
4391 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4392 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4393 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4394 break;
4396 if (b == 0x2e || b == 0x2f) {
4397 set_cc_op(s, CC_OP_EFLAGS);
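/* The op1_offset/op2_offset values used throughout the MMX/SSE dispatch
   above are byte offsets into CPUX86State; tcg_gen_addi_ptr turns them
   into host pointers for the helper calls at run time.  A rough plain-C
   equivalent (hypothetical helper name): */
#if 0 /* illustrative only */
static void *xmm_reg_ptr(CPUX86State *env, int reg)
{
    return (char *)env + offsetof(CPUX86State, xmm_regs[reg]);
}
#endif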
4402 /* Convert one instruction.  s->is_jmp is set if the translation must
4403    be stopped.  Returns the next pc value. */
4404 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4405 target_ulong pc_start)
4407 int b, prefixes;
4408 int shift;
4409 TCGMemOp ot, aflag, dflag;
4410 int modrm, reg, rm, mod, op, opreg, val;
4411 target_ulong next_eip, tval;
4412 int rex_w, rex_r;
4414 s->pc = pc_start;
4415 prefixes = 0;
4416 s->override = -1;
4417 rex_w = -1;
4418 rex_r = 0;
4419 #ifdef TARGET_X86_64
4420 s->rex_x = 0;
4421 s->rex_b = 0;
4422 x86_64_hregs = 0;
4423 #endif
4424 s->rip_offset = 0; /* for relative ip address */
4425 s->vex_l = 0;
4426 s->vex_v = 0;
4427 next_byte:
4428 b = cpu_ldub_code(env, s->pc);
4429 s->pc++;
4430 /* Collect prefixes. */
4431 switch (b) {
4432 case 0xf3:
4433 prefixes |= PREFIX_REPZ;
4434 goto next_byte;
4435 case 0xf2:
4436 prefixes |= PREFIX_REPNZ;
4437 goto next_byte;
4438 case 0xf0:
4439 prefixes |= PREFIX_LOCK;
4440 goto next_byte;
4441 case 0x2e:
4442 s->override = R_CS;
4443 goto next_byte;
4444 case 0x36:
4445 s->override = R_SS;
4446 goto next_byte;
4447 case 0x3e:
4448 s->override = R_DS;
4449 goto next_byte;
4450 case 0x26:
4451 s->override = R_ES;
4452 goto next_byte;
4453 case 0x64:
4454 s->override = R_FS;
4455 goto next_byte;
4456 case 0x65:
4457 s->override = R_GS;
4458 goto next_byte;
4459 case 0x66:
4460 prefixes |= PREFIX_DATA;
4461 goto next_byte;
4462 case 0x67:
4463 prefixes |= PREFIX_ADR;
4464 goto next_byte;
4465 #ifdef TARGET_X86_64
4466 case 0x40 ... 0x4f:
4467 if (CODE64(s)) {
4468 /* REX prefix */
4469 rex_w = (b >> 3) & 1;
4470 rex_r = (b & 0x4) << 1;
4471 s->rex_x = (b & 0x2) << 2;
4472 REX_B(s) = (b & 0x1) << 3;
4473 x86_64_hregs = 1; /* select uniform byte register addressing */
4474 goto next_byte;
4476 break;
4477 #endif
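/* REX prefix layout, as decoded above: 0100WRXB.  W selects 64-bit
   operand size; R, X and B extend the ModRM reg, SIB index and rm/base
   fields.  The shifts pre-position each bit so it can later be OR-ed
   straight into the corresponding 3-bit field.  Sketch (hypothetical
   struct and function names): */
#if 0 /* illustrative only */
struct rex_bits { int w, r, x, b; };

static struct rex_bits decode_rex(uint8_t byte)   /* byte in 0x40..0x4f */
{
    struct rex_bits p;

    p.w = (byte >> 3) & 1;
    p.r = (byte & 0x4) << 1;    /* bit 3 of 'reg' */
    p.x = (byte & 0x2) << 2;    /* bit 3 of 'index' */
    p.b = (byte & 0x1) << 3;    /* bit 3 of 'rm'/'base' */
    return p;
}
#endif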
4478 case 0xc5: /* 2-byte VEX */
4479 case 0xc4: /* 3-byte VEX */
4480 /* VEX prefixes are valid only in 32/64-bit protected mode;
4481    otherwise the c4/c5 byte decodes as LES or LDS. */
4482 if (s->code32 && !s->vm86) {
4483 static const int pp_prefix[4] = {
4484 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4486 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4488 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4489 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4490 otherwise the instruction is LES or LDS. */
4491 break;
4493 s->pc++;
4495 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4496 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4497 | PREFIX_LOCK | PREFIX_DATA)) {
4498 goto illegal_op;
4500 #ifdef TARGET_X86_64
4501 if (x86_64_hregs) {
4502 goto illegal_op;
4504 #endif
4505 rex_r = (~vex2 >> 4) & 8;
4506 if (b == 0xc5) {
4507 vex3 = vex2;
4508 b = cpu_ldub_code(env, s->pc++);
4509 } else {
4510 #ifdef TARGET_X86_64
4511 s->rex_x = (~vex2 >> 3) & 8;
4512 s->rex_b = (~vex2 >> 2) & 8;
4513 #endif
4514 vex3 = cpu_ldub_code(env, s->pc++);
4515 rex_w = (vex3 >> 7) & 1;
4516 switch (vex2 & 0x1f) {
4517 case 0x01: /* Implied 0f leading opcode bytes. */
4518 b = cpu_ldub_code(env, s->pc++) | 0x100;
4519 break;
4520 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4521 b = 0x138;
4522 break;
4523 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4524 b = 0x13a;
4525 break;
4526 default: /* Reserved for future use. */
4527 goto illegal_op;
4530 s->vex_v = (~vex3 >> 3) & 0xf;
4531 s->vex_l = (vex3 >> 2) & 1;
4532 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4534 break;
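/* VEX field layout, matching the decode above.  The 2-byte form (c5)
   packs R|vvvv|L|pp into a single payload byte; the 3-byte form (c4)
   adds X, B and the implied-opcode map (mmmmm), plus W in the second
   payload byte.  R/X/B and vvvv are stored inverted, hence the ~ above.
   Sketch for the 3-byte form (hypothetical function name): */
#if 0 /* illustrative only */
static void decode_vex3(uint8_t payload1, uint8_t payload2,
                        int *rex_r, int *rex_x, int *rex_b, int *map,
                        int *rex_w, int *vvvv, int *l, int *pp)
{
    *rex_r = (~payload1 >> 4) & 8;
    *rex_x = (~payload1 >> 3) & 8;
    *rex_b = (~payload1 >> 2) & 8;
    *map   = payload1 & 0x1f;     /* 1 = 0f, 2 = 0f 38, 3 = 0f 3a */
    *rex_w = (payload2 >> 7) & 1;
    *vvvv  = (~payload2 >> 3) & 0xf;
    *l     = (payload2 >> 2) & 1;
    *pp    = payload2 & 3;        /* none/66/f3/f2 */
}
#endif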
4537 /* Post-process prefixes. */
4538 if (CODE64(s)) {
4539 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4540 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4541 over 0x66 if both are present. */
4542 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4543 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4544 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4545 } else {
4546 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4547 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4548 dflag = MO_32;
4549 } else {
4550 dflag = MO_16;
4552 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4553 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4554 aflag = MO_32;
4555 } else {
4556 aflag = MO_16;
4560 s->prefix = prefixes;
4561 s->aflag = aflag;
4562 s->dflag = dflag;
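/* Summary of the size resolution above, as a reference sketch
   (hypothetical function name; MO_* and TCGMemOp as used in this file): */
#if 0 /* illustrative only */
static TCGMemOp effective_dflag(bool code64, bool code32,
                                int rex_w, bool prefix_data)
{
    if (code64) {
        /* rex_w beats 0x66; otherwise 0x66 selects 16-bit data */
        return rex_w > 0 ? MO_64 : (prefix_data ? MO_16 : MO_32);
    }
    /* in 16/32-bit mode, 0x66 toggles the default size */
    return (code32 ^ prefix_data) ? MO_32 : MO_16;
}
#endif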
4564 /* lock generation */
4565 if (prefixes & PREFIX_LOCK)
4566 gen_helper_lock();
4568 /* now check op code */
4569 reswitch:
4570 switch(b) {
4571 case 0x0f:
4572 /**************************/
4573 /* extended op code */
4574 b = cpu_ldub_code(env, s->pc++) | 0x100;
4575 goto reswitch;
4577 /**************************/
4578 /* arith & logic */
4579 case 0x00 ... 0x05:
4580 case 0x08 ... 0x0d:
4581 case 0x10 ... 0x15:
4582 case 0x18 ... 0x1d:
4583 case 0x20 ... 0x25:
4584 case 0x28 ... 0x2d:
4585 case 0x30 ... 0x35:
4586 case 0x38 ... 0x3d:
4588 int op, f, val;
4589 op = (b >> 3) & 7;
4590 f = (b >> 1) & 3;
4592 ot = mo_b_d(b, dflag);
4594 switch(f) {
4595 case 0: /* OP Ev, Gv */
4596 modrm = cpu_ldub_code(env, s->pc++);
4597 reg = ((modrm >> 3) & 7) | rex_r;
4598 mod = (modrm >> 6) & 3;
4599 rm = (modrm & 7) | REX_B(s);
4600 if (mod != 3) {
4601 gen_lea_modrm(env, s, modrm);
4602 opreg = OR_TMP0;
4603 } else if (op == OP_XORL && rm == reg) {
4604 xor_zero:
4605 /* xor reg, reg optimisation */
4606 set_cc_op(s, CC_OP_CLR);
4607 tcg_gen_movi_tl(cpu_T[0], 0);
4608 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4609 break;
4610 } else {
4611 opreg = rm;
4613 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4614 gen_op(s, op, ot, opreg);
4615 break;
4616 case 1: /* OP Gv, Ev */
4617 modrm = cpu_ldub_code(env, s->pc++);
4618 mod = (modrm >> 6) & 3;
4619 reg = ((modrm >> 3) & 7) | rex_r;
4620 rm = (modrm & 7) | REX_B(s);
4621 if (mod != 3) {
4622 gen_lea_modrm(env, s, modrm);
4623 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4624 } else if (op == OP_XORL && rm == reg) {
4625 goto xor_zero;
4626 } else {
4627 gen_op_mov_v_reg(ot, cpu_T[1], rm);
4629 gen_op(s, op, ot, reg);
4630 break;
4631 case 2: /* OP A, Iv */
4632 val = insn_get(env, s, ot);
4633 tcg_gen_movi_tl(cpu_T[1], val);
4634 gen_op(s, op, ot, OR_EAX);
4635 break;
4638 break;
4640 case 0x82:
4641 if (CODE64(s))
4642 goto illegal_op;
4643 case 0x80: /* GRP1 */
4644 case 0x81:
4645 case 0x83:
4647 int val;
4649 ot = mo_b_d(b, dflag);
4651 modrm = cpu_ldub_code(env, s->pc++);
4652 mod = (modrm >> 6) & 3;
4653 rm = (modrm & 7) | REX_B(s);
4654 op = (modrm >> 3) & 7;
4656 if (mod != 3) {
4657 if (b == 0x83)
4658 s->rip_offset = 1;
4659 else
4660 s->rip_offset = insn_const_size(ot);
4661 gen_lea_modrm(env, s, modrm);
4662 opreg = OR_TMP0;
4663 } else {
4664 opreg = rm;
4667 switch(b) {
4668 default:
4669 case 0x80:
4670 case 0x81:
4671 case 0x82:
4672 val = insn_get(env, s, ot);
4673 break;
4674 case 0x83:
4675 val = (int8_t)insn_get(env, s, MO_8);
4676 break;
4678 tcg_gen_movi_tl(cpu_T[1], val);
4679 gen_op(s, op, ot, opreg);
4681 break;
4683 /**************************/
4684 /* inc, dec, and other misc arith */
4685 case 0x40 ... 0x47: /* inc Gv */
4686 ot = dflag;
4687 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4688 break;
4689 case 0x48 ... 0x4f: /* dec Gv */
4690 ot = dflag;
4691 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4692 break;
4693 case 0xf6: /* GRP3 */
4694 case 0xf7:
4695 ot = mo_b_d(b, dflag);
4697 modrm = cpu_ldub_code(env, s->pc++);
4698 mod = (modrm >> 6) & 3;
4699 rm = (modrm & 7) | REX_B(s);
4700 op = (modrm >> 3) & 7;
4701 if (mod != 3) {
4702 if (op == 0)
4703 s->rip_offset = insn_const_size(ot);
4704 gen_lea_modrm(env, s, modrm);
4705 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4706 } else {
4707 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4710 switch(op) {
4711 case 0: /* test */
4712 val = insn_get(env, s, ot);
4713 tcg_gen_movi_tl(cpu_T[1], val);
4714 gen_op_testl_T0_T1_cc();
4715 set_cc_op(s, CC_OP_LOGICB + ot);
4716 break;
4717 case 2: /* not */
4718 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4719 if (mod != 3) {
4720 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4721 } else {
4722 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4724 break;
4725 case 3: /* neg */
4726 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4727 if (mod != 3) {
4728 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4729 } else {
4730 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4732 gen_op_update_neg_cc();
4733 set_cc_op(s, CC_OP_SUBB + ot);
4734 break;
4735 case 4: /* mul */
4736 switch(ot) {
4737 case MO_8:
4738 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4739 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4740 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4741 /* XXX: use 32 bit mul which could be faster */
4742 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4743 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4744 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4745 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4746 set_cc_op(s, CC_OP_MULB);
4747 break;
4748 case MO_16:
4749 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4750 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4751 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4752 /* XXX: use 32 bit mul which could be faster */
4753 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4754 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4755 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4756 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4757 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4758 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4759 set_cc_op(s, CC_OP_MULW);
4760 break;
4761 default:
4762 case MO_32:
4763 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4764 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4765 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4766 cpu_tmp2_i32, cpu_tmp3_i32);
4767 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4768 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4769 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4770 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4771 set_cc_op(s, CC_OP_MULL);
4772 break;
4773 #ifdef TARGET_X86_64
4774 case MO_64:
4775 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4776 cpu_T[0], cpu_regs[R_EAX]);
4777 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4778 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4779 set_cc_op(s, CC_OP_MULQ);
4780 break;
4781 #endif
4783 break;
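/* Flag rule implemented above for unsigned mul: CF = OF = (high half of
   the double-width product != 0).  The low half is kept in cc_dst and
   the high half in cc_src so the flags can be derived lazily.  Sketch
   (hypothetical helper name): */
#if 0 /* illustrative only */
static void mul32_flags(uint32_t a, uint32_t b, int *cf_of)
{
    uint64_t prod = (uint64_t)a * b;

    *cf_of = (uint32_t)(prod >> 32) != 0;
}
#endif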
4784 case 5: /* imul */
4785 switch(ot) {
4786 case MO_8:
4787 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4788 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4789 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4790 /* XXX: use 32 bit mul which could be faster */
4791 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4792 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4793 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4794 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4795 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4796 set_cc_op(s, CC_OP_MULB);
4797 break;
4798 case MO_16:
4799 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4800 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4801 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4802 /* XXX: use 32 bit mul which could be faster */
4803 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4804 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4805 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4806 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4807 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4808 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4809 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4810 set_cc_op(s, CC_OP_MULW);
4811 break;
4812 default:
4813 case MO_32:
4814 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4815 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4816 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4817 cpu_tmp2_i32, cpu_tmp3_i32);
4818 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4819 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4820 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4821 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4822 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4823 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4824 set_cc_op(s, CC_OP_MULL);
4825 break;
4826 #ifdef TARGET_X86_64
4827 case MO_64:
4828 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4829 cpu_T[0], cpu_regs[R_EAX]);
4830 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4831 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4832 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4833 set_cc_op(s, CC_OP_MULQ);
4834 break;
4835 #endif
4837 break;
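/* For signed imul the rule differs: CF = OF = (the high half is not the
   sign-extension of the low half), i.e. the product does not fit in the
   destination width.  The sari/sub sequences above leave exactly that
   difference in cc_src.  Sketch (hypothetical helper name): */
#if 0 /* illustrative only */
static void imul32_flags(int32_t a, int32_t b, int *cf_of)
{
    int64_t prod = (int64_t)a * b;

    *cf_of = prod != (int32_t)prod;   /* result overflows 32 bits */
}
#endif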
4838 case 6: /* div */
4839 switch(ot) {
4840 case MO_8:
4841 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4842 break;
4843 case MO_16:
4844 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4845 break;
4846 default:
4847 case MO_32:
4848 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4849 break;
4850 #ifdef TARGET_X86_64
4851 case MO_64:
4852 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4853 break;
4854 #endif
4856 break;
4857 case 7: /* idiv */
4858 switch(ot) {
4859 case MO_8:
4860 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4861 break;
4862 case MO_16:
4863 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4864 break;
4865 default:
4866 case MO_32:
4867 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4868 break;
4869 #ifdef TARGET_X86_64
4870 case MO_64:
4871 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4872 break;
4873 #endif
4875 break;
4876 default:
4877 goto illegal_op;
4879 break;
4881 case 0xfe: /* GRP4 */
4882 case 0xff: /* GRP5 */
4883 ot = mo_b_d(b, dflag);
4885 modrm = cpu_ldub_code(env, s->pc++);
4886 mod = (modrm >> 6) & 3;
4887 rm = (modrm & 7) | REX_B(s);
4888 op = (modrm >> 3) & 7;
4889 if (op >= 2 && b == 0xfe) {
4890 goto illegal_op;
4892 if (CODE64(s)) {
4893 if (op == 2 || op == 4) {
4894 /* operand size for jumps is 64 bit */
4895 ot = MO_64;
4896 } else if (op == 3 || op == 5) {
4897 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4898 } else if (op == 6) {
4899 /* default push size is 64 bit */
4900 ot = mo_pushpop(s, dflag);
4903 if (mod != 3) {
4904 gen_lea_modrm(env, s, modrm);
4905 if (op >= 2 && op != 3 && op != 5)
4906 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4907 } else {
4908 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4911 switch(op) {
4912 case 0: /* inc Ev */
4913 if (mod != 3)
4914 opreg = OR_TMP0;
4915 else
4916 opreg = rm;
4917 gen_inc(s, ot, opreg, 1);
4918 break;
4919 case 1: /* dec Ev */
4920 if (mod != 3)
4921 opreg = OR_TMP0;
4922 else
4923 opreg = rm;
4924 gen_inc(s, ot, opreg, -1);
4925 break;
4926 case 2: /* call Ev */
4927 /* XXX: optimize if memory (no 'and' is necessary) */
4928 if (dflag == MO_16) {
4929 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4931 next_eip = s->pc - s->cs_base;
4932 tcg_gen_movi_tl(cpu_T[1], next_eip);
4933 gen_push_v(s, cpu_T[1]);
4934 gen_op_jmp_v(cpu_T[0]);
4935 gen_eob(s);
4936 break;
4937 case 3: /* lcall Ev */
4938 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4939 gen_add_A0_im(s, 1 << ot);
4940 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4941 do_lcall:
4942 if (s->pe && !s->vm86) {
4943 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4944 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4945 tcg_const_i32(dflag - 1),
4946 tcg_const_tl(s->pc - s->cs_base));
4947 } else {
4948 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4949 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4950 tcg_const_i32(dflag - 1),
4951 tcg_const_i32(s->pc - s->cs_base));
4953 gen_eob(s);
4954 break;
4955 case 4: /* jmp Ev */
4956 if (dflag == MO_16) {
4957 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4959 gen_op_jmp_v(cpu_T[0]);
4960 gen_eob(s);
4961 break;
4962 case 5: /* ljmp Ev */
4963 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4964 gen_add_A0_im(s, 1 << ot);
4965 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4966 do_ljmp:
4967 if (s->pe && !s->vm86) {
4968 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4969 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4970 tcg_const_tl(s->pc - s->cs_base));
4971 } else {
4972 gen_op_movl_seg_T0_vm(R_CS);
4973 gen_op_jmp_v(cpu_T[1]);
4975 gen_eob(s);
4976 break;
4977 case 6: /* push Ev */
4978 gen_push_v(s, cpu_T[0]);
4979 break;
4980 default:
4981 goto illegal_op;
4983 break;
4985 case 0x84: /* test Ev, Gv */
4986 case 0x85:
4987 ot = mo_b_d(b, dflag);
4989 modrm = cpu_ldub_code(env, s->pc++);
4990 reg = ((modrm >> 3) & 7) | rex_r;
4992 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4993 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4994 gen_op_testl_T0_T1_cc();
4995 set_cc_op(s, CC_OP_LOGICB + ot);
4996 break;
4998 case 0xa8: /* test eAX, Iv */
4999 case 0xa9:
5000 ot = mo_b_d(b, dflag);
5001 val = insn_get(env, s, ot);
5003 gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
5004 tcg_gen_movi_tl(cpu_T[1], val);
5005 gen_op_testl_T0_T1_cc();
5006 set_cc_op(s, CC_OP_LOGICB + ot);
5007 break;
5009 case 0x98: /* CWDE/CBW */
5010 switch (dflag) {
5011 #ifdef TARGET_X86_64
5012 case MO_64:
5013 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5014 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5015 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
5016 break;
5017 #endif
5018 case MO_32:
5019 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5020 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5021 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
5022 break;
5023 case MO_16:
5024 gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
5025 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5026 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
5027 break;
5028 default:
5029 tcg_abort();
5031 break;
5032 case 0x99: /* CDQ/CWD */
5033 switch (dflag) {
5034 #ifdef TARGET_X86_64
5035 case MO_64:
5036 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
5037 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5038 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
5039 break;
5040 #endif
5041 case MO_32:
5042 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5043 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5044 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5045 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
5046 break;
5047 case MO_16:
5048 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5049 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5050 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5051 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
5052 break;
5053 default:
5054 tcg_abort();
5056 break;
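/* cwd/cdq/cqo replicate the accumulator's sign bit into DX/EDX/RDX; an
   arithmetic right shift by width-1 does exactly that, as above.
   Sketch (hypothetical helper name): */
#if 0 /* illustrative only */
static uint32_t cdq_high(uint32_t eax)
{
    return (uint32_t)((int32_t)eax >> 31);   /* 0 or 0xffffffff */
}
#endif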
5057 case 0x1af: /* imul Gv, Ev */
5058 case 0x69: /* imul Gv, Ev, I */
5059 case 0x6b:
5060 ot = dflag;
5061 modrm = cpu_ldub_code(env, s->pc++);
5062 reg = ((modrm >> 3) & 7) | rex_r;
5063 if (b == 0x69)
5064 s->rip_offset = insn_const_size(ot);
5065 else if (b == 0x6b)
5066 s->rip_offset = 1;
5067 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5068 if (b == 0x69) {
5069 val = insn_get(env, s, ot);
5070 tcg_gen_movi_tl(cpu_T[1], val);
5071 } else if (b == 0x6b) {
5072 val = (int8_t)insn_get(env, s, MO_8);
5073 tcg_gen_movi_tl(cpu_T[1], val);
5074 } else {
5075 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5077 switch (ot) {
5078 #ifdef TARGET_X86_64
5079 case MO_64:
5080 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5081 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5082 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5083 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5084 break;
5085 #endif
5086 case MO_32:
5087 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5088 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5089 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5090 cpu_tmp2_i32, cpu_tmp3_i32);
5091 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5092 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5093 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5094 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5095 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5096 break;
5097 default:
5098 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5099 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5100 /* XXX: use 32 bit mul which could be faster */
5101 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5102 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5103 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5104 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5105 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5106 break;
5108 set_cc_op(s, CC_OP_MULB + ot);
5109 break;
5110 case 0x1c0:
5111 case 0x1c1: /* xadd Ev, Gv */
5112 ot = mo_b_d(b, dflag);
5113 modrm = cpu_ldub_code(env, s->pc++);
5114 reg = ((modrm >> 3) & 7) | rex_r;
5115 mod = (modrm >> 6) & 3;
5116 if (mod == 3) {
5117 rm = (modrm & 7) | REX_B(s);
5118 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5119 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5120 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5121 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5122 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5123 } else {
5124 gen_lea_modrm(env, s, modrm);
5125 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5126 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5127 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5128 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5129 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5131 gen_op_update2_cc();
5132 set_cc_op(s, CC_OP_ADDB + ot);
5133 break;
5134 case 0x1b0:
5135 case 0x1b1: /* cmpxchg Ev, Gv */
5137 TCGLabel *label1, *label2;
5138 TCGv t0, t1, t2, a0;
5140 ot = mo_b_d(b, dflag);
5141 modrm = cpu_ldub_code(env, s->pc++);
5142 reg = ((modrm >> 3) & 7) | rex_r;
5143 mod = (modrm >> 6) & 3;
5144 t0 = tcg_temp_local_new();
5145 t1 = tcg_temp_local_new();
5146 t2 = tcg_temp_local_new();
5147 a0 = tcg_temp_local_new();
5148 gen_op_mov_v_reg(ot, t1, reg);
5149 if (mod == 3) {
5150 rm = (modrm & 7) | REX_B(s);
5151 gen_op_mov_v_reg(ot, t0, rm);
5152 } else {
5153 gen_lea_modrm(env, s, modrm);
5154 tcg_gen_mov_tl(a0, cpu_A0);
5155 gen_op_ld_v(s, ot, t0, a0);
5156 rm = 0; /* avoid warning */
5158 label1 = gen_new_label();
5159 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5160 gen_extu(ot, t0);
5161 gen_extu(ot, t2);
5162 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5163 label2 = gen_new_label();
5164 if (mod == 3) {
5165 gen_op_mov_reg_v(ot, R_EAX, t0);
5166 tcg_gen_br(label2);
5167 gen_set_label(label1);
5168 gen_op_mov_reg_v(ot, rm, t1);
5169 } else {
5170 /* perform no-op store cycle like physical cpu; must be
5171 before changing accumulator to ensure idempotency if
5172 the store faults and the instruction is restarted */
5173 gen_op_st_v(s, ot, t0, a0);
5174 gen_op_mov_reg_v(ot, R_EAX, t0);
5175 tcg_gen_br(label2);
5176 gen_set_label(label1);
5177 gen_op_st_v(s, ot, t1, a0);
5179 gen_set_label(label2);
5180 tcg_gen_mov_tl(cpu_cc_src, t0);
5181 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5182 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5183 set_cc_op(s, CC_OP_SUBB + ot);
5184 tcg_temp_free(t0);
5185 tcg_temp_free(t1);
5186 tcg_temp_free(t2);
5187 tcg_temp_free(a0);
5189 break;
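/* Reference sketch of the cmpxchg behaviour modelled above: compare the
   accumulator with the destination; on match store the source (ZF=1),
   on mismatch load the destination into the accumulator (ZF=0).  A
   memory destination is always written, which is why the translator
   emits the no-op store cycle before touching EAX.  The real insn sets
   the full CMP flag set; this sketch returns only ZF (hypothetical
   helper name): */
#if 0 /* illustrative only */
static int cmpxchg32(uint32_t *dest, uint32_t *eax, uint32_t src)
{
    uint32_t old = *dest;

    if (*eax == old) {
        *dest = src;
        return 1;               /* ZF = 1 */
    }
    *dest = old;                /* unconditional write-back */
    *eax = old;
    return 0;                   /* ZF = 0 */
}
#endif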
5190 case 0x1c7: /* cmpxchg8b */
5191 modrm = cpu_ldub_code(env, s->pc++);
5192 mod = (modrm >> 6) & 3;
5193 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5194 goto illegal_op;
5195 #ifdef TARGET_X86_64
5196 if (dflag == MO_64) {
5197 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5198 goto illegal_op;
5199 gen_lea_modrm(env, s, modrm);
5200 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5201 } else
5202 #endif
5204 if (!(s->cpuid_features & CPUID_CX8))
5205 goto illegal_op;
5206 gen_lea_modrm(env, s, modrm);
5207 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5209 set_cc_op(s, CC_OP_EFLAGS);
5210 break;
5212 /**************************/
5213 /* push/pop */
5214 case 0x50 ... 0x57: /* push */
5215 gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
5216 gen_push_v(s, cpu_T[0]);
5217 break;
5218 case 0x58 ... 0x5f: /* pop */
5219 ot = gen_pop_T0(s);
5220 /* NOTE: order is important for pop %sp */
5221 gen_pop_update(s, ot);
5222 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
5223 break;
5224 case 0x60: /* pusha */
5225 if (CODE64(s))
5226 goto illegal_op;
5227 gen_pusha(s);
5228 break;
5229 case 0x61: /* popa */
5230 if (CODE64(s))
5231 goto illegal_op;
5232 gen_popa(s);
5233 break;
5234 case 0x68: /* push Iv */
5235 case 0x6a:
5236 ot = mo_pushpop(s, dflag);
5237 if (b == 0x68)
5238 val = insn_get(env, s, ot);
5239 else
5240 val = (int8_t)insn_get(env, s, MO_8);
5241 tcg_gen_movi_tl(cpu_T[0], val);
5242 gen_push_v(s, cpu_T[0]);
5243 break;
5244 case 0x8f: /* pop Ev */
5245 modrm = cpu_ldub_code(env, s->pc++);
5246 mod = (modrm >> 6) & 3;
5247 ot = gen_pop_T0(s);
5248 if (mod == 3) {
5249 /* NOTE: order is important for pop %sp */
5250 gen_pop_update(s, ot);
5251 rm = (modrm & 7) | REX_B(s);
5252 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5253 } else {
5254 /* NOTE: order is important too for MMU exceptions */
5255 s->popl_esp_hack = 1 << ot;
5256 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5257 s->popl_esp_hack = 0;
5258 gen_pop_update(s, ot);
5260 break;
5261 case 0xc8: /* enter */
5263 int level;
5264 val = cpu_lduw_code(env, s->pc);
5265 s->pc += 2;
5266 level = cpu_ldub_code(env, s->pc++);
5267 gen_enter(s, val, level);
5269 break;
5270 case 0xc9: /* leave */
5271 /* XXX: exception not precise (ESP is updated before potential exception) */
5272 if (CODE64(s)) {
5273 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
5274 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
5275 } else if (s->ss32) {
5276 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
5277 gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
5278 } else {
5279 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
5280 gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
5282 ot = gen_pop_T0(s);
5283 gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
5284 gen_pop_update(s, ot);
5285 break;
5286 case 0x06: /* push es */
5287 case 0x0e: /* push cs */
5288 case 0x16: /* push ss */
5289 case 0x1e: /* push ds */
5290 if (CODE64(s))
5291 goto illegal_op;
5292 gen_op_movl_T0_seg(b >> 3);
5293 gen_push_v(s, cpu_T[0]);
5294 break;
5295 case 0x1a0: /* push fs */
5296 case 0x1a8: /* push gs */
5297 gen_op_movl_T0_seg((b >> 3) & 7);
5298 gen_push_v(s, cpu_T[0]);
5299 break;
5300 case 0x07: /* pop es */
5301 case 0x17: /* pop ss */
5302 case 0x1f: /* pop ds */
5303 if (CODE64(s))
5304 goto illegal_op;
5305 reg = b >> 3;
5306 ot = gen_pop_T0(s);
5307 gen_movl_seg_T0(s, reg);
5308 gen_pop_update(s, ot);
5309 if (reg == R_SS) {
5310 /* If reg == SS, inhibit interrupts/trace. */
5311 /* If several consecutive instructions disable interrupts, only
5312    the first one takes effect. */
5313 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5314 gen_helper_set_inhibit_irq(cpu_env);
5315 s->tf = 0;
5317 if (s->is_jmp) {
5318 gen_jmp_im(s->pc - s->cs_base);
5319 gen_eob(s);
5321 break;
5322 case 0x1a1: /* pop fs */
5323 case 0x1a9: /* pop gs */
5324 ot = gen_pop_T0(s);
5325 gen_movl_seg_T0(s, (b >> 3) & 7);
5326 gen_pop_update(s, ot);
5327 if (s->is_jmp) {
5328 gen_jmp_im(s->pc - s->cs_base);
5329 gen_eob(s);
5331 break;
5333 /**************************/
5334 /* mov */
5335 case 0x88:
5336 case 0x89: /* mov Gv, Ev */
5337 ot = mo_b_d(b, dflag);
5338 modrm = cpu_ldub_code(env, s->pc++);
5339 reg = ((modrm >> 3) & 7) | rex_r;
5341 /* generate a generic store */
5342 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5343 break;
5344 case 0xc6:
5345 case 0xc7: /* mov Ev, Iv */
5346 ot = mo_b_d(b, dflag);
5347 modrm = cpu_ldub_code(env, s->pc++);
5348 mod = (modrm >> 6) & 3;
5349 if (mod != 3) {
5350 s->rip_offset = insn_const_size(ot);
5351 gen_lea_modrm(env, s, modrm);
5353 val = insn_get(env, s, ot);
5354 tcg_gen_movi_tl(cpu_T[0], val);
5355 if (mod != 3) {
5356 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5357 } else {
5358 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
5360 break;
5361 case 0x8a:
5362 case 0x8b: /* mov Ev, Gv */
5363 ot = mo_b_d(b, dflag);
5364 modrm = cpu_ldub_code(env, s->pc++);
5365 reg = ((modrm >> 3) & 7) | rex_r;
5367 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5368 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5369 break;
5370 case 0x8e: /* mov seg, Gv */
5371 modrm = cpu_ldub_code(env, s->pc++);
5372 reg = (modrm >> 3) & 7;
5373 if (reg >= 6 || reg == R_CS)
5374 goto illegal_op;
5375 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5376 gen_movl_seg_T0(s, reg);
5377 if (reg == R_SS) {
5378 /* If reg == SS, inhibit interrupts/trace. */
5379 /* If several consecutive instructions disable interrupts, only
5380    the first one takes effect. */
5381 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5382 gen_helper_set_inhibit_irq(cpu_env);
5383 s->tf = 0;
5385 if (s->is_jmp) {
5386 gen_jmp_im(s->pc - s->cs_base);
5387 gen_eob(s);
5389 break;
5390 case 0x8c: /* mov Gv, seg */
5391 modrm = cpu_ldub_code(env, s->pc++);
5392 reg = (modrm >> 3) & 7;
5393 mod = (modrm >> 6) & 3;
5394 if (reg >= 6)
5395 goto illegal_op;
5396 gen_op_movl_T0_seg(reg);
5397 ot = mod == 3 ? dflag : MO_16;
5398 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5399 break;
5401 case 0x1b6: /* movzbS Gv, Eb */
5402 case 0x1b7: /* movzwS Gv, Eb */
5403 case 0x1be: /* movsbS Gv, Eb */
5404 case 0x1bf: /* movswS Gv, Eb */
5406 TCGMemOp d_ot;
5407 TCGMemOp s_ot;
5409 /* d_ot is the size of destination */
5410 d_ot = dflag;
5411 /* ot is the size of source */
5412 ot = (b & 1) + MO_8;
5413 /* s_ot is the sign+size of source */
5414 s_ot = b & 8 ? MO_SIGN | ot : ot;
5416 modrm = cpu_ldub_code(env, s->pc++);
5417 reg = ((modrm >> 3) & 7) | rex_r;
5418 mod = (modrm >> 6) & 3;
5419 rm = (modrm & 7) | REX_B(s);
5421 if (mod == 3) {
5422 gen_op_mov_v_reg(ot, cpu_T[0], rm);
5423 switch (s_ot) {
5424 case MO_UB:
5425 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5426 break;
5427 case MO_SB:
5428 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5429 break;
5430 case MO_UW:
5431 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5432 break;
5433 default:
5434 case MO_SW:
5435 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5436 break;
5438 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5439 } else {
5440 gen_lea_modrm(env, s, modrm);
5441 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
5442 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5445 break;
5447 case 0x8d: /* lea */
5448 ot = dflag;
5449 modrm = cpu_ldub_code(env, s->pc++);
5450 mod = (modrm >> 6) & 3;
5451 if (mod == 3)
5452 goto illegal_op;
5453 reg = ((modrm >> 3) & 7) | rex_r;
5454 /* we must ensure that no segment is added */
5455 s->override = -1;
5456 val = s->addseg;
5457 s->addseg = 0;
5458 gen_lea_modrm(env, s, modrm);
5459 s->addseg = val;
5460 gen_op_mov_reg_v(ot, reg, cpu_A0);
5461 break;
5463 case 0xa0: /* mov EAX, Ov */
5464 case 0xa1:
5465 case 0xa2: /* mov Ov, EAX */
5466 case 0xa3:
5468 target_ulong offset_addr;
5470 ot = mo_b_d(b, dflag);
5471 switch (s->aflag) {
5472 #ifdef TARGET_X86_64
5473 case MO_64:
5474 offset_addr = cpu_ldq_code(env, s->pc);
5475 s->pc += 8;
5476 break;
5477 #endif
5478 default:
5479 offset_addr = insn_get(env, s, s->aflag);
5480 break;
5482 tcg_gen_movi_tl(cpu_A0, offset_addr);
5483 gen_add_A0_ds_seg(s);
5484 if ((b & 2) == 0) {
5485 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
5486 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
5487 } else {
5488 gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
5489 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5492 break;
5493 case 0xd7: /* xlat */
5494 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5495 tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
5496 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5497 gen_extu(s->aflag, cpu_A0);
5498 gen_add_A0_ds_seg(s);
5499 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
5500 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
5501 break;
5502 case 0xb0 ... 0xb7: /* mov R, Ib */
5503 val = insn_get(env, s, MO_8);
5504 tcg_gen_movi_tl(cpu_T[0], val);
5505 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
5506 break;
5507 case 0xb8 ... 0xbf: /* mov R, Iv */
5508 #ifdef TARGET_X86_64
5509 if (dflag == MO_64) {
5510 uint64_t tmp;
5511 /* 64 bit case */
5512 tmp = cpu_ldq_code(env, s->pc);
5513 s->pc += 8;
5514 reg = (b & 7) | REX_B(s);
5515 tcg_gen_movi_tl(cpu_T[0], tmp);
5516 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
5517 } else
5518 #endif
5520 ot = dflag;
5521 val = insn_get(env, s, ot);
5522 reg = (b & 7) | REX_B(s);
5523 tcg_gen_movi_tl(cpu_T[0], val);
5524 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5526 break;
5528 case 0x91 ... 0x97: /* xchg R, EAX */
5529 do_xchg_reg_eax:
5530 ot = dflag;
5531 reg = (b & 7) | REX_B(s);
5532 rm = R_EAX;
5533 goto do_xchg_reg;
5534 case 0x86:
5535 case 0x87: /* xchg Ev, Gv */
5536 ot = mo_b_d(b, dflag);
5537 modrm = cpu_ldub_code(env, s->pc++);
5538 reg = ((modrm >> 3) & 7) | rex_r;
5539 mod = (modrm >> 6) & 3;
5540 if (mod == 3) {
5541 rm = (modrm & 7) | REX_B(s);
5542 do_xchg_reg:
5543 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5544 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5545 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5546 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5547 } else {
5548 gen_lea_modrm(env, s, modrm);
5549 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5550 /* for xchg, lock is implicit */
5551 if (!(prefixes & PREFIX_LOCK))
5552 gen_helper_lock();
5553 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5554 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5555 if (!(prefixes & PREFIX_LOCK))
5556 gen_helper_unlock();
5557 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5559 break;
5560 case 0xc4: /* les Gv */
5561 /* In CODE64 this is VEX3; see above. */
5562 op = R_ES;
5563 goto do_lxx;
5564 case 0xc5: /* lds Gv */
5565 /* In CODE64 this is VEX2; see above. */
5566 op = R_DS;
5567 goto do_lxx;
5568 case 0x1b2: /* lss Gv */
5569 op = R_SS;
5570 goto do_lxx;
5571 case 0x1b4: /* lfs Gv */
5572 op = R_FS;
5573 goto do_lxx;
5574 case 0x1b5: /* lgs Gv */
5575 op = R_GS;
5576 do_lxx:
5577 ot = dflag != MO_16 ? MO_32 : MO_16;
5578 modrm = cpu_ldub_code(env, s->pc++);
5579 reg = ((modrm >> 3) & 7) | rex_r;
5580 mod = (modrm >> 6) & 3;
5581 if (mod == 3)
5582 goto illegal_op;
5583 gen_lea_modrm(env, s, modrm);
5584 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5585 gen_add_A0_im(s, 1 << ot);
5586 /* load the segment first to handle exceptions properly */
5587 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5588 gen_movl_seg_T0(s, op);
5589 /* then put the data */
5590 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5591 if (s->is_jmp) {
5592 gen_jmp_im(s->pc - s->cs_base);
5593 gen_eob(s);
5595 break;
5597 /************************/
5598 /* shifts */
5599 case 0xc0:
5600 case 0xc1:
5601 /* shift Ev,Ib */
5602 shift = 2;
5603 grp2:
5605 ot = mo_b_d(b, dflag);
5606 modrm = cpu_ldub_code(env, s->pc++);
5607 mod = (modrm >> 6) & 3;
5608 op = (modrm >> 3) & 7;
5610 if (mod != 3) {
5611 if (shift == 2) {
5612 s->rip_offset = 1;
5614 gen_lea_modrm(env, s, modrm);
5615 opreg = OR_TMP0;
5616 } else {
5617 opreg = (modrm & 7) | REX_B(s);
5620 /* simpler op */
5621 if (shift == 0) {
5622 gen_shift(s, op, ot, opreg, OR_ECX);
5623 } else {
5624 if (shift == 2) {
5625 shift = cpu_ldub_code(env, s->pc++);
5627 gen_shifti(s, op, ot, opreg, shift);
5630 break;
5631 case 0xd0:
5632 case 0xd1:
5633 /* shift Ev,1 */
5634 shift = 1;
5635 goto grp2;
5636 case 0xd2:
5637 case 0xd3:
5638 /* shift Ev,cl */
5639 shift = 0;
5640 goto grp2;
5642 case 0x1a4: /* shld imm */
5643 op = 0;
5644 shift = 1;
5645 goto do_shiftd;
5646 case 0x1a5: /* shld cl */
5647 op = 0;
5648 shift = 0;
5649 goto do_shiftd;
5650 case 0x1ac: /* shrd imm */
5651 op = 1;
5652 shift = 1;
5653 goto do_shiftd;
5654 case 0x1ad: /* shrd cl */
5655 op = 1;
5656 shift = 0;
5657 do_shiftd:
5658 ot = dflag;
5659 modrm = cpu_ldub_code(env, s->pc++);
5660 mod = (modrm >> 6) & 3;
5661 rm = (modrm & 7) | REX_B(s);
5662 reg = ((modrm >> 3) & 7) | rex_r;
5663 if (mod != 3) {
5664 gen_lea_modrm(env, s, modrm);
5665 opreg = OR_TMP0;
5666 } else {
5667 opreg = rm;
5669 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5671 if (shift) {
5672 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5673 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5674 tcg_temp_free(imm);
5675 } else {
5676 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5678 break;
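/* shld/shrd shift a double-width value built from two registers: bits
   shifted out of the destination are refilled from the second operand,
   with the count taken from CL or an immediate.  32-bit sketch assuming
   the count is already masked to 1..31 (hypothetical helper names): */
#if 0 /* illustrative only */
static uint32_t shld32(uint32_t dest, uint32_t fill, unsigned count)
{
    return (dest << count) | (fill >> (32 - count));
}
static uint32_t shrd32(uint32_t dest, uint32_t fill, unsigned count)
{
    return (dest >> count) | (fill << (32 - count));
}
#endif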
5680 /************************/
5681 /* floats */
5682 case 0xd8 ... 0xdf:
5683 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5684 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5685 /* XXX: what to do on an illegal op? */
5686 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5687 break;
5689 modrm = cpu_ldub_code(env, s->pc++);
5690 mod = (modrm >> 6) & 3;
5691 rm = modrm & 7;
5692 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5693 if (mod != 3) {
5694 /* memory op */
5695 gen_lea_modrm(env, s, modrm);
5696 switch(op) {
5697 case 0x00 ... 0x07: /* fxxxs */
5698 case 0x10 ... 0x17: /* fixxxl */
5699 case 0x20 ... 0x27: /* fxxxl */
5700 case 0x30 ... 0x37: /* fixxx */
5702 int op1;
5703 op1 = op & 7;
5705 switch(op >> 4) {
5706 case 0:
5707 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5708 s->mem_index, MO_LEUL);
5709 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5710 break;
5711 case 1:
5712 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5713 s->mem_index, MO_LEUL);
5714 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5715 break;
5716 case 2:
5717 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5718 s->mem_index, MO_LEQ);
5719 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5720 break;
5721 case 3:
5722 default:
5723 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5724 s->mem_index, MO_LESW);
5725 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5726 break;
5729 gen_helper_fp_arith_ST0_FT0(op1);
5730 if (op1 == 3) {
5731 /* fcomp needs pop */
5732 gen_helper_fpop(cpu_env);
5735 break;
5736 case 0x08: /* flds */
5737 case 0x0a: /* fsts */
5738 case 0x0b: /* fstps */
5739 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5740 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5741 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5742 switch(op & 7) {
5743 case 0:
5744 switch(op >> 4) {
5745 case 0:
5746 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5747 s->mem_index, MO_LEUL);
5748 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5749 break;
5750 case 1:
5751 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5752 s->mem_index, MO_LEUL);
5753 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5754 break;
5755 case 2:
5756 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5757 s->mem_index, MO_LEQ);
5758 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5759 break;
5760 case 3:
5761 default:
5762 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5763 s->mem_index, MO_LESW);
5764 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5765 break;
5767 break;
5768 case 1:
5769 /* XXX: the corresponding CPUID bit (SSE3, for fisttp) must be tested! */
5770 switch(op >> 4) {
5771 case 1:
5772 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5773 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5774 s->mem_index, MO_LEUL);
5775 break;
5776 case 2:
5777 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5778 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5779 s->mem_index, MO_LEQ);
5780 break;
5781 case 3:
5782 default:
5783 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5784 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5785 s->mem_index, MO_LEUW);
5786 break;
5788 gen_helper_fpop(cpu_env);
5789 break;
5790 default:
5791 switch(op >> 4) {
5792 case 0:
5793 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5794 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5795 s->mem_index, MO_LEUL);
5796 break;
5797 case 1:
5798 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5799 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5800 s->mem_index, MO_LEUL);
5801 break;
5802 case 2:
5803 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5804 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5805 s->mem_index, MO_LEQ);
5806 break;
5807 case 3:
5808 default:
5809 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5810 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5811 s->mem_index, MO_LEUW);
5812 break;
5814 if ((op & 7) == 3)
5815 gen_helper_fpop(cpu_env);
5816 break;
5818 break;
5819 case 0x0c: /* fldenv mem */
5820 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5821 break;
5822 case 0x0d: /* fldcw mem */
5823 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5824 s->mem_index, MO_LEUW);
5825 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5826 break;
5827 case 0x0e: /* fnstenv mem */
5828 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5829 break;
5830 case 0x0f: /* fnstcw mem */
5831 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5832 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5833 s->mem_index, MO_LEUW);
5834 break;
5835 case 0x1d: /* fldt mem */
5836 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5837 break;
5838 case 0x1f: /* fstpt mem */
5839 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5840 gen_helper_fpop(cpu_env);
5841 break;
5842 case 0x2c: /* frstor mem */
5843 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5844 break;
5845 case 0x2e: /* fnsave mem */
5846 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5847 break;
5848 case 0x2f: /* fnstsw mem */
5849 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5850 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5851 s->mem_index, MO_LEUW);
5852 break;
5853 case 0x3c: /* fbld */
5854 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5855 break;
5856 case 0x3e: /* fbstp */
5857 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5858 gen_helper_fpop(cpu_env);
5859 break;
5860 case 0x3d: /* fildll */
5861 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5862 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5863 break;
5864 case 0x3f: /* fistpll */
5865 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5866 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5867 gen_helper_fpop(cpu_env);
5868 break;
5869 default:
5870 goto illegal_op;
5872 } else {
5873 /* register float ops */
5874 opreg = rm;
5876 switch(op) {
5877 case 0x08: /* fld sti */
5878 gen_helper_fpush(cpu_env);
5879 gen_helper_fmov_ST0_STN(cpu_env,
5880 tcg_const_i32((opreg + 1) & 7));
5881 break;
5882 case 0x09: /* fxchg sti */
5883 case 0x29: /* fxchg4 sti, undocumented op */
5884 case 0x39: /* fxchg7 sti, undocumented op */
5885 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5886 break;
5887 case 0x0a: /* grp d9/2 */
5888 switch(rm) {
5889 case 0: /* fnop */
5890 /* check exceptions (FreeBSD FPU probe) */
5891 gen_helper_fwait(cpu_env);
5892 break;
5893 default:
5894 goto illegal_op;
5896 break;
5897 case 0x0c: /* grp d9/4 */
5898 switch(rm) {
5899 case 0: /* fchs */
5900 gen_helper_fchs_ST0(cpu_env);
5901 break;
5902 case 1: /* fabs */
5903 gen_helper_fabs_ST0(cpu_env);
5904 break;
5905 case 4: /* ftst */
5906 gen_helper_fldz_FT0(cpu_env);
5907 gen_helper_fcom_ST0_FT0(cpu_env);
5908 break;
5909 case 5: /* fxam */
5910 gen_helper_fxam_ST0(cpu_env);
5911 break;
5912 default:
5913 goto illegal_op;
5915 break;
5916 case 0x0d: /* grp d9/5 */
5918 switch(rm) {
5919 case 0:
5920 gen_helper_fpush(cpu_env);
5921 gen_helper_fld1_ST0(cpu_env);
5922 break;
5923 case 1:
5924 gen_helper_fpush(cpu_env);
5925 gen_helper_fldl2t_ST0(cpu_env);
5926 break;
5927 case 2:
5928 gen_helper_fpush(cpu_env);
5929 gen_helper_fldl2e_ST0(cpu_env);
5930 break;
5931 case 3:
5932 gen_helper_fpush(cpu_env);
5933 gen_helper_fldpi_ST0(cpu_env);
5934 break;
5935 case 4:
5936 gen_helper_fpush(cpu_env);
5937 gen_helper_fldlg2_ST0(cpu_env);
5938 break;
5939 case 5:
5940 gen_helper_fpush(cpu_env);
5941 gen_helper_fldln2_ST0(cpu_env);
5942 break;
5943 case 6:
5944 gen_helper_fpush(cpu_env);
5945 gen_helper_fldz_ST0(cpu_env);
5946 break;
5947 default:
5948 goto illegal_op;
5951 break;
5952 case 0x0e: /* grp d9/6 */
5953 switch(rm) {
5954 case 0: /* f2xm1 */
5955 gen_helper_f2xm1(cpu_env);
5956 break;
5957 case 1: /* fyl2x */
5958 gen_helper_fyl2x(cpu_env);
5959 break;
5960 case 2: /* fptan */
5961 gen_helper_fptan(cpu_env);
5962 break;
5963 case 3: /* fpatan */
5964 gen_helper_fpatan(cpu_env);
5965 break;
5966 case 4: /* fxtract */
5967 gen_helper_fxtract(cpu_env);
5968 break;
5969 case 5: /* fprem1 */
5970 gen_helper_fprem1(cpu_env);
5971 break;
5972 case 6: /* fdecstp */
5973 gen_helper_fdecstp(cpu_env);
5974 break;
5975 default:
5976 case 7: /* fincstp */
5977 gen_helper_fincstp(cpu_env);
5978 break;
5980 break;
5981 case 0x0f: /* grp d9/7 */
5982 switch(rm) {
5983 case 0: /* fprem */
5984 gen_helper_fprem(cpu_env);
5985 break;
5986 case 1: /* fyl2xp1 */
5987 gen_helper_fyl2xp1(cpu_env);
5988 break;
5989 case 2: /* fsqrt */
5990 gen_helper_fsqrt(cpu_env);
5991 break;
5992 case 3: /* fsincos */
5993 gen_helper_fsincos(cpu_env);
5994 break;
5995 case 5: /* fscale */
5996 gen_helper_fscale(cpu_env);
5997 break;
5998 case 4: /* frndint */
5999 gen_helper_frndint(cpu_env);
6000 break;
6001 case 6: /* fsin */
6002 gen_helper_fsin(cpu_env);
6003 break;
6004 default:
6005 case 7: /* fcos */
6006 gen_helper_fcos(cpu_env);
6007 break;
6009 break;
6010 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6011 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6012 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6014 int op1;
6016 op1 = op & 7;
6017 if (op >= 0x20) {
6018 gen_helper_fp_arith_STN_ST0(op1, opreg);
6019 if (op >= 0x30)
6020 gen_helper_fpop(cpu_env);
6021 } else {
6022 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6023 gen_helper_fp_arith_ST0_FT0(op1);
6026 break;
6027 case 0x02: /* fcom */
6028 case 0x22: /* fcom2, undocumented op */
6029 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6030 gen_helper_fcom_ST0_FT0(cpu_env);
6031 break;
6032 case 0x03: /* fcomp */
6033 case 0x23: /* fcomp3, undocumented op */
6034 case 0x32: /* fcomp5, undocumented op */
6035 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6036 gen_helper_fcom_ST0_FT0(cpu_env);
6037 gen_helper_fpop(cpu_env);
6038 break;
6039 case 0x15: /* da/5 */
6040 switch(rm) {
6041 case 1: /* fucompp */
6042 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6043 gen_helper_fucom_ST0_FT0(cpu_env);
6044 gen_helper_fpop(cpu_env);
6045 gen_helper_fpop(cpu_env);
6046 break;
6047 default:
6048 goto illegal_op;
6050 break;
6051 case 0x1c:
6052 switch(rm) {
6053 case 0: /* feni (287 only, just do nop here) */
6054 break;
6055 case 1: /* fdisi (287 only, just do nop here) */
6056 break;
6057 case 2: /* fclex */
6058 gen_helper_fclex(cpu_env);
6059 break;
6060 case 3: /* fninit */
6061 gen_helper_fninit(cpu_env);
6062 break;
6063 case 4: /* fsetpm (287 only, just do nop here) */
6064 break;
6065 default:
6066 goto illegal_op;
6068 break;
6069 case 0x1d: /* fucomi */
6070 if (!(s->cpuid_features & CPUID_CMOV)) {
6071 goto illegal_op;
6073 gen_update_cc_op(s);
6074 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6075 gen_helper_fucomi_ST0_FT0(cpu_env);
6076 set_cc_op(s, CC_OP_EFLAGS);
6077 break;
6078 case 0x1e: /* fcomi */
6079 if (!(s->cpuid_features & CPUID_CMOV)) {
6080 goto illegal_op;
6082 gen_update_cc_op(s);
6083 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6084 gen_helper_fcomi_ST0_FT0(cpu_env);
6085 set_cc_op(s, CC_OP_EFLAGS);
6086 break;
6087 case 0x28: /* ffree sti */
6088 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6089 break;
6090 case 0x2a: /* fst sti */
6091 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6092 break;
6093 case 0x2b: /* fstp sti */
6094 case 0x0b: /* fstp1 sti, undocumented op */
6095 case 0x3a: /* fstp8 sti, undocumented op */
6096 case 0x3b: /* fstp9 sti, undocumented op */
6097 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6098 gen_helper_fpop(cpu_env);
6099 break;
6100 case 0x2c: /* fucom st(i) */
6101 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6102 gen_helper_fucom_ST0_FT0(cpu_env);
6103 break;
6104 case 0x2d: /* fucomp st(i) */
6105 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6106 gen_helper_fucom_ST0_FT0(cpu_env);
6107 gen_helper_fpop(cpu_env);
6108 break;
6109 case 0x33: /* de/3 */
6110 switch(rm) {
6111 case 1: /* fcompp */
6112 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6113 gen_helper_fcom_ST0_FT0(cpu_env);
6114 gen_helper_fpop(cpu_env);
6115 gen_helper_fpop(cpu_env);
6116 break;
6117 default:
6118 goto illegal_op;
6120 break;
6121 case 0x38: /* ffreep sti, undocumented op */
6122 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6123 gen_helper_fpop(cpu_env);
6124 break;
6125 case 0x3c: /* df/4 */
6126 switch(rm) {
6127 case 0:
6128 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6129 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6130 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
6131 break;
6132 default:
6133 goto illegal_op;
6135 break;
6136 case 0x3d: /* fucomip */
6137 if (!(s->cpuid_features & CPUID_CMOV)) {
6138 goto illegal_op;
6140 gen_update_cc_op(s);
6141 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6142 gen_helper_fucomi_ST0_FT0(cpu_env);
6143 gen_helper_fpop(cpu_env);
6144 set_cc_op(s, CC_OP_EFLAGS);
6145 break;
6146 case 0x3e: /* fcomip */
6147 if (!(s->cpuid_features & CPUID_CMOV)) {
6148 goto illegal_op;
6150 gen_update_cc_op(s);
6151 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6152 gen_helper_fcomi_ST0_FT0(cpu_env);
6153 gen_helper_fpop(cpu_env);
6154 set_cc_op(s, CC_OP_EFLAGS);
6155 break;
6156 case 0x10 ... 0x13: /* fcmovxx */
6157 case 0x18 ... 0x1b:
6159 int op1;
6160 TCGLabel *l1;
6161 static const uint8_t fcmov_cc[8] = {
6162 (JCC_B << 1),
6163 (JCC_Z << 1),
6164 (JCC_BE << 1),
6165 (JCC_P << 1),
6168 if (!(s->cpuid_features & CPUID_CMOV)) {
6169 goto illegal_op;
6171 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6172 l1 = gen_new_label();
6173 gen_jcc1_noeob(s, op1, l1);
6174 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6175 gen_set_label(l1);
6177 break;
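/* A rough sketch of the fcmov lowering above (illustrative only, not
   additional generated code): the low bit of op1 folds in the negated
   condition, so the branch skips a single register move when the
   architectural condition fails.

       op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
       if (cond(op1))            -- gen_jcc1_noeob branches to l1
           goto l1;
       ST0 = ST(opreg);          -- gen_helper_fmov_ST0_STN
   l1: ;
*/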
6178 default:
6179 goto illegal_op;
6182 break;
6183 /************************/
6184 /* string ops */
6186 case 0xa4: /* movsS */
6187 case 0xa5:
6188 ot = mo_b_d(b, dflag);
6189 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6190 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6191 } else {
6192 gen_movs(s, ot);
6194 break;
6196 case 0xaa: /* stosS */
6197 case 0xab:
6198 ot = mo_b_d(b, dflag);
6199 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6200 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6201 } else {
6202 gen_stos(s, ot);
6204 break;
6205 case 0xac: /* lodsS */
6206 case 0xad:
6207 ot = mo_b_d(b, dflag);
6208 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6209 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6210 } else {
6211 gen_lods(s, ot);
6213 break;
6214 case 0xae: /* scasS */
6215 case 0xaf:
6216 ot = mo_b_d(b, dflag);
6217 if (prefixes & PREFIX_REPNZ) {
6218 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6219 } else if (prefixes & PREFIX_REPZ) {
6220 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6221 } else {
6222 gen_scas(s, ot);
6224 break;
6226 case 0xa6: /* cmpsS */
6227 case 0xa7:
6228 ot = mo_b_d(b, dflag);
6229 if (prefixes & PREFIX_REPNZ) {
6230 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6231 } else if (prefixes & PREFIX_REPZ) {
6232 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6233 } else {
6234 gen_cmps(s, ot);
6236 break;
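/* The string instructions above all share one dispatch shape; a compact
   sketch with the names used in this file (cur_eip/next_eip are the
   pc_start and s->pc offsets passed above):

       ot = mo_b_d(b, dflag);                       -- size from opcode bit 0
       if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))
           gen_repz_<op>(s, ot, cur_eip, next_eip); -- ECX-counted loop
       else
           gen_<op>(s, ot);                         -- single iteration

   scas and cmps additionally pass 1 for repnz and 0 for repz so the
   generated loop tests ZF with the right sense. */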
6237 case 0x6c: /* insS */
6238 case 0x6d:
6239 ot = mo_b_d32(b, dflag);
6240 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6241 gen_check_io(s, ot, pc_start - s->cs_base,
6242 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6243 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6244 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6245 } else {
6246 gen_ins(s, ot);
6247 if (s->tb->cflags & CF_USE_ICOUNT) {
6248 gen_jmp(s, s->pc - s->cs_base);
6251 break;
6252 case 0x6e: /* outsS */
6253 case 0x6f:
6254 ot = mo_b_d32(b, dflag);
6255 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6256 gen_check_io(s, ot, pc_start - s->cs_base,
6257 svm_is_rep(prefixes) | 4);
6258 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6259 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6260 } else {
6261 gen_outs(s, ot);
6262 if (s->tb->cflags & CF_USE_ICOUNT) {
6263 gen_jmp(s, s->pc - s->cs_base);
6266 break;
6268 /************************/
6269 /* port I/O */
6271 case 0xe4:
6272 case 0xe5:
6273 ot = mo_b_d32(b, dflag);
6274 val = cpu_ldub_code(env, s->pc++);
6275 tcg_gen_movi_tl(cpu_T[0], val);
6276 gen_check_io(s, ot, pc_start - s->cs_base,
6277 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6278 if (s->tb->cflags & CF_USE_ICOUNT) {
6279 gen_io_start();
6281 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6282 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6283 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6284 gen_bpt_io(s, cpu_tmp2_i32, ot);
6285 if (s->tb->cflags & CF_USE_ICOUNT) {
6286 gen_io_end();
6287 gen_jmp(s, s->pc - s->cs_base);
6289 break;
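/* The in/out cases all repeat one icount bracketing pattern; sketch:

       if (s->tb->cflags & CF_USE_ICOUNT)
           gen_io_start();
       ... emit the in/out helper and register move ...
       if (s->tb->cflags & CF_USE_ICOUNT) {
           gen_io_end();
           gen_jmp(s, s->pc - s->cs_base);  -- end the TB right after
       }                                    -- the I/O side effect

   so that with icount enabled the virtual clock can be adjusted
   around the device access. */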
6290 case 0xe6:
6291 case 0xe7:
6292 ot = mo_b_d32(b, dflag);
6293 val = cpu_ldub_code(env, s->pc++);
6294 tcg_gen_movi_tl(cpu_T[0], val);
6295 gen_check_io(s, ot, pc_start - s->cs_base,
6296 svm_is_rep(prefixes));
6297 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6299 if (s->tb->cflags & CF_USE_ICOUNT) {
6300 gen_io_start();
6302 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6303 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6304 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6305 gen_bpt_io(s, cpu_tmp2_i32, ot);
6306 if (s->tb->cflags & CF_USE_ICOUNT) {
6307 gen_io_end();
6308 gen_jmp(s, s->pc - s->cs_base);
6310 break;
6311 case 0xec:
6312 case 0xed:
6313 ot = mo_b_d32(b, dflag);
6314 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6315 gen_check_io(s, ot, pc_start - s->cs_base,
6316 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6317 if (s->tb->cflags & CF_USE_ICOUNT) {
6318 gen_io_start();
6320 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6321 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6322 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6323 gen_bpt_io(s, cpu_tmp2_i32, ot);
6324 if (s->tb->cflags & CF_USE_ICOUNT) {
6325 gen_io_end();
6326 gen_jmp(s, s->pc - s->cs_base);
6328 break;
6329 case 0xee:
6330 case 0xef:
6331 ot = mo_b_d32(b, dflag);
6332 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6333 gen_check_io(s, ot, pc_start - s->cs_base,
6334 svm_is_rep(prefixes));
6335 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6337 if (s->tb->cflags & CF_USE_ICOUNT) {
6338 gen_io_start();
6340 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6341 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6342 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6343 gen_bpt_io(s, cpu_tmp2_i32, ot);
6344 if (s->tb->cflags & CF_USE_ICOUNT) {
6345 gen_io_end();
6346 gen_jmp(s, s->pc - s->cs_base);
6348 break;
6350 /************************/
6351 /* control */
6352 case 0xc2: /* ret im */
6353 val = cpu_ldsw_code(env, s->pc);
6354 s->pc += 2;
6355 ot = gen_pop_T0(s);
6356 gen_stack_update(s, val + (1 << ot));
6357 /* Note that gen_pop_T0 uses a zero-extending load. */
6358 gen_op_jmp_v(cpu_T[0]);
6359 gen_eob(s);
6360 break;
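/* Worked example for the `ret imm16` case above, assuming a 32-bit
   stack (ot == MO_32) and imm = 8: gen_pop_T0 loads the return
   address, and gen_stack_update adds val + (1 << ot) = 8 + 4 = 12 to
   ESP, releasing the return slot and the callee-popped arguments in
   one adjustment. */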
6361 case 0xc3: /* ret */
6362 ot = gen_pop_T0(s);
6363 gen_pop_update(s, ot);
6364 /* Note that gen_pop_T0 uses a zero-extending load. */
6365 gen_op_jmp_v(cpu_T[0]);
6366 gen_eob(s);
6367 break;
6368 case 0xca: /* lret im */
6369 val = cpu_ldsw_code(env, s->pc);
6370 s->pc += 2;
6371 do_lret:
6372 if (s->pe && !s->vm86) {
6373 gen_update_cc_op(s);
6374 gen_jmp_im(pc_start - s->cs_base);
6375 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6376 tcg_const_i32(val));
6377 } else {
6378 gen_stack_A0(s);
6379 /* pop offset */
6380 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6381 /* NOTE: keeping EIP updated is not a problem in case of
6382 exception */
6383 gen_op_jmp_v(cpu_T[0]);
6384 /* pop selector */
6385 gen_op_addl_A0_im(1 << dflag);
6386 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6387 gen_op_movl_seg_T0_vm(R_CS);
6388 /* add stack offset */
6389 gen_stack_update(s, val + (2 << dflag));
6391 gen_eob(s);
6392 break;
6393 case 0xcb: /* lret */
6394 val = 0;
6395 goto do_lret;
6396 case 0xcf: /* iret */
6397 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6398 if (!s->pe) {
6399 /* real mode */
6400 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6401 set_cc_op(s, CC_OP_EFLAGS);
6402 } else if (s->vm86) {
6403 if (s->iopl != 3) {
6404 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6405 } else {
6406 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6407 set_cc_op(s, CC_OP_EFLAGS);
6409 } else {
6410 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6411 tcg_const_i32(s->pc - s->cs_base));
6412 set_cc_op(s, CC_OP_EFLAGS);
6414 gen_eob(s);
6415 break;
6416 case 0xe8: /* call im */
6418 if (dflag != MO_16) {
6419 tval = (int32_t)insn_get(env, s, MO_32);
6420 } else {
6421 tval = (int16_t)insn_get(env, s, MO_16);
6423 next_eip = s->pc - s->cs_base;
6424 tval += next_eip;
6425 if (dflag == MO_16) {
6426 tval &= 0xffff;
6427 } else if (!CODE64(s)) {
6428 tval &= 0xffffffff;
6430 tcg_gen_movi_tl(cpu_T[0], next_eip);
6431 gen_push_v(s, cpu_T[0]);
6432 gen_jmp(s, tval);
6434 break;
6435 case 0x9a: /* lcall im */
6437 unsigned int selector, offset;
6439 if (CODE64(s))
6440 goto illegal_op;
6441 ot = dflag;
6442 offset = insn_get(env, s, ot);
6443 selector = insn_get(env, s, MO_16);
6445 tcg_gen_movi_tl(cpu_T[0], selector);
6446 tcg_gen_movi_tl(cpu_T[1], offset);
6448 goto do_lcall;
6449 case 0xe9: /* jmp im */
6450 if (dflag != MO_16) {
6451 tval = (int32_t)insn_get(env, s, MO_32);
6452 } else {
6453 tval = (int16_t)insn_get(env, s, MO_16);
6455 tval += s->pc - s->cs_base;
6456 if (dflag == MO_16) {
6457 tval &= 0xffff;
6458 } else if (!CODE64(s)) {
6459 tval &= 0xffffffff;
6461 gen_jmp(s, tval);
6462 break;
6463 case 0xea: /* ljmp im */
6465 unsigned int selector, offset;
6467 if (CODE64(s))
6468 goto illegal_op;
6469 ot = dflag;
6470 offset = insn_get(env, s, ot);
6471 selector = insn_get(env, s, MO_16);
6473 tcg_gen_movi_tl(cpu_T[0], selector);
6474 tcg_gen_movi_tl(cpu_T[1], offset);
6476 goto do_ljmp;
6477 case 0xeb: /* jmp Jb */
6478 tval = (int8_t)insn_get(env, s, MO_8);
6479 tval += s->pc - s->cs_base;
6480 if (dflag == MO_16) {
6481 tval &= 0xffff;
6483 gen_jmp(s, tval);
6484 break;
6485 case 0x70 ... 0x7f: /* jcc Jb */
6486 tval = (int8_t)insn_get(env, s, MO_8);
6487 goto do_jcc;
6488 case 0x180 ... 0x18f: /* jcc Jv */
6489 if (dflag != MO_16) {
6490 tval = (int32_t)insn_get(env, s, MO_32);
6491 } else {
6492 tval = (int16_t)insn_get(env, s, MO_16);
6494 do_jcc:
6495 next_eip = s->pc - s->cs_base;
6496 tval += next_eip;
6497 if (dflag == MO_16) {
6498 tval &= 0xffff;
6500 gen_jcc(s, b, tval, next_eip);
6501 break;
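/* Worked example of the relative-target arithmetic above with a
   16-bit operand size: if next_eip = 0xfff0 and the displacement is
   +0x20,

       tval = (0xfff0 + 0x20) & 0xffff = 0x0010

   i.e. the & 0xffff implements the architectural wrap within the
   segment; the jmp/call cases further up likewise mask with
   0xffffffff outside 64-bit code. */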
6503 case 0x190 ... 0x19f: /* setcc Gv */
6504 modrm = cpu_ldub_code(env, s->pc++);
6505 gen_setcc1(s, b, cpu_T[0]);
6506 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6507 break;
6508 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6509 if (!(s->cpuid_features & CPUID_CMOV)) {
6510 goto illegal_op;
6512 ot = dflag;
6513 modrm = cpu_ldub_code(env, s->pc++);
6514 reg = ((modrm >> 3) & 7) | rex_r;
6515 gen_cmovcc1(env, s, ot, b, modrm, reg);
6516 break;
6518 /************************/
6519 /* flags */
6520 case 0x9c: /* pushf */
6521 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6522 if (s->vm86 && s->iopl != 3) {
6523 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6524 } else {
6525 gen_update_cc_op(s);
6526 gen_helper_read_eflags(cpu_T[0], cpu_env);
6527 gen_push_v(s, cpu_T[0]);
6529 break;
6530 case 0x9d: /* popf */
6531 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6532 if (s->vm86 && s->iopl != 3) {
6533 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6534 } else {
6535 ot = gen_pop_T0(s);
6536 if (s->cpl == 0) {
6537 if (dflag != MO_16) {
6538 gen_helper_write_eflags(cpu_env, cpu_T[0],
6539 tcg_const_i32((TF_MASK | AC_MASK |
6540 ID_MASK | NT_MASK |
6541 IF_MASK |
6542 IOPL_MASK)));
6543 } else {
6544 gen_helper_write_eflags(cpu_env, cpu_T[0],
6545 tcg_const_i32((TF_MASK | AC_MASK |
6546 ID_MASK | NT_MASK |
6547 IF_MASK | IOPL_MASK)
6548 & 0xffff));
6550 } else {
6551 if (s->cpl <= s->iopl) {
6552 if (dflag != MO_16) {
6553 gen_helper_write_eflags(cpu_env, cpu_T[0],
6554 tcg_const_i32((TF_MASK |
6555 AC_MASK |
6556 ID_MASK |
6557 NT_MASK |
6558 IF_MASK)));
6559 } else {
6560 gen_helper_write_eflags(cpu_env, cpu_T[0],
6561 tcg_const_i32((TF_MASK |
6562 AC_MASK |
6563 ID_MASK |
6564 NT_MASK |
6565 IF_MASK)
6566 & 0xffff));
6568 } else {
6569 if (dflag != MO_16) {
6570 gen_helper_write_eflags(cpu_env, cpu_T[0],
6571 tcg_const_i32((TF_MASK | AC_MASK |
6572 ID_MASK | NT_MASK)));
6573 } else {
6574 gen_helper_write_eflags(cpu_env, cpu_T[0],
6575 tcg_const_i32((TF_MASK | AC_MASK |
6576 ID_MASK | NT_MASK)
6577 & 0xffff));
6581 gen_pop_update(s, ot);
6582 set_cc_op(s, CC_OP_EFLAGS);
6583 /* abort translation because TF/AC flag may change */
6584 gen_jmp_im(s->pc - s->cs_base);
6585 gen_eob(s);
6587 break;
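/* Summary of the POPF write masks selected above (a 16-bit POPF
   additionally truncates the mask to the low word):

       CPL == 0            TF | AC | ID | NT | IF | IOPL
       0 < CPL <= IOPL     TF | AC | ID | NT | IF
       CPL > IOPL          TF | AC | ID | NT

   i.e. IF is writable only with sufficient privilege and IOPL only at
   CPL 0; the TB is then ended because a TF or AC change affects how
   the following code must be translated. */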
6588 case 0x9e: /* sahf */
6589 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6590 goto illegal_op;
6591 gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
6592 gen_compute_eflags(s);
6593 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6594 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6595 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6596 break;
6597 case 0x9f: /* lahf */
6598 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6599 goto illegal_op;
6600 gen_compute_eflags(s);
6601 /* Note: gen_compute_eflags() only gives the condition codes */
6602 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6603 gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
6604 break;
6605 case 0xf5: /* cmc */
6606 gen_compute_eflags(s);
6607 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6608 break;
6609 case 0xf8: /* clc */
6610 gen_compute_eflags(s);
6611 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6612 break;
6613 case 0xf9: /* stc */
6614 gen_compute_eflags(s);
6615 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6616 break;
6617 case 0xfc: /* cld */
6618 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6619 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6620 break;
6621 case 0xfd: /* std */
6622 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6623 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6624 break;
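/* df is stored in env as +1/-1 rather than as a flag bit; a sketch of
   why (assuming the Dshift idiom used by the string helpers in this
   file): the per-iteration pointer step can then be formed without a
   branch as

       step = df << ot;   -- e.g. -1 << MO_32 = -4 for a backward movsd
*/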
6626 /************************/
6627 /* bit operations */
6628 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6629 ot = dflag;
6630 modrm = cpu_ldub_code(env, s->pc++);
6631 op = (modrm >> 3) & 7;
6632 mod = (modrm >> 6) & 3;
6633 rm = (modrm & 7) | REX_B(s);
6634 if (mod != 3) {
6635 s->rip_offset = 1;
6636 gen_lea_modrm(env, s, modrm);
6637 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6638 } else {
6639 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6641 /* load shift */
6642 val = cpu_ldub_code(env, s->pc++);
6643 tcg_gen_movi_tl(cpu_T[1], val);
6644 if (op < 4)
6645 goto illegal_op;
6646 op -= 4;
6647 goto bt_op;
6648 case 0x1a3: /* bt Gv, Ev */
6649 op = 0;
6650 goto do_btx;
6651 case 0x1ab: /* bts */
6652 op = 1;
6653 goto do_btx;
6654 case 0x1b3: /* btr */
6655 op = 2;
6656 goto do_btx;
6657 case 0x1bb: /* btc */
6658 op = 3;
6659 do_btx:
6660 ot = dflag;
6661 modrm = cpu_ldub_code(env, s->pc++);
6662 reg = ((modrm >> 3) & 7) | rex_r;
6663 mod = (modrm >> 6) & 3;
6664 rm = (modrm & 7) | REX_B(s);
6665 gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
6666 if (mod != 3) {
6667 gen_lea_modrm(env, s, modrm);
6668 /* specific case: we need to add a displacement */
6669 gen_exts(ot, cpu_T[1]);
6670 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6671 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6672 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6673 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6674 } else {
6675 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6677 bt_op:
6678 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6679 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6680 switch(op) {
6681 case 0:
6682 break;
6683 case 1:
6684 tcg_gen_movi_tl(cpu_tmp0, 1);
6685 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6686 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6687 break;
6688 case 2:
6689 tcg_gen_movi_tl(cpu_tmp0, 1);
6690 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6691 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6692 break;
6693 default:
6694 case 3:
6695 tcg_gen_movi_tl(cpu_tmp0, 1);
6696 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6697 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6698 break;
6700 if (op != 0) {
6701 if (mod != 3) {
6702 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6703 } else {
6704 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
6708 /* Delay all CC updates until after the store above. Note that
6709 C is the result of the test, Z is unchanged, and the others
6710 are all undefined. */
6711 switch (s->cc_op) {
6712 case CC_OP_MULB ... CC_OP_MULQ:
6713 case CC_OP_ADDB ... CC_OP_ADDQ:
6714 case CC_OP_ADCB ... CC_OP_ADCQ:
6715 case CC_OP_SUBB ... CC_OP_SUBQ:
6716 case CC_OP_SBBB ... CC_OP_SBBQ:
6717 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6718 case CC_OP_INCB ... CC_OP_INCQ:
6719 case CC_OP_DECB ... CC_OP_DECQ:
6720 case CC_OP_SHLB ... CC_OP_SHLQ:
6721 case CC_OP_SARB ... CC_OP_SARQ:
6722 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6723 /* Z was going to be computed from the non-zero status of CC_DST.
6724 We can get that same Z value (and the new C value) by leaving
6725 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6726 same width. */
6727 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6728 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6729 break;
6730 default:
6731 /* Otherwise, generate EFLAGS and replace the C bit. */
6732 gen_compute_eflags(s);
6733 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6734 ctz32(CC_C), 1);
6735 break;
6737 break;
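/* Sketch of the bit-test lowering above for a memory operand: the bit
   offset first displaces the address by whole operand-sized words,
   then its low bits select within one word:

       A0 += (sext(T1) >> (3 + ot)) << ot;   -- signed word displacement
       T1 &= (1 << (3 + ot)) - 1;            -- bit index within word
       CF  = (T0 >> T1) & 1;                 -- kept in cpu_tmp4

   bts/btr/btc then OR/ANDC/XOR T0 with (1 << T1) before storing. */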
6738 case 0x1bc: /* bsf / tzcnt */
6739 case 0x1bd: /* bsr / lzcnt */
6740 ot = dflag;
6741 modrm = cpu_ldub_code(env, s->pc++);
6742 reg = ((modrm >> 3) & 7) | rex_r;
6743 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6744 gen_extu(ot, cpu_T[0]);
6746 /* Note that lzcnt and tzcnt are in different extensions. */
6747 if ((prefixes & PREFIX_REPZ)
6748 && (b & 1
6749 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6750 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6751 int size = 8 << ot;
6752 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6753 if (b & 1) {
6754 /* For lzcnt, reduce the target_ulong result by the
6755 number of zeros that we expect to find at the top. */
6756 gen_helper_clz(cpu_T[0], cpu_T[0]);
6757 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6758 } else {
6759 /* For tzcnt, a zero input must return the operand size:
6760 force all bits outside the operand size to 1. */
6761 target_ulong mask = (target_ulong)-2 << (size - 1);
6762 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6763 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6765 /* For lzcnt/tzcnt, C and Z bits are defined and are
6766 related to the result. */
6767 gen_op_update1_cc();
6768 set_cc_op(s, CC_OP_BMILGB + ot);
6769 } else {
6770 /* For bsr/bsf, only the Z bit is defined and it is related
6771 to the input and not the result. */
6772 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6773 set_cc_op(s, CC_OP_LOGICB + ot);
6774 if (b & 1) {
6775 /* For bsr, return the bit index of the first 1 bit,
6776 not the count of leading zeros. */
6777 gen_helper_clz(cpu_T[0], cpu_T[0]);
6778 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6779 } else {
6780 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6782 /* ??? The manual says that the output is undefined when the
6783 input is zero, but real hardware leaves it unchanged, and
6784 real programs appear to depend on that. */
6785 tcg_gen_movi_tl(cpu_tmp0, 0);
6786 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6787 cpu_regs[reg], cpu_T[0]);
6789 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
6790 break;
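/* Worked example of the tzcnt masking above, for ot == MO_16
   (size == 16):

       mask = (target_ulong)-2 << 15  =  ...ffff0000

   OR-ing the input with mask plants a guaranteed 1 at bit 16 while
   leaving bits 0..15 intact, so a zero 16-bit input yields ctz == 16,
   the architectural tzcnt result for zero. */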
6791 /************************/
6792 /* bcd */
6793 case 0x27: /* daa */
6794 if (CODE64(s))
6795 goto illegal_op;
6796 gen_update_cc_op(s);
6797 gen_helper_daa(cpu_env);
6798 set_cc_op(s, CC_OP_EFLAGS);
6799 break;
6800 case 0x2f: /* das */
6801 if (CODE64(s))
6802 goto illegal_op;
6803 gen_update_cc_op(s);
6804 gen_helper_das(cpu_env);
6805 set_cc_op(s, CC_OP_EFLAGS);
6806 break;
6807 case 0x37: /* aaa */
6808 if (CODE64(s))
6809 goto illegal_op;
6810 gen_update_cc_op(s);
6811 gen_helper_aaa(cpu_env);
6812 set_cc_op(s, CC_OP_EFLAGS);
6813 break;
6814 case 0x3f: /* aas */
6815 if (CODE64(s))
6816 goto illegal_op;
6817 gen_update_cc_op(s);
6818 gen_helper_aas(cpu_env);
6819 set_cc_op(s, CC_OP_EFLAGS);
6820 break;
6821 case 0xd4: /* aam */
6822 if (CODE64(s))
6823 goto illegal_op;
6824 val = cpu_ldub_code(env, s->pc++);
6825 if (val == 0) {
6826 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6827 } else {
6828 gen_helper_aam(cpu_env, tcg_const_i32(val));
6829 set_cc_op(s, CC_OP_LOGICB);
6831 break;
6832 case 0xd5: /* aad */
6833 if (CODE64(s))
6834 goto illegal_op;
6835 val = cpu_ldub_code(env, s->pc++);
6836 gen_helper_aad(cpu_env, tcg_const_i32(val));
6837 set_cc_op(s, CC_OP_LOGICB);
6838 break;
6839 /************************/
6840 /* misc */
6841 case 0x90: /* nop */
6842 /* XXX: correct lock test for all insn */
6843 if (prefixes & PREFIX_LOCK) {
6844 goto illegal_op;
6846 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6847 if (REX_B(s)) {
6848 goto do_xchg_reg_eax;
6850 if (prefixes & PREFIX_REPZ) {
6851 gen_update_cc_op(s);
6852 gen_jmp_im(pc_start - s->cs_base);
6853 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6854 s->is_jmp = DISAS_TB_JUMP;
6856 break;
6857 case 0x9b: /* fwait */
6858 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6859 (HF_MP_MASK | HF_TS_MASK)) {
6860 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6861 } else {
6862 gen_helper_fwait(cpu_env);
6864 break;
6865 case 0xcc: /* int3 */
6866 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6867 break;
6868 case 0xcd: /* int N */
6869 val = cpu_ldub_code(env, s->pc++);
6870 if (s->vm86 && s->iopl != 3) {
6871 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6872 } else {
6873 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6875 break;
6876 case 0xce: /* into */
6877 if (CODE64(s))
6878 goto illegal_op;
6879 gen_update_cc_op(s);
6880 gen_jmp_im(pc_start - s->cs_base);
6881 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6882 break;
6883 #ifdef WANT_ICEBP
6884 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6885 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6886 #if 1
6887 gen_debug(s, pc_start - s->cs_base);
6888 #else
6889 /* start debug */
6890 tb_flush(CPU(x86_env_get_cpu(env)));
6891 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6892 #endif
6893 break;
6894 #endif
6895 case 0xfa: /* cli */
6896 if (!s->vm86) {
6897 if (s->cpl <= s->iopl) {
6898 gen_helper_cli(cpu_env);
6899 } else {
6900 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6902 } else {
6903 if (s->iopl == 3) {
6904 gen_helper_cli(cpu_env);
6905 } else {
6906 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6909 break;
6910 case 0xfb: /* sti */
6911 if (!s->vm86) {
6912 if (s->cpl <= s->iopl) {
6913 gen_sti:
6914 gen_helper_sti(cpu_env);
6915 /* interrupts are enabled only after the first insn following sti */
6916 /* If several consecutive instructions inhibit interrupts, only
6917 the _first_ one actually does it */
6918 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6919 gen_helper_set_inhibit_irq(cpu_env);
6920 /* give a chance to handle pending irqs */
6921 gen_jmp_im(s->pc - s->cs_base);
6922 gen_eob(s);
6923 } else {
6924 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6926 } else {
6927 if (s->iopl == 3) {
6928 goto gen_sti;
6929 } else {
6930 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6933 break;
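/* Privilege summary for the cli/sti cases above:

       protected mode:  allowed iff CPL <= IOPL
       vm86 mode:       allowed iff IOPL == 3
       otherwise:       #GP(0)

   sti additionally sets the one-insn irq inhibit and ends the TB so a
   pending interrupt can be taken at the right boundary. */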
6934 case 0x62: /* bound */
6935 if (CODE64(s))
6936 goto illegal_op;
6937 ot = dflag;
6938 modrm = cpu_ldub_code(env, s->pc++);
6939 reg = (modrm >> 3) & 7;
6940 mod = (modrm >> 6) & 3;
6941 if (mod == 3)
6942 goto illegal_op;
6943 gen_op_mov_v_reg(ot, cpu_T[0], reg);
6944 gen_lea_modrm(env, s, modrm);
6945 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6946 if (ot == MO_16) {
6947 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6948 } else {
6949 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6951 break;
6952 case 0x1c8 ... 0x1cf: /* bswap reg */
6953 reg = (b & 7) | REX_B(s);
6954 #ifdef TARGET_X86_64
6955 if (dflag == MO_64) {
6956 gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
6957 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6958 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
6959 } else
6960 #endif
6962 gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
6963 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6964 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6965 gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
6967 break;
6968 case 0xd6: /* salc */
6969 if (CODE64(s))
6970 goto illegal_op;
6971 gen_compute_eflags_c(s, cpu_T[0]);
6972 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6973 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
6974 break;
6975 case 0xe0: /* loopnz */
6976 case 0xe1: /* loopz */
6977 case 0xe2: /* loop */
6978 case 0xe3: /* jecxz */
6980 TCGLabel *l1, *l2, *l3;
6982 tval = (int8_t)insn_get(env, s, MO_8);
6983 next_eip = s->pc - s->cs_base;
6984 tval += next_eip;
6985 if (dflag == MO_16) {
6986 tval &= 0xffff;
6989 l1 = gen_new_label();
6990 l2 = gen_new_label();
6991 l3 = gen_new_label();
6992 b &= 3;
6993 switch(b) {
6994 case 0: /* loopnz */
6995 case 1: /* loopz */
6996 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6997 gen_op_jz_ecx(s->aflag, l3);
6998 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6999 break;
7000 case 2: /* loop */
7001 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7002 gen_op_jnz_ecx(s->aflag, l1);
7003 break;
7004 default:
7005 case 3: /* jcxz */
7006 gen_op_jz_ecx(s->aflag, l1);
7007 break;
7010 gen_set_label(l3);
7011 gen_jmp_im(next_eip);
7012 tcg_gen_br(l2);
7014 gen_set_label(l1);
7015 gen_jmp_im(tval);
7016 gen_set_label(l2);
7017 gen_eob(s);
7019 break;
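/* Control-flow sketch of the loop/jecxz lowering above:

       loopnz/loopz:  ECX -= 1; if (ECX == 0) goto l3;
                      if (ZF == b) goto l1;   -- b is 0/1, via gen_jcc1
       loop:          ECX -= 1; if (ECX != 0) goto l1;
       jecxz:         if (ECX == 0) goto l1;

   l3: fall-through path, jump to next_eip, then branch to l2;
   l1: taken path, jump to tval;
   l2: join, gen_eob(s). */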
7020 case 0x130: /* wrmsr */
7021 case 0x132: /* rdmsr */
7022 if (s->cpl != 0) {
7023 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7024 } else {
7025 gen_update_cc_op(s);
7026 gen_jmp_im(pc_start - s->cs_base);
7027 if (b & 2) {
7028 gen_helper_rdmsr(cpu_env);
7029 } else {
7030 gen_helper_wrmsr(cpu_env);
7033 break;
7034 case 0x131: /* rdtsc */
7035 gen_update_cc_op(s);
7036 gen_jmp_im(pc_start - s->cs_base);
7037 if (s->tb->cflags & CF_USE_ICOUNT) {
7038 gen_io_start();
7040 gen_helper_rdtsc(cpu_env);
7041 if (s->tb->cflags & CF_USE_ICOUNT) {
7042 gen_io_end();
7043 gen_jmp(s, s->pc - s->cs_base);
7045 break;
7046 case 0x133: /* rdpmc */
7047 gen_update_cc_op(s);
7048 gen_jmp_im(pc_start - s->cs_base);
7049 gen_helper_rdpmc(cpu_env);
7050 break;
7051 case 0x134: /* sysenter */
7052 /* For Intel, SYSENTER is valid in 64-bit mode */
7053 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7054 goto illegal_op;
7055 if (!s->pe) {
7056 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7057 } else {
7058 gen_helper_sysenter(cpu_env);
7059 gen_eob(s);
7061 break;
7062 case 0x135: /* sysexit */
7064 /* For Intel, SYSEXIT is valid in 64-bit mode */
7064 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7065 goto illegal_op;
7066 if (!s->pe) {
7067 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7068 } else {
7069 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7070 gen_eob(s);
7072 break;
7073 #ifdef TARGET_X86_64
7074 case 0x105: /* syscall */
7075 /* XXX: is it usable in real mode? */
7076 gen_update_cc_op(s);
7077 gen_jmp_im(pc_start - s->cs_base);
7078 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7079 gen_eob(s);
7080 break;
7081 case 0x107: /* sysret */
7082 if (!s->pe) {
7083 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7084 } else {
7085 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7086 /* condition codes are modified only in long mode */
7087 if (s->lma) {
7088 set_cc_op(s, CC_OP_EFLAGS);
7090 gen_eob(s);
7092 break;
7093 #endif
7094 case 0x1a2: /* cpuid */
7095 gen_update_cc_op(s);
7096 gen_jmp_im(pc_start - s->cs_base);
7097 gen_helper_cpuid(cpu_env);
7098 break;
7099 case 0xf4: /* hlt */
7100 if (s->cpl != 0) {
7101 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7102 } else {
7103 gen_update_cc_op(s);
7104 gen_jmp_im(pc_start - s->cs_base);
7105 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7106 s->is_jmp = DISAS_TB_JUMP;
7108 break;
7109 case 0x100:
7110 modrm = cpu_ldub_code(env, s->pc++);
7111 mod = (modrm >> 6) & 3;
7112 op = (modrm >> 3) & 7;
7113 switch(op) {
7114 case 0: /* sldt */
7115 if (!s->pe || s->vm86)
7116 goto illegal_op;
7117 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7118 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7119 ot = mod == 3 ? dflag : MO_16;
7120 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7121 break;
7122 case 2: /* lldt */
7123 if (!s->pe || s->vm86)
7124 goto illegal_op;
7125 if (s->cpl != 0) {
7126 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7127 } else {
7128 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7129 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7130 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7131 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7133 break;
7134 case 1: /* str */
7135 if (!s->pe || s->vm86)
7136 goto illegal_op;
7137 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7138 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7139 ot = mod == 3 ? dflag : MO_16;
7140 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7141 break;
7142 case 3: /* ltr */
7143 if (!s->pe || s->vm86)
7144 goto illegal_op;
7145 if (s->cpl != 0) {
7146 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7147 } else {
7148 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7149 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7150 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7151 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7153 break;
7154 case 4: /* verr */
7155 case 5: /* verw */
7156 if (!s->pe || s->vm86)
7157 goto illegal_op;
7158 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7159 gen_update_cc_op(s);
7160 if (op == 4) {
7161 gen_helper_verr(cpu_env, cpu_T[0]);
7162 } else {
7163 gen_helper_verw(cpu_env, cpu_T[0]);
7165 set_cc_op(s, CC_OP_EFLAGS);
7166 break;
7167 default:
7168 goto illegal_op;
7170 break;
7171 case 0x101:
7172 modrm = cpu_ldub_code(env, s->pc++);
7173 mod = (modrm >> 6) & 3;
7174 op = (modrm >> 3) & 7;
7175 rm = modrm & 7;
7176 switch(op) {
7177 case 0: /* sgdt */
7178 if (mod == 3)
7179 goto illegal_op;
7180 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7181 gen_lea_modrm(env, s, modrm);
7182 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7183 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7184 gen_add_A0_im(s, 2);
7185 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7186 if (dflag == MO_16) {
7187 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7189 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7190 break;
7191 case 1:
7192 if (mod == 3) {
7193 switch (rm) {
7194 case 0: /* monitor */
7195 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7196 s->cpl != 0)
7197 goto illegal_op;
7198 gen_update_cc_op(s);
7199 gen_jmp_im(pc_start - s->cs_base);
7200 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7201 gen_extu(s->aflag, cpu_A0);
7202 gen_add_A0_ds_seg(s);
7203 gen_helper_monitor(cpu_env, cpu_A0);
7204 break;
7205 case 1: /* mwait */
7206 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7207 s->cpl != 0)
7208 goto illegal_op;
7209 gen_update_cc_op(s);
7210 gen_jmp_im(pc_start - s->cs_base);
7211 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7212 gen_eob(s);
7213 break;
7214 case 2: /* clac */
7215 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7216 s->cpl != 0) {
7217 goto illegal_op;
7219 gen_helper_clac(cpu_env);
7220 gen_jmp_im(s->pc - s->cs_base);
7221 gen_eob(s);
7222 break;
7223 case 3: /* stac */
7224 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7225 s->cpl != 0) {
7226 goto illegal_op;
7228 gen_helper_stac(cpu_env);
7229 gen_jmp_im(s->pc - s->cs_base);
7230 gen_eob(s);
7231 break;
7232 default:
7233 goto illegal_op;
7235 } else { /* sidt */
7236 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7237 gen_lea_modrm(env, s, modrm);
7238 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7239 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7240 gen_add_A0_im(s, 2);
7241 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7242 if (dflag == MO_16) {
7243 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7245 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7247 break;
7248 case 2: /* lgdt */
7249 case 3: /* lidt */
7250 if (mod == 3) {
7251 gen_update_cc_op(s);
7252 gen_jmp_im(pc_start - s->cs_base);
7253 switch(rm) {
7254 case 0: /* VMRUN */
7255 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7256 goto illegal_op;
7257 if (s->cpl != 0) {
7258 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7259 break;
7260 } else {
7261 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7262 tcg_const_i32(s->pc - pc_start));
7263 tcg_gen_exit_tb(0);
7264 s->is_jmp = DISAS_TB_JUMP;
7266 break;
7267 case 1: /* VMMCALL */
7268 if (!(s->flags & HF_SVME_MASK))
7269 goto illegal_op;
7270 gen_helper_vmmcall(cpu_env);
7271 break;
7272 case 2: /* VMLOAD */
7273 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7274 goto illegal_op;
7275 if (s->cpl != 0) {
7276 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7277 break;
7278 } else {
7279 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7281 break;
7282 case 3: /* VMSAVE */
7283 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7284 goto illegal_op;
7285 if (s->cpl != 0) {
7286 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7287 break;
7288 } else {
7289 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7291 break;
7292 case 4: /* STGI */
7293 if ((!(s->flags & HF_SVME_MASK) &&
7294 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7295 !s->pe)
7296 goto illegal_op;
7297 if (s->cpl != 0) {
7298 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7299 break;
7300 } else {
7301 gen_helper_stgi(cpu_env);
7303 break;
7304 case 5: /* CLGI */
7305 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7306 goto illegal_op;
7307 if (s->cpl != 0) {
7308 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7309 break;
7310 } else {
7311 gen_helper_clgi(cpu_env);
7313 break;
7314 case 6: /* SKINIT */
7315 if ((!(s->flags & HF_SVME_MASK) &&
7316 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7317 !s->pe)
7318 goto illegal_op;
7319 gen_helper_skinit(cpu_env);
7320 break;
7321 case 7: /* INVLPGA */
7322 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7323 goto illegal_op;
7324 if (s->cpl != 0) {
7325 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7326 break;
7327 } else {
7328 gen_helper_invlpga(cpu_env,
7329 tcg_const_i32(s->aflag - 1));
7331 break;
7332 default:
7333 goto illegal_op;
7335 } else if (s->cpl != 0) {
7336 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7337 } else {
7338 gen_svm_check_intercept(s, pc_start,
7339 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7340 gen_lea_modrm(env, s, modrm);
7341 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
7342 gen_add_A0_im(s, 2);
7343 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7344 if (dflag == MO_16) {
7345 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7347 if (op == 2) {
7348 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7349 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7350 } else {
7351 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7352 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7355 break;
7356 case 4: /* smsw */
7357 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7358 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7359 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7360 #else
7361 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7362 #endif
7363 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
7364 break;
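/* The offsetof adjustment above is a host-endianness detail: with
   TARGET_X86_64, cr[0] is a 64-bit slot but smsw only needs its low
   32 bits, and on a big-endian host those live at byte offset +4:

       lo32_addr = (uint8_t *)&env->cr[0]
                   + (HOST_WORDS_BIGENDIAN ? 4 : 0);
*/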
7365 case 6: /* lmsw */
7366 if (s->cpl != 0) {
7367 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7368 } else {
7369 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7370 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7371 gen_helper_lmsw(cpu_env, cpu_T[0]);
7372 gen_jmp_im(s->pc - s->cs_base);
7373 gen_eob(s);
7375 break;
7376 case 7:
7377 if (mod != 3) { /* invlpg */
7378 if (s->cpl != 0) {
7379 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7380 } else {
7381 gen_update_cc_op(s);
7382 gen_jmp_im(pc_start - s->cs_base);
7383 gen_lea_modrm(env, s, modrm);
7384 gen_helper_invlpg(cpu_env, cpu_A0);
7385 gen_jmp_im(s->pc - s->cs_base);
7386 gen_eob(s);
7388 } else {
7389 switch (rm) {
7390 case 0: /* swapgs */
7391 #ifdef TARGET_X86_64
7392 if (CODE64(s)) {
7393 if (s->cpl != 0) {
7394 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7395 } else {
7396 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7397 offsetof(CPUX86State,segs[R_GS].base));
7398 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7399 offsetof(CPUX86State,kernelgsbase));
7400 tcg_gen_st_tl(cpu_T[1], cpu_env,
7401 offsetof(CPUX86State,segs[R_GS].base));
7402 tcg_gen_st_tl(cpu_T[0], cpu_env,
7403 offsetof(CPUX86State,kernelgsbase));
7405 } else
7406 #endif
7408 goto illegal_op;
7410 break;
7411 case 1: /* rdtscp */
7412 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7413 goto illegal_op;
7414 gen_update_cc_op(s);
7415 gen_jmp_im(pc_start - s->cs_base);
7416 if (s->tb->cflags & CF_USE_ICOUNT) {
7417 gen_io_start();
7419 gen_helper_rdtscp(cpu_env);
7420 if (s->tb->cflags & CF_USE_ICOUNT) {
7421 gen_io_end();
7422 gen_jmp(s, s->pc - s->cs_base);
7424 break;
7425 default:
7426 goto illegal_op;
7429 break;
7430 default:
7431 goto illegal_op;
7433 break;
7434 case 0x108: /* invd */
7435 case 0x109: /* wbinvd */
7436 if (s->cpl != 0) {
7437 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7438 } else {
7439 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7440 /* nothing to do */
7442 break;
7443 case 0x63: /* arpl or movslS (x86_64) */
7444 #ifdef TARGET_X86_64
7445 if (CODE64(s)) {
7446 int d_ot;
7447 /* d_ot is the size of the destination */
7448 d_ot = dflag;
7450 modrm = cpu_ldub_code(env, s->pc++);
7451 reg = ((modrm >> 3) & 7) | rex_r;
7452 mod = (modrm >> 6) & 3;
7453 rm = (modrm & 7) | REX_B(s);
7455 if (mod == 3) {
7456 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
7457 /* sign extend */
7458 if (d_ot == MO_64) {
7459 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7461 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7462 } else {
7463 gen_lea_modrm(env, s, modrm);
7464 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
7465 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7467 } else
7468 #endif
7470 TCGLabel *label1;
7471 TCGv t0, t1, t2, a0;
7473 if (!s->pe || s->vm86)
7474 goto illegal_op;
7475 t0 = tcg_temp_local_new();
7476 t1 = tcg_temp_local_new();
7477 t2 = tcg_temp_local_new();
7478 ot = MO_16;
7479 modrm = cpu_ldub_code(env, s->pc++);
7480 reg = (modrm >> 3) & 7;
7481 mod = (modrm >> 6) & 3;
7482 rm = modrm & 7;
7483 if (mod != 3) {
7484 gen_lea_modrm(env, s, modrm);
7485 gen_op_ld_v(s, ot, t0, cpu_A0);
7486 a0 = tcg_temp_local_new();
7487 tcg_gen_mov_tl(a0, cpu_A0);
7488 } else {
7489 gen_op_mov_v_reg(ot, t0, rm);
7490 TCGV_UNUSED(a0);
7492 gen_op_mov_v_reg(ot, t1, reg);
7493 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7494 tcg_gen_andi_tl(t1, t1, 3);
7495 tcg_gen_movi_tl(t2, 0);
7496 label1 = gen_new_label();
7497 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7498 tcg_gen_andi_tl(t0, t0, ~3);
7499 tcg_gen_or_tl(t0, t0, t1);
7500 tcg_gen_movi_tl(t2, CC_Z);
7501 gen_set_label(label1);
7502 if (mod != 3) {
7503 gen_op_st_v(s, ot, t0, a0);
7504 tcg_temp_free(a0);
7505 } else {
7506 gen_op_mov_reg_v(ot, rm, t0);
7508 gen_compute_eflags(s);
7509 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7510 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7511 tcg_temp_free(t0);
7512 tcg_temp_free(t1);
7513 tcg_temp_free(t2);
7515 break;
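/* Sketch of the ARPL semantics implemented above (RPL is the low two
   bits of a selector):

       if ((dst & 3) < (src & 3)) {
           dst = (dst & ~3) | (src & 3);   -- raise dst's RPL
           ZF = 1;
       } else {
           ZF = 0;                         -- dst unchanged
       }

   The TCG version uses one brcond for the comparison and then merges
   t2 (0 or CC_Z) into cc_src after gen_compute_eflags(). */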
7516 case 0x102: /* lar */
7517 case 0x103: /* lsl */
7519 TCGLabel *label1;
7520 TCGv t0;
7521 if (!s->pe || s->vm86)
7522 goto illegal_op;
7523 ot = dflag != MO_16 ? MO_32 : MO_16;
7524 modrm = cpu_ldub_code(env, s->pc++);
7525 reg = ((modrm >> 3) & 7) | rex_r;
7526 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7527 t0 = tcg_temp_local_new();
7528 gen_update_cc_op(s);
7529 if (b == 0x102) {
7530 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7531 } else {
7532 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7534 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7535 label1 = gen_new_label();
7536 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7537 gen_op_mov_reg_v(ot, reg, t0);
7538 gen_set_label(label1);
7539 set_cc_op(s, CC_OP_EFLAGS);
7540 tcg_temp_free(t0);
7542 break;
7543 case 0x118:
7544 modrm = cpu_ldub_code(env, s->pc++);
7545 mod = (modrm >> 6) & 3;
7546 op = (modrm >> 3) & 7;
7547 switch(op) {
7548 case 0: /* prefetchnta */
7549 case 1: /* prefetcht0 */
7550 case 2: /* prefetcht1 */
7551 case 3: /* prefetcht2 */
7552 if (mod == 3)
7553 goto illegal_op;
7554 gen_lea_modrm(env, s, modrm);
7555 /* nothing more to do */
7556 break;
7557 default: /* nop (multi byte) */
7558 gen_nop_modrm(env, s, modrm);
7559 break;
7561 break;
7562 case 0x119 ... 0x11f: /* nop (multi byte) */
7563 modrm = cpu_ldub_code(env, s->pc++);
7564 gen_nop_modrm(env, s, modrm);
7565 break;
7566 case 0x120: /* mov reg, crN */
7567 case 0x122: /* mov crN, reg */
7568 if (s->cpl != 0) {
7569 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7570 } else {
7571 modrm = cpu_ldub_code(env, s->pc++);
7572 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7573 * AMD documentation (24594.pdf) and testing of
7574 * Intel 386 and 486 processors all show that the mod bits
7575 * are assumed to be 1's, regardless of actual values. */
7577 rm = (modrm & 7) | REX_B(s);
7578 reg = ((modrm >> 3) & 7) | rex_r;
7579 if (CODE64(s))
7580 ot = MO_64;
7581 else
7582 ot = MO_32;
7583 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7584 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7585 reg = 8;
7587 switch(reg) {
7588 case 0:
7589 case 2:
7590 case 3:
7591 case 4:
7592 case 8:
7593 gen_update_cc_op(s);
7594 gen_jmp_im(pc_start - s->cs_base);
7595 if (b & 2) {
7596 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7597 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7598 cpu_T[0]);
7599 gen_jmp_im(s->pc - s->cs_base);
7600 gen_eob(s);
7601 } else {
7602 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7603 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7605 break;
7606 default:
7607 goto illegal_op;
7610 break;
7611 case 0x121: /* mov reg, drN */
7612 case 0x123: /* mov drN, reg */
7613 if (s->cpl != 0) {
7614 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7615 } else {
7616 modrm = cpu_ldub_code(env, s->pc++);
7617 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7618 * AMD documentation (24594.pdf) and testing of
7619 * Intel 386 and 486 processors all show that the mod bits
7620 * are assumed to be 1's, regardless of actual values. */
7622 rm = (modrm & 7) | REX_B(s);
7623 reg = ((modrm >> 3) & 7) | rex_r;
7624 if (CODE64(s))
7625 ot = MO_64;
7626 else
7627 ot = MO_32;
7628 if (reg >= 8) {
7629 goto illegal_op;
7631 if (b & 2) {
7632 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7633 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7634 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7635 gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T[0]);
7636 gen_jmp_im(s->pc - s->cs_base);
7637 gen_eob(s);
7638 } else {
7639 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7640 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7641 gen_helper_get_dr(cpu_T[0], cpu_env, cpu_tmp2_i32);
7642 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7645 break;
7646 case 0x106: /* clts */
7647 if (s->cpl != 0) {
7648 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7649 } else {
7650 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7651 gen_helper_clts(cpu_env);
7652 /* abort block because static cpu state changed */
7653 gen_jmp_im(s->pc - s->cs_base);
7654 gen_eob(s);
7656 break;
7657 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7658 case 0x1c3: /* MOVNTI reg, mem */
7659 if (!(s->cpuid_features & CPUID_SSE2))
7660 goto illegal_op;
7661 ot = mo_64_32(dflag);
7662 modrm = cpu_ldub_code(env, s->pc++);
7663 mod = (modrm >> 6) & 3;
7664 if (mod == 3)
7665 goto illegal_op;
7666 reg = ((modrm >> 3) & 7) | rex_r;
7667 /* generate a generic store */
7668 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7669 break;
7670 case 0x1ae:
7671 modrm = cpu_ldub_code(env, s->pc++);
7672 mod = (modrm >> 6) & 3;
7673 op = (modrm >> 3) & 7;
7674 switch(op) {
7675 case 0: /* fxsave */
7676 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7677 (s->prefix & PREFIX_LOCK))
7678 goto illegal_op;
7679 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7680 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7681 break;
7683 gen_lea_modrm(env, s, modrm);
7684 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7685 break;
7686 case 1: /* fxrstor */
7687 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7688 (s->prefix & PREFIX_LOCK))
7689 goto illegal_op;
7690 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7691 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7692 break;
7694 gen_lea_modrm(env, s, modrm);
7695 gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7696 break;
7697 case 2: /* ldmxcsr */
7698 case 3: /* stmxcsr */
7699 if (s->flags & HF_TS_MASK) {
7700 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7701 break;
7703 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7704 mod == 3)
7705 goto illegal_op;
7706 gen_lea_modrm(env, s, modrm);
7707 if (op == 2) {
7708 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7709 s->mem_index, MO_LEUL);
7710 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7711 } else {
7712 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7713 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
7715 break;
7716 case 5: /* lfence */
7717 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7718 goto illegal_op;
7719 break;
7720 case 6: /* mfence/clwb */
7721 if (s->prefix & PREFIX_DATA) {
7722 /* clwb */
7723 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB))
7724 goto illegal_op;
7725 gen_nop_modrm(env, s, modrm);
7726 } else {
7727 /* mfence */
7728 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7729 goto illegal_op;
7731 break;
7732 case 7: /* sfence / clflush / clflushopt / pcommit */
7733 if ((modrm & 0xc7) == 0xc0) {
7734 if (s->prefix & PREFIX_DATA) {
7735 /* pcommit */
7736 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT))
7737 goto illegal_op;
7738 } else {
7739 /* sfence */
7740 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7741 if (!(s->cpuid_features & CPUID_SSE))
7742 goto illegal_op;
7744 } else {
7745 if (s->prefix & PREFIX_DATA) {
7746 /* clflushopt */
7747 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT))
7748 goto illegal_op;
7749 } else {
7750 /* clflush */
7751 if (!(s->cpuid_features & CPUID_CLFLUSH))
7752 goto illegal_op;
7754 gen_lea_modrm(env, s, modrm);
7756 break;
7757 default:
7758 goto illegal_op;
7760 break;
7761 case 0x10d: /* 3DNow! prefetch(w) */
7762 modrm = cpu_ldub_code(env, s->pc++);
7763 mod = (modrm >> 6) & 3;
7764 if (mod == 3)
7765 goto illegal_op;
7766 gen_lea_modrm(env, s, modrm);
7767 /* ignore for now */
7768 break;
7769 case 0x1aa: /* rsm */
7770 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7771 if (!(s->flags & HF_SMM_MASK))
7772 goto illegal_op;
7773 gen_update_cc_op(s);
7774 gen_jmp_im(s->pc - s->cs_base);
7775 gen_helper_rsm(cpu_env);
7776 gen_eob(s);
7777 break;
7778 case 0x1b8: /* SSE4.2 popcnt */
7779 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7780 PREFIX_REPZ)
7781 goto illegal_op;
7782 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7783 goto illegal_op;
7785 modrm = cpu_ldub_code(env, s->pc++);
7786 reg = ((modrm >> 3) & 7) | rex_r;
7788 if (s->prefix & PREFIX_DATA) {
7789 ot = MO_16;
7790 } else {
7791 ot = mo_64_32(dflag);
7794 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7795 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7796 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
7798 set_cc_op(s, CC_OP_EFLAGS);
7799 break;
7800 case 0x10e ... 0x10f:
7801 /* 3DNow! instructions, ignore prefixes */
7802 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7803 case 0x110 ... 0x117:
7804 case 0x128 ... 0x12f:
7805 case 0x138 ... 0x13a:
7806 case 0x150 ... 0x179:
7807 case 0x17c ... 0x17f:
7808 case 0x1c2:
7809 case 0x1c4 ... 0x1c6:
7810 case 0x1d0 ... 0x1fe:
7811 gen_sse(env, s, b, pc_start, rex_r);
7812 break;
7813 default:
7814 goto illegal_op;
7816 /* lock generation */
7817 if (s->prefix & PREFIX_LOCK)
7818 gen_helper_unlock();
7819 return s->pc;
7820 illegal_op:
7821 if (s->prefix & PREFIX_LOCK)
7822 gen_helper_unlock();
7823 /* XXX: ensure that no lock was generated */
7824 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7825 return s->pc;
7828 void tcg_x86_init(void)
7830 static const char reg_names[CPU_NB_REGS][4] = {
7831 #ifdef TARGET_X86_64
7832 [R_EAX] = "rax",
7833 [R_EBX] = "rbx",
7834 [R_ECX] = "rcx",
7835 [R_EDX] = "rdx",
7836 [R_ESI] = "rsi",
7837 [R_EDI] = "rdi",
7838 [R_EBP] = "rbp",
7839 [R_ESP] = "rsp",
7840 [8] = "r8",
7841 [9] = "r9",
7842 [10] = "r10",
7843 [11] = "r11",
7844 [12] = "r12",
7845 [13] = "r13",
7846 [14] = "r14",
7847 [15] = "r15",
7848 #else
7849 [R_EAX] = "eax",
7850 [R_EBX] = "ebx",
7851 [R_ECX] = "ecx",
7852 [R_EDX] = "edx",
7853 [R_ESI] = "esi",
7854 [R_EDI] = "edi",
7855 [R_EBP] = "ebp",
7856 [R_ESP] = "esp",
7857 #endif
7859 int i;
7861 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7862 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7863 offsetof(CPUX86State, cc_op), "cc_op");
7864 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7865 "cc_dst");
7866 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7867 "cc_src");
7868 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7869 "cc_src2");
7871 for (i = 0; i < CPU_NB_REGS; ++i) {
7872 cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
7873 offsetof(CPUX86State, regs[i]),
7874 reg_names[i]);
7877 helper_lock_init();
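/* Each tcg_global_mem_new() call above binds a TCG global to a fixed
   CPUX86State field relative to the env pointer (TCG_AREG0), e.g.
   cpu_regs[R_EAX] aliases env->regs[R_EAX]; uses of these globals in
   the translator therefore become loads and stores from env in the
   generated code. */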
7880 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7881 basic block 'tb'. */
7882 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
7884 X86CPU *cpu = x86_env_get_cpu(env);
7885 CPUState *cs = CPU(cpu);
7886 DisasContext dc1, *dc = &dc1;
7887 target_ulong pc_ptr;
7888 uint64_t flags;
7889 target_ulong pc_start;
7890 target_ulong cs_base;
7891 int num_insns;
7892 int max_insns;
7894 /* generate intermediate code */
7895 pc_start = tb->pc;
7896 cs_base = tb->cs_base;
7897 flags = tb->flags;
7899 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7900 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7901 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7902 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7903 dc->f_st = 0;
7904 dc->vm86 = (flags >> VM_SHIFT) & 1;
7905 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7906 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7907 dc->tf = (flags >> TF_SHIFT) & 1;
7908 dc->singlestep_enabled = cs->singlestep_enabled;
7909 dc->cc_op = CC_OP_DYNAMIC;
7910 dc->cc_op_dirty = false;
7911 dc->cs_base = cs_base;
7912 dc->tb = tb;
7913 dc->popl_esp_hack = 0;
7914 /* select memory access functions */
7915 dc->mem_index = 0;
7916 if (flags & HF_SOFTMMU_MASK) {
7917 dc->mem_index = cpu_mmu_index(env, false);
7919 dc->cpuid_features = env->features[FEAT_1_EDX];
7920 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
7921 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
7922 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
7923 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
7924 #ifdef TARGET_X86_64
7925 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7926 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7927 #endif
7928 dc->flags = flags;
7929 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
7930 (flags & HF_INHIBIT_IRQ_MASK)
7931 #ifndef CONFIG_SOFTMMU
7932 || (flags & HF_SOFTMMU_MASK)
7933 #endif
7935 /* Do not optimize repz jumps at all in icount mode, because
7936 rep movsS instructions are executed on different paths
7937 in the !repz_opt and repz_opt modes. The first path used to
7938 be taken always except in single-step mode. Disabling the
7939 jump optimization makes the control paths equivalent in
7940 normal and single-step modes.
7941 Now there is no jump optimization for repz in
7942 record/replay modes, and there will always be an
7943 additional step for ecx=0 when icount is enabled. */
7945 dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
7946 #if 0
7947 /* check addseg logic */
7948 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7949 printf("ERROR addseg\n");
7950 #endif
7952 cpu_T[0] = tcg_temp_new();
7953 cpu_T[1] = tcg_temp_new();
7954 cpu_A0 = tcg_temp_new();
7956 cpu_tmp0 = tcg_temp_new();
7957 cpu_tmp1_i64 = tcg_temp_new_i64();
7958 cpu_tmp2_i32 = tcg_temp_new_i32();
7959 cpu_tmp3_i32 = tcg_temp_new_i32();
7960 cpu_tmp4 = tcg_temp_new();
7961 cpu_ptr0 = tcg_temp_new_ptr();
7962 cpu_ptr1 = tcg_temp_new_ptr();
7963 cpu_cc_srcT = tcg_temp_local_new();
7965 dc->is_jmp = DISAS_NEXT;
7966 pc_ptr = pc_start;
7967 num_insns = 0;
7968 max_insns = tb->cflags & CF_COUNT_MASK;
7969 if (max_insns == 0) {
7970 max_insns = CF_COUNT_MASK;
7972 if (max_insns > TCG_MAX_INSNS) {
7973 max_insns = TCG_MAX_INSNS;
7976 gen_tb_start(tb);
7977 for(;;) {
7978 tcg_gen_insn_start(pc_ptr, dc->cc_op);
7979 num_insns++;
7981 /* If RF is set, suppress an internally generated breakpoint. */
7982 if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
7983 tb->flags & HF_RF_MASK
7984 ? BP_GDB : BP_ANY))) {
7985 gen_debug(dc, pc_ptr - dc->cs_base);
7986 /* The address covered by the breakpoint must be included in
7987 [tb->pc, tb->pc + tb->size) in order for it to be
7988 properly cleared -- thus we increment the PC here so that
7989 the logic setting tb->size below does the right thing. */
7990 pc_ptr += 1;
7991 goto done_generating;
7993 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
7994 gen_io_start();
7997 pc_ptr = disas_insn(env, dc, pc_ptr);
7998 /* stop translation if indicated */
7999 if (dc->is_jmp)
8000 break;
8001 /* in single step mode, we generate only one instruction and
8002 raise an exception */
8003 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8004 the flag and abort the translation to give the irqs a
8005 chance to happen */
8006 if (dc->tf || dc->singlestep_enabled ||
8007 (flags & HF_INHIBIT_IRQ_MASK)) {
8008 gen_jmp_im(pc_ptr - dc->cs_base);
8009 gen_eob(dc);
8010 break;
8012 /* Do not cross a page boundary in icount mode, since doing
8013 so can cause an exception. Stop only when the boundary is
8014 crossed by the first instruction in the block.
8015 If the current instruction has already crossed the boundary,
8016 that is fine, because an exception has not stopped this code. */
8018 if ((tb->cflags & CF_USE_ICOUNT)
8019 && ((pc_ptr & TARGET_PAGE_MASK)
8020 != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
8021 || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
8022 gen_jmp_im(pc_ptr - dc->cs_base);
8023 gen_eob(dc);
8024 break;
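/* Worked example of the page-boundary test above, with 4 KiB pages
   and TARGET_MAX_INSN_SIZE == 16: at pc_ptr == 0x1ff8 the next insn
   may extend to 0x2007, so

       (0x1ff8 & TARGET_PAGE_MASK) != ((0x1ff8 + 15) & TARGET_PAGE_MASK)
       0x1000                      != 0x2000

   and the block ends before a fetch could fault across the boundary;
   the (pc_ptr & ~TARGET_PAGE_MASK) == 0 clause likewise stops exactly
   at a page start. */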
8026 /* if too long translation, stop generation too */
8027 if (tcg_op_buf_full() ||
8028 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8029 num_insns >= max_insns) {
8030 gen_jmp_im(pc_ptr - dc->cs_base);
8031 gen_eob(dc);
8032 break;
8034 if (singlestep) {
8035 gen_jmp_im(pc_ptr - dc->cs_base);
8036 gen_eob(dc);
8037 break;
8040 if (tb->cflags & CF_LAST_IO)
8041 gen_io_end();
8042 done_generating:
8043 gen_tb_end(tb, num_insns);
8045 #ifdef DEBUG_DISAS
8046 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8047 int disas_flags;
8048 qemu_log("----------------\n");
8049 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8050 #ifdef TARGET_X86_64
8051 if (dc->code64)
8052 disas_flags = 2;
8053 else
8054 #endif
8055 disas_flags = !dc->code32;
8056 log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
8057 qemu_log("\n");
8059 #endif
8061 tb->size = pc_ptr - pc_start;
8062 tb->icount = num_insns;
8065 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
8066 target_ulong *data)
8068 int cc_op = data[1];
8069 env->eip = data[0] - tb->cs_base;
8070 if (cc_op != CC_OP_DYNAMIC) {
8071 env->cc_op = cc_op;
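/* The data[] layout consumed here mirrors what gen_intermediate_code
   records with tcg_gen_insn_start(pc_ptr, dc->cc_op): data[0] is the
   linear pc (eip + cs_base) and data[1] the CC_OP value at that insn,
   so unwinding restores eip and, when it was statically known, cc_op. */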