[qemu/cris-port.git] / target-i386 / translate.c
1 /*
2 * i386 translation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
21 #include "qemu/host-utils.h"
22 #include "cpu.h"
23 #include "disas/disas.h"
24 #include "tcg-op.h"
25 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
30 #include "trace-tcg.h"
31 #include "exec/log.h"
34 #define PREFIX_REPZ 0x01
35 #define PREFIX_REPNZ 0x02
36 #define PREFIX_LOCK 0x04
37 #define PREFIX_DATA 0x08
38 #define PREFIX_ADR 0x10
39 #define PREFIX_VEX 0x20
41 #ifdef TARGET_X86_64
42 #define CODE64(s) ((s)->code64)
43 #define REX_X(s) ((s)->rex_x)
44 #define REX_B(s) ((s)->rex_b)
45 #else
46 #define CODE64(s) 0
47 #define REX_X(s) 0
48 #define REX_B(s) 0
49 #endif
51 #ifdef TARGET_X86_64
52 # define ctztl ctz64
53 # define clztl clz64
54 #else
55 # define ctztl ctz32
56 # define clztl clz32
57 #endif
59 //#define MACRO_TEST 1
61 /* global register indexes */
62 static TCGv_ptr cpu_env;
63 static TCGv cpu_A0;
64 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
65 static TCGv_i32 cpu_cc_op;
66 static TCGv cpu_regs[CPU_NB_REGS];
67 /* local temps */
68 static TCGv cpu_T[2];
69 /* local register indexes (only used inside old micro ops) */
70 static TCGv cpu_tmp0, cpu_tmp4;
71 static TCGv_ptr cpu_ptr0, cpu_ptr1;
72 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
73 static TCGv_i64 cpu_tmp1_i64;
75 #include "exec/gen-icount.h"
77 #ifdef TARGET_X86_64
78 static int x86_64_hregs;
79 #endif
81 typedef struct DisasContext {
82 /* current insn context */
83 int override; /* -1 if no override */
84 int prefix;
85 TCGMemOp aflag;
86 TCGMemOp dflag;
87 target_ulong pc; /* pc = eip + cs_base */
88     int is_jmp; /* 1 means jump (stop translation), 2 means CPU
89 static state change (stop translation) */
90 /* current block context */
91 target_ulong cs_base; /* base of CS segment */
92 int pe; /* protected mode */
93 int code32; /* 32 bit code segment */
94 #ifdef TARGET_X86_64
95 int lma; /* long mode active */
96 int code64; /* 64 bit code segment */
97 int rex_x, rex_b;
98 #endif
99 int vex_l; /* vex vector length */
100     int vex_v;  /* vex vvvv register, without 1's complement. */
101 int ss32; /* 32 bit stack segment */
102 CCOp cc_op; /* current CC operation */
103 bool cc_op_dirty;
104     int addseg; /* non zero if any of DS/ES/SS has a non zero base */
105 int f_st; /* currently unused */
106 int vm86; /* vm86 mode */
107 int cpl;
108 int iopl;
109 int tf; /* TF cpu flag */
110 int singlestep_enabled; /* "hardware" single step enabled */
111 int jmp_opt; /* use direct block chaining for direct jumps */
112 int repz_opt; /* optimize jumps within repz instructions */
113 int mem_index; /* select memory access functions */
114 uint64_t flags; /* all execution flags */
115 struct TranslationBlock *tb;
116 int popl_esp_hack; /* for correct popl with esp base handling */
117 int rip_offset; /* only used in x86_64, but left for simplicity */
118 int cpuid_features;
119 int cpuid_ext_features;
120 int cpuid_ext2_features;
121 int cpuid_ext3_features;
122 int cpuid_7_0_ebx_features;
123 } DisasContext;
125 static void gen_eob(DisasContext *s);
126 static void gen_jmp(DisasContext *s, target_ulong eip);
127 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
128 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
130 /* i386 arith/logic operations */
131 enum {
132 OP_ADDL,
133 OP_ORL,
134 OP_ADCL,
135 OP_SBBL,
136 OP_ANDL,
137 OP_SUBL,
138 OP_XORL,
139 OP_CMPL,
142 /* i386 shift ops */
143 enum {
144 OP_ROL,
145 OP_ROR,
146 OP_RCL,
147 OP_RCR,
148 OP_SHL,
149 OP_SHR,
150 OP_SHL1, /* undocumented */
151 OP_SAR = 7,
154 enum {
155 JCC_O,
156 JCC_B,
157 JCC_Z,
158 JCC_BE,
159 JCC_S,
160 JCC_P,
161 JCC_L,
162 JCC_LE,
165 enum {
166 /* I386 int registers */
167 OR_EAX, /* MUST be even numbered */
168 OR_ECX,
169 OR_EDX,
170 OR_EBX,
171 OR_ESP,
172 OR_EBP,
173 OR_ESI,
174 OR_EDI,
176 OR_TMP0 = 16, /* temporary operand register */
177 OR_TMP1,
178 OR_A0, /* temporary register used when doing address evaluation */
181 enum {
182 USES_CC_DST = 1,
183 USES_CC_SRC = 2,
184 USES_CC_SRC2 = 4,
185 USES_CC_SRCT = 8,
188 /* Bit set if the global variable is live after setting CC_OP to X. */
189 static const uint8_t cc_op_live[CC_OP_NB] = {
190 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
191 [CC_OP_EFLAGS] = USES_CC_SRC,
192 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
193 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
194 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
195 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
196 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
197 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
198 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
199 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
200 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
201 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
202 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
203 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
204 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
205 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
206 [CC_OP_CLR] = 0,
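/* Switch the lazy flag state to 'op', discarding any CC_* globals that the
   new encoding no longer uses.  For example, after an ADD the translator
   records CC_OP_ADDB + ot plus the operands; EFLAGS is only materialized
   later, if and when something actually reads it. */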
209 static void set_cc_op(DisasContext *s, CCOp op)
211 int dead;
213 if (s->cc_op == op) {
214 return;
217 /* Discard CC computation that will no longer be used. */
218 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
219 if (dead & USES_CC_DST) {
220 tcg_gen_discard_tl(cpu_cc_dst);
222 if (dead & USES_CC_SRC) {
223 tcg_gen_discard_tl(cpu_cc_src);
225 if (dead & USES_CC_SRC2) {
226 tcg_gen_discard_tl(cpu_cc_src2);
228 if (dead & USES_CC_SRCT) {
229 tcg_gen_discard_tl(cpu_cc_srcT);
232 if (op == CC_OP_DYNAMIC) {
233 /* The DYNAMIC setting is translator only, and should never be
234 stored. Thus we always consider it clean. */
235 s->cc_op_dirty = false;
236 } else {
237 /* Discard any computed CC_OP value (see shifts). */
238 if (s->cc_op == CC_OP_DYNAMIC) {
239 tcg_gen_discard_i32(cpu_cc_op);
241 s->cc_op_dirty = true;
243 s->cc_op = op;
246 static void gen_update_cc_op(DisasContext *s)
248 if (s->cc_op_dirty) {
249 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
250 s->cc_op_dirty = false;
254 #ifdef TARGET_X86_64
256 #define NB_OP_SIZES 4
258 #else /* !TARGET_X86_64 */
260 #define NB_OP_SIZES 3
262 #endif /* !TARGET_X86_64 */
264 #if defined(HOST_WORDS_BIGENDIAN)
265 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
266 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
267 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
268 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
269 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
270 #else
271 #define REG_B_OFFSET 0
272 #define REG_H_OFFSET 1
273 #define REG_W_OFFSET 0
274 #define REG_L_OFFSET 0
275 #define REG_LH_OFFSET 4
276 #endif
278 /* In instruction encodings for byte register accesses the
279 * register number usually indicates "low 8 bits of register N";
280 * however there are some special cases where N 4..7 indicates
281  * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4".  Return
282 * true for this special case, false otherwise.
284 static inline bool byte_reg_is_xH(int reg)
286 if (reg < 4) {
287 return false;
289 #ifdef TARGET_X86_64
290 if (reg >= 8 || x86_64_hregs) {
291 return false;
293 #endif
294 return true;
297 /* Select the size of a push/pop operation. */
298 static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
300 if (CODE64(s)) {
301 return ot == MO_16 ? MO_16 : MO_64;
302 } else {
303 return ot;
307 /* Select only size 64 else 32. Used for SSE operand sizes. */
308 static inline TCGMemOp mo_64_32(TCGMemOp ot)
310 #ifdef TARGET_X86_64
311 return ot == MO_64 ? MO_64 : MO_32;
312 #else
313 return MO_32;
314 #endif
317 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
318 byte vs word opcodes. */
319 static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
321 return b & 1 ? ot : MO_8;
324 /* Select size 8 if lsb of B is clear, else OT capped at 32.
325 Used for decoding operand size of port opcodes. */
326 static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
328 return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
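/* Write 't0' into general register 'reg' with operand size 'ot': byte and
   word stores merge into the existing register value, 32 bit stores zero
   extend into the full register (a plain move on i386), and 64 bit stores
   copy the value unchanged. */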
331 static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
333 switch(ot) {
334 case MO_8:
335 if (!byte_reg_is_xH(reg)) {
336 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
337 } else {
338 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
340 break;
341 case MO_16:
342 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
343 break;
344 case MO_32:
345 /* For x86_64, this sets the higher half of register to zero.
346 For i386, this is equivalent to a mov. */
347 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
348 break;
349 #ifdef TARGET_X86_64
350 case MO_64:
351 tcg_gen_mov_tl(cpu_regs[reg], t0);
352 break;
353 #endif
354 default:
355 tcg_abort();
359 static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
361 if (ot == MO_8 && byte_reg_is_xH(reg)) {
362 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
363 tcg_gen_ext8u_tl(t0, t0);
364 } else {
365 tcg_gen_mov_tl(t0, cpu_regs[reg]);
369 static inline void gen_op_movl_A0_reg(int reg)
371 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
374 static inline void gen_op_addl_A0_im(int32_t val)
376 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
377 #ifdef TARGET_X86_64
378 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
379 #endif
382 #ifdef TARGET_X86_64
383 static inline void gen_op_addq_A0_im(int64_t val)
385 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
387 #endif
389 static void gen_add_A0_im(DisasContext *s, int val)
391 #ifdef TARGET_X86_64
392 if (CODE64(s))
393 gen_op_addq_A0_im(val);
394 else
395 #endif
396 gen_op_addl_A0_im(val);
399 static inline void gen_op_jmp_v(TCGv dest)
401 tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
404 static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
406 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
407 gen_op_mov_reg_v(size, reg, cpu_tmp0);
410 static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
412 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
413 gen_op_mov_reg_v(size, reg, cpu_tmp0);
416 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
418 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
419 if (shift != 0)
420 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
421 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
422 /* For x86_64, this sets the higher half of register to zero.
423 For i386, this is equivalent to a nop. */
424 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
427 static inline void gen_op_movl_A0_seg(int reg)
429 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
432 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
434 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
435 #ifdef TARGET_X86_64
436 if (CODE64(s)) {
437 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
438 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
439 } else {
440 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
441 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
443 #else
444 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
445 #endif
448 #ifdef TARGET_X86_64
449 static inline void gen_op_movq_A0_seg(int reg)
451 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
454 static inline void gen_op_addq_A0_seg(int reg)
456 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
457 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
460 static inline void gen_op_movq_A0_reg(int reg)
462 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
465 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
467 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
468 if (shift != 0)
469 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
470 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
472 #endif
474 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
476 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
479 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
481 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
484 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
486 if (d == OR_TMP0) {
487 gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
488 } else {
489 gen_op_mov_reg_v(idx, d, cpu_T[0]);
493 static inline void gen_jmp_im(target_ulong pc)
495 tcg_gen_movi_tl(cpu_tmp0, pc);
496 gen_op_jmp_v(cpu_tmp0);
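/* Compute the string source address into cpu_A0 from ESI plus, where
   needed, the segment base (DS unless overridden), honouring the current
   address size. */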
499 static inline void gen_string_movl_A0_ESI(DisasContext *s)
501 int override;
503 override = s->override;
504 switch (s->aflag) {
505 #ifdef TARGET_X86_64
506 case MO_64:
507 if (override >= 0) {
508 gen_op_movq_A0_seg(override);
509 gen_op_addq_A0_reg_sN(0, R_ESI);
510 } else {
511 gen_op_movq_A0_reg(R_ESI);
513 break;
514 #endif
515 case MO_32:
516 /* 32 bit address */
517 if (s->addseg && override < 0)
518 override = R_DS;
519 if (override >= 0) {
520 gen_op_movl_A0_seg(override);
521 gen_op_addl_A0_reg_sN(0, R_ESI);
522 } else {
523 gen_op_movl_A0_reg(R_ESI);
525 break;
526 case MO_16:
527         /* 16 bit address: a segment base is always added (DS unless overridden) */
528 if (override < 0)
529 override = R_DS;
530 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
531 gen_op_addl_A0_seg(s, override);
532 break;
533 default:
534 tcg_abort();
538 static inline void gen_string_movl_A0_EDI(DisasContext *s)
540 switch (s->aflag) {
541 #ifdef TARGET_X86_64
542 case MO_64:
543 gen_op_movq_A0_reg(R_EDI);
544 break;
545 #endif
546 case MO_32:
547 if (s->addseg) {
548 gen_op_movl_A0_seg(R_ES);
549 gen_op_addl_A0_reg_sN(0, R_EDI);
550 } else {
551 gen_op_movl_A0_reg(R_EDI);
553 break;
554 case MO_16:
555 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
556 gen_op_addl_A0_seg(s, R_ES);
557 break;
558 default:
559 tcg_abort();
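/* Load the string increment into T0: +(1 << ot) or -(1 << ot) depending
   on EFLAGS.DF. */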
563 static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
565 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
566 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
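/* Sign or zero extend 'src' into 'dst' according to 'size'.  When no
   extension is needed (full width operand), 'src' is returned unchanged. */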
569 static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
571 switch (size) {
572 case MO_8:
573 if (sign) {
574 tcg_gen_ext8s_tl(dst, src);
575 } else {
576 tcg_gen_ext8u_tl(dst, src);
578 return dst;
579 case MO_16:
580 if (sign) {
581 tcg_gen_ext16s_tl(dst, src);
582 } else {
583 tcg_gen_ext16u_tl(dst, src);
585 return dst;
586 #ifdef TARGET_X86_64
587 case MO_32:
588 if (sign) {
589 tcg_gen_ext32s_tl(dst, src);
590 } else {
591 tcg_gen_ext32u_tl(dst, src);
593 return dst;
594 #endif
595 default:
596 return src;
600 static void gen_extu(TCGMemOp ot, TCGv reg)
602 gen_ext_tl(reg, reg, ot, false);
605 static void gen_exts(TCGMemOp ot, TCGv reg)
607 gen_ext_tl(reg, reg, ot, true);
610 static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
612 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
613 gen_extu(size, cpu_tmp0);
614 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
617 static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
619 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
620 gen_extu(size, cpu_tmp0);
621 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
624 static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
626 switch (ot) {
627 case MO_8:
628 gen_helper_inb(v, cpu_env, n);
629 break;
630 case MO_16:
631 gen_helper_inw(v, cpu_env, n);
632 break;
633 case MO_32:
634 gen_helper_inl(v, cpu_env, n);
635 break;
636 default:
637 tcg_abort();
641 static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
643 switch (ot) {
644 case MO_8:
645 gen_helper_outb(cpu_env, v, n);
646 break;
647 case MO_16:
648 gen_helper_outw(cpu_env, v, n);
649 break;
650 case MO_32:
651 gen_helper_outl(cpu_env, v, n);
652 break;
653 default:
654 tcg_abort();
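/* Check that an IN/OUT of size 'ot' on the port in T0 is allowed: in
   protected mode when CPL > IOPL, or in vm86 mode, this consults the TSS
   I/O permission bitmap; under SVM the IOIO intercept is also checked. */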
658 static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
659 uint32_t svm_flags)
661 target_ulong next_eip;
663 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
664 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
665 switch (ot) {
666 case MO_8:
667 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
668 break;
669 case MO_16:
670 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
671 break;
672 case MO_32:
673 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
674 break;
675 default:
676 tcg_abort();
679     if (s->flags & HF_SVMI_MASK) {
680 gen_update_cc_op(s);
681 gen_jmp_im(cur_eip);
682 svm_flags |= (1 << (4 + ot));
683 next_eip = s->pc - s->cs_base;
684 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
685 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
686 tcg_const_i32(svm_flags),
687 tcg_const_i32(next_eip - cur_eip));
691 static inline void gen_movs(DisasContext *s, TCGMemOp ot)
693 gen_string_movl_A0_ESI(s);
694 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
695 gen_string_movl_A0_EDI(s);
696 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
697 gen_op_movl_T0_Dshift(ot);
698 gen_op_add_reg_T0(s->aflag, R_ESI);
699 gen_op_add_reg_T0(s->aflag, R_EDI);
702 static void gen_op_update1_cc(void)
704 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
707 static void gen_op_update2_cc(void)
709 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
710 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
713 static void gen_op_update3_cc(TCGv reg)
715 tcg_gen_mov_tl(cpu_cc_src2, reg);
716 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
717 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
720 static inline void gen_op_testl_T0_T1_cc(void)
722 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
725 static void gen_op_update_neg_cc(void)
727 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
728 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
729 tcg_gen_movi_tl(cpu_cc_srcT, 0);
732 /* compute all eflags to cc_src */
733 static void gen_compute_eflags(DisasContext *s)
735 TCGv zero, dst, src1, src2;
736 int live, dead;
738 if (s->cc_op == CC_OP_EFLAGS) {
739 return;
741 if (s->cc_op == CC_OP_CLR) {
742 tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
743 set_cc_op(s, CC_OP_EFLAGS);
744 return;
747 TCGV_UNUSED(zero);
748 dst = cpu_cc_dst;
749 src1 = cpu_cc_src;
750 src2 = cpu_cc_src2;
752 /* Take care to not read values that are not live. */
753 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
754 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
755 if (dead) {
756 zero = tcg_const_tl(0);
757 if (dead & USES_CC_DST) {
758 dst = zero;
760 if (dead & USES_CC_SRC) {
761 src1 = zero;
763 if (dead & USES_CC_SRC2) {
764 src2 = zero;
768 gen_update_cc_op(s);
769 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
770 set_cc_op(s, CC_OP_EFLAGS);
772 if (dead) {
773 tcg_temp_free(zero);
777 typedef struct CCPrepare {
778 TCGCond cond;
779 TCGv reg;
780 TCGv reg2;
781 target_ulong imm;
782 target_ulong mask;
783 bool use_reg2;
784 bool no_setcond;
785 } CCPrepare;
787 /* compute eflags.C to reg */
788 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
790 TCGv t0, t1;
791 int size, shift;
793 switch (s->cc_op) {
794 case CC_OP_SUBB ... CC_OP_SUBQ:
795 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
796 size = s->cc_op - CC_OP_SUBB;
797 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
798 /* If no temporary was used, be careful not to alias t1 and t0. */
799 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
800 tcg_gen_mov_tl(t0, cpu_cc_srcT);
801 gen_extu(size, t0);
802 goto add_sub;
804 case CC_OP_ADDB ... CC_OP_ADDQ:
805 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
806 size = s->cc_op - CC_OP_ADDB;
807 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
808 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
809 add_sub:
810 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
811 .reg2 = t1, .mask = -1, .use_reg2 = true };
813 case CC_OP_LOGICB ... CC_OP_LOGICQ:
814 case CC_OP_CLR:
815 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
817 case CC_OP_INCB ... CC_OP_INCQ:
818 case CC_OP_DECB ... CC_OP_DECQ:
819 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
820 .mask = -1, .no_setcond = true };
822 case CC_OP_SHLB ... CC_OP_SHLQ:
823 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
824 size = s->cc_op - CC_OP_SHLB;
825 shift = (8 << size) - 1;
826 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
827 .mask = (target_ulong)1 << shift };
829 case CC_OP_MULB ... CC_OP_MULQ:
830 return (CCPrepare) { .cond = TCG_COND_NE,
831 .reg = cpu_cc_src, .mask = -1 };
833 case CC_OP_BMILGB ... CC_OP_BMILGQ:
834 size = s->cc_op - CC_OP_BMILGB;
835 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
836 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
838 case CC_OP_ADCX:
839 case CC_OP_ADCOX:
840 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
841 .mask = -1, .no_setcond = true };
843 case CC_OP_EFLAGS:
844 case CC_OP_SARB ... CC_OP_SARQ:
845 /* CC_SRC & 1 */
846 return (CCPrepare) { .cond = TCG_COND_NE,
847 .reg = cpu_cc_src, .mask = CC_C };
849 default:
850 /* The need to compute only C from CC_OP_DYNAMIC is important
851 in efficiently implementing e.g. INC at the start of a TB. */
852 gen_update_cc_op(s);
853 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
854 cpu_cc_src2, cpu_cc_op);
855 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
856 .mask = -1, .no_setcond = true };
860 /* compute eflags.P to reg */
861 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
863 gen_compute_eflags(s);
864 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
865 .mask = CC_P };
868 /* compute eflags.S to reg */
869 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
871 switch (s->cc_op) {
872 case CC_OP_DYNAMIC:
873 gen_compute_eflags(s);
874 /* FALLTHRU */
875 case CC_OP_EFLAGS:
876 case CC_OP_ADCX:
877 case CC_OP_ADOX:
878 case CC_OP_ADCOX:
879 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
880 .mask = CC_S };
881 case CC_OP_CLR:
882 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
883 default:
885 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
886 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
887 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
892 /* compute eflags.O to reg */
893 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
895 switch (s->cc_op) {
896 case CC_OP_ADOX:
897 case CC_OP_ADCOX:
898 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
899 .mask = -1, .no_setcond = true };
900 case CC_OP_CLR:
901 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
902 default:
903 gen_compute_eflags(s);
904 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
905 .mask = CC_O };
909 /* compute eflags.Z to reg */
910 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
912 switch (s->cc_op) {
913 case CC_OP_DYNAMIC:
914 gen_compute_eflags(s);
915 /* FALLTHRU */
916 case CC_OP_EFLAGS:
917 case CC_OP_ADCX:
918 case CC_OP_ADOX:
919 case CC_OP_ADCOX:
920 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
921 .mask = CC_Z };
922 case CC_OP_CLR:
923 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
924 default:
926 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
927 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
928 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
933 /* perform a conditional store into register 'reg' according to jump opcode
934    value 'b'. In the fast case, T0 is guaranteed not to be used. */
935 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
937 int inv, jcc_op, cond;
938 TCGMemOp size;
939 CCPrepare cc;
940 TCGv t0;
942 inv = b & 1;
943 jcc_op = (b >> 1) & 7;
945 switch (s->cc_op) {
946 case CC_OP_SUBB ... CC_OP_SUBQ:
947 /* We optimize relational operators for the cmp/jcc case. */
948 size = s->cc_op - CC_OP_SUBB;
949 switch (jcc_op) {
950 case JCC_BE:
951 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
952 gen_extu(size, cpu_tmp4);
953 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
954 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
955 .reg2 = t0, .mask = -1, .use_reg2 = true };
956 break;
958 case JCC_L:
959 cond = TCG_COND_LT;
960 goto fast_jcc_l;
961 case JCC_LE:
962 cond = TCG_COND_LE;
963 fast_jcc_l:
964 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
965 gen_exts(size, cpu_tmp4);
966 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
967 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
968 .reg2 = t0, .mask = -1, .use_reg2 = true };
969 break;
971 default:
972 goto slow_jcc;
974 break;
976 default:
977 slow_jcc:
978 /* This actually generates good code for JC, JZ and JS. */
979 switch (jcc_op) {
980 case JCC_O:
981 cc = gen_prepare_eflags_o(s, reg);
982 break;
983 case JCC_B:
984 cc = gen_prepare_eflags_c(s, reg);
985 break;
986 case JCC_Z:
987 cc = gen_prepare_eflags_z(s, reg);
988 break;
989 case JCC_BE:
990 gen_compute_eflags(s);
991 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
992 .mask = CC_Z | CC_C };
993 break;
994 case JCC_S:
995 cc = gen_prepare_eflags_s(s, reg);
996 break;
997 case JCC_P:
998 cc = gen_prepare_eflags_p(s, reg);
999 break;
1000 case JCC_L:
1001 gen_compute_eflags(s);
1002 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1003 reg = cpu_tmp0;
1005 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1006 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1007 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1008 .mask = CC_S };
1009 break;
1010 default:
1011 case JCC_LE:
1012 gen_compute_eflags(s);
1013 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1014 reg = cpu_tmp0;
1016 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1017 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1018 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1019 .mask = CC_S | CC_Z };
1020 break;
1022 break;
1025 if (inv) {
1026 cc.cond = tcg_invert_cond(cc.cond);
1028 return cc;
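/* Store the value (0 or 1) of jump-opcode condition 'b' into 'reg'. */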
1031 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1033 CCPrepare cc = gen_prepare_cc(s, b, reg);
1035 if (cc.no_setcond) {
1036 if (cc.cond == TCG_COND_EQ) {
1037 tcg_gen_xori_tl(reg, cc.reg, 1);
1038 } else {
1039 tcg_gen_mov_tl(reg, cc.reg);
1041 return;
1044 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1045 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1046 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1047 tcg_gen_andi_tl(reg, reg, 1);
1048 return;
1050 if (cc.mask != -1) {
1051 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1052 cc.reg = reg;
1054 if (cc.use_reg2) {
1055 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1056 } else {
1057 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1061 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1063 gen_setcc1(s, JCC_B << 1, reg);
1066 /* generate a conditional jump to label 'l1' according to jump opcode
1067    value 'b'. In the fast case, T0 is guaranteed not to be used. */
1068 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1070 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1072 if (cc.mask != -1) {
1073 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1074 cc.reg = cpu_T[0];
1076 if (cc.use_reg2) {
1077 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1078 } else {
1079 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1083 /* Generate a conditional jump to label 'l1' according to jump opcode
1084    value 'b'. In the fast case, T0 is guaranteed not to be used.
1085 A translation block must end soon. */
1086 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1088 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1090 gen_update_cc_op(s);
1091 if (cc.mask != -1) {
1092 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1093 cc.reg = cpu_T[0];
1095 set_cc_op(s, CC_OP_DYNAMIC);
1096 if (cc.use_reg2) {
1097 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1098 } else {
1099 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1103 /* XXX: does not work with gdbstub "ice" single step - not a
1104 serious problem */
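/* Emit the "exit if ECX == 0" test used by REP string operations: when ECX
   is zero, jump straight to next_eip.  The returned label may be branched
   to later in order to terminate the loop. */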
1105 static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1107 TCGLabel *l1 = gen_new_label();
1108 TCGLabel *l2 = gen_new_label();
1109 gen_op_jnz_ecx(s->aflag, l1);
1110 gen_set_label(l2);
1111 gen_jmp_tb(s, next_eip, 1);
1112 gen_set_label(l1);
1113 return l2;
1116 static inline void gen_stos(DisasContext *s, TCGMemOp ot)
1118 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
1119 gen_string_movl_A0_EDI(s);
1120 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
1121 gen_op_movl_T0_Dshift(ot);
1122 gen_op_add_reg_T0(s->aflag, R_EDI);
1125 static inline void gen_lods(DisasContext *s, TCGMemOp ot)
1127 gen_string_movl_A0_ESI(s);
1128 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1129 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
1130 gen_op_movl_T0_Dshift(ot);
1131 gen_op_add_reg_T0(s->aflag, R_ESI);
1134 static inline void gen_scas(DisasContext *s, TCGMemOp ot)
1136 gen_string_movl_A0_EDI(s);
1137 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
1138 gen_op(s, OP_CMPL, ot, R_EAX);
1139 gen_op_movl_T0_Dshift(ot);
1140 gen_op_add_reg_T0(s->aflag, R_EDI);
1143 static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
1145 gen_string_movl_A0_EDI(s);
1146 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
1147 gen_string_movl_A0_ESI(s);
1148 gen_op(s, OP_CMPL, ot, OR_TMP0);
1149 gen_op_movl_T0_Dshift(ot);
1150 gen_op_add_reg_T0(s->aflag, R_ESI);
1151 gen_op_add_reg_T0(s->aflag, R_EDI);
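/* If I/O breakpoints are enabled, call the helper that checks the accessed
   port against the debug registers. */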
1154 static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
1156 if (s->flags & HF_IOBPT_MASK) {
1157 TCGv_i32 t_size = tcg_const_i32(1 << ot);
1158 TCGv t_next = tcg_const_tl(s->pc - s->cs_base);
1160 gen_helper_bpt_io(cpu_env, t_port, t_size, t_next);
1161 tcg_temp_free_i32(t_size);
1162 tcg_temp_free(t_next);
1167 static inline void gen_ins(DisasContext *s, TCGMemOp ot)
1169 if (s->tb->cflags & CF_USE_ICOUNT) {
1170 gen_io_start();
1172 gen_string_movl_A0_EDI(s);
1173 /* Note: we must do this dummy write first to be restartable in
1174 case of page fault. */
1175 tcg_gen_movi_tl(cpu_T[0], 0);
1176 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
1177 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1178 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1179 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1180 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
1181 gen_op_movl_T0_Dshift(ot);
1182 gen_op_add_reg_T0(s->aflag, R_EDI);
1183 gen_bpt_io(s, cpu_tmp2_i32, ot);
1184 if (s->tb->cflags & CF_USE_ICOUNT) {
1185 gen_io_end();
1189 static inline void gen_outs(DisasContext *s, TCGMemOp ot)
1191 if (s->tb->cflags & CF_USE_ICOUNT) {
1192 gen_io_start();
1194 gen_string_movl_A0_ESI(s);
1195 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1197 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1198 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1199 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1200 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1201 gen_op_movl_T0_Dshift(ot);
1202 gen_op_add_reg_T0(s->aflag, R_ESI);
1203 gen_bpt_io(s, cpu_tmp2_i32, ot);
1204 if (s->tb->cflags & CF_USE_ICOUNT) {
1205 gen_io_end();
1209 /* same method as Valgrind: we generate jumps to the current or next
1210 instruction */
1211 #define GEN_REPZ(op) \
1212 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1213 target_ulong cur_eip, target_ulong next_eip) \
1215 TCGLabel *l2; \
1216 gen_update_cc_op(s); \
1217 l2 = gen_jz_ecx_string(s, next_eip); \
1218 gen_ ## op(s, ot); \
1219 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1220 /* a loop would cause two single step exceptions if ECX = 1 \
1221 before rep string_insn */ \
1222 if (s->repz_opt) \
1223 gen_op_jz_ecx(s->aflag, l2); \
1224 gen_jmp(s, cur_eip); \
1227 #define GEN_REPZ2(op) \
1228 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1229 target_ulong cur_eip, \
1230 target_ulong next_eip, \
1231 int nz) \
1233 TCGLabel *l2; \
1234 gen_update_cc_op(s); \
1235 l2 = gen_jz_ecx_string(s, next_eip); \
1236 gen_ ## op(s, ot); \
1237 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1238 gen_update_cc_op(s); \
1239 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1240 if (s->repz_opt) \
1241 gen_op_jz_ecx(s->aflag, l2); \
1242 gen_jmp(s, cur_eip); \
1245 GEN_REPZ(movs)
1246 GEN_REPZ(stos)
1247 GEN_REPZ(lods)
1248 GEN_REPZ(ins)
1249 GEN_REPZ(outs)
1250 GEN_REPZ2(scas)
1251 GEN_REPZ2(cmps)
1253 static void gen_helper_fp_arith_ST0_FT0(int op)
1255 switch (op) {
1256 case 0:
1257 gen_helper_fadd_ST0_FT0(cpu_env);
1258 break;
1259 case 1:
1260 gen_helper_fmul_ST0_FT0(cpu_env);
1261 break;
1262 case 2:
1263 gen_helper_fcom_ST0_FT0(cpu_env);
1264 break;
1265 case 3:
1266 gen_helper_fcom_ST0_FT0(cpu_env);
1267 break;
1268 case 4:
1269 gen_helper_fsub_ST0_FT0(cpu_env);
1270 break;
1271 case 5:
1272 gen_helper_fsubr_ST0_FT0(cpu_env);
1273 break;
1274 case 6:
1275 gen_helper_fdiv_ST0_FT0(cpu_env);
1276 break;
1277 case 7:
1278 gen_helper_fdivr_ST0_FT0(cpu_env);
1279 break;
1283 /* NOTE the exception in "r" op ordering */
1284 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1286 TCGv_i32 tmp = tcg_const_i32(opreg);
1287 switch (op) {
1288 case 0:
1289 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1290 break;
1291 case 1:
1292 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1293 break;
1294 case 4:
1295 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1296 break;
1297 case 5:
1298 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1299 break;
1300 case 6:
1301 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1302 break;
1303 case 7:
1304 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1305 break;
1309 /* if d == OR_TMP0, it means memory operand (address in A0) */
1310 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
1312 if (d != OR_TMP0) {
1313 gen_op_mov_v_reg(ot, cpu_T[0], d);
1314 } else {
1315 gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
1317 switch(op) {
1318 case OP_ADCL:
1319 gen_compute_eflags_c(s1, cpu_tmp4);
1320 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1321 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1322 gen_op_st_rm_T0_A0(s1, ot, d);
1323 gen_op_update3_cc(cpu_tmp4);
1324 set_cc_op(s1, CC_OP_ADCB + ot);
1325 break;
1326 case OP_SBBL:
1327 gen_compute_eflags_c(s1, cpu_tmp4);
1328 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1329 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1330 gen_op_st_rm_T0_A0(s1, ot, d);
1331 gen_op_update3_cc(cpu_tmp4);
1332 set_cc_op(s1, CC_OP_SBBB + ot);
1333 break;
1334 case OP_ADDL:
1335 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1336 gen_op_st_rm_T0_A0(s1, ot, d);
1337 gen_op_update2_cc();
1338 set_cc_op(s1, CC_OP_ADDB + ot);
1339 break;
1340 case OP_SUBL:
1341 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1342 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1343 gen_op_st_rm_T0_A0(s1, ot, d);
1344 gen_op_update2_cc();
1345 set_cc_op(s1, CC_OP_SUBB + ot);
1346 break;
1347 default:
1348 case OP_ANDL:
1349 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1350 gen_op_st_rm_T0_A0(s1, ot, d);
1351 gen_op_update1_cc();
1352 set_cc_op(s1, CC_OP_LOGICB + ot);
1353 break;
1354 case OP_ORL:
1355 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1356 gen_op_st_rm_T0_A0(s1, ot, d);
1357 gen_op_update1_cc();
1358 set_cc_op(s1, CC_OP_LOGICB + ot);
1359 break;
1360 case OP_XORL:
1361 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1362 gen_op_st_rm_T0_A0(s1, ot, d);
1363 gen_op_update1_cc();
1364 set_cc_op(s1, CC_OP_LOGICB + ot);
1365 break;
1366 case OP_CMPL:
1367 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1368 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1369 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
1370 set_cc_op(s1, CC_OP_SUBB + ot);
1371 break;
1375 /* if d == OR_TMP0, it means memory operand (address in A0) */
1376 static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
1378 if (d != OR_TMP0) {
1379 gen_op_mov_v_reg(ot, cpu_T[0], d);
1380 } else {
1381 gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
1383 gen_compute_eflags_c(s1, cpu_cc_src);
1384 if (c > 0) {
1385 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1386 set_cc_op(s1, CC_OP_INCB + ot);
1387 } else {
1388 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1389 set_cc_op(s1, CC_OP_DECB + ot);
1391 gen_op_st_rm_T0_A0(s1, ot, d);
1392 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
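/* Update the lazy flag state after a shift by a possibly zero count.  The
   flags only change when the count is non zero, so the new CC values are
   selected against the old ones with movcond and CC_OP becomes dynamic. */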
1395 static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
1396 TCGv shm1, TCGv count, bool is_right)
1398 TCGv_i32 z32, s32, oldop;
1399 TCGv z_tl;
1401 /* Store the results into the CC variables. If we know that the
1402 variable must be dead, store unconditionally. Otherwise we'll
1403 need to not disrupt the current contents. */
1404 z_tl = tcg_const_tl(0);
1405 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1406 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1407 result, cpu_cc_dst);
1408 } else {
1409 tcg_gen_mov_tl(cpu_cc_dst, result);
1411 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1412 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1413 shm1, cpu_cc_src);
1414 } else {
1415 tcg_gen_mov_tl(cpu_cc_src, shm1);
1417 tcg_temp_free(z_tl);
1419 /* Get the two potential CC_OP values into temporaries. */
1420 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1421 if (s->cc_op == CC_OP_DYNAMIC) {
1422 oldop = cpu_cc_op;
1423 } else {
1424 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1425 oldop = cpu_tmp3_i32;
1428 /* Conditionally store the CC_OP value. */
1429 z32 = tcg_const_i32(0);
1430 s32 = tcg_temp_new_i32();
1431 tcg_gen_trunc_tl_i32(s32, count);
1432 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1433 tcg_temp_free_i32(z32);
1434 tcg_temp_free_i32(s32);
1436 /* The CC_OP value is no longer predictable. */
1437 set_cc_op(s, CC_OP_DYNAMIC);
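/* SHL/SHR/SAR with a variable count taken from T1.  op1 == OR_TMP0 means a
   memory operand addressed by A0. */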
1440 static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1441 int is_right, int is_arith)
1443 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1445 /* load */
1446 if (op1 == OR_TMP0) {
1447 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1448 } else {
1449 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1452 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1453 tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
1455 if (is_right) {
1456 if (is_arith) {
1457 gen_exts(ot, cpu_T[0]);
1458 tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1459 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1460 } else {
1461 gen_extu(ot, cpu_T[0]);
1462 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1463 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1465 } else {
1466 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1467 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1470 /* store */
1471 gen_op_st_rm_T0_A0(s, ot, op1);
1473 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
1476 static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1477 int is_right, int is_arith)
1479 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1481 /* load */
1482 if (op1 == OR_TMP0)
1483 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1484 else
1485 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1487 op2 &= mask;
1488 if (op2 != 0) {
1489 if (is_right) {
1490 if (is_arith) {
1491 gen_exts(ot, cpu_T[0]);
1492 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1493 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1494 } else {
1495 gen_extu(ot, cpu_T[0]);
1496 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1497 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1499 } else {
1500 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1501 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1505 /* store */
1506 gen_op_st_rm_T0_A0(s, ot, op1);
1508 /* update eflags if non zero shift */
1509 if (op2 != 0) {
1510 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1511 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1512 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
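/* ROL/ROR with a variable count taken from T1.  8 and 16 bit operands are
   replicated first so that a 32 bit rotate produces the right result. */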
1516 static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
1518 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1519 TCGv_i32 t0, t1;
1521 /* load */
1522 if (op1 == OR_TMP0) {
1523 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1524 } else {
1525 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1528 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1530 switch (ot) {
1531 case MO_8:
1532 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1533 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
1534 tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
1535 goto do_long;
1536 case MO_16:
1537 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1538 tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
1539 goto do_long;
1540 do_long:
1541 #ifdef TARGET_X86_64
1542 case MO_32:
1543 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1544 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
1545 if (is_right) {
1546 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1547 } else {
1548 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1550 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1551 break;
1552 #endif
1553 default:
1554 if (is_right) {
1555 tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1556 } else {
1557 tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1559 break;
1562 /* store */
1563 gen_op_st_rm_T0_A0(s, ot, op1);
1565 /* We'll need the flags computed into CC_SRC. */
1566 gen_compute_eflags(s);
1568 /* The value that was "rotated out" is now present at the other end
1569 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1570 since we've computed the flags into CC_SRC, these variables are
1571 currently dead. */
1572 if (is_right) {
1573 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1574 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
1575 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1576 } else {
1577 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1578 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
1580 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1581 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1583 /* Now conditionally store the new CC_OP value. If the shift count
1584 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1585        Otherwise reuse CC_OP_ADCOX which has the C and O flags split out
1586 exactly as we computed above. */
1587 t0 = tcg_const_i32(0);
1588 t1 = tcg_temp_new_i32();
1589 tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1590 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1591 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1592 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1593 cpu_tmp2_i32, cpu_tmp3_i32);
1594 tcg_temp_free_i32(t0);
1595 tcg_temp_free_i32(t1);
1597 /* The CC_OP value is no longer predictable. */
1598 set_cc_op(s, CC_OP_DYNAMIC);
1601 static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1602 int is_right)
1604 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1605 int shift;
1607 /* load */
1608 if (op1 == OR_TMP0) {
1609 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1610 } else {
1611 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1614 op2 &= mask;
1615 if (op2 != 0) {
1616 switch (ot) {
1617 #ifdef TARGET_X86_64
1618 case MO_32:
1619 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1620 if (is_right) {
1621 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1622 } else {
1623 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1625 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1626 break;
1627 #endif
1628 default:
1629 if (is_right) {
1630 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1631 } else {
1632 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1634 break;
1635 case MO_8:
1636 mask = 7;
1637 goto do_shifts;
1638 case MO_16:
1639 mask = 15;
1640 do_shifts:
1641 shift = op2 & mask;
1642 if (is_right) {
1643 shift = mask + 1 - shift;
1645 gen_extu(ot, cpu_T[0]);
1646 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1647 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1648 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1649 break;
1653 /* store */
1654 gen_op_st_rm_T0_A0(s, ot, op1);
1656 if (op2 != 0) {
1657 /* Compute the flags into CC_SRC. */
1658 gen_compute_eflags(s);
1660 /* The value that was "rotated out" is now present at the other end
1661 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1662 since we've computed the flags into CC_SRC, these variables are
1663 currently dead. */
1664 if (is_right) {
1665 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1666 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
1667 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1668 } else {
1669 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1670 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
1672 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1673 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1674 set_cc_op(s, CC_OP_ADCOX);
1678 /* XXX: add faster immediate = 1 case */
1679 static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1680 int is_right)
1682 gen_compute_eflags(s);
1683 assert(s->cc_op == CC_OP_EFLAGS);
1685 /* load */
1686 if (op1 == OR_TMP0)
1687 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1688 else
1689 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1691 if (is_right) {
1692 switch (ot) {
1693 case MO_8:
1694 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1695 break;
1696 case MO_16:
1697 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1698 break;
1699 case MO_32:
1700 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1701 break;
1702 #ifdef TARGET_X86_64
1703 case MO_64:
1704 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1705 break;
1706 #endif
1707 default:
1708 tcg_abort();
1710 } else {
1711 switch (ot) {
1712 case MO_8:
1713 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1714 break;
1715 case MO_16:
1716 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1717 break;
1718 case MO_32:
1719 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1720 break;
1721 #ifdef TARGET_X86_64
1722 case MO_64:
1723 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1724 break;
1725 #endif
1726 default:
1727 tcg_abort();
1730 /* store */
1731 gen_op_st_rm_T0_A0(s, ot, op1);
1734 /* XXX: add faster immediate case */
1735 static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1736 bool is_right, TCGv count_in)
1738 target_ulong mask = (ot == MO_64 ? 63 : 31);
1739 TCGv count;
1741 /* load */
1742 if (op1 == OR_TMP0) {
1743 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1744 } else {
1745 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1748 count = tcg_temp_new();
1749 tcg_gen_andi_tl(count, count_in, mask);
1751 switch (ot) {
1752 case MO_16:
1753 /* Note: we implement the Intel behaviour for shift count > 16.
1754 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1755 portion by constructing it as a 32-bit value. */
1756 if (is_right) {
1757 tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1758 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1759 tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
1760 } else {
1761 tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
1763 /* FALLTHRU */
1764 #ifdef TARGET_X86_64
1765 case MO_32:
1766 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1767 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1768 if (is_right) {
1769 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1770 tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1771 tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1772 } else {
1773 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1774 tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1775 tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1776 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1777 tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1779 break;
1780 #endif
1781 default:
1782 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1783 if (is_right) {
1784 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1786 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1787 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1788 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
1789 } else {
1790 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1791 if (ot == MO_16) {
1792 /* Only needed if count > 16, for Intel behaviour. */
1793 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1794 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
1795 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1798 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1799 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
1800 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
1802 tcg_gen_movi_tl(cpu_tmp4, 0);
1803 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
1804 cpu_tmp4, cpu_T[1]);
1805 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1806 break;
1809 /* store */
1810 gen_op_st_rm_T0_A0(s, ot, op1);
1812 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
1813 tcg_temp_free(count);
1816 static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
1818 if (s != OR_TMP1)
1819 gen_op_mov_v_reg(ot, cpu_T[1], s);
1820 switch(op) {
1821 case OP_ROL:
1822 gen_rot_rm_T1(s1, ot, d, 0);
1823 break;
1824 case OP_ROR:
1825 gen_rot_rm_T1(s1, ot, d, 1);
1826 break;
1827 case OP_SHL:
1828 case OP_SHL1:
1829 gen_shift_rm_T1(s1, ot, d, 0, 0);
1830 break;
1831 case OP_SHR:
1832 gen_shift_rm_T1(s1, ot, d, 1, 0);
1833 break;
1834 case OP_SAR:
1835 gen_shift_rm_T1(s1, ot, d, 1, 1);
1836 break;
1837 case OP_RCL:
1838 gen_rotc_rm_T1(s1, ot, d, 0);
1839 break;
1840 case OP_RCR:
1841 gen_rotc_rm_T1(s1, ot, d, 1);
1842 break;
1846 static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
1848 switch(op) {
1849 case OP_ROL:
1850 gen_rot_rm_im(s1, ot, d, c, 0);
1851 break;
1852 case OP_ROR:
1853 gen_rot_rm_im(s1, ot, d, c, 1);
1854 break;
1855 case OP_SHL:
1856 case OP_SHL1:
1857 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1858 break;
1859 case OP_SHR:
1860 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1861 break;
1862 case OP_SAR:
1863 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1864 break;
1865 default:
1866 /* currently not optimized */
1867 tcg_gen_movi_tl(cpu_T[1], c);
1868 gen_shift(s1, op, ot, d, OR_TMP1);
1869 break;
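/* Decode a ModRM/SIB memory operand starting at s->pc and leave the
   effective address, including any segment base, in cpu_A0. */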
1873 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1875 target_long disp;
1876 int havesib;
1877 int base;
1878 int index;
1879 int scale;
1880 int mod, rm, code, override, must_add_seg;
1881 TCGv sum;
1883 override = s->override;
1884 must_add_seg = s->addseg;
1885 if (override >= 0)
1886 must_add_seg = 1;
1887 mod = (modrm >> 6) & 3;
1888 rm = modrm & 7;
1890 switch (s->aflag) {
1891 case MO_64:
1892 case MO_32:
1893 havesib = 0;
1894 base = rm;
1895 index = -1;
1896 scale = 0;
1898 if (base == 4) {
1899 havesib = 1;
1900 code = cpu_ldub_code(env, s->pc++);
1901 scale = (code >> 6) & 3;
1902 index = ((code >> 3) & 7) | REX_X(s);
1903 if (index == 4) {
1904 index = -1; /* no index */
1906 base = (code & 7);
1908 base |= REX_B(s);
1910 switch (mod) {
1911 case 0:
1912 if ((base & 7) == 5) {
1913 base = -1;
1914 disp = (int32_t)cpu_ldl_code(env, s->pc);
1915 s->pc += 4;
1916 if (CODE64(s) && !havesib) {
1917 disp += s->pc + s->rip_offset;
1919 } else {
1920 disp = 0;
1922 break;
1923 case 1:
1924 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1925 break;
1926 default:
1927 case 2:
1928 disp = (int32_t)cpu_ldl_code(env, s->pc);
1929 s->pc += 4;
1930 break;
1933 /* For correct popl handling with esp. */
1934 if (base == R_ESP && s->popl_esp_hack) {
1935 disp += s->popl_esp_hack;
1938 /* Compute the address, with a minimum number of TCG ops. */
1939 TCGV_UNUSED(sum);
1940 if (index >= 0) {
1941 if (scale == 0) {
1942 sum = cpu_regs[index];
1943 } else {
1944 tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
1945 sum = cpu_A0;
1947 if (base >= 0) {
1948 tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
1949 sum = cpu_A0;
1951 } else if (base >= 0) {
1952 sum = cpu_regs[base];
1954 if (TCGV_IS_UNUSED(sum)) {
1955 tcg_gen_movi_tl(cpu_A0, disp);
1956 } else {
1957 tcg_gen_addi_tl(cpu_A0, sum, disp);
1960 if (must_add_seg) {
1961 if (override < 0) {
1962 if (base == R_EBP || base == R_ESP) {
1963 override = R_SS;
1964 } else {
1965 override = R_DS;
1969 tcg_gen_ld_tl(cpu_tmp0, cpu_env,
1970 offsetof(CPUX86State, segs[override].base));
1971 if (CODE64(s)) {
1972 if (s->aflag == MO_32) {
1973 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
1975 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1976 return;
1979 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1982 if (s->aflag == MO_32) {
1983 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
1985 break;
1987 case MO_16:
1988 switch (mod) {
1989 case 0:
1990 if (rm == 6) {
1991 disp = cpu_lduw_code(env, s->pc);
1992 s->pc += 2;
1993 tcg_gen_movi_tl(cpu_A0, disp);
1994 rm = 0; /* avoid SS override */
1995 goto no_rm;
1996 } else {
1997 disp = 0;
1999 break;
2000 case 1:
2001 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2002 break;
2003 default:
2004 case 2:
2005 disp = (int16_t)cpu_lduw_code(env, s->pc);
2006 s->pc += 2;
2007 break;
2010 sum = cpu_A0;
2011 switch (rm) {
2012 case 0:
2013 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
2014 break;
2015 case 1:
2016 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
2017 break;
2018 case 2:
2019 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
2020 break;
2021 case 3:
2022 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
2023 break;
2024 case 4:
2025 sum = cpu_regs[R_ESI];
2026 break;
2027 case 5:
2028 sum = cpu_regs[R_EDI];
2029 break;
2030 case 6:
2031 sum = cpu_regs[R_EBP];
2032 break;
2033 default:
2034 case 7:
2035 sum = cpu_regs[R_EBX];
2036 break;
2038 tcg_gen_addi_tl(cpu_A0, sum, disp);
2039 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2040 no_rm:
2041 if (must_add_seg) {
2042 if (override < 0) {
2043 if (rm == 2 || rm == 3 || rm == 6) {
2044 override = R_SS;
2045 } else {
2046 override = R_DS;
2049 gen_op_addl_A0_seg(s, override);
2051 break;
2053 default:
2054 tcg_abort();
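/* Skip the memory operand bytes of a ModRM encoding without generating any
   code (used when the operand is decoded but never accessed, e.g. for
   multi-byte NOPs). */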
2058 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2060 int mod, rm, base, code;
2062 mod = (modrm >> 6) & 3;
2063 if (mod == 3)
2064 return;
2065 rm = modrm & 7;
2067 switch (s->aflag) {
2068 case MO_64:
2069 case MO_32:
2070 base = rm;
2072 if (base == 4) {
2073 code = cpu_ldub_code(env, s->pc++);
2074 base = (code & 7);
2077 switch (mod) {
2078 case 0:
2079 if (base == 5) {
2080 s->pc += 4;
2082 break;
2083 case 1:
2084 s->pc++;
2085 break;
2086 default:
2087 case 2:
2088 s->pc += 4;
2089 break;
2091 break;
2093 case MO_16:
2094 switch (mod) {
2095 case 0:
2096 if (rm == 6) {
2097 s->pc += 2;
2099 break;
2100 case 1:
2101 s->pc++;
2102 break;
2103 default:
2104 case 2:
2105 s->pc += 2;
2106 break;
2108 break;
2110 default:
2111 tcg_abort();
2115 /* used for LEA and MOV AX, mem */
2116 static void gen_add_A0_ds_seg(DisasContext *s)
2118 int override, must_add_seg;
2119 must_add_seg = s->addseg;
2120 override = R_DS;
2121 if (s->override >= 0) {
2122 override = s->override;
2123 must_add_seg = 1;
2125 if (must_add_seg) {
2126 #ifdef TARGET_X86_64
2127 if (CODE64(s)) {
2128 gen_op_addq_A0_seg(override);
2129 } else
2130 #endif
2132 gen_op_addl_A0_seg(s, override);
2137 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2138 OR_TMP0 */
2139 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2140 TCGMemOp ot, int reg, int is_store)
2142 int mod, rm;
2144 mod = (modrm >> 6) & 3;
2145 rm = (modrm & 7) | REX_B(s);
2146 if (mod == 3) {
2147 if (is_store) {
2148 if (reg != OR_TMP0)
2149 gen_op_mov_v_reg(ot, cpu_T[0], reg);
2150 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
2151 } else {
2152 gen_op_mov_v_reg(ot, cpu_T[0], rm);
2153 if (reg != OR_TMP0)
2154 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2156 } else {
2157 gen_lea_modrm(env, s, modrm);
2158 if (is_store) {
2159 if (reg != OR_TMP0)
2160 gen_op_mov_v_reg(ot, cpu_T[0], reg);
2161 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2162 } else {
2163 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2164 if (reg != OR_TMP0)
2165 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2170 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2172 uint32_t ret;
2174 switch (ot) {
2175 case MO_8:
2176 ret = cpu_ldub_code(env, s->pc);
2177 s->pc++;
2178 break;
2179 case MO_16:
2180 ret = cpu_lduw_code(env, s->pc);
2181 s->pc += 2;
2182 break;
2183 case MO_32:
2184 #ifdef TARGET_X86_64
2185 case MO_64:
2186 #endif
2187 ret = cpu_ldl_code(env, s->pc);
2188 s->pc += 4;
2189 break;
2190 default:
2191 tcg_abort();
2193 return ret;
2196 static inline int insn_const_size(TCGMemOp ot)
2198 if (ot <= MO_32) {
2199 return 1 << ot;
2200 } else {
2201 return 4;
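/* Jump to 'eip', using direct block chaining (goto_tb/exit_tb) when the
   target lies on the same guest page as the current TB, and a normal end
   of block otherwise. */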
2205 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2207 TranslationBlock *tb;
2208 target_ulong pc;
2210 pc = s->cs_base + eip;
2211 tb = s->tb;
2212 /* NOTE: we handle the case where the TB spans two pages here */
2213 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2214 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2215 /* jump to same page: we can use a direct jump */
2216 tcg_gen_goto_tb(tb_num);
2217 gen_jmp_im(eip);
2218 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
2219 } else {
2220 /* jump to another page: currently not optimized */
2221 gen_jmp_im(eip);
2222 gen_eob(s);
2226 static inline void gen_jcc(DisasContext *s, int b,
2227 target_ulong val, target_ulong next_eip)
2229 TCGLabel *l1, *l2;
2231 if (s->jmp_opt) {
2232 l1 = gen_new_label();
2233 gen_jcc1(s, b, l1);
2235 gen_goto_tb(s, 0, next_eip);
2237 gen_set_label(l1);
2238 gen_goto_tb(s, 1, val);
2239 s->is_jmp = DISAS_TB_JUMP;
2240 } else {
2241 l1 = gen_new_label();
2242 l2 = gen_new_label();
2243 gen_jcc1(s, b, l1);
2245 gen_jmp_im(next_eip);
2246 tcg_gen_br(l2);
2248 gen_set_label(l1);
2249 gen_jmp_im(val);
2250 gen_set_label(l2);
2251 gen_eob(s);
2255 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
2256 int modrm, int reg)
2258 CCPrepare cc;
2260 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2262 cc = gen_prepare_cc(s, b, cpu_T[1]);
2263 if (cc.mask != -1) {
2264 TCGv t0 = tcg_temp_new();
2265 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2266 cc.reg = t0;
2268 if (!cc.use_reg2) {
2269 cc.reg2 = tcg_const_tl(cc.imm);
2272 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2273 cpu_T[0], cpu_regs[reg]);
2274 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
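/* The movcond above selects either the loaded source (cpu_T[0]) or the
   current register value, so the destination is written back
   unconditionally and CMOVcc needs no branch at the TCG level. */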
2276 if (cc.mask != -1) {
2277 tcg_temp_free(cc.reg);
2279 if (!cc.use_reg2) {
2280 tcg_temp_free(cc.reg2);
2284 static inline void gen_op_movl_T0_seg(int seg_reg)
2286 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2287 offsetof(CPUX86State,segs[seg_reg].selector));
2290 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2292 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2293 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2294 offsetof(CPUX86State,segs[seg_reg].selector));
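/* In real and VM86 mode the segment base is architecturally
   selector << 4, hence the shift below. */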
2295 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2296 tcg_gen_st_tl(cpu_T[0], cpu_env,
2297 offsetof(CPUX86State,segs[seg_reg].base));
2300 /* move T0 to seg_reg and compute if the CPU state may change. Never
2301 call this function with seg_reg == R_CS */
2302 static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
2304 if (s->pe && !s->vm86) {
2305 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2306 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2307 /* abort translation because the addseg value may change or
2308 because ss32 may change. For R_SS, translation must always
2309 stop as a special handling must be done to disable hardware
2310 interrupts for the next instruction */
2311 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2312 s->is_jmp = DISAS_TB_JUMP;
2313 } else {
2314 gen_op_movl_seg_T0_vm(seg_reg);
2315 if (seg_reg == R_SS)
2316 s->is_jmp = DISAS_TB_JUMP;
2320 static inline int svm_is_rep(int prefixes)
2322 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2325 static inline void
2326 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2327 uint32_t type, uint64_t param)
2329 /* no SVM activated; fast case */
2330 if (likely(!(s->flags & HF_SVMI_MASK)))
2331 return;
2332 gen_update_cc_op(s);
2333 gen_jmp_im(pc_start - s->cs_base);
2334 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2335 tcg_const_i64(param));
2338 static inline void
2339 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2341 gen_svm_check_intercept_param(s, pc_start, type, 0);
2344 static inline void gen_stack_update(DisasContext *s, int addend)
2346 #ifdef TARGET_X86_64
2347 if (CODE64(s)) {
2348 gen_op_add_reg_im(MO_64, R_ESP, addend);
2349 } else
2350 #endif
2351 if (s->ss32) {
2352 gen_op_add_reg_im(MO_32, R_ESP, addend);
2353 } else {
2354 gen_op_add_reg_im(MO_16, R_ESP, addend);
2358 /* Generate a push. It depends on ss32, addseg and dflag. */
2359 static void gen_push_v(DisasContext *s, TCGv val)
2361 TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
2362 int size = 1 << d_ot;
2363 TCGv new_esp = cpu_A0;
2365 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
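/* ESP is decremented by the operand size first; the width used for the
   final ESP write-back (a_ot) is chosen below from CODE64/ss32. */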
2367 if (CODE64(s)) {
2368 a_ot = MO_64;
2369 } else if (s->ss32) {
2370 a_ot = MO_32;
2371 if (s->addseg) {
2372 new_esp = cpu_tmp4;
2373 tcg_gen_mov_tl(new_esp, cpu_A0);
2374 gen_op_addl_A0_seg(s, R_SS);
2375 } else {
2376 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2378 } else {
2379 a_ot = MO_16;
2380 new_esp = cpu_tmp4;
2381 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2382 tcg_gen_mov_tl(new_esp, cpu_A0);
2383 gen_op_addl_A0_seg(s, R_SS);
2386 gen_op_st_v(s, d_ot, val, cpu_A0);
2387 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
2390 /* two step pop is necessary for precise exceptions */
2391 static TCGMemOp gen_pop_T0(DisasContext *s)
2393 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2394 TCGv addr = cpu_A0;
2396 if (CODE64(s)) {
2397 addr = cpu_regs[R_ESP];
2398 } else if (!s->ss32) {
2399 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
2400 gen_op_addl_A0_seg(s, R_SS);
2401 } else if (s->addseg) {
2402 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
2403 gen_op_addl_A0_seg(s, R_SS);
2404 } else {
2405 tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
2408 gen_op_ld_v(s, d_ot, cpu_T[0], addr);
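/* ESP itself is only adjusted later in gen_pop_update(), so a faulting
   load leaves the stack pointer unchanged (precise exceptions). */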
2409 return d_ot;
2412 static void gen_pop_update(DisasContext *s, TCGMemOp ot)
2414 gen_stack_update(s, 1 << ot);
2417 static void gen_stack_A0(DisasContext *s)
2419 gen_op_movl_A0_reg(R_ESP);
2420 if (!s->ss32)
2421 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2422 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2423 if (s->addseg)
2424 gen_op_addl_A0_seg(s, R_SS);
2427 /* NOTE: 16-bit wrap-around is not fully handled */
2428 static void gen_pusha(DisasContext *s)
2430 int i;
2431 gen_op_movl_A0_reg(R_ESP);
2432 gen_op_addl_A0_im(-(8 << s->dflag));
2433 if (!s->ss32)
2434 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2435 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2436 if (s->addseg)
2437 gen_op_addl_A0_seg(s, R_SS);
2438 for(i = 0;i < 8; i++) {
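/* Stores run from the lowest address upward, so EDI (reg 7) ends up at
   the new top of stack, matching the architectural EAX..EDI push order. */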
2439 gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
2440 gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
2441 gen_op_addl_A0_im(1 << s->dflag);
2443 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2446 /* NOTE: 16-bit wrap-around is not fully handled */
2447 static void gen_popa(DisasContext *s)
2449 int i;
2450 gen_op_movl_A0_reg(R_ESP);
2451 if (!s->ss32)
2452 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2453 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2454 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
2455 if (s->addseg)
2456 gen_op_addl_A0_seg(s, R_SS);
2457 for(i = 0;i < 8; i++) {
2458 /* ESP is not reloaded */
2459 if (i != 3) {
2460 gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
2461 gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
2463 gen_op_addl_A0_im(1 << s->dflag);
2465 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2468 static void gen_enter(DisasContext *s, int esp_addend, int level)
2470 TCGMemOp ot = mo_pushpop(s, s->dflag);
2471 int opsize = 1 << ot;
2473 level &= 0x1f;
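/* The ENTER nesting level is architecturally taken modulo 32. */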
2474 #ifdef TARGET_X86_64
2475 if (CODE64(s)) {
2476 gen_op_movl_A0_reg(R_ESP);
2477 gen_op_addq_A0_im(-opsize);
2478 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2480 /* push bp */
2481 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2482 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2483 if (level) {
2484 /* XXX: must save state */
2485 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2486 tcg_const_i32((ot == MO_64)),
2487 cpu_T[1]);
2489 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2490 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2491 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
2492 } else
2493 #endif
2495 gen_op_movl_A0_reg(R_ESP);
2496 gen_op_addl_A0_im(-opsize);
2497 if (!s->ss32)
2498 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2499 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2500 if (s->addseg)
2501 gen_op_addl_A0_seg(s, R_SS);
2502 /* push bp */
2503 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2504 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2505 if (level) {
2506 /* XXX: must save state */
2507 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2508 tcg_const_i32(s->dflag - 1),
2509 cpu_T[1]);
2511 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2512 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2513 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2517 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2519 gen_update_cc_op(s);
2520 gen_jmp_im(cur_eip);
2521 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2522 s->is_jmp = DISAS_TB_JUMP;
2525 /* an interrupt is different from an exception because of the
2526 privilege checks */
2527 static void gen_interrupt(DisasContext *s, int intno,
2528 target_ulong cur_eip, target_ulong next_eip)
2530 gen_update_cc_op(s);
2531 gen_jmp_im(cur_eip);
2532 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2533 tcg_const_i32(next_eip - cur_eip));
2534 s->is_jmp = DISAS_TB_JUMP;
2537 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2539 gen_update_cc_op(s);
2540 gen_jmp_im(cur_eip);
2541 gen_helper_debug(cpu_env);
2542 s->is_jmp = DISAS_TB_JUMP;
2545 /* generate a generic end of block. Trace exception is also generated
2546 if needed */
2547 static void gen_eob(DisasContext *s)
2549 gen_update_cc_op(s);
2550 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2551 gen_helper_reset_inhibit_irq(cpu_env);
2553 if (s->tb->flags & HF_RF_MASK) {
2554 gen_helper_reset_rf(cpu_env);
2556 if (s->singlestep_enabled) {
2557 gen_helper_debug(cpu_env);
2558 } else if (s->tf) {
2559 gen_helper_single_step(cpu_env);
2560 } else {
2561 tcg_gen_exit_tb(0);
2563 s->is_jmp = DISAS_TB_JUMP;
2566 /* generate a jump to eip. No segment change must happen before as a
2567 direct call to the next block may occur */
2568 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2570 gen_update_cc_op(s);
2571 set_cc_op(s, CC_OP_DYNAMIC);
2572 if (s->jmp_opt) {
2573 gen_goto_tb(s, tb_num, eip);
2574 s->is_jmp = DISAS_TB_JUMP;
2575 } else {
2576 gen_jmp_im(eip);
2577 gen_eob(s);
2581 static void gen_jmp(DisasContext *s, target_ulong eip)
2583 gen_jmp_tb(s, eip, 0);
2586 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2588 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2589 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2592 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2594 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2595 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
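/* The "o" variants below transfer a full 128-bit XMM value as two
   little-endian 64-bit halves at A0 and A0 + 8. */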
2598 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2600 int mem_index = s->mem_index;
2601 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2602 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2603 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2604 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2605 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2608 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2610 int mem_index = s->mem_index;
2611 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
2612 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2613 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2614 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
2615 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2618 static inline void gen_op_movo(int d_offset, int s_offset)
2620 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(0)));
2621 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(0)));
2622 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(ZMMReg, ZMM_Q(1)));
2623 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(ZMMReg, ZMM_Q(1)));
2626 static inline void gen_op_movq(int d_offset, int s_offset)
2628 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2629 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2632 static inline void gen_op_movl(int d_offset, int s_offset)
2634 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2635 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2638 static inline void gen_op_movq_env_0(int d_offset)
2640 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2641 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2644 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2645 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2646 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2647 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2648 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2649 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2650 TCGv_i32 val);
2651 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2652 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2653 TCGv val);
2655 #define SSE_SPECIAL ((void *)1)
2656 #define SSE_DUMMY ((void *)2)
2658 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2659 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2660 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
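/* sse_op_table1 is indexed by the second opcode byte and by b1, where b1
   encodes the mandatory prefix (0: none, 1: 66, 2: F3, 3: F2) as computed
   at the top of gen_sse() below.  SSE_FOP rows follow the same order:
   ps, pd, ss, sd. */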
2662 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2663 /* 3DNow! extensions */
2664 [0x0e] = { SSE_DUMMY }, /* femms */
2665 [0x0f] = { SSE_DUMMY }, /* pf... */
2666 /* pure SSE operations */
2667 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2668 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2669 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2670 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2671 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2672 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2673 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2674 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2676 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2677 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2678 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2679 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2680 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2681 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2682 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2683 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2684 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2685 [0x51] = SSE_FOP(sqrt),
2686 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2687 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2688 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2689 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2690 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2691 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2692 [0x58] = SSE_FOP(add),
2693 [0x59] = SSE_FOP(mul),
2694 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2695 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2696 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2697 [0x5c] = SSE_FOP(sub),
2698 [0x5d] = SSE_FOP(min),
2699 [0x5e] = SSE_FOP(div),
2700 [0x5f] = SSE_FOP(max),
2702 [0xc2] = SSE_FOP(cmpeq),
2703 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2704 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2706 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2707 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2708 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2710 /* MMX ops and their SSE extensions */
2711 [0x60] = MMX_OP2(punpcklbw),
2712 [0x61] = MMX_OP2(punpcklwd),
2713 [0x62] = MMX_OP2(punpckldq),
2714 [0x63] = MMX_OP2(packsswb),
2715 [0x64] = MMX_OP2(pcmpgtb),
2716 [0x65] = MMX_OP2(pcmpgtw),
2717 [0x66] = MMX_OP2(pcmpgtl),
2718 [0x67] = MMX_OP2(packuswb),
2719 [0x68] = MMX_OP2(punpckhbw),
2720 [0x69] = MMX_OP2(punpckhwd),
2721 [0x6a] = MMX_OP2(punpckhdq),
2722 [0x6b] = MMX_OP2(packssdw),
2723 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2724 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2725 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2726 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2727 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2728 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2729 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2730 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2731 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2732 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2733 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2734 [0x74] = MMX_OP2(pcmpeqb),
2735 [0x75] = MMX_OP2(pcmpeqw),
2736 [0x76] = MMX_OP2(pcmpeql),
2737 [0x77] = { SSE_DUMMY }, /* emms */
2738 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2739 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2740 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2741 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2742 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
2743 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2744 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2745 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2746 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2747 [0xd1] = MMX_OP2(psrlw),
2748 [0xd2] = MMX_OP2(psrld),
2749 [0xd3] = MMX_OP2(psrlq),
2750 [0xd4] = MMX_OP2(paddq),
2751 [0xd5] = MMX_OP2(pmullw),
2752 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2753 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2754 [0xd8] = MMX_OP2(psubusb),
2755 [0xd9] = MMX_OP2(psubusw),
2756 [0xda] = MMX_OP2(pminub),
2757 [0xdb] = MMX_OP2(pand),
2758 [0xdc] = MMX_OP2(paddusb),
2759 [0xdd] = MMX_OP2(paddusw),
2760 [0xde] = MMX_OP2(pmaxub),
2761 [0xdf] = MMX_OP2(pandn),
2762 [0xe0] = MMX_OP2(pavgb),
2763 [0xe1] = MMX_OP2(psraw),
2764 [0xe2] = MMX_OP2(psrad),
2765 [0xe3] = MMX_OP2(pavgw),
2766 [0xe4] = MMX_OP2(pmulhuw),
2767 [0xe5] = MMX_OP2(pmulhw),
2768 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2769 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2770 [0xe8] = MMX_OP2(psubsb),
2771 [0xe9] = MMX_OP2(psubsw),
2772 [0xea] = MMX_OP2(pminsw),
2773 [0xeb] = MMX_OP2(por),
2774 [0xec] = MMX_OP2(paddsb),
2775 [0xed] = MMX_OP2(paddsw),
2776 [0xee] = MMX_OP2(pmaxsw),
2777 [0xef] = MMX_OP2(pxor),
2778 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2779 [0xf1] = MMX_OP2(psllw),
2780 [0xf2] = MMX_OP2(pslld),
2781 [0xf3] = MMX_OP2(psllq),
2782 [0xf4] = MMX_OP2(pmuludq),
2783 [0xf5] = MMX_OP2(pmaddwd),
2784 [0xf6] = MMX_OP2(psadbw),
2785 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2786 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2787 [0xf8] = MMX_OP2(psubb),
2788 [0xf9] = MMX_OP2(psubw),
2789 [0xfa] = MMX_OP2(psubl),
2790 [0xfb] = MMX_OP2(psubq),
2791 [0xfc] = MMX_OP2(paddb),
2792 [0xfd] = MMX_OP2(paddw),
2793 [0xfe] = MMX_OP2(paddl),
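/* sse_op_table2 covers the immediate-form shift groups 0F 71/72/73: the
   row base is ((opcode - 1) & 3) * 8 (0 for 0F 71, 8 for 0F 72, 16 for
   0F 73) plus the ModRM reg field, and the column picks the MMX (0) or
   XMM (1) helper. */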
2796 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2797 [0 + 2] = MMX_OP2(psrlw),
2798 [0 + 4] = MMX_OP2(psraw),
2799 [0 + 6] = MMX_OP2(psllw),
2800 [8 + 2] = MMX_OP2(psrld),
2801 [8 + 4] = MMX_OP2(psrad),
2802 [8 + 6] = MMX_OP2(pslld),
2803 [16 + 2] = MMX_OP2(psrlq),
2804 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2805 [16 + 6] = MMX_OP2(psllq),
2806 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
2809 static const SSEFunc_0_epi sse_op_table3ai[] = {
2810 gen_helper_cvtsi2ss,
2811 gen_helper_cvtsi2sd
2814 #ifdef TARGET_X86_64
2815 static const SSEFunc_0_epl sse_op_table3aq[] = {
2816 gen_helper_cvtsq2ss,
2817 gen_helper_cvtsq2sd
2819 #endif
2821 static const SSEFunc_i_ep sse_op_table3bi[] = {
2822 gen_helper_cvttss2si,
2823 gen_helper_cvtss2si,
2824 gen_helper_cvttsd2si,
2825 gen_helper_cvtsd2si
2828 #ifdef TARGET_X86_64
2829 static const SSEFunc_l_ep sse_op_table3bq[] = {
2830 gen_helper_cvttss2sq,
2831 gen_helper_cvtss2sq,
2832 gen_helper_cvttsd2sq,
2833 gen_helper_cvtsd2sq
2835 #endif
2837 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2838 SSE_FOP(cmpeq),
2839 SSE_FOP(cmplt),
2840 SSE_FOP(cmple),
2841 SSE_FOP(cmpunord),
2842 SSE_FOP(cmpneq),
2843 SSE_FOP(cmpnlt),
2844 SSE_FOP(cmpnle),
2845 SSE_FOP(cmpord),
2848 static const SSEFunc_0_epp sse_op_table5[256] = {
2849 [0x0c] = gen_helper_pi2fw,
2850 [0x0d] = gen_helper_pi2fd,
2851 [0x1c] = gen_helper_pf2iw,
2852 [0x1d] = gen_helper_pf2id,
2853 [0x8a] = gen_helper_pfnacc,
2854 [0x8e] = gen_helper_pfpnacc,
2855 [0x90] = gen_helper_pfcmpge,
2856 [0x94] = gen_helper_pfmin,
2857 [0x96] = gen_helper_pfrcp,
2858 [0x97] = gen_helper_pfrsqrt,
2859 [0x9a] = gen_helper_pfsub,
2860 [0x9e] = gen_helper_pfadd,
2861 [0xa0] = gen_helper_pfcmpgt,
2862 [0xa4] = gen_helper_pfmax,
2863 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2864 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2865 [0xaa] = gen_helper_pfsubr,
2866 [0xae] = gen_helper_pfacc,
2867 [0xb0] = gen_helper_pfcmpeq,
2868 [0xb4] = gen_helper_pfmul,
2869 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2870 [0xb7] = gen_helper_pmulhrw_mmx,
2871 [0xbb] = gen_helper_pswapd,
2872 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2875 struct SSEOpHelper_epp {
2876 SSEFunc_0_epp op[2];
2877 uint32_t ext_mask;
2880 struct SSEOpHelper_eppi {
2881 SSEFunc_0_eppi op[2];
2882 uint32_t ext_mask;
2885 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2886 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2887 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2888 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2889 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2890 CPUID_EXT_PCLMULQDQ }
2891 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
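/* ext_mask in the two tables below is matched against the guest CPUID
   feature bits before the helper may be used (see the table6/table7
   lookups in gen_sse). */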
2893 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2894 [0x00] = SSSE3_OP(pshufb),
2895 [0x01] = SSSE3_OP(phaddw),
2896 [0x02] = SSSE3_OP(phaddd),
2897 [0x03] = SSSE3_OP(phaddsw),
2898 [0x04] = SSSE3_OP(pmaddubsw),
2899 [0x05] = SSSE3_OP(phsubw),
2900 [0x06] = SSSE3_OP(phsubd),
2901 [0x07] = SSSE3_OP(phsubsw),
2902 [0x08] = SSSE3_OP(psignb),
2903 [0x09] = SSSE3_OP(psignw),
2904 [0x0a] = SSSE3_OP(psignd),
2905 [0x0b] = SSSE3_OP(pmulhrsw),
2906 [0x10] = SSE41_OP(pblendvb),
2907 [0x14] = SSE41_OP(blendvps),
2908 [0x15] = SSE41_OP(blendvpd),
2909 [0x17] = SSE41_OP(ptest),
2910 [0x1c] = SSSE3_OP(pabsb),
2911 [0x1d] = SSSE3_OP(pabsw),
2912 [0x1e] = SSSE3_OP(pabsd),
2913 [0x20] = SSE41_OP(pmovsxbw),
2914 [0x21] = SSE41_OP(pmovsxbd),
2915 [0x22] = SSE41_OP(pmovsxbq),
2916 [0x23] = SSE41_OP(pmovsxwd),
2917 [0x24] = SSE41_OP(pmovsxwq),
2918 [0x25] = SSE41_OP(pmovsxdq),
2919 [0x28] = SSE41_OP(pmuldq),
2920 [0x29] = SSE41_OP(pcmpeqq),
2921 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2922 [0x2b] = SSE41_OP(packusdw),
2923 [0x30] = SSE41_OP(pmovzxbw),
2924 [0x31] = SSE41_OP(pmovzxbd),
2925 [0x32] = SSE41_OP(pmovzxbq),
2926 [0x33] = SSE41_OP(pmovzxwd),
2927 [0x34] = SSE41_OP(pmovzxwq),
2928 [0x35] = SSE41_OP(pmovzxdq),
2929 [0x37] = SSE42_OP(pcmpgtq),
2930 [0x38] = SSE41_OP(pminsb),
2931 [0x39] = SSE41_OP(pminsd),
2932 [0x3a] = SSE41_OP(pminuw),
2933 [0x3b] = SSE41_OP(pminud),
2934 [0x3c] = SSE41_OP(pmaxsb),
2935 [0x3d] = SSE41_OP(pmaxsd),
2936 [0x3e] = SSE41_OP(pmaxuw),
2937 [0x3f] = SSE41_OP(pmaxud),
2938 [0x40] = SSE41_OP(pmulld),
2939 [0x41] = SSE41_OP(phminposuw),
2940 [0xdb] = AESNI_OP(aesimc),
2941 [0xdc] = AESNI_OP(aesenc),
2942 [0xdd] = AESNI_OP(aesenclast),
2943 [0xde] = AESNI_OP(aesdec),
2944 [0xdf] = AESNI_OP(aesdeclast),
2947 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2948 [0x08] = SSE41_OP(roundps),
2949 [0x09] = SSE41_OP(roundpd),
2950 [0x0a] = SSE41_OP(roundss),
2951 [0x0b] = SSE41_OP(roundsd),
2952 [0x0c] = SSE41_OP(blendps),
2953 [0x0d] = SSE41_OP(blendpd),
2954 [0x0e] = SSE41_OP(pblendw),
2955 [0x0f] = SSSE3_OP(palignr),
2956 [0x14] = SSE41_SPECIAL, /* pextrb */
2957 [0x15] = SSE41_SPECIAL, /* pextrw */
2958 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2959 [0x17] = SSE41_SPECIAL, /* extractps */
2960 [0x20] = SSE41_SPECIAL, /* pinsrb */
2961 [0x21] = SSE41_SPECIAL, /* insertps */
2962 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2963 [0x40] = SSE41_OP(dpps),
2964 [0x41] = SSE41_OP(dppd),
2965 [0x42] = SSE41_OP(mpsadbw),
2966 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2967 [0x60] = SSE42_OP(pcmpestrm),
2968 [0x61] = SSE42_OP(pcmpestri),
2969 [0x62] = SSE42_OP(pcmpistrm),
2970 [0x63] = SSE42_OP(pcmpistri),
2971 [0xdf] = AESNI_OP(aeskeygenassist),
2974 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2975 target_ulong pc_start, int rex_r)
2977 int b1, op1_offset, op2_offset, is_xmm, val;
2978 int modrm, mod, rm, reg;
2979 SSEFunc_0_epp sse_fn_epp;
2980 SSEFunc_0_eppi sse_fn_eppi;
2981 SSEFunc_0_ppi sse_fn_ppi;
2982 SSEFunc_0_eppt sse_fn_eppt;
2983 TCGMemOp ot;
2985 b &= 0xff;
2986 if (s->prefix & PREFIX_DATA)
2987 b1 = 1;
2988 else if (s->prefix & PREFIX_REPZ)
2989 b1 = 2;
2990 else if (s->prefix & PREFIX_REPNZ)
2991 b1 = 3;
2992 else
2993 b1 = 0;
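/* Worked example: 66 0F 58 /r selects b1 = 1, so sse_op_table1[0x58][1]
   resolves to gen_helper_addpd (ADDPD); the unprefixed 0F 58 gives
   gen_helper_addps. */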
2994 sse_fn_epp = sse_op_table1[b][b1];
2995 if (!sse_fn_epp) {
2996 goto illegal_op;
2998 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
2999 is_xmm = 1;
3000 } else {
3001 if (b1 == 0) {
3002 /* MMX case */
3003 is_xmm = 0;
3004 } else {
3005 is_xmm = 1;
3008 /* simple MMX/SSE operation */
3009 if (s->flags & HF_TS_MASK) {
3010 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3011 return;
3013 if (s->flags & HF_EM_MASK) {
3014 illegal_op:
3015 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3016 return;
3018 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3019 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3020 goto illegal_op;
3021 if (b == 0x0e) {
3022 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3023 goto illegal_op;
3024 /* femms */
3025 gen_helper_emms(cpu_env);
3026 return;
3028 if (b == 0x77) {
3029 /* emms */
3030 gen_helper_emms(cpu_env);
3031 return;
3033 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3034 the static cpu state) */
3035 if (!is_xmm) {
3036 gen_helper_enter_mmx(cpu_env);
3039 modrm = cpu_ldub_code(env, s->pc++);
3040 reg = ((modrm >> 3) & 7);
3041 if (is_xmm)
3042 reg |= rex_r;
3043 mod = (modrm >> 6) & 3;
3044 if (sse_fn_epp == SSE_SPECIAL) {
3045 b |= (b1 << 8);
3046 switch(b) {
3047 case 0x0e7: /* movntq */
3048 if (mod == 3)
3049 goto illegal_op;
3050 gen_lea_modrm(env, s, modrm);
3051 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3052 break;
3053 case 0x1e7: /* movntdq */
3054 case 0x02b: /* movntps */
3055 case 0x12b: /* movntpd */
3056 if (mod == 3)
3057 goto illegal_op;
3058 gen_lea_modrm(env, s, modrm);
3059 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3060 break;
3061 case 0x3f0: /* lddqu */
3062 if (mod == 3)
3063 goto illegal_op;
3064 gen_lea_modrm(env, s, modrm);
3065 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3066 break;
3067 case 0x22b: /* movntss */
3068 case 0x32b: /* movntsd */
3069 if (mod == 3)
3070 goto illegal_op;
3071 gen_lea_modrm(env, s, modrm);
3072 if (b1 & 1) {
3073 gen_stq_env_A0(s, offsetof(CPUX86State,
3074 xmm_regs[reg].ZMM_Q(0)));
3075 } else {
3076 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3077 xmm_regs[reg].ZMM_L(0)));
3078 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3080 break;
3081 case 0x6e: /* movd mm, ea */
3082 #ifdef TARGET_X86_64
3083 if (s->dflag == MO_64) {
3084 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3085 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3086 } else
3087 #endif
3089 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3090 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3091 offsetof(CPUX86State,fpregs[reg].mmx));
3092 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3093 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3095 break;
3096 case 0x16e: /* movd xmm, ea */
3097 #ifdef TARGET_X86_64
3098 if (s->dflag == MO_64) {
3099 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3100 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3101 offsetof(CPUX86State,xmm_regs[reg]));
3102 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3103 } else
3104 #endif
3106 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3107 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3108 offsetof(CPUX86State,xmm_regs[reg]));
3109 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3110 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3112 break;
3113 case 0x6f: /* movq mm, ea */
3114 if (mod != 3) {
3115 gen_lea_modrm(env, s, modrm);
3116 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3117 } else {
3118 rm = (modrm & 7);
3119 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3120 offsetof(CPUX86State,fpregs[rm].mmx));
3121 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3122 offsetof(CPUX86State,fpregs[reg].mmx));
3124 break;
3125 case 0x010: /* movups */
3126 case 0x110: /* movupd */
3127 case 0x028: /* movaps */
3128 case 0x128: /* movapd */
3129 case 0x16f: /* movdqa xmm, ea */
3130 case 0x26f: /* movdqu xmm, ea */
3131 if (mod != 3) {
3132 gen_lea_modrm(env, s, modrm);
3133 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3134 } else {
3135 rm = (modrm & 7) | REX_B(s);
3136 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3137 offsetof(CPUX86State,xmm_regs[rm]));
3139 break;
3140 case 0x210: /* movss xmm, ea */
3141 if (mod != 3) {
3142 gen_lea_modrm(env, s, modrm);
3143 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3144 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3145 tcg_gen_movi_tl(cpu_T[0], 0);
3146 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3147 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3148 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3149 } else {
3150 rm = (modrm & 7) | REX_B(s);
3151 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3152 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3154 break;
3155 case 0x310: /* movsd xmm, ea */
3156 if (mod != 3) {
3157 gen_lea_modrm(env, s, modrm);
3158 gen_ldq_env_A0(s, offsetof(CPUX86State,
3159 xmm_regs[reg].ZMM_Q(0)));
3160 tcg_gen_movi_tl(cpu_T[0], 0);
3161 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3162 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3163 } else {
3164 rm = (modrm & 7) | REX_B(s);
3165 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3166 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3168 break;
3169 case 0x012: /* movlps */
3170 case 0x112: /* movlpd */
3171 if (mod != 3) {
3172 gen_lea_modrm(env, s, modrm);
3173 gen_ldq_env_A0(s, offsetof(CPUX86State,
3174 xmm_regs[reg].ZMM_Q(0)));
3175 } else {
3176 /* movhlps */
3177 rm = (modrm & 7) | REX_B(s);
3178 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3179 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3181 break;
3182 case 0x212: /* movsldup */
3183 if (mod != 3) {
3184 gen_lea_modrm(env, s, modrm);
3185 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3186 } else {
3187 rm = (modrm & 7) | REX_B(s);
3188 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3189 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)));
3190 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3191 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(2)));
3193 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3194 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3195 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3196 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)));
3197 break;
3198 case 0x312: /* movddup */
3199 if (mod != 3) {
3200 gen_lea_modrm(env, s, modrm);
3201 gen_ldq_env_A0(s, offsetof(CPUX86State,
3202 xmm_regs[reg].ZMM_Q(0)));
3203 } else {
3204 rm = (modrm & 7) | REX_B(s);
3205 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3206 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3208 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3209 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3210 break;
3211 case 0x016: /* movhps */
3212 case 0x116: /* movhpd */
3213 if (mod != 3) {
3214 gen_lea_modrm(env, s, modrm);
3215 gen_ldq_env_A0(s, offsetof(CPUX86State,
3216 xmm_regs[reg].ZMM_Q(1)));
3217 } else {
3218 /* movlhps */
3219 rm = (modrm & 7) | REX_B(s);
3220 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)),
3221 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3223 break;
3224 case 0x216: /* movshdup */
3225 if (mod != 3) {
3226 gen_lea_modrm(env, s, modrm);
3227 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3228 } else {
3229 rm = (modrm & 7) | REX_B(s);
3230 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)),
3231 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(1)));
3232 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)),
3233 offsetof(CPUX86State,xmm_regs[rm].ZMM_L(3)));
3235 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)),
3236 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(1)));
3237 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].ZMM_L(2)),
3238 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(3)));
3239 break;
3240 case 0x178:
3241 case 0x378:
3243 int bit_index, field_length;
3245 if (b1 == 1 && reg != 0)
3246 goto illegal_op;
3247 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3248 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3249 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3250 offsetof(CPUX86State,xmm_regs[reg]));
3251 if (b1 == 1)
3252 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3253 tcg_const_i32(bit_index),
3254 tcg_const_i32(field_length));
3255 else
3256 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3257 tcg_const_i32(bit_index),
3258 tcg_const_i32(field_length));
3260 break;
3261 case 0x7e: /* movd ea, mm */
3262 #ifdef TARGET_X86_64
3263 if (s->dflag == MO_64) {
3264 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3265 offsetof(CPUX86State,fpregs[reg].mmx));
3266 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3267 } else
3268 #endif
3270 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3271 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3272 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3274 break;
3275 case 0x17e: /* movd ea, xmm */
3276 #ifdef TARGET_X86_64
3277 if (s->dflag == MO_64) {
3278 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3279 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3280 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3281 } else
3282 #endif
3284 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3285 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3286 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3288 break;
3289 case 0x27e: /* movq xmm, ea */
3290 if (mod != 3) {
3291 gen_lea_modrm(env, s, modrm);
3292 gen_ldq_env_A0(s, offsetof(CPUX86State,
3293 xmm_regs[reg].ZMM_Q(0)));
3294 } else {
3295 rm = (modrm & 7) | REX_B(s);
3296 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3297 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3299 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3300 break;
3301 case 0x7f: /* movq ea, mm */
3302 if (mod != 3) {
3303 gen_lea_modrm(env, s, modrm);
3304 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3305 } else {
3306 rm = (modrm & 7);
3307 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3308 offsetof(CPUX86State,fpregs[reg].mmx));
3310 break;
3311 case 0x011: /* movups */
3312 case 0x111: /* movupd */
3313 case 0x029: /* movaps */
3314 case 0x129: /* movapd */
3315 case 0x17f: /* movdqa ea, xmm */
3316 case 0x27f: /* movdqu ea, xmm */
3317 if (mod != 3) {
3318 gen_lea_modrm(env, s, modrm);
3319 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3320 } else {
3321 rm = (modrm & 7) | REX_B(s);
3322 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3323 offsetof(CPUX86State,xmm_regs[reg]));
3325 break;
3326 case 0x211: /* movss ea, xmm */
3327 if (mod != 3) {
3328 gen_lea_modrm(env, s, modrm);
3329 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3330 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3331 } else {
3332 rm = (modrm & 7) | REX_B(s);
3333 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].ZMM_L(0)),
3334 offsetof(CPUX86State,xmm_regs[reg].ZMM_L(0)));
3336 break;
3337 case 0x311: /* movsd ea, xmm */
3338 if (mod != 3) {
3339 gen_lea_modrm(env, s, modrm);
3340 gen_stq_env_A0(s, offsetof(CPUX86State,
3341 xmm_regs[reg].ZMM_Q(0)));
3342 } else {
3343 rm = (modrm & 7) | REX_B(s);
3344 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3345 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3347 break;
3348 case 0x013: /* movlps */
3349 case 0x113: /* movlpd */
3350 if (mod != 3) {
3351 gen_lea_modrm(env, s, modrm);
3352 gen_stq_env_A0(s, offsetof(CPUX86State,
3353 xmm_regs[reg].ZMM_Q(0)));
3354 } else {
3355 goto illegal_op;
3357 break;
3358 case 0x017: /* movhps */
3359 case 0x117: /* movhpd */
3360 if (mod != 3) {
3361 gen_lea_modrm(env, s, modrm);
3362 gen_stq_env_A0(s, offsetof(CPUX86State,
3363 xmm_regs[reg].ZMM_Q(1)));
3364 } else {
3365 goto illegal_op;
3367 break;
3368 case 0x71: /* shift mm, im */
3369 case 0x72:
3370 case 0x73:
3371 case 0x171: /* shift xmm, im */
3372 case 0x172:
3373 case 0x173:
3374 if (b1 >= 2) {
3375 goto illegal_op;
3377 val = cpu_ldub_code(env, s->pc++);
3378 if (is_xmm) {
3379 tcg_gen_movi_tl(cpu_T[0], val);
3380 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3381 tcg_gen_movi_tl(cpu_T[0], 0);
3382 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(1)));
3383 op1_offset = offsetof(CPUX86State,xmm_t0);
3384 } else {
3385 tcg_gen_movi_tl(cpu_T[0], val);
3386 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3387 tcg_gen_movi_tl(cpu_T[0], 0);
3388 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3389 op1_offset = offsetof(CPUX86State,mmx_t0);
3391 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3392 (((modrm >> 3)) & 7)][b1];
3393 if (!sse_fn_epp) {
3394 goto illegal_op;
3396 if (is_xmm) {
3397 rm = (modrm & 7) | REX_B(s);
3398 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3399 } else {
3400 rm = (modrm & 7);
3401 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3403 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3404 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3405 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3406 break;
3407 case 0x050: /* movmskps */
3408 rm = (modrm & 7) | REX_B(s);
3409 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3410 offsetof(CPUX86State,xmm_regs[rm]));
3411 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3412 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3413 break;
3414 case 0x150: /* movmskpd */
3415 rm = (modrm & 7) | REX_B(s);
3416 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3417 offsetof(CPUX86State,xmm_regs[rm]));
3418 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3419 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3420 break;
3421 case 0x02a: /* cvtpi2ps */
3422 case 0x12a: /* cvtpi2pd */
3423 gen_helper_enter_mmx(cpu_env);
3424 if (mod != 3) {
3425 gen_lea_modrm(env, s, modrm);
3426 op2_offset = offsetof(CPUX86State,mmx_t0);
3427 gen_ldq_env_A0(s, op2_offset);
3428 } else {
3429 rm = (modrm & 7);
3430 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3432 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3433 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3434 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3435 switch(b >> 8) {
3436 case 0x0:
3437 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3438 break;
3439 default:
3440 case 0x1:
3441 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3442 break;
3444 break;
3445 case 0x22a: /* cvtsi2ss */
3446 case 0x32a: /* cvtsi2sd */
3447 ot = mo_64_32(s->dflag);
3448 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3449 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3450 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3451 if (ot == MO_32) {
3452 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3453 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3454 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3455 } else {
3456 #ifdef TARGET_X86_64
3457 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3458 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3459 #else
3460 goto illegal_op;
3461 #endif
3463 break;
3464 case 0x02c: /* cvttps2pi */
3465 case 0x12c: /* cvttpd2pi */
3466 case 0x02d: /* cvtps2pi */
3467 case 0x12d: /* cvtpd2pi */
3468 gen_helper_enter_mmx(cpu_env);
3469 if (mod != 3) {
3470 gen_lea_modrm(env, s, modrm);
3471 op2_offset = offsetof(CPUX86State,xmm_t0);
3472 gen_ldo_env_A0(s, op2_offset);
3473 } else {
3474 rm = (modrm & 7) | REX_B(s);
3475 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3477 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3478 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3479 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3480 switch(b) {
3481 case 0x02c:
3482 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3483 break;
3484 case 0x12c:
3485 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3486 break;
3487 case 0x02d:
3488 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3489 break;
3490 case 0x12d:
3491 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3492 break;
3494 break;
3495 case 0x22c: /* cvttss2si */
3496 case 0x32c: /* cvttsd2si */
3497 case 0x22d: /* cvtss2si */
3498 case 0x32d: /* cvtsd2si */
3499 ot = mo_64_32(s->dflag);
3500 if (mod != 3) {
3501 gen_lea_modrm(env, s, modrm);
3502 if ((b >> 8) & 1) {
3503 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_Q(0)));
3504 } else {
3505 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3506 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
3508 op2_offset = offsetof(CPUX86State,xmm_t0);
3509 } else {
3510 rm = (modrm & 7) | REX_B(s);
3511 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3513 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3514 if (ot == MO_32) {
3515 SSEFunc_i_ep sse_fn_i_ep =
3516 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3517 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3518 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3519 } else {
3520 #ifdef TARGET_X86_64
3521 SSEFunc_l_ep sse_fn_l_ep =
3522 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3523 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3524 #else
3525 goto illegal_op;
3526 #endif
3528 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3529 break;
3530 case 0xc4: /* pinsrw */
3531 case 0x1c4:
3532 s->rip_offset = 1;
3533 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3534 val = cpu_ldub_code(env, s->pc++);
3535 if (b1) {
3536 val &= 7;
3537 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3538 offsetof(CPUX86State,xmm_regs[reg].ZMM_W(val)));
3539 } else {
3540 val &= 3;
3541 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3542 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3544 break;
3545 case 0xc5: /* pextrw */
3546 case 0x1c5:
3547 if (mod != 3)
3548 goto illegal_op;
3549 ot = mo_64_32(s->dflag);
3550 val = cpu_ldub_code(env, s->pc++);
3551 if (b1) {
3552 val &= 7;
3553 rm = (modrm & 7) | REX_B(s);
3554 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3555 offsetof(CPUX86State,xmm_regs[rm].ZMM_W(val)));
3556 } else {
3557 val &= 3;
3558 rm = (modrm & 7);
3559 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3560 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3562 reg = ((modrm >> 3) & 7) | rex_r;
3563 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3564 break;
3565 case 0x1d6: /* movq ea, xmm */
3566 if (mod != 3) {
3567 gen_lea_modrm(env, s, modrm);
3568 gen_stq_env_A0(s, offsetof(CPUX86State,
3569 xmm_regs[reg].ZMM_Q(0)));
3570 } else {
3571 rm = (modrm & 7) | REX_B(s);
3572 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)),
3573 offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)));
3574 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(1)));
3576 break;
3577 case 0x2d6: /* movq2dq */
3578 gen_helper_enter_mmx(cpu_env);
3579 rm = (modrm & 7);
3580 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(0)),
3581 offsetof(CPUX86State,fpregs[rm].mmx));
3582 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].ZMM_Q(1)));
3583 break;
3584 case 0x3d6: /* movdq2q */
3585 gen_helper_enter_mmx(cpu_env);
3586 rm = (modrm & 7) | REX_B(s);
3587 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3588 offsetof(CPUX86State,xmm_regs[rm].ZMM_Q(0)));
3589 break;
3590 case 0xd7: /* pmovmskb */
3591 case 0x1d7:
3592 if (mod != 3)
3593 goto illegal_op;
3594 if (b1) {
3595 rm = (modrm & 7) | REX_B(s);
3596 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3597 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3598 } else {
3599 rm = (modrm & 7);
3600 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3601 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3603 reg = ((modrm >> 3) & 7) | rex_r;
3604 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3605 break;
3607 case 0x138:
3608 case 0x038:
3609 b = modrm;
3610 if ((b & 0xf0) == 0xf0) {
3611 goto do_0f_38_fx;
3613 modrm = cpu_ldub_code(env, s->pc++);
3614 rm = modrm & 7;
3615 reg = ((modrm >> 3) & 7) | rex_r;
3616 mod = (modrm >> 6) & 3;
3617 if (b1 >= 2) {
3618 goto illegal_op;
3621 sse_fn_epp = sse_op_table6[b].op[b1];
3622 if (!sse_fn_epp) {
3623 goto illegal_op;
3625 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3626 goto illegal_op;
3628 if (b1) {
3629 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3630 if (mod == 3) {
3631 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3632 } else {
3633 op2_offset = offsetof(CPUX86State,xmm_t0);
3634 gen_lea_modrm(env, s, modrm);
3635 switch (b) {
3636 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3637 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3638 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3639 gen_ldq_env_A0(s, op2_offset +
3640 offsetof(ZMMReg, ZMM_Q(0)));
3641 break;
3642 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3643 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3644 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3645 s->mem_index, MO_LEUL);
3646 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3647 offsetof(ZMMReg, ZMM_L(0)));
3648 break;
3649 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3650 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3651 s->mem_index, MO_LEUW);
3652 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3653 offsetof(ZMMReg, ZMM_W(0)));
3654 break;
3655 case 0x2a: /* movntdqa */
3656 gen_ldo_env_A0(s, op1_offset);
3657 return;
3658 default:
3659 gen_ldo_env_A0(s, op2_offset);
3662 } else {
3663 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3664 if (mod == 3) {
3665 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3666 } else {
3667 op2_offset = offsetof(CPUX86State,mmx_t0);
3668 gen_lea_modrm(env, s, modrm);
3669 gen_ldq_env_A0(s, op2_offset);
3672 if (sse_fn_epp == SSE_SPECIAL) {
3673 goto illegal_op;
3676 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3677 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3678 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3680 if (b == 0x17) {
3681 set_cc_op(s, CC_OP_EFLAGS);
3683 break;
3685 case 0x238:
3686 case 0x338:
3687 do_0f_38_fx:
3688 /* Various integer extensions at 0f 38 f[0-f]. */
3689 b = modrm | (b1 << 8);
3690 modrm = cpu_ldub_code(env, s->pc++);
3691 reg = ((modrm >> 3) & 7) | rex_r;
3693 switch (b) {
3694 case 0x3f0: /* crc32 Gd,Eb */
3695 case 0x3f1: /* crc32 Gd,Ey */
3696 do_crc32:
3697 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3698 goto illegal_op;
3700 if ((b & 0xff) == 0xf0) {
3701 ot = MO_8;
3702 } else if (s->dflag != MO_64) {
3703 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3704 } else {
3705 ot = MO_64;
3708 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3709 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3710 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3711 cpu_T[0], tcg_const_i32(8 << ot));
3713 ot = mo_64_32(s->dflag);
3714 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3715 break;
3717 case 0x1f0: /* crc32 or movbe */
3718 case 0x1f1:
3719 /* For these insns, the f3 prefix is supposed to take priority
3720 over the 66 prefix, but that is not what the b1 computation
3721 above implements. */
3722 if (s->prefix & PREFIX_REPNZ) {
3723 goto do_crc32;
3725 /* FALLTHRU */
3726 case 0x0f0: /* movbe Gy,My */
3727 case 0x0f1: /* movbe My,Gy */
3728 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3729 goto illegal_op;
3731 if (s->dflag != MO_64) {
3732 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3733 } else {
3734 ot = MO_64;
3737 gen_lea_modrm(env, s, modrm);
3738 if ((b & 1) == 0) {
3739 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3740 s->mem_index, ot | MO_BE);
3741 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3742 } else {
3743 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3744 s->mem_index, ot | MO_BE);
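/* Adding MO_BE to the memory op makes the access big-endian, which is
   exactly the byte swap MOVBE requires relative to the little-endian
   guest registers. */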
3746 break;
3748 case 0x0f2: /* andn Gy, By, Ey */
3749 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3750 || !(s->prefix & PREFIX_VEX)
3751 || s->vex_l != 0) {
3752 goto illegal_op;
3754 ot = mo_64_32(s->dflag);
3755 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3756 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3757 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3758 gen_op_update1_cc();
3759 set_cc_op(s, CC_OP_LOGICB + ot);
3760 break;
3762 case 0x0f7: /* bextr Gy, Ey, By */
3763 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3764 || !(s->prefix & PREFIX_VEX)
3765 || s->vex_l != 0) {
3766 goto illegal_op;
3768 ot = mo_64_32(s->dflag);
3770 TCGv bound, zero;
3772 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3773 /* Extract START, and shift the operand.
3774 Shifts larger than operand size get zeros. */
3775 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3776 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3778 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3779 zero = tcg_const_tl(0);
3780 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3781 cpu_T[0], zero);
3782 tcg_temp_free(zero);
3784 /* Extract the LEN into a mask. Lengths larger than
3785 operand size get all ones. */
3786 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3787 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3788 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3789 cpu_A0, bound);
3790 tcg_temp_free(bound);
3791 tcg_gen_movi_tl(cpu_T[1], 1);
3792 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3793 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3794 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3796 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3797 gen_op_update1_cc();
3798 set_cc_op(s, CC_OP_LOGICB + ot);
3800 break;
3802 case 0x0f5: /* bzhi Gy, Ey, By */
3803 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3804 || !(s->prefix & PREFIX_VEX)
3805 || s->vex_l != 0) {
3806 goto illegal_op;
3808 ot = mo_64_32(s->dflag);
3809 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3810 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3812 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3813 /* Note that since we're using BMILG (in order to get O
3814 cleared) we need to store the inverse into C. */
3815 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3816 cpu_T[1], bound);
3817 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3818 bound, bound, cpu_T[1]);
3819 tcg_temp_free(bound);
3821 tcg_gen_movi_tl(cpu_A0, -1);
3822 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3823 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3824 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3825 gen_op_update1_cc();
3826 set_cc_op(s, CC_OP_BMILGB + ot);
3827 break;
3829 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3830 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3831 || !(s->prefix & PREFIX_VEX)
3832 || s->vex_l != 0) {
3833 goto illegal_op;
3835 ot = mo_64_32(s->dflag);
3836 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3837 switch (ot) {
3838 default:
3839 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3840 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3841 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3842 cpu_tmp2_i32, cpu_tmp3_i32);
3843 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3844 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3845 break;
3846 #ifdef TARGET_X86_64
3847 case MO_64:
3848 tcg_gen_mulu2_i64(cpu_T[0], cpu_T[1],
3849 cpu_T[0], cpu_regs[R_EDX]);
3850 tcg_gen_mov_i64(cpu_regs[s->vex_v], cpu_T[0]);
3851 tcg_gen_mov_i64(cpu_regs[reg], cpu_T[1]);
3852 break;
3853 #endif
3855 break;
3857 case 0x3f5: /* pdep Gy, By, Ey */
3858 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3859 || !(s->prefix & PREFIX_VEX)
3860 || s->vex_l != 0) {
3861 goto illegal_op;
3863 ot = mo_64_32(s->dflag);
3864 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3865 /* Note that by zero-extending the mask operand, we
3866 automatically handle zero-extending the result. */
3867 if (ot == MO_64) {
3868 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3869 } else {
3870 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3872 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3873 break;
3875 case 0x2f5: /* pext Gy, By, Ey */
3876 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3877 || !(s->prefix & PREFIX_VEX)
3878 || s->vex_l != 0) {
3879 goto illegal_op;
3881 ot = mo_64_32(s->dflag);
3882 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3883 /* Note that by zero-extending the mask operand, we
3884 automatically handle zero-extending the result. */
3885 if (ot == MO_64) {
3886 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3887 } else {
3888 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3890 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3891 break;
3893 case 0x1f6: /* adcx Gy, Ey */
3894 case 0x2f6: /* adox Gy, Ey */
3895 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3896 goto illegal_op;
3897 } else {
3898 TCGv carry_in, carry_out, zero;
3899 int end_op;
3901 ot = mo_64_32(s->dflag);
3902 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3904 /* Re-use the carry-out from a previous round. */
3905 TCGV_UNUSED(carry_in);
3906 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3907 switch (s->cc_op) {
3908 case CC_OP_ADCX:
3909 if (b == 0x1f6) {
3910 carry_in = cpu_cc_dst;
3911 end_op = CC_OP_ADCX;
3912 } else {
3913 end_op = CC_OP_ADCOX;
3915 break;
3916 case CC_OP_ADOX:
3917 if (b == 0x1f6) {
3918 end_op = CC_OP_ADCOX;
3919 } else {
3920 carry_in = cpu_cc_src2;
3921 end_op = CC_OP_ADOX;
3923 break;
3924 case CC_OP_ADCOX:
3925 end_op = CC_OP_ADCOX;
3926 carry_in = carry_out;
3927 break;
3928 default:
3929 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3930 break;
3932 /* If we can't reuse carry-out, get it out of EFLAGS. */
3933 if (TCGV_IS_UNUSED(carry_in)) {
3934 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3935 gen_compute_eflags(s);
3937 carry_in = cpu_tmp0;
3938 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3939 ctz32(b == 0x1f6 ? CC_C : CC_O));
3940 tcg_gen_andi_tl(carry_in, carry_in, 1);
3943 switch (ot) {
3944 #ifdef TARGET_X86_64
3945 case MO_32:
3946 /* If we know TL is 64-bit, and we want a 32-bit
3947 result, just do everything in 64-bit arithmetic. */
3948 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3949 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
3950 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
3951 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
3952 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
3953 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
3954 break;
3955 #endif
3956 default:
3957 /* Otherwise compute the carry-out in two steps. */
3958 zero = tcg_const_tl(0);
3959 tcg_gen_add2_tl(cpu_T[0], carry_out,
3960 cpu_T[0], zero,
3961 carry_in, zero);
3962 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3963 cpu_regs[reg], carry_out,
3964 cpu_T[0], zero);
3965 tcg_temp_free(zero);
3966 break;
3968 set_cc_op(s, end_op);
3970 break;
3972 case 0x1f7: /* shlx Gy, Ey, By */
3973 case 0x2f7: /* sarx Gy, Ey, By */
3974 case 0x3f7: /* shrx Gy, Ey, By */
3975 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3976 || !(s->prefix & PREFIX_VEX)
3977 || s->vex_l != 0) {
3978 goto illegal_op;
3980 ot = mo_64_32(s->dflag);
3981 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3982 if (ot == MO_64) {
3983 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
3984 } else {
3985 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
3987 if (b == 0x1f7) {
3988 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3989 } else if (b == 0x2f7) {
3990 if (ot != MO_64) {
3991 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3993 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3994 } else {
3995 if (ot != MO_64) {
3996 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3998 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4000 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4001 break;
4003 case 0x0f3:
4004 case 0x1f3:
4005 case 0x2f3:
4006 case 0x3f3: /* Group 17 */
4007 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4008 || !(s->prefix & PREFIX_VEX)
4009 || s->vex_l != 0) {
4010 goto illegal_op;
4012 ot = mo_64_32(s->dflag);
4013 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4015 switch (reg & 7) {
4016 case 1: /* blsr By,Ey */
4017 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4018 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4019 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4020 gen_op_update2_cc();
4021 set_cc_op(s, CC_OP_BMILGB + ot);
4022 break;
4024 case 2: /* blsmsk By,Ey */
4025 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4026 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4027 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4028 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4029 set_cc_op(s, CC_OP_BMILGB + ot);
4030 break;
4032 case 3: /* blsi By, Ey */
4033 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4034 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4035 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4036 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4037 set_cc_op(s, CC_OP_BMILGB + ot);
4038 break;
4040 default:
4041 goto illegal_op;
4043 break;
4045 default:
4046 goto illegal_op;
4048 break;
4050 case 0x03a:
4051 case 0x13a:
4052 b = modrm;
4053 modrm = cpu_ldub_code(env, s->pc++);
4054 rm = modrm & 7;
4055 reg = ((modrm >> 3) & 7) | rex_r;
4056 mod = (modrm >> 6) & 3;
4057 if (b1 >= 2) {
4058 goto illegal_op;
4061 sse_fn_eppi = sse_op_table7[b].op[b1];
4062 if (!sse_fn_eppi) {
4063 goto illegal_op;
4065 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4066 goto illegal_op;
4068 if (sse_fn_eppi == SSE_SPECIAL) {
4069 ot = mo_64_32(s->dflag);
4070 rm = (modrm & 7) | REX_B(s);
4071 if (mod != 3)
4072 gen_lea_modrm(env, s, modrm);
4073 reg = ((modrm >> 3) & 7) | rex_r;
4074 val = cpu_ldub_code(env, s->pc++);
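/* The immediate byte selects which vector element is extracted or
   inserted (and, for insertps, also the source element and zero mask). */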
4075 switch (b) {
4076 case 0x14: /* pextrb */
4077 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4078 xmm_regs[reg].ZMM_B(val & 15)));
4079 if (mod == 3) {
4080 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4081 } else {
4082 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4083 s->mem_index, MO_UB);
4085 break;
4086 case 0x15: /* pextrw */
4087 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4088 xmm_regs[reg].ZMM_W(val & 7)));
4089 if (mod == 3) {
4090 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4091 } else {
4092 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4093 s->mem_index, MO_LEUW);
4095 break;
4096 case 0x16:
4097 if (ot == MO_32) { /* pextrd */
4098 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4099 offsetof(CPUX86State,
4100 xmm_regs[reg].ZMM_L(val & 3)));
4101 if (mod == 3) {
4102 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4103 } else {
4104 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4105 s->mem_index, MO_LEUL);
4107 } else { /* pextrq */
4108 #ifdef TARGET_X86_64
4109 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4110 offsetof(CPUX86State,
4111 xmm_regs[reg].ZMM_Q(val & 1)));
4112 if (mod == 3) {
4113 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4114 } else {
4115 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4116 s->mem_index, MO_LEQ);
4118 #else
4119 goto illegal_op;
4120 #endif
4122 break;
4123 case 0x17: /* extractps */
4124 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4125 xmm_regs[reg].ZMM_L(val & 3)));
4126 if (mod == 3) {
4127 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4128 } else {
4129 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4130 s->mem_index, MO_LEUL);
4132 break;
4133 case 0x20: /* pinsrb */
4134 if (mod == 3) {
4135 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
4136 } else {
4137 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4138 s->mem_index, MO_UB);
4140 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4141 xmm_regs[reg].ZMM_B(val & 15)));
4142 break;
4143 case 0x21: /* insertps */
4144 if (mod == 3) {
4145 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4146 offsetof(CPUX86State,xmm_regs[rm]
4147 .ZMM_L((val >> 6) & 3)));
4148 } else {
4149 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4150 s->mem_index, MO_LEUL);
4152 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4153 offsetof(CPUX86State,xmm_regs[reg]
4154 .ZMM_L((val >> 4) & 3)));
4155 if ((val >> 0) & 1)
4156 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4157 cpu_env, offsetof(CPUX86State,
4158 xmm_regs[reg].ZMM_L(0)));
4159 if ((val >> 1) & 1)
4160 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4161 cpu_env, offsetof(CPUX86State,
4162 xmm_regs[reg].ZMM_L(1)));
4163 if ((val >> 2) & 1)
4164 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4165 cpu_env, offsetof(CPUX86State,
4166 xmm_regs[reg].ZMM_L(2)));
4167 if ((val >> 3) & 1)
4168 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4169 cpu_env, offsetof(CPUX86State,
4170 xmm_regs[reg].ZMM_L(3)));
4171 break;
4172 case 0x22:
4173 if (ot == MO_32) { /* pinsrd */
4174 if (mod == 3) {
4175 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4176 } else {
4177 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4178 s->mem_index, MO_LEUL);
4180 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4181 offsetof(CPUX86State,
4182 xmm_regs[reg].ZMM_L(val & 3)));
4183 } else { /* pinsrq */
4184 #ifdef TARGET_X86_64
4185 if (mod == 3) {
4186 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4187 } else {
4188 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4189 s->mem_index, MO_LEQ);
4191 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4192 offsetof(CPUX86State,
4193 xmm_regs[reg].ZMM_Q(val & 1)));
4194 #else
4195 goto illegal_op;
4196 #endif
4198 break;
4200 return;
4203 if (b1) {
4204 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4205 if (mod == 3) {
4206 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4207 } else {
4208 op2_offset = offsetof(CPUX86State,xmm_t0);
4209 gen_lea_modrm(env, s, modrm);
4210 gen_ldo_env_A0(s, op2_offset);
4212 } else {
4213 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4214 if (mod == 3) {
4215 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4216 } else {
4217 op2_offset = offsetof(CPUX86State,mmx_t0);
4218 gen_lea_modrm(env, s, modrm);
4219 gen_ldq_env_A0(s, op2_offset);
4222 val = cpu_ldub_code(env, s->pc++);
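/* 0f 3a 60..63 are pcmpestrm/pcmpestri/pcmpistrm/pcmpistri; they report
   their result through EFLAGS, hence CC_OP_EFLAGS. */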
4224 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4225 set_cc_op(s, CC_OP_EFLAGS);
4227 if (s->dflag == MO_64) {
4228 /* The helper must use entire 64-bit gp registers */
4229 val |= 1 << 8;
4233 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4234 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4235 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4236 break;
4238 case 0x33a:
4239 /* Various integer extensions at 0f 3a f[0-f]. */
4240 b = modrm | (b1 << 8);
4241 modrm = cpu_ldub_code(env, s->pc++);
4242 reg = ((modrm >> 3) & 7) | rex_r;
4244 switch (b) {
4245 case 0x3f0: /* rorx Gy,Ey, Ib */
4246 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4247 || !(s->prefix & PREFIX_VEX)
4248 || s->vex_l != 0) {
4249 goto illegal_op;
4251 ot = mo_64_32(s->dflag);
4252 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4253 b = cpu_ldub_code(env, s->pc++);
4254 if (ot == MO_64) {
4255 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4256 } else {
4257 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4258 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4259 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4261 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4262 break;
4264 default:
4265 goto illegal_op;
4267 break;
4269 default:
4270 goto illegal_op;
4272 } else {
4273 /* generic MMX or SSE operation */
4274 switch(b) {
4275 case 0x70: /* pshufx insn */
4276 case 0xc6: /* pshufx insn */
4277 case 0xc2: /* compare insns */
4278 s->rip_offset = 1;
4279 break;
4280 default:
4281 break;
4283 if (is_xmm) {
4284 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4285 if (mod != 3) {
4286 int sz = 4;
4288 gen_lea_modrm(env, s, modrm);
4289 op2_offset = offsetof(CPUX86State,xmm_t0);
4291 switch (b) {
4292 case 0x50 ... 0x5a:
4293 case 0x5c ... 0x5f:
4294 case 0xc2:
4295 /* Most sse scalar operations. */
4296 if (b1 == 2) {
4297 sz = 2;
4298 } else if (b1 == 3) {
4299 sz = 3;
4301 break;
4303 case 0x2e: /* ucomis[sd] */
4304 case 0x2f: /* comis[sd] */
4305 if (b1 == 0) {
4306 sz = 2;
4307 } else {
4308 sz = 3;
4310 break;
4313 switch (sz) {
4314 case 2:
4315 /* 32 bit access */
4316 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4317 tcg_gen_st32_tl(cpu_T[0], cpu_env,
4318 offsetof(CPUX86State,xmm_t0.ZMM_L(0)));
4319 break;
4320 case 3:
4321 /* 64 bit access */
4322 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.ZMM_D(0)));
4323 break;
4324 default:
4325 /* 128 bit access */
4326 gen_ldo_env_A0(s, op2_offset);
4327 break;
4329 } else {
4330 rm = (modrm & 7) | REX_B(s);
4331 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4333 } else {
4334 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4335 if (mod != 3) {
4336 gen_lea_modrm(env, s, modrm);
4337 op2_offset = offsetof(CPUX86State,mmx_t0);
4338 gen_ldq_env_A0(s, op2_offset);
4339 } else {
4340 rm = (modrm & 7);
4341 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4344 switch(b) {
4345 case 0x0f: /* 3DNow! data insns */
4346 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4347 goto illegal_op;
4348 val = cpu_ldub_code(env, s->pc++);
4349 sse_fn_epp = sse_op_table5[val];
4350 if (!sse_fn_epp) {
4351 goto illegal_op;
4353 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4354 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4355 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4356 break;
4357 case 0x70: /* pshufx insn */
4358 case 0xc6: /* pshufx insn */
4359 val = cpu_ldub_code(env, s->pc++);
4360 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4361 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4362 /* XXX: introduce a new table? */
4363 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4364 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4365 break;
4366 case 0xc2:
4367 /* compare insns */
4368 val = cpu_ldub_code(env, s->pc++);
4369 if (val >= 8)
4370 goto illegal_op;
4371 sse_fn_epp = sse_op_table4[val][b1];
4373 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4374 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4375 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4376 break;
4377 case 0xf7:
4378 /* maskmov: we must prepare A0 */
4379 if (mod != 3)
4380 goto illegal_op;
4381 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4382 gen_extu(s->aflag, cpu_A0);
4383 gen_add_A0_ds_seg(s);
4385 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4386 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4387 /* XXX: introduce a new table? */
4388 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4389 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4390 break;
4391 default:
4392 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4393 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4394 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4395 break;
4397 if (b == 0x2e || b == 0x2f) {
4398 set_cc_op(s, CC_OP_EFLAGS);
4403 /* Convert one instruction. s->is_jmp is set if the translation must
4404 be stopped. Returns the next pc value. */
4405 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4406 target_ulong pc_start)
4408 int b, prefixes;
4409 int shift;
4410 TCGMemOp ot, aflag, dflag;
4411 int modrm, reg, rm, mod, op, opreg, val;
4412 target_ulong next_eip, tval;
4413 int rex_w, rex_r;
4415 s->pc = pc_start;
4416 prefixes = 0;
4417 s->override = -1;
4418 rex_w = -1;
4419 rex_r = 0;
4420 #ifdef TARGET_X86_64
4421 s->rex_x = 0;
4422 s->rex_b = 0;
4423 x86_64_hregs = 0;
4424 #endif
4425 s->rip_offset = 0; /* for relative ip address */
4426 s->vex_l = 0;
4427 s->vex_v = 0;
4428 next_byte:
4429 b = cpu_ldub_code(env, s->pc);
4430 s->pc++;
4431 /* Collect prefixes. */
4432 switch (b) {
4433 case 0xf3:
4434 prefixes |= PREFIX_REPZ;
4435 goto next_byte;
4436 case 0xf2:
4437 prefixes |= PREFIX_REPNZ;
4438 goto next_byte;
4439 case 0xf0:
4440 prefixes |= PREFIX_LOCK;
4441 goto next_byte;
4442 case 0x2e:
4443 s->override = R_CS;
4444 goto next_byte;
4445 case 0x36:
4446 s->override = R_SS;
4447 goto next_byte;
4448 case 0x3e:
4449 s->override = R_DS;
4450 goto next_byte;
4451 case 0x26:
4452 s->override = R_ES;
4453 goto next_byte;
4454 case 0x64:
4455 s->override = R_FS;
4456 goto next_byte;
4457 case 0x65:
4458 s->override = R_GS;
4459 goto next_byte;
4460 case 0x66:
4461 prefixes |= PREFIX_DATA;
4462 goto next_byte;
4463 case 0x67:
4464 prefixes |= PREFIX_ADR;
4465 goto next_byte;
4466 #ifdef TARGET_X86_64
4467 case 0x40 ... 0x4f:
4468 if (CODE64(s)) {
4469 /* REX prefix */
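/* W, R, X and B occupy bits 3..0 of the REX byte; R, X and B are shifted
   so the set bit has value 8 and can be ORed directly into the high bit
   of the reg, index and base register numbers. */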
4470 rex_w = (b >> 3) & 1;
4471 rex_r = (b & 0x4) << 1;
4472 s->rex_x = (b & 0x2) << 2;
4473 REX_B(s) = (b & 0x1) << 3;
4474 x86_64_hregs = 1; /* select uniform byte register addressing */
4475 goto next_byte;
4477 break;
4478 #endif
4479 case 0xc5: /* 2-byte VEX */
4480 case 0xc4: /* 3-byte VEX */
4481 /* VEX prefixes are only recognized in 32-bit and 64-bit code outside vm86;
4482 otherwise the 0xc4/0xc5 bytes decode as LES or LDS. */
4483 if (s->code32 && !s->vm86) {
4484 static const int pp_prefix[4] = {
4485 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4487 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4489 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4490 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4491 otherwise the instruction is LES or LDS. */
4492 break;
4494 s->pc++;
4496 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4497 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4498 | PREFIX_LOCK | PREFIX_DATA)) {
4499 goto illegal_op;
4501 #ifdef TARGET_X86_64
4502 if (x86_64_hregs) {
4503 goto illegal_op;
4505 #endif
4506 rex_r = (~vex2 >> 4) & 8;
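/* VEX stores the R, X, B and vvvv fields inverted, hence the bitwise
   complements here and below when extracting them. */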
4507 if (b == 0xc5) {
4508 vex3 = vex2;
4509 b = cpu_ldub_code(env, s->pc++);
4510 } else {
4511 #ifdef TARGET_X86_64
4512 s->rex_x = (~vex2 >> 3) & 8;
4513 s->rex_b = (~vex2 >> 2) & 8;
4514 #endif
4515 vex3 = cpu_ldub_code(env, s->pc++);
4516 rex_w = (vex3 >> 7) & 1;
4517 switch (vex2 & 0x1f) {
4518 case 0x01: /* Implied 0f leading opcode bytes. */
4519 b = cpu_ldub_code(env, s->pc++) | 0x100;
4520 break;
4521 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4522 b = 0x138;
4523 break;
4524 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4525 b = 0x13a;
4526 break;
4527 default: /* Reserved for future use. */
4528 goto illegal_op;
4531 s->vex_v = (~vex3 >> 3) & 0xf;
4532 s->vex_l = (vex3 >> 2) & 1;
4533 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4535 break;
4538 /* Post-process prefixes. */
4539 if (CODE64(s)) {
4540 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4541 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4542 over 0x66 if both are present. */
4543 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4544 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4545 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4546 } else {
4547 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4548 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4549 dflag = MO_32;
4550 } else {
4551 dflag = MO_16;
4553 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4554 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4555 aflag = MO_32;
4556 } else {
4557 aflag = MO_16;
4561 s->prefix = prefixes;
4562 s->aflag = aflag;
4563 s->dflag = dflag;
4565 /* lock generation */
4566 if (prefixes & PREFIX_LOCK)
4567 gen_helper_lock();
4569 /* now check op code */
4570 reswitch:
4571 switch(b) {
4572 case 0x0f:
4573 /**************************/
4574 /* extended op code */
4575 b = cpu_ldub_code(env, s->pc++) | 0x100;
4576 goto reswitch;
4578 /**************************/
4579 /* arith & logic */
4580 case 0x00 ... 0x05:
4581 case 0x08 ... 0x0d:
4582 case 0x10 ... 0x15:
4583 case 0x18 ... 0x1d:
4584 case 0x20 ... 0x25:
4585 case 0x28 ... 0x2d:
4586 case 0x30 ... 0x35:
4587 case 0x38 ... 0x3d:
4589 int op, f, val;
4590 op = (b >> 3) & 7;
4591 f = (b >> 1) & 3;
4593 ot = mo_b_d(b, dflag);
4595 switch(f) {
4596 case 0: /* OP Ev, Gv */
4597 modrm = cpu_ldub_code(env, s->pc++);
4598 reg = ((modrm >> 3) & 7) | rex_r;
4599 mod = (modrm >> 6) & 3;
4600 rm = (modrm & 7) | REX_B(s);
4601 if (mod != 3) {
4602 gen_lea_modrm(env, s, modrm);
4603 opreg = OR_TMP0;
4604 } else if (op == OP_XORL && rm == reg) {
4605 xor_zero:
4606 /* xor reg, reg optimisation */
4607 set_cc_op(s, CC_OP_CLR);
4608 tcg_gen_movi_tl(cpu_T[0], 0);
4609 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4610 break;
4611 } else {
4612 opreg = rm;
4614 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4615 gen_op(s, op, ot, opreg);
4616 break;
4617 case 1: /* OP Gv, Ev */
4618 modrm = cpu_ldub_code(env, s->pc++);
4619 mod = (modrm >> 6) & 3;
4620 reg = ((modrm >> 3) & 7) | rex_r;
4621 rm = (modrm & 7) | REX_B(s);
4622 if (mod != 3) {
4623 gen_lea_modrm(env, s, modrm);
4624 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4625 } else if (op == OP_XORL && rm == reg) {
4626 goto xor_zero;
4627 } else {
4628 gen_op_mov_v_reg(ot, cpu_T[1], rm);
4630 gen_op(s, op, ot, reg);
4631 break;
4632 case 2: /* OP A, Iv */
4633 val = insn_get(env, s, ot);
4634 tcg_gen_movi_tl(cpu_T[1], val);
4635 gen_op(s, op, ot, OR_EAX);
4636 break;
4639 break;
4641 case 0x82:
4642 if (CODE64(s))
4643 goto illegal_op;
4644 case 0x80: /* GRP1 */
4645 case 0x81:
4646 case 0x83:
4648 int val;
4650 ot = mo_b_d(b, dflag);
4652 modrm = cpu_ldub_code(env, s->pc++);
4653 mod = (modrm >> 6) & 3;
4654 rm = (modrm & 7) | REX_B(s);
4655 op = (modrm >> 3) & 7;
4657 if (mod != 3) {
4658 if (b == 0x83)
4659 s->rip_offset = 1;
4660 else
4661 s->rip_offset = insn_const_size(ot);
4662 gen_lea_modrm(env, s, modrm);
4663 opreg = OR_TMP0;
4664 } else {
4665 opreg = rm;
4668 switch(b) {
4669 default:
4670 case 0x80:
4671 case 0x81:
4672 case 0x82:
4673 val = insn_get(env, s, ot);
4674 break;
4675 case 0x83:
4676 val = (int8_t)insn_get(env, s, MO_8);
4677 break;
4679 tcg_gen_movi_tl(cpu_T[1], val);
4680 gen_op(s, op, ot, opreg);
4682 break;
4684 /**************************/
4685 /* inc, dec, and other misc arith */
4686 case 0x40 ... 0x47: /* inc Gv */
4687 ot = dflag;
4688 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4689 break;
4690 case 0x48 ... 0x4f: /* dec Gv */
4691 ot = dflag;
4692 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4693 break;
4694 case 0xf6: /* GRP3 */
4695 case 0xf7:
4696 ot = mo_b_d(b, dflag);
4698 modrm = cpu_ldub_code(env, s->pc++);
4699 mod = (modrm >> 6) & 3;
4700 rm = (modrm & 7) | REX_B(s);
4701 op = (modrm >> 3) & 7;
4702 if (mod != 3) {
4703 if (op == 0)
4704 s->rip_offset = insn_const_size(ot);
4705 gen_lea_modrm(env, s, modrm);
4706 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4707 } else {
4708 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4711 switch(op) {
4712 case 0: /* test */
4713 val = insn_get(env, s, ot);
4714 tcg_gen_movi_tl(cpu_T[1], val);
4715 gen_op_testl_T0_T1_cc();
4716 set_cc_op(s, CC_OP_LOGICB + ot);
4717 break;
4718 case 2: /* not */
4719 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4720 if (mod != 3) {
4721 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4722 } else {
4723 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4725 break;
4726 case 3: /* neg */
4727 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4728 if (mod != 3) {
4729 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4730 } else {
4731 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4733 gen_op_update_neg_cc();
4734 set_cc_op(s, CC_OP_SUBB + ot);
4735 break;
4736 case 4: /* mul */
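/* MUL: unsigned multiply of rAX by the operand; the double-width product
   goes to (r)DX:(r)AX and CF/OF are set when the upper half is non-zero. */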
4737 switch(ot) {
4738 case MO_8:
4739 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4740 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4741 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4742 /* XXX: use 32 bit mul which could be faster */
4743 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4744 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4745 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4746 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4747 set_cc_op(s, CC_OP_MULB);
4748 break;
4749 case MO_16:
4750 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4751 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4752 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4753 /* XXX: use 32 bit mul which could be faster */
4754 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4755 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4756 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4757 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4758 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4759 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4760 set_cc_op(s, CC_OP_MULW);
4761 break;
4762 default:
4763 case MO_32:
4764 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4765 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4766 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4767 cpu_tmp2_i32, cpu_tmp3_i32);
4768 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4769 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4770 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4771 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4772 set_cc_op(s, CC_OP_MULL);
4773 break;
4774 #ifdef TARGET_X86_64
4775 case MO_64:
4776 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4777 cpu_T[0], cpu_regs[R_EAX]);
4778 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4779 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4780 set_cc_op(s, CC_OP_MULQ);
4781 break;
4782 #endif
4784 break;
4785 case 5: /* imul */
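/* One-operand IMUL: signed multiply; CF/OF are set when the product does
   not fit in the low half, detected by comparing the high half with the
   sign extension of the low half. */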
4786 switch(ot) {
4787 case MO_8:
4788 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4789 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4790 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4791 /* XXX: use 32 bit mul which could be faster */
4792 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4793 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4794 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4795 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4796 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4797 set_cc_op(s, CC_OP_MULB);
4798 break;
4799 case MO_16:
4800 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4801 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4802 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4803 /* XXX: use 32 bit mul which could be faster */
4804 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4805 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4806 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4807 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4808 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4809 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4810 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4811 set_cc_op(s, CC_OP_MULW);
4812 break;
4813 default:
4814 case MO_32:
4815 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4816 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4817 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4818 cpu_tmp2_i32, cpu_tmp3_i32);
4819 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4820 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4821 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4822 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4823 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4824 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4825 set_cc_op(s, CC_OP_MULL);
4826 break;
4827 #ifdef TARGET_X86_64
4828 case MO_64:
4829 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4830 cpu_T[0], cpu_regs[R_EAX]);
4831 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4832 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4833 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4834 set_cc_op(s, CC_OP_MULQ);
4835 break;
4836 #endif
4838 break;
4839 case 6: /* div */
4840 switch(ot) {
4841 case MO_8:
4842 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4843 break;
4844 case MO_16:
4845 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4846 break;
4847 default:
4848 case MO_32:
4849 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4850 break;
4851 #ifdef TARGET_X86_64
4852 case MO_64:
4853 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4854 break;
4855 #endif
4857 break;
4858 case 7: /* idiv */
4859 switch(ot) {
4860 case MO_8:
4861 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4862 break;
4863 case MO_16:
4864 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4865 break;
4866 default:
4867 case MO_32:
4868 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4869 break;
4870 #ifdef TARGET_X86_64
4871 case MO_64:
4872 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4873 break;
4874 #endif
4876 break;
4877 default:
4878 goto illegal_op;
4880 break;
4882 case 0xfe: /* GRP4 */
4883 case 0xff: /* GRP5 */
4884 ot = mo_b_d(b, dflag);
4886 modrm = cpu_ldub_code(env, s->pc++);
4887 mod = (modrm >> 6) & 3;
4888 rm = (modrm & 7) | REX_B(s);
4889 op = (modrm >> 3) & 7;
4890 if (op >= 2 && b == 0xfe) {
4891 goto illegal_op;
4893 if (CODE64(s)) {
4894 if (op == 2 || op == 4) {
4895 /* operand size for jumps is 64 bit */
4896 ot = MO_64;
4897 } else if (op == 3 || op == 5) {
4898 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4899 } else if (op == 6) {
4900 /* default push size is 64 bit */
4901 ot = mo_pushpop(s, dflag);
4904 if (mod != 3) {
4905 gen_lea_modrm(env, s, modrm);
4906 if (op >= 2 && op != 3 && op != 5)
4907 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4908 } else {
4909 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4912 switch(op) {
4913 case 0: /* inc Ev */
4914 if (mod != 3)
4915 opreg = OR_TMP0;
4916 else
4917 opreg = rm;
4918 gen_inc(s, ot, opreg, 1);
4919 break;
4920 case 1: /* dec Ev */
4921 if (mod != 3)
4922 opreg = OR_TMP0;
4923 else
4924 opreg = rm;
4925 gen_inc(s, ot, opreg, -1);
4926 break;
4927 case 2: /* call Ev */
4928 /* XXX: optimize if memory (no 'and' is necessary) */
4929 if (dflag == MO_16) {
4930 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4932 next_eip = s->pc - s->cs_base;
4933 tcg_gen_movi_tl(cpu_T[1], next_eip);
4934 gen_push_v(s, cpu_T[1]);
4935 gen_op_jmp_v(cpu_T[0]);
4936 gen_eob(s);
4937 break;
4938 case 3: /* lcall Ev */
4939 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4940 gen_add_A0_im(s, 1 << ot);
4941 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4942 do_lcall:
4943 if (s->pe && !s->vm86) {
4944 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4945 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4946 tcg_const_i32(dflag - 1),
4947 tcg_const_tl(s->pc - s->cs_base));
4948 } else {
4949 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4950 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4951 tcg_const_i32(dflag - 1),
4952 tcg_const_i32(s->pc - s->cs_base));
4954 gen_eob(s);
4955 break;
4956 case 4: /* jmp Ev */
4957 if (dflag == MO_16) {
4958 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4960 gen_op_jmp_v(cpu_T[0]);
4961 gen_eob(s);
4962 break;
4963 case 5: /* ljmp Ev */
4964 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4965 gen_add_A0_im(s, 1 << ot);
4966 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4967 do_ljmp:
4968 if (s->pe && !s->vm86) {
4969 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4970 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4971 tcg_const_tl(s->pc - s->cs_base));
4972 } else {
4973 gen_op_movl_seg_T0_vm(R_CS);
4974 gen_op_jmp_v(cpu_T[1]);
4976 gen_eob(s);
4977 break;
4978 case 6: /* push Ev */
4979 gen_push_v(s, cpu_T[0]);
4980 break;
4981 default:
4982 goto illegal_op;
4984 break;
4986 case 0x84: /* test Ev, Gv */
4987 case 0x85:
4988 ot = mo_b_d(b, dflag);
4990 modrm = cpu_ldub_code(env, s->pc++);
4991 reg = ((modrm >> 3) & 7) | rex_r;
4993 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4994 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4995 gen_op_testl_T0_T1_cc();
4996 set_cc_op(s, CC_OP_LOGICB + ot);
4997 break;
4999 case 0xa8: /* test eAX, Iv */
5000 case 0xa9:
5001 ot = mo_b_d(b, dflag);
5002 val = insn_get(env, s, ot);
5004 gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
5005 tcg_gen_movi_tl(cpu_T[1], val);
5006 gen_op_testl_T0_T1_cc();
5007 set_cc_op(s, CC_OP_LOGICB + ot);
5008 break;
5010 case 0x98: /* CWDE/CBW */
5011 switch (dflag) {
5012 #ifdef TARGET_X86_64
5013 case MO_64:
5014 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5015 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5016 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
5017 break;
5018 #endif
5019 case MO_32:
5020 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5021 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5022 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
5023 break;
5024 case MO_16:
5025 gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
5026 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5027 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
5028 break;
5029 default:
5030 tcg_abort();
5032 break;
5033 case 0x99: /* CDQ/CWD */
5034 switch (dflag) {
5035 #ifdef TARGET_X86_64
5036 case MO_64:
5037 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
5038 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5039 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
5040 break;
5041 #endif
5042 case MO_32:
5043 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5044 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5045 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5046 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
5047 break;
5048 case MO_16:
5049 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5050 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5051 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5052 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
5053 break;
5054 default:
5055 tcg_abort();
5057 break;
5058 case 0x1af: /* imul Gv, Ev */
5059 case 0x69: /* imul Gv, Ev, I */
5060 case 0x6b:
5061 ot = dflag;
5062 modrm = cpu_ldub_code(env, s->pc++);
5063 reg = ((modrm >> 3) & 7) | rex_r;
5064 if (b == 0x69)
5065 s->rip_offset = insn_const_size(ot);
5066 else if (b == 0x6b)
5067 s->rip_offset = 1;
5068 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5069 if (b == 0x69) {
5070 val = insn_get(env, s, ot);
5071 tcg_gen_movi_tl(cpu_T[1], val);
5072 } else if (b == 0x6b) {
5073 val = (int8_t)insn_get(env, s, MO_8);
5074 tcg_gen_movi_tl(cpu_T[1], val);
5075 } else {
5076 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5078 switch (ot) {
5079 #ifdef TARGET_X86_64
5080 case MO_64:
5081 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5082 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5083 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5084 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5085 break;
5086 #endif
5087 case MO_32:
5088 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5089 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5090 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5091 cpu_tmp2_i32, cpu_tmp3_i32);
5092 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5093 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5094 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5095 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5096 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5097 break;
5098 default:
5099 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5100 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5101 /* XXX: use 32 bit mul which could be faster */
5102 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5103 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5104 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5105 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5106 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5107 break;
5109 set_cc_op(s, CC_OP_MULB + ot);
5110 break;
5111 case 0x1c0:
5112 case 0x1c1: /* xadd Ev, Gv */
5113 ot = mo_b_d(b, dflag);
5114 modrm = cpu_ldub_code(env, s->pc++);
5115 reg = ((modrm >> 3) & 7) | rex_r;
5116 mod = (modrm >> 6) & 3;
5117 if (mod == 3) {
5118 rm = (modrm & 7) | REX_B(s);
5119 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5120 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5121 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5122 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5123 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5124 } else {
5125 gen_lea_modrm(env, s, modrm);
5126 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5127 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5128 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5129 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5130 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5132 gen_op_update2_cc();
5133 set_cc_op(s, CC_OP_ADDB + ot);
5134 break;
5135 case 0x1b0:
5136 case 0x1b1: /* cmpxchg Ev, Gv */
5138 TCGLabel *label1, *label2;
5139 TCGv t0, t1, t2, a0;
5141 ot = mo_b_d(b, dflag);
5142 modrm = cpu_ldub_code(env, s->pc++);
5143 reg = ((modrm >> 3) & 7) | rex_r;
5144 mod = (modrm >> 6) & 3;
5145 t0 = tcg_temp_local_new();
5146 t1 = tcg_temp_local_new();
5147 t2 = tcg_temp_local_new();
5148 a0 = tcg_temp_local_new();
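/* CMPXCHG: compare rAX with the destination; if equal, store the source
   (t1), otherwise load the destination value into rAX.  Local temps are
   used because their values must survive the conditional branch below. */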
5149 gen_op_mov_v_reg(ot, t1, reg);
5150 if (mod == 3) {
5151 rm = (modrm & 7) | REX_B(s);
5152 gen_op_mov_v_reg(ot, t0, rm);
5153 } else {
5154 gen_lea_modrm(env, s, modrm);
5155 tcg_gen_mov_tl(a0, cpu_A0);
5156 gen_op_ld_v(s, ot, t0, a0);
5157 rm = 0; /* avoid warning */
5159 label1 = gen_new_label();
5160 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5161 gen_extu(ot, t0);
5162 gen_extu(ot, t2);
5163 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5164 label2 = gen_new_label();
5165 if (mod == 3) {
5166 gen_op_mov_reg_v(ot, R_EAX, t0);
5167 tcg_gen_br(label2);
5168 gen_set_label(label1);
5169 gen_op_mov_reg_v(ot, rm, t1);
5170 } else {
5171 /* perform no-op store cycle like physical cpu; must be
5172 before changing accumulator to ensure idempotency if
5173 the store faults and the instruction is restarted */
5174 gen_op_st_v(s, ot, t0, a0);
5175 gen_op_mov_reg_v(ot, R_EAX, t0);
5176 tcg_gen_br(label2);
5177 gen_set_label(label1);
5178 gen_op_st_v(s, ot, t1, a0);
5180 gen_set_label(label2);
5181 tcg_gen_mov_tl(cpu_cc_src, t0);
5182 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5183 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5184 set_cc_op(s, CC_OP_SUBB + ot);
5185 tcg_temp_free(t0);
5186 tcg_temp_free(t1);
5187 tcg_temp_free(t2);
5188 tcg_temp_free(a0);
5190 break;
5191 case 0x1c7: /* cmpxchg8b */
5192 modrm = cpu_ldub_code(env, s->pc++);
5193 mod = (modrm >> 6) & 3;
5194 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5195 goto illegal_op;
5196 #ifdef TARGET_X86_64
5197 if (dflag == MO_64) {
5198 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5199 goto illegal_op;
5200 gen_lea_modrm(env, s, modrm);
5201 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5202 } else
5203 #endif
5205 if (!(s->cpuid_features & CPUID_CX8))
5206 goto illegal_op;
5207 gen_lea_modrm(env, s, modrm);
5208 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5210 set_cc_op(s, CC_OP_EFLAGS);
5211 break;
5213 /**************************/
5214 /* push/pop */
5215 case 0x50 ... 0x57: /* push */
5216 gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
5217 gen_push_v(s, cpu_T[0]);
5218 break;
5219 case 0x58 ... 0x5f: /* pop */
5220 ot = gen_pop_T0(s);
5221 /* NOTE: order is important for pop %sp */
5222 gen_pop_update(s, ot);
5223 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
5224 break;
5225 case 0x60: /* pusha */
5226 if (CODE64(s))
5227 goto illegal_op;
5228 gen_pusha(s);
5229 break;
5230 case 0x61: /* popa */
5231 if (CODE64(s))
5232 goto illegal_op;
5233 gen_popa(s);
5234 break;
5235 case 0x68: /* push Iv */
5236 case 0x6a:
5237 ot = mo_pushpop(s, dflag);
5238 if (b == 0x68)
5239 val = insn_get(env, s, ot);
5240 else
5241 val = (int8_t)insn_get(env, s, MO_8);
5242 tcg_gen_movi_tl(cpu_T[0], val);
5243 gen_push_v(s, cpu_T[0]);
5244 break;
5245 case 0x8f: /* pop Ev */
5246 modrm = cpu_ldub_code(env, s->pc++);
5247 mod = (modrm >> 6) & 3;
5248 ot = gen_pop_T0(s);
5249 if (mod == 3) {
5250 /* NOTE: order is important for pop %sp */
5251 gen_pop_update(s, ot);
5252 rm = (modrm & 7) | REX_B(s);
5253 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5254 } else {
5255 /* NOTE: order is important too for MMU exceptions */
5256 s->popl_esp_hack = 1 << ot;
5257 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5258 s->popl_esp_hack = 0;
5259 gen_pop_update(s, ot);
5261 break;
5262 case 0xc8: /* enter */
5264 int level;
5265 val = cpu_lduw_code(env, s->pc);
5266 s->pc += 2;
5267 level = cpu_ldub_code(env, s->pc++);
5268 gen_enter(s, val, level);
5270 break;
5271 case 0xc9: /* leave */
5272 /* XXX: exception not precise (ESP is updated before potential exception) */
5273 if (CODE64(s)) {
5274 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
5275 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
5276 } else if (s->ss32) {
5277 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
5278 gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
5279 } else {
5280 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
5281 gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
5283 ot = gen_pop_T0(s);
5284 gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
5285 gen_pop_update(s, ot);
5286 break;
5287 case 0x06: /* push es */
5288 case 0x0e: /* push cs */
5289 case 0x16: /* push ss */
5290 case 0x1e: /* push ds */
5291 if (CODE64(s))
5292 goto illegal_op;
5293 gen_op_movl_T0_seg(b >> 3);
5294 gen_push_v(s, cpu_T[0]);
5295 break;
5296 case 0x1a0: /* push fs */
5297 case 0x1a8: /* push gs */
5298 gen_op_movl_T0_seg((b >> 3) & 7);
5299 gen_push_v(s, cpu_T[0]);
5300 break;
5301 case 0x07: /* pop es */
5302 case 0x17: /* pop ss */
5303 case 0x1f: /* pop ds */
5304 if (CODE64(s))
5305 goto illegal_op;
5306 reg = b >> 3;
5307 ot = gen_pop_T0(s);
5308 gen_movl_seg_T0(s, reg);
5309 gen_pop_update(s, ot);
5310 if (reg == R_SS) {
5311 /* if reg == SS, inhibit interrupts/trace. */
5312 /* If several consecutive instructions disable interrupts, only the
5313 first one sets the inhibit flag */
5314 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5315 gen_helper_set_inhibit_irq(cpu_env);
5316 s->tf = 0;
5318 if (s->is_jmp) {
5319 gen_jmp_im(s->pc - s->cs_base);
5320 gen_eob(s);
5322 break;
5323 case 0x1a1: /* pop fs */
5324 case 0x1a9: /* pop gs */
5325 ot = gen_pop_T0(s);
5326 gen_movl_seg_T0(s, (b >> 3) & 7);
5327 gen_pop_update(s, ot);
5328 if (s->is_jmp) {
5329 gen_jmp_im(s->pc - s->cs_base);
5330 gen_eob(s);
5332 break;
5334 /**************************/
5335 /* mov */
5336 case 0x88:
5337 case 0x89: /* mov Gv, Ev */
5338 ot = mo_b_d(b, dflag);
5339 modrm = cpu_ldub_code(env, s->pc++);
5340 reg = ((modrm >> 3) & 7) | rex_r;
5342 /* generate a generic store */
5343 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5344 break;
5345 case 0xc6:
5346 case 0xc7: /* mov Ev, Iv */
5347 ot = mo_b_d(b, dflag);
5348 modrm = cpu_ldub_code(env, s->pc++);
5349 mod = (modrm >> 6) & 3;
5350 if (mod != 3) {
5351 s->rip_offset = insn_const_size(ot);
5352 gen_lea_modrm(env, s, modrm);
5354 val = insn_get(env, s, ot);
5355 tcg_gen_movi_tl(cpu_T[0], val);
5356 if (mod != 3) {
5357 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5358 } else {
5359 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
5361 break;
5362 case 0x8a:
5363 case 0x8b: /* mov Ev, Gv */
5364 ot = mo_b_d(b, dflag);
5365 modrm = cpu_ldub_code(env, s->pc++);
5366 reg = ((modrm >> 3) & 7) | rex_r;
5368 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5369 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5370 break;
5371 case 0x8e: /* mov seg, Gv */
5372 modrm = cpu_ldub_code(env, s->pc++);
5373 reg = (modrm >> 3) & 7;
5374 if (reg >= 6 || reg == R_CS)
5375 goto illegal_op;
5376 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5377 gen_movl_seg_T0(s, reg);
5378 if (reg == R_SS) {
5379 /* if reg == SS, inhibit interrupts/trace */
5380 /* If several consecutive instructions disable interrupts, only the
5381 first one sets the inhibit flag */
5382 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5383 gen_helper_set_inhibit_irq(cpu_env);
5384 s->tf = 0;
5386 if (s->is_jmp) {
5387 gen_jmp_im(s->pc - s->cs_base);
5388 gen_eob(s);
5390 break;
5391 case 0x8c: /* mov Gv, seg */
5392 modrm = cpu_ldub_code(env, s->pc++);
5393 reg = (modrm >> 3) & 7;
5394 mod = (modrm >> 6) & 3;
5395 if (reg >= 6)
5396 goto illegal_op;
5397 gen_op_movl_T0_seg(reg);
5398 ot = mod == 3 ? dflag : MO_16;
5399 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5400 break;
5402 case 0x1b6: /* movzbS Gv, Eb */
5403 case 0x1b7: /* movzwS Gv, Eb */
5404 case 0x1be: /* movsbS Gv, Eb */
5405 case 0x1bf: /* movswS Gv, Eb */
5407 TCGMemOp d_ot;
5408 TCGMemOp s_ot;
5410 /* d_ot is the size of destination */
5411 d_ot = dflag;
5412 /* ot is the size of source */
5413 ot = (b & 1) + MO_8;
5414 /* s_ot is the sign+size of source */
5415 s_ot = b & 8 ? MO_SIGN | ot : ot;
5417 modrm = cpu_ldub_code(env, s->pc++);
5418 reg = ((modrm >> 3) & 7) | rex_r;
5419 mod = (modrm >> 6) & 3;
5420 rm = (modrm & 7) | REX_B(s);
5422 if (mod == 3) {
5423 gen_op_mov_v_reg(ot, cpu_T[0], rm);
5424 switch (s_ot) {
5425 case MO_UB:
5426 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5427 break;
5428 case MO_SB:
5429 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5430 break;
5431 case MO_UW:
5432 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5433 break;
5434 default:
5435 case MO_SW:
5436 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5437 break;
5439 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5440 } else {
5441 gen_lea_modrm(env, s, modrm);
5442 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
5443 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5446 break;
5448 case 0x8d: /* lea */
5449 ot = dflag;
5450 modrm = cpu_ldub_code(env, s->pc++);
5451 mod = (modrm >> 6) & 3;
5452 if (mod == 3)
5453 goto illegal_op;
5454 reg = ((modrm >> 3) & 7) | rex_r;
5455 /* we must ensure that no segment is added */
5456 s->override = -1;
5457 val = s->addseg;
5458 s->addseg = 0;
5459 gen_lea_modrm(env, s, modrm);
5460 s->addseg = val;
5461 gen_op_mov_reg_v(ot, reg, cpu_A0);
5462 break;
5464 case 0xa0: /* mov EAX, Ov */
5465 case 0xa1:
5466 case 0xa2: /* mov Ov, EAX */
5467 case 0xa3:
5469 target_ulong offset_addr;
5471 ot = mo_b_d(b, dflag);
5472 switch (s->aflag) {
5473 #ifdef TARGET_X86_64
5474 case MO_64:
5475 offset_addr = cpu_ldq_code(env, s->pc);
5476 s->pc += 8;
5477 break;
5478 #endif
5479 default:
5480 offset_addr = insn_get(env, s, s->aflag);
5481 break;
5483 tcg_gen_movi_tl(cpu_A0, offset_addr);
5484 gen_add_A0_ds_seg(s);
5485 if ((b & 2) == 0) {
5486 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
5487 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
5488 } else {
5489 gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
5490 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5493 break;
5494 case 0xd7: /* xlat */
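/* XLAT: AL = byte at DS:[rBX + zero-extended AL] (segment overridable). */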
5495 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5496 tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
5497 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5498 gen_extu(s->aflag, cpu_A0);
5499 gen_add_A0_ds_seg(s);
5500 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
5501 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
5502 break;
5503 case 0xb0 ... 0xb7: /* mov R, Ib */
5504 val = insn_get(env, s, MO_8);
5505 tcg_gen_movi_tl(cpu_T[0], val);
5506 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
5507 break;
5508 case 0xb8 ... 0xbf: /* mov R, Iv */
5509 #ifdef TARGET_X86_64
5510 if (dflag == MO_64) {
5511 uint64_t tmp;
5512 /* 64 bit case */
5513 tmp = cpu_ldq_code(env, s->pc);
5514 s->pc += 8;
5515 reg = (b & 7) | REX_B(s);
5516 tcg_gen_movi_tl(cpu_T[0], tmp);
5517 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
5518 } else
5519 #endif
5521 ot = dflag;
5522 val = insn_get(env, s, ot);
5523 reg = (b & 7) | REX_B(s);
5524 tcg_gen_movi_tl(cpu_T[0], val);
5525 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5527 break;
5529 case 0x91 ... 0x97: /* xchg R, EAX */
5530 do_xchg_reg_eax:
5531 ot = dflag;
5532 reg = (b & 7) | REX_B(s);
5533 rm = R_EAX;
5534 goto do_xchg_reg;
5535 case 0x86:
5536 case 0x87: /* xchg Ev, Gv */
5537 ot = mo_b_d(b, dflag);
5538 modrm = cpu_ldub_code(env, s->pc++);
5539 reg = ((modrm >> 3) & 7) | rex_r;
5540 mod = (modrm >> 6) & 3;
5541 if (mod == 3) {
5542 rm = (modrm & 7) | REX_B(s);
5543 do_xchg_reg:
5544 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5545 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5546 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5547 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5548 } else {
5549 gen_lea_modrm(env, s, modrm);
5550 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5551 /* for xchg, lock is implicit */
5552 if (!(prefixes & PREFIX_LOCK))
5553 gen_helper_lock();
5554 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5555 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5556 if (!(prefixes & PREFIX_LOCK))
5557 gen_helper_unlock();
5558 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5560 break;
5561 case 0xc4: /* les Gv */
5562 /* In CODE64 this is VEX3; see above. */
5563 op = R_ES;
5564 goto do_lxx;
5565 case 0xc5: /* lds Gv */
5566 /* In CODE64 this is VEX2; see above. */
5567 op = R_DS;
5568 goto do_lxx;
5569 case 0x1b2: /* lss Gv */
5570 op = R_SS;
5571 goto do_lxx;
5572 case 0x1b4: /* lfs Gv */
5573 op = R_FS;
5574 goto do_lxx;
5575 case 0x1b5: /* lgs Gv */
5576 op = R_GS;
5577 do_lxx:
5578 ot = dflag != MO_16 ? MO_32 : MO_16;
5579 modrm = cpu_ldub_code(env, s->pc++);
5580 reg = ((modrm >> 3) & 7) | rex_r;
5581 mod = (modrm >> 6) & 3;
5582 if (mod == 3)
5583 goto illegal_op;
5584 gen_lea_modrm(env, s, modrm);
5585 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5586 gen_add_A0_im(s, 1 << ot);
5587 /* load the segment first to handle exceptions properly */
5588 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5589 gen_movl_seg_T0(s, op);
5590 /* then put the data */
5591 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5592 if (s->is_jmp) {
5593 gen_jmp_im(s->pc - s->cs_base);
5594 gen_eob(s);
5596 break;
5598 /************************/
5599 /* shifts */
5600 case 0xc0:
5601 case 0xc1:
5602 /* shift Ev,Ib */
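/* 'shift' selects where the count comes from: 2 = immediate byte,
   1 = the constant 1, 0 = the CL register. */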
5603 shift = 2;
5604 grp2:
5606 ot = mo_b_d(b, dflag);
5607 modrm = cpu_ldub_code(env, s->pc++);
5608 mod = (modrm >> 6) & 3;
5609 op = (modrm >> 3) & 7;
5611 if (mod != 3) {
5612 if (shift == 2) {
5613 s->rip_offset = 1;
5615 gen_lea_modrm(env, s, modrm);
5616 opreg = OR_TMP0;
5617 } else {
5618 opreg = (modrm & 7) | REX_B(s);
5621 /* simpler op */
5622 if (shift == 0) {
5623 gen_shift(s, op, ot, opreg, OR_ECX);
5624 } else {
5625 if (shift == 2) {
5626 shift = cpu_ldub_code(env, s->pc++);
5628 gen_shifti(s, op, ot, opreg, shift);
5631 break;
5632 case 0xd0:
5633 case 0xd1:
5634 /* shift Ev,1 */
5635 shift = 1;
5636 goto grp2;
5637 case 0xd2:
5638 case 0xd3:
5639 /* shift Ev,cl */
5640 shift = 0;
5641 goto grp2;
5643 case 0x1a4: /* shld imm */
5644 op = 0;
5645 shift = 1;
5646 goto do_shiftd;
5647 case 0x1a5: /* shld cl */
5648 op = 0;
5649 shift = 0;
5650 goto do_shiftd;
5651 case 0x1ac: /* shrd imm */
5652 op = 1;
5653 shift = 1;
5654 goto do_shiftd;
5655 case 0x1ad: /* shrd cl */
5656 op = 1;
5657 shift = 0;
5658 do_shiftd:
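/* SHLD/SHRD: double-precision shift; bits vacated in the r/m operand are
   filled from the register operand (loaded into cpu_T[1] below), with op
   selecting left (0) or right (1) and the count coming from an immediate
   or CL depending on 'shift'. */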
5659 ot = dflag;
5660 modrm = cpu_ldub_code(env, s->pc++);
5661 mod = (modrm >> 6) & 3;
5662 rm = (modrm & 7) | REX_B(s);
5663 reg = ((modrm >> 3) & 7) | rex_r;
5664 if (mod != 3) {
5665 gen_lea_modrm(env, s, modrm);
5666 opreg = OR_TMP0;
5667 } else {
5668 opreg = rm;
5670 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5672 if (shift) {
5673 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5674 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5675 tcg_temp_free(imm);
5676 } else {
5677 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5679 break;
5681 /************************/
5682 /* floats */
5683 case 0xd8 ... 0xdf:
5684 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5685 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5686 /* XXX: what to do if illegal op? */
5687 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5688 break;
5690 modrm = cpu_ldub_code(env, s->pc++);
5691 mod = (modrm >> 6) & 3;
5692 rm = modrm & 7;
5693 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
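/* Combine the low three opcode bits (d8..df) with the ModRM reg field
   into a single 6-bit index identifying the x87 operation. */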
5694 if (mod != 3) {
5695 /* memory op */
5696 gen_lea_modrm(env, s, modrm);
5697 switch(op) {
5698 case 0x00 ... 0x07: /* fxxxs */
5699 case 0x10 ... 0x17: /* fixxxl */
5700 case 0x20 ... 0x27: /* fxxxl */
5701 case 0x30 ... 0x37: /* fixxx */
5703 int op1;
5704 op1 = op & 7;
5706 switch(op >> 4) {
5707 case 0:
5708 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5709 s->mem_index, MO_LEUL);
5710 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5711 break;
5712 case 1:
5713 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5714 s->mem_index, MO_LEUL);
5715 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5716 break;
5717 case 2:
5718 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5719 s->mem_index, MO_LEQ);
5720 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5721 break;
5722 case 3:
5723 default:
5724 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5725 s->mem_index, MO_LESW);
5726 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5727 break;
5730 gen_helper_fp_arith_ST0_FT0(op1);
5731 if (op1 == 3) {
5732 /* fcomp needs pop */
5733 gen_helper_fpop(cpu_env);
5736 break;
5737 case 0x08: /* flds */
5738 case 0x0a: /* fsts */
5739 case 0x0b: /* fstps */
5740 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5741 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5742 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5743 switch(op & 7) {
5744 case 0:
5745 switch(op >> 4) {
5746 case 0:
5747 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5748 s->mem_index, MO_LEUL);
5749 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5750 break;
5751 case 1:
5752 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5753 s->mem_index, MO_LEUL);
5754 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5755 break;
5756 case 2:
5757 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5758 s->mem_index, MO_LEQ);
5759 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5760 break;
5761 case 3:
5762 default:
5763 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5764 s->mem_index, MO_LESW);
5765 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5766 break;
5768 break;
5769 case 1:
5770 /* XXX: the corresponding CPUID bit must be tested! */
5771 switch(op >> 4) {
5772 case 1:
5773 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5774 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5775 s->mem_index, MO_LEUL);
5776 break;
5777 case 2:
5778 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5779 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5780 s->mem_index, MO_LEQ);
5781 break;
5782 case 3:
5783 default:
5784 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5785 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5786 s->mem_index, MO_LEUW);
5787 break;
5789 gen_helper_fpop(cpu_env);
5790 break;
5791 default:
5792 switch(op >> 4) {
5793 case 0:
5794 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5795 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5796 s->mem_index, MO_LEUL);
5797 break;
5798 case 1:
5799 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5800 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5801 s->mem_index, MO_LEUL);
5802 break;
5803 case 2:
5804 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5805 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5806 s->mem_index, MO_LEQ);
5807 break;
5808 case 3:
5809 default:
5810 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5811 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5812 s->mem_index, MO_LEUW);
5813 break;
5815 if ((op & 7) == 3)
5816 gen_helper_fpop(cpu_env);
5817 break;
5819 break;
5820 case 0x0c: /* fldenv mem */
5821 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5822 break;
5823 case 0x0d: /* fldcw mem */
5824 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5825 s->mem_index, MO_LEUW);
5826 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5827 break;
5828 case 0x0e: /* fnstenv mem */
5829 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5830 break;
5831 case 0x0f: /* fnstcw mem */
5832 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5833 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5834 s->mem_index, MO_LEUW);
5835 break;
5836 case 0x1d: /* fldt mem */
5837 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5838 break;
5839 case 0x1f: /* fstpt mem */
5840 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5841 gen_helper_fpop(cpu_env);
5842 break;
5843 case 0x2c: /* frstor mem */
5844 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5845 break;
5846 case 0x2e: /* fnsave mem */
5847 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5848 break;
5849 case 0x2f: /* fnstsw mem */
5850 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5851 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5852 s->mem_index, MO_LEUW);
5853 break;
5854 case 0x3c: /* fbld */
5855 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5856 break;
5857 case 0x3e: /* fbstp */
5858 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5859 gen_helper_fpop(cpu_env);
5860 break;
5861 case 0x3d: /* fildll */
5862 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5863 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5864 break;
5865 case 0x3f: /* fistpll */
5866 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5867 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5868 gen_helper_fpop(cpu_env);
5869 break;
5870 default:
5871 goto illegal_op;
5873 } else {
5874 /* register float ops */
5875 opreg = rm;
5877 switch(op) {
5878 case 0x08: /* fld sti */
5879 gen_helper_fpush(cpu_env);
5880 gen_helper_fmov_ST0_STN(cpu_env,
5881 tcg_const_i32((opreg + 1) & 7));
5882 break;
5883 case 0x09: /* fxchg sti */
5884 case 0x29: /* fxchg4 sti, undocumented op */
5885 case 0x39: /* fxchg7 sti, undocumented op */
5886 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5887 break;
5888 case 0x0a: /* grp d9/2 */
5889 switch(rm) {
5890 case 0: /* fnop */
5891 /* check exceptions (FreeBSD FPU probe) */
5892 gen_helper_fwait(cpu_env);
5893 break;
5894 default:
5895 goto illegal_op;
5897 break;
5898 case 0x0c: /* grp d9/4 */
5899 switch(rm) {
5900 case 0: /* fchs */
5901 gen_helper_fchs_ST0(cpu_env);
5902 break;
5903 case 1: /* fabs */
5904 gen_helper_fabs_ST0(cpu_env);
5905 break;
5906 case 4: /* ftst */
5907 gen_helper_fldz_FT0(cpu_env);
5908 gen_helper_fcom_ST0_FT0(cpu_env);
5909 break;
5910 case 5: /* fxam */
5911 gen_helper_fxam_ST0(cpu_env);
5912 break;
5913 default:
5914 goto illegal_op;
5916 break;
5917 case 0x0d: /* grp d9/5 */
5919 switch(rm) {
5920 case 0:
5921 gen_helper_fpush(cpu_env);
5922 gen_helper_fld1_ST0(cpu_env);
5923 break;
5924 case 1:
5925 gen_helper_fpush(cpu_env);
5926 gen_helper_fldl2t_ST0(cpu_env);
5927 break;
5928 case 2:
5929 gen_helper_fpush(cpu_env);
5930 gen_helper_fldl2e_ST0(cpu_env);
5931 break;
5932 case 3:
5933 gen_helper_fpush(cpu_env);
5934 gen_helper_fldpi_ST0(cpu_env);
5935 break;
5936 case 4:
5937 gen_helper_fpush(cpu_env);
5938 gen_helper_fldlg2_ST0(cpu_env);
5939 break;
5940 case 5:
5941 gen_helper_fpush(cpu_env);
5942 gen_helper_fldln2_ST0(cpu_env);
5943 break;
5944 case 6:
5945 gen_helper_fpush(cpu_env);
5946 gen_helper_fldz_ST0(cpu_env);
5947 break;
5948 default:
5949 goto illegal_op;
5952 break;
5953 case 0x0e: /* grp d9/6 */
5954 switch(rm) {
5955 case 0: /* f2xm1 */
5956 gen_helper_f2xm1(cpu_env);
5957 break;
5958 case 1: /* fyl2x */
5959 gen_helper_fyl2x(cpu_env);
5960 break;
5961 case 2: /* fptan */
5962 gen_helper_fptan(cpu_env);
5963 break;
5964 case 3: /* fpatan */
5965 gen_helper_fpatan(cpu_env);
5966 break;
5967 case 4: /* fxtract */
5968 gen_helper_fxtract(cpu_env);
5969 break;
5970 case 5: /* fprem1 */
5971 gen_helper_fprem1(cpu_env);
5972 break;
5973 case 6: /* fdecstp */
5974 gen_helper_fdecstp(cpu_env);
5975 break;
5976 default:
5977 case 7: /* fincstp */
5978 gen_helper_fincstp(cpu_env);
5979 break;
5981 break;
5982 case 0x0f: /* grp d9/7 */
5983 switch(rm) {
5984 case 0: /* fprem */
5985 gen_helper_fprem(cpu_env);
5986 break;
5987 case 1: /* fyl2xp1 */
5988 gen_helper_fyl2xp1(cpu_env);
5989 break;
5990 case 2: /* fsqrt */
5991 gen_helper_fsqrt(cpu_env);
5992 break;
5993 case 3: /* fsincos */
5994 gen_helper_fsincos(cpu_env);
5995 break;
5996 case 5: /* fscale */
5997 gen_helper_fscale(cpu_env);
5998 break;
5999 case 4: /* frndint */
6000 gen_helper_frndint(cpu_env);
6001 break;
6002 case 6: /* fsin */
6003 gen_helper_fsin(cpu_env);
6004 break;
6005 default:
6006 case 7: /* fcos */
6007 gen_helper_fcos(cpu_env);
6008 break;
6010 break;
6011 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6012 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6013 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6015 int op1;
6017 op1 = op & 7;
6018 if (op >= 0x20) {
6019 gen_helper_fp_arith_STN_ST0(op1, opreg);
6020 if (op >= 0x30)
6021 gen_helper_fpop(cpu_env);
6022 } else {
6023 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6024 gen_helper_fp_arith_ST0_FT0(op1);
6027 break;
6028 case 0x02: /* fcom */
6029 case 0x22: /* fcom2, undocumented op */
6030 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6031 gen_helper_fcom_ST0_FT0(cpu_env);
6032 break;
6033 case 0x03: /* fcomp */
6034 case 0x23: /* fcomp3, undocumented op */
6035 case 0x32: /* fcomp5, undocumented op */
6036 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6037 gen_helper_fcom_ST0_FT0(cpu_env);
6038 gen_helper_fpop(cpu_env);
6039 break;
6040 case 0x15: /* da/5 */
6041 switch(rm) {
6042 case 1: /* fucompp */
6043 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6044 gen_helper_fucom_ST0_FT0(cpu_env);
6045 gen_helper_fpop(cpu_env);
6046 gen_helper_fpop(cpu_env);
6047 break;
6048 default:
6049 goto illegal_op;
6051 break;
6052 case 0x1c:
6053 switch(rm) {
6054 case 0: /* feni (287 only, just do nop here) */
6055 break;
6056 case 1: /* fdisi (287 only, just do nop here) */
6057 break;
6058 case 2: /* fclex */
6059 gen_helper_fclex(cpu_env);
6060 break;
6061 case 3: /* fninit */
6062 gen_helper_fninit(cpu_env);
6063 break;
6064 case 4: /* fsetpm (287 only, just do nop here) */
6065 break;
6066 default:
6067 goto illegal_op;
6069 break;
6070 case 0x1d: /* fucomi */
6071 if (!(s->cpuid_features & CPUID_CMOV)) {
6072 goto illegal_op;
6074 gen_update_cc_op(s);
6075 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6076 gen_helper_fucomi_ST0_FT0(cpu_env);
6077 set_cc_op(s, CC_OP_EFLAGS);
6078 break;
6079 case 0x1e: /* fcomi */
6080 if (!(s->cpuid_features & CPUID_CMOV)) {
6081 goto illegal_op;
6083 gen_update_cc_op(s);
6084 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6085 gen_helper_fcomi_ST0_FT0(cpu_env);
6086 set_cc_op(s, CC_OP_EFLAGS);
6087 break;
6088 case 0x28: /* ffree sti */
6089 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6090 break;
6091 case 0x2a: /* fst sti */
6092 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6093 break;
6094 case 0x2b: /* fstp sti */
6095 case 0x0b: /* fstp1 sti, undocumented op */
6096 case 0x3a: /* fstp8 sti, undocumented op */
6097 case 0x3b: /* fstp9 sti, undocumented op */
6098 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6099 gen_helper_fpop(cpu_env);
6100 break;
6101 case 0x2c: /* fucom st(i) */
6102 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6103 gen_helper_fucom_ST0_FT0(cpu_env);
6104 break;
6105 case 0x2d: /* fucomp st(i) */
6106 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6107 gen_helper_fucom_ST0_FT0(cpu_env);
6108 gen_helper_fpop(cpu_env);
6109 break;
6110 case 0x33: /* de/3 */
6111 switch(rm) {
6112 case 1: /* fcompp */
6113 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6114 gen_helper_fcom_ST0_FT0(cpu_env);
6115 gen_helper_fpop(cpu_env);
6116 gen_helper_fpop(cpu_env);
6117 break;
6118 default:
6119 goto illegal_op;
6121 break;
6122 case 0x38: /* ffreep sti, undocumented op */
6123 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6124 gen_helper_fpop(cpu_env);
6125 break;
6126 case 0x3c: /* df/4 */
6127 switch(rm) {
6128 case 0:
6129 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6130 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6131 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
6132 break;
6133 default:
6134 goto illegal_op;
6136 break;
6137 case 0x3d: /* fucomip */
6138 if (!(s->cpuid_features & CPUID_CMOV)) {
6139 goto illegal_op;
6141 gen_update_cc_op(s);
6142 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6143 gen_helper_fucomi_ST0_FT0(cpu_env);
6144 gen_helper_fpop(cpu_env);
6145 set_cc_op(s, CC_OP_EFLAGS);
6146 break;
6147 case 0x3e: /* fcomip */
6148 if (!(s->cpuid_features & CPUID_CMOV)) {
6149 goto illegal_op;
6151 gen_update_cc_op(s);
6152 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6153 gen_helper_fcomi_ST0_FT0(cpu_env);
6154 gen_helper_fpop(cpu_env);
6155 set_cc_op(s, CC_OP_EFLAGS);
6156 break;
6157 case 0x10 ... 0x13: /* fcmovxx */
6158 case 0x18 ... 0x1b:
6160 int op1;
6161 TCGLabel *l1;
6162 static const uint8_t fcmov_cc[8] = {
6163 (JCC_B << 1),
6164 (JCC_Z << 1),
6165 (JCC_BE << 1),
6166 (JCC_P << 1),
6169 if (!(s->cpuid_features & CPUID_CMOV)) {
6170 goto illegal_op;
6172 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
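/* op1 is the negation of the fcmov condition: the jcc below branches
around the fmov, so the register move only happens when the condition
holds; ops 0x18..0x1b are the fcmovn* forms, hence the xor on bit 3. */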
6173 l1 = gen_new_label();
6174 gen_jcc1_noeob(s, op1, l1);
6175 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6176 gen_set_label(l1);
6178 break;
6179 default:
6180 goto illegal_op;
6183 break;
6184 /************************/
6185 /* string ops */
6187 case 0xa4: /* movsS */
6188 case 0xa5:
6189 ot = mo_b_d(b, dflag);
6190 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6191 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6192 } else {
6193 gen_movs(s, ot);
6195 break;
6197 case 0xaa: /* stosS */
6198 case 0xab:
6199 ot = mo_b_d(b, dflag);
6200 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6201 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6202 } else {
6203 gen_stos(s, ot);
6205 break;
6206 case 0xac: /* lodsS */
6207 case 0xad:
6208 ot = mo_b_d(b, dflag);
6209 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6210 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6211 } else {
6212 gen_lods(s, ot);
6214 break;
6215 case 0xae: /* scasS */
6216 case 0xaf:
6217 ot = mo_b_d(b, dflag);
6218 if (prefixes & PREFIX_REPNZ) {
6219 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6220 } else if (prefixes & PREFIX_REPZ) {
6221 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6222 } else {
6223 gen_scas(s, ot);
6225 break;
6227 case 0xa6: /* cmpsS */
6228 case 0xa7:
6229 ot = mo_b_d(b, dflag);
6230 if (prefixes & PREFIX_REPNZ) {
6231 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6232 } else if (prefixes & PREFIX_REPZ) {
6233 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6234 } else {
6235 gen_cmps(s, ot);
6237 break;
6238 case 0x6c: /* insS */
6239 case 0x6d:
6240 ot = mo_b_d32(b, dflag);
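/* only the low 16 bits of DX select the I/O port */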
6241 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6242 gen_check_io(s, ot, pc_start - s->cs_base,
6243 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6244 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6245 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6246 } else {
6247 gen_ins(s, ot);
6248 if (s->tb->cflags & CF_USE_ICOUNT) {
6249 gen_jmp(s, s->pc - s->cs_base);
6252 break;
6253 case 0x6e: /* outsS */
6254 case 0x6f:
6255 ot = mo_b_d32(b, dflag);
6256 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6257 gen_check_io(s, ot, pc_start - s->cs_base,
6258 svm_is_rep(prefixes) | 4);
6259 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6260 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6261 } else {
6262 gen_outs(s, ot);
6263 if (s->tb->cflags & CF_USE_ICOUNT) {
6264 gen_jmp(s, s->pc - s->cs_base);
6267 break;
6269 /************************/
6270 /* port I/O */
6272 case 0xe4:
6273 case 0xe5:
6274 ot = mo_b_d32(b, dflag);
6275 val = cpu_ldub_code(env, s->pc++);
6276 tcg_gen_movi_tl(cpu_T[0], val);
6277 gen_check_io(s, ot, pc_start - s->cs_base,
6278 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6279 if (s->tb->cflags & CF_USE_ICOUNT) {
6280 gen_io_start();
6282 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6283 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6284 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6285 gen_bpt_io(s, cpu_tmp2_i32, ot);
6286 if (s->tb->cflags & CF_USE_ICOUNT) {
6287 gen_io_end();
6288 gen_jmp(s, s->pc - s->cs_base);
6290 break;
6291 case 0xe6:
6292 case 0xe7:
6293 ot = mo_b_d32(b, dflag);
6294 val = cpu_ldub_code(env, s->pc++);
6295 tcg_gen_movi_tl(cpu_T[0], val);
6296 gen_check_io(s, ot, pc_start - s->cs_base,
6297 svm_is_rep(prefixes));
6298 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6300 if (s->tb->cflags & CF_USE_ICOUNT) {
6301 gen_io_start();
6303 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6304 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6305 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6306 gen_bpt_io(s, cpu_tmp2_i32, ot);
6307 if (s->tb->cflags & CF_USE_ICOUNT) {
6308 gen_io_end();
6309 gen_jmp(s, s->pc - s->cs_base);
6311 break;
6312 case 0xec:
6313 case 0xed:
6314 ot = mo_b_d32(b, dflag);
6315 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6316 gen_check_io(s, ot, pc_start - s->cs_base,
6317 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6318 if (s->tb->cflags & CF_USE_ICOUNT) {
6319 gen_io_start();
6321 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6322 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6323 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6324 gen_bpt_io(s, cpu_tmp2_i32, ot);
6325 if (s->tb->cflags & CF_USE_ICOUNT) {
6326 gen_io_end();
6327 gen_jmp(s, s->pc - s->cs_base);
6329 break;
6330 case 0xee:
6331 case 0xef:
6332 ot = mo_b_d32(b, dflag);
6333 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6334 gen_check_io(s, ot, pc_start - s->cs_base,
6335 svm_is_rep(prefixes));
6336 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6338 if (s->tb->cflags & CF_USE_ICOUNT) {
6339 gen_io_start();
6341 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6342 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6343 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6344 gen_bpt_io(s, cpu_tmp2_i32, ot);
6345 if (s->tb->cflags & CF_USE_ICOUNT) {
6346 gen_io_end();
6347 gen_jmp(s, s->pc - s->cs_base);
6349 break;
6351 /************************/
6352 /* control */
6353 case 0xc2: /* ret im */
6354 val = cpu_ldsw_code(env, s->pc);
6355 s->pc += 2;
6356 ot = gen_pop_T0(s);
6357 gen_stack_update(s, val + (1 << ot));
6358 /* Note that gen_pop_T0 uses a zero-extending load. */
6359 gen_op_jmp_v(cpu_T[0]);
6360 gen_eob(s);
6361 break;
6362 case 0xc3: /* ret */
6363 ot = gen_pop_T0(s);
6364 gen_pop_update(s, ot);
6365 /* Note that gen_pop_T0 uses a zero-extending load. */
6366 gen_op_jmp_v(cpu_T[0]);
6367 gen_eob(s);
6368 break;
6369 case 0xca: /* lret im */
6370 val = cpu_ldsw_code(env, s->pc);
6371 s->pc += 2;
6372 do_lret:
6373 if (s->pe && !s->vm86) {
6374 gen_update_cc_op(s);
6375 gen_jmp_im(pc_start - s->cs_base);
6376 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6377 tcg_const_i32(val));
6378 } else {
6379 gen_stack_A0(s);
6380 /* pop offset */
6381 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6382 /* NOTE: keeping EIP updated is not a problem in case of
6383 exception */
6384 gen_op_jmp_v(cpu_T[0]);
6385 /* pop selector */
6386 gen_op_addl_A0_im(1 << dflag);
6387 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6388 gen_op_movl_seg_T0_vm(R_CS);
6389 /* add stack offset */
6390 gen_stack_update(s, val + (2 << dflag));
6392 gen_eob(s);
6393 break;
6394 case 0xcb: /* lret */
6395 val = 0;
6396 goto do_lret;
6397 case 0xcf: /* iret */
6398 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6399 if (!s->pe) {
6400 /* real mode */
6401 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6402 set_cc_op(s, CC_OP_EFLAGS);
6403 } else if (s->vm86) {
6404 if (s->iopl != 3) {
6405 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6406 } else {
6407 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6408 set_cc_op(s, CC_OP_EFLAGS);
6410 } else {
6411 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6412 tcg_const_i32(s->pc - s->cs_base));
6413 set_cc_op(s, CC_OP_EFLAGS);
6415 gen_eob(s);
6416 break;
6417 case 0xe8: /* call im */
6419 if (dflag != MO_16) {
6420 tval = (int32_t)insn_get(env, s, MO_32);
6421 } else {
6422 tval = (int16_t)insn_get(env, s, MO_16);
6424 next_eip = s->pc - s->cs_base;
6425 tval += next_eip;
6426 if (dflag == MO_16) {
6427 tval &= 0xffff;
6428 } else if (!CODE64(s)) {
6429 tval &= 0xffffffff;
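/* push the address of the next instruction and jump; the target was
computed relative to next_eip and truncated to 16 or 32 bits outside
64-bit mode */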
6431 tcg_gen_movi_tl(cpu_T[0], next_eip);
6432 gen_push_v(s, cpu_T[0]);
6433 gen_jmp(s, tval);
6435 break;
6436 case 0x9a: /* lcall im */
6438 unsigned int selector, offset;
6440 if (CODE64(s))
6441 goto illegal_op;
6442 ot = dflag;
6443 offset = insn_get(env, s, ot);
6444 selector = insn_get(env, s, MO_16);
6446 tcg_gen_movi_tl(cpu_T[0], selector);
6447 tcg_gen_movi_tl(cpu_T[1], offset);
6449 goto do_lcall;
6450 case 0xe9: /* jmp im */
6451 if (dflag != MO_16) {
6452 tval = (int32_t)insn_get(env, s, MO_32);
6453 } else {
6454 tval = (int16_t)insn_get(env, s, MO_16);
6456 tval += s->pc - s->cs_base;
6457 if (dflag == MO_16) {
6458 tval &= 0xffff;
6459 } else if (!CODE64(s)) {
6460 tval &= 0xffffffff;
6462 gen_jmp(s, tval);
6463 break;
6464 case 0xea: /* ljmp im */
6466 unsigned int selector, offset;
6468 if (CODE64(s))
6469 goto illegal_op;
6470 ot = dflag;
6471 offset = insn_get(env, s, ot);
6472 selector = insn_get(env, s, MO_16);
6474 tcg_gen_movi_tl(cpu_T[0], selector);
6475 tcg_gen_movi_tl(cpu_T[1], offset);
6477 goto do_ljmp;
6478 case 0xeb: /* jmp Jb */
6479 tval = (int8_t)insn_get(env, s, MO_8);
6480 tval += s->pc - s->cs_base;
6481 if (dflag == MO_16) {
6482 tval &= 0xffff;
6484 gen_jmp(s, tval);
6485 break;
6486 case 0x70 ... 0x7f: /* jcc Jb */
6487 tval = (int8_t)insn_get(env, s, MO_8);
6488 goto do_jcc;
6489 case 0x180 ... 0x18f: /* jcc Jv */
6490 if (dflag != MO_16) {
6491 tval = (int32_t)insn_get(env, s, MO_32);
6492 } else {
6493 tval = (int16_t)insn_get(env, s, MO_16);
6495 do_jcc:
6496 next_eip = s->pc - s->cs_base;
6497 tval += next_eip;
6498 if (dflag == MO_16) {
6499 tval &= 0xffff;
6501 gen_jcc(s, b, tval, next_eip);
6502 break;
6504 case 0x190 ... 0x19f: /* setcc Gv */
6505 modrm = cpu_ldub_code(env, s->pc++);
6506 gen_setcc1(s, b, cpu_T[0]);
6507 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6508 break;
6509 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6510 if (!(s->cpuid_features & CPUID_CMOV)) {
6511 goto illegal_op;
6513 ot = dflag;
6514 modrm = cpu_ldub_code(env, s->pc++);
6515 reg = ((modrm >> 3) & 7) | rex_r;
6516 gen_cmovcc1(env, s, ot, b, modrm, reg);
6517 break;
6519 /************************/
6520 /* flags */
6521 case 0x9c: /* pushf */
6522 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6523 if (s->vm86 && s->iopl != 3) {
6524 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6525 } else {
6526 gen_update_cc_op(s);
6527 gen_helper_read_eflags(cpu_T[0], cpu_env);
6528 gen_push_v(s, cpu_T[0]);
6530 break;
6531 case 0x9d: /* popf */
6532 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6533 if (s->vm86 && s->iopl != 3) {
6534 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6535 } else {
6536 ot = gen_pop_T0(s);
6537 if (s->cpl == 0) {
6538 if (dflag != MO_16) {
6539 gen_helper_write_eflags(cpu_env, cpu_T[0],
6540 tcg_const_i32((TF_MASK | AC_MASK |
6541 ID_MASK | NT_MASK |
6542 IF_MASK |
6543 IOPL_MASK)));
6544 } else {
6545 gen_helper_write_eflags(cpu_env, cpu_T[0],
6546 tcg_const_i32((TF_MASK | AC_MASK |
6547 ID_MASK | NT_MASK |
6548 IF_MASK | IOPL_MASK)
6549 & 0xffff));
6551 } else {
6552 if (s->cpl <= s->iopl) {
6553 if (dflag != MO_16) {
6554 gen_helper_write_eflags(cpu_env, cpu_T[0],
6555 tcg_const_i32((TF_MASK |
6556 AC_MASK |
6557 ID_MASK |
6558 NT_MASK |
6559 IF_MASK)));
6560 } else {
6561 gen_helper_write_eflags(cpu_env, cpu_T[0],
6562 tcg_const_i32((TF_MASK |
6563 AC_MASK |
6564 ID_MASK |
6565 NT_MASK |
6566 IF_MASK)
6567 & 0xffff));
6569 } else {
6570 if (dflag != MO_16) {
6571 gen_helper_write_eflags(cpu_env, cpu_T[0],
6572 tcg_const_i32((TF_MASK | AC_MASK |
6573 ID_MASK | NT_MASK)));
6574 } else {
6575 gen_helper_write_eflags(cpu_env, cpu_T[0],
6576 tcg_const_i32((TF_MASK | AC_MASK |
6577 ID_MASK | NT_MASK)
6578 & 0xffff));
6582 gen_pop_update(s, ot);
6583 set_cc_op(s, CC_OP_EFLAGS);
6584 /* abort translation because TF/AC flag may change */
6585 gen_jmp_im(s->pc - s->cs_base);
6586 gen_eob(s);
6588 break;
6589 case 0x9e: /* sahf */
6590 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6591 goto illegal_op;
6592 gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
6593 gen_compute_eflags(s);
6594 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6595 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6596 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6597 break;
6598 case 0x9f: /* lahf */
6599 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6600 goto illegal_op;
6601 gen_compute_eflags(s);
6602 /* Note: gen_compute_eflags() only gives the condition codes */
6603 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6604 gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
6605 break;
6606 case 0xf5: /* cmc */
6607 gen_compute_eflags(s);
6608 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6609 break;
6610 case 0xf8: /* clc */
6611 gen_compute_eflags(s);
6612 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6613 break;
6614 case 0xf9: /* stc */
6615 gen_compute_eflags(s);
6616 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6617 break;
6618 case 0xfc: /* cld */
6619 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6620 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6621 break;
6622 case 0xfd: /* std */
6623 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6624 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6625 break;
6627 /************************/
6628 /* bit operations */
6629 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6630 ot = dflag;
6631 modrm = cpu_ldub_code(env, s->pc++);
6632 op = (modrm >> 3) & 7;
6633 mod = (modrm >> 6) & 3;
6634 rm = (modrm & 7) | REX_B(s);
6635 if (mod != 3) {
6636 s->rip_offset = 1;
6637 gen_lea_modrm(env, s, modrm);
6638 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6639 } else {
6640 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6642 /* load shift */
6643 val = cpu_ldub_code(env, s->pc++);
6644 tcg_gen_movi_tl(cpu_T[1], val);
6645 if (op < 4)
6646 goto illegal_op;
6647 op -= 4;
6648 goto bt_op;
6649 case 0x1a3: /* bt Gv, Ev */
6650 op = 0;
6651 goto do_btx;
6652 case 0x1ab: /* bts */
6653 op = 1;
6654 goto do_btx;
6655 case 0x1b3: /* btr */
6656 op = 2;
6657 goto do_btx;
6658 case 0x1bb: /* btc */
6659 op = 3;
6660 do_btx:
6661 ot = dflag;
6662 modrm = cpu_ldub_code(env, s->pc++);
6663 reg = ((modrm >> 3) & 7) | rex_r;
6664 mod = (modrm >> 6) & 3;
6665 rm = (modrm & 7) | REX_B(s);
6666 gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
6667 if (mod != 3) {
6668 gen_lea_modrm(env, s, modrm);
6669 /* for a memory operand the signed bit offset selects the word to test, so add the corresponding byte displacement to the address */
6670 gen_exts(ot, cpu_T[1]);
6671 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6672 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6673 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6674 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6675 } else {
6676 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6678 bt_op:
6679 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6680 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
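/* cpu_tmp4 now holds the tested bit in its low bit; it supplies the
new CF once any store below has completed */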
6681 switch(op) {
6682 case 0:
6683 break;
6684 case 1:
6685 tcg_gen_movi_tl(cpu_tmp0, 1);
6686 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6687 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6688 break;
6689 case 2:
6690 tcg_gen_movi_tl(cpu_tmp0, 1);
6691 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6692 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6693 break;
6694 default:
6695 case 3:
6696 tcg_gen_movi_tl(cpu_tmp0, 1);
6697 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6698 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6699 break;
6701 if (op != 0) {
6702 if (mod != 3) {
6703 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6704 } else {
6705 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
6709 /* Delay all CC updates until after the store above. Note that
6710 C is the result of the test, Z is unchanged, and the others
6711 are all undefined. */
6712 switch (s->cc_op) {
6713 case CC_OP_MULB ... CC_OP_MULQ:
6714 case CC_OP_ADDB ... CC_OP_ADDQ:
6715 case CC_OP_ADCB ... CC_OP_ADCQ:
6716 case CC_OP_SUBB ... CC_OP_SUBQ:
6717 case CC_OP_SBBB ... CC_OP_SBBQ:
6718 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6719 case CC_OP_INCB ... CC_OP_INCQ:
6720 case CC_OP_DECB ... CC_OP_DECQ:
6721 case CC_OP_SHLB ... CC_OP_SHLQ:
6722 case CC_OP_SARB ... CC_OP_SARQ:
6723 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6724 /* Z was going to be computed from the non-zero status of CC_DST.
6725 We can get that same Z value (and the new C value) by leaving
6726 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6727 same width. */
6728 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6729 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
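/* (cc_op - CC_OP_MULB) & 3 recovers the width index (B/W/L/Q) of the
pending op; applying it to CC_OP_SARB keeps the deferred flag
computation at the same operand width */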
6730 break;
6731 default:
6732 /* Otherwise, generate EFLAGS and replace the C bit. */
6733 gen_compute_eflags(s);
6734 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6735 ctz32(CC_C), 1);
6736 break;
6738 break;
6739 case 0x1bc: /* bsf / tzcnt */
6740 case 0x1bd: /* bsr / lzcnt */
6741 ot = dflag;
6742 modrm = cpu_ldub_code(env, s->pc++);
6743 reg = ((modrm >> 3) & 7) | rex_r;
6744 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6745 gen_extu(ot, cpu_T[0]);
6747 /* Note that lzcnt and tzcnt are in different extensions. */
6748 if ((prefixes & PREFIX_REPZ)
6749 && (b & 1
6750 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6751 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6752 int size = 8 << ot;
6753 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6754 if (b & 1) {
6755 /* For lzcnt, reduce the target_ulong result by the
6756 number of zeros that we expect to find at the top. */
6757 gen_helper_clz(cpu_T[0], cpu_T[0]);
6758 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6759 } else {
6760 /* For tzcnt, a zero input must return the operand size:
6761 force all bits outside the operand size to 1. */
6762 target_ulong mask = (target_ulong)-2 << (size - 1);
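/* the mask is ~((1 << size) - 1), e.g. ~0xffff for 16-bit operands,
written as -2 << (size - 1) so the MO_64 case (mask == 0) does not
shift by the full word width; a zero input then yields ctz == size */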
6763 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6764 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6766 /* For lzcnt/tzcnt, C and Z bits are defined and are
6767 related to the result. */
6768 gen_op_update1_cc();
6769 set_cc_op(s, CC_OP_BMILGB + ot);
6770 } else {
6771 /* For bsr/bsf, only the Z bit is defined and it is related
6772 to the input and not the result. */
6773 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6774 set_cc_op(s, CC_OP_LOGICB + ot);
6775 if (b & 1) {
6776 /* For bsr, return the bit index of the first 1 bit,
6777 not the count of leading zeros. */
6778 gen_helper_clz(cpu_T[0], cpu_T[0]);
6779 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6780 } else {
6781 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6783 /* ??? The manual says that the output is undefined when the
6784 input is zero, but real hardware leaves it unchanged, and
6785 real programs appear to depend on that. */
6786 tcg_gen_movi_tl(cpu_tmp0, 0);
6787 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6788 cpu_regs[reg], cpu_T[0]);
6790 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
6791 break;
6792 /************************/
6793 /* bcd */
6794 case 0x27: /* daa */
6795 if (CODE64(s))
6796 goto illegal_op;
6797 gen_update_cc_op(s);
6798 gen_helper_daa(cpu_env);
6799 set_cc_op(s, CC_OP_EFLAGS);
6800 break;
6801 case 0x2f: /* das */
6802 if (CODE64(s))
6803 goto illegal_op;
6804 gen_update_cc_op(s);
6805 gen_helper_das(cpu_env);
6806 set_cc_op(s, CC_OP_EFLAGS);
6807 break;
6808 case 0x37: /* aaa */
6809 if (CODE64(s))
6810 goto illegal_op;
6811 gen_update_cc_op(s);
6812 gen_helper_aaa(cpu_env);
6813 set_cc_op(s, CC_OP_EFLAGS);
6814 break;
6815 case 0x3f: /* aas */
6816 if (CODE64(s))
6817 goto illegal_op;
6818 gen_update_cc_op(s);
6819 gen_helper_aas(cpu_env);
6820 set_cc_op(s, CC_OP_EFLAGS);
6821 break;
6822 case 0xd4: /* aam */
6823 if (CODE64(s))
6824 goto illegal_op;
6825 val = cpu_ldub_code(env, s->pc++);
6826 if (val == 0) {
6827 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6828 } else {
6829 gen_helper_aam(cpu_env, tcg_const_i32(val));
6830 set_cc_op(s, CC_OP_LOGICB);
6832 break;
6833 case 0xd5: /* aad */
6834 if (CODE64(s))
6835 goto illegal_op;
6836 val = cpu_ldub_code(env, s->pc++);
6837 gen_helper_aad(cpu_env, tcg_const_i32(val));
6838 set_cc_op(s, CC_OP_LOGICB);
6839 break;
6840 /************************/
6841 /* misc */
6842 case 0x90: /* nop */
6843 /* XXX: correct lock test for all insn */
6844 if (prefixes & PREFIX_LOCK) {
6845 goto illegal_op;
6847 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6848 if (REX_B(s)) {
6849 goto do_xchg_reg_eax;
6851 if (prefixes & PREFIX_REPZ) {
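/* rep (0xf3) prefix on nop is PAUSE: it ends the TB through the pause
helper instead of being treated as a plain nop */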
6852 gen_update_cc_op(s);
6853 gen_jmp_im(pc_start - s->cs_base);
6854 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6855 s->is_jmp = DISAS_TB_JUMP;
6857 break;
6858 case 0x9b: /* fwait */
6859 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6860 (HF_MP_MASK | HF_TS_MASK)) {
6861 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6862 } else {
6863 gen_helper_fwait(cpu_env);
6865 break;
6866 case 0xcc: /* int3 */
6867 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6868 break;
6869 case 0xcd: /* int N */
6870 val = cpu_ldub_code(env, s->pc++);
6871 if (s->vm86 && s->iopl != 3) {
6872 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6873 } else {
6874 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6876 break;
6877 case 0xce: /* into */
6878 if (CODE64(s))
6879 goto illegal_op;
6880 gen_update_cc_op(s);
6881 gen_jmp_im(pc_start - s->cs_base);
6882 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6883 break;
6884 #ifdef WANT_ICEBP
6885 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6886 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6887 #if 1
6888 gen_debug(s, pc_start - s->cs_base);
6889 #else
6890 /* start debug */
6891 tb_flush(CPU(x86_env_get_cpu(env)));
6892 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6893 #endif
6894 break;
6895 #endif
6896 case 0xfa: /* cli */
6897 if (!s->vm86) {
6898 if (s->cpl <= s->iopl) {
6899 gen_helper_cli(cpu_env);
6900 } else {
6901 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6903 } else {
6904 if (s->iopl == 3) {
6905 gen_helper_cli(cpu_env);
6906 } else {
6907 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6910 break;
6911 case 0xfb: /* sti */
6912 if (!s->vm86) {
6913 if (s->cpl <= s->iopl) {
6914 gen_sti:
6915 gen_helper_sti(cpu_env);
6916 /* interrupts are recognized only after the instruction following sti */
6917 /* if several consecutive instructions would inhibit interrupts, only the
6918 _first_ one actually sets the inhibit flag */
6919 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6920 gen_helper_set_inhibit_irq(cpu_env);
6921 /* give a chance to handle pending irqs */
6922 gen_jmp_im(s->pc - s->cs_base);
6923 gen_eob(s);
6924 } else {
6925 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6927 } else {
6928 if (s->iopl == 3) {
6929 goto gen_sti;
6930 } else {
6931 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6934 break;
6935 case 0x62: /* bound */
6936 if (CODE64(s))
6937 goto illegal_op;
6938 ot = dflag;
6939 modrm = cpu_ldub_code(env, s->pc++);
6940 reg = (modrm >> 3) & 7;
6941 mod = (modrm >> 6) & 3;
6942 if (mod == 3)
6943 goto illegal_op;
6944 gen_op_mov_v_reg(ot, cpu_T[0], reg);
6945 gen_lea_modrm(env, s, modrm);
6946 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6947 if (ot == MO_16) {
6948 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6949 } else {
6950 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6952 break;
6953 case 0x1c8 ... 0x1cf: /* bswap reg */
6954 reg = (b & 7) | REX_B(s);
6955 #ifdef TARGET_X86_64
6956 if (dflag == MO_64) {
6957 gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
6958 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6959 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
6960 } else
6961 #endif
6963 gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
6964 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6965 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6966 gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
6968 break;
6969 case 0xd6: /* salc */
6970 if (CODE64(s))
6971 goto illegal_op;
6972 gen_compute_eflags_c(s, cpu_T[0]);
6973 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6974 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
6975 break;
6976 case 0xe0: /* loopnz */
6977 case 0xe1: /* loopz */
6978 case 0xe2: /* loop */
6979 case 0xe3: /* jecxz */
6981 TCGLabel *l1, *l2, *l3;
6983 tval = (int8_t)insn_get(env, s, MO_8);
6984 next_eip = s->pc - s->cs_base;
6985 tval += next_eip;
6986 if (dflag == MO_16) {
6987 tval &= 0xffff;
6990 l1 = gen_new_label();
6991 l2 = gen_new_label();
6992 l3 = gen_new_label();
6993 b &= 3;
6994 switch(b) {
6995 case 0: /* loopnz */
6996 case 1: /* loopz */
6997 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6998 gen_op_jz_ecx(s->aflag, l3);
6999 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7000 break;
7001 case 2: /* loop */
7002 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7003 gen_op_jnz_ecx(s->aflag, l1);
7004 break;
7005 default:
7006 case 3: /* jcxz */
7007 gen_op_jz_ecx(s->aflag, l1);
7008 break;
7011 gen_set_label(l3);
7012 gen_jmp_im(next_eip);
7013 tcg_gen_br(l2);
7015 gen_set_label(l1);
7016 gen_jmp_im(tval);
7017 gen_set_label(l2);
7018 gen_eob(s);
7020 break;
7021 case 0x130: /* wrmsr */
7022 case 0x132: /* rdmsr */
7023 if (s->cpl != 0) {
7024 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7025 } else {
7026 gen_update_cc_op(s);
7027 gen_jmp_im(pc_start - s->cs_base);
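/* b & 2 distinguishes rdmsr (0x132) from wrmsr (0x130) */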
7028 if (b & 2) {
7029 gen_helper_rdmsr(cpu_env);
7030 } else {
7031 gen_helper_wrmsr(cpu_env);
7034 break;
7035 case 0x131: /* rdtsc */
7036 gen_update_cc_op(s);
7037 gen_jmp_im(pc_start - s->cs_base);
7038 if (s->tb->cflags & CF_USE_ICOUNT) {
7039 gen_io_start();
7041 gen_helper_rdtsc(cpu_env);
7042 if (s->tb->cflags & CF_USE_ICOUNT) {
7043 gen_io_end();
7044 gen_jmp(s, s->pc - s->cs_base);
7046 break;
7047 case 0x133: /* rdpmc */
7048 gen_update_cc_op(s);
7049 gen_jmp_im(pc_start - s->cs_base);
7050 gen_helper_rdpmc(cpu_env);
7051 break;
7052 case 0x134: /* sysenter */
7053 /* SYSENTER is valid in 64-bit mode only on Intel CPUs */
7054 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7055 goto illegal_op;
7056 if (!s->pe) {
7057 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7058 } else {
7059 gen_helper_sysenter(cpu_env);
7060 gen_eob(s);
7062 break;
7063 case 0x135: /* sysexit */
7064 /* SYSEXIT is valid in 64-bit mode only on Intel CPUs */
7065 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7066 goto illegal_op;
7067 if (!s->pe) {
7068 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7069 } else {
7070 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7071 gen_eob(s);
7073 break;
7074 #ifdef TARGET_X86_64
7075 case 0x105: /* syscall */
7076 /* XXX: is it usable in real mode ? */
7077 gen_update_cc_op(s);
7078 gen_jmp_im(pc_start - s->cs_base);
7079 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7080 gen_eob(s);
7081 break;
7082 case 0x107: /* sysret */
7083 if (!s->pe) {
7084 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7085 } else {
7086 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7087 /* condition codes are modified only in long mode */
7088 if (s->lma) {
7089 set_cc_op(s, CC_OP_EFLAGS);
7091 gen_eob(s);
7093 break;
7094 #endif
7095 case 0x1a2: /* cpuid */
7096 gen_update_cc_op(s);
7097 gen_jmp_im(pc_start - s->cs_base);
7098 gen_helper_cpuid(cpu_env);
7099 break;
7100 case 0xf4: /* hlt */
7101 if (s->cpl != 0) {
7102 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7103 } else {
7104 gen_update_cc_op(s);
7105 gen_jmp_im(pc_start - s->cs_base);
7106 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7107 s->is_jmp = DISAS_TB_JUMP;
7109 break;
7110 case 0x100:
7111 modrm = cpu_ldub_code(env, s->pc++);
7112 mod = (modrm >> 6) & 3;
7113 op = (modrm >> 3) & 7;
7114 switch(op) {
7115 case 0: /* sldt */
7116 if (!s->pe || s->vm86)
7117 goto illegal_op;
7118 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7119 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7120 ot = mod == 3 ? dflag : MO_16;
7121 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7122 break;
7123 case 2: /* lldt */
7124 if (!s->pe || s->vm86)
7125 goto illegal_op;
7126 if (s->cpl != 0) {
7127 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7128 } else {
7129 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7130 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7131 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7132 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7134 break;
7135 case 1: /* str */
7136 if (!s->pe || s->vm86)
7137 goto illegal_op;
7138 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7139 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7140 ot = mod == 3 ? dflag : MO_16;
7141 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7142 break;
7143 case 3: /* ltr */
7144 if (!s->pe || s->vm86)
7145 goto illegal_op;
7146 if (s->cpl != 0) {
7147 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7148 } else {
7149 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7150 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7151 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7152 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7154 break;
7155 case 4: /* verr */
7156 case 5: /* verw */
7157 if (!s->pe || s->vm86)
7158 goto illegal_op;
7159 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7160 gen_update_cc_op(s);
7161 if (op == 4) {
7162 gen_helper_verr(cpu_env, cpu_T[0]);
7163 } else {
7164 gen_helper_verw(cpu_env, cpu_T[0]);
7166 set_cc_op(s, CC_OP_EFLAGS);
7167 break;
7168 default:
7169 goto illegal_op;
7171 break;
7172 case 0x101:
7173 modrm = cpu_ldub_code(env, s->pc++);
7174 mod = (modrm >> 6) & 3;
7175 op = (modrm >> 3) & 7;
7176 rm = modrm & 7;
7177 switch(op) {
7178 case 0: /* sgdt */
7179 if (mod == 3)
7180 goto illegal_op;
7181 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7182 gen_lea_modrm(env, s, modrm);
7183 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7184 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7185 gen_add_A0_im(s, 2);
7186 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7187 if (dflag == MO_16) {
7188 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7190 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
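/* the store above uses CODE64(s) + MO_32, i.e. MO_64 in 64-bit code,
so the full 8-byte base is written; 16/32-bit code writes 4 bytes,
with the top byte already masked off for 16-bit operand size */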
7191 break;
7192 case 1:
7193 if (mod == 3) {
7194 switch (rm) {
7195 case 0: /* monitor */
7196 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7197 s->cpl != 0)
7198 goto illegal_op;
7199 gen_update_cc_op(s);
7200 gen_jmp_im(pc_start - s->cs_base);
7201 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7202 gen_extu(s->aflag, cpu_A0);
7203 gen_add_A0_ds_seg(s);
7204 gen_helper_monitor(cpu_env, cpu_A0);
7205 break;
7206 case 1: /* mwait */
7207 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7208 s->cpl != 0)
7209 goto illegal_op;
7210 gen_update_cc_op(s);
7211 gen_jmp_im(pc_start - s->cs_base);
7212 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7213 gen_eob(s);
7214 break;
7215 case 2: /* clac */
7216 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7217 s->cpl != 0) {
7218 goto illegal_op;
7220 gen_helper_clac(cpu_env);
7221 gen_jmp_im(s->pc - s->cs_base);
7222 gen_eob(s);
7223 break;
7224 case 3: /* stac */
7225 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7226 s->cpl != 0) {
7227 goto illegal_op;
7229 gen_helper_stac(cpu_env);
7230 gen_jmp_im(s->pc - s->cs_base);
7231 gen_eob(s);
7232 break;
7233 default:
7234 goto illegal_op;
7236 } else { /* sidt */
7237 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7238 gen_lea_modrm(env, s, modrm);
7239 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7240 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7241 gen_add_A0_im(s, 2);
7242 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7243 if (dflag == MO_16) {
7244 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7246 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7248 break;
7249 case 2: /* lgdt */
7250 case 3: /* lidt */
7251 if (mod == 3) {
7252 gen_update_cc_op(s);
7253 gen_jmp_im(pc_start - s->cs_base);
7254 switch(rm) {
7255 case 0: /* VMRUN */
7256 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7257 goto illegal_op;
7258 if (s->cpl != 0) {
7259 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7260 break;
7261 } else {
7262 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7263 tcg_const_i32(s->pc - pc_start));
7264 tcg_gen_exit_tb(0);
7265 s->is_jmp = DISAS_TB_JUMP;
7267 break;
7268 case 1: /* VMMCALL */
7269 if (!(s->flags & HF_SVME_MASK))
7270 goto illegal_op;
7271 gen_helper_vmmcall(cpu_env);
7272 break;
7273 case 2: /* VMLOAD */
7274 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7275 goto illegal_op;
7276 if (s->cpl != 0) {
7277 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7278 break;
7279 } else {
7280 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7282 break;
7283 case 3: /* VMSAVE */
7284 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7285 goto illegal_op;
7286 if (s->cpl != 0) {
7287 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7288 break;
7289 } else {
7290 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7292 break;
7293 case 4: /* STGI */
7294 if ((!(s->flags & HF_SVME_MASK) &&
7295 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7296 !s->pe)
7297 goto illegal_op;
7298 if (s->cpl != 0) {
7299 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7300 break;
7301 } else {
7302 gen_helper_stgi(cpu_env);
7304 break;
7305 case 5: /* CLGI */
7306 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7307 goto illegal_op;
7308 if (s->cpl != 0) {
7309 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7310 break;
7311 } else {
7312 gen_helper_clgi(cpu_env);
7314 break;
7315 case 6: /* SKINIT */
7316 if ((!(s->flags & HF_SVME_MASK) &&
7317 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7318 !s->pe)
7319 goto illegal_op;
7320 gen_helper_skinit(cpu_env);
7321 break;
7322 case 7: /* INVLPGA */
7323 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7324 goto illegal_op;
7325 if (s->cpl != 0) {
7326 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7327 break;
7328 } else {
7329 gen_helper_invlpga(cpu_env,
7330 tcg_const_i32(s->aflag - 1));
7332 break;
7333 default:
7334 goto illegal_op;
7336 } else if (s->cpl != 0) {
7337 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7338 } else {
7339 gen_svm_check_intercept(s, pc_start,
7340 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7341 gen_lea_modrm(env, s, modrm);
7342 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
7343 gen_add_A0_im(s, 2);
7344 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7345 if (dflag == MO_16) {
7346 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7348 if (op == 2) {
7349 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7350 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7351 } else {
7352 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7353 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7356 break;
7357 case 4: /* smsw */
7358 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
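/* cr[0] is a 64-bit field on x86_64; on big-endian hosts its low 32
bits live at offset +4, hence the adjusted offsetof below */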
7359 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7360 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7361 #else
7362 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7363 #endif
7364 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
7365 break;
7366 case 6: /* lmsw */
7367 if (s->cpl != 0) {
7368 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7369 } else {
7370 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7371 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7372 gen_helper_lmsw(cpu_env, cpu_T[0]);
7373 gen_jmp_im(s->pc - s->cs_base);
7374 gen_eob(s);
7376 break;
7377 case 7:
7378 if (mod != 3) { /* invlpg */
7379 if (s->cpl != 0) {
7380 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7381 } else {
7382 gen_update_cc_op(s);
7383 gen_jmp_im(pc_start - s->cs_base);
7384 gen_lea_modrm(env, s, modrm);
7385 gen_helper_invlpg(cpu_env, cpu_A0);
7386 gen_jmp_im(s->pc - s->cs_base);
7387 gen_eob(s);
7389 } else {
7390 switch (rm) {
7391 case 0: /* swapgs */
7392 #ifdef TARGET_X86_64
7393 if (CODE64(s)) {
7394 if (s->cpl != 0) {
7395 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7396 } else {
7397 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7398 offsetof(CPUX86State,segs[R_GS].base));
7399 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7400 offsetof(CPUX86State,kernelgsbase));
7401 tcg_gen_st_tl(cpu_T[1], cpu_env,
7402 offsetof(CPUX86State,segs[R_GS].base));
7403 tcg_gen_st_tl(cpu_T[0], cpu_env,
7404 offsetof(CPUX86State,kernelgsbase));
7406 } else
7407 #endif
7409 goto illegal_op;
7411 break;
7412 case 1: /* rdtscp */
7413 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7414 goto illegal_op;
7415 gen_update_cc_op(s);
7416 gen_jmp_im(pc_start - s->cs_base);
7417 if (s->tb->cflags & CF_USE_ICOUNT) {
7418 gen_io_start();
7420 gen_helper_rdtscp(cpu_env);
7421 if (s->tb->cflags & CF_USE_ICOUNT) {
7422 gen_io_end();
7423 gen_jmp(s, s->pc - s->cs_base);
7425 break;
7426 default:
7427 goto illegal_op;
7430 break;
7431 default:
7432 goto illegal_op;
7434 break;
7435 case 0x108: /* invd */
7436 case 0x109: /* wbinvd */
7437 if (s->cpl != 0) {
7438 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7439 } else {
7440 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7441 /* nothing to do */
7443 break;
7444 case 0x63: /* arpl or movslS (x86_64) */
7445 #ifdef TARGET_X86_64
7446 if (CODE64(s)) {
7447 int d_ot;
7448 /* d_ot is the size of destination */
7449 d_ot = dflag;
7451 modrm = cpu_ldub_code(env, s->pc++);
7452 reg = ((modrm >> 3) & 7) | rex_r;
7453 mod = (modrm >> 6) & 3;
7454 rm = (modrm & 7) | REX_B(s);
7456 if (mod == 3) {
7457 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
7458 /* sign extend */
7459 if (d_ot == MO_64) {
7460 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7462 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7463 } else {
7464 gen_lea_modrm(env, s, modrm);
7465 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
7466 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7468 } else
7469 #endif
7471 TCGLabel *label1;
7472 TCGv t0, t1, t2, a0;
7474 if (!s->pe || s->vm86)
7475 goto illegal_op;
7476 t0 = tcg_temp_local_new();
7477 t1 = tcg_temp_local_new();
7478 t2 = tcg_temp_local_new();
7479 ot = MO_16;
7480 modrm = cpu_ldub_code(env, s->pc++);
7481 reg = (modrm >> 3) & 7;
7482 mod = (modrm >> 6) & 3;
7483 rm = modrm & 7;
7484 if (mod != 3) {
7485 gen_lea_modrm(env, s, modrm);
7486 gen_op_ld_v(s, ot, t0, cpu_A0);
7487 a0 = tcg_temp_local_new();
7488 tcg_gen_mov_tl(a0, cpu_A0);
7489 } else {
7490 gen_op_mov_v_reg(ot, t0, rm);
7491 TCGV_UNUSED(a0);
7493 gen_op_mov_v_reg(ot, t1, reg);
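/* ARPL: if the destination selector's RPL (low two bits of t0) is
lower than the source's, raise it to the source RPL and set ZF;
t2 carries the new Z bit into cc_src below */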
7494 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7495 tcg_gen_andi_tl(t1, t1, 3);
7496 tcg_gen_movi_tl(t2, 0);
7497 label1 = gen_new_label();
7498 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7499 tcg_gen_andi_tl(t0, t0, ~3);
7500 tcg_gen_or_tl(t0, t0, t1);
7501 tcg_gen_movi_tl(t2, CC_Z);
7502 gen_set_label(label1);
7503 if (mod != 3) {
7504 gen_op_st_v(s, ot, t0, a0);
7505 tcg_temp_free(a0);
7506 } else {
7507 gen_op_mov_reg_v(ot, rm, t0);
7509 gen_compute_eflags(s);
7510 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7511 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7512 tcg_temp_free(t0);
7513 tcg_temp_free(t1);
7514 tcg_temp_free(t2);
7516 break;
7517 case 0x102: /* lar */
7518 case 0x103: /* lsl */
7520 TCGLabel *label1;
7521 TCGv t0;
7522 if (!s->pe || s->vm86)
7523 goto illegal_op;
7524 ot = dflag != MO_16 ? MO_32 : MO_16;
7525 modrm = cpu_ldub_code(env, s->pc++);
7526 reg = ((modrm >> 3) & 7) | rex_r;
7527 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7528 t0 = tcg_temp_local_new();
7529 gen_update_cc_op(s);
7530 if (b == 0x102) {
7531 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7532 } else {
7533 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7535 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7536 label1 = gen_new_label();
7537 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7538 gen_op_mov_reg_v(ot, reg, t0);
7539 gen_set_label(label1);
7540 set_cc_op(s, CC_OP_EFLAGS);
7541 tcg_temp_free(t0);
7543 break;
7544 case 0x118:
7545 modrm = cpu_ldub_code(env, s->pc++);
7546 mod = (modrm >> 6) & 3;
7547 op = (modrm >> 3) & 7;
7548 switch(op) {
7549 case 0: /* prefetchnta */
7550 case 1: /* prefetcht0 */
7551 case 2: /* prefetcht1 */
7552 case 3: /* prefetcht2 */
7553 if (mod == 3)
7554 goto illegal_op;
7555 gen_lea_modrm(env, s, modrm);
7556 /* nothing more to do */
7557 break;
7558 default: /* nop (multi byte) */
7559 gen_nop_modrm(env, s, modrm);
7560 break;
7562 break;
7563 case 0x119 ... 0x11f: /* nop (multi byte) */
7564 modrm = cpu_ldub_code(env, s->pc++);
7565 gen_nop_modrm(env, s, modrm);
7566 break;
7567 case 0x120: /* mov reg, crN */
7568 case 0x122: /* mov crN, reg */
7569 if (s->cpl != 0) {
7570 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7571 } else {
7572 modrm = cpu_ldub_code(env, s->pc++);
7573 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7574 * AMD documentation (24594.pdf) and testing of
7575 * intel 386 and 486 processors all show that the mod bits
7576 * are assumed to be 1's, regardless of actual values. */
7578 rm = (modrm & 7) | REX_B(s);
7579 reg = ((modrm >> 3) & 7) | rex_r;
7580 if (CODE64(s))
7581 ot = MO_64;
7582 else
7583 ot = MO_32;
7584 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7585 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7586 reg = 8;
7588 switch(reg) {
7589 case 0:
7590 case 2:
7591 case 3:
7592 case 4:
7593 case 8:
7594 gen_update_cc_op(s);
7595 gen_jmp_im(pc_start - s->cs_base);
7596 if (b & 2) {
7597 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7598 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7599 cpu_T[0]);
7600 gen_jmp_im(s->pc - s->cs_base);
7601 gen_eob(s);
7602 } else {
7603 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7604 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7606 break;
7607 default:
7608 goto illegal_op;
7611 break;
7612 case 0x121: /* mov reg, drN */
7613 case 0x123: /* mov drN, reg */
7614 if (s->cpl != 0) {
7615 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7616 } else {
7617 modrm = cpu_ldub_code(env, s->pc++);
7618 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7619 * AMD documentation (24594.pdf) and testing of
7620 * intel 386 and 486 processors all show that the mod bits
7621 * are assumed to be 1's, regardless of actual values. */
7623 rm = (modrm & 7) | REX_B(s);
7624 reg = ((modrm >> 3) & 7) | rex_r;
7625 if (CODE64(s))
7626 ot = MO_64;
7627 else
7628 ot = MO_32;
7629 if (reg >= 8) {
7630 goto illegal_op;
7632 if (b & 2) {
7633 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7634 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7635 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7636 gen_helper_set_dr(cpu_env, cpu_tmp2_i32, cpu_T[0]);
7637 gen_jmp_im(s->pc - s->cs_base);
7638 gen_eob(s);
7639 } else {
7640 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7641 tcg_gen_movi_i32(cpu_tmp2_i32, reg);
7642 gen_helper_get_dr(cpu_T[0], cpu_env, cpu_tmp2_i32);
7643 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7646 break;
7647 case 0x106: /* clts */
7648 if (s->cpl != 0) {
7649 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7650 } else {
7651 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7652 gen_helper_clts(cpu_env);
7653 /* abort block because static cpu state changed */
7654 gen_jmp_im(s->pc - s->cs_base);
7655 gen_eob(s);
7657 break;
7658 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7659 case 0x1c3: /* MOVNTI reg, mem */
7660 if (!(s->cpuid_features & CPUID_SSE2))
7661 goto illegal_op;
7662 ot = mo_64_32(dflag);
7663 modrm = cpu_ldub_code(env, s->pc++);
7664 mod = (modrm >> 6) & 3;
7665 if (mod == 3)
7666 goto illegal_op;
7667 reg = ((modrm >> 3) & 7) | rex_r;
7668 /* generate a generic store */
7669 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7670 break;
7671 case 0x1ae:
7672 modrm = cpu_ldub_code(env, s->pc++);
7673 mod = (modrm >> 6) & 3;
7674 op = (modrm >> 3) & 7;
7675 switch(op) {
7676 case 0: /* fxsave */
7677 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7678 (s->prefix & PREFIX_LOCK))
7679 goto illegal_op;
7680 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7681 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7682 break;
7684 gen_lea_modrm(env, s, modrm);
7685 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7686 break;
7687 case 1: /* fxrstor */
7688 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7689 (s->prefix & PREFIX_LOCK))
7690 goto illegal_op;
7691 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7692 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7693 break;
7695 gen_lea_modrm(env, s, modrm);
7696 gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7697 break;
7698 case 2: /* ldmxcsr */
7699 case 3: /* stmxcsr */
7700 if (s->flags & HF_TS_MASK) {
7701 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7702 break;
7704 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7705 mod == 3)
7706 goto illegal_op;
7707 gen_lea_modrm(env, s, modrm);
7708 if (op == 2) {
7709 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7710 s->mem_index, MO_LEUL);
7711 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7712 } else {
7713 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7714 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
7716 break;
7717 case 5: /* lfence */
7718 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7719 goto illegal_op;
7720 break;
7721 case 6: /* mfence/clwb */
7722 if (s->prefix & PREFIX_DATA) {
7723 /* clwb */
7724 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB))
7725 goto illegal_op;
7726 gen_nop_modrm(env, s, modrm);
7727 } else {
7728 /* mfence */
7729 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7730 goto illegal_op;
7732 break;
7733 case 7: /* sfence / clflush / clflushopt / pcommit */
7734 if ((modrm & 0xc7) == 0xc0) {
7735 if (s->prefix & PREFIX_DATA) {
7736 /* pcommit */
7737 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT))
7738 goto illegal_op;
7739 } else {
7740 /* sfence */
7741 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7742 if (!(s->cpuid_features & CPUID_SSE))
7743 goto illegal_op;
7745 } else {
7746 if (s->prefix & PREFIX_DATA) {
7747 /* clflushopt */
7748 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT))
7749 goto illegal_op;
7750 } else {
7751 /* clflush */
7752 if (!(s->cpuid_features & CPUID_CLFLUSH))
7753 goto illegal_op;
7755 gen_lea_modrm(env, s, modrm);
7757 break;
7758 default:
7759 goto illegal_op;
7761 break;
7762 case 0x10d: /* 3DNow! prefetch(w) */
7763 modrm = cpu_ldub_code(env, s->pc++);
7764 mod = (modrm >> 6) & 3;
7765 if (mod == 3)
7766 goto illegal_op;
7767 gen_lea_modrm(env, s, modrm);
7768 /* ignore for now */
7769 break;
7770 case 0x1aa: /* rsm */
7771 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7772 if (!(s->flags & HF_SMM_MASK))
7773 goto illegal_op;
7774 gen_update_cc_op(s);
7775 gen_jmp_im(s->pc - s->cs_base);
7776 gen_helper_rsm(cpu_env);
7777 gen_eob(s);
7778 break;
7779 case 0x1b8: /* SSE4.2 popcnt */
7780 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7781 PREFIX_REPZ)
7782 goto illegal_op;
7783 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7784 goto illegal_op;
7786 modrm = cpu_ldub_code(env, s->pc++);
7787 reg = ((modrm >> 3) & 7) | rex_r;
7789 if (s->prefix & PREFIX_DATA) {
7790 ot = MO_16;
7791 } else {
7792 ot = mo_64_32(dflag);
7795 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7796 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7797 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
7799 set_cc_op(s, CC_OP_EFLAGS);
7800 break;
7801 case 0x10e ... 0x10f:
7802 /* 3DNow! instructions, ignore prefixes */
7803 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7804 case 0x110 ... 0x117:
7805 case 0x128 ... 0x12f:
7806 case 0x138 ... 0x13a:
7807 case 0x150 ... 0x179:
7808 case 0x17c ... 0x17f:
7809 case 0x1c2:
7810 case 0x1c4 ... 0x1c6:
7811 case 0x1d0 ... 0x1fe:
7812 gen_sse(env, s, b, pc_start, rex_r);
7813 break;
7814 default:
7815 goto illegal_op;
7817 /* lock generation */
7818 if (s->prefix & PREFIX_LOCK)
7819 gen_helper_unlock();
7820 return s->pc;
7821 illegal_op:
7822 if (s->prefix & PREFIX_LOCK)
7823 gen_helper_unlock();
7824 /* XXX: ensure that no lock was generated */
7825 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7826 return s->pc;
7829 void tcg_x86_init(void)
7831 static const char reg_names[CPU_NB_REGS][4] = {
7832 #ifdef TARGET_X86_64
7833 [R_EAX] = "rax",
7834 [R_EBX] = "rbx",
7835 [R_ECX] = "rcx",
7836 [R_EDX] = "rdx",
7837 [R_ESI] = "rsi",
7838 [R_EDI] = "rdi",
7839 [R_EBP] = "rbp",
7840 [R_ESP] = "rsp",
7841 [8] = "r8",
7842 [9] = "r9",
7843 [10] = "r10",
7844 [11] = "r11",
7845 [12] = "r12",
7846 [13] = "r13",
7847 [14] = "r14",
7848 [15] = "r15",
7849 #else
7850 [R_EAX] = "eax",
7851 [R_EBX] = "ebx",
7852 [R_ECX] = "ecx",
7853 [R_EDX] = "edx",
7854 [R_ESI] = "esi",
7855 [R_EDI] = "edi",
7856 [R_EBP] = "ebp",
7857 [R_ESP] = "esp",
7858 #endif
7860 int i;
7862 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7863 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7864 offsetof(CPUX86State, cc_op), "cc_op");
7865 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7866 "cc_dst");
7867 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7868 "cc_src");
7869 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7870 "cc_src2");
7872 for (i = 0; i < CPU_NB_REGS; ++i) {
7873 cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
7874 offsetof(CPUX86State, regs[i]),
7875 reg_names[i]);
7878 helper_lock_init();
7881 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7882 basic block 'tb'. */
7883 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
7885 X86CPU *cpu = x86_env_get_cpu(env);
7886 CPUState *cs = CPU(cpu);
7887 DisasContext dc1, *dc = &dc1;
7888 target_ulong pc_ptr;
7889 uint64_t flags;
7890 target_ulong pc_start;
7891 target_ulong cs_base;
7892 int num_insns;
7893 int max_insns;
7895 /* generate intermediate code */
7896 pc_start = tb->pc;
7897 cs_base = tb->cs_base;
7898 flags = tb->flags;
7900 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7901 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7902 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7903 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7904 dc->f_st = 0;
7905 dc->vm86 = (flags >> VM_SHIFT) & 1;
7906 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7907 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7908 dc->tf = (flags >> TF_SHIFT) & 1;
7909 dc->singlestep_enabled = cs->singlestep_enabled;
7910 dc->cc_op = CC_OP_DYNAMIC;
7911 dc->cc_op_dirty = false;
7912 dc->cs_base = cs_base;
7913 dc->tb = tb;
7914 dc->popl_esp_hack = 0;
7915 /* select memory access functions */
7916 dc->mem_index = 0;
7917 if (flags & HF_SOFTMMU_MASK) {
7918 dc->mem_index = cpu_mmu_index(env, false);
7920 dc->cpuid_features = env->features[FEAT_1_EDX];
7921 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
7922 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
7923 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
7924 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
7925 #ifdef TARGET_X86_64
7926 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7927 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7928 #endif
7929 dc->flags = flags;
7930 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
7931 (flags & HF_INHIBIT_IRQ_MASK)
7932 #ifndef CONFIG_SOFTMMU
7933 || (flags & HF_SOFTMMU_MASK)
7934 #endif
7936 /* Do not optimize repz jumps at all in icount mode, because
7937 rep movsS instructions are executed with different code paths
7938 in the !repz_opt and repz_opt cases. The first path used to be
7939 taken everywhere except in single-step mode. Disabling the
7940 optimization here makes the control paths identical in normal
7941 and single-step execution.
7942 As a result there is no jump optimization for repz in
7943 record/replay mode, and there is always an additional step
7944 for ecx=0 when icount is enabled. */
7946 dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
7947 #if 0
7948 /* check addseg logic */
7949 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7950 printf("ERROR addseg\n");
7951 #endif
7953 cpu_T[0] = tcg_temp_new();
7954 cpu_T[1] = tcg_temp_new();
7955 cpu_A0 = tcg_temp_new();
7957 cpu_tmp0 = tcg_temp_new();
7958 cpu_tmp1_i64 = tcg_temp_new_i64();
7959 cpu_tmp2_i32 = tcg_temp_new_i32();
7960 cpu_tmp3_i32 = tcg_temp_new_i32();
7961 cpu_tmp4 = tcg_temp_new();
7962 cpu_ptr0 = tcg_temp_new_ptr();
7963 cpu_ptr1 = tcg_temp_new_ptr();
7964 cpu_cc_srcT = tcg_temp_local_new();
7966 dc->is_jmp = DISAS_NEXT;
7967 pc_ptr = pc_start;
7968 num_insns = 0;
7969 max_insns = tb->cflags & CF_COUNT_MASK;
7970 if (max_insns == 0) {
7971 max_insns = CF_COUNT_MASK;
7973 if (max_insns > TCG_MAX_INSNS) {
7974 max_insns = TCG_MAX_INSNS;
7977 gen_tb_start(tb);
7978 for(;;) {
7979 tcg_gen_insn_start(pc_ptr, dc->cc_op);
7980 num_insns++;
7982 /* If RF is set, suppress an internally generated breakpoint. */
7983 if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
7984 tb->flags & HF_RF_MASK
7985 ? BP_GDB : BP_ANY))) {
7986 gen_debug(dc, pc_ptr - dc->cs_base);
7987 /* The address covered by the breakpoint must be included in
7988 [tb->pc, tb->pc + tb->size) in order for it to be
7989 properly cleared -- thus we increment the PC here so that
7990 the logic setting tb->size below does the right thing. */
7991 pc_ptr += 1;
7992 goto done_generating;
7994 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
7995 gen_io_start();
7998 pc_ptr = disas_insn(env, dc, pc_ptr);
7999 /* stop translation if indicated */
8000 if (dc->is_jmp)
8001 break;
8002 /* in single-step mode, we generate only one instruction and
8003 raise an exception */
8004 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8005 the flag and abort the translation to give pending irqs a
8006 chance to happen */
8007 if (dc->tf || dc->singlestep_enabled ||
8008 (flags & HF_INHIBIT_IRQ_MASK)) {
8009 gen_jmp_im(pc_ptr - dc->cs_base);
8010 gen_eob(dc);
8011 break;
8013 /* Do not cross a page boundary in icount mode, since that
8014 can raise an exception. Stop only when the boundary would be
8015 crossed by the first instruction in the block.
8016 If the current instruction has already crossed the boundary,
8017 it is fine, because no exception has stopped this code. */
8019 if ((tb->cflags & CF_USE_ICOUNT)
8020 && ((pc_ptr & TARGET_PAGE_MASK)
8021 != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
8022 || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
8023 gen_jmp_im(pc_ptr - dc->cs_base);
8024 gen_eob(dc);
8025 break;
8027 /* if too long translation, stop generation too */
8028 if (tcg_op_buf_full() ||
8029 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8030 num_insns >= max_insns) {
8031 gen_jmp_im(pc_ptr - dc->cs_base);
8032 gen_eob(dc);
8033 break;
8035 if (singlestep) {
8036 gen_jmp_im(pc_ptr - dc->cs_base);
8037 gen_eob(dc);
8038 break;
8041 if (tb->cflags & CF_LAST_IO)
8042 gen_io_end();
8043 done_generating:
8044 gen_tb_end(tb, num_insns);
8046 #ifdef DEBUG_DISAS
8047 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8048 int disas_flags;
8049 qemu_log("----------------\n");
8050 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8051 #ifdef TARGET_X86_64
8052 if (dc->code64)
8053 disas_flags = 2;
8054 else
8055 #endif
8056 disas_flags = !dc->code32;
8057 log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
8058 qemu_log("\n");
8060 #endif
8062 tb->size = pc_ptr - pc_start;
8063 tb->icount = num_insns;
8066 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
8067 target_ulong *data)
8069 int cc_op = data[1];
8070 env->eip = data[0] - tb->cs_base;
8071 if (cc_op != CC_OP_DYNAMIC) {
8072 env->cc_op = cc_op;