target-i386/translate.c
1 /*
2 * i386 translation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
26 #include "qemu/host-utils.h"
27 #include "cpu.h"
28 #include "disas/disas.h"
29 #include "tcg-op.h"
30 #include "exec/cpu_ldst.h"
32 #include "exec/helper-proto.h"
33 #include "exec/helper-gen.h"
35 #include "trace-tcg.h"
38 #define PREFIX_REPZ 0x01
39 #define PREFIX_REPNZ 0x02
40 #define PREFIX_LOCK 0x04
41 #define PREFIX_DATA 0x08
42 #define PREFIX_ADR 0x10
43 #define PREFIX_VEX 0x20
45 #ifdef TARGET_X86_64
46 #define CODE64(s) ((s)->code64)
47 #define REX_X(s) ((s)->rex_x)
48 #define REX_B(s) ((s)->rex_b)
49 #else
50 #define CODE64(s) 0
51 #define REX_X(s) 0
52 #define REX_B(s) 0
53 #endif
55 #ifdef TARGET_X86_64
56 # define ctztl ctz64
57 # define clztl clz64
58 #else
59 # define ctztl ctz32
60 # define clztl clz32
61 #endif
63 //#define MACRO_TEST 1
65 /* global register indexes */
66 static TCGv_ptr cpu_env;
67 static TCGv cpu_A0;
68 static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
69 static TCGv_i32 cpu_cc_op;
70 static TCGv cpu_regs[CPU_NB_REGS];
71 /* local temps */
72 static TCGv cpu_T[2];
73 /* local register indexes (only used inside old micro ops) */
74 static TCGv cpu_tmp0, cpu_tmp4;
75 static TCGv_ptr cpu_ptr0, cpu_ptr1;
76 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
77 static TCGv_i64 cpu_tmp1_i64;
79 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
81 #include "exec/gen-icount.h"
83 #ifdef TARGET_X86_64
84 static int x86_64_hregs;
85 #endif
87 typedef struct DisasContext {
88 /* current insn context */
89 int override; /* -1 if no override */
90 int prefix;
91 TCGMemOp aflag;
92 TCGMemOp dflag;
93 target_ulong pc; /* pc = eip + cs_base */
94 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
95 static state change (stop translation) */
96 /* current block context */
97 target_ulong cs_base; /* base of CS segment */
98 int pe; /* protected mode */
99 int code32; /* 32 bit code segment */
100 #ifdef TARGET_X86_64
101 int lma; /* long mode active */
102 int code64; /* 64 bit code segment */
103 int rex_x, rex_b;
104 #endif
105 int vex_l; /* vex vector length */
106 int vex_v; /* vex vvvv register, without one's complement. */
107 int ss32; /* 32 bit stack segment */
108 CCOp cc_op; /* current CC operation */
109 bool cc_op_dirty;
110 int addseg; /* non zero if either DS/ES/SS have a non zero base */
111 int f_st; /* currently unused */
112 int vm86; /* vm86 mode */
113 int cpl;
114 int iopl;
115 int tf; /* TF cpu flag */
116 int singlestep_enabled; /* "hardware" single step enabled */
117 int jmp_opt; /* use direct block chaining for direct jumps */
118 int repz_opt; /* optimize jumps within repz instructions */
119 int mem_index; /* select memory access functions */
120 uint64_t flags; /* all execution flags */
121 struct TranslationBlock *tb;
122 int popl_esp_hack; /* for correct popl with esp base handling */
123 int rip_offset; /* only used in x86_64, but left for simplicity */
124 int cpuid_features;
125 int cpuid_ext_features;
126 int cpuid_ext2_features;
127 int cpuid_ext3_features;
128 int cpuid_7_0_ebx_features;
129 } DisasContext;
131 static void gen_eob(DisasContext *s);
132 static void gen_jmp(DisasContext *s, target_ulong eip);
133 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
134 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);
136 /* i386 arith/logic operations */
137 enum {
138 OP_ADDL,
139 OP_ORL,
140 OP_ADCL,
141 OP_SBBL,
142 OP_ANDL,
143 OP_SUBL,
144 OP_XORL,
145 OP_CMPL,
148 /* i386 shift ops */
149 enum {
150 OP_ROL,
151 OP_ROR,
152 OP_RCL,
153 OP_RCR,
154 OP_SHL,
155 OP_SHR,
156 OP_SHL1, /* undocumented */
157 OP_SAR = 7,
160 enum {
161 JCC_O,
162 JCC_B,
163 JCC_Z,
164 JCC_BE,
165 JCC_S,
166 JCC_P,
167 JCC_L,
168 JCC_LE,
171 enum {
172 /* I386 int registers */
173 OR_EAX, /* MUST be even numbered */
174 OR_ECX,
175 OR_EDX,
176 OR_EBX,
177 OR_ESP,
178 OR_EBP,
179 OR_ESI,
180 OR_EDI,
182 OR_TMP0 = 16, /* temporary operand register */
183 OR_TMP1,
184 OR_A0, /* temporary register used when doing address evaluation */
187 enum {
188 USES_CC_DST = 1,
189 USES_CC_SRC = 2,
190 USES_CC_SRC2 = 4,
191 USES_CC_SRCT = 8,
194 /* Bit set if the global variable is live after setting CC_OP to X. */
195 static const uint8_t cc_op_live[CC_OP_NB] = {
196 [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
197 [CC_OP_EFLAGS] = USES_CC_SRC,
198 [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
199 [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
200 [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
201 [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
202 [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
203 [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
204 [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
205 [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
206 [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
207 [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
208 [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
209 [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
210 [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
211 [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
212 [CC_OP_CLR] = 0,
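/* Example of the lazy-flags scheme the table above supports: an 8-bit
   ADD merely records CC_DST = result, CC_SRC = source operand and
   cc_op = CC_OP_ADDB.  ZF is only materialized on demand as
   (uint8_t)CC_DST == 0, and CF as (uint8_t)CC_DST < (uint8_t)CC_SRC,
   so flag computations that no later instruction consumes are never
   generated at all. */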
215 static void set_cc_op(DisasContext *s, CCOp op)
217 int dead;
219 if (s->cc_op == op) {
220 return;
223 /* Discard CC computation that will no longer be used. */
224 dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
225 if (dead & USES_CC_DST) {
226 tcg_gen_discard_tl(cpu_cc_dst);
228 if (dead & USES_CC_SRC) {
229 tcg_gen_discard_tl(cpu_cc_src);
231 if (dead & USES_CC_SRC2) {
232 tcg_gen_discard_tl(cpu_cc_src2);
234 if (dead & USES_CC_SRCT) {
235 tcg_gen_discard_tl(cpu_cc_srcT);
238 if (op == CC_OP_DYNAMIC) {
239 /* The DYNAMIC setting is translator only, and should never be
240 stored. Thus we always consider it clean. */
241 s->cc_op_dirty = false;
242 } else {
243 /* Discard any computed CC_OP value (see shifts). */
244 if (s->cc_op == CC_OP_DYNAMIC) {
245 tcg_gen_discard_i32(cpu_cc_op);
247 s->cc_op_dirty = true;
249 s->cc_op = op;
252 static void gen_update_cc_op(DisasContext *s)
254 if (s->cc_op_dirty) {
255 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
256 s->cc_op_dirty = false;
260 #ifdef TARGET_X86_64
262 #define NB_OP_SIZES 4
264 #else /* !TARGET_X86_64 */
266 #define NB_OP_SIZES 3
268 #endif /* !TARGET_X86_64 */
270 #if defined(HOST_WORDS_BIGENDIAN)
271 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
272 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
273 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
274 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
275 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
276 #else
277 #define REG_B_OFFSET 0
278 #define REG_H_OFFSET 1
279 #define REG_W_OFFSET 0
280 #define REG_L_OFFSET 0
281 #define REG_LH_OFFSET 4
282 #endif
284 /* In instruction encodings for byte register accesses the
285 * register number usually indicates "low 8 bits of register N";
286 * however there are some special cases where N 4..7 indicates
287 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
288 * true for this special case, false otherwise.
290 static inline bool byte_reg_is_xH(int reg)
292 if (reg < 4) {
293 return false;
295 #ifdef TARGET_X86_64
296 if (reg >= 8 || x86_64_hregs) {
297 return false;
299 #endif
300 return true;
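/* Example: without a REX prefix, "mov %ah, (%ebx)" encodes reg = 4 and
   byte_reg_is_xH() returns true (bits 15..8 of EAX).  With any REX
   prefix present, the decoder sets x86_64_hregs and reg = 4 instead
   selects SPL, the low byte of RSP, so the function returns false. */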
303 /* Select the size of a push/pop operation. */
304 static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
306 if (CODE64(s)) {
307 return ot == MO_16 ? MO_16 : MO_64;
308 } else {
309 return ot;
313 /* Select only size 64 else 32. Used for SSE operand sizes. */
314 static inline TCGMemOp mo_64_32(TCGMemOp ot)
316 #ifdef TARGET_X86_64
317 return ot == MO_64 ? MO_64 : MO_32;
318 #else
319 return MO_32;
320 #endif
323 /* Select size 8 if lsb of B is clear, else OT. Used for decoding
324 byte vs word opcodes. */
325 static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
327 return b & 1 ? ot : MO_8;
330 /* Select size 8 if lsb of B is clear, else OT capped at 32.
331 Used for decoding operand size of port opcodes. */
332 static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
334 return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
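/* Example: IN AL,DX (0xEC) has bit 0 clear, so mo_b_d32() yields MO_8;
   IN eAX,DX (0xED) has bit 0 set and yields MO_32 (or MO_16 with an
   operand-size prefix).  Port I/O never moves 64-bit quantities, hence
   the cap at MO_32. */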
337 static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
339 switch(ot) {
340 case MO_8:
341 if (!byte_reg_is_xH(reg)) {
342 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
343 } else {
344 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
346 break;
347 case MO_16:
348 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
349 break;
350 case MO_32:
351 /* For x86_64, this sets the higher half of the register to zero.
352 For i386, this is equivalent to a mov. */
353 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
354 break;
355 #ifdef TARGET_X86_64
356 case MO_64:
357 tcg_gen_mov_tl(cpu_regs[reg], t0);
358 break;
359 #endif
360 default:
361 tcg_abort();
365 static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
367 if (ot == MO_8 && byte_reg_is_xH(reg)) {
368 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
369 tcg_gen_ext8u_tl(t0, t0);
370 } else {
371 tcg_gen_mov_tl(t0, cpu_regs[reg]);
375 static inline void gen_op_movl_A0_reg(int reg)
377 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
380 static inline void gen_op_addl_A0_im(int32_t val)
382 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
383 #ifdef TARGET_X86_64
384 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
385 #endif
388 #ifdef TARGET_X86_64
389 static inline void gen_op_addq_A0_im(int64_t val)
391 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
393 #endif
395 static void gen_add_A0_im(DisasContext *s, int val)
397 #ifdef TARGET_X86_64
398 if (CODE64(s))
399 gen_op_addq_A0_im(val);
400 else
401 #endif
402 gen_op_addl_A0_im(val);
405 static inline void gen_op_jmp_v(TCGv dest)
407 tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
410 static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
412 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
413 gen_op_mov_reg_v(size, reg, cpu_tmp0);
416 static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
418 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
419 gen_op_mov_reg_v(size, reg, cpu_tmp0);
422 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
424 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
425 if (shift != 0)
426 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
427 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
428 /* For x86_64, this sets the higher half of the register to zero.
429 For i386, this is equivalent to a nop. */
430 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
433 static inline void gen_op_movl_A0_seg(int reg)
435 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
438 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
440 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
441 #ifdef TARGET_X86_64
442 if (CODE64(s)) {
443 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
444 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
445 } else {
446 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
447 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
449 #else
450 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
451 #endif
454 #ifdef TARGET_X86_64
455 static inline void gen_op_movq_A0_seg(int reg)
457 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
460 static inline void gen_op_addq_A0_seg(int reg)
462 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
463 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
466 static inline void gen_op_movq_A0_reg(int reg)
468 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
471 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
473 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
474 if (shift != 0)
475 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
476 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
478 #endif
480 static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
482 tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
485 static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
487 tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
490 static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
492 if (d == OR_TMP0) {
493 gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
494 } else {
495 gen_op_mov_reg_v(idx, d, cpu_T[0]);
499 static inline void gen_jmp_im(target_ulong pc)
501 tcg_gen_movi_tl(cpu_tmp0, pc);
502 gen_op_jmp_v(cpu_tmp0);
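/* Note that the value stored into env->eip here is the offset within
   the code segment (eip), not the flat pc = eip + cs_base that the
   translator itself works with. */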
505 static inline void gen_string_movl_A0_ESI(DisasContext *s)
507 int override;
509 override = s->override;
510 switch (s->aflag) {
511 #ifdef TARGET_X86_64
512 case MO_64:
513 if (override >= 0) {
514 gen_op_movq_A0_seg(override);
515 gen_op_addq_A0_reg_sN(0, R_ESI);
516 } else {
517 gen_op_movq_A0_reg(R_ESI);
519 break;
520 #endif
521 case MO_32:
522 /* 32 bit address */
523 if (s->addseg && override < 0)
524 override = R_DS;
525 if (override >= 0) {
526 gen_op_movl_A0_seg(override);
527 gen_op_addl_A0_reg_sN(0, R_ESI);
528 } else {
529 gen_op_movl_A0_reg(R_ESI);
531 break;
532 case MO_16:
533 /* 16 bit address, always override */
534 if (override < 0)
535 override = R_DS;
536 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
537 gen_op_addl_A0_seg(s, override);
538 break;
539 default:
540 tcg_abort();
544 static inline void gen_string_movl_A0_EDI(DisasContext *s)
546 switch (s->aflag) {
547 #ifdef TARGET_X86_64
548 case MO_64:
549 gen_op_movq_A0_reg(R_EDI);
550 break;
551 #endif
552 case MO_32:
553 if (s->addseg) {
554 gen_op_movl_A0_seg(R_ES);
555 gen_op_addl_A0_reg_sN(0, R_EDI);
556 } else {
557 gen_op_movl_A0_reg(R_EDI);
559 break;
560 case MO_16:
561 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
562 gen_op_addl_A0_seg(s, R_ES);
563 break;
564 default:
565 tcg_abort();
569 static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
571 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
572 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
575 static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
577 switch (size) {
578 case MO_8:
579 if (sign) {
580 tcg_gen_ext8s_tl(dst, src);
581 } else {
582 tcg_gen_ext8u_tl(dst, src);
584 return dst;
585 case MO_16:
586 if (sign) {
587 tcg_gen_ext16s_tl(dst, src);
588 } else {
589 tcg_gen_ext16u_tl(dst, src);
591 return dst;
592 #ifdef TARGET_X86_64
593 case MO_32:
594 if (sign) {
595 tcg_gen_ext32s_tl(dst, src);
596 } else {
597 tcg_gen_ext32u_tl(dst, src);
599 return dst;
600 #endif
601 default:
602 return src;
606 static void gen_extu(TCGMemOp ot, TCGv reg)
608 gen_ext_tl(reg, reg, ot, false);
611 static void gen_exts(TCGMemOp ot, TCGv reg)
613 gen_ext_tl(reg, reg, ot, true);
616 static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
618 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
619 gen_extu(size, cpu_tmp0);
620 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
623 static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
625 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
626 gen_extu(size, cpu_tmp0);
627 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
630 static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
632 switch (ot) {
633 case MO_8:
634 gen_helper_inb(v, cpu_env, n);
635 break;
636 case MO_16:
637 gen_helper_inw(v, cpu_env, n);
638 break;
639 case MO_32:
640 gen_helper_inl(v, cpu_env, n);
641 break;
642 default:
643 tcg_abort();
647 static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
649 switch (ot) {
650 case MO_8:
651 gen_helper_outb(cpu_env, v, n);
652 break;
653 case MO_16:
654 gen_helper_outw(cpu_env, v, n);
655 break;
656 case MO_32:
657 gen_helper_outl(cpu_env, v, n);
658 break;
659 default:
660 tcg_abort();
664 static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
665 uint32_t svm_flags)
667 int state_saved;
668 target_ulong next_eip;
670 state_saved = 0;
671 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
672 gen_update_cc_op(s);
673 gen_jmp_im(cur_eip);
674 state_saved = 1;
675 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
676 switch (ot) {
677 case MO_8:
678 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
679 break;
680 case MO_16:
681 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
682 break;
683 case MO_32:
684 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
685 break;
686 default:
687 tcg_abort();
690 if(s->flags & HF_SVMI_MASK) {
691 if (!state_saved) {
692 gen_update_cc_op(s);
693 gen_jmp_im(cur_eip);
695 svm_flags |= (1 << (4 + ot));
696 next_eip = s->pc - s->cs_base;
697 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
698 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
699 tcg_const_i32(svm_flags),
700 tcg_const_i32(next_eip - cur_eip));
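/* Two independent checks may be generated above: the TSS I/O
   permission bitmap test (in protected mode, when CPL > IOPL or in
   vm86 mode) and the SVM IOIO intercept when running under a
   hypervisor guest.  Either helper can raise an exception, which is
   why cc_op and eip must be written back first. */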
704 static inline void gen_movs(DisasContext *s, TCGMemOp ot)
706 gen_string_movl_A0_ESI(s);
707 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
708 gen_string_movl_A0_EDI(s);
709 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
710 gen_op_movl_T0_Dshift(ot);
711 gen_op_add_reg_T0(s->aflag, R_ESI);
712 gen_op_add_reg_T0(s->aflag, R_EDI);
715 static void gen_op_update1_cc(void)
717 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
720 static void gen_op_update2_cc(void)
722 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
723 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
726 static void gen_op_update3_cc(TCGv reg)
728 tcg_gen_mov_tl(cpu_cc_src2, reg);
729 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
730 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
733 static inline void gen_op_testl_T0_T1_cc(void)
735 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
738 static void gen_op_update_neg_cc(void)
740 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
741 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
742 tcg_gen_movi_tl(cpu_cc_srcT, 0);
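/* NEG is treated as a subtraction from zero: the caller leaves the
   negated result in cpu_T[0], so CC_SRC = -result recovers the
   original operand and CC_SRCT = 0 stands in for the implicit zero
   minuend, letting the generic SUB flag logic apply unchanged. */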
745 /* compute all eflags to cc_src */
746 static void gen_compute_eflags(DisasContext *s)
748 TCGv zero, dst, src1, src2;
749 int live, dead;
751 if (s->cc_op == CC_OP_EFLAGS) {
752 return;
754 if (s->cc_op == CC_OP_CLR) {
755 tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
756 set_cc_op(s, CC_OP_EFLAGS);
757 return;
760 TCGV_UNUSED(zero);
761 dst = cpu_cc_dst;
762 src1 = cpu_cc_src;
763 src2 = cpu_cc_src2;
765 /* Take care to not read values that are not live. */
766 live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
767 dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
768 if (dead) {
769 zero = tcg_const_tl(0);
770 if (dead & USES_CC_DST) {
771 dst = zero;
773 if (dead & USES_CC_SRC) {
774 src1 = zero;
776 if (dead & USES_CC_SRC2) {
777 src2 = zero;
781 gen_update_cc_op(s);
782 gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
783 set_cc_op(s, CC_OP_EFLAGS);
785 if (dead) {
786 tcg_temp_free(zero);
790 typedef struct CCPrepare {
791 TCGCond cond;
792 TCGv reg;
793 TCGv reg2;
794 target_ulong imm;
795 target_ulong mask;
796 bool use_reg2;
797 bool no_setcond;
798 } CCPrepare;
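/* Example: testing the carry after a shift yields roughly
       { .cond = TCG_COND_NE, .reg = cpu_cc_src,
         .mask = (target_ulong)1 << (data_bits - 1) }
   i.e. "true if the last bit shifted out is set".  No comparison is
   emitted until the consumer decides whether it wants a setcond, a
   brcond or a movcond on the prepared condition. */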
800 /* compute eflags.C to reg */
801 static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
803 TCGv t0, t1;
804 int size, shift;
806 switch (s->cc_op) {
807 case CC_OP_SUBB ... CC_OP_SUBQ:
808 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
809 size = s->cc_op - CC_OP_SUBB;
810 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
811 /* If no temporary was used, be careful not to alias t1 and t0. */
812 t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
813 tcg_gen_mov_tl(t0, cpu_cc_srcT);
814 gen_extu(size, t0);
815 goto add_sub;
817 case CC_OP_ADDB ... CC_OP_ADDQ:
818 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
819 size = s->cc_op - CC_OP_ADDB;
820 t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
821 t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
822 add_sub:
823 return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
824 .reg2 = t1, .mask = -1, .use_reg2 = true };
826 case CC_OP_LOGICB ... CC_OP_LOGICQ:
827 case CC_OP_CLR:
828 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
830 case CC_OP_INCB ... CC_OP_INCQ:
831 case CC_OP_DECB ... CC_OP_DECQ:
832 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
833 .mask = -1, .no_setcond = true };
835 case CC_OP_SHLB ... CC_OP_SHLQ:
836 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
837 size = s->cc_op - CC_OP_SHLB;
838 shift = (8 << size) - 1;
839 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
840 .mask = (target_ulong)1 << shift };
842 case CC_OP_MULB ... CC_OP_MULQ:
843 return (CCPrepare) { .cond = TCG_COND_NE,
844 .reg = cpu_cc_src, .mask = -1 };
846 case CC_OP_BMILGB ... CC_OP_BMILGQ:
847 size = s->cc_op - CC_OP_BMILGB;
848 t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
849 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
851 case CC_OP_ADCX:
852 case CC_OP_ADCOX:
853 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
854 .mask = -1, .no_setcond = true };
856 case CC_OP_EFLAGS:
857 case CC_OP_SARB ... CC_OP_SARQ:
858 /* CC_SRC & 1 */
859 return (CCPrepare) { .cond = TCG_COND_NE,
860 .reg = cpu_cc_src, .mask = CC_C };
862 default:
863 /* The need to compute only C from CC_OP_DYNAMIC is important
864 in efficiently implementing e.g. INC at the start of a TB. */
865 gen_update_cc_op(s);
866 gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
867 cpu_cc_src2, cpu_cc_op);
868 return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
869 .mask = -1, .no_setcond = true };
873 /* compute eflags.P to reg */
874 static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
876 gen_compute_eflags(s);
877 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
878 .mask = CC_P };
881 /* compute eflags.S to reg */
882 static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
884 switch (s->cc_op) {
885 case CC_OP_DYNAMIC:
886 gen_compute_eflags(s);
887 /* FALLTHRU */
888 case CC_OP_EFLAGS:
889 case CC_OP_ADCX:
890 case CC_OP_ADOX:
891 case CC_OP_ADCOX:
892 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
893 .mask = CC_S };
894 case CC_OP_CLR:
895 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
896 default:
898 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
899 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
900 return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
905 /* compute eflags.O to reg */
906 static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
908 switch (s->cc_op) {
909 case CC_OP_ADOX:
910 case CC_OP_ADCOX:
911 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
912 .mask = -1, .no_setcond = true };
913 case CC_OP_CLR:
914 return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
915 default:
916 gen_compute_eflags(s);
917 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
918 .mask = CC_O };
922 /* compute eflags.Z to reg */
923 static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
925 switch (s->cc_op) {
926 case CC_OP_DYNAMIC:
927 gen_compute_eflags(s);
928 /* FALLTHRU */
929 case CC_OP_EFLAGS:
930 case CC_OP_ADCX:
931 case CC_OP_ADOX:
932 case CC_OP_ADCOX:
933 return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
934 .mask = CC_Z };
935 case CC_OP_CLR:
936 return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
937 default:
939 TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
940 TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
941 return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
946 /* perform a conditional store into register 'reg' according to jump opcode
947 value 'b'. In the fast case, T0 is guaranteed not to be used. */
948 static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
950 int inv, jcc_op, cond;
951 TCGMemOp size;
952 CCPrepare cc;
953 TCGv t0;
955 inv = b & 1;
956 jcc_op = (b >> 1) & 7;
958 switch (s->cc_op) {
959 case CC_OP_SUBB ... CC_OP_SUBQ:
960 /* We optimize relational operators for the cmp/jcc case. */
961 size = s->cc_op - CC_OP_SUBB;
962 switch (jcc_op) {
963 case JCC_BE:
964 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
965 gen_extu(size, cpu_tmp4);
966 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
967 cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
968 .reg2 = t0, .mask = -1, .use_reg2 = true };
969 break;
971 case JCC_L:
972 cond = TCG_COND_LT;
973 goto fast_jcc_l;
974 case JCC_LE:
975 cond = TCG_COND_LE;
976 fast_jcc_l:
977 tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
978 gen_exts(size, cpu_tmp4);
979 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
980 cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
981 .reg2 = t0, .mask = -1, .use_reg2 = true };
982 break;
984 default:
985 goto slow_jcc;
987 break;
989 default:
990 slow_jcc:
991 /* This actually generates good code for JC, JZ and JS. */
992 switch (jcc_op) {
993 case JCC_O:
994 cc = gen_prepare_eflags_o(s, reg);
995 break;
996 case JCC_B:
997 cc = gen_prepare_eflags_c(s, reg);
998 break;
999 case JCC_Z:
1000 cc = gen_prepare_eflags_z(s, reg);
1001 break;
1002 case JCC_BE:
1003 gen_compute_eflags(s);
1004 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
1005 .mask = CC_Z | CC_C };
1006 break;
1007 case JCC_S:
1008 cc = gen_prepare_eflags_s(s, reg);
1009 break;
1010 case JCC_P:
1011 cc = gen_prepare_eflags_p(s, reg);
1012 break;
1013 case JCC_L:
1014 gen_compute_eflags(s);
1015 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1016 reg = cpu_tmp0;
1018 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1019 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1020 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1021 .mask = CC_S };
1022 break;
1023 default:
1024 case JCC_LE:
1025 gen_compute_eflags(s);
1026 if (TCGV_EQUAL(reg, cpu_cc_src)) {
1027 reg = cpu_tmp0;
1029 tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
1030 tcg_gen_xor_tl(reg, reg, cpu_cc_src);
1031 cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
1032 .mask = CC_S | CC_Z };
1033 break;
1035 break;
1038 if (inv) {
1039 cc.cond = tcg_invert_cond(cc.cond);
1041 return cc;
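/* The jump opcode value 'b' encodes (jcc_op << 1) | inv, matching the
   low bits of the Jcc/SETcc/CMOVcc opcodes: e.g. JNZ is
   (JCC_Z << 1) | 1, so the inverted forms fall out of a single
   tcg_invert_cond() at the end rather than being special-cased. */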
1044 static void gen_setcc1(DisasContext *s, int b, TCGv reg)
1046 CCPrepare cc = gen_prepare_cc(s, b, reg);
1048 if (cc.no_setcond) {
1049 if (cc.cond == TCG_COND_EQ) {
1050 tcg_gen_xori_tl(reg, cc.reg, 1);
1051 } else {
1052 tcg_gen_mov_tl(reg, cc.reg);
1054 return;
1057 if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
1058 cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
1059 tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
1060 tcg_gen_andi_tl(reg, reg, 1);
1061 return;
1063 if (cc.mask != -1) {
1064 tcg_gen_andi_tl(reg, cc.reg, cc.mask);
1065 cc.reg = reg;
1067 if (cc.use_reg2) {
1068 tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
1069 } else {
1070 tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
1074 static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
1076 gen_setcc1(s, JCC_B << 1, reg);
1079 /* generate a conditional jump to label 'l1' according to jump opcode
1080 value 'b'. In the fast case, T0 is guaranteed not to be used. */
1081 static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
1083 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1085 if (cc.mask != -1) {
1086 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1087 cc.reg = cpu_T[0];
1089 if (cc.use_reg2) {
1090 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1091 } else {
1092 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1096 /* Generate a conditional jump to label 'l1' according to jump opcode
1097 value 'b'. In the fast case, T0 is guaranteed not to be used.
1098 A translation block must end soon. */
1099 static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
1101 CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);
1103 gen_update_cc_op(s);
1104 if (cc.mask != -1) {
1105 tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
1106 cc.reg = cpu_T[0];
1108 set_cc_op(s, CC_OP_DYNAMIC);
1109 if (cc.use_reg2) {
1110 tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
1111 } else {
1112 tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
1116 /* XXX: does not work with gdbstub "ice" single step - not a
1117 serious problem */
1118 static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1120 TCGLabel *l1 = gen_new_label();
1121 TCGLabel *l2 = gen_new_label();
1122 gen_op_jnz_ecx(s->aflag, l1);
1123 gen_set_label(l2);
1124 gen_jmp_tb(s, next_eip, 1);
1125 gen_set_label(l1);
1126 return l2;
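/* l1 skips over the TB exit when ECX is non-zero, so execution falls
   through into the string operation; the returned label l2 marks the
   exit to next_eip and is the target the REP expansion branches to
   once ECX reaches zero. */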
1129 static inline void gen_stos(DisasContext *s, TCGMemOp ot)
1131 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
1132 gen_string_movl_A0_EDI(s);
1133 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
1134 gen_op_movl_T0_Dshift(ot);
1135 gen_op_add_reg_T0(s->aflag, R_EDI);
1138 static inline void gen_lods(DisasContext *s, TCGMemOp ot)
1140 gen_string_movl_A0_ESI(s);
1141 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1142 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
1143 gen_op_movl_T0_Dshift(ot);
1144 gen_op_add_reg_T0(s->aflag, R_ESI);
1147 static inline void gen_scas(DisasContext *s, TCGMemOp ot)
1149 gen_string_movl_A0_EDI(s);
1150 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
1151 gen_op(s, OP_CMPL, ot, R_EAX);
1152 gen_op_movl_T0_Dshift(ot);
1153 gen_op_add_reg_T0(s->aflag, R_EDI);
1156 static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
1158 gen_string_movl_A0_EDI(s);
1159 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
1160 gen_string_movl_A0_ESI(s);
1161 gen_op(s, OP_CMPL, ot, OR_TMP0);
1162 gen_op_movl_T0_Dshift(ot);
1163 gen_op_add_reg_T0(s->aflag, R_ESI);
1164 gen_op_add_reg_T0(s->aflag, R_EDI);
1167 static inline void gen_ins(DisasContext *s, TCGMemOp ot)
1169 if (s->tb->cflags & CF_USE_ICOUNT) {
1170 gen_io_start();
1172 gen_string_movl_A0_EDI(s);
1173 /* Note: we must do this dummy write first to be restartable in
1174 case of page fault. */
1175 tcg_gen_movi_tl(cpu_T[0], 0);
1176 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
1177 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1178 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1179 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1180 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
1181 gen_op_movl_T0_Dshift(ot);
1182 gen_op_add_reg_T0(s->aflag, R_EDI);
1183 if (s->tb->cflags & CF_USE_ICOUNT) {
1184 gen_io_end();
1188 static inline void gen_outs(DisasContext *s, TCGMemOp ot)
1190 if (s->tb->cflags & CF_USE_ICOUNT) {
1191 gen_io_start();
1193 gen_string_movl_A0_ESI(s);
1194 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1196 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
1197 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1198 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1199 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1201 gen_op_movl_T0_Dshift(ot);
1202 gen_op_add_reg_T0(s->aflag, R_ESI);
1203 if (s->tb->cflags & CF_USE_ICOUNT) {
1204 gen_io_end();
1208 /* same method as Valgrind: we generate jumps to current or next
1209 instruction */
1210 #define GEN_REPZ(op) \
1211 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1212 target_ulong cur_eip, target_ulong next_eip) \
1214 TCGLabel *l2; \
1215 gen_update_cc_op(s); \
1216 l2 = gen_jz_ecx_string(s, next_eip); \
1217 gen_ ## op(s, ot); \
1218 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1219 /* a loop would cause two single step exceptions if ECX = 1 \
1220 before rep string_insn */ \
1221 if (s->repz_opt) \
1222 gen_op_jz_ecx(s->aflag, l2); \
1223 gen_jmp(s, cur_eip); \
1226 #define GEN_REPZ2(op) \
1227 static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot, \
1228 target_ulong cur_eip, \
1229 target_ulong next_eip, \
1230 int nz) \
1232 TCGLabel *l2; \
1233 gen_update_cc_op(s); \
1234 l2 = gen_jz_ecx_string(s, next_eip); \
1235 gen_ ## op(s, ot); \
1236 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1237 gen_update_cc_op(s); \
1238 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1239 if (s->repz_opt) \
1240 gen_op_jz_ecx(s->aflag, l2); \
1241 gen_jmp(s, cur_eip); \
1244 GEN_REPZ(movs)
1245 GEN_REPZ(stos)
1246 GEN_REPZ(lods)
1247 GEN_REPZ(ins)
1248 GEN_REPZ(outs)
1249 GEN_REPZ2(scas)
1250 GEN_REPZ2(cmps)
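/* For instance, GEN_REPZ(movs) expands to gen_repz_movs(), which
   emits: the ECX == 0 early exit, one gen_movs() step, ECX -= 1, an
   optional "exit if ECX reached 0" test (repz_opt), and finally a
   jump back to cur_eip so the instruction re-executes; this is the
   jump-to-self scheme borrowed from Valgrind instead of a translated
   loop. */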
1252 static void gen_helper_fp_arith_ST0_FT0(int op)
1254 switch (op) {
1255 case 0:
1256 gen_helper_fadd_ST0_FT0(cpu_env);
1257 break;
1258 case 1:
1259 gen_helper_fmul_ST0_FT0(cpu_env);
1260 break;
1261 case 2:
1262 gen_helper_fcom_ST0_FT0(cpu_env);
1263 break;
1264 case 3:
1265 gen_helper_fcom_ST0_FT0(cpu_env);
1266 break;
1267 case 4:
1268 gen_helper_fsub_ST0_FT0(cpu_env);
1269 break;
1270 case 5:
1271 gen_helper_fsubr_ST0_FT0(cpu_env);
1272 break;
1273 case 6:
1274 gen_helper_fdiv_ST0_FT0(cpu_env);
1275 break;
1276 case 7:
1277 gen_helper_fdivr_ST0_FT0(cpu_env);
1278 break;
1282 /* NOTE the exception in "r" op ordering */
1283 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1285 TCGv_i32 tmp = tcg_const_i32(opreg);
1286 switch (op) {
1287 case 0:
1288 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1289 break;
1290 case 1:
1291 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1292 break;
1293 case 4:
1294 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1295 break;
1296 case 5:
1297 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1298 break;
1299 case 6:
1300 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1301 break;
1302 case 7:
1303 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1304 break;
1308 /* if d == OR_TMP0, it means memory operand (address in A0) */
1309 static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
1311 if (d != OR_TMP0) {
1312 gen_op_mov_v_reg(ot, cpu_T[0], d);
1313 } else {
1314 gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
1316 switch(op) {
1317 case OP_ADCL:
1318 gen_compute_eflags_c(s1, cpu_tmp4);
1319 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1320 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1321 gen_op_st_rm_T0_A0(s1, ot, d);
1322 gen_op_update3_cc(cpu_tmp4);
1323 set_cc_op(s1, CC_OP_ADCB + ot);
1324 break;
1325 case OP_SBBL:
1326 gen_compute_eflags_c(s1, cpu_tmp4);
1327 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1328 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1329 gen_op_st_rm_T0_A0(s1, ot, d);
1330 gen_op_update3_cc(cpu_tmp4);
1331 set_cc_op(s1, CC_OP_SBBB + ot);
1332 break;
1333 case OP_ADDL:
1334 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1335 gen_op_st_rm_T0_A0(s1, ot, d);
1336 gen_op_update2_cc();
1337 set_cc_op(s1, CC_OP_ADDB + ot);
1338 break;
1339 case OP_SUBL:
1340 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1341 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1342 gen_op_st_rm_T0_A0(s1, ot, d);
1343 gen_op_update2_cc();
1344 set_cc_op(s1, CC_OP_SUBB + ot);
1345 break;
1346 default:
1347 case OP_ANDL:
1348 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1349 gen_op_st_rm_T0_A0(s1, ot, d);
1350 gen_op_update1_cc();
1351 set_cc_op(s1, CC_OP_LOGICB + ot);
1352 break;
1353 case OP_ORL:
1354 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1355 gen_op_st_rm_T0_A0(s1, ot, d);
1356 gen_op_update1_cc();
1357 set_cc_op(s1, CC_OP_LOGICB + ot);
1358 break;
1359 case OP_XORL:
1360 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1361 gen_op_st_rm_T0_A0(s1, ot, d);
1362 gen_op_update1_cc();
1363 set_cc_op(s1, CC_OP_LOGICB + ot);
1364 break;
1365 case OP_CMPL:
1366 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1367 tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
1368 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
1369 set_cc_op(s1, CC_OP_SUBB + ot);
1370 break;
1374 /* if d == OR_TMP0, it means memory operand (address in A0) */
1375 static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
1377 if (d != OR_TMP0) {
1378 gen_op_mov_v_reg(ot, cpu_T[0], d);
1379 } else {
1380 gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
1382 gen_compute_eflags_c(s1, cpu_cc_src);
1383 if (c > 0) {
1384 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1385 set_cc_op(s1, CC_OP_INCB + ot);
1386 } else {
1387 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1388 set_cc_op(s1, CC_OP_DECB + ot);
1390 gen_op_st_rm_T0_A0(s1, ot, d);
1391 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
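/* INC and DEC must preserve CF, so the pre-existing carry is saved
   into CC_SRC before the add; the INC/DEC cc_op settings then take CF
   from CC_SRC while deriving the remaining flags from the result left
   in CC_DST. */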
1394 static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
1395 TCGv shm1, TCGv count, bool is_right)
1397 TCGv_i32 z32, s32, oldop;
1398 TCGv z_tl;
1400 /* Store the results into the CC variables. If we know that the
1401 variable must be dead, store unconditionally. Otherwise we'll
1402 need to avoid disrupting the current contents. */
1403 z_tl = tcg_const_tl(0);
1404 if (cc_op_live[s->cc_op] & USES_CC_DST) {
1405 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
1406 result, cpu_cc_dst);
1407 } else {
1408 tcg_gen_mov_tl(cpu_cc_dst, result);
1410 if (cc_op_live[s->cc_op] & USES_CC_SRC) {
1411 tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
1412 shm1, cpu_cc_src);
1413 } else {
1414 tcg_gen_mov_tl(cpu_cc_src, shm1);
1416 tcg_temp_free(z_tl);
1418 /* Get the two potential CC_OP values into temporaries. */
1419 tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1420 if (s->cc_op == CC_OP_DYNAMIC) {
1421 oldop = cpu_cc_op;
1422 } else {
1423 tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
1424 oldop = cpu_tmp3_i32;
1427 /* Conditionally store the CC_OP value. */
1428 z32 = tcg_const_i32(0);
1429 s32 = tcg_temp_new_i32();
1430 tcg_gen_trunc_tl_i32(s32, count);
1431 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
1432 tcg_temp_free_i32(z32);
1433 tcg_temp_free_i32(s32);
1435 /* The CC_OP value is no longer predictable. */
1436 set_cc_op(s, CC_OP_DYNAMIC);
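/* The movcond sequence above implements the x86 rule that a shift by
   a count of zero leaves all flags (and the pending cc_op) untouched:
   live CC variables are only overwritten when count != 0, while dead
   ones can be clobbered unconditionally, at the price of making the
   resulting CC_OP value dynamic. */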
1439 static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1440 int is_right, int is_arith)
1442 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1444 /* load */
1445 if (op1 == OR_TMP0) {
1446 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1447 } else {
1448 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1451 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1452 tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);
1454 if (is_right) {
1455 if (is_arith) {
1456 gen_exts(ot, cpu_T[0]);
1457 tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1458 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1459 } else {
1460 gen_extu(ot, cpu_T[0]);
1461 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1462 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1464 } else {
1465 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1466 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1469 /* store */
1470 gen_op_st_rm_T0_A0(s, ot, op1);
1472 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
1475 static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1476 int is_right, int is_arith)
1478 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1480 /* load */
1481 if (op1 == OR_TMP0)
1482 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1483 else
1484 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1486 op2 &= mask;
1487 if (op2 != 0) {
1488 if (is_right) {
1489 if (is_arith) {
1490 gen_exts(ot, cpu_T[0]);
1491 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1492 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1493 } else {
1494 gen_extu(ot, cpu_T[0]);
1495 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1496 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1498 } else {
1499 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1500 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1504 /* store */
1505 gen_op_st_rm_T0_A0(s, ot, op1);
1507 /* update eflags if non zero shift */
1508 if (op2 != 0) {
1509 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1510 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1511 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1515 static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
1517 target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
1518 TCGv_i32 t0, t1;
1520 /* load */
1521 if (op1 == OR_TMP0) {
1522 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1523 } else {
1524 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1527 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1529 switch (ot) {
1530 case MO_8:
1531 /* Replicate the 8-bit input so that a 32-bit rotate works. */
1532 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
1533 tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
1534 goto do_long;
1535 case MO_16:
1536 /* Replicate the 16-bit input so that a 32-bit rotate works. */
1537 tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
1538 goto do_long;
1539 do_long:
1540 #ifdef TARGET_X86_64
1541 case MO_32:
1542 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1543 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
1544 if (is_right) {
1545 tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1546 } else {
1547 tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
1549 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1550 break;
1551 #endif
1552 default:
1553 if (is_right) {
1554 tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1555 } else {
1556 tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1558 break;
1561 /* store */
1562 gen_op_st_rm_T0_A0(s, ot, op1);
1564 /* We'll need the flags computed into CC_SRC. */
1565 gen_compute_eflags(s);
1567 /* The value that was "rotated out" is now present at the other end
1568 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1569 since we've computed the flags into CC_SRC, these variables are
1570 currently dead. */
1571 if (is_right) {
1572 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1573 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
1574 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1575 } else {
1576 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1577 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
1579 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1580 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1582 /* Now conditionally store the new CC_OP value. If the shift count
1583 is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
1584 Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
1585 exactly as we computed above. */
1586 t0 = tcg_const_i32(0);
1587 t1 = tcg_temp_new_i32();
1588 tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
1589 tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
1590 tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
1591 tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
1592 cpu_tmp2_i32, cpu_tmp3_i32);
1593 tcg_temp_free_i32(t0);
1594 tcg_temp_free_i32(t1);
1596 /* The CC_OP value is no longer predictable. */
1597 set_cc_op(s, CC_OP_DYNAMIC);
1600 static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
1601 int is_right)
1603 int mask = (ot == MO_64 ? 0x3f : 0x1f);
1604 int shift;
1606 /* load */
1607 if (op1 == OR_TMP0) {
1608 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1609 } else {
1610 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1613 op2 &= mask;
1614 if (op2 != 0) {
1615 switch (ot) {
1616 #ifdef TARGET_X86_64
1617 case MO_32:
1618 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
1619 if (is_right) {
1620 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1621 } else {
1622 tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
1624 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
1625 break;
1626 #endif
1627 default:
1628 if (is_right) {
1629 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
1630 } else {
1631 tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
1633 break;
1634 case MO_8:
1635 mask = 7;
1636 goto do_shifts;
1637 case MO_16:
1638 mask = 15;
1639 do_shifts:
1640 shift = op2 & mask;
1641 if (is_right) {
1642 shift = mask + 1 - shift;
1644 gen_extu(ot, cpu_T[0]);
1645 tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
1646 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
1647 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
1648 break;
1652 /* store */
1653 gen_op_st_rm_T0_A0(s, ot, op1);
1655 if (op2 != 0) {
1656 /* Compute the flags into CC_SRC. */
1657 gen_compute_eflags(s);
1659 /* The value that was "rotated out" is now present at the other end
1660 of the word. Compute C into CC_DST and O into CC_SRC2. Note that
1661 since we've computed the flags into CC_SRC, these variables are
1662 currently dead. */
1663 if (is_right) {
1664 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
1665 tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
1666 tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
1667 } else {
1668 tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
1669 tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
1671 tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
1672 tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
1673 set_cc_op(s, CC_OP_ADCOX);
1677 /* XXX: add faster immediate = 1 case */
1678 static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1679 int is_right)
1681 gen_compute_eflags(s);
1682 assert(s->cc_op == CC_OP_EFLAGS);
1684 /* load */
1685 if (op1 == OR_TMP0)
1686 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1687 else
1688 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1690 if (is_right) {
1691 switch (ot) {
1692 case MO_8:
1693 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1694 break;
1695 case MO_16:
1696 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1697 break;
1698 case MO_32:
1699 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1700 break;
1701 #ifdef TARGET_X86_64
1702 case MO_64:
1703 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1704 break;
1705 #endif
1706 default:
1707 tcg_abort();
1709 } else {
1710 switch (ot) {
1711 case MO_8:
1712 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1713 break;
1714 case MO_16:
1715 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1716 break;
1717 case MO_32:
1718 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1719 break;
1720 #ifdef TARGET_X86_64
1721 case MO_64:
1722 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1723 break;
1724 #endif
1725 default:
1726 tcg_abort();
1729 /* store */
1730 gen_op_st_rm_T0_A0(s, ot, op1);
1733 /* XXX: add faster immediate case */
1734 static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
1735 bool is_right, TCGv count_in)
1737 target_ulong mask = (ot == MO_64 ? 63 : 31);
1738 TCGv count;
1740 /* load */
1741 if (op1 == OR_TMP0) {
1742 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
1743 } else {
1744 gen_op_mov_v_reg(ot, cpu_T[0], op1);
1747 count = tcg_temp_new();
1748 tcg_gen_andi_tl(count, count_in, mask);
1750 switch (ot) {
1751 case MO_16:
1752 /* Note: we implement the Intel behaviour for shift count > 16.
1753 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1754 portion by constructing it as a 32-bit value. */
1755 if (is_right) {
1756 tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
1757 tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
1758 tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
1759 } else {
1760 tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
1762 /* FALLTHRU */
1763 #ifdef TARGET_X86_64
1764 case MO_32:
1765 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1766 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1767 if (is_right) {
1768 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
1769 tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1770 tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
1771 } else {
1772 tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
1773 tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
1774 tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
1775 tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
1776 tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
1778 break;
1779 #endif
1780 default:
1781 tcg_gen_subi_tl(cpu_tmp0, count, 1);
1782 if (is_right) {
1783 tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1785 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1786 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
1787 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
1788 } else {
1789 tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
1790 if (ot == MO_16) {
1791 /* Only needed if count > 16, for Intel behaviour. */
1792 tcg_gen_subfi_tl(cpu_tmp4, 33, count);
1793 tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
1794 tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
1797 tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
1798 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
1799 tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
1801 tcg_gen_movi_tl(cpu_tmp4, 0);
1802 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
1803 cpu_tmp4, cpu_T[1]);
1804 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1805 break;
1808 /* store */
1809 gen_op_st_rm_T0_A0(s, ot, op1);
1811 gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
1812 tcg_temp_free(count);
1815 static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
1817 if (s != OR_TMP1)
1818 gen_op_mov_v_reg(ot, cpu_T[1], s);
1819 switch(op) {
1820 case OP_ROL:
1821 gen_rot_rm_T1(s1, ot, d, 0);
1822 break;
1823 case OP_ROR:
1824 gen_rot_rm_T1(s1, ot, d, 1);
1825 break;
1826 case OP_SHL:
1827 case OP_SHL1:
1828 gen_shift_rm_T1(s1, ot, d, 0, 0);
1829 break;
1830 case OP_SHR:
1831 gen_shift_rm_T1(s1, ot, d, 1, 0);
1832 break;
1833 case OP_SAR:
1834 gen_shift_rm_T1(s1, ot, d, 1, 1);
1835 break;
1836 case OP_RCL:
1837 gen_rotc_rm_T1(s1, ot, d, 0);
1838 break;
1839 case OP_RCR:
1840 gen_rotc_rm_T1(s1, ot, d, 1);
1841 break;
1845 static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
1847 switch(op) {
1848 case OP_ROL:
1849 gen_rot_rm_im(s1, ot, d, c, 0);
1850 break;
1851 case OP_ROR:
1852 gen_rot_rm_im(s1, ot, d, c, 1);
1853 break;
1854 case OP_SHL:
1855 case OP_SHL1:
1856 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1857 break;
1858 case OP_SHR:
1859 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1860 break;
1861 case OP_SAR:
1862 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1863 break;
1864 default:
1865 /* currently not optimized */
1866 tcg_gen_movi_tl(cpu_T[1], c);
1867 gen_shift(s1, op, ot, d, OR_TMP1);
1868 break;
1872 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
1874 target_long disp;
1875 int havesib;
1876 int base;
1877 int index;
1878 int scale;
1879 int mod, rm, code, override, must_add_seg;
1880 TCGv sum;
1882 override = s->override;
1883 must_add_seg = s->addseg;
1884 if (override >= 0)
1885 must_add_seg = 1;
1886 mod = (modrm >> 6) & 3;
1887 rm = modrm & 7;
1889 switch (s->aflag) {
1890 case MO_64:
1891 case MO_32:
1892 havesib = 0;
1893 base = rm;
1894 index = -1;
1895 scale = 0;
1897 if (base == 4) {
1898 havesib = 1;
1899 code = cpu_ldub_code(env, s->pc++);
1900 scale = (code >> 6) & 3;
1901 index = ((code >> 3) & 7) | REX_X(s);
1902 if (index == 4) {
1903 index = -1; /* no index */
1905 base = (code & 7);
1907 base |= REX_B(s);
1909 switch (mod) {
1910 case 0:
1911 if ((base & 7) == 5) {
1912 base = -1;
1913 disp = (int32_t)cpu_ldl_code(env, s->pc);
1914 s->pc += 4;
1915 if (CODE64(s) && !havesib) {
1916 disp += s->pc + s->rip_offset;
1918 } else {
1919 disp = 0;
1921 break;
1922 case 1:
1923 disp = (int8_t)cpu_ldub_code(env, s->pc++);
1924 break;
1925 default:
1926 case 2:
1927 disp = (int32_t)cpu_ldl_code(env, s->pc);
1928 s->pc += 4;
1929 break;
1932 /* For correct popl handling with esp. */
1933 if (base == R_ESP && s->popl_esp_hack) {
1934 disp += s->popl_esp_hack;
1937 /* Compute the address, with a minimum number of TCG ops. */
1938 TCGV_UNUSED(sum);
1939 if (index >= 0) {
1940 if (scale == 0) {
1941 sum = cpu_regs[index];
1942 } else {
1943 tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
1944 sum = cpu_A0;
1946 if (base >= 0) {
1947 tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
1948 sum = cpu_A0;
1950 } else if (base >= 0) {
1951 sum = cpu_regs[base];
1953 if (TCGV_IS_UNUSED(sum)) {
1954 tcg_gen_movi_tl(cpu_A0, disp);
1955 } else {
1956 tcg_gen_addi_tl(cpu_A0, sum, disp);
1959 if (must_add_seg) {
1960 if (override < 0) {
1961 if (base == R_EBP || base == R_ESP) {
1962 override = R_SS;
1963 } else {
1964 override = R_DS;
1968 tcg_gen_ld_tl(cpu_tmp0, cpu_env,
1969 offsetof(CPUX86State, segs[override].base));
1970 if (CODE64(s)) {
1971 if (s->aflag == MO_32) {
1972 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
1974 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1975 return;
1978 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
1981 if (s->aflag == MO_32) {
1982 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
1984 break;
1986 case MO_16:
1987 switch (mod) {
1988 case 0:
1989 if (rm == 6) {
1990 disp = cpu_lduw_code(env, s->pc);
1991 s->pc += 2;
1992 tcg_gen_movi_tl(cpu_A0, disp);
1993 rm = 0; /* avoid SS override */
1994 goto no_rm;
1995 } else {
1996 disp = 0;
1998 break;
1999 case 1:
2000 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2001 break;
2002 default:
2003 case 2:
2004 disp = (int16_t)cpu_lduw_code(env, s->pc);
2005 s->pc += 2;
2006 break;
2009 sum = cpu_A0;
2010 switch (rm) {
2011 case 0:
2012 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
2013 break;
2014 case 1:
2015 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
2016 break;
2017 case 2:
2018 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
2019 break;
2020 case 3:
2021 tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
2022 break;
2023 case 4:
2024 sum = cpu_regs[R_ESI];
2025 break;
2026 case 5:
2027 sum = cpu_regs[R_EDI];
2028 break;
2029 case 6:
2030 sum = cpu_regs[R_EBP];
2031 break;
2032 default:
2033 case 7:
2034 sum = cpu_regs[R_EBX];
2035 break;
2037 tcg_gen_addi_tl(cpu_A0, sum, disp);
2038 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2039 no_rm:
2040 if (must_add_seg) {
2041 if (override < 0) {
2042 if (rm == 2 || rm == 3 || rm == 6) {
2043 override = R_SS;
2044 } else {
2045 override = R_DS;
2048 gen_op_addl_A0_seg(s, override);
2050 break;
2052 default:
2053 tcg_abort();
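/* Worked example: in 32-bit code, "mov (%ebx,%esi,4),%eax" decodes as
   mod = 0, rm = 4 (SIB byte follows), base = EBX, index = ESI and a
   scale field of 2 (i.e. x4), with no displacement; the code above
   emits A0 = (ESI << 2) + EBX and, if a segment base must be added,
   folds in DS since the base register is neither EBP nor ESP. */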
2057 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2059 int mod, rm, base, code;
2061 mod = (modrm >> 6) & 3;
2062 if (mod == 3)
2063 return;
2064 rm = modrm & 7;
2066 switch (s->aflag) {
2067 case MO_64:
2068 case MO_32:
2069 base = rm;
2071 if (base == 4) {
2072 code = cpu_ldub_code(env, s->pc++);
2073 base = (code & 7);
2076 switch (mod) {
2077 case 0:
2078 if (base == 5) {
2079 s->pc += 4;
2081 break;
2082 case 1:
2083 s->pc++;
2084 break;
2085 default:
2086 case 2:
2087 s->pc += 4;
2088 break;
2090 break;
2092 case MO_16:
2093 switch (mod) {
2094 case 0:
2095 if (rm == 6) {
2096 s->pc += 2;
2098 break;
2099 case 1:
2100 s->pc++;
2101 break;
2102 default:
2103 case 2:
2104 s->pc += 2;
2105 break;
2107 break;
2109 default:
2110 tcg_abort();
2114 /* used for LEA and MOV AX, mem */
2115 static void gen_add_A0_ds_seg(DisasContext *s)
2117 int override, must_add_seg;
2118 must_add_seg = s->addseg;
2119 override = R_DS;
2120 if (s->override >= 0) {
2121 override = s->override;
2122 must_add_seg = 1;
2124 if (must_add_seg) {
2125 #ifdef TARGET_X86_64
2126 if (CODE64(s)) {
2127 gen_op_addq_A0_seg(override);
2128 } else
2129 #endif
2131 gen_op_addl_A0_seg(s, override);
2136 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2137 OR_TMP0 */
2138 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2139 TCGMemOp ot, int reg, int is_store)
2141 int mod, rm;
2143 mod = (modrm >> 6) & 3;
2144 rm = (modrm & 7) | REX_B(s);
2145 if (mod == 3) {
2146 if (is_store) {
2147 if (reg != OR_TMP0)
2148 gen_op_mov_v_reg(ot, cpu_T[0], reg);
2149 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
2150 } else {
2151 gen_op_mov_v_reg(ot, cpu_T[0], rm);
2152 if (reg != OR_TMP0)
2153 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2155 } else {
2156 gen_lea_modrm(env, s, modrm);
2157 if (is_store) {
2158 if (reg != OR_TMP0)
2159 gen_op_mov_v_reg(ot, cpu_T[0], reg);
2160 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2161 } else {
2162 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
2163 if (reg != OR_TMP0)
2164 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2169 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
2171 uint32_t ret;
2173 switch (ot) {
2174 case MO_8:
2175 ret = cpu_ldub_code(env, s->pc);
2176 s->pc++;
2177 break;
2178 case MO_16:
2179 ret = cpu_lduw_code(env, s->pc);
2180 s->pc += 2;
2181 break;
2182 case MO_32:
2183 #ifdef TARGET_X86_64
2184 case MO_64:
2185 #endif
2186 ret = cpu_ldl_code(env, s->pc);
2187 s->pc += 4;
2188 break;
2189 default:
2190 tcg_abort();
2192 return ret;
2195 static inline int insn_const_size(TCGMemOp ot)
2197 if (ot <= MO_32) {
2198 return 1 << ot;
2199 } else {
2200 return 4;
2204 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2206 TranslationBlock *tb;
2207 target_ulong pc;
2209 pc = s->cs_base + eip;
2210 tb = s->tb;
2211 /* NOTE: we handle the case where the TB spans two pages here */
2212 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2213 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2214 /* jump to same page: we can use a direct jump */
2215 tcg_gen_goto_tb(tb_num);
2216 gen_jmp_im(eip);
2217 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
2218 } else {
2219 /* jump to another page: currently not optimized */
2220 gen_jmp_im(eip);
2221 gen_eob(s);
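/* Direct block chaining patches the jump emitted by tcg_gen_goto_tb()
   so the next TB is entered without returning to the main loop; it is
   only safe when the target eip lies in one of the pages this TB
   already spans, because TB invalidation is tracked per page. */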
2225 static inline void gen_jcc(DisasContext *s, int b,
2226 target_ulong val, target_ulong next_eip)
2228 TCGLabel *l1, *l2;
2230 if (s->jmp_opt) {
2231 l1 = gen_new_label();
2232 gen_jcc1(s, b, l1);
2234 gen_goto_tb(s, 0, next_eip);
2236 gen_set_label(l1);
2237 gen_goto_tb(s, 1, val);
2238 s->is_jmp = DISAS_TB_JUMP;
2239 } else {
2240 l1 = gen_new_label();
2241 l2 = gen_new_label();
2242 gen_jcc1(s, b, l1);
2244 gen_jmp_im(next_eip);
2245 tcg_gen_br(l2);
2247 gen_set_label(l1);
2248 gen_jmp_im(val);
2249 gen_set_label(l2);
2250 gen_eob(s);
2254 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
2255 int modrm, int reg)
2257 CCPrepare cc;
2259 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2261 cc = gen_prepare_cc(s, b, cpu_T[1]);
2262 if (cc.mask != -1) {
2263 TCGv t0 = tcg_temp_new();
2264 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2265 cc.reg = t0;
2267 if (!cc.use_reg2) {
2268 cc.reg2 = tcg_const_tl(cc.imm);
2271 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2272 cpu_T[0], cpu_regs[reg]);
2273 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
2275 if (cc.mask != -1) {
2276 tcg_temp_free(cc.reg);
2278 if (!cc.use_reg2) {
2279 tcg_temp_free(cc.reg2);
2283 static inline void gen_op_movl_T0_seg(int seg_reg)
2285 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2286 offsetof(CPUX86State,segs[seg_reg].selector));
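/* Real mode / vm86 segment load: only the selector and the derived
   base (selector << 4) are updated; no descriptor is fetched and no
   permission checks apply. */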
2289 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2291 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2292 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2293 offsetof(CPUX86State,segs[seg_reg].selector));
2294 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2295 tcg_gen_st_tl(cpu_T[0], cpu_env,
2296 offsetof(CPUX86State,segs[seg_reg].base));
2299 /* Move T0 to seg_reg and check whether the CPU state may change.
2300 Never call this function with seg_reg == R_CS. */
2301 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2303 if (s->pe && !s->vm86) {
2304 /* XXX: optimize by finding processor state dynamically */
2305 gen_update_cc_op(s);
2306 gen_jmp_im(cur_eip);
2307 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2308 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2309 /* abort translation because the addseg value may change or
2310 because ss32 may change. For R_SS, translation must always
2311 stop as a special handling must be done to disable hardware
2312 interrupts for the next instruction */
2313 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2314 s->is_jmp = DISAS_TB_JUMP;
2315 } else {
2316 gen_op_movl_seg_T0_vm(seg_reg);
2317 if (seg_reg == R_SS)
2318 s->is_jmp = DISAS_TB_JUMP;
2322 static inline int svm_is_rep(int prefixes)
2324 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
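/* Emit an SVM intercept check for the given intercept type and
   parameter.  This compiles to nothing unless the guest is running
   with SVM intercepts enabled (HF_SVMI_MASK). */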
2327 static inline void
2328 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2329 uint32_t type, uint64_t param)
2331 /* no SVM activated; fast case */
2332 if (likely(!(s->flags & HF_SVMI_MASK)))
2333 return;
2334 gen_update_cc_op(s);
2335 gen_jmp_im(pc_start - s->cs_base);
2336 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2337 tcg_const_i64(param));
2340 static inline void
2341 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2343 gen_svm_check_intercept_param(s, pc_start, type, 0);
2346 static inline void gen_stack_update(DisasContext *s, int addend)
2348 #ifdef TARGET_X86_64
2349 if (CODE64(s)) {
2350 gen_op_add_reg_im(MO_64, R_ESP, addend);
2351 } else
2352 #endif
2353 if (s->ss32) {
2354 gen_op_add_reg_im(MO_32, R_ESP, addend);
2355 } else {
2356 gen_op_add_reg_im(MO_16, R_ESP, addend);
2360 /* Generate a push. It depends on ss32, addseg and dflag. */
2361 static void gen_push_v(DisasContext *s, TCGv val)
2363 TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
2364 int size = 1 << d_ot;
2365 TCGv new_esp = cpu_A0;
2367 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2369 if (CODE64(s)) {
2370 a_ot = MO_64;
2371 } else if (s->ss32) {
2372 a_ot = MO_32;
2373 if (s->addseg) {
2374 new_esp = cpu_tmp4;
2375 tcg_gen_mov_tl(new_esp, cpu_A0);
2376 gen_op_addl_A0_seg(s, R_SS);
2377 } else {
2378 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2380 } else {
2381 a_ot = MO_16;
2382 new_esp = cpu_tmp4;
2383 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2384 tcg_gen_mov_tl(new_esp, cpu_A0);
2385 gen_op_addl_A0_seg(s, R_SS);
2388 gen_op_st_v(s, d_ot, val, cpu_A0);
2389 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
2392 /* A two-step pop is necessary for precise exceptions: the value is
2393 loaded before ESP is updated, so a faulting load leaves ESP intact. */
2393 static TCGMemOp gen_pop_T0(DisasContext *s)
2395 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2396 TCGv addr = cpu_A0;
2398 if (CODE64(s)) {
2399 addr = cpu_regs[R_ESP];
2400 } else if (!s->ss32) {
2401 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
2402 gen_op_addl_A0_seg(s, R_SS);
2403 } else if (s->addseg) {
2404 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
2405 gen_op_addl_A0_seg(s, R_SS);
2406 } else {
2407 tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
2410 gen_op_ld_v(s, d_ot, cpu_T[0], addr);
2411 return d_ot;
2414 static void gen_pop_update(DisasContext *s, TCGMemOp ot)
2416 gen_stack_update(s, 1 << ot);
2419 static void gen_stack_A0(DisasContext *s)
2421 gen_op_movl_A0_reg(R_ESP);
2422 if (!s->ss32)
2423 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2424 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2425 if (s->addseg)
2426 gen_op_addl_A0_seg(s, R_SS);
2429 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2430 static void gen_pusha(DisasContext *s)
2432 int i;
2433 gen_op_movl_A0_reg(R_ESP);
2434 gen_op_addl_A0_im(-8 << s->dflag);
2435 if (!s->ss32)
2436 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2437 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2438 if (s->addseg)
2439 gen_op_addl_A0_seg(s, R_SS);
2440 for (i = 0; i < 8; i++) {
2441 gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
2442 gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
2443 gen_op_addl_A0_im(1 << s->dflag);
2445 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2448 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2449 static void gen_popa(DisasContext *s)
2451 int i;
2452 gen_op_movl_A0_reg(R_ESP);
2453 if (!s->ss32)
2454 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2455 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2456 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
2457 if (s->addseg)
2458 gen_op_addl_A0_seg(s, R_SS);
2459 for (i = 0; i < 8; i++) {
2460 /* ESP is not reloaded */
2461 if (i != 3) {
2462 gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
2463 gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
2465 gen_op_addl_A0_im(1 << s->dflag);
2467 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
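/* ENTER: push EBP, optionally copy up to 31 nested frame pointers
   (delegated to the enter_level helpers), then point EBP at the new
   frame and reserve esp_addend bytes of locals below it. */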
2470 static void gen_enter(DisasContext *s, int esp_addend, int level)
2472 TCGMemOp ot = mo_pushpop(s, s->dflag);
2473 int opsize = 1 << ot;
2475 level &= 0x1f;
2476 #ifdef TARGET_X86_64
2477 if (CODE64(s)) {
2478 gen_op_movl_A0_reg(R_ESP);
2479 gen_op_addq_A0_im(-opsize);
2480 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2482 /* push bp */
2483 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2484 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2485 if (level) {
2486 /* XXX: must save state */
2487 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2488 tcg_const_i32((ot == MO_64)),
2489 cpu_T[1]);
2491 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2492 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2493 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
2494 } else
2495 #endif
2497 gen_op_movl_A0_reg(R_ESP);
2498 gen_op_addl_A0_im(-opsize);
2499 if (!s->ss32)
2500 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2501 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2502 if (s->addseg)
2503 gen_op_addl_A0_seg(s, R_SS);
2504 /* push bp */
2505 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2506 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2507 if (level) {
2508 /* XXX: must save state */
2509 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2510 tcg_const_i32(s->dflag - 1),
2511 cpu_T[1]);
2513 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2514 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2515 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
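/* Raise exception 'trapno' at cur_eip and end the translation block. */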
2519 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2521 gen_update_cc_op(s);
2522 gen_jmp_im(cur_eip);
2523 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2524 s->is_jmp = DISAS_TB_JUMP;
2527 /* an interrupt is different from an exception because of the
2528 privilege checks */
2529 static void gen_interrupt(DisasContext *s, int intno,
2530 target_ulong cur_eip, target_ulong next_eip)
2532 gen_update_cc_op(s);
2533 gen_jmp_im(cur_eip);
2534 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2535 tcg_const_i32(next_eip - cur_eip));
2536 s->is_jmp = DISAS_TB_JUMP;
2539 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2541 gen_update_cc_op(s);
2542 gen_jmp_im(cur_eip);
2543 gen_helper_debug(cpu_env);
2544 s->is_jmp = DISAS_TB_JUMP;
2547 /* Generate a generic end of block; a trace exception is also
2548 generated if needed. */
2549 static void gen_eob(DisasContext *s)
2551 gen_update_cc_op(s);
2552 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2553 gen_helper_reset_inhibit_irq(cpu_env);
2555 if (s->tb->flags & HF_RF_MASK) {
2556 gen_helper_reset_rf(cpu_env);
2558 if (s->singlestep_enabled) {
2559 gen_helper_debug(cpu_env);
2560 } else if (s->tf) {
2561 gen_helper_single_step(cpu_env);
2562 } else {
2563 tcg_gen_exit_tb(0);
2565 s->is_jmp = DISAS_TB_JUMP;
2568 /* Generate a jump to eip. No segment change may happen beforehand,
2569 since a direct jump to the next block may occur. */
2570 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2572 gen_update_cc_op(s);
2573 set_cc_op(s, CC_OP_DYNAMIC);
2574 if (s->jmp_opt) {
2575 gen_goto_tb(s, tb_num, eip);
2576 s->is_jmp = DISAS_TB_JUMP;
2577 } else {
2578 gen_jmp_im(eip);
2579 gen_eob(s);
2583 static void gen_jmp(DisasContext *s, target_ulong eip)
2585 gen_jmp_tb(s, eip, 0);
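/* Helpers copying 64-bit ('q') and 128-bit ('o') values between guest
   memory at A0 and a CPUX86State field at 'offset'; the 128-bit forms
   are performed as two little-endian 64-bit accesses. */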
2588 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2590 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2591 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2594 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2596 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2597 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2600 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2602 int mem_index = s->mem_index;
2603 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2604 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2605 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2606 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2607 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2610 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2612 int mem_index = s->mem_index;
2613 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2614 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2615 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2616 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2617 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2620 static inline void gen_op_movo(int d_offset, int s_offset)
2622 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(0)));
2623 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(0)));
2624 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(1)));
2625 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(1)));
2628 static inline void gen_op_movq(int d_offset, int s_offset)
2630 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2631 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2634 static inline void gen_op_movl(int d_offset, int s_offset)
2636 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2637 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2640 static inline void gen_op_movq_env_0(int d_offset)
2642 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2643 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2646 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2647 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2648 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2649 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2650 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2651 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2652 TCGv_i32 val);
2653 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2654 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2655 TCGv val);
2657 #define SSE_SPECIAL ((void *)1)
2658 #define SSE_DUMMY ((void *)2)
2660 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2661 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2662 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
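/* Dispatch table for the 0x0f two-byte opcodes.  The second index is
   derived from the mandatory prefix: 0 = none, 1 = 0x66, 2 = 0xf3,
   3 = 0xf2.  A NULL entry means illegal opcode; SSE_SPECIAL entries are
   decoded by hand in gen_sse(); SSE_DUMMY (femms/emms) only marks the
   opcode valid, as those insns are handled before the table result is
   used. */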
2664 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2665 /* 3DNow! extensions */
2666 [0x0e] = { SSE_DUMMY }, /* femms */
2667 [0x0f] = { SSE_DUMMY }, /* pf... */
2668 /* pure SSE operations */
2669 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2670 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2671 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2672 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2673 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2674 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2675 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2676 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2678 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2679 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2680 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2681 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2682 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2683 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2684 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2685 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2686 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2687 [0x51] = SSE_FOP(sqrt),
2688 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2689 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2690 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2691 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2692 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2693 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2694 [0x58] = SSE_FOP(add),
2695 [0x59] = SSE_FOP(mul),
2696 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2697 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2698 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2699 [0x5c] = SSE_FOP(sub),
2700 [0x5d] = SSE_FOP(min),
2701 [0x5e] = SSE_FOP(div),
2702 [0x5f] = SSE_FOP(max),
2704 [0xc2] = SSE_FOP(cmpeq),
2705 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2706 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2708 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2709 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2710 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2712 /* MMX ops and their SSE extensions */
2713 [0x60] = MMX_OP2(punpcklbw),
2714 [0x61] = MMX_OP2(punpcklwd),
2715 [0x62] = MMX_OP2(punpckldq),
2716 [0x63] = MMX_OP2(packsswb),
2717 [0x64] = MMX_OP2(pcmpgtb),
2718 [0x65] = MMX_OP2(pcmpgtw),
2719 [0x66] = MMX_OP2(pcmpgtl),
2720 [0x67] = MMX_OP2(packuswb),
2721 [0x68] = MMX_OP2(punpckhbw),
2722 [0x69] = MMX_OP2(punpckhwd),
2723 [0x6a] = MMX_OP2(punpckhdq),
2724 [0x6b] = MMX_OP2(packssdw),
2725 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2726 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2727 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2728 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2729 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2730 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2731 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2732 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2733 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2734 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2735 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2736 [0x74] = MMX_OP2(pcmpeqb),
2737 [0x75] = MMX_OP2(pcmpeqw),
2738 [0x76] = MMX_OP2(pcmpeql),
2739 [0x77] = { SSE_DUMMY }, /* emms */
2740 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2741 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2742 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2743 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2744 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2745 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2746 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2747 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2748 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2749 [0xd1] = MMX_OP2(psrlw),
2750 [0xd2] = MMX_OP2(psrld),
2751 [0xd3] = MMX_OP2(psrlq),
2752 [0xd4] = MMX_OP2(paddq),
2753 [0xd5] = MMX_OP2(pmullw),
2754 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2755 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2756 [0xd8] = MMX_OP2(psubusb),
2757 [0xd9] = MMX_OP2(psubusw),
2758 [0xda] = MMX_OP2(pminub),
2759 [0xdb] = MMX_OP2(pand),
2760 [0xdc] = MMX_OP2(paddusb),
2761 [0xdd] = MMX_OP2(paddusw),
2762 [0xde] = MMX_OP2(pmaxub),
2763 [0xdf] = MMX_OP2(pandn),
2764 [0xe0] = MMX_OP2(pavgb),
2765 [0xe1] = MMX_OP2(psraw),
2766 [0xe2] = MMX_OP2(psrad),
2767 [0xe3] = MMX_OP2(pavgw),
2768 [0xe4] = MMX_OP2(pmulhuw),
2769 [0xe5] = MMX_OP2(pmulhw),
2770 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2771 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2772 [0xe8] = MMX_OP2(psubsb),
2773 [0xe9] = MMX_OP2(psubsw),
2774 [0xea] = MMX_OP2(pminsw),
2775 [0xeb] = MMX_OP2(por),
2776 [0xec] = MMX_OP2(paddsb),
2777 [0xed] = MMX_OP2(paddsw),
2778 [0xee] = MMX_OP2(pmaxsw),
2779 [0xef] = MMX_OP2(pxor),
2780 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2781 [0xf1] = MMX_OP2(psllw),
2782 [0xf2] = MMX_OP2(pslld),
2783 [0xf3] = MMX_OP2(psllq),
2784 [0xf4] = MMX_OP2(pmuludq),
2785 [0xf5] = MMX_OP2(pmaddwd),
2786 [0xf6] = MMX_OP2(psadbw),
2787 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2788 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2789 [0xf8] = MMX_OP2(psubb),
2790 [0xf9] = MMX_OP2(psubw),
2791 [0xfa] = MMX_OP2(psubl),
2792 [0xfb] = MMX_OP2(psubq),
2793 [0xfc] = MMX_OP2(paddb),
2794 [0xfd] = MMX_OP2(paddw),
2795 [0xfe] = MMX_OP2(paddl),
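/* Immediate-count shift group (0f 71/72/73): the row (0/8/16) matches
   the opcode's element size (w/d/q) and the column is the modrm 'reg'
   field; psrldq/pslldq exist only in xmm form. */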
2798 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2799 [0 + 2] = MMX_OP2(psrlw),
2800 [0 + 4] = MMX_OP2(psraw),
2801 [0 + 6] = MMX_OP2(psllw),
2802 [8 + 2] = MMX_OP2(psrld),
2803 [8 + 4] = MMX_OP2(psrad),
2804 [8 + 6] = MMX_OP2(pslld),
2805 [16 + 2] = MMX_OP2(psrlq),
2806 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2807 [16 + 6] = MMX_OP2(psllq),
2808 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
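/* Scalar int<->float conversions: the 'a' tables are cvtsi2ss/sd, the
   'b' tables cvt(t)ss/sd2si; the 'q' variants operate on 64-bit
   integers and exist only on x86_64. */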
2811 static const SSEFunc_0_epi sse_op_table3ai[] = {
2812 gen_helper_cvtsi2ss,
2813 gen_helper_cvtsi2sd
2816 #ifdef TARGET_X86_64
2817 static const SSEFunc_0_epl sse_op_table3aq[] = {
2818 gen_helper_cvtsq2ss,
2819 gen_helper_cvtsq2sd
2821 #endif
2823 static const SSEFunc_i_ep sse_op_table3bi[] = {
2824 gen_helper_cvttss2si,
2825 gen_helper_cvtss2si,
2826 gen_helper_cvttsd2si,
2827 gen_helper_cvtsd2si
2830 #ifdef TARGET_X86_64
2831 static const SSEFunc_l_ep sse_op_table3bq[] = {
2832 gen_helper_cvttss2sq,
2833 gen_helper_cvtss2sq,
2834 gen_helper_cvttsd2sq,
2835 gen_helper_cvtsd2sq
2837 #endif
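/* cmpps/cmppd/cmpss/cmpsd comparison predicates, indexed by the 3-bit
   immediate (eq, lt, le, unord, neq, nlt, nle, ord). */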
2839 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2840 SSE_FOP(cmpeq),
2841 SSE_FOP(cmplt),
2842 SSE_FOP(cmple),
2843 SSE_FOP(cmpunord),
2844 SSE_FOP(cmpneq),
2845 SSE_FOP(cmpnlt),
2846 SSE_FOP(cmpnle),
2847 SSE_FOP(cmpord),
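/* 3DNow! operations, indexed by the one-byte suffix that follows the
   modrm byte of the 0f 0f encoding. */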
2850 static const SSEFunc_0_epp sse_op_table5[256] = {
2851 [0x0c] = gen_helper_pi2fw,
2852 [0x0d] = gen_helper_pi2fd,
2853 [0x1c] = gen_helper_pf2iw,
2854 [0x1d] = gen_helper_pf2id,
2855 [0x8a] = gen_helper_pfnacc,
2856 [0x8e] = gen_helper_pfpnacc,
2857 [0x90] = gen_helper_pfcmpge,
2858 [0x94] = gen_helper_pfmin,
2859 [0x96] = gen_helper_pfrcp,
2860 [0x97] = gen_helper_pfrsqrt,
2861 [0x9a] = gen_helper_pfsub,
2862 [0x9e] = gen_helper_pfadd,
2863 [0xa0] = gen_helper_pfcmpgt,
2864 [0xa4] = gen_helper_pfmax,
2865 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2866 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2867 [0xaa] = gen_helper_pfsubr,
2868 [0xae] = gen_helper_pfacc,
2869 [0xb0] = gen_helper_pfcmpeq,
2870 [0xb4] = gen_helper_pfmul,
2871 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2872 [0xb7] = gen_helper_pmulhrw_mmx,
2873 [0xbb] = gen_helper_pswapd,
2874 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2877 struct SSEOpHelper_epp {
2878 SSEFunc_0_epp op[2];
2879 uint32_t ext_mask;
2882 struct SSEOpHelper_eppi {
2883 SSEFunc_0_eppi op[2];
2884 uint32_t ext_mask;
2887 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2888 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2889 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2890 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2891 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2892 CPUID_EXT_PCLMULQDQ }
2893 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
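/* 0f 38 three-byte opcodes.  op[0] is the MMX form, op[1] the xmm
   form; ext_mask is the CPUID feature bit the insn requires. */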
2895 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2896 [0x00] = SSSE3_OP(pshufb),
2897 [0x01] = SSSE3_OP(phaddw),
2898 [0x02] = SSSE3_OP(phaddd),
2899 [0x03] = SSSE3_OP(phaddsw),
2900 [0x04] = SSSE3_OP(pmaddubsw),
2901 [0x05] = SSSE3_OP(phsubw),
2902 [0x06] = SSSE3_OP(phsubd),
2903 [0x07] = SSSE3_OP(phsubsw),
2904 [0x08] = SSSE3_OP(psignb),
2905 [0x09] = SSSE3_OP(psignw),
2906 [0x0a] = SSSE3_OP(psignd),
2907 [0x0b] = SSSE3_OP(pmulhrsw),
2908 [0x10] = SSE41_OP(pblendvb),
2909 [0x14] = SSE41_OP(blendvps),
2910 [0x15] = SSE41_OP(blendvpd),
2911 [0x17] = SSE41_OP(ptest),
2912 [0x1c] = SSSE3_OP(pabsb),
2913 [0x1d] = SSSE3_OP(pabsw),
2914 [0x1e] = SSSE3_OP(pabsd),
2915 [0x20] = SSE41_OP(pmovsxbw),
2916 [0x21] = SSE41_OP(pmovsxbd),
2917 [0x22] = SSE41_OP(pmovsxbq),
2918 [0x23] = SSE41_OP(pmovsxwd),
2919 [0x24] = SSE41_OP(pmovsxwq),
2920 [0x25] = SSE41_OP(pmovsxdq),
2921 [0x28] = SSE41_OP(pmuldq),
2922 [0x29] = SSE41_OP(pcmpeqq),
2923 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2924 [0x2b] = SSE41_OP(packusdw),
2925 [0x30] = SSE41_OP(pmovzxbw),
2926 [0x31] = SSE41_OP(pmovzxbd),
2927 [0x32] = SSE41_OP(pmovzxbq),
2928 [0x33] = SSE41_OP(pmovzxwd),
2929 [0x34] = SSE41_OP(pmovzxwq),
2930 [0x35] = SSE41_OP(pmovzxdq),
2931 [0x37] = SSE42_OP(pcmpgtq),
2932 [0x38] = SSE41_OP(pminsb),
2933 [0x39] = SSE41_OP(pminsd),
2934 [0x3a] = SSE41_OP(pminuw),
2935 [0x3b] = SSE41_OP(pminud),
2936 [0x3c] = SSE41_OP(pmaxsb),
2937 [0x3d] = SSE41_OP(pmaxsd),
2938 [0x3e] = SSE41_OP(pmaxuw),
2939 [0x3f] = SSE41_OP(pmaxud),
2940 [0x40] = SSE41_OP(pmulld),
2941 [0x41] = SSE41_OP(phminposuw),
2942 [0xdb] = AESNI_OP(aesimc),
2943 [0xdc] = AESNI_OP(aesenc),
2944 [0xdd] = AESNI_OP(aesenclast),
2945 [0xde] = AESNI_OP(aesdec),
2946 [0xdf] = AESNI_OP(aesdeclast),
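/* 0f 3a three-byte opcodes: same layout as sse_op_table6, plus an
   immediate byte passed to the helper. */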
2949 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2950 [0x08] = SSE41_OP(roundps),
2951 [0x09] = SSE41_OP(roundpd),
2952 [0x0a] = SSE41_OP(roundss),
2953 [0x0b] = SSE41_OP(roundsd),
2954 [0x0c] = SSE41_OP(blendps),
2955 [0x0d] = SSE41_OP(blendpd),
2956 [0x0e] = SSE41_OP(pblendw),
2957 [0x0f] = SSSE3_OP(palignr),
2958 [0x14] = SSE41_SPECIAL, /* pextrb */
2959 [0x15] = SSE41_SPECIAL, /* pextrw */
2960 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2961 [0x17] = SSE41_SPECIAL, /* extractps */
2962 [0x20] = SSE41_SPECIAL, /* pinsrb */
2963 [0x21] = SSE41_SPECIAL, /* insertps */
2964 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2965 [0x40] = SSE41_OP(dpps),
2966 [0x41] = SSE41_OP(dppd),
2967 [0x42] = SSE41_OP(mpsadbw),
2968 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2969 [0x60] = SSE42_OP(pcmpestrm),
2970 [0x61] = SSE42_OP(pcmpestri),
2971 [0x62] = SSE42_OP(pcmpistrm),
2972 [0x63] = SSE42_OP(pcmpistri),
2973 [0xdf] = AESNI_OP(aeskeygenassist),
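/* Decode and emit one MMX/SSE-encoded instruction.  'b' is the opcode
   byte following 0x0f; for the hand-decoded SSE_SPECIAL cases the
   prefix index b1 is merged into bits 9:8 of b, so e.g. case 0x1e7
   below is 66 0f e7 (movntdq). */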
2976 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2977 target_ulong pc_start, int rex_r)
2979 int b1, op1_offset, op2_offset, is_xmm, val;
2980 int modrm, mod, rm, reg;
2981 SSEFunc_0_epp sse_fn_epp;
2982 SSEFunc_0_eppi sse_fn_eppi;
2983 SSEFunc_0_ppi sse_fn_ppi;
2984 SSEFunc_0_eppt sse_fn_eppt;
2985 TCGMemOp ot;
2987 b &= 0xff;
2988 if (s->prefix & PREFIX_DATA)
2989 b1 = 1;
2990 else if (s->prefix & PREFIX_REPZ)
2991 b1 = 2;
2992 else if (s->prefix & PREFIX_REPNZ)
2993 b1 = 3;
2994 else
2995 b1 = 0;
2996 sse_fn_epp = sse_op_table1[b][b1];
2997 if (!sse_fn_epp) {
2998 goto illegal_op;
3000 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3001 is_xmm = 1;
3002 } else {
3003 if (b1 == 0) {
3004 /* MMX case */
3005 is_xmm = 0;
3006 } else {
3007 is_xmm = 1;
3010 /* simple MMX/SSE operation */
3011 if (s->flags & HF_TS_MASK) {
3012 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3013 return;
3015 if (s->flags & HF_EM_MASK) {
3016 illegal_op:
3017 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3018 return;
3020 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3021 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3022 goto illegal_op;
3023 if (b == 0x0e) {
3024 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3025 goto illegal_op;
3026 /* femms */
3027 gen_helper_emms(cpu_env);
3028 return;
3030 if (b == 0x77) {
3031 /* emms */
3032 gen_helper_emms(cpu_env);
3033 return;
3035 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3036 the static cpu state) */
3037 if (!is_xmm) {
3038 gen_helper_enter_mmx(cpu_env);
3041 modrm = cpu_ldub_code(env, s->pc++);
3042 reg = ((modrm >> 3) & 7);
3043 if (is_xmm)
3044 reg |= rex_r;
3045 mod = (modrm >> 6) & 3;
3046 if (sse_fn_epp == SSE_SPECIAL) {
3047 b |= (b1 << 8);
3048 switch(b) {
3049 case 0x0e7: /* movntq */
3050 if (mod == 3)
3051 goto illegal_op;
3052 gen_lea_modrm(env, s, modrm);
3053 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3054 break;
3055 case 0x1e7: /* movntdq */
3056 case 0x02b: /* movntps */
3057 case 0x12b: /* movntpd */
3058 if (mod == 3)
3059 goto illegal_op;
3060 gen_lea_modrm(env, s, modrm);
3061 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3062 break;
3063 case 0x3f0: /* lddqu */
3064 if (mod == 3)
3065 goto illegal_op;
3066 gen_lea_modrm(env, s, modrm);
3067 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3068 break;
3069 case 0x22b: /* movntss */
3070 case 0x32b: /* movntsd */
3071 if (mod == 3)
3072 goto illegal_op;
3073 gen_lea_modrm(env, s, modrm);
3074 if (b1 & 1) {
3075 gen_stq_env_A0(s, offsetof(CPUX86State,
3076 xmm_regs[reg].XMM_Q(0)));
3077 } else {
3078 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3079 xmm_regs[reg].XMM_L(0)));
3080 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3082 break;
3083 case 0x6e: /* movd mm, ea */
3084 #ifdef TARGET_X86_64
3085 if (s->dflag == MO_64) {
3086 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3087 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3088 } else
3089 #endif
3091 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3092 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3093 offsetof(CPUX86State,fpregs[reg].mmx));
3094 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3095 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3097 break;
3098 case 0x16e: /* movd xmm, ea */
3099 #ifdef TARGET_X86_64
3100 if (s->dflag == MO_64) {
3101 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3102 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3103 offsetof(CPUX86State,xmm_regs[reg]));
3104 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3105 } else
3106 #endif
3108 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3109 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3110 offsetof(CPUX86State,xmm_regs[reg]));
3111 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3112 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3114 break;
3115 case 0x6f: /* movq mm, ea */
3116 if (mod != 3) {
3117 gen_lea_modrm(env, s, modrm);
3118 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3119 } else {
3120 rm = (modrm & 7);
3121 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3122 offsetof(CPUX86State,fpregs[rm].mmx));
3123 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3124 offsetof(CPUX86State,fpregs[reg].mmx));
3126 break;
3127 case 0x010: /* movups */
3128 case 0x110: /* movupd */
3129 case 0x028: /* movaps */
3130 case 0x128: /* movapd */
3131 case 0x16f: /* movdqa xmm, ea */
3132 case 0x26f: /* movdqu xmm, ea */
3133 if (mod != 3) {
3134 gen_lea_modrm(env, s, modrm);
3135 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3136 } else {
3137 rm = (modrm & 7) | REX_B(s);
3138 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3139 offsetof(CPUX86State,xmm_regs[rm]));
3141 break;
3142 case 0x210: /* movss xmm, ea */
3143 if (mod != 3) {
3144 gen_lea_modrm(env, s, modrm);
3145 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3146 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3147 tcg_gen_movi_tl(cpu_T[0], 0);
3148 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3149 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3150 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3151 } else {
3152 rm = (modrm & 7) | REX_B(s);
3153 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3154 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3156 break;
3157 case 0x310: /* movsd xmm, ea */
3158 if (mod != 3) {
3159 gen_lea_modrm(env, s, modrm);
3160 gen_ldq_env_A0(s, offsetof(CPUX86State,
3161 xmm_regs[reg].XMM_Q(0)));
3162 tcg_gen_movi_tl(cpu_T[0], 0);
3163 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3164 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3165 } else {
3166 rm = (modrm & 7) | REX_B(s);
3167 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3168 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3170 break;
3171 case 0x012: /* movlps */
3172 case 0x112: /* movlpd */
3173 if (mod != 3) {
3174 gen_lea_modrm(env, s, modrm);
3175 gen_ldq_env_A0(s, offsetof(CPUX86State,
3176 xmm_regs[reg].XMM_Q(0)));
3177 } else {
3178 /* movhlps */
3179 rm = (modrm & 7) | REX_B(s);
3180 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3181 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3183 break;
3184 case 0x212: /* movsldup */
3185 if (mod != 3) {
3186 gen_lea_modrm(env, s, modrm);
3187 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3188 } else {
3189 rm = (modrm & 7) | REX_B(s);
3190 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3191 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3192 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3193 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3195 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3196 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3197 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3198 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3199 break;
3200 case 0x312: /* movddup */
3201 if (mod != 3) {
3202 gen_lea_modrm(env, s, modrm);
3203 gen_ldq_env_A0(s, offsetof(CPUX86State,
3204 xmm_regs[reg].XMM_Q(0)));
3205 } else {
3206 rm = (modrm & 7) | REX_B(s);
3207 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3208 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3210 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3211 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3212 break;
3213 case 0x016: /* movhps */
3214 case 0x116: /* movhpd */
3215 if (mod != 3) {
3216 gen_lea_modrm(env, s, modrm);
3217 gen_ldq_env_A0(s, offsetof(CPUX86State,
3218 xmm_regs[reg].XMM_Q(1)));
3219 } else {
3220 /* movlhps */
3221 rm = (modrm & 7) | REX_B(s);
3222 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3223 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3225 break;
3226 case 0x216: /* movshdup */
3227 if (mod != 3) {
3228 gen_lea_modrm(env, s, modrm);
3229 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3230 } else {
3231 rm = (modrm & 7) | REX_B(s);
3232 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3233 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3234 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3235 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3237 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3238 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3239 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3240 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3241 break;
3242 case 0x178:
3243 case 0x378:
3245 int bit_index, field_length;
3247 if (b1 == 1 && reg != 0)
3248 goto illegal_op;
3249 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3250 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3251 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3252 offsetof(CPUX86State,xmm_regs[reg]));
3253 if (b1 == 1)
3254 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3255 tcg_const_i32(bit_index),
3256 tcg_const_i32(field_length));
3257 else
3258 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3259 tcg_const_i32(bit_index),
3260 tcg_const_i32(field_length));
3262 break;
3263 case 0x7e: /* movd ea, mm */
3264 #ifdef TARGET_X86_64
3265 if (s->dflag == MO_64) {
3266 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3267 offsetof(CPUX86State,fpregs[reg].mmx));
3268 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3269 } else
3270 #endif
3272 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3273 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3274 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3276 break;
3277 case 0x17e: /* movd ea, xmm */
3278 #ifdef TARGET_X86_64
3279 if (s->dflag == MO_64) {
3280 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3281 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3282 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3283 } else
3284 #endif
3286 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3287 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3288 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3290 break;
3291 case 0x27e: /* movq xmm, ea */
3292 if (mod != 3) {
3293 gen_lea_modrm(env, s, modrm);
3294 gen_ldq_env_A0(s, offsetof(CPUX86State,
3295 xmm_regs[reg].XMM_Q(0)));
3296 } else {
3297 rm = (modrm & 7) | REX_B(s);
3298 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3299 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3301 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3302 break;
3303 case 0x7f: /* movq ea, mm */
3304 if (mod != 3) {
3305 gen_lea_modrm(env, s, modrm);
3306 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3307 } else {
3308 rm = (modrm & 7);
3309 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3310 offsetof(CPUX86State,fpregs[reg].mmx));
3312 break;
3313 case 0x011: /* movups */
3314 case 0x111: /* movupd */
3315 case 0x029: /* movaps */
3316 case 0x129: /* movapd */
3317 case 0x17f: /* movdqa ea, xmm */
3318 case 0x27f: /* movdqu ea, xmm */
3319 if (mod != 3) {
3320 gen_lea_modrm(env, s, modrm);
3321 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3322 } else {
3323 rm = (modrm & 7) | REX_B(s);
3324 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3325 offsetof(CPUX86State,xmm_regs[reg]));
3327 break;
3328 case 0x211: /* movss ea, xmm */
3329 if (mod != 3) {
3330 gen_lea_modrm(env, s, modrm);
3331 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3332 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3333 } else {
3334 rm = (modrm & 7) | REX_B(s);
3335 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3336 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3338 break;
3339 case 0x311: /* movsd ea, xmm */
3340 if (mod != 3) {
3341 gen_lea_modrm(env, s, modrm);
3342 gen_stq_env_A0(s, offsetof(CPUX86State,
3343 xmm_regs[reg].XMM_Q(0)));
3344 } else {
3345 rm = (modrm & 7) | REX_B(s);
3346 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3347 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3349 break;
3350 case 0x013: /* movlps */
3351 case 0x113: /* movlpd */
3352 if (mod != 3) {
3353 gen_lea_modrm(env, s, modrm);
3354 gen_stq_env_A0(s, offsetof(CPUX86State,
3355 xmm_regs[reg].XMM_Q(0)));
3356 } else {
3357 goto illegal_op;
3359 break;
3360 case 0x017: /* movhps */
3361 case 0x117: /* movhpd */
3362 if (mod != 3) {
3363 gen_lea_modrm(env, s, modrm);
3364 gen_stq_env_A0(s, offsetof(CPUX86State,
3365 xmm_regs[reg].XMM_Q(1)));
3366 } else {
3367 goto illegal_op;
3369 break;
3370 case 0x71: /* shift mm, im */
3371 case 0x72:
3372 case 0x73:
3373 case 0x171: /* shift xmm, im */
3374 case 0x172:
3375 case 0x173:
3376 if (b1 >= 2) {
3377 goto illegal_op;
3379 val = cpu_ldub_code(env, s->pc++);
3380 if (is_xmm) {
3381 tcg_gen_movi_tl(cpu_T[0], val);
3382 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3383 tcg_gen_movi_tl(cpu_T[0], 0);
3384 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3385 op1_offset = offsetof(CPUX86State,xmm_t0);
3386 } else {
3387 tcg_gen_movi_tl(cpu_T[0], val);
3388 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3389 tcg_gen_movi_tl(cpu_T[0], 0);
3390 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3391 op1_offset = offsetof(CPUX86State,mmx_t0);
3393 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3394 (((modrm >> 3)) & 7)][b1];
3395 if (!sse_fn_epp) {
3396 goto illegal_op;
3398 if (is_xmm) {
3399 rm = (modrm & 7) | REX_B(s);
3400 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3401 } else {
3402 rm = (modrm & 7);
3403 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3405 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3406 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3407 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3408 break;
3409 case 0x050: /* movmskps */
3410 rm = (modrm & 7) | REX_B(s);
3411 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3412 offsetof(CPUX86State,xmm_regs[rm]));
3413 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3414 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3415 break;
3416 case 0x150: /* movmskpd */
3417 rm = (modrm & 7) | REX_B(s);
3418 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3419 offsetof(CPUX86State,xmm_regs[rm]));
3420 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3421 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3422 break;
3423 case 0x02a: /* cvtpi2ps */
3424 case 0x12a: /* cvtpi2pd */
3425 gen_helper_enter_mmx(cpu_env);
3426 if (mod != 3) {
3427 gen_lea_modrm(env, s, modrm);
3428 op2_offset = offsetof(CPUX86State,mmx_t0);
3429 gen_ldq_env_A0(s, op2_offset);
3430 } else {
3431 rm = (modrm & 7);
3432 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3434 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3435 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3436 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3437 switch(b >> 8) {
3438 case 0x0:
3439 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3440 break;
3441 default:
3442 case 0x1:
3443 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3444 break;
3446 break;
3447 case 0x22a: /* cvtsi2ss */
3448 case 0x32a: /* cvtsi2sd */
3449 ot = mo_64_32(s->dflag);
3450 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3451 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3452 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3453 if (ot == MO_32) {
3454 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3455 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3456 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3457 } else {
3458 #ifdef TARGET_X86_64
3459 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3460 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3461 #else
3462 goto illegal_op;
3463 #endif
3465 break;
3466 case 0x02c: /* cvttps2pi */
3467 case 0x12c: /* cvttpd2pi */
3468 case 0x02d: /* cvtps2pi */
3469 case 0x12d: /* cvtpd2pi */
3470 gen_helper_enter_mmx(cpu_env);
3471 if (mod != 3) {
3472 gen_lea_modrm(env, s, modrm);
3473 op2_offset = offsetof(CPUX86State,xmm_t0);
3474 gen_ldo_env_A0(s, op2_offset);
3475 } else {
3476 rm = (modrm & 7) | REX_B(s);
3477 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3479 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3480 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3481 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3482 switch(b) {
3483 case 0x02c:
3484 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3485 break;
3486 case 0x12c:
3487 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3488 break;
3489 case 0x02d:
3490 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3491 break;
3492 case 0x12d:
3493 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3494 break;
3496 break;
3497 case 0x22c: /* cvttss2si */
3498 case 0x32c: /* cvttsd2si */
3499 case 0x22d: /* cvtss2si */
3500 case 0x32d: /* cvtsd2si */
3501 ot = mo_64_32(s->dflag);
3502 if (mod != 3) {
3503 gen_lea_modrm(env, s, modrm);
3504 if ((b >> 8) & 1) {
3505 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
3506 } else {
3507 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3508 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3510 op2_offset = offsetof(CPUX86State,xmm_t0);
3511 } else {
3512 rm = (modrm & 7) | REX_B(s);
3513 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3515 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3516 if (ot == MO_32) {
3517 SSEFunc_i_ep sse_fn_i_ep =
3518 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3519 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3520 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3521 } else {
3522 #ifdef TARGET_X86_64
3523 SSEFunc_l_ep sse_fn_l_ep =
3524 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3525 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3526 #else
3527 goto illegal_op;
3528 #endif
3530 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3531 break;
3532 case 0xc4: /* pinsrw */
3533 case 0x1c4:
3534 s->rip_offset = 1;
3535 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3536 val = cpu_ldub_code(env, s->pc++);
3537 if (b1) {
3538 val &= 7;
3539 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3540 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3541 } else {
3542 val &= 3;
3543 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3544 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3546 break;
3547 case 0xc5: /* pextrw */
3548 case 0x1c5:
3549 if (mod != 3)
3550 goto illegal_op;
3551 ot = mo_64_32(s->dflag);
3552 val = cpu_ldub_code(env, s->pc++);
3553 if (b1) {
3554 val &= 7;
3555 rm = (modrm & 7) | REX_B(s);
3556 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3557 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3558 } else {
3559 val &= 3;
3560 rm = (modrm & 7);
3561 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3562 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3564 reg = ((modrm >> 3) & 7) | rex_r;
3565 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3566 break;
3567 case 0x1d6: /* movq ea, xmm */
3568 if (mod != 3) {
3569 gen_lea_modrm(env, s, modrm);
3570 gen_stq_env_A0(s, offsetof(CPUX86State,
3571 xmm_regs[reg].XMM_Q(0)));
3572 } else {
3573 rm = (modrm & 7) | REX_B(s);
3574 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3575 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3576 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3578 break;
3579 case 0x2d6: /* movq2dq */
3580 gen_helper_enter_mmx(cpu_env);
3581 rm = (modrm & 7);
3582 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3583 offsetof(CPUX86State,fpregs[rm].mmx));
3584 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3585 break;
3586 case 0x3d6: /* movdq2q */
3587 gen_helper_enter_mmx(cpu_env);
3588 rm = (modrm & 7) | REX_B(s);
3589 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3590 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3591 break;
3592 case 0xd7: /* pmovmskb */
3593 case 0x1d7:
3594 if (mod != 3)
3595 goto illegal_op;
3596 if (b1) {
3597 rm = (modrm & 7) | REX_B(s);
3598 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3599 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3600 } else {
3601 rm = (modrm & 7);
3602 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3603 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3605 reg = ((modrm >> 3) & 7) | rex_r;
3606 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3607 break;
3609 case 0x138:
3610 case 0x038:
3611 b = modrm;
3612 if ((b & 0xf0) == 0xf0) {
3613 goto do_0f_38_fx;
3615 modrm = cpu_ldub_code(env, s->pc++);
3616 rm = modrm & 7;
3617 reg = ((modrm >> 3) & 7) | rex_r;
3618 mod = (modrm >> 6) & 3;
3619 if (b1 >= 2) {
3620 goto illegal_op;
3623 sse_fn_epp = sse_op_table6[b].op[b1];
3624 if (!sse_fn_epp) {
3625 goto illegal_op;
3627 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3628 goto illegal_op;
3630 if (b1) {
3631 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3632 if (mod == 3) {
3633 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3634 } else {
3635 op2_offset = offsetof(CPUX86State,xmm_t0);
3636 gen_lea_modrm(env, s, modrm);
3637 switch (b) {
3638 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3639 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3640 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3641 gen_ldq_env_A0(s, op2_offset +
3642 offsetof(XMMReg, XMM_Q(0)));
3643 break;
3644 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3645 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3646 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3647 s->mem_index, MO_LEUL);
3648 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3649 offsetof(XMMReg, XMM_L(0)));
3650 break;
3651 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3652 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3653 s->mem_index, MO_LEUW);
3654 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3655 offsetof(XMMReg, XMM_W(0)));
3656 break;
3657 case 0x2a: /* movntdqa */
3658 gen_ldo_env_A0(s, op1_offset);
3659 return;
3660 default:
3661 gen_ldo_env_A0(s, op2_offset);
3664 } else {
3665 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3666 if (mod == 3) {
3667 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3668 } else {
3669 op2_offset = offsetof(CPUX86State,mmx_t0);
3670 gen_lea_modrm(env, s, modrm);
3671 gen_ldq_env_A0(s, op2_offset);
3674 if (sse_fn_epp == SSE_SPECIAL) {
3675 goto illegal_op;
3678 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3679 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3680 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3682 if (b == 0x17) {
3683 set_cc_op(s, CC_OP_EFLAGS);
3685 break;
3687 case 0x238:
3688 case 0x338:
3689 do_0f_38_fx:
3690 /* Various integer extensions at 0f 38 f[0-f]. */
3691 b = modrm | (b1 << 8);
3692 modrm = cpu_ldub_code(env, s->pc++);
3693 reg = ((modrm >> 3) & 7) | rex_r;
3695 switch (b) {
3696 case 0x3f0: /* crc32 Gd,Eb */
3697 case 0x3f1: /* crc32 Gd,Ey */
3698 do_crc32:
3699 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3700 goto illegal_op;
3702 if ((b & 0xff) == 0xf0) {
3703 ot = MO_8;
3704 } else if (s->dflag != MO_64) {
3705 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3706 } else {
3707 ot = MO_64;
3710 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3711 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3712 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3713 cpu_T[0], tcg_const_i32(8 << ot));
3715 ot = mo_64_32(s->dflag);
3716 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3717 break;
3719 case 0x1f0: /* crc32 or movbe */
3720 case 0x1f1:
3721 /* For these insns, the f3 prefix is supposed to have priority
3722 over the 66 prefix, but that is not how b1 is computed
3723 above. */
3724 if (s->prefix & PREFIX_REPNZ) {
3725 goto do_crc32;
3727 /* FALLTHRU */
3728 case 0x0f0: /* movbe Gy,My */
3729 case 0x0f1: /* movbe My,Gy */
3730 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3731 goto illegal_op;
3733 if (s->dflag != MO_64) {
3734 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3735 } else {
3736 ot = MO_64;
3739 gen_lea_modrm(env, s, modrm);
3740 if ((b & 1) == 0) {
3741 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3742 s->mem_index, ot | MO_BE);
3743 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3744 } else {
3745 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3746 s->mem_index, ot | MO_BE);
3748 break;
3750 case 0x0f2: /* andn Gy, By, Ey */
3751 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3752 || !(s->prefix & PREFIX_VEX)
3753 || s->vex_l != 0) {
3754 goto illegal_op;
3756 ot = mo_64_32(s->dflag);
3757 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3758 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3759 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3760 gen_op_update1_cc();
3761 set_cc_op(s, CC_OP_LOGICB + ot);
3762 break;
3764 case 0x0f7: /* bextr Gy, Ey, By */
3765 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3766 || !(s->prefix & PREFIX_VEX)
3767 || s->vex_l != 0) {
3768 goto illegal_op;
3770 ot = mo_64_32(s->dflag);
3772 TCGv bound, zero;
3774 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3775 /* Extract START, and shift the operand.
3776 Shifts larger than operand size get zeros. */
3777 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3778 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3780 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3781 zero = tcg_const_tl(0);
3782 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3783 cpu_T[0], zero);
3784 tcg_temp_free(zero);
3786 /* Extract the LEN into a mask. Lengths larger than
3787 operand size get all ones. */
3788 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3789 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3790 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3791 cpu_A0, bound);
3792 tcg_temp_free(bound);
3793 tcg_gen_movi_tl(cpu_T[1], 1);
3794 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3795 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3796 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3798 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3799 gen_op_update1_cc();
3800 set_cc_op(s, CC_OP_LOGICB + ot);
3802 break;
3804 case 0x0f5: /* bzhi Gy, Ey, By */
3805 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3806 || !(s->prefix & PREFIX_VEX)
3807 || s->vex_l != 0) {
3808 goto illegal_op;
3810 ot = mo_64_32(s->dflag);
3811 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3812 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3814 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3815 /* Note that since we're using BMILG (in order to get O
3816 cleared) we need to store the inverse into C. */
3817 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3818 cpu_T[1], bound);
3819 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3820 bound, bound, cpu_T[1]);
3821 tcg_temp_free(bound);
3823 tcg_gen_movi_tl(cpu_A0, -1);
3824 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3825 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3826 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3827 gen_op_update1_cc();
3828 set_cc_op(s, CC_OP_BMILGB + ot);
3829 break;
3831 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3832 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3833 || !(s->prefix & PREFIX_VEX)
3834 || s->vex_l != 0) {
3835 goto illegal_op;
3837 ot = mo_64_32(s->dflag);
3838 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3839 switch (ot) {
3840 default:
3841 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3842 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3843 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3844 cpu_tmp2_i32, cpu_tmp3_i32);
3845 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3846 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3847 break;
3848 #ifdef TARGET_X86_64
3849 case MO_64:
3850 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
3851 cpu_T[0], cpu_regs[R_EDX]);
3852 break;
3853 #endif
3855 break;
3857 case 0x3f5: /* pdep Gy, By, Ey */
3858 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3859 || !(s->prefix & PREFIX_VEX)
3860 || s->vex_l != 0) {
3861 goto illegal_op;
3863 ot = mo_64_32(s->dflag);
3864 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3865 /* Note that by zero-extending the mask operand, we
3866 automatically handle zero-extending the result. */
3867 if (ot == MO_64) {
3868 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3869 } else {
3870 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3872 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3873 break;
3875 case 0x2f5: /* pext Gy, By, Ey */
3876 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3877 || !(s->prefix & PREFIX_VEX)
3878 || s->vex_l != 0) {
3879 goto illegal_op;
3881 ot = mo_64_32(s->dflag);
3882 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3883 /* Note that by zero-extending the mask operand, we
3884 automatically handle zero-extending the result. */
3885 if (ot == MO_64) {
3886 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3887 } else {
3888 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3890 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3891 break;
3893 case 0x1f6: /* adcx Gy, Ey */
3894 case 0x2f6: /* adox Gy, Ey */
3895 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3896 goto illegal_op;
3897 } else {
3898 TCGv carry_in, carry_out, zero;
3899 int end_op;
3901 ot = mo_64_32(s->dflag);
3902 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3904 /* Re-use the carry-out from a previous round. */
3905 TCGV_UNUSED(carry_in);
3906 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3907 switch (s->cc_op) {
3908 case CC_OP_ADCX:
3909 if (b == 0x1f6) {
3910 carry_in = cpu_cc_dst;
3911 end_op = CC_OP_ADCX;
3912 } else {
3913 end_op = CC_OP_ADCOX;
3915 break;
3916 case CC_OP_ADOX:
3917 if (b == 0x1f6) {
3918 end_op = CC_OP_ADCOX;
3919 } else {
3920 carry_in = cpu_cc_src2;
3921 end_op = CC_OP_ADOX;
3923 break;
3924 case CC_OP_ADCOX:
3925 end_op = CC_OP_ADCOX;
3926 carry_in = carry_out;
3927 break;
3928 default:
3929 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3930 break;
3932 /* If we can't reuse carry-out, get it out of EFLAGS. */
3933 if (TCGV_IS_UNUSED(carry_in)) {
3934 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3935 gen_compute_eflags(s);
3937 carry_in = cpu_tmp0;
3938 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3939 ctz32(b == 0x1f6 ? CC_C : CC_O));
3940 tcg_gen_andi_tl(carry_in, carry_in, 1);
3943 switch (ot) {
3944 #ifdef TARGET_X86_64
3945 case MO_32:
3946 /* If we know TL is 64-bit, and we want a 32-bit
3947 result, just do everything in 64-bit arithmetic. */
3948 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3949 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
3950 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
3951 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
3952 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
3953 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
3954 break;
3955 #endif
3956 default:
3957 /* Otherwise compute the carry-out in two steps. */
3958 zero = tcg_const_tl(0);
3959 tcg_gen_add2_tl(cpu_T[0], carry_out,
3960 cpu_T[0], zero,
3961 carry_in, zero);
3962 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3963 cpu_regs[reg], carry_out,
3964 cpu_T[0], zero);
3965 tcg_temp_free(zero);
3966 break;
3968 set_cc_op(s, end_op);
3970 break;
3972 case 0x1f7: /* shlx Gy, Ey, By */
3973 case 0x2f7: /* sarx Gy, Ey, By */
3974 case 0x3f7: /* shrx Gy, Ey, By */
3975 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3976 || !(s->prefix & PREFIX_VEX)
3977 || s->vex_l != 0) {
3978 goto illegal_op;
3980 ot = mo_64_32(s->dflag);
3981 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3982 if (ot == MO_64) {
3983 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
3984 } else {
3985 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
3987 if (b == 0x1f7) {
3988 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3989 } else if (b == 0x2f7) {
3990 if (ot != MO_64) {
3991 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3993 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3994 } else {
3995 if (ot != MO_64) {
3996 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3998 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4000 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4001 break;
4003 case 0x0f3:
4004 case 0x1f3:
4005 case 0x2f3:
4006 case 0x3f3: /* Group 17 */
4007 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4008 || !(s->prefix & PREFIX_VEX)
4009 || s->vex_l != 0) {
4010 goto illegal_op;
4012 ot = mo_64_32(s->dflag);
4013 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4015 switch (reg & 7) {
4016 case 1: /* blsr By,Ey */
4017 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4018 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4019 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4020 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4021 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4022 set_cc_op(s, CC_OP_BMILGB + ot);
4023 break;
4024 case 2: /* blsmsk By,Ey */
4025 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4026 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4027 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4028 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4029 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4030 set_cc_op(s, CC_OP_BMILGB + ot);
4031 break;
4032 case 3: /* blsi By, Ey */
4033 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4034 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4035 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4036 gen_op_update2_cc();
4037 set_cc_op(s, CC_OP_BMILGB + ot);
4038 break;
4040 default:
4041 goto illegal_op;
4042 }
4043 break;
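/* These are the classic two's complement identities: blsr clears the
   lowest set bit (x & (x - 1)), blsmsk masks up to and including it
   (x ^ (x - 1)), and blsi isolates it (x & -x).  Each writes its
   result to the VEX.vvvv register, with ZF/SF derived from cc_dst.  */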
4045 default:
4046 goto illegal_op;
4047 }
4048 break;
4050 case 0x03a:
4051 case 0x13a:
4052 b = modrm;
4053 modrm = cpu_ldub_code(env, s->pc++);
4054 rm = modrm & 7;
4055 reg = ((modrm >> 3) & 7) | rex_r;
4056 mod = (modrm >> 6) & 3;
4057 if (b1 >= 2) {
4058 goto illegal_op;
4059 }
4061 sse_fn_eppi = sse_op_table7[b].op[b1];
4062 if (!sse_fn_eppi) {
4063 goto illegal_op;
4064 }
4065 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4066 goto illegal_op;
4068 if (sse_fn_eppi == SSE_SPECIAL) {
4069 ot = mo_64_32(s->dflag);
4070 rm = (modrm & 7) | REX_B(s);
4071 if (mod != 3)
4072 gen_lea_modrm(env, s, modrm);
4073 reg = ((modrm >> 3) & 7) | rex_r;
4074 val = cpu_ldub_code(env, s->pc++);
4075 switch (b) {
4076 case 0x14: /* pextrb */
4077 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4078 xmm_regs[reg].XMM_B(val & 15)));
4079 if (mod == 3) {
4080 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4081 } else {
4082 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4083 s->mem_index, MO_UB);
4084 }
4085 break;
4086 case 0x15: /* pextrw */
4087 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4088 xmm_regs[reg].XMM_W(val & 7)));
4089 if (mod == 3) {
4090 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4091 } else {
4092 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4093 s->mem_index, MO_LEUW);
4094 }
4095 break;
4096 case 0x16:
4097 if (ot == MO_32) { /* pextrd */
4098 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4099 offsetof(CPUX86State,
4100 xmm_regs[reg].XMM_L(val & 3)));
4101 if (mod == 3) {
4102 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4103 } else {
4104 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4105 s->mem_index, MO_LEUL);
4106 }
4107 } else { /* pextrq */
4108 #ifdef TARGET_X86_64
4109 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4110 offsetof(CPUX86State,
4111 xmm_regs[reg].XMM_Q(val & 1)));
4112 if (mod == 3) {
4113 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4114 } else {
4115 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4116 s->mem_index, MO_LEQ);
4117 }
4118 #else
4119 goto illegal_op;
4120 #endif
4121 }
4122 break;
4123 case 0x17: /* extractps */
4124 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4125 xmm_regs[reg].XMM_L(val & 3)));
4126 if (mod == 3) {
4127 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4128 } else {
4129 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4130 s->mem_index, MO_LEUL);
4131 }
4132 break;
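/* extractps, despite the packed-single mnemonic, is an integer move:
   imm8[1:0] picks a dword of the XMM register, and the code path is
   the same register-or-memory store used by pextrd above.  */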
4133 case 0x20: /* pinsrb */
4134 if (mod == 3) {
4135 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
4136 } else {
4137 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4138 s->mem_index, MO_UB);
4139 }
4140 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4141 xmm_regs[reg].XMM_B(val & 15)));
4142 break;
4143 case 0x21: /* insertps */
4144 if (mod == 3) {
4145 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4146 offsetof(CPUX86State,xmm_regs[rm]
4147 .XMM_L((val >> 6) & 3)));
4148 } else {
4149 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4150 s->mem_index, MO_LEUL);
4151 }
4152 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4153 offsetof(CPUX86State,xmm_regs[reg]
4154 .XMM_L((val >> 4) & 3)));
4155 if ((val >> 0) & 1)
4156 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4157 cpu_env, offsetof(CPUX86State,
4158 xmm_regs[reg].XMM_L(0)));
4159 if ((val >> 1) & 1)
4160 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4161 cpu_env, offsetof(CPUX86State,
4162 xmm_regs[reg].XMM_L(1)));
4163 if ((val >> 2) & 1)
4164 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4165 cpu_env, offsetof(CPUX86State,
4166 xmm_regs[reg].XMM_L(2)));
4167 if ((val >> 3) & 1)
4168 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4169 cpu_env, offsetof(CPUX86State,
4170 xmm_regs[reg].XMM_L(3)));
4171 break;
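/* insertps imm8 layout: bits 7:6 select the source dword (meaningful
   only for the register form), bits 5:4 the destination dword, and
   bits 3:0 form a zero mask -- hence the four conditional zero stores
   above.  */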
4172 case 0x22:
4173 if (ot == MO_32) { /* pinsrd */
4174 if (mod == 3) {
4175 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4176 } else {
4177 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4178 s->mem_index, MO_LEUL);
4179 }
4180 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4181 offsetof(CPUX86State,
4182 xmm_regs[reg].XMM_L(val & 3)));
4183 } else { /* pinsrq */
4184 #ifdef TARGET_X86_64
4185 if (mod == 3) {
4186 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4187 } else {
4188 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4189 s->mem_index, MO_LEQ);
4190 }
4191 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4192 offsetof(CPUX86State,
4193 xmm_regs[reg].XMM_Q(val & 1)));
4194 #else
4195 goto illegal_op;
4196 #endif
4197 }
4198 break;
4199 }
4200 return;
4201 }
4203 if (b1) {
4204 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4205 if (mod == 3) {
4206 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4207 } else {
4208 op2_offset = offsetof(CPUX86State,xmm_t0);
4209 gen_lea_modrm(env, s, modrm);
4210 gen_ldo_env_A0(s, op2_offset);
4211 }
4212 } else {
4213 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4214 if (mod == 3) {
4215 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4216 } else {
4217 op2_offset = offsetof(CPUX86State,mmx_t0);
4218 gen_lea_modrm(env, s, modrm);
4219 gen_ldq_env_A0(s, op2_offset);
4220 }
4221 }
4222 val = cpu_ldub_code(env, s->pc++);
4224 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4225 set_cc_op(s, CC_OP_EFLAGS);
4226 }
4227 if (s->dflag == MO_64) {
4228 /* The helper must use entire 64-bit gp registers */
4229 val |= 1 << 8;
4230 }
4233 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4234 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4235 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4236 break;
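/* For the explicit-length forms (pcmpestri/pcmpestrm) the helpers
   read RAX/RDX themselves, so bit 8 of the immediate is borrowed here
   to tell them whether the REX.W form is in effect and the full
   64-bit register values must be used for the string lengths.  */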
4238 case 0x33a:
4239 /* Various integer extensions at 0f 3a f[0-f]. */
4240 b = modrm | (b1 << 8);
4241 modrm = cpu_ldub_code(env, s->pc++);
4242 reg = ((modrm >> 3) & 7) | rex_r;
4244 switch (b) {
4245 case 0x3f0: /* rorx Gy,Ey, Ib */
4246 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4247 || !(s->prefix & PREFIX_VEX)
4248 || s->vex_l != 0) {
4249 goto illegal_op;
4250 }
4251 ot = mo_64_32(s->dflag);
4252 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4253 b = cpu_ldub_code(env, s->pc++);
4254 if (ot == MO_64) {
4255 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4256 } else {
4257 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4258 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4259 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4260 }
4261 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4262 break;
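/* rorx is BMI2's flagless rotate: unlike ror it never touches CF/OF,
   so cc_op is left alone.  The 32-bit form rotates within 32 bits and
   zero-extends the result, as usual for 32-bit destinations in 64-bit
   mode.  */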
4264 default:
4265 goto illegal_op;
4266 }
4267 break;
4269 default:
4270 goto illegal_op;
4271 }
4272 } else {
4273 /* generic MMX or SSE operation */
4274 switch(b) {
4275 case 0x70: /* pshufx insn */
4276 case 0xc6: /* pshufx insn */
4277 case 0xc2: /* compare insns */
4278 s->rip_offset = 1;
4279 break;
4280 default:
4281 break;
4282 }
4283 if (is_xmm) {
4284 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4285 if (mod != 3) {
4286 int sz = 4;
4288 gen_lea_modrm(env, s, modrm);
4289 op2_offset = offsetof(CPUX86State,xmm_t0);
4291 switch (b) {
4292 case 0x50 ... 0x5a:
4293 case 0x5c ... 0x5f:
4294 case 0xc2:
4295 /* Most sse scalar operations. */
4296 if (b1 == 2) {
4297 sz = 2;
4298 } else if (b1 == 3) {
4299 sz = 3;
4300 }
4301 break;
4303 case 0x2e: /* ucomis[sd] */
4304 case 0x2f: /* comis[sd] */
4305 if (b1 == 0) {
4306 sz = 2;
4307 } else {
4308 sz = 3;
4309 }
4310 break;
4311 }
4313 switch (sz) {
4314 case 2:
4315 /* 32 bit access */
4316 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4317 tcg_gen_st32_tl(cpu_T[0], cpu_env,
4318 offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4319 break;
4320 case 3:
4321 /* 64 bit access */
4322 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0)));
4323 break;
4324 default:
4325 /* 128 bit access */
4326 gen_ldo_env_A0(s, op2_offset);
4327 break;
4328 }
4329 } else {
4330 rm = (modrm & 7) | REX_B(s);
4331 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4332 }
4333 } else {
4334 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4335 if (mod != 3) {
4336 gen_lea_modrm(env, s, modrm);
4337 op2_offset = offsetof(CPUX86State,mmx_t0);
4338 gen_ldq_env_A0(s, op2_offset);
4339 } else {
4340 rm = (modrm & 7);
4341 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4342 }
4343 }
4344 switch(b) {
4345 case 0x0f: /* 3DNow! data insns */
4346 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4347 goto illegal_op;
4348 val = cpu_ldub_code(env, s->pc++);
4349 sse_fn_epp = sse_op_table5[val];
4350 if (!sse_fn_epp) {
4351 goto illegal_op;
4352 }
4353 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4354 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4355 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4356 break;
4357 case 0x70: /* pshufx insn */
4358 case 0xc6: /* pshufx insn */
4359 val = cpu_ldub_code(env, s->pc++);
4360 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4361 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4362 /* XXX: introduce a new table? */
4363 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4364 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4365 break;
4366 case 0xc2:
4367 /* compare insns */
4368 val = cpu_ldub_code(env, s->pc++);
4369 if (val >= 8)
4370 goto illegal_op;
4371 sse_fn_epp = sse_op_table4[val][b1];
4373 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4374 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4375 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4376 break;
4377 case 0xf7:
4378 /* maskmov : we must prepare A0 */
4379 if (mod != 3)
4380 goto illegal_op;
4381 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4382 gen_extu(s->aflag, cpu_A0);
4383 gen_add_A0_ds_seg(s);
4385 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4386 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4387 /* XXX: introduce a new table? */
4388 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4389 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4390 break;
4391 default:
4392 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4393 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4394 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4395 break;
4396 }
4397 if (b == 0x2e || b == 0x2f) {
4398 set_cc_op(s, CC_OP_EFLAGS);
4399 }
4400 }
4401 }
4403 /* convert one instruction. s->is_jmp is set if the translation must
4404 be stopped. Return the next pc value */
4405 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4406 target_ulong pc_start)
4407 {
4408 int b, prefixes;
4409 int shift;
4410 TCGMemOp ot, aflag, dflag;
4411 int modrm, reg, rm, mod, op, opreg, val;
4412 target_ulong next_eip, tval;
4413 int rex_w, rex_r;
4415 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4416 tcg_gen_debug_insn_start(pc_start);
4417 }
4418 s->pc = pc_start;
4419 prefixes = 0;
4420 s->override = -1;
4421 rex_w = -1;
4422 rex_r = 0;
4423 #ifdef TARGET_X86_64
4424 s->rex_x = 0;
4425 s->rex_b = 0;
4426 x86_64_hregs = 0;
4427 #endif
4428 s->rip_offset = 0; /* for relative ip address */
4429 s->vex_l = 0;
4430 s->vex_v = 0;
4431 next_byte:
4432 b = cpu_ldub_code(env, s->pc);
4433 s->pc++;
4434 /* Collect prefixes. */
4435 switch (b) {
4436 case 0xf3:
4437 prefixes |= PREFIX_REPZ;
4438 goto next_byte;
4439 case 0xf2:
4440 prefixes |= PREFIX_REPNZ;
4441 goto next_byte;
4442 case 0xf0:
4443 prefixes |= PREFIX_LOCK;
4444 goto next_byte;
4445 case 0x2e:
4446 s->override = R_CS;
4447 goto next_byte;
4448 case 0x36:
4449 s->override = R_SS;
4450 goto next_byte;
4451 case 0x3e:
4452 s->override = R_DS;
4453 goto next_byte;
4454 case 0x26:
4455 s->override = R_ES;
4456 goto next_byte;
4457 case 0x64:
4458 s->override = R_FS;
4459 goto next_byte;
4460 case 0x65:
4461 s->override = R_GS;
4462 goto next_byte;
4463 case 0x66:
4464 prefixes |= PREFIX_DATA;
4465 goto next_byte;
4466 case 0x67:
4467 prefixes |= PREFIX_ADR;
4468 goto next_byte;
4469 #ifdef TARGET_X86_64
4470 case 0x40 ... 0x4f:
4471 if (CODE64(s)) {
4472 /* REX prefix */
4473 rex_w = (b >> 3) & 1;
4474 rex_r = (b & 0x4) << 1;
4475 s->rex_x = (b & 0x2) << 2;
4476 REX_B(s) = (b & 0x1) << 3;
4477 x86_64_hregs = 1; /* select uniform byte register addressing */
4478 goto next_byte;
4479 }
4480 break;
4481 #endif
4482 case 0xc5: /* 2-byte VEX */
4483 case 0xc4: /* 3-byte VEX */
4484 /* VEX prefixes cannot be used except in 32-bit mode.
4485 Otherwise the instruction is LES or LDS. */
4486 if (s->code32 && !s->vm86) {
4487 static const int pp_prefix[4] = {
4488 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4489 };
4490 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4492 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4493 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4494 otherwise the instruction is LES or LDS. */
4495 break;
4496 }
4497 s->pc++;
4499 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4500 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4501 | PREFIX_LOCK | PREFIX_DATA)) {
4502 goto illegal_op;
4503 }
4504 #ifdef TARGET_X86_64
4505 if (x86_64_hregs) {
4506 goto illegal_op;
4507 }
4508 #endif
4509 rex_r = (~vex2 >> 4) & 8;
4510 if (b == 0xc5) {
4511 vex3 = vex2;
4512 b = cpu_ldub_code(env, s->pc++) | 0x100;
4513 } else {
4514 #ifdef TARGET_X86_64
4515 s->rex_x = (~vex2 >> 3) & 8;
4516 s->rex_b = (~vex2 >> 2) & 8;
4517 #endif
4518 vex3 = cpu_ldub_code(env, s->pc++);
4519 rex_w = (vex3 >> 7) & 1;
4520 switch (vex2 & 0x1f) {
4521 case 0x01: /* Implied 0f leading opcode bytes. */
4522 b = cpu_ldub_code(env, s->pc++) | 0x100;
4523 break;
4524 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4525 b = 0x138;
4526 break;
4527 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4528 b = 0x13a;
4529 break;
4530 default: /* Reserved for future use. */
4531 goto illegal_op;
4532 }
4533 }
4534 s->vex_v = (~vex3 >> 3) & 0xf;
4535 s->vex_l = (vex3 >> 2) & 1;
4536 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4538 break;
4539 }
4541 /* Post-process prefixes. */
4542 if (CODE64(s)) {
4543 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4544 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4545 over 0x66 if both are present. */
4546 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4547 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4548 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4549 } else {
4550 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4551 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4552 dflag = MO_32;
4553 } else {
4554 dflag = MO_16;
4555 }
4556 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4557 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4558 aflag = MO_32;
4559 } else {
4560 aflag = MO_16;
4561 }
4562 }
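/* Example: in 64-bit code, "66 48 89 d8" sets both PREFIX_DATA and
   rex_w, and rex_w wins, so dflag == MO_64 (mov %rbx,%rax); in 32-bit
   code a lone 0x66 flips dflag to MO_16, and 0x67 likewise flips
   aflag.  */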
4564 s->prefix = prefixes;
4565 s->aflag = aflag;
4566 s->dflag = dflag;
4568 /* lock generation */
4569 if (prefixes & PREFIX_LOCK)
4570 gen_helper_lock();
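/* In user-mode emulation gen_helper_lock() takes a global mutex, so
   LOCK-prefixed instructions are simply serialized against each other
   (this predates native atomic TCG operations); the matching
   gen_helper_unlock() is emitted once the instruction body has been
   translated.  */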
4572 /* now check op code */
4573 reswitch:
4574 switch(b) {
4575 case 0x0f:
4576 /**************************/
4577 /* extended op code */
4578 b = cpu_ldub_code(env, s->pc++) | 0x100;
4579 goto reswitch;
4581 /**************************/
4582 /* arith & logic */
4583 case 0x00 ... 0x05:
4584 case 0x08 ... 0x0d:
4585 case 0x10 ... 0x15:
4586 case 0x18 ... 0x1d:
4587 case 0x20 ... 0x25:
4588 case 0x28 ... 0x2d:
4589 case 0x30 ... 0x35:
4590 case 0x38 ... 0x3d:
4591 {
4592 int op, f, val;
4593 op = (b >> 3) & 7;
4594 f = (b >> 1) & 3;
4596 ot = mo_b_d(b, dflag);
4598 switch(f) {
4599 case 0: /* OP Ev, Gv */
4600 modrm = cpu_ldub_code(env, s->pc++);
4601 reg = ((modrm >> 3) & 7) | rex_r;
4602 mod = (modrm >> 6) & 3;
4603 rm = (modrm & 7) | REX_B(s);
4604 if (mod != 3) {
4605 gen_lea_modrm(env, s, modrm);
4606 opreg = OR_TMP0;
4607 } else if (op == OP_XORL && rm == reg) {
4608 xor_zero:
4609 /* xor reg, reg optimisation */
4610 set_cc_op(s, CC_OP_CLR);
4611 tcg_gen_movi_tl(cpu_T[0], 0);
4612 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4613 break;
4614 } else {
4615 opreg = rm;
4616 }
4617 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4618 gen_op(s, op, ot, opreg);
4619 break;
4620 case 1: /* OP Gv, Ev */
4621 modrm = cpu_ldub_code(env, s->pc++);
4622 mod = (modrm >> 6) & 3;
4623 reg = ((modrm >> 3) & 7) | rex_r;
4624 rm = (modrm & 7) | REX_B(s);
4625 if (mod != 3) {
4626 gen_lea_modrm(env, s, modrm);
4627 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4628 } else if (op == OP_XORL && rm == reg) {
4629 goto xor_zero;
4630 } else {
4631 gen_op_mov_v_reg(ot, cpu_T[1], rm);
4632 }
4633 gen_op(s, op, ot, reg);
4634 break;
4635 case 2: /* OP A, Iv */
4636 val = insn_get(env, s, ot);
4637 tcg_gen_movi_tl(cpu_T[1], val);
4638 gen_op(s, op, ot, OR_EAX);
4639 break;
4640 }
4641 }
4642 break;
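/* Decode example: b = 0x31 (xor Ev,Gv) yields op = (0x31 >> 3) & 7 =
   6 = OP_XORL and f = (0x31 >> 1) & 3 = 0, the "OP Ev, Gv" form; the
   common "31 c0" (xor %eax,%eax) then hits the xor_zero shortcut and
   becomes a plain clear with CC_OP_CLR.  */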
4644 case 0x82:
4645 if (CODE64(s))
4646 goto illegal_op;
4647 case 0x80: /* GRP1 */
4648 case 0x81:
4649 case 0x83:
4650 {
4651 int val;
4653 ot = mo_b_d(b, dflag);
4655 modrm = cpu_ldub_code(env, s->pc++);
4656 mod = (modrm >> 6) & 3;
4657 rm = (modrm & 7) | REX_B(s);
4658 op = (modrm >> 3) & 7;
4660 if (mod != 3) {
4661 if (b == 0x83)
4662 s->rip_offset = 1;
4663 else
4664 s->rip_offset = insn_const_size(ot);
4665 gen_lea_modrm(env, s, modrm);
4666 opreg = OR_TMP0;
4667 } else {
4668 opreg = rm;
4669 }
4671 switch(b) {
4672 default:
4673 case 0x80:
4674 case 0x81:
4675 case 0x82:
4676 val = insn_get(env, s, ot);
4677 break;
4678 case 0x83:
4679 val = (int8_t)insn_get(env, s, MO_8);
4680 break;
4681 }
4682 tcg_gen_movi_tl(cpu_T[1], val);
4683 gen_op(s, op, ot, opreg);
4684 }
4685 break;
4687 /**************************/
4688 /* inc, dec, and other misc arith */
4689 case 0x40 ... 0x47: /* inc Gv */
4690 ot = dflag;
4691 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4692 break;
4693 case 0x48 ... 0x4f: /* dec Gv */
4694 ot = dflag;
4695 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4696 break;
4697 case 0xf6: /* GRP3 */
4698 case 0xf7:
4699 ot = mo_b_d(b, dflag);
4701 modrm = cpu_ldub_code(env, s->pc++);
4702 mod = (modrm >> 6) & 3;
4703 rm = (modrm & 7) | REX_B(s);
4704 op = (modrm >> 3) & 7;
4705 if (mod != 3) {
4706 if (op == 0)
4707 s->rip_offset = insn_const_size(ot);
4708 gen_lea_modrm(env, s, modrm);
4709 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4710 } else {
4711 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4712 }
4714 switch(op) {
4715 case 0: /* test */
4716 val = insn_get(env, s, ot);
4717 tcg_gen_movi_tl(cpu_T[1], val);
4718 gen_op_testl_T0_T1_cc();
4719 set_cc_op(s, CC_OP_LOGICB + ot);
4720 break;
4721 case 2: /* not */
4722 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4723 if (mod != 3) {
4724 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4725 } else {
4726 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4727 }
4728 break;
4729 case 3: /* neg */
4730 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4731 if (mod != 3) {
4732 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4733 } else {
4734 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4735 }
4736 gen_op_update_neg_cc();
4737 set_cc_op(s, CC_OP_SUBB + ot);
4738 break;
4739 case 4: /* mul */
4740 switch(ot) {
4741 case MO_8:
4742 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4743 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4744 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4745 /* XXX: use 32 bit mul which could be faster */
4746 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4747 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4748 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4749 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4750 set_cc_op(s, CC_OP_MULB);
4751 break;
4752 case MO_16:
4753 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4754 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4755 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4756 /* XXX: use 32 bit mul which could be faster */
4757 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4758 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4759 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4760 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4761 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4762 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4763 set_cc_op(s, CC_OP_MULW);
4764 break;
4765 default:
4766 case MO_32:
4767 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4768 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4769 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4770 cpu_tmp2_i32, cpu_tmp3_i32);
4771 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4772 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4773 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4774 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4775 set_cc_op(s, CC_OP_MULL);
4776 break;
4777 #ifdef TARGET_X86_64
4778 case MO_64:
4779 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4780 cpu_T[0], cpu_regs[R_EAX]);
4781 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4782 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4783 set_cc_op(s, CC_OP_MULQ);
4784 break;
4785 #endif
4786 }
4787 break;
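/* In every mul case above, cc_dst receives the low part of the
   product and cc_src the high part; CC_OP_MUL* then reports
   CF = OF = (high part != 0), the architected unsigned overflow
   condition.  */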
4788 case 5: /* imul */
4789 switch(ot) {
4790 case MO_8:
4791 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4792 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4793 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4794 /* XXX: use 32 bit mul which could be faster */
4795 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4796 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4797 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4798 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4799 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4800 set_cc_op(s, CC_OP_MULB);
4801 break;
4802 case MO_16:
4803 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4804 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4805 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4806 /* XXX: use 32 bit mul which could be faster */
4807 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4808 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4809 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4810 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4811 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4812 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4813 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4814 set_cc_op(s, CC_OP_MULW);
4815 break;
4816 default:
4817 case MO_32:
4818 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4819 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4820 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4821 cpu_tmp2_i32, cpu_tmp3_i32);
4822 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4823 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4824 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4825 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4826 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4827 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4828 set_cc_op(s, CC_OP_MULL);
4829 break;
4830 #ifdef TARGET_X86_64
4831 case MO_64:
4832 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4833 cpu_T[0], cpu_regs[R_EAX]);
4834 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4835 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4836 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4837 set_cc_op(s, CC_OP_MULQ);
4838 break;
4839 #endif
4840 }
4841 break;
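/* For imul, cc_src holds (high part - sign extension of the low
   part), so CC_OP_MUL* yields CF = OF = 1 exactly when the signed
   product no longer fits in the destination width.  */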
4842 case 6: /* div */
4843 switch(ot) {
4844 case MO_8:
4845 gen_jmp_im(pc_start - s->cs_base);
4846 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4847 break;
4848 case MO_16:
4849 gen_jmp_im(pc_start - s->cs_base);
4850 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4851 break;
4852 default:
4853 case MO_32:
4854 gen_jmp_im(pc_start - s->cs_base);
4855 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4856 break;
4857 #ifdef TARGET_X86_64
4858 case MO_64:
4859 gen_jmp_im(pc_start - s->cs_base);
4860 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4861 break;
4862 #endif
4863 }
4864 break;
4865 case 7: /* idiv */
4866 switch(ot) {
4867 case MO_8:
4868 gen_jmp_im(pc_start - s->cs_base);
4869 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4870 break;
4871 case MO_16:
4872 gen_jmp_im(pc_start - s->cs_base);
4873 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4874 break;
4875 default:
4876 case MO_32:
4877 gen_jmp_im(pc_start - s->cs_base);
4878 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4879 break;
4880 #ifdef TARGET_X86_64
4881 case MO_64:
4882 gen_jmp_im(pc_start - s->cs_base);
4883 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4884 break;
4885 #endif
4886 }
4887 break;
4888 default:
4889 goto illegal_op;
4890 }
4891 break;
4893 case 0xfe: /* GRP4 */
4894 case 0xff: /* GRP5 */
4895 ot = mo_b_d(b, dflag);
4897 modrm = cpu_ldub_code(env, s->pc++);
4898 mod = (modrm >> 6) & 3;
4899 rm = (modrm & 7) | REX_B(s);
4900 op = (modrm >> 3) & 7;
4901 if (op >= 2 && b == 0xfe) {
4902 goto illegal_op;
4903 }
4904 if (CODE64(s)) {
4905 if (op == 2 || op == 4) {
4906 /* operand size for jumps is 64 bit */
4907 ot = MO_64;
4908 } else if (op == 3 || op == 5) {
4909 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4910 } else if (op == 6) {
4911 /* default push size is 64 bit */
4912 ot = mo_pushpop(s, dflag);
4913 }
4914 }
4915 if (mod != 3) {
4916 gen_lea_modrm(env, s, modrm);
4917 if (op >= 2 && op != 3 && op != 5)
4918 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4919 } else {
4920 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4921 }
4923 switch(op) {
4924 case 0: /* inc Ev */
4925 if (mod != 3)
4926 opreg = OR_TMP0;
4927 else
4928 opreg = rm;
4929 gen_inc(s, ot, opreg, 1);
4930 break;
4931 case 1: /* dec Ev */
4932 if (mod != 3)
4933 opreg = OR_TMP0;
4934 else
4935 opreg = rm;
4936 gen_inc(s, ot, opreg, -1);
4937 break;
4938 case 2: /* call Ev */
4939 /* XXX: optimize if memory (no 'and' is necessary) */
4940 if (dflag == MO_16) {
4941 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4942 }
4943 next_eip = s->pc - s->cs_base;
4944 tcg_gen_movi_tl(cpu_T[1], next_eip);
4945 gen_push_v(s, cpu_T[1]);
4946 gen_op_jmp_v(cpu_T[0]);
4947 gen_eob(s);
4948 break;
4949 case 3: /* lcall Ev */
4950 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4951 gen_add_A0_im(s, 1 << ot);
4952 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4953 do_lcall:
4954 if (s->pe && !s->vm86) {
4955 gen_update_cc_op(s);
4956 gen_jmp_im(pc_start - s->cs_base);
4957 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4958 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4959 tcg_const_i32(dflag - 1),
4960 tcg_const_i32(s->pc - pc_start));
4961 } else {
4962 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4963 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4964 tcg_const_i32(dflag - 1),
4965 tcg_const_i32(s->pc - s->cs_base));
4966 }
4967 gen_eob(s);
4968 break;
4969 case 4: /* jmp Ev */
4970 if (dflag == MO_16) {
4971 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4972 }
4973 gen_op_jmp_v(cpu_T[0]);
4974 gen_eob(s);
4975 break;
4976 case 5: /* ljmp Ev */
4977 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4978 gen_add_A0_im(s, 1 << ot);
4979 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4980 do_ljmp:
4981 if (s->pe && !s->vm86) {
4982 gen_update_cc_op(s);
4983 gen_jmp_im(pc_start - s->cs_base);
4984 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4985 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4986 tcg_const_i32(s->pc - pc_start));
4987 } else {
4988 gen_op_movl_seg_T0_vm(R_CS);
4989 gen_op_jmp_v(cpu_T[1]);
4990 }
4991 gen_eob(s);
4992 break;
4993 case 6: /* push Ev */
4994 gen_push_v(s, cpu_T[0]);
4995 break;
4996 default:
4997 goto illegal_op;
4998 }
4999 break;
5001 case 0x84: /* test Ev, Gv */
5002 case 0x85:
5003 ot = mo_b_d(b, dflag);
5005 modrm = cpu_ldub_code(env, s->pc++);
5006 reg = ((modrm >> 3) & 7) | rex_r;
5008 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5009 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5010 gen_op_testl_T0_T1_cc();
5011 set_cc_op(s, CC_OP_LOGICB + ot);
5012 break;
5014 case 0xa8: /* test eAX, Iv */
5015 case 0xa9:
5016 ot = mo_b_d(b, dflag);
5017 val = insn_get(env, s, ot);
5019 gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
5020 tcg_gen_movi_tl(cpu_T[1], val);
5021 gen_op_testl_T0_T1_cc();
5022 set_cc_op(s, CC_OP_LOGICB + ot);
5023 break;
5025 case 0x98: /* CWDE/CBW */
5026 switch (dflag) {
5027 #ifdef TARGET_X86_64
5028 case MO_64:
5029 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5030 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5031 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
5032 break;
5033 #endif
5034 case MO_32:
5035 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5036 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5037 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
5038 break;
5039 case MO_16:
5040 gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
5041 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5042 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
5043 break;
5044 default:
5045 tcg_abort();
5046 }
5047 break;
5048 case 0x99: /* CDQ/CWD */
5049 switch (dflag) {
5050 #ifdef TARGET_X86_64
5051 case MO_64:
5052 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
5053 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5054 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
5055 break;
5056 #endif
5057 case MO_32:
5058 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5059 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5060 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5061 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
5062 break;
5063 case MO_16:
5064 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5065 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5066 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5067 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
5068 break;
5069 default:
5070 tcg_abort();
5071 }
5072 break;
5073 case 0x1af: /* imul Gv, Ev */
5074 case 0x69: /* imul Gv, Ev, I */
5075 case 0x6b:
5076 ot = dflag;
5077 modrm = cpu_ldub_code(env, s->pc++);
5078 reg = ((modrm >> 3) & 7) | rex_r;
5079 if (b == 0x69)
5080 s->rip_offset = insn_const_size(ot);
5081 else if (b == 0x6b)
5082 s->rip_offset = 1;
5083 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5084 if (b == 0x69) {
5085 val = insn_get(env, s, ot);
5086 tcg_gen_movi_tl(cpu_T[1], val);
5087 } else if (b == 0x6b) {
5088 val = (int8_t)insn_get(env, s, MO_8);
5089 tcg_gen_movi_tl(cpu_T[1], val);
5090 } else {
5091 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5092 }
5093 switch (ot) {
5094 #ifdef TARGET_X86_64
5095 case MO_64:
5096 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5097 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5098 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5099 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5100 break;
5101 #endif
5102 case MO_32:
5103 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5104 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5105 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5106 cpu_tmp2_i32, cpu_tmp3_i32);
5107 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5108 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5109 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5110 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5111 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5112 break;
5113 default:
5114 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5115 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5116 /* XXX: use 32 bit mul which could be faster */
5117 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5118 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5119 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5120 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5121 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5122 break;
5123 }
5124 set_cc_op(s, CC_OP_MULB + ot);
5125 break;
5126 case 0x1c0:
5127 case 0x1c1: /* xadd Ev, Gv */
5128 ot = mo_b_d(b, dflag);
5129 modrm = cpu_ldub_code(env, s->pc++);
5130 reg = ((modrm >> 3) & 7) | rex_r;
5131 mod = (modrm >> 6) & 3;
5132 if (mod == 3) {
5133 rm = (modrm & 7) | REX_B(s);
5134 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5135 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5136 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5137 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5138 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5139 } else {
5140 gen_lea_modrm(env, s, modrm);
5141 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5142 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5143 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5144 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5145 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5146 }
5147 gen_op_update2_cc();
5148 set_cc_op(s, CC_OP_ADDB + ot);
5149 break;
5150 case 0x1b0:
5151 case 0x1b1: /* cmpxchg Ev, Gv */
5152 {
5153 TCGLabel *label1, *label2;
5154 TCGv t0, t1, t2, a0;
5156 ot = mo_b_d(b, dflag);
5157 modrm = cpu_ldub_code(env, s->pc++);
5158 reg = ((modrm >> 3) & 7) | rex_r;
5159 mod = (modrm >> 6) & 3;
5160 t0 = tcg_temp_local_new();
5161 t1 = tcg_temp_local_new();
5162 t2 = tcg_temp_local_new();
5163 a0 = tcg_temp_local_new();
5164 gen_op_mov_v_reg(ot, t1, reg);
5165 if (mod == 3) {
5166 rm = (modrm & 7) | REX_B(s);
5167 gen_op_mov_v_reg(ot, t0, rm);
5168 } else {
5169 gen_lea_modrm(env, s, modrm);
5170 tcg_gen_mov_tl(a0, cpu_A0);
5171 gen_op_ld_v(s, ot, t0, a0);
5172 rm = 0; /* avoid warning */
5173 }
5174 label1 = gen_new_label();
5175 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5176 gen_extu(ot, t0);
5177 gen_extu(ot, t2);
5178 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5179 label2 = gen_new_label();
5180 if (mod == 3) {
5181 gen_op_mov_reg_v(ot, R_EAX, t0);
5182 tcg_gen_br(label2);
5183 gen_set_label(label1);
5184 gen_op_mov_reg_v(ot, rm, t1);
5185 } else {
5186 /* perform no-op store cycle like physical cpu; must be
5187 before changing accumulator to ensure idempotency if
5188 the store faults and the instruction is restarted */
5189 gen_op_st_v(s, ot, t0, a0);
5190 gen_op_mov_reg_v(ot, R_EAX, t0);
5191 tcg_gen_br(label2);
5192 gen_set_label(label1);
5193 gen_op_st_v(s, ot, t1, a0);
5194 }
5195 gen_set_label(label2);
5196 tcg_gen_mov_tl(cpu_cc_src, t0);
5197 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5198 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5199 set_cc_op(s, CC_OP_SUBB + ot);
5200 tcg_temp_free(t0);
5201 tcg_temp_free(t1);
5202 tcg_temp_free(t2);
5203 tcg_temp_free(a0);
5204 }
5205 break;
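/* The flags of cmpxchg are always those of "cmp eAX, dest": the
   subtraction operands are captured in cc_src/cc_srcT/cc_dst outside
   the branch above, and CC_OP_SUBB + ot regenerates the full EFLAGS
   result lazily whether or not the exchange happened.  */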
5206 case 0x1c7: /* cmpxchg8b */
5207 modrm = cpu_ldub_code(env, s->pc++);
5208 mod = (modrm >> 6) & 3;
5209 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5210 goto illegal_op;
5211 #ifdef TARGET_X86_64
5212 if (dflag == MO_64) {
5213 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5214 goto illegal_op;
5215 gen_jmp_im(pc_start - s->cs_base);
5216 gen_update_cc_op(s);
5217 gen_lea_modrm(env, s, modrm);
5218 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5219 } else
5220 #endif
5221 {
5222 if (!(s->cpuid_features & CPUID_CX8))
5223 goto illegal_op;
5224 gen_jmp_im(pc_start - s->cs_base);
5225 gen_update_cc_op(s);
5226 gen_lea_modrm(env, s, modrm);
5227 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5228 }
5229 set_cc_op(s, CC_OP_EFLAGS);
5230 break;
5232 /**************************/
5233 /* push/pop */
5234 case 0x50 ... 0x57: /* push */
5235 gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
5236 gen_push_v(s, cpu_T[0]);
5237 break;
5238 case 0x58 ... 0x5f: /* pop */
5239 ot = gen_pop_T0(s);
5240 /* NOTE: order is important for pop %sp */
5241 gen_pop_update(s, ot);
5242 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
5243 break;
5244 case 0x60: /* pusha */
5245 if (CODE64(s))
5246 goto illegal_op;
5247 gen_pusha(s);
5248 break;
5249 case 0x61: /* popa */
5250 if (CODE64(s))
5251 goto illegal_op;
5252 gen_popa(s);
5253 break;
5254 case 0x68: /* push Iv */
5255 case 0x6a:
5256 ot = mo_pushpop(s, dflag);
5257 if (b == 0x68)
5258 val = insn_get(env, s, ot);
5259 else
5260 val = (int8_t)insn_get(env, s, MO_8);
5261 tcg_gen_movi_tl(cpu_T[0], val);
5262 gen_push_v(s, cpu_T[0]);
5263 break;
5264 case 0x8f: /* pop Ev */
5265 modrm = cpu_ldub_code(env, s->pc++);
5266 mod = (modrm >> 6) & 3;
5267 ot = gen_pop_T0(s);
5268 if (mod == 3) {
5269 /* NOTE: order is important for pop %sp */
5270 gen_pop_update(s, ot);
5271 rm = (modrm & 7) | REX_B(s);
5272 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5273 } else {
5274 /* NOTE: order is important too for MMU exceptions */
5275 s->popl_esp_hack = 1 << ot;
5276 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5277 s->popl_esp_hack = 0;
5278 gen_pop_update(s, ot);
5279 }
5280 break;
5281 case 0xc8: /* enter */
5282 {
5283 int level;
5284 val = cpu_lduw_code(env, s->pc);
5285 s->pc += 2;
5286 level = cpu_ldub_code(env, s->pc++);
5287 gen_enter(s, val, level);
5288 }
5289 break;
5290 case 0xc9: /* leave */
5291 /* XXX: exception not precise (ESP is updated before potential exception) */
5292 if (CODE64(s)) {
5293 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
5294 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
5295 } else if (s->ss32) {
5296 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
5297 gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
5298 } else {
5299 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
5300 gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
5301 }
5302 ot = gen_pop_T0(s);
5303 gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
5304 gen_pop_update(s, ot);
5305 break;
5306 case 0x06: /* push es */
5307 case 0x0e: /* push cs */
5308 case 0x16: /* push ss */
5309 case 0x1e: /* push ds */
5310 if (CODE64(s))
5311 goto illegal_op;
5312 gen_op_movl_T0_seg(b >> 3);
5313 gen_push_v(s, cpu_T[0]);
5314 break;
5315 case 0x1a0: /* push fs */
5316 case 0x1a8: /* push gs */
5317 gen_op_movl_T0_seg((b >> 3) & 7);
5318 gen_push_v(s, cpu_T[0]);
5319 break;
5320 case 0x07: /* pop es */
5321 case 0x17: /* pop ss */
5322 case 0x1f: /* pop ds */
5323 if (CODE64(s))
5324 goto illegal_op;
5325 reg = b >> 3;
5326 ot = gen_pop_T0(s);
5327 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5328 gen_pop_update(s, ot);
5329 if (reg == R_SS) {
5330 /* if reg == SS, inhibit interrupts/trace. */
5331 /* If several instructions disable interrupts, only the
5332 _first_ does it */
5333 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5334 gen_helper_set_inhibit_irq(cpu_env);
5335 s->tf = 0;
5336 }
5337 if (s->is_jmp) {
5338 gen_jmp_im(s->pc - s->cs_base);
5339 gen_eob(s);
5340 }
5341 break;
5342 case 0x1a1: /* pop fs */
5343 case 0x1a9: /* pop gs */
5344 ot = gen_pop_T0(s);
5345 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5346 gen_pop_update(s, ot);
5347 if (s->is_jmp) {
5348 gen_jmp_im(s->pc - s->cs_base);
5349 gen_eob(s);
5350 }
5351 break;
5353 /**************************/
5354 /* mov */
5355 case 0x88:
5356 case 0x89: /* mov Gv, Ev */
5357 ot = mo_b_d(b, dflag);
5358 modrm = cpu_ldub_code(env, s->pc++);
5359 reg = ((modrm >> 3) & 7) | rex_r;
5361 /* generate a generic store */
5362 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5363 break;
5364 case 0xc6:
5365 case 0xc7: /* mov Ev, Iv */
5366 ot = mo_b_d(b, dflag);
5367 modrm = cpu_ldub_code(env, s->pc++);
5368 mod = (modrm >> 6) & 3;
5369 if (mod != 3) {
5370 s->rip_offset = insn_const_size(ot);
5371 gen_lea_modrm(env, s, modrm);
5372 }
5373 val = insn_get(env, s, ot);
5374 tcg_gen_movi_tl(cpu_T[0], val);
5375 if (mod != 3) {
5376 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5377 } else {
5378 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
5379 }
5380 break;
5381 case 0x8a:
5382 case 0x8b: /* mov Ev, Gv */
5383 ot = mo_b_d(b, dflag);
5384 modrm = cpu_ldub_code(env, s->pc++);
5385 reg = ((modrm >> 3) & 7) | rex_r;
5387 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5388 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5389 break;
5390 case 0x8e: /* mov seg, Gv */
5391 modrm = cpu_ldub_code(env, s->pc++);
5392 reg = (modrm >> 3) & 7;
5393 if (reg >= 6 || reg == R_CS)
5394 goto illegal_op;
5395 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5396 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5397 if (reg == R_SS) {
5398 /* if reg == SS, inhibit interrupts/trace */
5399 /* If several instructions disable interrupts, only the
5400 _first_ does it */
5401 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5402 gen_helper_set_inhibit_irq(cpu_env);
5403 s->tf = 0;
5404 }
5405 if (s->is_jmp) {
5406 gen_jmp_im(s->pc - s->cs_base);
5407 gen_eob(s);
5408 }
5409 break;
5410 case 0x8c: /* mov Gv, seg */
5411 modrm = cpu_ldub_code(env, s->pc++);
5412 reg = (modrm >> 3) & 7;
5413 mod = (modrm >> 6) & 3;
5414 if (reg >= 6)
5415 goto illegal_op;
5416 gen_op_movl_T0_seg(reg);
5417 ot = mod == 3 ? dflag : MO_16;
5418 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5419 break;
5421 case 0x1b6: /* movzbS Gv, Eb */
5422 case 0x1b7: /* movzwS Gv, Eb */
5423 case 0x1be: /* movsbS Gv, Eb */
5424 case 0x1bf: /* movswS Gv, Eb */
5425 {
5426 TCGMemOp d_ot;
5427 TCGMemOp s_ot;
5429 /* d_ot is the size of destination */
5430 d_ot = dflag;
5431 /* ot is the size of source */
5432 ot = (b & 1) + MO_8;
5433 /* s_ot is the sign+size of source */
5434 s_ot = b & 8 ? MO_SIGN | ot : ot;
5436 modrm = cpu_ldub_code(env, s->pc++);
5437 reg = ((modrm >> 3) & 7) | rex_r;
5438 mod = (modrm >> 6) & 3;
5439 rm = (modrm & 7) | REX_B(s);
5441 if (mod == 3) {
5442 gen_op_mov_v_reg(ot, cpu_T[0], rm);
5443 switch (s_ot) {
5444 case MO_UB:
5445 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5446 break;
5447 case MO_SB:
5448 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5449 break;
5450 case MO_UW:
5451 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5452 break;
5453 default:
5454 case MO_SW:
5455 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5456 break;
5457 }
5458 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5459 } else {
5460 gen_lea_modrm(env, s, modrm);
5461 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
5462 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5463 }
5464 }
5465 break;
5467 case 0x8d: /* lea */
5468 ot = dflag;
5469 modrm = cpu_ldub_code(env, s->pc++);
5470 mod = (modrm >> 6) & 3;
5471 if (mod == 3)
5472 goto illegal_op;
5473 reg = ((modrm >> 3) & 7) | rex_r;
5474 /* we must ensure that no segment is added */
5475 s->override = -1;
5476 val = s->addseg;
5477 s->addseg = 0;
5478 gen_lea_modrm(env, s, modrm);
5479 s->addseg = val;
5480 gen_op_mov_reg_v(ot, reg, cpu_A0);
5481 break;
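/* lea reuses the normal ModRM effective-address computation with the
   segment bases suppressed (override/addseg saved and cleared above);
   the raw A0 value is then written back, truncated to the operand
   size.  */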
5483 case 0xa0: /* mov EAX, Ov */
5484 case 0xa1:
5485 case 0xa2: /* mov Ov, EAX */
5486 case 0xa3:
5487 {
5488 target_ulong offset_addr;
5490 ot = mo_b_d(b, dflag);
5491 switch (s->aflag) {
5492 #ifdef TARGET_X86_64
5493 case MO_64:
5494 offset_addr = cpu_ldq_code(env, s->pc);
5495 s->pc += 8;
5496 break;
5497 #endif
5498 default:
5499 offset_addr = insn_get(env, s, s->aflag);
5500 break;
5501 }
5502 tcg_gen_movi_tl(cpu_A0, offset_addr);
5503 gen_add_A0_ds_seg(s);
5504 if ((b & 2) == 0) {
5505 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
5506 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
5507 } else {
5508 gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
5509 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5510 }
5511 }
5512 break;
5513 case 0xd7: /* xlat */
5514 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5515 tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
5516 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5517 gen_extu(s->aflag, cpu_A0);
5518 gen_add_A0_ds_seg(s);
5519 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
5520 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
5521 break;
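/* xlat: AL = [seg:(E/R)BX + zero-extended AL]; the sum is masked to
   the current address size by gen_extu() before the DS-relative
   segment base is applied.  */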
5522 case 0xb0 ... 0xb7: /* mov R, Ib */
5523 val = insn_get(env, s, MO_8);
5524 tcg_gen_movi_tl(cpu_T[0], val);
5525 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
5526 break;
5527 case 0xb8 ... 0xbf: /* mov R, Iv */
5528 #ifdef TARGET_X86_64
5529 if (dflag == MO_64) {
5530 uint64_t tmp;
5531 /* 64 bit case */
5532 tmp = cpu_ldq_code(env, s->pc);
5533 s->pc += 8;
5534 reg = (b & 7) | REX_B(s);
5535 tcg_gen_movi_tl(cpu_T[0], tmp);
5536 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
5537 } else
5538 #endif
5539 {
5540 ot = dflag;
5541 val = insn_get(env, s, ot);
5542 reg = (b & 7) | REX_B(s);
5543 tcg_gen_movi_tl(cpu_T[0], val);
5544 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5545 }
5546 break;
5548 case 0x91 ... 0x97: /* xchg R, EAX */
5549 do_xchg_reg_eax:
5550 ot = dflag;
5551 reg = (b & 7) | REX_B(s);
5552 rm = R_EAX;
5553 goto do_xchg_reg;
5554 case 0x86:
5555 case 0x87: /* xchg Ev, Gv */
5556 ot = mo_b_d(b, dflag);
5557 modrm = cpu_ldub_code(env, s->pc++);
5558 reg = ((modrm >> 3) & 7) | rex_r;
5559 mod = (modrm >> 6) & 3;
5560 if (mod == 3) {
5561 rm = (modrm & 7) | REX_B(s);
5562 do_xchg_reg:
5563 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5564 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5565 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5566 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5567 } else {
5568 gen_lea_modrm(env, s, modrm);
5569 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5570 /* for xchg, lock is implicit */
5571 if (!(prefixes & PREFIX_LOCK))
5572 gen_helper_lock();
5573 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5574 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5575 if (!(prefixes & PREFIX_LOCK))
5576 gen_helper_unlock();
5577 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5578 }
5579 break;
5580 case 0xc4: /* les Gv */
5581 /* In CODE64 this is VEX3; see above. */
5582 op = R_ES;
5583 goto do_lxx;
5584 case 0xc5: /* lds Gv */
5585 /* In CODE64 this is VEX2; see above. */
5586 op = R_DS;
5587 goto do_lxx;
5588 case 0x1b2: /* lss Gv */
5589 op = R_SS;
5590 goto do_lxx;
5591 case 0x1b4: /* lfs Gv */
5592 op = R_FS;
5593 goto do_lxx;
5594 case 0x1b5: /* lgs Gv */
5595 op = R_GS;
5596 do_lxx:
5597 ot = dflag != MO_16 ? MO_32 : MO_16;
5598 modrm = cpu_ldub_code(env, s->pc++);
5599 reg = ((modrm >> 3) & 7) | rex_r;
5600 mod = (modrm >> 6) & 3;
5601 if (mod == 3)
5602 goto illegal_op;
5603 gen_lea_modrm(env, s, modrm);
5604 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5605 gen_add_A0_im(s, 1 << ot);
5606 /* load the segment first to handle exceptions properly */
5607 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5608 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5609 /* then put the data */
5610 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5611 if (s->is_jmp) {
5612 gen_jmp_im(s->pc - s->cs_base);
5613 gen_eob(s);
5614 }
5615 break;
5617 /************************/
5618 /* shifts */
5619 case 0xc0:
5620 case 0xc1:
5621 /* shift Ev,Ib */
5622 shift = 2;
5623 grp2:
5625 ot = mo_b_d(b, dflag);
5626 modrm = cpu_ldub_code(env, s->pc++);
5627 mod = (modrm >> 6) & 3;
5628 op = (modrm >> 3) & 7;
5630 if (mod != 3) {
5631 if (shift == 2) {
5632 s->rip_offset = 1;
5633 }
5634 gen_lea_modrm(env, s, modrm);
5635 opreg = OR_TMP0;
5636 } else {
5637 opreg = (modrm & 7) | REX_B(s);
5638 }
5640 /* simpler op */
5641 if (shift == 0) {
5642 gen_shift(s, op, ot, opreg, OR_ECX);
5643 } else {
5644 if (shift == 2) {
5645 shift = cpu_ldub_code(env, s->pc++);
5646 }
5647 gen_shifti(s, op, ot, opreg, shift);
5648 }
5650 break;
5651 case 0xd0:
5652 case 0xd1:
5653 /* shift Ev,1 */
5654 shift = 1;
5655 goto grp2;
5656 case 0xd2:
5657 case 0xd3:
5658 /* shift Ev,cl */
5659 shift = 0;
5660 goto grp2;
5662 case 0x1a4: /* shld imm */
5663 op = 0;
5664 shift = 1;
5665 goto do_shiftd;
5666 case 0x1a5: /* shld cl */
5667 op = 0;
5668 shift = 0;
5669 goto do_shiftd;
5670 case 0x1ac: /* shrd imm */
5671 op = 1;
5672 shift = 1;
5673 goto do_shiftd;
5674 case 0x1ad: /* shrd cl */
5675 op = 1;
5676 shift = 0;
5677 do_shiftd:
5678 ot = dflag;
5679 modrm = cpu_ldub_code(env, s->pc++);
5680 mod = (modrm >> 6) & 3;
5681 rm = (modrm & 7) | REX_B(s);
5682 reg = ((modrm >> 3) & 7) | rex_r;
5683 if (mod != 3) {
5684 gen_lea_modrm(env, s, modrm);
5685 opreg = OR_TMP0;
5686 } else {
5687 opreg = rm;
5688 }
5689 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5691 if (shift) {
5692 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5693 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5694 tcg_temp_free(imm);
5695 } else {
5696 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5697 }
5698 break;
5700 /************************/
5701 /* floats */
5702 case 0xd8 ... 0xdf:
5703 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5704 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5705 /* XXX: what to do if illegal op ? */
5706 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5707 break;
5708 }
5709 modrm = cpu_ldub_code(env, s->pc++);
5710 mod = (modrm >> 6) & 3;
5711 rm = modrm & 7;
5712 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5713 if (mod != 3) {
5714 /* memory op */
5715 gen_lea_modrm(env, s, modrm);
5716 switch(op) {
5717 case 0x00 ... 0x07: /* fxxxs */
5718 case 0x10 ... 0x17: /* fixxxl */
5719 case 0x20 ... 0x27: /* fxxxl */
5720 case 0x30 ... 0x37: /* fixxx */
5721 {
5722 int op1;
5723 op1 = op & 7;
5725 switch(op >> 4) {
5726 case 0:
5727 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5728 s->mem_index, MO_LEUL);
5729 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5730 break;
5731 case 1:
5732 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5733 s->mem_index, MO_LEUL);
5734 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5735 break;
5736 case 2:
5737 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5738 s->mem_index, MO_LEQ);
5739 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5740 break;
5741 case 3:
5742 default:
5743 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5744 s->mem_index, MO_LESW);
5745 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5746 break;
5747 }
5749 gen_helper_fp_arith_ST0_FT0(op1);
5750 if (op1 == 3) {
5751 /* fcomp needs pop */
5752 gen_helper_fpop(cpu_env);
5753 }
5754 }
5755 break;
5756 case 0x08: /* flds */
5757 case 0x0a: /* fsts */
5758 case 0x0b: /* fstps */
5759 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5760 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5761 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5762 switch(op & 7) {
5763 case 0:
5764 switch(op >> 4) {
5765 case 0:
5766 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5767 s->mem_index, MO_LEUL);
5768 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5769 break;
5770 case 1:
5771 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5772 s->mem_index, MO_LEUL);
5773 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5774 break;
5775 case 2:
5776 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5777 s->mem_index, MO_LEQ);
5778 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5779 break;
5780 case 3:
5781 default:
5782 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5783 s->mem_index, MO_LESW);
5784 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5785 break;
5786 }
5787 break;
5788 case 1:
5789 /* XXX: the corresponding CPUID bit (SSE3, which introduced fisttp) must be tested! */
5790 switch(op >> 4) {
5791 case 1:
5792 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5793 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5794 s->mem_index, MO_LEUL);
5795 break;
5796 case 2:
5797 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5798 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5799 s->mem_index, MO_LEQ);
5800 break;
5801 case 3:
5802 default:
5803 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5804 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5805 s->mem_index, MO_LEUW);
5806 break;
5807 }
5808 gen_helper_fpop(cpu_env);
5809 break;
5810 default:
5811 switch(op >> 4) {
5812 case 0:
5813 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5814 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5815 s->mem_index, MO_LEUL);
5816 break;
5817 case 1:
5818 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5819 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5820 s->mem_index, MO_LEUL);
5821 break;
5822 case 2:
5823 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5824 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5825 s->mem_index, MO_LEQ);
5826 break;
5827 case 3:
5828 default:
5829 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5830 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5831 s->mem_index, MO_LEUW);
5832 break;
5832 }
5834 if ((op & 7) == 3)
5835 gen_helper_fpop(cpu_env);
5836 break;
5837 }
5838 break;
5839 case 0x0c: /* fldenv mem */
5840 gen_update_cc_op(s);
5841 gen_jmp_im(pc_start - s->cs_base);
5842 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5843 break;
5844 case 0x0d: /* fldcw mem */
5845 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5846 s->mem_index, MO_LEUW);
5847 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5848 break;
5849 case 0x0e: /* fnstenv mem */
5850 gen_update_cc_op(s);
5851 gen_jmp_im(pc_start - s->cs_base);
5852 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5853 break;
5854 case 0x0f: /* fnstcw mem */
5855 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5856 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5857 s->mem_index, MO_LEUW);
5858 break;
5859 case 0x1d: /* fldt mem */
5860 gen_update_cc_op(s);
5861 gen_jmp_im(pc_start - s->cs_base);
5862 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5863 break;
5864 case 0x1f: /* fstpt mem */
5865 gen_update_cc_op(s);
5866 gen_jmp_im(pc_start - s->cs_base);
5867 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5868 gen_helper_fpop(cpu_env);
5869 break;
5870 case 0x2c: /* frstor mem */
5871 gen_update_cc_op(s);
5872 gen_jmp_im(pc_start - s->cs_base);
5873 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5874 break;
5875 case 0x2e: /* fnsave mem */
5876 gen_update_cc_op(s);
5877 gen_jmp_im(pc_start - s->cs_base);
5878 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5879 break;
5880 case 0x2f: /* fnstsw mem */
5881 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5882 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5883 s->mem_index, MO_LEUW);
5884 break;
5885 case 0x3c: /* fbld */
5886 gen_update_cc_op(s);
5887 gen_jmp_im(pc_start - s->cs_base);
5888 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5889 break;
5890 case 0x3e: /* fbstp */
5891 gen_update_cc_op(s);
5892 gen_jmp_im(pc_start - s->cs_base);
5893 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5894 gen_helper_fpop(cpu_env);
5895 break;
5896 case 0x3d: /* fildll */
5897 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5898 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5899 break;
5900 case 0x3f: /* fistpll */
5901 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5902 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5903 gen_helper_fpop(cpu_env);
5904 break;
5905 default:
5906 goto illegal_op;
5907 }
5908 } else {
5909 /* register float ops */
5910 opreg = rm;
5912 switch(op) {
5913 case 0x08: /* fld sti */
5914 gen_helper_fpush(cpu_env);
5915 gen_helper_fmov_ST0_STN(cpu_env,
5916 tcg_const_i32((opreg + 1) & 7));
5917 break;
5918 case 0x09: /* fxchg sti */
5919 case 0x29: /* fxchg4 sti, undocumented op */
5920 case 0x39: /* fxchg7 sti, undocumented op */
5921 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5922 break;
5923 case 0x0a: /* grp d9/2 */
5924 switch(rm) {
5925 case 0: /* fnop */
5926 /* check exceptions (FreeBSD FPU probe) */
5927 gen_update_cc_op(s);
5928 gen_jmp_im(pc_start - s->cs_base);
5929 gen_helper_fwait(cpu_env);
5930 break;
5931 default:
5932 goto illegal_op;
5933 }
5934 break;
5935 case 0x0c: /* grp d9/4 */
5936 switch(rm) {
5937 case 0: /* fchs */
5938 gen_helper_fchs_ST0(cpu_env);
5939 break;
5940 case 1: /* fabs */
5941 gen_helper_fabs_ST0(cpu_env);
5942 break;
5943 case 4: /* ftst */
5944 gen_helper_fldz_FT0(cpu_env);
5945 gen_helper_fcom_ST0_FT0(cpu_env);
5946 break;
5947 case 5: /* fxam */
5948 gen_helper_fxam_ST0(cpu_env);
5949 break;
5950 default:
5951 goto illegal_op;
5952 }
5953 break;
5954 case 0x0d: /* grp d9/5 */
5955 {
5956 switch(rm) {
5957 case 0:
5958 gen_helper_fpush(cpu_env);
5959 gen_helper_fld1_ST0(cpu_env);
5960 break;
5961 case 1:
5962 gen_helper_fpush(cpu_env);
5963 gen_helper_fldl2t_ST0(cpu_env);
5964 break;
5965 case 2:
5966 gen_helper_fpush(cpu_env);
5967 gen_helper_fldl2e_ST0(cpu_env);
5968 break;
5969 case 3:
5970 gen_helper_fpush(cpu_env);
5971 gen_helper_fldpi_ST0(cpu_env);
5972 break;
5973 case 4:
5974 gen_helper_fpush(cpu_env);
5975 gen_helper_fldlg2_ST0(cpu_env);
5976 break;
5977 case 5:
5978 gen_helper_fpush(cpu_env);
5979 gen_helper_fldln2_ST0(cpu_env);
5980 break;
5981 case 6:
5982 gen_helper_fpush(cpu_env);
5983 gen_helper_fldz_ST0(cpu_env);
5984 break;
5985 default:
5986 goto illegal_op;
5987 }
5988 }
5989 break;
5990 case 0x0e: /* grp d9/6 */
5991 switch(rm) {
5992 case 0: /* f2xm1 */
5993 gen_helper_f2xm1(cpu_env);
5994 break;
5995 case 1: /* fyl2x */
5996 gen_helper_fyl2x(cpu_env);
5997 break;
5998 case 2: /* fptan */
5999 gen_helper_fptan(cpu_env);
6000 break;
6001 case 3: /* fpatan */
6002 gen_helper_fpatan(cpu_env);
6003 break;
6004 case 4: /* fxtract */
6005 gen_helper_fxtract(cpu_env);
6006 break;
6007 case 5: /* fprem1 */
6008 gen_helper_fprem1(cpu_env);
6009 break;
6010 case 6: /* fdecstp */
6011 gen_helper_fdecstp(cpu_env);
6012 break;
6013 default:
6014 case 7: /* fincstp */
6015 gen_helper_fincstp(cpu_env);
6016 break;
6017 }
6018 break;
6019 case 0x0f: /* grp d9/7 */
6020 switch(rm) {
6021 case 0: /* fprem */
6022 gen_helper_fprem(cpu_env);
6023 break;
6024 case 1: /* fyl2xp1 */
6025 gen_helper_fyl2xp1(cpu_env);
6026 break;
6027 case 2: /* fsqrt */
6028 gen_helper_fsqrt(cpu_env);
6029 break;
6030 case 3: /* fsincos */
6031 gen_helper_fsincos(cpu_env);
6032 break;
6033 case 5: /* fscale */
6034 gen_helper_fscale(cpu_env);
6035 break;
6036 case 4: /* frndint */
6037 gen_helper_frndint(cpu_env);
6038 break;
6039 case 6: /* fsin */
6040 gen_helper_fsin(cpu_env);
6041 break;
6042 default:
6043 case 7: /* fcos */
6044 gen_helper_fcos(cpu_env);
6045 break;
6046 }
6047 break;
6048 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6049 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6050 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6051 {
6052 int op1;
6054 op1 = op & 7;
6055 if (op >= 0x20) {
6056 gen_helper_fp_arith_STN_ST0(op1, opreg);
6057 if (op >= 0x30)
6058 gen_helper_fpop(cpu_env);
6059 } else {
6060 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6061 gen_helper_fp_arith_ST0_FT0(op1);
6062 }
6063 }
6064 break;
6065 case 0x02: /* fcom */
6066 case 0x22: /* fcom2, undocumented op */
6067 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6068 gen_helper_fcom_ST0_FT0(cpu_env);
6069 break;
6070 case 0x03: /* fcomp */
6071 case 0x23: /* fcomp3, undocumented op */
6072 case 0x32: /* fcomp5, undocumented op */
6073 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6074 gen_helper_fcom_ST0_FT0(cpu_env);
6075 gen_helper_fpop(cpu_env);
6076 break;
6077 case 0x15: /* da/5 */
6078 switch(rm) {
6079 case 1: /* fucompp */
6080 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6081 gen_helper_fucom_ST0_FT0(cpu_env);
6082 gen_helper_fpop(cpu_env);
6083 gen_helper_fpop(cpu_env);
6084 break;
6085 default:
6086 goto illegal_op;
6087 }
6088 break;
6089 case 0x1c:
6090 switch(rm) {
6091 case 0: /* feni (287 only, just do nop here) */
6092 break;
6093 case 1: /* fdisi (287 only, just do nop here) */
6094 break;
6095 case 2: /* fclex */
6096 gen_helper_fclex(cpu_env);
6097 break;
6098 case 3: /* fninit */
6099 gen_helper_fninit(cpu_env);
6100 break;
6101 case 4: /* fsetpm (287 only, just do nop here) */
6102 break;
6103 default:
6104 goto illegal_op;
6105 }
6106 break;
6107 case 0x1d: /* fucomi */
6108 if (!(s->cpuid_features & CPUID_CMOV)) {
6109 goto illegal_op;
6110 }
6111 gen_update_cc_op(s);
6112 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6113 gen_helper_fucomi_ST0_FT0(cpu_env);
6114 set_cc_op(s, CC_OP_EFLAGS);
6115 break;
6116 case 0x1e: /* fcomi */
6117 if (!(s->cpuid_features & CPUID_CMOV)) {
6118 goto illegal_op;
6119 }
6120 gen_update_cc_op(s);
6121 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6122 gen_helper_fcomi_ST0_FT0(cpu_env);
6123 set_cc_op(s, CC_OP_EFLAGS);
6124 break;
6125 case 0x28: /* ffree sti */
6126 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6127 break;
6128 case 0x2a: /* fst sti */
6129 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6130 break;
6131 case 0x2b: /* fstp sti */
6132 case 0x0b: /* fstp1 sti, undocumented op */
6133 case 0x3a: /* fstp8 sti, undocumented op */
6134 case 0x3b: /* fstp9 sti, undocumented op */
6135 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6136 gen_helper_fpop(cpu_env);
6137 break;
6138 case 0x2c: /* fucom st(i) */
6139 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6140 gen_helper_fucom_ST0_FT0(cpu_env);
6141 break;
6142 case 0x2d: /* fucomp st(i) */
6143 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6144 gen_helper_fucom_ST0_FT0(cpu_env);
6145 gen_helper_fpop(cpu_env);
6146 break;
6147 case 0x33: /* de/3 */
6148 switch(rm) {
6149 case 1: /* fcompp */
6150 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6151 gen_helper_fcom_ST0_FT0(cpu_env);
6152 gen_helper_fpop(cpu_env);
6153 gen_helper_fpop(cpu_env);
6154 break;
6155 default:
6156 goto illegal_op;
6158 break;
6159 case 0x38: /* ffreep sti, undocumented op */
6160 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6161 gen_helper_fpop(cpu_env);
6162 break;
6163 case 0x3c: /* df/4 */
6164 switch(rm) {
6165 case 0:
6166 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6167 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6168 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
6169 break;
6170 default:
6171 goto illegal_op;
6173 break;
6174 case 0x3d: /* fucomip */
6175 if (!(s->cpuid_features & CPUID_CMOV)) {
6176 goto illegal_op;
6178 gen_update_cc_op(s);
6179 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6180 gen_helper_fucomi_ST0_FT0(cpu_env);
6181 gen_helper_fpop(cpu_env);
6182 set_cc_op(s, CC_OP_EFLAGS);
6183 break;
6184 case 0x3e: /* fcomip */
6185 if (!(s->cpuid_features & CPUID_CMOV)) {
6186 goto illegal_op;
6188 gen_update_cc_op(s);
6189 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6190 gen_helper_fcomi_ST0_FT0(cpu_env);
6191 gen_helper_fpop(cpu_env);
6192 set_cc_op(s, CC_OP_EFLAGS);
6193 break;
6194 case 0x10 ... 0x13: /* fcmovxx */
6195 case 0x18 ... 0x1b:
6197 int op1;
6198 TCGLabel *l1;
6199 static const uint8_t fcmov_cc[8] = {
6200 (JCC_B << 1),
6201 (JCC_Z << 1),
6202 (JCC_BE << 1),
6203 (JCC_P << 1),
6204 };
6206 if (!(s->cpuid_features & CPUID_CMOV)) {
6207 goto illegal_op;
6209 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6210 l1 = gen_new_label();
6211 gen_jcc1_noeob(s, op1, l1);
6212 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6213 gen_set_label(l1);
6215 break;
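/* fcmov_cc maps the two low opcode bits to the B/Z/BE/P condition
   codes, and bit 3 of op distinguishes the DA (condition-true) from
   the DB (condition-false) group.  The XOR with 1 inverts the test
   so that gen_jcc1_noeob branches around the fmov whenever the
   fcmov condition does not hold. */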
6216 default:
6217 goto illegal_op;
6220 break;
6221 /************************/
6222 /* string ops */
6224 case 0xa4: /* movsS */
6225 case 0xa5:
6226 ot = mo_b_d(b, dflag);
6227 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6228 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6229 } else {
6230 gen_movs(s, ot);
6232 break;
6234 case 0xaa: /* stosS */
6235 case 0xab:
6236 ot = mo_b_d(b, dflag);
6237 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6238 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6239 } else {
6240 gen_stos(s, ot);
6242 break;
6243 case 0xac: /* lodsS */
6244 case 0xad:
6245 ot = mo_b_d(b, dflag);
6246 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6247 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6248 } else {
6249 gen_lods(s, ot);
6251 break;
6252 case 0xae: /* scasS */
6253 case 0xaf:
6254 ot = mo_b_d(b, dflag);
6255 if (prefixes & PREFIX_REPNZ) {
6256 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6257 } else if (prefixes & PREFIX_REPZ) {
6258 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6259 } else {
6260 gen_scas(s, ot);
6262 break;
6264 case 0xa6: /* cmpsS */
6265 case 0xa7:
6266 ot = mo_b_d(b, dflag);
6267 if (prefixes & PREFIX_REPNZ) {
6268 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6269 } else if (prefixes & PREFIX_REPZ) {
6270 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6271 } else {
6272 gen_cmps(s, ot);
6274 break;
6275 case 0x6c: /* insS */
6276 case 0x6d:
6277 ot = mo_b_d32(b, dflag);
6278 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6279 gen_check_io(s, ot, pc_start - s->cs_base,
6280 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6281 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6282 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6283 } else {
6284 gen_ins(s, ot);
6285 if (s->tb->cflags & CF_USE_ICOUNT) {
6286 gen_jmp(s, s->pc - s->cs_base);
6289 break;
6290 case 0x6e: /* outsS */
6291 case 0x6f:
6292 ot = mo_b_d32(b, dflag);
6293 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6294 gen_check_io(s, ot, pc_start - s->cs_base,
6295 svm_is_rep(prefixes) | 4);
6296 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6297 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6298 } else {
6299 gen_outs(s, ot);
6300 if (s->tb->cflags & CF_USE_ICOUNT) {
6301 gen_jmp(s, s->pc - s->cs_base);
6304 break;
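/* For ins/outs (and the in/out cases below), the TB must end right
   after the I/O access when icount is enabled so the instruction
   counter stays exact, hence the gen_jmp to the next insn whenever
   CF_USE_ICOUNT is set. */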
6306 /************************/
6307 /* port I/O */
6309 case 0xe4:
6310 case 0xe5:
6311 ot = mo_b_d32(b, dflag);
6312 val = cpu_ldub_code(env, s->pc++);
6313 tcg_gen_movi_tl(cpu_T[0], val);
6314 gen_check_io(s, ot, pc_start - s->cs_base,
6315 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6316 if (s->tb->cflags & CF_USE_ICOUNT) {
6317 gen_io_start();
6319 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6320 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6321 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6322 if (s->tb->cflags & CF_USE_ICOUNT) {
6323 gen_io_end();
6324 gen_jmp(s, s->pc - s->cs_base);
6326 break;
6327 case 0xe6:
6328 case 0xe7:
6329 ot = mo_b_d32(b, dflag);
6330 val = cpu_ldub_code(env, s->pc++);
6331 tcg_gen_movi_tl(cpu_T[0], val);
6332 gen_check_io(s, ot, pc_start - s->cs_base,
6333 svm_is_rep(prefixes));
6334 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6336 if (s->tb->cflags & CF_USE_ICOUNT) {
6337 gen_io_start();
6339 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6340 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6341 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6342 if (s->tb->cflags & CF_USE_ICOUNT) {
6343 gen_io_end();
6344 gen_jmp(s, s->pc - s->cs_base);
6346 break;
6347 case 0xec:
6348 case 0xed:
6349 ot = mo_b_d32(b, dflag);
6350 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6351 gen_check_io(s, ot, pc_start - s->cs_base,
6352 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6353 if (s->tb->cflags & CF_USE_ICOUNT) {
6354 gen_io_start();
6356 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6357 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6358 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6359 if (s->tb->cflags & CF_USE_ICOUNT) {
6360 gen_io_end();
6361 gen_jmp(s, s->pc - s->cs_base);
6363 break;
6364 case 0xee:
6365 case 0xef:
6366 ot = mo_b_d32(b, dflag);
6367 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6368 gen_check_io(s, ot, pc_start - s->cs_base,
6369 svm_is_rep(prefixes));
6370 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6372 if (s->tb->cflags & CF_USE_ICOUNT) {
6373 gen_io_start();
6375 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6376 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6377 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6378 if (s->tb->cflags & CF_USE_ICOUNT) {
6379 gen_io_end();
6380 gen_jmp(s, s->pc - s->cs_base);
6382 break;
6384 /************************/
6385 /* control */
6386 case 0xc2: /* ret im */
6387 val = cpu_ldsw_code(env, s->pc);
6388 s->pc += 2;
6389 ot = gen_pop_T0(s);
6390 gen_stack_update(s, val + (1 << ot));
6391 /* Note that gen_pop_T0 uses a zero-extending load. */
6392 gen_op_jmp_v(cpu_T[0]);
6393 gen_eob(s);
6394 break;
6395 case 0xc3: /* ret */
6396 ot = gen_pop_T0(s);
6397 gen_pop_update(s, ot);
6398 /* Note that gen_pop_T0 uses a zero-extending load. */
6399 gen_op_jmp_v(cpu_T[0]);
6400 gen_eob(s);
6401 break;
6402 case 0xca: /* lret im */
6403 val = cpu_ldsw_code(env, s->pc);
6404 s->pc += 2;
6405 do_lret:
6406 if (s->pe && !s->vm86) {
6407 gen_update_cc_op(s);
6408 gen_jmp_im(pc_start - s->cs_base);
6409 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6410 tcg_const_i32(val));
6411 } else {
6412 gen_stack_A0(s);
6413 /* pop offset */
6414 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6415 /* NOTE: keeping EIP updated is not a problem in case of
6416 exception */
6417 gen_op_jmp_v(cpu_T[0]);
6418 /* pop selector */
6419 gen_op_addl_A0_im(1 << dflag);
6420 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6421 gen_op_movl_seg_T0_vm(R_CS);
6422 /* add stack offset */
6423 gen_stack_update(s, val + (2 << dflag));
6425 gen_eob(s);
6426 break;
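/* In protected mode the lret_protected helper performs the full
   privilege and segment checks; the manual pop sequence above is
   used only in real and vm86 modes, where CS is reloaded without
   checks via gen_op_movl_seg_T0_vm. */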
6427 case 0xcb: /* lret */
6428 val = 0;
6429 goto do_lret;
6430 case 0xcf: /* iret */
6431 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6432 if (!s->pe) {
6433 /* real mode */
6434 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6435 set_cc_op(s, CC_OP_EFLAGS);
6436 } else if (s->vm86) {
6437 if (s->iopl != 3) {
6438 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6439 } else {
6440 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6441 set_cc_op(s, CC_OP_EFLAGS);
6443 } else {
6444 gen_update_cc_op(s);
6445 gen_jmp_im(pc_start - s->cs_base);
6446 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6447 tcg_const_i32(s->pc - s->cs_base));
6448 set_cc_op(s, CC_OP_EFLAGS);
6450 gen_eob(s);
6451 break;
6452 case 0xe8: /* call im */
6454 if (dflag != MO_16) {
6455 tval = (int32_t)insn_get(env, s, MO_32);
6456 } else {
6457 tval = (int16_t)insn_get(env, s, MO_16);
6459 next_eip = s->pc - s->cs_base;
6460 tval += next_eip;
6461 if (dflag == MO_16) {
6462 tval &= 0xffff;
6463 } else if (!CODE64(s)) {
6464 tval &= 0xffffffff;
6466 tcg_gen_movi_tl(cpu_T[0], next_eip);
6467 gen_push_v(s, cpu_T[0]);
6468 gen_jmp(s, tval);
6470 break;
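/* The masking above truncates the computed call target to 16 or
   32 bits so that EIP wraps according to the current operand size;
   only 64-bit code keeps the full target address. */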
6471 case 0x9a: /* lcall im */
6473 unsigned int selector, offset;
6475 if (CODE64(s))
6476 goto illegal_op;
6477 ot = dflag;
6478 offset = insn_get(env, s, ot);
6479 selector = insn_get(env, s, MO_16);
6481 tcg_gen_movi_tl(cpu_T[0], selector);
6482 tcg_gen_movi_tl(cpu_T[1], offset);
6484 goto do_lcall;
6485 case 0xe9: /* jmp im */
6486 if (dflag != MO_16) {
6487 tval = (int32_t)insn_get(env, s, MO_32);
6488 } else {
6489 tval = (int16_t)insn_get(env, s, MO_16);
6491 tval += s->pc - s->cs_base;
6492 if (dflag == MO_16) {
6493 tval &= 0xffff;
6494 } else if (!CODE64(s)) {
6495 tval &= 0xffffffff;
6497 gen_jmp(s, tval);
6498 break;
6499 case 0xea: /* ljmp im */
6501 unsigned int selector, offset;
6503 if (CODE64(s))
6504 goto illegal_op;
6505 ot = dflag;
6506 offset = insn_get(env, s, ot);
6507 selector = insn_get(env, s, MO_16);
6509 tcg_gen_movi_tl(cpu_T[0], selector);
6510 tcg_gen_movi_tl(cpu_T[1], offset);
6512 goto do_ljmp;
6513 case 0xeb: /* jmp Jb */
6514 tval = (int8_t)insn_get(env, s, MO_8);
6515 tval += s->pc - s->cs_base;
6516 if (dflag == MO_16) {
6517 tval &= 0xffff;
6519 gen_jmp(s, tval);
6520 break;
6521 case 0x70 ... 0x7f: /* jcc Jb */
6522 tval = (int8_t)insn_get(env, s, MO_8);
6523 goto do_jcc;
6524 case 0x180 ... 0x18f: /* jcc Jv */
6525 if (dflag != MO_16) {
6526 tval = (int32_t)insn_get(env, s, MO_32);
6527 } else {
6528 tval = (int16_t)insn_get(env, s, MO_16);
6530 do_jcc:
6531 next_eip = s->pc - s->cs_base;
6532 tval += next_eip;
6533 if (dflag == MO_16) {
6534 tval &= 0xffff;
6536 gen_jcc(s, b, tval, next_eip);
6537 break;
6539 case 0x190 ... 0x19f: /* setcc Gv */
6540 modrm = cpu_ldub_code(env, s->pc++);
6541 gen_setcc1(s, b, cpu_T[0]);
6542 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6543 break;
6544 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6545 if (!(s->cpuid_features & CPUID_CMOV)) {
6546 goto illegal_op;
6548 ot = dflag;
6549 modrm = cpu_ldub_code(env, s->pc++);
6550 reg = ((modrm >> 3) & 7) | rex_r;
6551 gen_cmovcc1(env, s, ot, b, modrm, reg);
6552 break;
6554 /************************/
6555 /* flags */
6556 case 0x9c: /* pushf */
6557 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6558 if (s->vm86 && s->iopl != 3) {
6559 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6560 } else {
6561 gen_update_cc_op(s);
6562 gen_helper_read_eflags(cpu_T[0], cpu_env);
6563 gen_push_v(s, cpu_T[0]);
6565 break;
6566 case 0x9d: /* popf */
6567 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6568 if (s->vm86 && s->iopl != 3) {
6569 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6570 } else {
6571 ot = gen_pop_T0(s);
6572 if (s->cpl == 0) {
6573 if (dflag != MO_16) {
6574 gen_helper_write_eflags(cpu_env, cpu_T[0],
6575 tcg_const_i32((TF_MASK | AC_MASK |
6576 ID_MASK | NT_MASK |
6577 IF_MASK |
6578 IOPL_MASK)));
6579 } else {
6580 gen_helper_write_eflags(cpu_env, cpu_T[0],
6581 tcg_const_i32((TF_MASK | AC_MASK |
6582 ID_MASK | NT_MASK |
6583 IF_MASK | IOPL_MASK)
6584 & 0xffff));
6586 } else {
6587 if (s->cpl <= s->iopl) {
6588 if (dflag != MO_16) {
6589 gen_helper_write_eflags(cpu_env, cpu_T[0],
6590 tcg_const_i32((TF_MASK |
6591 AC_MASK |
6592 ID_MASK |
6593 NT_MASK |
6594 IF_MASK)));
6595 } else {
6596 gen_helper_write_eflags(cpu_env, cpu_T[0],
6597 tcg_const_i32((TF_MASK |
6598 AC_MASK |
6599 ID_MASK |
6600 NT_MASK |
6601 IF_MASK)
6602 & 0xffff));
6604 } else {
6605 if (dflag != MO_16) {
6606 gen_helper_write_eflags(cpu_env, cpu_T[0],
6607 tcg_const_i32((TF_MASK | AC_MASK |
6608 ID_MASK | NT_MASK)));
6609 } else {
6610 gen_helper_write_eflags(cpu_env, cpu_T[0],
6611 tcg_const_i32((TF_MASK | AC_MASK |
6612 ID_MASK | NT_MASK)
6613 & 0xffff));
6617 gen_pop_update(s, ot);
6618 set_cc_op(s, CC_OP_EFLAGS);
6619 /* abort translation because TF/AC flag may change */
6620 gen_jmp_im(s->pc - s->cs_base);
6621 gen_eob(s);
6623 break;
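/* The three gen_helper_write_eflags masks above follow the
   architectural POPF rules: at CPL 0 both IF and IOPL may be
   changed, at CPL <= IOPL only IF may be changed, and otherwise
   neither.  The 16-bit forms additionally truncate the mask to the
   low word. */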
6624 case 0x9e: /* sahf */
6625 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6626 goto illegal_op;
6627 gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
6628 gen_compute_eflags(s);
6629 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6630 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6631 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6632 break;
6633 case 0x9f: /* lahf */
6634 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6635 goto illegal_op;
6636 gen_compute_eflags(s);
6637 /* Note: gen_compute_eflags() only gives the condition codes */
6638 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6639 gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
6640 break;
6641 case 0xf5: /* cmc */
6642 gen_compute_eflags(s);
6643 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6644 break;
6645 case 0xf8: /* clc */
6646 gen_compute_eflags(s);
6647 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6648 break;
6649 case 0xf9: /* stc */
6650 gen_compute_eflags(s);
6651 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6652 break;
6653 case 0xfc: /* cld */
6654 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6655 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6656 break;
6657 case 0xfd: /* std */
6658 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6659 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6660 break;
6662 /************************/
6663 /* bit operations */
6664 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6665 ot = dflag;
6666 modrm = cpu_ldub_code(env, s->pc++);
6667 op = (modrm >> 3) & 7;
6668 mod = (modrm >> 6) & 3;
6669 rm = (modrm & 7) | REX_B(s);
6670 if (mod != 3) {
6671 s->rip_offset = 1;
6672 gen_lea_modrm(env, s, modrm);
6673 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6674 } else {
6675 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6677 /* load shift */
6678 val = cpu_ldub_code(env, s->pc++);
6679 tcg_gen_movi_tl(cpu_T[1], val);
6680 if (op < 4)
6681 goto illegal_op;
6682 op -= 4;
6683 goto bt_op;
6684 case 0x1a3: /* bt Gv, Ev */
6685 op = 0;
6686 goto do_btx;
6687 case 0x1ab: /* bts */
6688 op = 1;
6689 goto do_btx;
6690 case 0x1b3: /* btr */
6691 op = 2;
6692 goto do_btx;
6693 case 0x1bb: /* btc */
6694 op = 3;
6695 do_btx:
6696 ot = dflag;
6697 modrm = cpu_ldub_code(env, s->pc++);
6698 reg = ((modrm >> 3) & 7) | rex_r;
6699 mod = (modrm >> 6) & 3;
6700 rm = (modrm & 7) | REX_B(s);
6701 gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
6702 if (mod != 3) {
6703 gen_lea_modrm(env, s, modrm);
6704 /* specific case: we need to add a displacement */
6705 gen_exts(ot, cpu_T[1]);
6706 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6707 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6708 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6709 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6710 } else {
6711 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6713 bt_op:
6714 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6715 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6716 switch(op) {
6717 case 0:
6718 break;
6719 case 1:
6720 tcg_gen_movi_tl(cpu_tmp0, 1);
6721 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6722 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6723 break;
6724 case 2:
6725 tcg_gen_movi_tl(cpu_tmp0, 1);
6726 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6727 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6728 break;
6729 default:
6730 case 3:
6731 tcg_gen_movi_tl(cpu_tmp0, 1);
6732 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6733 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6734 break;
6736 if (op != 0) {
6737 if (mod != 3) {
6738 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6739 } else {
6740 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
6744 /* Delay all CC updates until after the store above. Note that
6745 C is the result of the test, Z is unchanged, and the others
6746 are all undefined. */
6747 switch (s->cc_op) {
6748 case CC_OP_MULB ... CC_OP_MULQ:
6749 case CC_OP_ADDB ... CC_OP_ADDQ:
6750 case CC_OP_ADCB ... CC_OP_ADCQ:
6751 case CC_OP_SUBB ... CC_OP_SUBQ:
6752 case CC_OP_SBBB ... CC_OP_SBBQ:
6753 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6754 case CC_OP_INCB ... CC_OP_INCQ:
6755 case CC_OP_DECB ... CC_OP_DECQ:
6756 case CC_OP_SHLB ... CC_OP_SHLQ:
6757 case CC_OP_SARB ... CC_OP_SARQ:
6758 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6759 /* Z was going to be computed from the non-zero status of CC_DST.
6760 We can get that same Z value (and the new C value) by leaving
6761 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6762 same width. */
6763 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6764 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6765 break;
6766 default:
6767 /* Otherwise, generate EFLAGS and replace the C bit. */
6768 gen_compute_eflags(s);
6769 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6770 ctz32(CC_C), 1);
6771 break;
6773 break;
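/* For the memory forms of bt/bts/btr/btc, the register bit offset
   is signed and may reach outside the addressed operand: the code
   above adds (offset / width) * width bytes to A0, matching the
   hardware bit-string addressing, before reducing the offset modulo
   the operand width at bt_op. */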
6774 case 0x1bc: /* bsf / tzcnt */
6775 case 0x1bd: /* bsr / lzcnt */
6776 ot = dflag;
6777 modrm = cpu_ldub_code(env, s->pc++);
6778 reg = ((modrm >> 3) & 7) | rex_r;
6779 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6780 gen_extu(ot, cpu_T[0]);
6782 /* Note that lzcnt and tzcnt are in different extensions. */
6783 if ((prefixes & PREFIX_REPZ)
6784 && (b & 1
6785 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6786 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6787 int size = 8 << ot;
6788 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6789 if (b & 1) {
6790 /* For lzcnt, reduce the target_ulong result by the
6791 number of zeros that we expect to find at the top. */
6792 gen_helper_clz(cpu_T[0], cpu_T[0]);
6793 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6794 } else {
6795 /* For tzcnt, a zero input must return the operand size:
6796 force all bits outside the operand size to 1. */
6797 target_ulong mask = (target_ulong)-2 << (size - 1);
6798 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6799 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6801 /* For lzcnt/tzcnt, C and Z bits are defined and are
6802 related to the result. */
6803 gen_op_update1_cc();
6804 set_cc_op(s, CC_OP_BMILGB + ot);
6805 } else {
6806 /* For bsr/bsf, only the Z bit is defined and it is related
6807 to the input and not the result. */
6808 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6809 set_cc_op(s, CC_OP_LOGICB + ot);
6810 if (b & 1) {
6811 /* For bsr, return the bit index of the first 1 bit,
6812 not the count of leading zeros. */
6813 gen_helper_clz(cpu_T[0], cpu_T[0]);
6814 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6815 } else {
6816 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6818 /* ??? The manual says that the output is undefined when the
6819 input is zero, but real hardware leaves it unchanged, and
6820 real programs appear to depend on that. */
6821 tcg_gen_movi_tl(cpu_tmp0, 0);
6822 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6823 cpu_regs[reg], cpu_T[0]);
6825 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
6826 break;
6827 /************************/
6828 /* bcd */
6829 case 0x27: /* daa */
6830 if (CODE64(s))
6831 goto illegal_op;
6832 gen_update_cc_op(s);
6833 gen_helper_daa(cpu_env);
6834 set_cc_op(s, CC_OP_EFLAGS);
6835 break;
6836 case 0x2f: /* das */
6837 if (CODE64(s))
6838 goto illegal_op;
6839 gen_update_cc_op(s);
6840 gen_helper_das(cpu_env);
6841 set_cc_op(s, CC_OP_EFLAGS);
6842 break;
6843 case 0x37: /* aaa */
6844 if (CODE64(s))
6845 goto illegal_op;
6846 gen_update_cc_op(s);
6847 gen_helper_aaa(cpu_env);
6848 set_cc_op(s, CC_OP_EFLAGS);
6849 break;
6850 case 0x3f: /* aas */
6851 if (CODE64(s))
6852 goto illegal_op;
6853 gen_update_cc_op(s);
6854 gen_helper_aas(cpu_env);
6855 set_cc_op(s, CC_OP_EFLAGS);
6856 break;
6857 case 0xd4: /* aam */
6858 if (CODE64(s))
6859 goto illegal_op;
6860 val = cpu_ldub_code(env, s->pc++);
6861 if (val == 0) {
6862 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6863 } else {
6864 gen_helper_aam(cpu_env, tcg_const_i32(val));
6865 set_cc_op(s, CC_OP_LOGICB);
6867 break;
6868 case 0xd5: /* aad */
6869 if (CODE64(s))
6870 goto illegal_op;
6871 val = cpu_ldub_code(env, s->pc++);
6872 gen_helper_aad(cpu_env, tcg_const_i32(val));
6873 set_cc_op(s, CC_OP_LOGICB);
6874 break;
6875 /************************/
6876 /* misc */
6877 case 0x90: /* nop */
6878 /* XXX: correct lock test for all insn */
6879 if (prefixes & PREFIX_LOCK) {
6880 goto illegal_op;
6882 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6883 if (REX_B(s)) {
6884 goto do_xchg_reg_eax;
6886 if (prefixes & PREFIX_REPZ) {
6887 gen_update_cc_op(s);
6888 gen_jmp_im(pc_start - s->cs_base);
6889 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6890 s->is_jmp = DISAS_TB_JUMP;
6892 break;
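/* 0x90 is special-cased: with REX.B it is really xchg %eax,%r8d,
   and with a repz (F3) prefix it is PAUSE, which ends the TB via
   the pause helper so control returns to the main execution loop. */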
6893 case 0x9b: /* fwait */
6894 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6895 (HF_MP_MASK | HF_TS_MASK)) {
6896 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6897 } else {
6898 gen_update_cc_op(s);
6899 gen_jmp_im(pc_start - s->cs_base);
6900 gen_helper_fwait(cpu_env);
6902 break;
6903 case 0xcc: /* int3 */
6904 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6905 break;
6906 case 0xcd: /* int N */
6907 val = cpu_ldub_code(env, s->pc++);
6908 if (s->vm86 && s->iopl != 3) {
6909 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6910 } else {
6911 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6913 break;
6914 case 0xce: /* into */
6915 if (CODE64(s))
6916 goto illegal_op;
6917 gen_update_cc_op(s);
6918 gen_jmp_im(pc_start - s->cs_base);
6919 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6920 break;
6921 #ifdef WANT_ICEBP
6922 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6923 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6924 #if 1
6925 gen_debug(s, pc_start - s->cs_base);
6926 #else
6927 /* start debug */
6928 tb_flush(CPU(x86_env_get_cpu(env)));
6929 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6930 #endif
6931 break;
6932 #endif
6933 case 0xfa: /* cli */
6934 if (!s->vm86) {
6935 if (s->cpl <= s->iopl) {
6936 gen_helper_cli(cpu_env);
6937 } else {
6938 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6940 } else {
6941 if (s->iopl == 3) {
6942 gen_helper_cli(cpu_env);
6943 } else {
6944 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6947 break;
6948 case 0xfb: /* sti */
6949 if (!s->vm86) {
6950 if (s->cpl <= s->iopl) {
6951 gen_sti:
6952 gen_helper_sti(cpu_env);
6953 /* interrupts are enabled only after the first insn following sti */
6954 /* If several consecutive instructions inhibit interrupts, only the
6955 _first_ one actually sets the inhibit flag */
6956 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6957 gen_helper_set_inhibit_irq(cpu_env);
6958 /* give a chance to handle pending irqs */
6959 gen_jmp_im(s->pc - s->cs_base);
6960 gen_eob(s);
6961 } else {
6962 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6964 } else {
6965 if (s->iopl == 3) {
6966 goto gen_sti;
6967 } else {
6968 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6971 break;
6972 case 0x62: /* bound */
6973 if (CODE64(s))
6974 goto illegal_op;
6975 ot = dflag;
6976 modrm = cpu_ldub_code(env, s->pc++);
6977 reg = (modrm >> 3) & 7;
6978 mod = (modrm >> 6) & 3;
6979 if (mod == 3)
6980 goto illegal_op;
6981 gen_op_mov_v_reg(ot, cpu_T[0], reg);
6982 gen_lea_modrm(env, s, modrm);
6983 gen_jmp_im(pc_start - s->cs_base);
6984 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6985 if (ot == MO_16) {
6986 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6987 } else {
6988 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6990 break;
6991 case 0x1c8 ... 0x1cf: /* bswap reg */
6992 reg = (b & 7) | REX_B(s);
6993 #ifdef TARGET_X86_64
6994 if (dflag == MO_64) {
6995 gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
6996 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6997 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
6998 } else
6999 #endif
7001 gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
7002 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7003 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
7004 gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
7006 break;
7007 case 0xd6: /* salc */
7008 if (CODE64(s))
7009 goto illegal_op;
7010 gen_compute_eflags_c(s, cpu_T[0]);
7011 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
7012 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
7013 break;
7014 case 0xe0: /* loopnz */
7015 case 0xe1: /* loopz */
7016 case 0xe2: /* loop */
7017 case 0xe3: /* jecxz */
7019 TCGLabel *l1, *l2, *l3;
7021 tval = (int8_t)insn_get(env, s, MO_8);
7022 next_eip = s->pc - s->cs_base;
7023 tval += next_eip;
7024 if (dflag == MO_16) {
7025 tval &= 0xffff;
7028 l1 = gen_new_label();
7029 l2 = gen_new_label();
7030 l3 = gen_new_label();
7031 b &= 3;
7032 switch(b) {
7033 case 0: /* loopnz */
7034 case 1: /* loopz */
7035 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7036 gen_op_jz_ecx(s->aflag, l3);
7037 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7038 break;
7039 case 2: /* loop */
7040 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7041 gen_op_jnz_ecx(s->aflag, l1);
7042 break;
7043 default:
7044 case 3: /* jecxz */
7045 gen_op_jz_ecx(s->aflag, l1);
7046 break;
7049 gen_set_label(l3);
7050 gen_jmp_im(next_eip);
7051 tcg_gen_br(l2);
7053 gen_set_label(l1);
7054 gen_jmp_im(tval);
7055 gen_set_label(l2);
7056 gen_eob(s);
7058 break;
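/* Three labels implement the loop family above: l1 is the taken
   branch to the displacement target, the code after l3 is the
   fall-through path that continues at the next instruction, and l2
   is the common exit once the appropriate EIP has been stored. */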
7059 case 0x130: /* wrmsr */
7060 case 0x132: /* rdmsr */
7061 if (s->cpl != 0) {
7062 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7063 } else {
7064 gen_update_cc_op(s);
7065 gen_jmp_im(pc_start - s->cs_base);
7066 if (b & 2) {
7067 gen_helper_rdmsr(cpu_env);
7068 } else {
7069 gen_helper_wrmsr(cpu_env);
7072 break;
7073 case 0x131: /* rdtsc */
7074 gen_update_cc_op(s);
7075 gen_jmp_im(pc_start - s->cs_base);
7076 if (s->tb->cflags & CF_USE_ICOUNT) {
7077 gen_io_start();
7079 gen_helper_rdtsc(cpu_env);
7080 if (s->tb->cflags & CF_USE_ICOUNT) {
7081 gen_io_end();
7082 gen_jmp(s, s->pc - s->cs_base);
7084 break;
7085 case 0x133: /* rdpmc */
7086 gen_update_cc_op(s);
7087 gen_jmp_im(pc_start - s->cs_base);
7088 gen_helper_rdpmc(cpu_env);
7089 break;
7090 case 0x134: /* sysenter */
7091 /* For Intel, SYSENTER remains valid in 64-bit mode */
7092 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7093 goto illegal_op;
7094 if (!s->pe) {
7095 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7096 } else {
7097 gen_update_cc_op(s);
7098 gen_jmp_im(pc_start - s->cs_base);
7099 gen_helper_sysenter(cpu_env);
7100 gen_eob(s);
7102 break;
7103 case 0x135: /* sysexit */
7104 /* For Intel, SYSEXIT remains valid in 64-bit mode */
7105 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7106 goto illegal_op;
7107 if (!s->pe) {
7108 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7109 } else {
7110 gen_update_cc_op(s);
7111 gen_jmp_im(pc_start - s->cs_base);
7112 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7113 gen_eob(s);
7115 break;
7116 #ifdef TARGET_X86_64
7117 case 0x105: /* syscall */
7118 /* XXX: is it usable in real mode? */
7119 gen_update_cc_op(s);
7120 gen_jmp_im(pc_start - s->cs_base);
7121 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7122 gen_eob(s);
7123 break;
7124 case 0x107: /* sysret */
7125 if (!s->pe) {
7126 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7127 } else {
7128 gen_update_cc_op(s);
7129 gen_jmp_im(pc_start - s->cs_base);
7130 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7131 /* condition codes are modified only in long mode */
7132 if (s->lma) {
7133 set_cc_op(s, CC_OP_EFLAGS);
7135 gen_eob(s);
7137 break;
7138 #endif
7139 case 0x1a2: /* cpuid */
7140 gen_update_cc_op(s);
7141 gen_jmp_im(pc_start - s->cs_base);
7142 gen_helper_cpuid(cpu_env);
7143 break;
7144 case 0xf4: /* hlt */
7145 if (s->cpl != 0) {
7146 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7147 } else {
7148 gen_update_cc_op(s);
7149 gen_jmp_im(pc_start - s->cs_base);
7150 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7151 s->is_jmp = DISAS_TB_JUMP;
7153 break;
7154 case 0x100:
7155 modrm = cpu_ldub_code(env, s->pc++);
7156 mod = (modrm >> 6) & 3;
7157 op = (modrm >> 3) & 7;
7158 switch(op) {
7159 case 0: /* sldt */
7160 if (!s->pe || s->vm86)
7161 goto illegal_op;
7162 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7163 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7164 ot = mod == 3 ? dflag : MO_16;
7165 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7166 break;
7167 case 2: /* lldt */
7168 if (!s->pe || s->vm86)
7169 goto illegal_op;
7170 if (s->cpl != 0) {
7171 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7172 } else {
7173 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7174 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7175 gen_jmp_im(pc_start - s->cs_base);
7176 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7177 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7179 break;
7180 case 1: /* str */
7181 if (!s->pe || s->vm86)
7182 goto illegal_op;
7183 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7184 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7185 ot = mod == 3 ? dflag : MO_16;
7186 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7187 break;
7188 case 3: /* ltr */
7189 if (!s->pe || s->vm86)
7190 goto illegal_op;
7191 if (s->cpl != 0) {
7192 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7193 } else {
7194 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7195 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7196 gen_jmp_im(pc_start - s->cs_base);
7197 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7198 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7200 break;
7201 case 4: /* verr */
7202 case 5: /* verw */
7203 if (!s->pe || s->vm86)
7204 goto illegal_op;
7205 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7206 gen_update_cc_op(s);
7207 if (op == 4) {
7208 gen_helper_verr(cpu_env, cpu_T[0]);
7209 } else {
7210 gen_helper_verw(cpu_env, cpu_T[0]);
7212 set_cc_op(s, CC_OP_EFLAGS);
7213 break;
7214 default:
7215 goto illegal_op;
7217 break;
7218 case 0x101:
7219 modrm = cpu_ldub_code(env, s->pc++);
7220 mod = (modrm >> 6) & 3;
7221 op = (modrm >> 3) & 7;
7222 rm = modrm & 7;
7223 switch(op) {
7224 case 0: /* sgdt */
7225 if (mod == 3)
7226 goto illegal_op;
7227 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7228 gen_lea_modrm(env, s, modrm);
7229 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7230 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7231 gen_add_A0_im(s, 2);
7232 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7233 if (dflag == MO_16) {
7234 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7236 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7237 break;
7238 case 1:
7239 if (mod == 3) {
7240 switch (rm) {
7241 case 0: /* monitor */
7242 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7243 s->cpl != 0)
7244 goto illegal_op;
7245 gen_update_cc_op(s);
7246 gen_jmp_im(pc_start - s->cs_base);
7247 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7248 gen_extu(s->aflag, cpu_A0);
7249 gen_add_A0_ds_seg(s);
7250 gen_helper_monitor(cpu_env, cpu_A0);
7251 break;
7252 case 1: /* mwait */
7253 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7254 s->cpl != 0)
7255 goto illegal_op;
7256 gen_update_cc_op(s);
7257 gen_jmp_im(pc_start - s->cs_base);
7258 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7259 gen_eob(s);
7260 break;
7261 case 2: /* clac */
7262 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7263 s->cpl != 0) {
7264 goto illegal_op;
7266 gen_helper_clac(cpu_env);
7267 gen_jmp_im(s->pc - s->cs_base);
7268 gen_eob(s);
7269 break;
7270 case 3: /* stac */
7271 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7272 s->cpl != 0) {
7273 goto illegal_op;
7275 gen_helper_stac(cpu_env);
7276 gen_jmp_im(s->pc - s->cs_base);
7277 gen_eob(s);
7278 break;
7279 default:
7280 goto illegal_op;
7282 } else { /* sidt */
7283 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7284 gen_lea_modrm(env, s, modrm);
7285 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7286 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7287 gen_add_A0_im(s, 2);
7288 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7289 if (dflag == MO_16) {
7290 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7292 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7294 break;
7295 case 2: /* lgdt */
7296 case 3: /* lidt */
7297 if (mod == 3) {
7298 gen_update_cc_op(s);
7299 gen_jmp_im(pc_start - s->cs_base);
7300 switch(rm) {
7301 case 0: /* VMRUN */
7302 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7303 goto illegal_op;
7304 if (s->cpl != 0) {
7305 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7306 break;
7307 } else {
7308 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7309 tcg_const_i32(s->pc - pc_start));
7310 tcg_gen_exit_tb(0);
7311 s->is_jmp = DISAS_TB_JUMP;
7313 break;
7314 case 1: /* VMMCALL */
7315 if (!(s->flags & HF_SVME_MASK))
7316 goto illegal_op;
7317 gen_helper_vmmcall(cpu_env);
7318 break;
7319 case 2: /* VMLOAD */
7320 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7321 goto illegal_op;
7322 if (s->cpl != 0) {
7323 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7324 break;
7325 } else {
7326 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7328 break;
7329 case 3: /* VMSAVE */
7330 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7331 goto illegal_op;
7332 if (s->cpl != 0) {
7333 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7334 break;
7335 } else {
7336 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7338 break;
7339 case 4: /* STGI */
7340 if ((!(s->flags & HF_SVME_MASK) &&
7341 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7342 !s->pe)
7343 goto illegal_op;
7344 if (s->cpl != 0) {
7345 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7346 break;
7347 } else {
7348 gen_helper_stgi(cpu_env);
7350 break;
7351 case 5: /* CLGI */
7352 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7353 goto illegal_op;
7354 if (s->cpl != 0) {
7355 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7356 break;
7357 } else {
7358 gen_helper_clgi(cpu_env);
7360 break;
7361 case 6: /* SKINIT */
7362 if ((!(s->flags & HF_SVME_MASK) &&
7363 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7364 !s->pe)
7365 goto illegal_op;
7366 gen_helper_skinit(cpu_env);
7367 break;
7368 case 7: /* INVLPGA */
7369 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7370 goto illegal_op;
7371 if (s->cpl != 0) {
7372 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7373 break;
7374 } else {
7375 gen_helper_invlpga(cpu_env,
7376 tcg_const_i32(s->aflag - 1));
7378 break;
7379 default:
7380 goto illegal_op;
7382 } else if (s->cpl != 0) {
7383 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7384 } else {
7385 gen_svm_check_intercept(s, pc_start,
7386 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7387 gen_lea_modrm(env, s, modrm);
7388 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
7389 gen_add_A0_im(s, 2);
7390 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7391 if (dflag == MO_16) {
7392 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7394 if (op == 2) {
7395 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7396 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7397 } else {
7398 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7399 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7402 break;
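/* For both the sgdt/sidt stores above and the lgdt/lidt loads here,
   a 16-bit operand size uses only 24 bits of the descriptor-table
   base (hence the 0xffffff mask), preserving 286-compatible
   behaviour. */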
7403 case 4: /* smsw */
7404 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7405 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7406 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7407 #else
7408 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7409 #endif
7410 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
7411 break;
7412 case 6: /* lmsw */
7413 if (s->cpl != 0) {
7414 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7415 } else {
7416 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7417 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7418 gen_helper_lmsw(cpu_env, cpu_T[0]);
7419 gen_jmp_im(s->pc - s->cs_base);
7420 gen_eob(s);
7422 break;
7423 case 7:
7424 if (mod != 3) { /* invlpg */
7425 if (s->cpl != 0) {
7426 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7427 } else {
7428 gen_update_cc_op(s);
7429 gen_jmp_im(pc_start - s->cs_base);
7430 gen_lea_modrm(env, s, modrm);
7431 gen_helper_invlpg(cpu_env, cpu_A0);
7432 gen_jmp_im(s->pc - s->cs_base);
7433 gen_eob(s);
7435 } else {
7436 switch (rm) {
7437 case 0: /* swapgs */
7438 #ifdef TARGET_X86_64
7439 if (CODE64(s)) {
7440 if (s->cpl != 0) {
7441 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7442 } else {
7443 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7444 offsetof(CPUX86State,segs[R_GS].base));
7445 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7446 offsetof(CPUX86State,kernelgsbase));
7447 tcg_gen_st_tl(cpu_T[1], cpu_env,
7448 offsetof(CPUX86State,segs[R_GS].base));
7449 tcg_gen_st_tl(cpu_T[0], cpu_env,
7450 offsetof(CPUX86State,kernelgsbase));
7452 } else
7453 #endif
7455 goto illegal_op;
7457 break;
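/* swapgs simply exchanges the current GS base with the value of the
   KERNEL_GS_BASE MSR kept in env->kernelgsbase; it is valid only in
   64-bit mode and at CPL 0, as checked above. */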
7458 case 1: /* rdtscp */
7459 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7460 goto illegal_op;
7461 gen_update_cc_op(s);
7462 gen_jmp_im(pc_start - s->cs_base);
7463 if (s->tb->cflags & CF_USE_ICOUNT) {
7464 gen_io_start();
7466 gen_helper_rdtscp(cpu_env);
7467 if (s->tb->cflags & CF_USE_ICOUNT) {
7468 gen_io_end();
7469 gen_jmp(s, s->pc - s->cs_base);
7471 break;
7472 default:
7473 goto illegal_op;
7476 break;
7477 default:
7478 goto illegal_op;
7480 break;
7481 case 0x108: /* invd */
7482 case 0x109: /* wbinvd */
7483 if (s->cpl != 0) {
7484 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7485 } else {
7486 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7487 /* nothing to do */
7489 break;
7490 case 0x63: /* arpl or movslS (x86_64) */
7491 #ifdef TARGET_X86_64
7492 if (CODE64(s)) {
7493 int d_ot;
7494 /* d_ot is the size of destination */
7495 d_ot = dflag;
7497 modrm = cpu_ldub_code(env, s->pc++);
7498 reg = ((modrm >> 3) & 7) | rex_r;
7499 mod = (modrm >> 6) & 3;
7500 rm = (modrm & 7) | REX_B(s);
7502 if (mod == 3) {
7503 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
7504 /* sign extend */
7505 if (d_ot == MO_64) {
7506 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7508 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7509 } else {
7510 gen_lea_modrm(env, s, modrm);
7511 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
7512 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7514 } else
7515 #endif
7517 TCGLabel *label1;
7518 TCGv t0, t1, t2, a0;
7520 if (!s->pe || s->vm86)
7521 goto illegal_op;
7522 t0 = tcg_temp_local_new();
7523 t1 = tcg_temp_local_new();
7524 t2 = tcg_temp_local_new();
7525 ot = MO_16;
7526 modrm = cpu_ldub_code(env, s->pc++);
7527 reg = (modrm >> 3) & 7;
7528 mod = (modrm >> 6) & 3;
7529 rm = modrm & 7;
7530 if (mod != 3) {
7531 gen_lea_modrm(env, s, modrm);
7532 gen_op_ld_v(s, ot, t0, cpu_A0);
7533 a0 = tcg_temp_local_new();
7534 tcg_gen_mov_tl(a0, cpu_A0);
7535 } else {
7536 gen_op_mov_v_reg(ot, t0, rm);
7537 TCGV_UNUSED(a0);
7539 gen_op_mov_v_reg(ot, t1, reg);
7540 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7541 tcg_gen_andi_tl(t1, t1, 3);
7542 tcg_gen_movi_tl(t2, 0);
7543 label1 = gen_new_label();
7544 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7545 tcg_gen_andi_tl(t0, t0, ~3);
7546 tcg_gen_or_tl(t0, t0, t1);
7547 tcg_gen_movi_tl(t2, CC_Z);
7548 gen_set_label(label1);
7549 if (mod != 3) {
7550 gen_op_st_v(s, ot, t0, a0);
7551 tcg_temp_free(a0);
7552 } else {
7553 gen_op_mov_reg_v(ot, rm, t0);
7555 gen_compute_eflags(s);
7556 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7557 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7558 tcg_temp_free(t0);
7559 tcg_temp_free(t1);
7560 tcg_temp_free(t2);
7562 break;
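/* The sequence above implements arpl: when the destination
   selector's RPL (low two bits) is below the source's, it is raised
   to the source RPL and ZF is set via t2 = CC_Z; otherwise ZF is
   cleared.  Local temps are required because the values must
   survive the conditional branch. */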
7563 case 0x102: /* lar */
7564 case 0x103: /* lsl */
7566 TCGLabel *label1;
7567 TCGv t0;
7568 if (!s->pe || s->vm86)
7569 goto illegal_op;
7570 ot = dflag != MO_16 ? MO_32 : MO_16;
7571 modrm = cpu_ldub_code(env, s->pc++);
7572 reg = ((modrm >> 3) & 7) | rex_r;
7573 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7574 t0 = tcg_temp_local_new();
7575 gen_update_cc_op(s);
7576 if (b == 0x102) {
7577 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7578 } else {
7579 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7581 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7582 label1 = gen_new_label();
7583 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7584 gen_op_mov_reg_v(ot, reg, t0);
7585 gen_set_label(label1);
7586 set_cc_op(s, CC_OP_EFLAGS);
7587 tcg_temp_free(t0);
7589 break;
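/* lar/lsl write the destination register only on success: the
   helpers set CC_Z in cc_src when the access is legal, and the
   brcondi above skips the register write when that bit is clear. */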
7590 case 0x118:
7591 modrm = cpu_ldub_code(env, s->pc++);
7592 mod = (modrm >> 6) & 3;
7593 op = (modrm >> 3) & 7;
7594 switch(op) {
7595 case 0: /* prefetchnta */
7596 case 1: /* prefetcht0 */
7597 case 2: /* prefetcht1 */
7598 case 3: /* prefetcht2 */
7599 if (mod == 3)
7600 goto illegal_op;
7601 gen_lea_modrm(env, s, modrm);
7602 /* nothing more to do */
7603 break;
7604 default: /* nop (multi byte) */
7605 gen_nop_modrm(env, s, modrm);
7606 break;
7608 break;
7609 case 0x119 ... 0x11f: /* nop (multi byte) */
7610 modrm = cpu_ldub_code(env, s->pc++);
7611 gen_nop_modrm(env, s, modrm);
7612 break;
7613 case 0x120: /* mov reg, crN */
7614 case 0x122: /* mov crN, reg */
7615 if (s->cpl != 0) {
7616 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7617 } else {
7618 modrm = cpu_ldub_code(env, s->pc++);
7619 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7620 * AMD documentation (24594.pdf) and testing of
7621 * Intel 386 and 486 processors all show that the mod bits
7622 * are assumed to be 1's, regardless of actual values.
7623 */
7624 rm = (modrm & 7) | REX_B(s);
7625 reg = ((modrm >> 3) & 7) | rex_r;
7626 if (CODE64(s))
7627 ot = MO_64;
7628 else
7629 ot = MO_32;
7630 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7631 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7632 reg = 8;
7634 switch(reg) {
7635 case 0:
7636 case 2:
7637 case 3:
7638 case 4:
7639 case 8:
7640 gen_update_cc_op(s);
7641 gen_jmp_im(pc_start - s->cs_base);
7642 if (b & 2) {
7643 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7644 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7645 cpu_T[0]);
7646 gen_jmp_im(s->pc - s->cs_base);
7647 gen_eob(s);
7648 } else {
7649 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7650 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7652 break;
7653 default:
7654 goto illegal_op;
7657 break;
7658 case 0x121: /* mov reg, drN */
7659 case 0x123: /* mov drN, reg */
7660 if (s->cpl != 0) {
7661 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7662 } else {
7663 modrm = cpu_ldub_code(env, s->pc++);
7664 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7665 * AMD documentation (24594.pdf) and testing of
7666 * Intel 386 and 486 processors all show that the mod bits
7667 * are assumed to be 1's, regardless of actual values.
7668 */
7669 rm = (modrm & 7) | REX_B(s);
7670 reg = ((modrm >> 3) & 7) | rex_r;
7671 if (CODE64(s))
7672 ot = MO_64;
7673 else
7674 ot = MO_32;
7675 /* XXX: do it dynamically with CR4.DE bit */
7676 if (reg == 4 || reg == 5 || reg >= 8)
7677 goto illegal_op;
7678 if (b & 2) {
7679 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7680 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7681 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7682 gen_jmp_im(s->pc - s->cs_base);
7683 gen_eob(s);
7684 } else {
7685 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7686 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7687 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7690 break;
7691 case 0x106: /* clts */
7692 if (s->cpl != 0) {
7693 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7694 } else {
7695 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7696 gen_helper_clts(cpu_env);
7697 /* abort block because static cpu state changed */
7698 gen_jmp_im(s->pc - s->cs_base);
7699 gen_eob(s);
7701 break;
7702 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7703 case 0x1c3: /* MOVNTI reg, mem */
7704 if (!(s->cpuid_features & CPUID_SSE2))
7705 goto illegal_op;
7706 ot = mo_64_32(dflag);
7707 modrm = cpu_ldub_code(env, s->pc++);
7708 mod = (modrm >> 6) & 3;
7709 if (mod == 3)
7710 goto illegal_op;
7711 reg = ((modrm >> 3) & 7) | rex_r;
7712 /* generate a generic store */
7713 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7714 break;
7715 case 0x1ae:
7716 modrm = cpu_ldub_code(env, s->pc++);
7717 mod = (modrm >> 6) & 3;
7718 op = (modrm >> 3) & 7;
7719 switch(op) {
7720 case 0: /* fxsave */
7721 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7722 (s->prefix & PREFIX_LOCK))
7723 goto illegal_op;
7724 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7725 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7726 break;
7728 gen_lea_modrm(env, s, modrm);
7729 gen_update_cc_op(s);
7730 gen_jmp_im(pc_start - s->cs_base);
7731 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7732 break;
7733 case 1: /* fxrstor */
7734 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7735 (s->prefix & PREFIX_LOCK))
7736 goto illegal_op;
7737 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7738 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7739 break;
7741 gen_lea_modrm(env, s, modrm);
7742 gen_update_cc_op(s);
7743 gen_jmp_im(pc_start - s->cs_base);
7744 gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7745 break;
7746 case 2: /* ldmxcsr */
7747 case 3: /* stmxcsr */
7748 if (s->flags & HF_TS_MASK) {
7749 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7750 break;
7752 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7753 mod == 3)
7754 goto illegal_op;
7755 gen_lea_modrm(env, s, modrm);
7756 if (op == 2) {
7757 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7758 s->mem_index, MO_LEUL);
7759 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7760 } else {
7761 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7762 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
7764 break;
7765 case 5: /* lfence */
7766 case 6: /* mfence */
7767 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7768 goto illegal_op;
7769 break;
7770 case 7: /* sfence / clflush */
7771 if ((modrm & 0xc7) == 0xc0) {
7772 /* sfence */
7773 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7774 if (!(s->cpuid_features & CPUID_SSE))
7775 goto illegal_op;
7776 } else {
7777 /* clflush */
7778 if (!(s->cpuid_features & CPUID_CLFLUSH))
7779 goto illegal_op;
7780 gen_lea_modrm(env, s, modrm);
7782 break;
7783 default:
7784 goto illegal_op;
7786 break;
7787 case 0x10d: /* 3DNow! prefetch(w) */
7788 modrm = cpu_ldub_code(env, s->pc++);
7789 mod = (modrm >> 6) & 3;
7790 if (mod == 3)
7791 goto illegal_op;
7792 gen_lea_modrm(env, s, modrm);
7793 /* ignore for now */
7794 break;
7795 case 0x1aa: /* rsm */
7796 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7797 if (!(s->flags & HF_SMM_MASK))
7798 goto illegal_op;
7799 gen_update_cc_op(s);
7800 gen_jmp_im(s->pc - s->cs_base);
7801 gen_helper_rsm(cpu_env);
7802 gen_eob(s);
7803 break;
7804 case 0x1b8: /* SSE4.2 popcnt */
7805 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7806 PREFIX_REPZ)
7807 goto illegal_op;
7808 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7809 goto illegal_op;
7811 modrm = cpu_ldub_code(env, s->pc++);
7812 reg = ((modrm >> 3) & 7) | rex_r;
7814 if (s->prefix & PREFIX_DATA) {
7815 ot = MO_16;
7816 } else {
7817 ot = mo_64_32(dflag);
7820 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7821 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7822 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
7824 set_cc_op(s, CC_OP_EFLAGS);
7825 break;
7826 case 0x10e ... 0x10f:
7827 /* 3DNow! instructions, ignore prefixes */
7828 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7829 case 0x110 ... 0x117:
7830 case 0x128 ... 0x12f:
7831 case 0x138 ... 0x13a:
7832 case 0x150 ... 0x179:
7833 case 0x17c ... 0x17f:
7834 case 0x1c2:
7835 case 0x1c4 ... 0x1c6:
7836 case 0x1d0 ... 0x1fe:
7837 gen_sse(env, s, b, pc_start, rex_r);
7838 break;
7839 default:
7840 goto illegal_op;
7842 /* lock generation */
7843 if (s->prefix & PREFIX_LOCK)
7844 gen_helper_unlock();
7845 return s->pc;
7846 illegal_op:
7847 if (s->prefix & PREFIX_LOCK)
7848 gen_helper_unlock();
7849 /* XXX: ensure that no lock was generated */
7850 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7851 return s->pc;
7854 void optimize_flags_init(void)
7856 static const char reg_names[CPU_NB_REGS][4] = {
7857 #ifdef TARGET_X86_64
7858 [R_EAX] = "rax",
7859 [R_EBX] = "rbx",
7860 [R_ECX] = "rcx",
7861 [R_EDX] = "rdx",
7862 [R_ESI] = "rsi",
7863 [R_EDI] = "rdi",
7864 [R_EBP] = "rbp",
7865 [R_ESP] = "rsp",
7866 [8] = "r8",
7867 [9] = "r9",
7868 [10] = "r10",
7869 [11] = "r11",
7870 [12] = "r12",
7871 [13] = "r13",
7872 [14] = "r14",
7873 [15] = "r15",
7874 #else
7875 [R_EAX] = "eax",
7876 [R_EBX] = "ebx",
7877 [R_ECX] = "ecx",
7878 [R_EDX] = "edx",
7879 [R_ESI] = "esi",
7880 [R_EDI] = "edi",
7881 [R_EBP] = "ebp",
7882 [R_ESP] = "esp",
7883 #endif
7885 int i;
7887 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7888 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7889 offsetof(CPUX86State, cc_op), "cc_op");
7890 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7891 "cc_dst");
7892 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7893 "cc_src");
7894 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7895 "cc_src2");
7897 for (i = 0; i < CPU_NB_REGS; ++i) {
7898 cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
7899 offsetof(CPUX86State, regs[i]),
7900 reg_names[i]);
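/* Registering env, the cc_* fields and the general-purpose
   registers as TCG globals lets TCG allocate them to host registers
   and track their values across the generated ops. */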
7904 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7905 basic block 'tb'. If search_pc is TRUE, also generate PC
7906 information for each intermediate instruction. */
7907 static inline void gen_intermediate_code_internal(X86CPU *cpu,
7908 TranslationBlock *tb,
7909 bool search_pc)
7911 CPUState *cs = CPU(cpu);
7912 CPUX86State *env = &cpu->env;
7913 DisasContext dc1, *dc = &dc1;
7914 target_ulong pc_ptr;
7915 CPUBreakpoint *bp;
7916 int j, lj;
7917 uint64_t flags;
7918 target_ulong pc_start;
7919 target_ulong cs_base;
7920 int num_insns;
7921 int max_insns;
7923 /* generate intermediate code */
7924 pc_start = tb->pc;
7925 cs_base = tb->cs_base;
7926 flags = tb->flags;
7928 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7929 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7930 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7931 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7932 dc->f_st = 0;
7933 dc->vm86 = (flags >> VM_SHIFT) & 1;
7934 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7935 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7936 dc->tf = (flags >> TF_SHIFT) & 1;
7937 dc->singlestep_enabled = cs->singlestep_enabled;
7938 dc->cc_op = CC_OP_DYNAMIC;
7939 dc->cc_op_dirty = false;
7940 dc->cs_base = cs_base;
7941 dc->tb = tb;
7942 dc->popl_esp_hack = 0;
7943 /* select memory access functions */
7944 dc->mem_index = 0;
7945 if (flags & HF_SOFTMMU_MASK) {
7946 dc->mem_index = cpu_mmu_index(env);
7948 dc->cpuid_features = env->features[FEAT_1_EDX];
7949 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
7950 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
7951 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
7952 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
7953 #ifdef TARGET_X86_64
7954 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7955 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7956 #endif
7957 dc->flags = flags;
7958 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
7959 (flags & HF_INHIBIT_IRQ_MASK)
7960 #ifndef CONFIG_SOFTMMU
7961 || (flags & HF_SOFTMMU_MASK)
7962 #endif
7964 /* Do not optimize repz jumps at all in icount mode, because
7965 rep movsS instructions are executed with different code paths
7966 in repz_opt and !repz_opt modes. The latter was always used
7967 except in single step mode. Disabling the jump optimization
7968 here makes the control paths equivalent in normal and single
7969 step modes.
7970 As a result there is no jump optimization for repz in
7971 record/replay modes, and there is always an additional
7972 step for ecx=0 when icount is enabled.
7973 */
7974 dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
7975 #if 0
7976 /* check addseg logic */
7977 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7978 printf("ERROR addseg\n");
7979 #endif
7981 cpu_T[0] = tcg_temp_new();
7982 cpu_T[1] = tcg_temp_new();
7983 cpu_A0 = tcg_temp_new();
7985 cpu_tmp0 = tcg_temp_new();
7986 cpu_tmp1_i64 = tcg_temp_new_i64();
7987 cpu_tmp2_i32 = tcg_temp_new_i32();
7988 cpu_tmp3_i32 = tcg_temp_new_i32();
7989 cpu_tmp4 = tcg_temp_new();
7990 cpu_ptr0 = tcg_temp_new_ptr();
7991 cpu_ptr1 = tcg_temp_new_ptr();
7992 cpu_cc_srcT = tcg_temp_local_new();
7994 dc->is_jmp = DISAS_NEXT;
7995 pc_ptr = pc_start;
7996 lj = -1;
7997 num_insns = 0;
7998 max_insns = tb->cflags & CF_COUNT_MASK;
7999 if (max_insns == 0)
8000 max_insns = CF_COUNT_MASK;
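/* A cflags instruction budget of zero means "no limit", so fall
   back to the largest representable count. */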
8002 gen_tb_start(tb);
8003 for(;;) {
8004 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
8005 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
8006 if (bp->pc == pc_ptr &&
8007 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
8008 gen_debug(dc, pc_ptr - dc->cs_base);
8009 goto done_generating;
8013 if (search_pc) {
8014 j = tcg_op_buf_count();
8015 if (lj < j) {
8016 lj++;
8017 while (lj < j)
8018 tcg_ctx.gen_opc_instr_start[lj++] = 0;
8020 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
8021 gen_opc_cc_op[lj] = dc->cc_op;
8022 tcg_ctx.gen_opc_instr_start[lj] = 1;
8023 tcg_ctx.gen_opc_icount[lj] = num_insns;
8025 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8026 gen_io_start();
8028 pc_ptr = disas_insn(env, dc, pc_ptr);
8029 num_insns++;
8030 /* stop translation if indicated */
8031 if (dc->is_jmp)
8032 break;
8033 /* In single step mode, we generate only one instruction and
8034 then generate an exception. */
8035 /* If irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8036 the flag and abort the translation to give the irqs a
8037 chance to happen. */
8038 if (dc->tf || dc->singlestep_enabled ||
8039 (flags & HF_INHIBIT_IRQ_MASK)) {
8040 gen_jmp_im(pc_ptr - dc->cs_base);
8041 gen_eob(dc);
8042 break;
8044 /* Do not cross a page boundary in icount mode, since that
8045 can cause an exception. Stop only when the boundary is
8046 crossed by the first instruction in the block.
8047 If the current instruction already crossed the boundary,
8048 that is fine, because an exception has not stopped this code.
8049 */
8050 if ((tb->cflags & CF_USE_ICOUNT)
8051 && ((pc_ptr & TARGET_PAGE_MASK)
8052 != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
8053 || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
8054 gen_jmp_im(pc_ptr - dc->cs_base);
8055 gen_eob(dc);
8056 break;
8058 /* if too long translation, stop generation too */
8059 if (tcg_op_buf_full() ||
8060 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8061 num_insns >= max_insns) {
8062 gen_jmp_im(pc_ptr - dc->cs_base);
8063 gen_eob(dc);
8064 break;
8066 if (singlestep) {
8067 gen_jmp_im(pc_ptr - dc->cs_base);
8068 gen_eob(dc);
8069 break;
8072 if (tb->cflags & CF_LAST_IO)
8073 gen_io_end();
8074 done_generating:
8075 gen_tb_end(tb, num_insns);
8077 /* make sure to fill in the last values */
8078 if (search_pc) {
8079 j = tcg_op_buf_count();
8080 lj++;
8081 while (lj <= j)
8082 tcg_ctx.gen_opc_instr_start[lj++] = 0;
8085 #ifdef DEBUG_DISAS
8086 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8087 int disas_flags;
8088 qemu_log("----------------\n");
8089 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8090 #ifdef TARGET_X86_64
8091 if (dc->code64)
8092 disas_flags = 2;
8093 else
8094 #endif
8095 disas_flags = !dc->code32;
8096 log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
8097 qemu_log("\n");
8099 #endif
8101 if (!search_pc) {
8102 tb->size = pc_ptr - pc_start;
8103 tb->icount = num_insns;
8107 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8109 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
8112 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
8114 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
8117 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
8119 int cc_op;
8120 #ifdef DEBUG_DISAS
8121 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8122 int i;
8123 qemu_log("RESTORE:\n");
8124 for(i = 0;i <= pc_pos; i++) {
8125 if (tcg_ctx.gen_opc_instr_start[i]) {
8126 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8127 tcg_ctx.gen_opc_pc[i]);
8130 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8131 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
8132 (uint32_t)tb->cs_base);
8134 #endif
8135 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
8136 cc_op = gen_opc_cc_op[pc_pos];
8137 if (cc_op != CC_OP_DYNAMIC)
8138 env->cc_op = cc_op;