/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, int ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};
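
/* Switch the lazy condition-code state to 'op', discarding whichever
   CC globals the new mode no longer reads.  */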
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}
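
/* Flush the lazily-tracked cc_op out to the cpu_cc_op global if it is
   dirty, so that helpers and subsequent TBs see the correct value.  */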
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
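
/* Store t0 into integer register 'reg', writing only the portion
   selected by operand size 'ot'.  */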
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case OT_WORD:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached;  abort? */
    case OT_LONG:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case OT_QUAD:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}

static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case OT_BYTE:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
        break;
    default: /* XXX this shouldn't be reached;  abort? */
    case OT_WORD:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}

static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case OT_BYTE:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case OT_WORD:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case OT_BYTE:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
        break;
    case OT_WORD:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case OT_LONG:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case OT_LONG:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case OT_QUAD:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case OT_BYTE:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case OT_WORD:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case OT_LONG:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case OT_QUAD:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
}
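
/* Compute the string-source address seg:pSI into A0, honoring any
   segment override and the current address size.  */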
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(s, R_ES);
    }
}

static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}
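
/* Extend 'src' into 'dst' (sign extension if 'sign' is set) according
   to 'size'; returns 'dst', or 'src' unchanged when no extension is
   needed.  */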
static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
{
    switch (size) {
    case OT_BYTE:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case OT_WORD:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case OT_LONG:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(int ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case OT_BYTE:
        gen_helper_inb(v, n);
        break;
    case OT_WORD:
        gen_helper_inw(v, n);
        break;
    case OT_LONG:
        gen_helper_inl(v, n);
        break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case OT_BYTE:
        gen_helper_outb(v, n);
        break;
    case OT_WORD:
        gen_helper_outw(v, n);
        break;
    case OT_LONG:
        gen_helper_outl(v, n);
        break;
    }
}
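
/* Generate the checks needed before an I/O access: the TSS I/O
   permission bitmap when CPL > IOPL or in vm86 mode, and the SVM I/O
   intercept bitmap when SVM intercepts are active.  */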
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case OT_BYTE:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case OT_WORD:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case OT_LONG:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            gen_update_cc_op(s);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
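
/* Generate one MOVS iteration: load from [ESI], store to [EDI], then
   advance both index registers by the direction-flag shift.  */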
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            int size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            int size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, size, cond;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, int l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}

/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
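
/* Store the result and shifted-out bit into the CC globals after a
   variable-count shift; when the count is zero the previous flags
   state must be preserved, hence the movcond sequences below.  */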
static void gen_shift_flags(DisasContext *s, int ot, TCGv result, TCGv shm1,
                            TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
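
/* Shift the operand designated by 'op1' by the count in T1
   (op1 == OR_TMP0 selects the memory operand addressed by A0).  */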
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
}

static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == OT_QUAD ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}

static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}
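
/* Rotate the operand designated by 'op1' by the count in T1; C and O
   are recomputed from the rotated result only for non-zero counts.  */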
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1, int is_right)
{
    target_ulong mask = (ot == OT_QUAD ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    switch (ot) {
    case OT_BYTE:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
        tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
        goto do_long;
    case OT_WORD:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
        break;
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX which has the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == OT_QUAD ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case OT_LONG:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
            }
            break;
        case OT_BYTE:
            mask = 7;
            goto do_shifts;
        case OT_WORD:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
            tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case OT_BYTE:
            gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case OT_WORD:
            gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case OT_LONG:
            gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case OT_QUAD:
            gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        }
    } else {
        switch (ot) {
        case OT_BYTE:
            gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case OT_WORD:
            gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case OT_LONG:
            gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case OT_QUAD:
            gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, int ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == OT_QUAD ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_TN_reg(ot, 0, op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case OT_WORD:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
            tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
            tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case OT_LONG:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            if (ot == OT_WORD) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
                           cpu_tmp4, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        break;
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_T0_A0(ot + s->mem_index);
    } else {
        gen_op_mov_reg_T0(ot, op1);
    }

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}

static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
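
/* Decode the ModRM byte (and any SIB byte and displacement) and
   compute the effective address into A0, adding the segment base
   where required.  */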
2083 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
2084 int *reg_ptr, int *offset_ptr)
2086 target_long disp;
2087 int havesib;
2088 int base;
2089 int index;
2090 int scale;
2091 int opreg;
2092 int mod, rm, code, override, must_add_seg;
2094 override = s->override;
2095 must_add_seg = s->addseg;
2096 if (override >= 0)
2097 must_add_seg = 1;
2098 mod = (modrm >> 6) & 3;
2099 rm = modrm & 7;
2101 if (s->aflag) {
2103 havesib = 0;
2104 base = rm;
2105 index = 0;
2106 scale = 0;
2108 if (base == 4) {
2109 havesib = 1;
2110 code = cpu_ldub_code(env, s->pc++);
2111 scale = (code >> 6) & 3;
2112 index = ((code >> 3) & 7) | REX_X(s);
2113 base = (code & 7);
2115 base |= REX_B(s);
2117 switch (mod) {
2118 case 0:
2119 if ((base & 7) == 5) {
2120 base = -1;
2121 disp = (int32_t)cpu_ldl_code(env, s->pc);
2122 s->pc += 4;
2123 if (CODE64(s) && !havesib) {
2124 disp += s->pc + s->rip_offset;
2126 } else {
2127 disp = 0;
2129 break;
2130 case 1:
2131 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2132 break;
2133 default:
2134 case 2:
2135 disp = (int32_t)cpu_ldl_code(env, s->pc);
2136 s->pc += 4;
2137 break;
2140 if (base >= 0) {
2141 /* for correct popl handling with esp */
2142 if (base == 4 && s->popl_esp_hack)
2143 disp += s->popl_esp_hack;
2144 #ifdef TARGET_X86_64
2145 if (s->aflag == 2) {
2146 gen_op_movq_A0_reg(base);
2147 if (disp != 0) {
2148 gen_op_addq_A0_im(disp);
2150 } else
2151 #endif
2153 gen_op_movl_A0_reg(base);
2154 if (disp != 0)
2155 gen_op_addl_A0_im(disp);
2157 } else {
2158 #ifdef TARGET_X86_64
2159 if (s->aflag == 2) {
2160 gen_op_movq_A0_im(disp);
2161 } else
2162 #endif
2164 gen_op_movl_A0_im(disp);
2167 /* index == 4 means no index */
2168 if (havesib && (index != 4)) {
2169 #ifdef TARGET_X86_64
2170 if (s->aflag == 2) {
2171 gen_op_addq_A0_reg_sN(scale, index);
2172 } else
2173 #endif
2175 gen_op_addl_A0_reg_sN(scale, index);
2178 if (must_add_seg) {
2179 if (override < 0) {
2180 if (base == R_EBP || base == R_ESP)
2181 override = R_SS;
2182 else
2183 override = R_DS;
2185 #ifdef TARGET_X86_64
2186 if (s->aflag == 2) {
2187 gen_op_addq_A0_seg(override);
2188 } else
2189 #endif
2191 gen_op_addl_A0_seg(s, override);
2194 } else {
2195 switch (mod) {
2196 case 0:
2197 if (rm == 6) {
2198 disp = cpu_lduw_code(env, s->pc);
2199 s->pc += 2;
2200 gen_op_movl_A0_im(disp);
2201 rm = 0; /* avoid SS override */
2202 goto no_rm;
2203 } else {
2204 disp = 0;
2206 break;
2207 case 1:
2208 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2209 break;
2210 default:
2211 case 2:
2212 disp = cpu_lduw_code(env, s->pc);
2213 s->pc += 2;
2214 break;
2216 switch(rm) {
2217 case 0:
2218 gen_op_movl_A0_reg(R_EBX);
2219 gen_op_addl_A0_reg_sN(0, R_ESI);
2220 break;
2221 case 1:
2222 gen_op_movl_A0_reg(R_EBX);
2223 gen_op_addl_A0_reg_sN(0, R_EDI);
2224 break;
2225 case 2:
2226 gen_op_movl_A0_reg(R_EBP);
2227 gen_op_addl_A0_reg_sN(0, R_ESI);
2228 break;
2229 case 3:
2230 gen_op_movl_A0_reg(R_EBP);
2231 gen_op_addl_A0_reg_sN(0, R_EDI);
2232 break;
2233 case 4:
2234 gen_op_movl_A0_reg(R_ESI);
2235 break;
2236 case 5:
2237 gen_op_movl_A0_reg(R_EDI);
2238 break;
2239 case 6:
2240 gen_op_movl_A0_reg(R_EBP);
2241 break;
2242 default:
2243 case 7:
2244 gen_op_movl_A0_reg(R_EBX);
2245 break;
2247 if (disp != 0)
2248 gen_op_addl_A0_im(disp);
2249 gen_op_andl_A0_ffff();
2250 no_rm:
2251 if (must_add_seg) {
2252 if (override < 0) {
2253 if (rm == 2 || rm == 3 || rm == 6)
2254 override = R_SS;
2255 else
2256 override = R_DS;
2258 gen_op_addl_A0_seg(s, override);
2262 opreg = OR_A0;
2263 disp = 0;
2264 *reg_ptr = opreg;
2265 *offset_ptr = disp;
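/* gen_nop_modrm() walks the same ModRM/SIB/displacement encoding as
   gen_lea_modrm() but only advances s->pc; no TCG code is generated.
   It is used for hinting instructions (multi-byte NOP, prefetches)
   whose memory operand is never actually accessed. */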
2268 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2270 int mod, rm, base, code;
2272 mod = (modrm >> 6) & 3;
2273 if (mod == 3)
2274 return;
2275 rm = modrm & 7;
2277 if (s->aflag) {
2279 base = rm;
2281 if (base == 4) {
2282 code = cpu_ldub_code(env, s->pc++);
2283 base = (code & 7);
2286 switch (mod) {
2287 case 0:
2288 if (base == 5) {
2289 s->pc += 4;
2291 break;
2292 case 1:
2293 s->pc++;
2294 break;
2295 default:
2296 case 2:
2297 s->pc += 4;
2298 break;
2300 } else {
2301 switch (mod) {
2302 case 0:
2303 if (rm == 6) {
2304 s->pc += 2;
2306 break;
2307 case 1:
2308 s->pc++;
2309 break;
2310 default:
2311 case 2:
2312 s->pc += 2;
2313 break;
2318 /* used for LEA and MOV AX, mem */
2319 static void gen_add_A0_ds_seg(DisasContext *s)
2321 int override, must_add_seg;
2322 must_add_seg = s->addseg;
2323 override = R_DS;
2324 if (s->override >= 0) {
2325 override = s->override;
2326 must_add_seg = 1;
2328 if (must_add_seg) {
2329 #ifdef TARGET_X86_64
2330 if (CODE64(s)) {
2331 gen_op_addq_A0_seg(override);
2332 } else
2333 #endif
2335 gen_op_addl_A0_seg(s, override);
2340 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2341 OR_TMP0 */
2342 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2343 int ot, int reg, int is_store)
2345 int mod, rm, opreg, disp;
2347 mod = (modrm >> 6) & 3;
2348 rm = (modrm & 7) | REX_B(s);
2349 if (mod == 3) {
2350 if (is_store) {
2351 if (reg != OR_TMP0)
2352 gen_op_mov_TN_reg(ot, 0, reg);
2353 gen_op_mov_reg_T0(ot, rm);
2354 } else {
2355 gen_op_mov_TN_reg(ot, 0, rm);
2356 if (reg != OR_TMP0)
2357 gen_op_mov_reg_T0(ot, reg);
2359 } else {
2360 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2361 if (is_store) {
2362 if (reg != OR_TMP0)
2363 gen_op_mov_TN_reg(ot, 0, reg);
2364 gen_op_st_T0_A0(ot + s->mem_index);
2365 } else {
2366 gen_op_ld_T0_A0(ot + s->mem_index);
2367 if (reg != OR_TMP0)
2368 gen_op_mov_reg_T0(ot, reg);
2373 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2375 uint32_t ret;
2377 switch(ot) {
2378 case OT_BYTE:
2379 ret = cpu_ldub_code(env, s->pc);
2380 s->pc++;
2381 break;
2382 case OT_WORD:
2383 ret = cpu_lduw_code(env, s->pc);
2384 s->pc += 2;
2385 break;
2386 default:
2387 case OT_LONG:
2388 ret = cpu_ldl_code(env, s->pc);
2389 s->pc += 4;
2390 break;
2392 return ret;
2395 static inline int insn_const_size(unsigned int ot)
2397 if (ot <= OT_LONG)
2398 return 1 << ot;
2399 else
2400 return 4;
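/* Direct block chaining: tcg_gen_goto_tb() emits a patchable jump slot
   and tcg_gen_exit_tb((uintptr_t)tb + tb_num) returns the TB pointer
   with the slot index folded into its low bits, so the execution loop
   can patch the slot to jump straight into the next TB.  Chaining is
   only done when the target stays on the same guest page as this TB,
   because TB invalidation works at page granularity. */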
2403 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2405 TranslationBlock *tb;
2406 target_ulong pc;
2408 pc = s->cs_base + eip;
2409 tb = s->tb;
2410 /* NOTE: we handle the case where the TB spans two pages here */
2411 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2412 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2413 /* jump to same page: we can use a direct jump */
2414 tcg_gen_goto_tb(tb_num);
2415 gen_jmp_im(eip);
2416 tcg_gen_exit_tb((uintptr_t)tb + tb_num);
2417 } else {
2418 /* jump to another page: currently not optimized */
2419 gen_jmp_im(eip);
2420 gen_eob(s);
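/* A conditional jump is laid out as two exits: the fall-through path
   (goto_tb slot 0, next_eip) and the taken path (goto_tb slot 1, val).
   When direct chaining is not allowed (s->jmp_opt clear), both paths
   instead funnel into a single gen_eob(). */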
2424 static inline void gen_jcc(DisasContext *s, int b,
2425 target_ulong val, target_ulong next_eip)
2427 int l1, l2;
2429 if (s->jmp_opt) {
2430 l1 = gen_new_label();
2431 gen_jcc1(s, b, l1);
2433 gen_goto_tb(s, 0, next_eip);
2435 gen_set_label(l1);
2436 gen_goto_tb(s, 1, val);
2437 s->is_jmp = DISAS_TB_JUMP;
2438 } else {
2439 l1 = gen_new_label();
2440 l2 = gen_new_label();
2441 gen_jcc1(s, b, l1);
2443 gen_jmp_im(next_eip);
2444 tcg_gen_br(l2);
2446 gen_set_label(l1);
2447 gen_jmp_im(val);
2448 gen_set_label(l2);
2449 gen_eob(s);
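/* CMOVcc is implemented branchlessly with tcg_gen_movcond_tl(): the
   destination receives the loaded operand when the prepared condition
   holds and keeps its old register value otherwise, so no labels are
   needed. */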
2453 static void gen_cmovcc1(CPUX86State *env, DisasContext *s, int ot, int b,
2454 int modrm, int reg)
2456 CCPrepare cc;
2458 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
2460 cc = gen_prepare_cc(s, b, cpu_T[1]);
2461 if (cc.mask != -1) {
2462 TCGv t0 = tcg_temp_new();
2463 tcg_gen_andi_tl(t0, cc.reg, cc.mask);
2464 cc.reg = t0;
2466 if (!cc.use_reg2) {
2467 cc.reg2 = tcg_const_tl(cc.imm);
2470 tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
2471 cpu_T[0], cpu_regs[reg]);
2472 gen_op_mov_reg_T0(ot, reg);
2474 if (cc.mask != -1) {
2475 tcg_temp_free(cc.reg);
2477 if (!cc.use_reg2) {
2478 tcg_temp_free(cc.reg2);
2482 static inline void gen_op_movl_T0_seg(int seg_reg)
2484 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2485 offsetof(CPUX86State,segs[seg_reg].selector));
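/* Real-mode/vm86 segment load: the descriptor tables are not used and
   the base is simply selector << 4.  For example, `mov ss, ax' with
   ax = 0x1234 sets ss.selector = 0x1234 and ss.base = 0x12340. */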
2488 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2490 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2491 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2492 offsetof(CPUX86State,segs[seg_reg].selector));
2493 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2494 tcg_gen_st_tl(cpu_T[0], cpu_env,
2495 offsetof(CPUX86State,segs[seg_reg].base));
2498 /* move T0 to seg_reg and check whether the CPU state may change. Never
2499 call this function with seg_reg == R_CS */
2500 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2502 if (s->pe && !s->vm86) {
2503 /* XXX: optimize by finding processor state dynamically */
2504 gen_update_cc_op(s);
2505 gen_jmp_im(cur_eip);
2506 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2507 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2508 /* abort translation because the addseg value may change or
2509 because ss32 may change. For R_SS, translation must always
2510 stop, since special handling is needed to inhibit hardware
2511 interrupts for the next instruction */
2512 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2513 s->is_jmp = DISAS_TB_JUMP;
2514 } else {
2515 gen_op_movl_seg_T0_vm(seg_reg);
2516 if (seg_reg == R_SS)
2517 s->is_jmp = DISAS_TB_JUMP;
2521 static inline int svm_is_rep(int prefixes)
2523 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2526 static inline void
2527 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2528 uint32_t type, uint64_t param)
2530 /* no SVM activated; fast case */
2531 if (likely(!(s->flags & HF_SVMI_MASK)))
2532 return;
2533 gen_update_cc_op(s);
2534 gen_jmp_im(pc_start - s->cs_base);
2535 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2536 tcg_const_i64(param));
2539 static inline void
2540 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2542 gen_svm_check_intercept_param(s, pc_start, type, 0);
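/* The first argument of gen_op_add_reg_im() selects the operand size
   used when adding `addend' to ESP: 0 = 16-bit, 1 = 32-bit, 2 = 64-bit.
   64-bit code always uses the full register; otherwise ss32 decides
   between SP and ESP arithmetic. */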
2545 static inline void gen_stack_update(DisasContext *s, int addend)
2547 #ifdef TARGET_X86_64
2548 if (CODE64(s)) {
2549 gen_op_add_reg_im(2, R_ESP, addend);
2550 } else
2551 #endif
2552 if (s->ss32) {
2553 gen_op_add_reg_im(1, R_ESP, addend);
2554 } else {
2555 gen_op_add_reg_im(0, R_ESP, addend);
2559 /* generate a push. It depends on ss32, addseg and dflag */
2560 static void gen_push_T0(DisasContext *s)
2562 #ifdef TARGET_X86_64
2563 if (CODE64(s)) {
2564 gen_op_movq_A0_reg(R_ESP);
2565 if (s->dflag) {
2566 gen_op_addq_A0_im(-8);
2567 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2568 } else {
2569 gen_op_addq_A0_im(-2);
2570 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2572 gen_op_mov_reg_A0(2, R_ESP);
2573 } else
2574 #endif
2576 gen_op_movl_A0_reg(R_ESP);
2577 if (!s->dflag)
2578 gen_op_addl_A0_im(-2);
2579 else
2580 gen_op_addl_A0_im(-4);
2581 if (s->ss32) {
2582 if (s->addseg) {
2583 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2584 gen_op_addl_A0_seg(s, R_SS);
2586 } else {
2587 gen_op_andl_A0_ffff();
2588 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2589 gen_op_addl_A0_seg(s, R_SS);
2591 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2592 if (s->ss32 && !s->addseg)
2593 gen_op_mov_reg_A0(1, R_ESP);
2594 else
2595 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2599 /* generate a push. It depends on ss32, addseg and dflag */
2600 /* slower version for T1, only used for call Ev */
2601 static void gen_push_T1(DisasContext *s)
2603 #ifdef TARGET_X86_64
2604 if (CODE64(s)) {
2605 gen_op_movq_A0_reg(R_ESP);
2606 if (s->dflag) {
2607 gen_op_addq_A0_im(-8);
2608 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2609 } else {
2610 gen_op_addq_A0_im(-2);
2611 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2613 gen_op_mov_reg_A0(2, R_ESP);
2614 } else
2615 #endif
2617 gen_op_movl_A0_reg(R_ESP);
2618 if (!s->dflag)
2619 gen_op_addl_A0_im(-2);
2620 else
2621 gen_op_addl_A0_im(-4);
2622 if (s->ss32) {
2623 if (s->addseg) {
2624 gen_op_addl_A0_seg(s, R_SS);
2626 } else {
2627 gen_op_andl_A0_ffff();
2628 gen_op_addl_A0_seg(s, R_SS);
2630 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2632 if (s->ss32 && !s->addseg)
2633 gen_op_mov_reg_A0(1, R_ESP);
2634 else
2635 gen_stack_update(s, (-2) << s->dflag);
2639 /* a two-step pop is necessary for precise exceptions: the value is loaded before ESP is updated, so a faulting load leaves ESP intact */
2640 static void gen_pop_T0(DisasContext *s)
2642 #ifdef TARGET_X86_64
2643 if (CODE64(s)) {
2644 gen_op_movq_A0_reg(R_ESP);
2645 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2646 } else
2647 #endif
2649 gen_op_movl_A0_reg(R_ESP);
2650 if (s->ss32) {
2651 if (s->addseg)
2652 gen_op_addl_A0_seg(s, R_SS);
2653 } else {
2654 gen_op_andl_A0_ffff();
2655 gen_op_addl_A0_seg(s, R_SS);
2657 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2661 static void gen_pop_update(DisasContext *s)
2663 #ifdef TARGET_X86_64
2664 if (CODE64(s) && s->dflag) {
2665 gen_stack_update(s, 8);
2666 } else
2667 #endif
2669 gen_stack_update(s, 2 << s->dflag);
2673 static void gen_stack_A0(DisasContext *s)
2675 gen_op_movl_A0_reg(R_ESP);
2676 if (!s->ss32)
2677 gen_op_andl_A0_ffff();
2678 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2679 if (s->addseg)
2680 gen_op_addl_A0_seg(s, R_SS);
2683 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2684 static void gen_pusha(DisasContext *s)
2686 int i;
2687 gen_op_movl_A0_reg(R_ESP);
2688 gen_op_addl_A0_im(-16 << s->dflag);
2689 if (!s->ss32)
2690 gen_op_andl_A0_ffff();
2691 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2692 if (s->addseg)
2693 gen_op_addl_A0_seg(s, R_SS);
2694 for(i = 0;i < 8; i++) {
2695 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2696 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2697 gen_op_addl_A0_im(2 << s->dflag);
2699 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2702 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2703 static void gen_popa(DisasContext *s)
2705 int i;
2706 gen_op_movl_A0_reg(R_ESP);
2707 if (!s->ss32)
2708 gen_op_andl_A0_ffff();
2709 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2710 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2711 if (s->addseg)
2712 gen_op_addl_A0_seg(s, R_SS);
2713 for(i = 0;i < 8; i++) {
2714 /* ESP is not reloaded */
2715 if (i != 3) {
2716 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2717 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2719 gen_op_addl_A0_im(2 << s->dflag);
2721 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
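/* ENTER semantics, sketched: push (E/R)BP; for a non-zero nesting
   level, copy level - 1 frame pointers from the old frame and push the
   new frame pointer (done in the enter_level helpers, which need guest
   memory access); load BP with the new frame base; finally drop ESP by
   esp_addend plus the space used by the copied pointers. */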
2724 static void gen_enter(DisasContext *s, int esp_addend, int level)
2726 int ot, opsize;
2728 level &= 0x1f;
2729 #ifdef TARGET_X86_64
2730 if (CODE64(s)) {
2731 ot = s->dflag ? OT_QUAD : OT_WORD;
2732 opsize = 1 << ot;
2734 gen_op_movl_A0_reg(R_ESP);
2735 gen_op_addq_A0_im(-opsize);
2736 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2738 /* push bp */
2739 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2740 gen_op_st_T0_A0(ot + s->mem_index);
2741 if (level) {
2742 /* XXX: must save state */
2743 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2744 tcg_const_i32((ot == OT_QUAD)),
2745 cpu_T[1]);
2747 gen_op_mov_reg_T1(ot, R_EBP);
2748 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2749 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2750 } else
2751 #endif
2753 ot = s->dflag + OT_WORD;
2754 opsize = 2 << s->dflag;
2756 gen_op_movl_A0_reg(R_ESP);
2757 gen_op_addl_A0_im(-opsize);
2758 if (!s->ss32)
2759 gen_op_andl_A0_ffff();
2760 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2761 if (s->addseg)
2762 gen_op_addl_A0_seg(s, R_SS);
2763 /* push bp */
2764 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2765 gen_op_st_T0_A0(ot + s->mem_index);
2766 if (level) {
2767 /* XXX: must save state */
2768 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2769 tcg_const_i32(s->dflag),
2770 cpu_T[1]);
2772 gen_op_mov_reg_T1(ot, R_EBP);
2773 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2774 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2778 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2780 gen_update_cc_op(s);
2781 gen_jmp_im(cur_eip);
2782 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2783 s->is_jmp = DISAS_TB_JUMP;
2786 /* an interrupt is different from an exception because of the
2787 privilege checks */
2788 static void gen_interrupt(DisasContext *s, int intno,
2789 target_ulong cur_eip, target_ulong next_eip)
2791 gen_update_cc_op(s);
2792 gen_jmp_im(cur_eip);
2793 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2794 tcg_const_i32(next_eip - cur_eip));
2795 s->is_jmp = DISAS_TB_JUMP;
2798 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2800 gen_update_cc_op(s);
2801 gen_jmp_im(cur_eip);
2802 gen_helper_debug(cpu_env);
2803 s->is_jmp = DISAS_TB_JUMP;
2806 /* generate a generic end of block. A trace exception is also
2807 generated if needed */
2808 static void gen_eob(DisasContext *s)
2810 gen_update_cc_op(s);
2811 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2812 gen_helper_reset_inhibit_irq(cpu_env);
2814 if (s->tb->flags & HF_RF_MASK) {
2815 gen_helper_reset_rf(cpu_env);
2817 if (s->singlestep_enabled) {
2818 gen_helper_debug(cpu_env);
2819 } else if (s->tf) {
2820 gen_helper_single_step(cpu_env);
2821 } else {
2822 tcg_gen_exit_tb(0);
2824 s->is_jmp = DISAS_TB_JUMP;
2827 /* generate a jump to eip. No segment change may happen before this,
2828 since a direct jump to the chained block may occur */
2829 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2831 gen_update_cc_op(s);
2832 set_cc_op(s, CC_OP_DYNAMIC);
2833 if (s->jmp_opt) {
2834 gen_goto_tb(s, tb_num, eip);
2835 s->is_jmp = DISAS_TB_JUMP;
2836 } else {
2837 gen_jmp_im(eip);
2838 gen_eob(s);
2842 static void gen_jmp(DisasContext *s, target_ulong eip)
2844 gen_jmp_tb(s, eip, 0);
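/* The load/store env helpers below move 64/128-bit values between
   guest memory at cpu_A0 and fields of CPUX86State.  Callers pass
   values of the form `ot + s->mem_index'; the MMU index appears to be
   stored pre-shifted in s->mem_index ((mmu_idx + 1) << 2), so
   (idx >> 2) - 1 recovers it while the low bits carry the operand
   size. */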
2847 static inline void gen_ldq_env_A0(int idx, int offset)
2849 int mem_index = (idx >> 2) - 1;
2850 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2851 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2854 static inline void gen_stq_env_A0(int idx, int offset)
2856 int mem_index = (idx >> 2) - 1;
2857 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2858 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2861 static inline void gen_ldo_env_A0(int idx, int offset)
2863 int mem_index = (idx >> 2) - 1;
2864 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2865 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2866 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2867 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2868 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2871 static inline void gen_sto_env_A0(int idx, int offset)
2873 int mem_index = (idx >> 2) - 1;
2874 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2875 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2876 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2877 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2878 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2881 static inline void gen_op_movo(int d_offset, int s_offset)
2883 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2884 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2885 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2886 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2889 static inline void gen_op_movq(int d_offset, int s_offset)
2891 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2892 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2895 static inline void gen_op_movl(int d_offset, int s_offset)
2897 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2898 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2901 static inline void gen_op_movq_env_0(int d_offset)
2903 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2904 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2907 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2908 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2909 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2910 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2911 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2912 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2913 TCGv_i32 val);
2914 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2915 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2916 TCGv val);
2918 #define SSE_SPECIAL ((void *)1)
2919 #define SSE_DUMMY ((void *)2)
2921 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2922 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2923 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
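/* sse_op_table1 is indexed by the second opcode byte and by the
   mandatory-prefix class b1 computed in gen_sse() (0: none, 1: 66,
   2: F3, 3: F2).  A NULL entry means an illegal opcode, SSE_SPECIAL
   entries are decoded by hand in gen_sse()'s big switch, and SSE_DUMMY
   marks opcodes (femms/emms) handled before table dispatch. */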
2925 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2926 /* 3DNow! extensions */
2927 [0x0e] = { SSE_DUMMY }, /* femms */
2928 [0x0f] = { SSE_DUMMY }, /* pf... */
2929 /* pure SSE operations */
2930 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2931 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2932 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2933 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2934 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2935 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2936 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2937 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2939 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2940 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2941 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2942 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2943 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2944 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2945 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2946 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2947 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2948 [0x51] = SSE_FOP(sqrt),
2949 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2950 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2951 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2952 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2953 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2954 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2955 [0x58] = SSE_FOP(add),
2956 [0x59] = SSE_FOP(mul),
2957 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2958 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2959 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2960 [0x5c] = SSE_FOP(sub),
2961 [0x5d] = SSE_FOP(min),
2962 [0x5e] = SSE_FOP(div),
2963 [0x5f] = SSE_FOP(max),
2965 [0xc2] = SSE_FOP(cmpeq),
2966 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2967 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2969 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2970 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2971 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2973 /* MMX ops and their SSE extensions */
2974 [0x60] = MMX_OP2(punpcklbw),
2975 [0x61] = MMX_OP2(punpcklwd),
2976 [0x62] = MMX_OP2(punpckldq),
2977 [0x63] = MMX_OP2(packsswb),
2978 [0x64] = MMX_OP2(pcmpgtb),
2979 [0x65] = MMX_OP2(pcmpgtw),
2980 [0x66] = MMX_OP2(pcmpgtl),
2981 [0x67] = MMX_OP2(packuswb),
2982 [0x68] = MMX_OP2(punpckhbw),
2983 [0x69] = MMX_OP2(punpckhwd),
2984 [0x6a] = MMX_OP2(punpckhdq),
2985 [0x6b] = MMX_OP2(packssdw),
2986 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2987 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2988 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2989 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2990 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2991 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2992 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2993 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2994 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2995 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2996 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2997 [0x74] = MMX_OP2(pcmpeqb),
2998 [0x75] = MMX_OP2(pcmpeqw),
2999 [0x76] = MMX_OP2(pcmpeql),
3000 [0x77] = { SSE_DUMMY }, /* emms */
3001 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
3002 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
3003 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
3004 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
3005 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
3006 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
3007 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
3008 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
3009 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
3010 [0xd1] = MMX_OP2(psrlw),
3011 [0xd2] = MMX_OP2(psrld),
3012 [0xd3] = MMX_OP2(psrlq),
3013 [0xd4] = MMX_OP2(paddq),
3014 [0xd5] = MMX_OP2(pmullw),
3015 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
3016 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
3017 [0xd8] = MMX_OP2(psubusb),
3018 [0xd9] = MMX_OP2(psubusw),
3019 [0xda] = MMX_OP2(pminub),
3020 [0xdb] = MMX_OP2(pand),
3021 [0xdc] = MMX_OP2(paddusb),
3022 [0xdd] = MMX_OP2(paddusw),
3023 [0xde] = MMX_OP2(pmaxub),
3024 [0xdf] = MMX_OP2(pandn),
3025 [0xe0] = MMX_OP2(pavgb),
3026 [0xe1] = MMX_OP2(psraw),
3027 [0xe2] = MMX_OP2(psrad),
3028 [0xe3] = MMX_OP2(pavgw),
3029 [0xe4] = MMX_OP2(pmulhuw),
3030 [0xe5] = MMX_OP2(pmulhw),
3031 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
3032 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
3033 [0xe8] = MMX_OP2(psubsb),
3034 [0xe9] = MMX_OP2(psubsw),
3035 [0xea] = MMX_OP2(pminsw),
3036 [0xeb] = MMX_OP2(por),
3037 [0xec] = MMX_OP2(paddsb),
3038 [0xed] = MMX_OP2(paddsw),
3039 [0xee] = MMX_OP2(pmaxsw),
3040 [0xef] = MMX_OP2(pxor),
3041 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
3042 [0xf1] = MMX_OP2(psllw),
3043 [0xf2] = MMX_OP2(pslld),
3044 [0xf3] = MMX_OP2(psllq),
3045 [0xf4] = MMX_OP2(pmuludq),
3046 [0xf5] = MMX_OP2(pmaddwd),
3047 [0xf6] = MMX_OP2(psadbw),
3048 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
3049 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
3050 [0xf8] = MMX_OP2(psubb),
3051 [0xf9] = MMX_OP2(psubw),
3052 [0xfa] = MMX_OP2(psubl),
3053 [0xfb] = MMX_OP2(psubq),
3054 [0xfc] = MMX_OP2(paddb),
3055 [0xfd] = MMX_OP2(paddw),
3056 [0xfe] = MMX_OP2(paddl),
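/* Shift-by-immediate groups 0F 71/72/73: the row index is
   ((b - 1) & 3) * 8 plus the ModRM reg field (0x71 -> words,
   0x72 -> dwords, 0x73 -> qwords), and the column is 0 for the MMX
   forms or nonzero for the 66-prefixed XMM forms, matching the lookup
   in gen_sse(). */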
3059 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
3060 [0 + 2] = MMX_OP2(psrlw),
3061 [0 + 4] = MMX_OP2(psraw),
3062 [0 + 6] = MMX_OP2(psllw),
3063 [8 + 2] = MMX_OP2(psrld),
3064 [8 + 4] = MMX_OP2(psrad),
3065 [8 + 6] = MMX_OP2(pslld),
3066 [16 + 2] = MMX_OP2(psrlq),
3067 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
3068 [16 + 6] = MMX_OP2(psllq),
3069 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
3072 static const SSEFunc_0_epi sse_op_table3ai[] = {
3073 gen_helper_cvtsi2ss,
3074 gen_helper_cvtsi2sd
3077 #ifdef TARGET_X86_64
3078 static const SSEFunc_0_epl sse_op_table3aq[] = {
3079 gen_helper_cvtsq2ss,
3080 gen_helper_cvtsq2sd
3082 #endif
3084 static const SSEFunc_i_ep sse_op_table3bi[] = {
3085 gen_helper_cvttss2si,
3086 gen_helper_cvtss2si,
3087 gen_helper_cvttsd2si,
3088 gen_helper_cvtsd2si
3091 #ifdef TARGET_X86_64
3092 static const SSEFunc_l_ep sse_op_table3bq[] = {
3093 gen_helper_cvttss2sq,
3094 gen_helper_cvtss2sq,
3095 gen_helper_cvttsd2sq,
3096 gen_helper_cvtsd2sq
3098 #endif
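/* sse_op_table4 maps the 3-bit predicate immediate of
   CMPPS/CMPPD/CMPSS/CMPSD (imm8 & 7) to the matching compare helper:
   eq, lt, le, unord, neq, nlt, nle, ord. */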
3100 static const SSEFunc_0_epp sse_op_table4[8][4] = {
3101 SSE_FOP(cmpeq),
3102 SSE_FOP(cmplt),
3103 SSE_FOP(cmple),
3104 SSE_FOP(cmpunord),
3105 SSE_FOP(cmpneq),
3106 SSE_FOP(cmpnlt),
3107 SSE_FOP(cmpnle),
3108 SSE_FOP(cmpord),
3111 static const SSEFunc_0_epp sse_op_table5[256] = {
3112 [0x0c] = gen_helper_pi2fw,
3113 [0x0d] = gen_helper_pi2fd,
3114 [0x1c] = gen_helper_pf2iw,
3115 [0x1d] = gen_helper_pf2id,
3116 [0x8a] = gen_helper_pfnacc,
3117 [0x8e] = gen_helper_pfpnacc,
3118 [0x90] = gen_helper_pfcmpge,
3119 [0x94] = gen_helper_pfmin,
3120 [0x96] = gen_helper_pfrcp,
3121 [0x97] = gen_helper_pfrsqrt,
3122 [0x9a] = gen_helper_pfsub,
3123 [0x9e] = gen_helper_pfadd,
3124 [0xa0] = gen_helper_pfcmpgt,
3125 [0xa4] = gen_helper_pfmax,
3126 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3127 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3128 [0xaa] = gen_helper_pfsubr,
3129 [0xae] = gen_helper_pfacc,
3130 [0xb0] = gen_helper_pfcmpeq,
3131 [0xb4] = gen_helper_pfmul,
3132 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3133 [0xb7] = gen_helper_pmulhrw_mmx,
3134 [0xbb] = gen_helper_pswapd,
3135 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3138 struct SSEOpHelper_epp {
3139 SSEFunc_0_epp op[2];
3140 uint32_t ext_mask;
3143 struct SSEOpHelper_eppi {
3144 SSEFunc_0_eppi op[2];
3145 uint32_t ext_mask;
3148 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3149 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3150 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3151 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3152 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
3153 CPUID_EXT_PCLMULQDQ }
3154 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
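/* sse_op_table6 and sse_op_table7 cover the three-byte 0F 38 and
   0F 3A opcode maps.  op[0] is the MMX form and op[1] the 66-prefixed
   XMM form; ext_mask names the CPUID feature bit that must be present,
   checked at dispatch time in gen_sse(). */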
3156 static const struct SSEOpHelper_epp sse_op_table6[256] = {
3157 [0x00] = SSSE3_OP(pshufb),
3158 [0x01] = SSSE3_OP(phaddw),
3159 [0x02] = SSSE3_OP(phaddd),
3160 [0x03] = SSSE3_OP(phaddsw),
3161 [0x04] = SSSE3_OP(pmaddubsw),
3162 [0x05] = SSSE3_OP(phsubw),
3163 [0x06] = SSSE3_OP(phsubd),
3164 [0x07] = SSSE3_OP(phsubsw),
3165 [0x08] = SSSE3_OP(psignb),
3166 [0x09] = SSSE3_OP(psignw),
3167 [0x0a] = SSSE3_OP(psignd),
3168 [0x0b] = SSSE3_OP(pmulhrsw),
3169 [0x10] = SSE41_OP(pblendvb),
3170 [0x14] = SSE41_OP(blendvps),
3171 [0x15] = SSE41_OP(blendvpd),
3172 [0x17] = SSE41_OP(ptest),
3173 [0x1c] = SSSE3_OP(pabsb),
3174 [0x1d] = SSSE3_OP(pabsw),
3175 [0x1e] = SSSE3_OP(pabsd),
3176 [0x20] = SSE41_OP(pmovsxbw),
3177 [0x21] = SSE41_OP(pmovsxbd),
3178 [0x22] = SSE41_OP(pmovsxbq),
3179 [0x23] = SSE41_OP(pmovsxwd),
3180 [0x24] = SSE41_OP(pmovsxwq),
3181 [0x25] = SSE41_OP(pmovsxdq),
3182 [0x28] = SSE41_OP(pmuldq),
3183 [0x29] = SSE41_OP(pcmpeqq),
3184 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3185 [0x2b] = SSE41_OP(packusdw),
3186 [0x30] = SSE41_OP(pmovzxbw),
3187 [0x31] = SSE41_OP(pmovzxbd),
3188 [0x32] = SSE41_OP(pmovzxbq),
3189 [0x33] = SSE41_OP(pmovzxwd),
3190 [0x34] = SSE41_OP(pmovzxwq),
3191 [0x35] = SSE41_OP(pmovzxdq),
3192 [0x37] = SSE42_OP(pcmpgtq),
3193 [0x38] = SSE41_OP(pminsb),
3194 [0x39] = SSE41_OP(pminsd),
3195 [0x3a] = SSE41_OP(pminuw),
3196 [0x3b] = SSE41_OP(pminud),
3197 [0x3c] = SSE41_OP(pmaxsb),
3198 [0x3d] = SSE41_OP(pmaxsd),
3199 [0x3e] = SSE41_OP(pmaxuw),
3200 [0x3f] = SSE41_OP(pmaxud),
3201 [0x40] = SSE41_OP(pmulld),
3202 [0x41] = SSE41_OP(phminposuw),
3203 [0xdb] = AESNI_OP(aesimc),
3204 [0xdc] = AESNI_OP(aesenc),
3205 [0xdd] = AESNI_OP(aesenclast),
3206 [0xde] = AESNI_OP(aesdec),
3207 [0xdf] = AESNI_OP(aesdeclast),
3210 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
3211 [0x08] = SSE41_OP(roundps),
3212 [0x09] = SSE41_OP(roundpd),
3213 [0x0a] = SSE41_OP(roundss),
3214 [0x0b] = SSE41_OP(roundsd),
3215 [0x0c] = SSE41_OP(blendps),
3216 [0x0d] = SSE41_OP(blendpd),
3217 [0x0e] = SSE41_OP(pblendw),
3218 [0x0f] = SSSE3_OP(palignr),
3219 [0x14] = SSE41_SPECIAL, /* pextrb */
3220 [0x15] = SSE41_SPECIAL, /* pextrw */
3221 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3222 [0x17] = SSE41_SPECIAL, /* extractps */
3223 [0x20] = SSE41_SPECIAL, /* pinsrb */
3224 [0x21] = SSE41_SPECIAL, /* insertps */
3225 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3226 [0x40] = SSE41_OP(dpps),
3227 [0x41] = SSE41_OP(dppd),
3228 [0x42] = SSE41_OP(mpsadbw),
3229 [0x44] = PCLMULQDQ_OP(pclmulqdq),
3230 [0x60] = SSE42_OP(pcmpestrm),
3231 [0x61] = SSE42_OP(pcmpestri),
3232 [0x62] = SSE42_OP(pcmpistrm),
3233 [0x63] = SSE42_OP(pcmpistri),
3234 [0xdf] = AESNI_OP(aeskeygenassist),
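/* gen_sse() is the common decoder for MMX/SSE-encoded opcodes.  It
   derives the prefix class b1 from 66/F3/F2, looks the opcode up in
   sse_op_table1, and for SSE_SPECIAL entries re-dispatches on
   b | (b1 << 8), so the case labels fold prefix and opcode together
   (e.g. case 0x310 is F2 0F 10, movsd). */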
3237 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3238 target_ulong pc_start, int rex_r)
3240 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3241 int modrm, mod, rm, reg, reg_addr, offset_addr;
3242 SSEFunc_0_epp sse_fn_epp;
3243 SSEFunc_0_eppi sse_fn_eppi;
3244 SSEFunc_0_ppi sse_fn_ppi;
3245 SSEFunc_0_eppt sse_fn_eppt;
3247 b &= 0xff;
3248 if (s->prefix & PREFIX_DATA)
3249 b1 = 1;
3250 else if (s->prefix & PREFIX_REPZ)
3251 b1 = 2;
3252 else if (s->prefix & PREFIX_REPNZ)
3253 b1 = 3;
3254 else
3255 b1 = 0;
3256 sse_fn_epp = sse_op_table1[b][b1];
3257 if (!sse_fn_epp) {
3258 goto illegal_op;
3260 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3261 is_xmm = 1;
3262 } else {
3263 if (b1 == 0) {
3264 /* MMX case */
3265 is_xmm = 0;
3266 } else {
3267 is_xmm = 1;
3270 /* simple MMX/SSE operation */
3271 if (s->flags & HF_TS_MASK) {
3272 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3273 return;
3275 if (s->flags & HF_EM_MASK) {
3276 illegal_op:
3277 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3278 return;
3280 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3281 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3282 goto illegal_op;
3283 if (b == 0x0e) {
3284 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3285 goto illegal_op;
3286 /* femms */
3287 gen_helper_emms(cpu_env);
3288 return;
3290 if (b == 0x77) {
3291 /* emms */
3292 gen_helper_emms(cpu_env);
3293 return;
3295 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3296 the static cpu state) */
3297 if (!is_xmm) {
3298 gen_helper_enter_mmx(cpu_env);
3301 modrm = cpu_ldub_code(env, s->pc++);
3302 reg = ((modrm >> 3) & 7);
3303 if (is_xmm)
3304 reg |= rex_r;
3305 mod = (modrm >> 6) & 3;
3306 if (sse_fn_epp == SSE_SPECIAL) {
3307 b |= (b1 << 8);
3308 switch(b) {
3309 case 0x0e7: /* movntq */
3310 if (mod == 3)
3311 goto illegal_op;
3312 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3313 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3314 break;
3315 case 0x1e7: /* movntdq */
3316 case 0x02b: /* movntps */
3317 case 0x12b: /* movntpd */
3318 if (mod == 3)
3319 goto illegal_op;
3320 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3321 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3322 break;
3323 case 0x3f0: /* lddqu */
3324 if (mod == 3)
3325 goto illegal_op;
3326 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3327 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3328 break;
3329 case 0x22b: /* movntss */
3330 case 0x32b: /* movntsd */
3331 if (mod == 3)
3332 goto illegal_op;
3333 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3334 if (b1 & 1) {
3335 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3336 xmm_regs[reg]));
3337 } else {
3338 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3339 xmm_regs[reg].XMM_L(0)));
3340 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3342 break;
3343 case 0x6e: /* movd mm, ea */
3344 #ifdef TARGET_X86_64
3345 if (s->dflag == 2) {
3346 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3347 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3348 } else
3349 #endif
3351 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3352 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3353 offsetof(CPUX86State,fpregs[reg].mmx));
3354 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3355 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3357 break;
3358 case 0x16e: /* movd xmm, ea */
3359 #ifdef TARGET_X86_64
3360 if (s->dflag == 2) {
3361 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3362 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3363 offsetof(CPUX86State,xmm_regs[reg]));
3364 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3365 } else
3366 #endif
3368 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3369 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3370 offsetof(CPUX86State,xmm_regs[reg]));
3371 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3372 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3374 break;
3375 case 0x6f: /* movq mm, ea */
3376 if (mod != 3) {
3377 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3378 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3379 } else {
3380 rm = (modrm & 7);
3381 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3382 offsetof(CPUX86State,fpregs[rm].mmx));
3383 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3384 offsetof(CPUX86State,fpregs[reg].mmx));
3386 break;
3387 case 0x010: /* movups */
3388 case 0x110: /* movupd */
3389 case 0x028: /* movaps */
3390 case 0x128: /* movapd */
3391 case 0x16f: /* movdqa xmm, ea */
3392 case 0x26f: /* movdqu xmm, ea */
3393 if (mod != 3) {
3394 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3395 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3396 } else {
3397 rm = (modrm & 7) | REX_B(s);
3398 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3399 offsetof(CPUX86State,xmm_regs[rm]));
3401 break;
3402 case 0x210: /* movss xmm, ea */
3403 if (mod != 3) {
3404 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3405 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3406 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3407 gen_op_movl_T0_0();
3408 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3409 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3410 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3411 } else {
3412 rm = (modrm & 7) | REX_B(s);
3413 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3414 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3416 break;
3417 case 0x310: /* movsd xmm, ea */
3418 if (mod != 3) {
3419 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3420 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3421 gen_op_movl_T0_0();
3422 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3423 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3424 } else {
3425 rm = (modrm & 7) | REX_B(s);
3426 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3427 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3429 break;
3430 case 0x012: /* movlps */
3431 case 0x112: /* movlpd */
3432 if (mod != 3) {
3433 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3434 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3435 } else {
3436 /* movhlps */
3437 rm = (modrm & 7) | REX_B(s);
3438 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3439 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3441 break;
3442 case 0x212: /* movsldup */
3443 if (mod != 3) {
3444 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3445 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3446 } else {
3447 rm = (modrm & 7) | REX_B(s);
3448 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3449 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3450 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3451 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3453 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3454 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3455 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3456 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3457 break;
3458 case 0x312: /* movddup */
3459 if (mod != 3) {
3460 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3461 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3462 } else {
3463 rm = (modrm & 7) | REX_B(s);
3464 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3465 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3467 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3468 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3469 break;
3470 case 0x016: /* movhps */
3471 case 0x116: /* movhpd */
3472 if (mod != 3) {
3473 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3474 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3475 } else {
3476 /* movlhps */
3477 rm = (modrm & 7) | REX_B(s);
3478 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3479 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3481 break;
3482 case 0x216: /* movshdup */
3483 if (mod != 3) {
3484 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3485 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3486 } else {
3487 rm = (modrm & 7) | REX_B(s);
3488 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3489 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3490 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3491 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3493 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3494 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3495 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3496 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3497 break;
3498 case 0x178:
3499 case 0x378:
3501 int bit_index, field_length;
3503 if (b1 == 1 && reg != 0)
3504 goto illegal_op;
3505 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3506 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3507 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3508 offsetof(CPUX86State,xmm_regs[reg]));
3509 if (b1 == 1)
3510 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3511 tcg_const_i32(bit_index),
3512 tcg_const_i32(field_length));
3513 else
3514 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3515 tcg_const_i32(bit_index),
3516 tcg_const_i32(field_length));
3518 break;
3519 case 0x7e: /* movd ea, mm */
3520 #ifdef TARGET_X86_64
3521 if (s->dflag == 2) {
3522 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3523 offsetof(CPUX86State,fpregs[reg].mmx));
3524 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3525 } else
3526 #endif
3528 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3529 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3530 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3532 break;
3533 case 0x17e: /* movd ea, xmm */
3534 #ifdef TARGET_X86_64
3535 if (s->dflag == 2) {
3536 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3537 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3538 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3539 } else
3540 #endif
3542 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3543 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3544 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3546 break;
3547 case 0x27e: /* movq xmm, ea */
3548 if (mod != 3) {
3549 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3550 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3551 } else {
3552 rm = (modrm & 7) | REX_B(s);
3553 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3554 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3556 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3557 break;
3558 case 0x7f: /* movq ea, mm */
3559 if (mod != 3) {
3560 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3561 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3562 } else {
3563 rm = (modrm & 7);
3564 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3565 offsetof(CPUX86State,fpregs[reg].mmx));
3567 break;
3568 case 0x011: /* movups */
3569 case 0x111: /* movupd */
3570 case 0x029: /* movaps */
3571 case 0x129: /* movapd */
3572 case 0x17f: /* movdqa ea, xmm */
3573 case 0x27f: /* movdqu ea, xmm */
3574 if (mod != 3) {
3575 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3576 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3577 } else {
3578 rm = (modrm & 7) | REX_B(s);
3579 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3580 offsetof(CPUX86State,xmm_regs[reg]));
3582 break;
3583 case 0x211: /* movss ea, xmm */
3584 if (mod != 3) {
3585 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3586 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3587 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3588 } else {
3589 rm = (modrm & 7) | REX_B(s);
3590 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3591 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3593 break;
3594 case 0x311: /* movsd ea, xmm */
3595 if (mod != 3) {
3596 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3597 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3598 } else {
3599 rm = (modrm & 7) | REX_B(s);
3600 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3601 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3603 break;
3604 case 0x013: /* movlps */
3605 case 0x113: /* movlpd */
3606 if (mod != 3) {
3607 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3608 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3609 } else {
3610 goto illegal_op;
3612 break;
3613 case 0x017: /* movhps */
3614 case 0x117: /* movhpd */
3615 if (mod != 3) {
3616 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3617 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3618 } else {
3619 goto illegal_op;
3621 break;
3622 case 0x71: /* shift mm, im */
3623 case 0x72:
3624 case 0x73:
3625 case 0x171: /* shift xmm, im */
3626 case 0x172:
3627 case 0x173:
3628 if (b1 >= 2) {
3629 goto illegal_op;
3631 val = cpu_ldub_code(env, s->pc++);
3632 if (is_xmm) {
3633 gen_op_movl_T0_im(val);
3634 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3635 gen_op_movl_T0_0();
3636 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3637 op1_offset = offsetof(CPUX86State,xmm_t0);
3638 } else {
3639 gen_op_movl_T0_im(val);
3640 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3641 gen_op_movl_T0_0();
3642 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3643 op1_offset = offsetof(CPUX86State,mmx_t0);
3645 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3646 (((modrm >> 3)) & 7)][b1];
3647 if (!sse_fn_epp) {
3648 goto illegal_op;
3650 if (is_xmm) {
3651 rm = (modrm & 7) | REX_B(s);
3652 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3653 } else {
3654 rm = (modrm & 7);
3655 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3657 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3658 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3659 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3660 break;
3661 case 0x050: /* movmskps */
3662 rm = (modrm & 7) | REX_B(s);
3663 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3664 offsetof(CPUX86State,xmm_regs[rm]));
3665 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3666 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3667 gen_op_mov_reg_T0(OT_LONG, reg);
3668 break;
3669 case 0x150: /* movmskpd */
3670 rm = (modrm & 7) | REX_B(s);
3671 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3672 offsetof(CPUX86State,xmm_regs[rm]));
3673 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3674 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3675 gen_op_mov_reg_T0(OT_LONG, reg);
3676 break;
3677 case 0x02a: /* cvtpi2ps */
3678 case 0x12a: /* cvtpi2pd */
3679 gen_helper_enter_mmx(cpu_env);
3680 if (mod != 3) {
3681 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3682 op2_offset = offsetof(CPUX86State,mmx_t0);
3683 gen_ldq_env_A0(s->mem_index, op2_offset);
3684 } else {
3685 rm = (modrm & 7);
3686 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3688 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3689 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3690 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3691 switch(b >> 8) {
3692 case 0x0:
3693 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3694 break;
3695 default:
3696 case 0x1:
3697 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3698 break;
3700 break;
3701 case 0x22a: /* cvtsi2ss */
3702 case 0x32a: /* cvtsi2sd */
3703 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3704 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3705 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3706 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3707 if (ot == OT_LONG) {
3708 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3709 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3710 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3711 } else {
3712 #ifdef TARGET_X86_64
3713 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3714 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3715 #else
3716 goto illegal_op;
3717 #endif
3719 break;
3720 case 0x02c: /* cvttps2pi */
3721 case 0x12c: /* cvttpd2pi */
3722 case 0x02d: /* cvtps2pi */
3723 case 0x12d: /* cvtpd2pi */
3724 gen_helper_enter_mmx(cpu_env);
3725 if (mod != 3) {
3726 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3727 op2_offset = offsetof(CPUX86State,xmm_t0);
3728 gen_ldo_env_A0(s->mem_index, op2_offset);
3729 } else {
3730 rm = (modrm & 7) | REX_B(s);
3731 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3733 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3734 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3735 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3736 switch(b) {
3737 case 0x02c:
3738 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3739 break;
3740 case 0x12c:
3741 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3742 break;
3743 case 0x02d:
3744 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3745 break;
3746 case 0x12d:
3747 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3748 break;
3750 break;
3751 case 0x22c: /* cvttss2si */
3752 case 0x32c: /* cvttsd2si */
3753 case 0x22d: /* cvtss2si */
3754 case 0x32d: /* cvtsd2si */
3755 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3756 if (mod != 3) {
3757 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3758 if ((b >> 8) & 1) {
3759 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3760 } else {
3761 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3762 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3764 op2_offset = offsetof(CPUX86State,xmm_t0);
3765 } else {
3766 rm = (modrm & 7) | REX_B(s);
3767 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3769 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3770 if (ot == OT_LONG) {
3771 SSEFunc_i_ep sse_fn_i_ep =
3772 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3773 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3774 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3775 } else {
3776 #ifdef TARGET_X86_64
3777 SSEFunc_l_ep sse_fn_l_ep =
3778 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3779 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3780 #else
3781 goto illegal_op;
3782 #endif
3784 gen_op_mov_reg_T0(ot, reg);
3785 break;
3786 case 0xc4: /* pinsrw */
3787 case 0x1c4:
3788 s->rip_offset = 1;
3789 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
3790 val = cpu_ldub_code(env, s->pc++);
3791 if (b1) {
3792 val &= 7;
3793 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3794 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3795 } else {
3796 val &= 3;
3797 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3798 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3800 break;
3801 case 0xc5: /* pextrw */
3802 case 0x1c5:
3803 if (mod != 3)
3804 goto illegal_op;
3805 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3806 val = cpu_ldub_code(env, s->pc++);
3807 if (b1) {
3808 val &= 7;
3809 rm = (modrm & 7) | REX_B(s);
3810 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3811 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3812 } else {
3813 val &= 3;
3814 rm = (modrm & 7);
3815 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3816 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3818 reg = ((modrm >> 3) & 7) | rex_r;
3819 gen_op_mov_reg_T0(ot, reg);
3820 break;
3821 case 0x1d6: /* movq ea, xmm */
3822 if (mod != 3) {
3823 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3824 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3825 } else {
3826 rm = (modrm & 7) | REX_B(s);
3827 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3828 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3829 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3831 break;
3832 case 0x2d6: /* movq2dq */
3833 gen_helper_enter_mmx(cpu_env);
3834 rm = (modrm & 7);
3835 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3836 offsetof(CPUX86State,fpregs[rm].mmx));
3837 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3838 break;
3839 case 0x3d6: /* movdq2q */
3840 gen_helper_enter_mmx(cpu_env);
3841 rm = (modrm & 7) | REX_B(s);
3842 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3843 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3844 break;
3845 case 0xd7: /* pmovmskb */
3846 case 0x1d7:
3847 if (mod != 3)
3848 goto illegal_op;
3849 if (b1) {
3850 rm = (modrm & 7) | REX_B(s);
3851 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3852 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3853 } else {
3854 rm = (modrm & 7);
3855 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3856 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3858 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3859 reg = ((modrm >> 3) & 7) | rex_r;
3860 gen_op_mov_reg_T0(OT_LONG, reg);
3861 break;
3863 case 0x138:
3864 case 0x038:
3865 b = modrm;
3866 if ((b & 0xf0) == 0xf0) {
3867 goto do_0f_38_fx;
3869 modrm = cpu_ldub_code(env, s->pc++);
3870 rm = modrm & 7;
3871 reg = ((modrm >> 3) & 7) | rex_r;
3872 mod = (modrm >> 6) & 3;
3873 if (b1 >= 2) {
3874 goto illegal_op;
3877 sse_fn_epp = sse_op_table6[b].op[b1];
3878 if (!sse_fn_epp) {
3879 goto illegal_op;
3881 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3882 goto illegal_op;
3884 if (b1) {
3885 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3886 if (mod == 3) {
3887 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3888 } else {
3889 op2_offset = offsetof(CPUX86State,xmm_t0);
3890 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3891 switch (b) {
3892 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3893 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3894 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3895 gen_ldq_env_A0(s->mem_index, op2_offset +
3896 offsetof(XMMReg, XMM_Q(0)));
3897 break;
3898 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3899 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3900 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3901 (s->mem_index >> 2) - 1);
3902 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3903 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3904 offsetof(XMMReg, XMM_L(0)));
3905 break;
3906 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3907 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3908 (s->mem_index >> 2) - 1);
3909 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3910 offsetof(XMMReg, XMM_W(0)));
3911 break;
3912 case 0x2a: /* movntdqa */
3913 gen_ldo_env_A0(s->mem_index, op1_offset);
3914 return;
3915 default:
3916 gen_ldo_env_A0(s->mem_index, op2_offset);
3919 } else {
3920 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3921 if (mod == 3) {
3922 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3923 } else {
3924 op2_offset = offsetof(CPUX86State,mmx_t0);
3925 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3926 gen_ldq_env_A0(s->mem_index, op2_offset);
3929 if (sse_fn_epp == SSE_SPECIAL) {
3930 goto illegal_op;
3933 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3934 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3935 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3937 if (b == 0x17) {
3938 set_cc_op(s, CC_OP_EFLAGS);
3940 break;
3942 case 0x238:
3943 case 0x338:
3944 do_0f_38_fx:
3945 /* Various integer extensions at 0f 38 f[0-f]. */
3946 b = modrm | (b1 << 8);
3947 modrm = cpu_ldub_code(env, s->pc++);
3948 reg = ((modrm >> 3) & 7) | rex_r;
3950 switch (b) {
3951 case 0x3f0: /* crc32 Gd,Eb */
3952 case 0x3f1: /* crc32 Gd,Ey */
3953 do_crc32:
3954 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3955 goto illegal_op;
3957 if ((b & 0xff) == 0xf0) {
3958 ot = OT_BYTE;
3959 } else if (s->dflag != 2) {
3960 ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
3961 } else {
3962 ot = OT_QUAD;
3965 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3966 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3967 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3968 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3969 cpu_T[0], tcg_const_i32(8 << ot));
3971 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3972 gen_op_mov_reg_T0(ot, reg);
3973 break;
3975 case 0x1f0: /* crc32 or movbe */
3976 case 0x1f1:
3977 /* For these insns, the f3 prefix is supposed to take priority
3978 over the 66 prefix, but that is not how b1 is computed
3979 above. */
3980 if (s->prefix & PREFIX_REPNZ) {
3981 goto do_crc32;
3983 /* FALLTHRU */
3984 case 0x0f0: /* movbe Gy,My */
3985 case 0x0f1: /* movbe My,Gy */
3986 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3987 goto illegal_op;
3989 if (s->dflag != 2) {
3990 ot = (s->prefix & PREFIX_DATA ? OT_WORD : OT_LONG);
3991 } else {
3992 ot = OT_QUAD;
3995 /* Load the data incoming to the bswap. Note that the TCG
3996 implementation of bswap requires the input be zero
3997 extended. In the case of the loads, we simply know that
3998 gen_op_ld_v via gen_ldst_modrm does that already. */
3999 if ((b & 1) == 0) {
4000 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4001 } else {
4002 switch (ot) {
4003 case OT_WORD:
4004 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[reg]);
4005 break;
4006 default:
4007 tcg_gen_ext32u_tl(cpu_T[0], cpu_regs[reg]);
4008 break;
4009 case OT_QUAD:
4010 tcg_gen_mov_tl(cpu_T[0], cpu_regs[reg]);
4011 break;
4015 switch (ot) {
4016 case OT_WORD:
4017 tcg_gen_bswap16_tl(cpu_T[0], cpu_T[0]);
4018 break;
4019 default:
4020 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
4021 break;
4022 #ifdef TARGET_X86_64
4023 case OT_QUAD:
4024 tcg_gen_bswap64_tl(cpu_T[0], cpu_T[0]);
4025 break;
4026 #endif
4029 if ((b & 1) == 0) {
4030 gen_op_mov_reg_T0(ot, reg);
4031 } else {
4032 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
4034 break;
4036 case 0x0f2: /* andn Gy, By, Ey */
4037 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4038 || !(s->prefix & PREFIX_VEX)
4039 || s->vex_l != 0) {
4040 goto illegal_op;
4042 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4043 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4044 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
4045 gen_op_mov_reg_T0(ot, reg);
4046 gen_op_update1_cc();
4047 set_cc_op(s, CC_OP_LOGICB + ot);
4048 break;
4050 case 0x0f7: /* bextr Gy, Ey, By */
4051 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4052 || !(s->prefix & PREFIX_VEX)
4053 || s->vex_l != 0) {
4054 goto illegal_op;
4056 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4058 TCGv bound, zero;
4060 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4061 /* Extract START, and shift the operand.
4062 Shifts larger than operand size get zeros. */
4063 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
4064 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
4066 bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
4067 zero = tcg_const_tl(0);
4068 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
4069 cpu_T[0], zero);
4070 tcg_temp_free(zero);
4072 /* Extract the LEN into a mask. Lengths larger than
4073 operand size get all ones. */
4074 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
4075 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
4076 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
4077 cpu_A0, bound);
4078 tcg_temp_free(bound);
4079 tcg_gen_movi_tl(cpu_T[1], 1);
4080 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
4081 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
4082 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4084 gen_op_mov_reg_T0(ot, reg);
4085 gen_op_update1_cc();
4086 set_cc_op(s, CC_OP_LOGICB + ot);
4088 break;
4090 case 0x0f5: /* bzhi Gy, Ey, By */
4091 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4092 || !(s->prefix & PREFIX_VEX)
4093 || s->vex_l != 0) {
4094 goto illegal_op;
4095 }
4096 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4097 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4098 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4099 {
4100 TCGv bound = tcg_const_tl(ot == OT_QUAD ? 63 : 31);
4101 /* Note that since we're using BMILG (in order to get O
4102 cleared) we need to store the inverse into C. */
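/* Architecturally BZHI copies the source and clears every bit from
   the index upward: an index of 8 turns 0xdeadbeef into 0xef, and
   CF reports an index larger than the operand's highest bit.  */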
4103 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
4104 cpu_T[1], bound);
4105 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
4106 bound, bound, cpu_T[1]);
4107 tcg_temp_free(bound);
4108 }
4109 tcg_gen_movi_tl(cpu_A0, -1);
4110 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
4111 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
4112 gen_op_mov_reg_T0(ot, reg);
4113 gen_op_update1_cc();
4114 set_cc_op(s, CC_OP_BMILGB + ot);
4115 break;
4117 case 0x3f6: /* mulx By, Gy, rdx, Ey */
4118 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4119 || !(s->prefix & PREFIX_VEX)
4120 || s->vex_l != 0) {
4121 goto illegal_op;
4122 }
4123 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4124 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4125 switch (ot) {
4126 default:
4127 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4128 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
4129 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4130 cpu_tmp2_i32, cpu_tmp3_i32);
4131 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
4132 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
4133 break;
4134 #ifdef TARGET_X86_64
4135 case OT_QUAD:
4136 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
4137 cpu_T[0], cpu_regs[R_EDX]);
4138 break;
4139 #endif
4140 }
4141 break;
4143 case 0x3f5: /* pdep Gy, By, Ey */
4144 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4145 || !(s->prefix & PREFIX_VEX)
4146 || s->vex_l != 0) {
4147 goto illegal_op;
4148 }
4149 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4150 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4151 /* Note that by zero-extending the mask operand, we
4152 automatically handle zero-extending the result. */
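/* PDEP scatters the low-order source bits into the positions of the
   set mask bits, LSB first: pdep(0b101, mask 0b11010) places the
   bits 1,0,1 at mask positions 1,3,4, giving 0b10010.  */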
4153 if (s->dflag == 2) {
4154 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
4155 } else {
4156 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4157 }
4158 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4159 break;
4161 case 0x2f5: /* pext Gy, By, Ey */
4162 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4163 || !(s->prefix & PREFIX_VEX)
4164 || s->vex_l != 0) {
4165 goto illegal_op;
4166 }
4167 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4168 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4169 /* Note that by zero-extending the mask operand, we
4170 automatically handle zero-extending the result. */
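/* PEXT is the inverse gather: pext(0b10010, mask 0b11010) collects
   the source bits at mask positions 1,3,4 and packs them LSB
   first, giving 0b101.  */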
4171 if (s->dflag == 2) {
4172 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
4173 } else {
4174 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
4175 }
4176 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
4177 break;
4179 case 0x1f6: /* adcx Gy, Ey */
4180 case 0x2f6: /* adox Gy, Ey */
4181 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
4182 goto illegal_op;
4183 } else {
4184 TCGv carry_in, carry_out, zero;
4185 int end_op;
4187 ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
4188 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4190 /* Re-use the carry-out from a previous round. */
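/* ADCX adds with carry through CF only and ADOX through OF only,
   leaving the other arithmetic flags untouched, so two independent
   multiprecision carry chains (the MULX/ADCX/ADOX idiom) can run
   interleaved.  The live carries are kept in cc_dst (ADCX) and
   cc_src2 (ADOX) while cc_op tracks which of them are valid.  */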
4191 TCGV_UNUSED(carry_in);
4192 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
4193 switch (s->cc_op) {
4194 case CC_OP_ADCX:
4195 if (b == 0x1f6) {
4196 carry_in = cpu_cc_dst;
4197 end_op = CC_OP_ADCX;
4198 } else {
4199 end_op = CC_OP_ADCOX;
4200 }
4201 break;
4202 case CC_OP_ADOX:
4203 if (b == 0x1f6) {
4204 end_op = CC_OP_ADCOX;
4205 } else {
4206 carry_in = cpu_cc_src2;
4207 end_op = CC_OP_ADOX;
4208 }
4209 break;
4210 case CC_OP_ADCOX:
4211 end_op = CC_OP_ADCOX;
4212 carry_in = carry_out;
4213 break;
4214 default:
4215 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
4216 break;
4217 }
4218 /* If we can't reuse carry-out, get it out of EFLAGS. */
4219 if (TCGV_IS_UNUSED(carry_in)) {
4220 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
4221 gen_compute_eflags(s);
4222 }
4223 carry_in = cpu_tmp0;
4224 tcg_gen_shri_tl(carry_in, cpu_cc_src,
4225 ctz32(b == 0x1f6 ? CC_C : CC_O));
4226 tcg_gen_andi_tl(carry_in, carry_in, 1);
4227 }
4229 switch (ot) {
4230 #ifdef TARGET_X86_64
4231 case OT_LONG:
4232 /* If we know TL is 64-bit, and we want a 32-bit
4233 result, just do everything in 64-bit arithmetic. */
4234 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
4235 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
4236 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
4237 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
4238 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
4239 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
4240 break;
4241 #endif
4242 default:
4243 /* Otherwise compute the carry-out in two steps. */
4244 zero = tcg_const_tl(0);
4245 tcg_gen_add2_tl(cpu_T[0], carry_out,
4246 cpu_T[0], zero,
4247 carry_in, zero);
4248 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
4249 cpu_regs[reg], carry_out,
4250 cpu_T[0], zero);
4251 tcg_temp_free(zero);
4252 break;
4253 }
4254 set_cc_op(s, end_op);
4255 }
4256 break;
4258 case 0x1f7: /* shlx Gy, Ey, By */
4259 case 0x2f7: /* sarx Gy, Ey, By */
4260 case 0x3f7: /* shrx Gy, Ey, By */
4261 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4262 || !(s->prefix & PREFIX_VEX)
4263 || s->vex_l != 0) {
4264 goto illegal_op;
4265 }
4266 ot = (s->dflag == 2 ? OT_QUAD : OT_LONG);
4267 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4268 if (ot == OT_QUAD) {
4269 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
4270 } else {
4271 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
4272 }
4273 if (b == 0x1f7) {
4274 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4275 } else if (b == 0x2f7) {
4276 if (ot != OT_QUAD) {
4277 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4278 }
4279 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4280 } else {
4281 if (ot != OT_QUAD) {
4282 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4283 }
4284 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4285 }
4286 gen_op_mov_reg_T0(ot, reg);
4287 break;
4289 case 0x0f3:
4290 case 0x1f3:
4291 case 0x2f3:
4292 case 0x3f3: /* Group 17 */
4293 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4294 || !(s->prefix & PREFIX_VEX)
4295 || s->vex_l != 0) {
4296 goto illegal_op;
4297 }
4298 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4299 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4301 switch (reg & 7) {
4302 case 1: /* blsr By,Ey */
4303 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4304 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4305 gen_op_mov_reg_T0(ot, s->vex_v);
4306 gen_op_update2_cc();
4307 set_cc_op(s, CC_OP_BMILGB + ot);
4308 break;
4310 case 2: /* blsmsk By,Ey */
4311 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4312 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4313 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4314 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4315 set_cc_op(s, CC_OP_BMILGB + ot);
4316 break;
4318 case 3: /* blsi By, Ey */
4319 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4320 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4321 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4322 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4323 set_cc_op(s, CC_OP_BMILGB + ot);
4324 break;
4326 default:
4327 goto illegal_op;
4328 }
4329 break;
4331 default:
4332 goto illegal_op;
4333 }
4334 break;
4336 case 0x03a:
4337 case 0x13a:
4338 b = modrm;
4339 modrm = cpu_ldub_code(env, s->pc++);
4340 rm = modrm & 7;
4341 reg = ((modrm >> 3) & 7) | rex_r;
4342 mod = (modrm >> 6) & 3;
4343 if (b1 >= 2) {
4344 goto illegal_op;
4345 }
4347 sse_fn_eppi = sse_op_table7[b].op[b1];
4348 if (!sse_fn_eppi) {
4349 goto illegal_op;
4350 }
4351 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4352 goto illegal_op;
4354 if (sse_fn_eppi == SSE_SPECIAL) {
4355 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
4356 rm = (modrm & 7) | REX_B(s);
4357 if (mod != 3)
4358 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4359 reg = ((modrm >> 3) & 7) | rex_r;
4360 val = cpu_ldub_code(env, s->pc++);
4361 switch (b) {
4362 case 0x14: /* pextrb */
4363 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4364 xmm_regs[reg].XMM_B(val & 15)));
4365 if (mod == 3)
4366 gen_op_mov_reg_T0(ot, rm);
4367 else
4368 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
4369 (s->mem_index >> 2) - 1);
4370 break;
4371 case 0x15: /* pextrw */
4372 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4373 xmm_regs[reg].XMM_W(val & 7)));
4374 if (mod == 3)
4375 gen_op_mov_reg_T0(ot, rm);
4376 else
4377 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
4378 (s->mem_index >> 2) - 1);
4379 break;
4380 case 0x16:
4381 if (ot == OT_LONG) { /* pextrd */
4382 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4383 offsetof(CPUX86State,
4384 xmm_regs[reg].XMM_L(val & 3)));
4385 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4386 if (mod == 3)
4387 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4388 else
4389 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4390 (s->mem_index >> 2) - 1);
4391 } else { /* pextrq */
4392 #ifdef TARGET_X86_64
4393 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4394 offsetof(CPUX86State,
4395 xmm_regs[reg].XMM_Q(val & 1)));
4396 if (mod == 3)
4397 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
4398 else
4399 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
4400 (s->mem_index >> 2) - 1);
4401 #else
4402 goto illegal_op;
4403 #endif
4404 }
4405 break;
4406 case 0x17: /* extractps */
4407 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4408 xmm_regs[reg].XMM_L(val & 3)));
4409 if (mod == 3)
4410 gen_op_mov_reg_T0(ot, rm);
4411 else
4412 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
4413 (s->mem_index >> 2) - 1);
4414 break;
4415 case 0x20: /* pinsrb */
4416 if (mod == 3)
4417 gen_op_mov_TN_reg(OT_LONG, 0, rm);
4418 else
4419 tcg_gen_qemu_ld8u(cpu_T[0], cpu_A0,
4420 (s->mem_index >> 2) - 1);
4421 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4422 xmm_regs[reg].XMM_B(val & 15)));
4423 break;
4424 case 0x21: /* insertps */
4425 if (mod == 3) {
4426 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4427 offsetof(CPUX86State,xmm_regs[rm]
4428 .XMM_L((val >> 6) & 3)));
4429 } else {
4430 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4431 (s->mem_index >> 2) - 1);
4432 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4433 }
4434 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4435 offsetof(CPUX86State,xmm_regs[reg]
4436 .XMM_L((val >> 4) & 3)));
4437 if ((val >> 0) & 1)
4438 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4439 cpu_env, offsetof(CPUX86State,
4440 xmm_regs[reg].XMM_L(0)));
4441 if ((val >> 1) & 1)
4442 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4443 cpu_env, offsetof(CPUX86State,
4444 xmm_regs[reg].XMM_L(1)));
4445 if ((val >> 2) & 1)
4446 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4447 cpu_env, offsetof(CPUX86State,
4448 xmm_regs[reg].XMM_L(2)));
4449 if ((val >> 3) & 1)
4450 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4451 cpu_env, offsetof(CPUX86State,
4452 xmm_regs[reg].XMM_L(3)));
4453 break;
4454 case 0x22:
4455 if (ot == OT_LONG) { /* pinsrd */
4456 if (mod == 3)
4457 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
4458 else
4459 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
4460 (s->mem_index >> 2) - 1);
4461 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
4462 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4463 offsetof(CPUX86State,
4464 xmm_regs[reg].XMM_L(val & 3)));
4465 } else { /* pinsrq */
4466 #ifdef TARGET_X86_64
4467 if (mod == 3)
4468 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4469 else
4470 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
4471 (s->mem_index >> 2) - 1);
4472 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4473 offsetof(CPUX86State,
4474 xmm_regs[reg].XMM_Q(val & 1)));
4475 #else
4476 goto illegal_op;
4477 #endif
4478 }
4479 break;
4480 }
4481 return;
4482 }
4484 if (b1) {
4485 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4486 if (mod == 3) {
4487 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4488 } else {
4489 op2_offset = offsetof(CPUX86State,xmm_t0);
4490 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4491 gen_ldo_env_A0(s->mem_index, op2_offset);
4492 }
4493 } else {
4494 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4495 if (mod == 3) {
4496 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4497 } else {
4498 op2_offset = offsetof(CPUX86State,mmx_t0);
4499 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4500 gen_ldq_env_A0(s->mem_index, op2_offset);
4501 }
4502 }
4503 val = cpu_ldub_code(env, s->pc++);
4505 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4506 set_cc_op(s, CC_OP_EFLAGS);
4508 if (s->dflag == 2)
4509 /* The helper must use entire 64-bit gp registers */
4510 val |= 1 << 8;
4511 }
4513 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4514 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4515 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4516 break;
4518 case 0x33a:
4519 /* Various integer extensions at 0f 3a f[0-f]. */
4520 b = modrm | (b1 << 8);
4521 modrm = cpu_ldub_code(env, s->pc++);
4522 reg = ((modrm >> 3) & 7) | rex_r;
4524 switch (b) {
4525 case 0x3f0: /* rorx Gy,Ey, Ib */
4526 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4527 || !(s->prefix & PREFIX_VEX)
4528 || s->vex_l != 0) {
4529 goto illegal_op;
4530 }
4531 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
4532 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4533 b = cpu_ldub_code(env, s->pc++);
4534 if (ot == OT_QUAD) {
4535 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4536 } else {
4537 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4538 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4539 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4540 }
4541 gen_op_mov_reg_T0(ot, reg);
4542 break;
4544 default:
4545 goto illegal_op;
4546 }
4547 break;
4549 default:
4550 goto illegal_op;
4551 }
4552 } else {
4553 /* generic MMX or SSE operation */
4554 switch(b) {
4555 case 0x70: /* pshufx insn */
4556 case 0xc6: /* pshufx insn */
4557 case 0xc2: /* compare insns */
4558 s->rip_offset = 1;
4559 break;
4560 default:
4561 break;
4562 }
4563 if (is_xmm) {
4564 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4565 if (mod != 3) {
4566 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4567 op2_offset = offsetof(CPUX86State,xmm_t0);
4568 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4569 b == 0xc2)) {
4570 /* specific case for SSE single instructions */
4571 if (b1 == 2) {
4572 /* 32 bit access */
4573 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4574 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4575 } else {
4576 /* 64 bit access */
4577 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4578 }
4579 } else {
4580 gen_ldo_env_A0(s->mem_index, op2_offset);
4581 }
4582 } else {
4583 rm = (modrm & 7) | REX_B(s);
4584 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4585 }
4586 } else {
4587 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4588 if (mod != 3) {
4589 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4590 op2_offset = offsetof(CPUX86State,mmx_t0);
4591 gen_ldq_env_A0(s->mem_index, op2_offset);
4592 } else {
4593 rm = (modrm & 7);
4594 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4595 }
4596 }
4597 switch(b) {
4598 case 0x0f: /* 3DNow! data insns */
4599 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4600 goto illegal_op;
4601 val = cpu_ldub_code(env, s->pc++);
4602 sse_fn_epp = sse_op_table5[val];
4603 if (!sse_fn_epp) {
4604 goto illegal_op;
4605 }
4606 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4607 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4608 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4609 break;
4610 case 0x70: /* pshufx insn */
4611 case 0xc6: /* pshufx insn */
4612 val = cpu_ldub_code(env, s->pc++);
4613 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4614 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4615 /* XXX: introduce a new table? */
4616 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4617 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4618 break;
4619 case 0xc2:
4620 /* compare insns */
4621 val = cpu_ldub_code(env, s->pc++);
4622 if (val >= 8)
4623 goto illegal_op;
4624 sse_fn_epp = sse_op_table4[val][b1];
4626 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4627 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4628 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4629 break;
4630 case 0xf7:
4631 /* maskmov: we must prepare A0 */
4632 if (mod != 3)
4633 goto illegal_op;
4634 #ifdef TARGET_X86_64
4635 if (s->aflag == 2) {
4636 gen_op_movq_A0_reg(R_EDI);
4637 } else
4638 #endif
4639 {
4640 gen_op_movl_A0_reg(R_EDI);
4641 if (s->aflag == 0)
4642 gen_op_andl_A0_ffff();
4643 }
4644 gen_add_A0_ds_seg(s);
4646 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4647 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4648 /* XXX: introduce a new table? */
4649 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4650 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4651 break;
4652 default:
4653 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4654 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4655 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4656 break;
4657 }
4658 if (b == 0x2e || b == 0x2f) {
4659 set_cc_op(s, CC_OP_EFLAGS);
4660 }
4661 }
4662 }
4664 /* convert one instruction. s->is_jmp is set if the translation must
4665 be stopped. Return the next pc value */
4666 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4667 target_ulong pc_start)
4668 {
4669 int b, prefixes, aflag, dflag;
4670 int shift, ot;
4671 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4672 target_ulong next_eip, tval;
4673 int rex_w, rex_r;
4675 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4676 tcg_gen_debug_insn_start(pc_start);
4677 }
4678 s->pc = pc_start;
4679 prefixes = 0;
4680 s->override = -1;
4681 rex_w = -1;
4682 rex_r = 0;
4683 #ifdef TARGET_X86_64
4684 s->rex_x = 0;
4685 s->rex_b = 0;
4686 x86_64_hregs = 0;
4687 #endif
4688 s->rip_offset = 0; /* for relative ip address */
4689 s->vex_l = 0;
4690 s->vex_v = 0;
4691 next_byte:
4692 b = cpu_ldub_code(env, s->pc);
4693 s->pc++;
4694 /* Collect prefixes. */
4695 switch (b) {
4696 case 0xf3:
4697 prefixes |= PREFIX_REPZ;
4698 goto next_byte;
4699 case 0xf2:
4700 prefixes |= PREFIX_REPNZ;
4701 goto next_byte;
4702 case 0xf0:
4703 prefixes |= PREFIX_LOCK;
4704 goto next_byte;
4705 case 0x2e:
4706 s->override = R_CS;
4707 goto next_byte;
4708 case 0x36:
4709 s->override = R_SS;
4710 goto next_byte;
4711 case 0x3e:
4712 s->override = R_DS;
4713 goto next_byte;
4714 case 0x26:
4715 s->override = R_ES;
4716 goto next_byte;
4717 case 0x64:
4718 s->override = R_FS;
4719 goto next_byte;
4720 case 0x65:
4721 s->override = R_GS;
4722 goto next_byte;
4723 case 0x66:
4724 prefixes |= PREFIX_DATA;
4725 goto next_byte;
4726 case 0x67:
4727 prefixes |= PREFIX_ADR;
4728 goto next_byte;
4729 #ifdef TARGET_X86_64
4730 case 0x40 ... 0x4f:
4731 if (CODE64(s)) {
4732 /* REX prefix */
4733 rex_w = (b >> 3) & 1;
4734 rex_r = (b & 0x4) << 1;
4735 s->rex_x = (b & 0x2) << 2;
4736 REX_B(s) = (b & 0x1) << 3;
4737 x86_64_hregs = 1; /* select uniform byte register addressing */
4738 goto next_byte;
4739 }
4740 break;
4741 #endif
4742 case 0xc5: /* 2-byte VEX */
4743 case 0xc4: /* 3-byte VEX */
4744 /* The 0xc4/0xc5 bytes are interpreted as a VEX prefix only in
4745 32/64-bit code outside vm86 mode; otherwise they are the LES/LDS opcodes. */
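/* VEX byte layout decoded below: the 2-byte form (0xc5) packs
   R.vvvv.L.pp into one payload byte; the 3-byte form (0xc4) packs
   R.X.B.mmmmm into the first payload byte and W.vvvv.L.pp into the
   second.  R, X, B and vvvv are stored complemented (hence the
   ~vex extractions), mmmmm selects the implied 0f/0f38/0f3a escape
   and pp the implied 66/f3/f2 prefix via pp_prefix[].  */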
4746 if (s->code32 && !s->vm86) {
4747 static const int pp_prefix[4] = {
4748 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4749 };
4750 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4752 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4753 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4754 otherwise the instruction is LES or LDS. */
4755 break;
4756 }
4757 s->pc++;
4759 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4760 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4761 | PREFIX_LOCK | PREFIX_DATA)) {
4762 goto illegal_op;
4763 }
4764 #ifdef TARGET_X86_64
4765 if (x86_64_hregs) {
4766 goto illegal_op;
4767 }
4768 #endif
4769 rex_r = (~vex2 >> 4) & 8;
4770 if (b == 0xc5) {
4771 vex3 = vex2;
4772 b = cpu_ldub_code(env, s->pc++);
4773 } else {
4774 #ifdef TARGET_X86_64
4775 s->rex_x = (~vex2 >> 3) & 8;
4776 s->rex_b = (~vex2 >> 2) & 8;
4777 #endif
4778 vex3 = cpu_ldub_code(env, s->pc++);
4779 rex_w = (vex3 >> 7) & 1;
4780 switch (vex2 & 0x1f) {
4781 case 0x01: /* Implied 0f leading opcode bytes. */
4782 b = cpu_ldub_code(env, s->pc++) | 0x100;
4783 break;
4784 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4785 b = 0x138;
4786 break;
4787 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4788 b = 0x13a;
4789 break;
4790 default: /* Reserved for future use. */
4791 goto illegal_op;
4792 }
4793 }
4794 s->vex_v = (~vex3 >> 3) & 0xf;
4795 s->vex_l = (vex3 >> 2) & 1;
4796 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4797 }
4798 break;
4799 }
4801 /* Post-process prefixes. */
4802 if (CODE64(s)) {
4803 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4804 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4805 over 0x66 if both are present. */
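/* For example, "48 89 c3" (rex.w mov) moves 64 bits, "66 89 d8"
   moves 16 bits, and "66 48 89 c3" still moves 64 bits because
   rex.w wins; dflag encodes these as 2, 0 and 2 respectively.  */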
4806 dflag = (rex_w > 0 ? 2 : prefixes & PREFIX_DATA ? 0 : 1);
4807 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4808 aflag = (prefixes & PREFIX_ADR ? 1 : 2);
4809 } else {
4810 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4811 dflag = s->code32;
4812 if (prefixes & PREFIX_DATA) {
4813 dflag ^= 1;
4814 }
4815 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4816 aflag = s->code32;
4817 if (prefixes & PREFIX_ADR) {
4818 aflag ^= 1;
4819 }
4820 }
4822 s->prefix = prefixes;
4823 s->aflag = aflag;
4824 s->dflag = dflag;
4826 /* lock generation */
4827 if (prefixes & PREFIX_LOCK)
4828 gen_helper_lock();
4830 /* now check op code */
4831 reswitch:
4832 switch(b) {
4833 case 0x0f:
4834 /**************************/
4835 /* extended op code */
4836 b = cpu_ldub_code(env, s->pc++) | 0x100;
4837 goto reswitch;
4839 /**************************/
4840 /* arith & logic */
4841 case 0x00 ... 0x05:
4842 case 0x08 ... 0x0d:
4843 case 0x10 ... 0x15:
4844 case 0x18 ... 0x1d:
4845 case 0x20 ... 0x25:
4846 case 0x28 ... 0x2d:
4847 case 0x30 ... 0x35:
4848 case 0x38 ... 0x3d:
4849 {
4850 int op, f, val;
4851 op = (b >> 3) & 7;
4852 f = (b >> 1) & 3;
4854 if ((b & 1) == 0)
4855 ot = OT_BYTE;
4856 else
4857 ot = dflag + OT_WORD;
4859 switch(f) {
4860 case 0: /* OP Ev, Gv */
4861 modrm = cpu_ldub_code(env, s->pc++);
4862 reg = ((modrm >> 3) & 7) | rex_r;
4863 mod = (modrm >> 6) & 3;
4864 rm = (modrm & 7) | REX_B(s);
4865 if (mod != 3) {
4866 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4867 opreg = OR_TMP0;
4868 } else if (op == OP_XORL && rm == reg) {
4869 xor_zero:
4870 /* xor reg, reg optimisation */
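/* xor r,r is the canonical zeroing idiom, so it is special-cased:
   the old register value is never read and the flags become
   compile-time constants (CC_OP_CLR: Z and P set, the rest
   clear), removing any dependency on the previous contents.  */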
4871 set_cc_op(s, CC_OP_CLR);
4872 gen_op_movl_T0_0();
4873 gen_op_mov_reg_T0(ot, reg);
4874 break;
4875 } else {
4876 opreg = rm;
4877 }
4878 gen_op_mov_TN_reg(ot, 1, reg);
4879 gen_op(s, op, ot, opreg);
4880 break;
4881 case 1: /* OP Gv, Ev */
4882 modrm = cpu_ldub_code(env, s->pc++);
4883 mod = (modrm >> 6) & 3;
4884 reg = ((modrm >> 3) & 7) | rex_r;
4885 rm = (modrm & 7) | REX_B(s);
4886 if (mod != 3) {
4887 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4888 gen_op_ld_T1_A0(ot + s->mem_index);
4889 } else if (op == OP_XORL && rm == reg) {
4890 goto xor_zero;
4891 } else {
4892 gen_op_mov_TN_reg(ot, 1, rm);
4893 }
4894 gen_op(s, op, ot, reg);
4895 break;
4896 case 2: /* OP A, Iv */
4897 val = insn_get(env, s, ot);
4898 gen_op_movl_T1_im(val);
4899 gen_op(s, op, ot, OR_EAX);
4900 break;
4901 }
4902 }
4903 break;
4905 case 0x82:
4906 if (CODE64(s))
4907 goto illegal_op;
4908 case 0x80: /* GRP1 */
4909 case 0x81:
4910 case 0x83:
4911 {
4912 int val;
4914 if ((b & 1) == 0)
4915 ot = OT_BYTE;
4916 else
4917 ot = dflag + OT_WORD;
4919 modrm = cpu_ldub_code(env, s->pc++);
4920 mod = (modrm >> 6) & 3;
4921 rm = (modrm & 7) | REX_B(s);
4922 op = (modrm >> 3) & 7;
4924 if (mod != 3) {
4925 if (b == 0x83)
4926 s->rip_offset = 1;
4927 else
4928 s->rip_offset = insn_const_size(ot);
4929 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4930 opreg = OR_TMP0;
4931 } else {
4932 opreg = rm;
4933 }
4935 switch(b) {
4936 default:
4937 case 0x80:
4938 case 0x81:
4939 case 0x82:
4940 val = insn_get(env, s, ot);
4941 break;
4942 case 0x83:
4943 val = (int8_t)insn_get(env, s, OT_BYTE);
4944 break;
4945 }
4946 gen_op_movl_T1_im(val);
4947 gen_op(s, op, ot, opreg);
4948 }
4949 break;
4951 /**************************/
4952 /* inc, dec, and other misc arith */
4953 case 0x40 ... 0x47: /* inc Gv */
4954 ot = dflag ? OT_LONG : OT_WORD;
4955 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4956 break;
4957 case 0x48 ... 0x4f: /* dec Gv */
4958 ot = dflag ? OT_LONG : OT_WORD;
4959 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4960 break;
4961 case 0xf6: /* GRP3 */
4962 case 0xf7:
4963 if ((b & 1) == 0)
4964 ot = OT_BYTE;
4965 else
4966 ot = dflag + OT_WORD;
4968 modrm = cpu_ldub_code(env, s->pc++);
4969 mod = (modrm >> 6) & 3;
4970 rm = (modrm & 7) | REX_B(s);
4971 op = (modrm >> 3) & 7;
4972 if (mod != 3) {
4973 if (op == 0)
4974 s->rip_offset = insn_const_size(ot);
4975 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4976 gen_op_ld_T0_A0(ot + s->mem_index);
4977 } else {
4978 gen_op_mov_TN_reg(ot, 0, rm);
4979 }
4981 switch(op) {
4982 case 0: /* test */
4983 val = insn_get(env, s, ot);
4984 gen_op_movl_T1_im(val);
4985 gen_op_testl_T0_T1_cc();
4986 set_cc_op(s, CC_OP_LOGICB + ot);
4987 break;
4988 case 2: /* not */
4989 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4990 if (mod != 3) {
4991 gen_op_st_T0_A0(ot + s->mem_index);
4992 } else {
4993 gen_op_mov_reg_T0(ot, rm);
4994 }
4995 break;
4996 case 3: /* neg */
4997 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4998 if (mod != 3) {
4999 gen_op_st_T0_A0(ot + s->mem_index);
5000 } else {
5001 gen_op_mov_reg_T0(ot, rm);
5002 }
5003 gen_op_update_neg_cc();
5004 set_cc_op(s, CC_OP_SUBB + ot);
5005 break;
5006 case 4: /* mul */
5007 switch(ot) {
5008 case OT_BYTE:
5009 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
5010 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5011 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
5012 /* XXX: use 32 bit mul which could be faster */
5013 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5014 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5015 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5016 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
5017 set_cc_op(s, CC_OP_MULB);
5018 break;
5019 case OT_WORD:
5020 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
5021 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5022 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
5023 /* XXX: use 32 bit mul which could be faster */
5024 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5025 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5026 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5027 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
5028 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5029 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
5030 set_cc_op(s, CC_OP_MULW);
5031 break;
5032 default:
5033 case OT_LONG:
5034 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5035 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
5036 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5037 cpu_tmp2_i32, cpu_tmp3_i32);
5038 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
5039 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
5040 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5041 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
5042 set_cc_op(s, CC_OP_MULL);
5043 break;
5044 #ifdef TARGET_X86_64
5045 case OT_QUAD:
5046 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
5047 cpu_T[0], cpu_regs[R_EAX]);
5048 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5049 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
5050 set_cc_op(s, CC_OP_MULQ);
5051 break;
5052 #endif
5053 }
5054 break;
5055 case 5: /* imul */
5056 switch(ot) {
5057 case OT_BYTE:
5058 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
5059 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5060 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
5061 /* XXX: use 32 bit mul which could be faster */
5062 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5063 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5064 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5065 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
5066 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5067 set_cc_op(s, CC_OP_MULB);
5068 break;
5069 case OT_WORD:
5070 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
5071 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5072 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5073 /* XXX: use 32 bit mul which could be faster */
5074 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5075 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5076 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5077 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5078 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5079 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
5080 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5081 set_cc_op(s, CC_OP_MULW);
5082 break;
5083 default:
5084 case OT_LONG:
5085 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5086 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
5087 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5088 cpu_tmp2_i32, cpu_tmp3_i32);
5089 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
5090 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
5091 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5092 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5093 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5094 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5095 set_cc_op(s, CC_OP_MULL);
5096 break;
5097 #ifdef TARGET_X86_64
5098 case OT_QUAD:
5099 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
5100 cpu_T[0], cpu_regs[R_EAX]);
5101 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
5102 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
5103 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
5104 set_cc_op(s, CC_OP_MULQ);
5105 break;
5106 #endif
5107 }
5108 break;
5109 case 6: /* div */
5110 switch(ot) {
5111 case OT_BYTE:
5112 gen_jmp_im(pc_start - s->cs_base);
5113 gen_helper_divb_AL(cpu_env, cpu_T[0]);
5114 break;
5115 case OT_WORD:
5116 gen_jmp_im(pc_start - s->cs_base);
5117 gen_helper_divw_AX(cpu_env, cpu_T[0]);
5118 break;
5119 default:
5120 case OT_LONG:
5121 gen_jmp_im(pc_start - s->cs_base);
5122 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
5123 break;
5124 #ifdef TARGET_X86_64
5125 case OT_QUAD:
5126 gen_jmp_im(pc_start - s->cs_base);
5127 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
5128 break;
5129 #endif
5130 }
5131 break;
5132 case 7: /* idiv */
5133 switch(ot) {
5134 case OT_BYTE:
5135 gen_jmp_im(pc_start - s->cs_base);
5136 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
5137 break;
5138 case OT_WORD:
5139 gen_jmp_im(pc_start - s->cs_base);
5140 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
5141 break;
5142 default:
5143 case OT_LONG:
5144 gen_jmp_im(pc_start - s->cs_base);
5145 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
5146 break;
5147 #ifdef TARGET_X86_64
5148 case OT_QUAD:
5149 gen_jmp_im(pc_start - s->cs_base);
5150 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
5151 break;
5152 #endif
5153 }
5154 break;
5155 default:
5156 goto illegal_op;
5157 }
5158 break;
5160 case 0xfe: /* GRP4 */
5161 case 0xff: /* GRP5 */
5162 if ((b & 1) == 0)
5163 ot = OT_BYTE;
5164 else
5165 ot = dflag + OT_WORD;
5167 modrm = cpu_ldub_code(env, s->pc++);
5168 mod = (modrm >> 6) & 3;
5169 rm = (modrm & 7) | REX_B(s);
5170 op = (modrm >> 3) & 7;
5171 if (op >= 2 && b == 0xfe) {
5172 goto illegal_op;
5173 }
5174 if (CODE64(s)) {
5175 if (op == 2 || op == 4) {
5176 /* operand size for jumps is 64 bit */
5177 ot = OT_QUAD;
5178 } else if (op == 3 || op == 5) {
5179 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
5180 } else if (op == 6) {
5181 /* default push size is 64 bit */
5182 ot = dflag ? OT_QUAD : OT_WORD;
5183 }
5184 }
5185 if (mod != 3) {
5186 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5187 if (op >= 2 && op != 3 && op != 5)
5188 gen_op_ld_T0_A0(ot + s->mem_index);
5189 } else {
5190 gen_op_mov_TN_reg(ot, 0, rm);
5191 }
5193 switch(op) {
5194 case 0: /* inc Ev */
5195 if (mod != 3)
5196 opreg = OR_TMP0;
5197 else
5198 opreg = rm;
5199 gen_inc(s, ot, opreg, 1);
5200 break;
5201 case 1: /* dec Ev */
5202 if (mod != 3)
5203 opreg = OR_TMP0;
5204 else
5205 opreg = rm;
5206 gen_inc(s, ot, opreg, -1);
5207 break;
5208 case 2: /* call Ev */
5209 /* XXX: optimize if memory (no 'and' is necessary) */
5210 if (s->dflag == 0)
5211 gen_op_andl_T0_ffff();
5212 next_eip = s->pc - s->cs_base;
5213 gen_movtl_T1_im(next_eip);
5214 gen_push_T1(s);
5215 gen_op_jmp_T0();
5216 gen_eob(s);
5217 break;
5218 case 3: /* lcall Ev */
5219 gen_op_ld_T1_A0(ot + s->mem_index);
5220 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5221 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5222 do_lcall:
5223 if (s->pe && !s->vm86) {
5224 gen_update_cc_op(s);
5225 gen_jmp_im(pc_start - s->cs_base);
5226 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5227 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5228 tcg_const_i32(dflag),
5229 tcg_const_i32(s->pc - pc_start));
5230 } else {
5231 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5232 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
5233 tcg_const_i32(dflag),
5234 tcg_const_i32(s->pc - s->cs_base));
5235 }
5236 gen_eob(s);
5237 break;
5238 case 4: /* jmp Ev */
5239 if (s->dflag == 0)
5240 gen_op_andl_T0_ffff();
5241 gen_op_jmp_T0();
5242 gen_eob(s);
5243 break;
5244 case 5: /* ljmp Ev */
5245 gen_op_ld_T1_A0(ot + s->mem_index);
5246 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5247 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5248 do_ljmp:
5249 if (s->pe && !s->vm86) {
5250 gen_update_cc_op(s);
5251 gen_jmp_im(pc_start - s->cs_base);
5252 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5253 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
5254 tcg_const_i32(s->pc - pc_start));
5255 } else {
5256 gen_op_movl_seg_T0_vm(R_CS);
5257 gen_op_movl_T0_T1();
5258 gen_op_jmp_T0();
5259 }
5260 gen_eob(s);
5261 break;
5262 case 6: /* push Ev */
5263 gen_push_T0(s);
5264 break;
5265 default:
5266 goto illegal_op;
5267 }
5268 break;
5270 case 0x84: /* test Ev, Gv */
5271 case 0x85:
5272 if ((b & 1) == 0)
5273 ot = OT_BYTE;
5274 else
5275 ot = dflag + OT_WORD;
5277 modrm = cpu_ldub_code(env, s->pc++);
5278 reg = ((modrm >> 3) & 7) | rex_r;
5280 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5281 gen_op_mov_TN_reg(ot, 1, reg);
5282 gen_op_testl_T0_T1_cc();
5283 set_cc_op(s, CC_OP_LOGICB + ot);
5284 break;
5286 case 0xa8: /* test eAX, Iv */
5287 case 0xa9:
5288 if ((b & 1) == 0)
5289 ot = OT_BYTE;
5290 else
5291 ot = dflag + OT_WORD;
5292 val = insn_get(env, s, ot);
5294 gen_op_mov_TN_reg(ot, 0, OR_EAX);
5295 gen_op_movl_T1_im(val);
5296 gen_op_testl_T0_T1_cc();
5297 set_cc_op(s, CC_OP_LOGICB + ot);
5298 break;
5300 case 0x98: /* CWDE/CBW */
5301 #ifdef TARGET_X86_64
5302 if (dflag == 2) {
5303 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5304 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5305 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
5306 } else
5307 #endif
5308 if (dflag == 1) {
5309 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5310 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5311 gen_op_mov_reg_T0(OT_LONG, R_EAX);
5312 } else {
5313 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
5314 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5315 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5316 }
5317 break;
5318 case 0x99: /* CDQ/CWD */
5319 #ifdef TARGET_X86_64
5320 if (dflag == 2) {
5321 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5322 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5323 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
5324 } else
5325 #endif
5326 if (dflag == 1) {
5327 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5328 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5329 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5330 gen_op_mov_reg_T0(OT_LONG, R_EDX);
5331 } else {
5332 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
5333 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5334 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5335 gen_op_mov_reg_T0(OT_WORD, R_EDX);
5336 }
5337 break;
5338 case 0x1af: /* imul Gv, Ev */
5339 case 0x69: /* imul Gv, Ev, I */
5340 case 0x6b:
5341 ot = dflag + OT_WORD;
5342 modrm = cpu_ldub_code(env, s->pc++);
5343 reg = ((modrm >> 3) & 7) | rex_r;
5344 if (b == 0x69)
5345 s->rip_offset = insn_const_size(ot);
5346 else if (b == 0x6b)
5347 s->rip_offset = 1;
5348 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5349 if (b == 0x69) {
5350 val = insn_get(env, s, ot);
5351 gen_op_movl_T1_im(val);
5352 } else if (b == 0x6b) {
5353 val = (int8_t)insn_get(env, s, OT_BYTE);
5354 gen_op_movl_T1_im(val);
5355 } else {
5356 gen_op_mov_TN_reg(ot, 1, reg);
5357 }
5358 switch (ot) {
5359 #ifdef TARGET_X86_64
5360 case OT_QUAD:
5361 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5362 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5363 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5364 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5365 break;
5366 #endif
5367 case OT_LONG:
5368 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5369 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5370 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5371 cpu_tmp2_i32, cpu_tmp3_i32);
5372 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5373 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5374 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5375 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5376 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5377 break;
5378 default:
5379 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5380 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5381 /* XXX: use 32 bit mul which could be faster */
5382 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5383 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5384 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5385 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5386 gen_op_mov_reg_T0(ot, reg);
5387 break;
5388 }
5389 set_cc_op(s, CC_OP_MULB + ot);
5390 break;
5391 case 0x1c0:
5392 case 0x1c1: /* xadd Ev, Gv */
5393 if ((b & 1) == 0)
5394 ot = OT_BYTE;
5395 else
5396 ot = dflag + OT_WORD;
5397 modrm = cpu_ldub_code(env, s->pc++);
5398 reg = ((modrm >> 3) & 7) | rex_r;
5399 mod = (modrm >> 6) & 3;
5400 if (mod == 3) {
5401 rm = (modrm & 7) | REX_B(s);
5402 gen_op_mov_TN_reg(ot, 0, reg);
5403 gen_op_mov_TN_reg(ot, 1, rm);
5404 gen_op_addl_T0_T1();
5405 gen_op_mov_reg_T1(ot, reg);
5406 gen_op_mov_reg_T0(ot, rm);
5407 } else {
5408 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5409 gen_op_mov_TN_reg(ot, 0, reg);
5410 gen_op_ld_T1_A0(ot + s->mem_index);
5411 gen_op_addl_T0_T1();
5412 gen_op_st_T0_A0(ot + s->mem_index);
5413 gen_op_mov_reg_T1(ot, reg);
5414 }
5415 gen_op_update2_cc();
5416 set_cc_op(s, CC_OP_ADDB + ot);
5417 break;
5418 case 0x1b0:
5419 case 0x1b1: /* cmpxchg Ev, Gv */
5420 {
5421 int label1, label2;
5422 TCGv t0, t1, t2, a0;
5424 if ((b & 1) == 0)
5425 ot = OT_BYTE;
5426 else
5427 ot = dflag + OT_WORD;
5428 modrm = cpu_ldub_code(env, s->pc++);
5429 reg = ((modrm >> 3) & 7) | rex_r;
5430 mod = (modrm >> 6) & 3;
5431 t0 = tcg_temp_local_new();
5432 t1 = tcg_temp_local_new();
5433 t2 = tcg_temp_local_new();
5434 a0 = tcg_temp_local_new();
5435 gen_op_mov_v_reg(ot, t1, reg);
5436 if (mod == 3) {
5437 rm = (modrm & 7) | REX_B(s);
5438 gen_op_mov_v_reg(ot, t0, rm);
5439 } else {
5440 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5441 tcg_gen_mov_tl(a0, cpu_A0);
5442 gen_op_ld_v(ot + s->mem_index, t0, a0);
5443 rm = 0; /* avoid warning */
5444 }
5445 label1 = gen_new_label();
5446 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5447 gen_extu(ot, t0);
5448 gen_extu(ot, t2);
5449 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5450 label2 = gen_new_label();
5451 if (mod == 3) {
5452 gen_op_mov_reg_v(ot, R_EAX, t0);
5453 tcg_gen_br(label2);
5454 gen_set_label(label1);
5455 gen_op_mov_reg_v(ot, rm, t1);
5456 } else {
5457 /* perform no-op store cycle like physical cpu; must be
5458 before changing accumulator to ensure idempotency if
5459 the store faults and the instruction is restarted */
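/* CMPXCHG: if the accumulator equals the destination, ZF is set
   and dest <- src; otherwise ZF is clear and EAX <- dest.  Either
   way the flags are those of CMP EAX,dest, which is why
   cc_src/cc_srcT/cc_dst are set up as a subtraction below.  */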
5460 gen_op_st_v(ot + s->mem_index, t0, a0);
5461 gen_op_mov_reg_v(ot, R_EAX, t0);
5462 tcg_gen_br(label2);
5463 gen_set_label(label1);
5464 gen_op_st_v(ot + s->mem_index, t1, a0);
5465 }
5466 gen_set_label(label2);
5467 tcg_gen_mov_tl(cpu_cc_src, t0);
5468 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5469 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5470 set_cc_op(s, CC_OP_SUBB + ot);
5471 tcg_temp_free(t0);
5472 tcg_temp_free(t1);
5473 tcg_temp_free(t2);
5474 tcg_temp_free(a0);
5475 }
5476 break;
5477 case 0x1c7: /* cmpxchg8b */
5478 modrm = cpu_ldub_code(env, s->pc++);
5479 mod = (modrm >> 6) & 3;
5480 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5481 goto illegal_op;
5482 #ifdef TARGET_X86_64
5483 if (dflag == 2) {
5484 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5485 goto illegal_op;
5486 gen_jmp_im(pc_start - s->cs_base);
5487 gen_update_cc_op(s);
5488 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5489 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5490 } else
5491 #endif
5492 {
5493 if (!(s->cpuid_features & CPUID_CX8))
5494 goto illegal_op;
5495 gen_jmp_im(pc_start - s->cs_base);
5496 gen_update_cc_op(s);
5497 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5498 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5499 }
5500 set_cc_op(s, CC_OP_EFLAGS);
5501 break;
5503 /**************************/
5504 /* push/pop */
5505 case 0x50 ... 0x57: /* push */
5506 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
5507 gen_push_T0(s);
5508 break;
5509 case 0x58 ... 0x5f: /* pop */
5510 if (CODE64(s)) {
5511 ot = dflag ? OT_QUAD : OT_WORD;
5512 } else {
5513 ot = dflag + OT_WORD;
5514 }
5515 gen_pop_T0(s);
5516 /* NOTE: order is important for pop %sp */
5517 gen_pop_update(s);
5518 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5519 break;
5520 case 0x60: /* pusha */
5521 if (CODE64(s))
5522 goto illegal_op;
5523 gen_pusha(s);
5524 break;
5525 case 0x61: /* popa */
5526 if (CODE64(s))
5527 goto illegal_op;
5528 gen_popa(s);
5529 break;
5530 case 0x68: /* push Iv */
5531 case 0x6a:
5532 if (CODE64(s)) {
5533 ot = dflag ? OT_QUAD : OT_WORD;
5534 } else {
5535 ot = dflag + OT_WORD;
5536 }
5537 if (b == 0x68)
5538 val = insn_get(env, s, ot);
5539 else
5540 val = (int8_t)insn_get(env, s, OT_BYTE);
5541 gen_op_movl_T0_im(val);
5542 gen_push_T0(s);
5543 break;
5544 case 0x8f: /* pop Ev */
5545 if (CODE64(s)) {
5546 ot = dflag ? OT_QUAD : OT_WORD;
5547 } else {
5548 ot = dflag + OT_WORD;
5549 }
5550 modrm = cpu_ldub_code(env, s->pc++);
5551 mod = (modrm >> 6) & 3;
5552 gen_pop_T0(s);
5553 if (mod == 3) {
5554 /* NOTE: order is important for pop %sp */
5555 gen_pop_update(s);
5556 rm = (modrm & 7) | REX_B(s);
5557 gen_op_mov_reg_T0(ot, rm);
5558 } else {
5559 /* NOTE: order is important too for MMU exceptions */
5560 s->popl_esp_hack = 1 << ot;
5561 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5562 s->popl_esp_hack = 0;
5563 gen_pop_update(s);
5564 }
5565 break;
5566 case 0xc8: /* enter */
5567 {
5568 int level;
5569 val = cpu_lduw_code(env, s->pc);
5570 s->pc += 2;
5571 level = cpu_ldub_code(env, s->pc++);
5572 gen_enter(s, val, level);
5573 }
5574 break;
5575 case 0xc9: /* leave */
5576 /* XXX: exception not precise (ESP is updated before potential exception) */
5577 if (CODE64(s)) {
5578 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5579 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5580 } else if (s->ss32) {
5581 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5582 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5583 } else {
5584 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5585 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5586 }
5587 gen_pop_T0(s);
5588 if (CODE64(s)) {
5589 ot = dflag ? OT_QUAD : OT_WORD;
5590 } else {
5591 ot = dflag + OT_WORD;
5592 }
5593 gen_op_mov_reg_T0(ot, R_EBP);
5594 gen_pop_update(s);
5595 break;
5596 case 0x06: /* push es */
5597 case 0x0e: /* push cs */
5598 case 0x16: /* push ss */
5599 case 0x1e: /* push ds */
5600 if (CODE64(s))
5601 goto illegal_op;
5602 gen_op_movl_T0_seg(b >> 3);
5603 gen_push_T0(s);
5604 break;
5605 case 0x1a0: /* push fs */
5606 case 0x1a8: /* push gs */
5607 gen_op_movl_T0_seg((b >> 3) & 7);
5608 gen_push_T0(s);
5609 break;
5610 case 0x07: /* pop es */
5611 case 0x17: /* pop ss */
5612 case 0x1f: /* pop ds */
5613 if (CODE64(s))
5614 goto illegal_op;
5615 reg = b >> 3;
5616 gen_pop_T0(s);
5617 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5618 gen_pop_update(s);
5619 if (reg == R_SS) {
5620 /* if reg == SS, inhibit interrupts/trace. */
5621 /* If several instructions disable interrupts, only the
5622 _first_ does it */
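/* Loading SS inhibits interrupts and traps until the next
   instruction completes, so a following load of (e)SP can finish
   before any handler runs on a half-switched stack.  */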
5623 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5624 gen_helper_set_inhibit_irq(cpu_env);
5625 s->tf = 0;
5626 }
5627 if (s->is_jmp) {
5628 gen_jmp_im(s->pc - s->cs_base);
5629 gen_eob(s);
5630 }
5631 break;
5632 case 0x1a1: /* pop fs */
5633 case 0x1a9: /* pop gs */
5634 gen_pop_T0(s);
5635 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5636 gen_pop_update(s);
5637 if (s->is_jmp) {
5638 gen_jmp_im(s->pc - s->cs_base);
5639 gen_eob(s);
5640 }
5641 break;
5643 /**************************/
5644 /* mov */
5645 case 0x88:
5646 case 0x89: /* mov Gv, Ev */
5647 if ((b & 1) == 0)
5648 ot = OT_BYTE;
5649 else
5650 ot = dflag + OT_WORD;
5651 modrm = cpu_ldub_code(env, s->pc++);
5652 reg = ((modrm >> 3) & 7) | rex_r;
5654 /* generate a generic store */
5655 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5656 break;
5657 case 0xc6:
5658 case 0xc7: /* mov Ev, Iv */
5659 if ((b & 1) == 0)
5660 ot = OT_BYTE;
5661 else
5662 ot = dflag + OT_WORD;
5663 modrm = cpu_ldub_code(env, s->pc++);
5664 mod = (modrm >> 6) & 3;
5665 if (mod != 3) {
5666 s->rip_offset = insn_const_size(ot);
5667 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5668 }
5669 val = insn_get(env, s, ot);
5670 gen_op_movl_T0_im(val);
5671 if (mod != 3)
5672 gen_op_st_T0_A0(ot + s->mem_index);
5673 else
5674 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5675 break;
5676 case 0x8a:
5677 case 0x8b: /* mov Ev, Gv */
5678 if ((b & 1) == 0)
5679 ot = OT_BYTE;
5680 else
5681 ot = OT_WORD + dflag;
5682 modrm = cpu_ldub_code(env, s->pc++);
5683 reg = ((modrm >> 3) & 7) | rex_r;
5685 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5686 gen_op_mov_reg_T0(ot, reg);
5687 break;
5688 case 0x8e: /* mov seg, Gv */
5689 modrm = cpu_ldub_code(env, s->pc++);
5690 reg = (modrm >> 3) & 7;
5691 if (reg >= 6 || reg == R_CS)
5692 goto illegal_op;
5693 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
5694 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5695 if (reg == R_SS) {
5696 /* if reg == SS, inhibit interrupts/trace */
5697 /* If several instructions disable interrupts, only the
5698 _first_ does it */
5699 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5700 gen_helper_set_inhibit_irq(cpu_env);
5701 s->tf = 0;
5702 }
5703 if (s->is_jmp) {
5704 gen_jmp_im(s->pc - s->cs_base);
5705 gen_eob(s);
5706 }
5707 break;
5708 case 0x8c: /* mov Gv, seg */
5709 modrm = cpu_ldub_code(env, s->pc++);
5710 reg = (modrm >> 3) & 7;
5711 mod = (modrm >> 6) & 3;
5712 if (reg >= 6)
5713 goto illegal_op;
5714 gen_op_movl_T0_seg(reg);
5715 if (mod == 3)
5716 ot = OT_WORD + dflag;
5717 else
5718 ot = OT_WORD;
5719 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5720 break;
5722 case 0x1b6: /* movzbS Gv, Eb */
5723 case 0x1b7: /* movzwS Gv, Eb */
5724 case 0x1be: /* movsbS Gv, Eb */
5725 case 0x1bf: /* movswS Gv, Eb */
5726 {
5727 int d_ot;
5728 /* d_ot is the size of destination */
5729 d_ot = dflag + OT_WORD;
5730 /* ot is the size of source */
5731 ot = (b & 1) + OT_BYTE;
5732 modrm = cpu_ldub_code(env, s->pc++);
5733 reg = ((modrm >> 3) & 7) | rex_r;
5734 mod = (modrm >> 6) & 3;
5735 rm = (modrm & 7) | REX_B(s);
5737 if (mod == 3) {
5738 gen_op_mov_TN_reg(ot, 0, rm);
5739 switch(ot | (b & 8)) {
5740 case OT_BYTE:
5741 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5742 break;
5743 case OT_BYTE | 8:
5744 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5745 break;
5746 case OT_WORD:
5747 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5748 break;
5749 default:
5750 case OT_WORD | 8:
5751 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5752 break;
5753 }
5754 gen_op_mov_reg_T0(d_ot, reg);
5755 } else {
5756 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5757 if (b & 8) {
5758 gen_op_lds_T0_A0(ot + s->mem_index);
5759 } else {
5760 gen_op_ldu_T0_A0(ot + s->mem_index);
5761 }
5762 gen_op_mov_reg_T0(d_ot, reg);
5763 }
5764 }
5765 break;
5767 case 0x8d: /* lea */
5768 ot = dflag + OT_WORD;
5769 modrm = cpu_ldub_code(env, s->pc++);
5770 mod = (modrm >> 6) & 3;
5771 if (mod == 3)
5772 goto illegal_op;
5773 reg = ((modrm >> 3) & 7) | rex_r;
5774 /* we must ensure that no segment is added */
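/* LEA yields the raw effective address: lea 4(%ebx,%esi,2),%eax
   computes ebx + esi*2 + 4 with no segment base added, so addseg
   is cleared around the address computation and then restored.  */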
5775 s->override = -1;
5776 val = s->addseg;
5777 s->addseg = 0;
5778 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5779 s->addseg = val;
5780 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5781 break;
5783 case 0xa0: /* mov EAX, Ov */
5784 case 0xa1:
5785 case 0xa2: /* mov Ov, EAX */
5786 case 0xa3:
5787 {
5788 target_ulong offset_addr;
5790 if ((b & 1) == 0)
5791 ot = OT_BYTE;
5792 else
5793 ot = dflag + OT_WORD;
5794 #ifdef TARGET_X86_64
5795 if (s->aflag == 2) {
5796 offset_addr = cpu_ldq_code(env, s->pc);
5797 s->pc += 8;
5798 gen_op_movq_A0_im(offset_addr);
5799 } else
5800 #endif
5801 {
5802 if (s->aflag) {
5803 offset_addr = insn_get(env, s, OT_LONG);
5804 } else {
5805 offset_addr = insn_get(env, s, OT_WORD);
5806 }
5807 gen_op_movl_A0_im(offset_addr);
5808 }
5809 gen_add_A0_ds_seg(s);
5810 if ((b & 2) == 0) {
5811 gen_op_ld_T0_A0(ot + s->mem_index);
5812 gen_op_mov_reg_T0(ot, R_EAX);
5813 } else {
5814 gen_op_mov_TN_reg(ot, 0, R_EAX);
5815 gen_op_st_T0_A0(ot + s->mem_index);
5816 }
5817 }
5818 break;
5819 case 0xd7: /* xlat */
5820 #ifdef TARGET_X86_64
5821 if (s->aflag == 2) {
5822 gen_op_movq_A0_reg(R_EBX);
5823 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5824 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5825 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5826 } else
5827 #endif
5828 {
5829 gen_op_movl_A0_reg(R_EBX);
5830 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5831 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5832 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5833 if (s->aflag == 0)
5834 gen_op_andl_A0_ffff();
5835 else
5836 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5837 }
5838 gen_add_A0_ds_seg(s);
5839 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5840 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5841 break;
5842 case 0xb0 ... 0xb7: /* mov R, Ib */
5843 val = insn_get(env, s, OT_BYTE);
5844 gen_op_movl_T0_im(val);
5845 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5846 break;
5847 case 0xb8 ... 0xbf: /* mov R, Iv */
5848 #ifdef TARGET_X86_64
5849 if (dflag == 2) {
5850 uint64_t tmp;
5851 /* 64 bit case */
5852 tmp = cpu_ldq_code(env, s->pc);
5853 s->pc += 8;
5854 reg = (b & 7) | REX_B(s);
5855 gen_movtl_T0_im(tmp);
5856 gen_op_mov_reg_T0(OT_QUAD, reg);
5857 } else
5858 #endif
5859 {
5860 ot = dflag ? OT_LONG : OT_WORD;
5861 val = insn_get(env, s, ot);
5862 reg = (b & 7) | REX_B(s);
5863 gen_op_movl_T0_im(val);
5864 gen_op_mov_reg_T0(ot, reg);
5865 }
5866 break;
5868 case 0x91 ... 0x97: /* xchg R, EAX */
5869 do_xchg_reg_eax:
5870 ot = dflag + OT_WORD;
5871 reg = (b & 7) | REX_B(s);
5872 rm = R_EAX;
5873 goto do_xchg_reg;
5874 case 0x86:
5875 case 0x87: /* xchg Ev, Gv */
5876 if ((b & 1) == 0)
5877 ot = OT_BYTE;
5878 else
5879 ot = dflag + OT_WORD;
5880 modrm = cpu_ldub_code(env, s->pc++);
5881 reg = ((modrm >> 3) & 7) | rex_r;
5882 mod = (modrm >> 6) & 3;
5883 if (mod == 3) {
5884 rm = (modrm & 7) | REX_B(s);
5885 do_xchg_reg:
5886 gen_op_mov_TN_reg(ot, 0, reg);
5887 gen_op_mov_TN_reg(ot, 1, rm);
5888 gen_op_mov_reg_T0(ot, rm);
5889 gen_op_mov_reg_T1(ot, reg);
5890 } else {
5891 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5892 gen_op_mov_TN_reg(ot, 0, reg);
5893 /* for xchg, lock is implicit */
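/* A memory-operand xchg asserts LOCK# even without a lock prefix;
   the prefixed case was already handled by the common lock
   generation above, so lock here only when the prefix is absent.  */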
5894 if (!(prefixes & PREFIX_LOCK))
5895 gen_helper_lock();
5896 gen_op_ld_T1_A0(ot + s->mem_index);
5897 gen_op_st_T0_A0(ot + s->mem_index);
5898 if (!(prefixes & PREFIX_LOCK))
5899 gen_helper_unlock();
5900 gen_op_mov_reg_T1(ot, reg);
5901 }
5902 break;
5903 case 0xc4: /* les Gv */
5904 /* In CODE64 this is VEX3; see above. */
5905 op = R_ES;
5906 goto do_lxx;
5907 case 0xc5: /* lds Gv */
5908 /* In CODE64 this is VEX2; see above. */
5909 op = R_DS;
5910 goto do_lxx;
5911 case 0x1b2: /* lss Gv */
5912 op = R_SS;
5913 goto do_lxx;
5914 case 0x1b4: /* lfs Gv */
5915 op = R_FS;
5916 goto do_lxx;
5917 case 0x1b5: /* lgs Gv */
5918 op = R_GS;
5919 do_lxx:
5920 ot = dflag ? OT_LONG : OT_WORD;
5921 modrm = cpu_ldub_code(env, s->pc++);
5922 reg = ((modrm >> 3) & 7) | rex_r;
5923 mod = (modrm >> 6) & 3;
5924 if (mod == 3)
5925 goto illegal_op;
5926 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5927 gen_op_ld_T1_A0(ot + s->mem_index);
5928 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5929 /* load the segment first to handle exceptions properly */
5930 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5931 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5932 /* then put the data */
5933 gen_op_mov_reg_T1(ot, reg);
5934 if (s->is_jmp) {
5935 gen_jmp_im(s->pc - s->cs_base);
5936 gen_eob(s);
5937 }
5938 break;
5940 /************************/
5941 /* shifts */
5942 case 0xc0:
5943 case 0xc1:
5944 /* shift Ev,Ib */
5945 shift = 2;
5946 grp2:
5947 {
5948 if ((b & 1) == 0)
5949 ot = OT_BYTE;
5950 else
5951 ot = dflag + OT_WORD;
5953 modrm = cpu_ldub_code(env, s->pc++);
5954 mod = (modrm >> 6) & 3;
5955 op = (modrm >> 3) & 7;
5957 if (mod != 3) {
5958 if (shift == 2) {
5959 s->rip_offset = 1;
5960 }
5961 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5962 opreg = OR_TMP0;
5963 } else {
5964 opreg = (modrm & 7) | REX_B(s);
5965 }
5967 /* simpler op */
5968 if (shift == 0) {
5969 gen_shift(s, op, ot, opreg, OR_ECX);
5970 } else {
5971 if (shift == 2) {
5972 shift = cpu_ldub_code(env, s->pc++);
5973 }
5974 gen_shifti(s, op, ot, opreg, shift);
5975 }
5976 }
5977 break;
5978 case 0xd0:
5979 case 0xd1:
5980 /* shift Ev,1 */
5981 shift = 1;
5982 goto grp2;
5983 case 0xd2:
5984 case 0xd3:
5985 /* shift Ev,cl */
5986 shift = 0;
5987 goto grp2;
5989 case 0x1a4: /* shld imm */
5990 op = 0;
5991 shift = 1;
5992 goto do_shiftd;
5993 case 0x1a5: /* shld cl */
5994 op = 0;
5995 shift = 0;
5996 goto do_shiftd;
5997 case 0x1ac: /* shrd imm */
5998 op = 1;
5999 shift = 1;
6000 goto do_shiftd;
6001 case 0x1ad: /* shrd cl */
6002 op = 1;
6003 shift = 0;
6004 do_shiftd:
6005 ot = dflag + OT_WORD;
6006 modrm = cpu_ldub_code(env, s->pc++);
6007 mod = (modrm >> 6) & 3;
6008 rm = (modrm & 7) | REX_B(s);
6009 reg = ((modrm >> 3) & 7) | rex_r;
6010 if (mod != 3) {
6011 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6012 opreg = OR_TMP0;
6013 } else {
6014 opreg = rm;
6015 }
6016 gen_op_mov_TN_reg(ot, 1, reg);
6018 if (shift) {
6019 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
6020 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
6021 tcg_temp_free(imm);
6022 } else {
6023 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
6024 }
6025 break;
6027 /************************/
6028 /* floats */
6029 case 0xd8 ... 0xdf:
6030 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
6031 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
6032 /* XXX: what to do if illegal op ? */
6033 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6034 break;
6035 }
6036 modrm = cpu_ldub_code(env, s->pc++);
6037 mod = (modrm >> 6) & 3;
6038 rm = modrm & 7;
6039 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
6040 if (mod != 3) {
6041 /* memory op */
6042 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6043 switch(op) {
6044 case 0x00 ... 0x07: /* fxxxs */
6045 case 0x10 ... 0x17: /* fixxxl */
6046 case 0x20 ... 0x27: /* fxxxl */
6047 case 0x30 ... 0x37: /* fixxx */
6048 {
6049 int op1;
6050 op1 = op & 7;
6052 switch(op >> 4) {
6053 case 0:
6054 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
6055 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6056 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
6057 break;
6058 case 1:
6059 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
6060 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6061 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
6062 break;
6063 case 2:
6064 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
6065 (s->mem_index >> 2) - 1);
6066 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
6067 break;
6068 case 3:
6069 default:
6070 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
6071 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6072 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
6073 break;
6074 }
6076 gen_helper_fp_arith_ST0_FT0(op1);
6077 if (op1 == 3) {
6078 /* fcomp needs pop */
6079 gen_helper_fpop(cpu_env);
6080 }
6081 }
6082 break;
6083 case 0x08: /* flds */
6084 case 0x0a: /* fsts */
6085 case 0x0b: /* fstps */
6086 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
6087 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
6088 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
6089 switch(op & 7) {
6090 case 0:
6091 switch(op >> 4) {
6092 case 0:
6093 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
6094 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6095 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
6096 break;
6097 case 1:
6098 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
6099 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6100 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
6101 break;
6102 case 2:
6103 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
6104 (s->mem_index >> 2) - 1);
6105 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
6106 break;
6107 case 3:
6108 default:
6109 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
6110 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6111 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
6112 break;
6113 }
6114 break;
6115 case 1:
6116 /* XXX: the corresponding CPUID bit must be tested! */
6117 switch(op >> 4) {
6118 case 1:
6119 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
6120 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6121 gen_op_st_T0_A0(OT_LONG + s->mem_index);
6122 break;
6123 case 2:
6124 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
6125 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
6126 (s->mem_index >> 2) - 1);
6127 break;
6128 case 3:
6129 default:
6130 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
6131 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6132 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6133 break;
6134 }
6135 gen_helper_fpop(cpu_env);
6136 break;
6137 default:
6138 switch(op >> 4) {
6139 case 0:
6140 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
6141 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6142 gen_op_st_T0_A0(OT_LONG + s->mem_index);
6143 break;
6144 case 1:
6145 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
6146 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6147 gen_op_st_T0_A0(OT_LONG + s->mem_index);
6148 break;
6149 case 2:
6150 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
6151 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
6152 (s->mem_index >> 2) - 1);
6153 break;
6154 case 3:
6155 default:
6156 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
6157 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6158 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6159 break;
6161 if ((op & 7) == 3)
6162 gen_helper_fpop(cpu_env);
6163 break;
6165 break;
6166 case 0x0c: /* fldenv mem */
6167 gen_update_cc_op(s);
6168 gen_jmp_im(pc_start - s->cs_base);
6169 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
6170 break;
6171 case 0x0d: /* fldcw mem */
6172 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
6173 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6174 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
6175 break;
6176 case 0x0e: /* fnstenv mem */
6177 gen_update_cc_op(s);
6178 gen_jmp_im(pc_start - s->cs_base);
6179 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
6180 break;
6181 case 0x0f: /* fnstcw mem */
6182 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
6183 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6184 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6185 break;
6186 case 0x1d: /* fldt mem */
6187 gen_update_cc_op(s);
6188 gen_jmp_im(pc_start - s->cs_base);
6189 gen_helper_fldt_ST0(cpu_env, cpu_A0);
6190 break;
6191 case 0x1f: /* fstpt mem */
6192 gen_update_cc_op(s);
6193 gen_jmp_im(pc_start - s->cs_base);
6194 gen_helper_fstt_ST0(cpu_env, cpu_A0);
6195 gen_helper_fpop(cpu_env);
6196 break;
6197 case 0x2c: /* frstor mem */
6198 gen_update_cc_op(s);
6199 gen_jmp_im(pc_start - s->cs_base);
6200 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
6201 break;
6202 case 0x2e: /* fnsave mem */
6203 gen_update_cc_op(s);
6204 gen_jmp_im(pc_start - s->cs_base);
6205 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
6206 break;
6207 case 0x2f: /* fnstsw mem */
6208 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6209 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6210 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6211 break;
6212 case 0x3c: /* fbld */
6213 gen_update_cc_op(s);
6214 gen_jmp_im(pc_start - s->cs_base);
6215 gen_helper_fbld_ST0(cpu_env, cpu_A0);
6216 break;
6217 case 0x3e: /* fbstp */
6218 gen_update_cc_op(s);
6219 gen_jmp_im(pc_start - s->cs_base);
6220 gen_helper_fbst_ST0(cpu_env, cpu_A0);
6221 gen_helper_fpop(cpu_env);
6222 break;
6223 case 0x3d: /* fildll */
6224 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
6225 (s->mem_index >> 2) - 1);
6226 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
6227 break;
6228 case 0x3f: /* fistpll */
6229 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
6230 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
6231 (s->mem_index >> 2) - 1);
6232 gen_helper_fpop(cpu_env);
6233 break;
6234 default:
6235 goto illegal_op;
6237 } else {
6238 /* register float ops */
6239 opreg = rm;
6241 switch(op) {
6242 case 0x08: /* fld sti */
6243 gen_helper_fpush(cpu_env);
6244 gen_helper_fmov_ST0_STN(cpu_env,
6245 tcg_const_i32((opreg + 1) & 7));
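/* fpush has already moved the top-of-stack pointer, so the register that was ST(i) before the push is reached as (opreg + 1) & 7 */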
6246 break;
6247 case 0x09: /* fxchg sti */
6248 case 0x29: /* fxchg4 sti, undocumented op */
6249 case 0x39: /* fxchg7 sti, undocumented op */
6250 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
6251 break;
6252 case 0x0a: /* grp d9/2 */
6253 switch(rm) {
6254 case 0: /* fnop */
6255 /* check exceptions (FreeBSD FPU probe) */
6256 gen_update_cc_op(s);
6257 gen_jmp_im(pc_start - s->cs_base);
6258 gen_helper_fwait(cpu_env);
6259 break;
6260 default:
6261 goto illegal_op;
6263 break;
6264 case 0x0c: /* grp d9/4 */
6265 switch(rm) {
6266 case 0: /* fchs */
6267 gen_helper_fchs_ST0(cpu_env);
6268 break;
6269 case 1: /* fabs */
6270 gen_helper_fabs_ST0(cpu_env);
6271 break;
6272 case 4: /* ftst */
6273 gen_helper_fldz_FT0(cpu_env);
6274 gen_helper_fcom_ST0_FT0(cpu_env);
6275 break;
6276 case 5: /* fxam */
6277 gen_helper_fxam_ST0(cpu_env);
6278 break;
6279 default:
6280 goto illegal_op;
6282 break;
6283 case 0x0d: /* grp d9/5 */
6285 switch(rm) {
6286 case 0:
6287 gen_helper_fpush(cpu_env);
6288 gen_helper_fld1_ST0(cpu_env);
6289 break;
6290 case 1:
6291 gen_helper_fpush(cpu_env);
6292 gen_helper_fldl2t_ST0(cpu_env);
6293 break;
6294 case 2:
6295 gen_helper_fpush(cpu_env);
6296 gen_helper_fldl2e_ST0(cpu_env);
6297 break;
6298 case 3:
6299 gen_helper_fpush(cpu_env);
6300 gen_helper_fldpi_ST0(cpu_env);
6301 break;
6302 case 4:
6303 gen_helper_fpush(cpu_env);
6304 gen_helper_fldlg2_ST0(cpu_env);
6305 break;
6306 case 5:
6307 gen_helper_fpush(cpu_env);
6308 gen_helper_fldln2_ST0(cpu_env);
6309 break;
6310 case 6:
6311 gen_helper_fpush(cpu_env);
6312 gen_helper_fldz_ST0(cpu_env);
6313 break;
6314 default:
6315 goto illegal_op;
6318 break;
6319 case 0x0e: /* grp d9/6 */
6320 switch(rm) {
6321 case 0: /* f2xm1 */
6322 gen_helper_f2xm1(cpu_env);
6323 break;
6324 case 1: /* fyl2x */
6325 gen_helper_fyl2x(cpu_env);
6326 break;
6327 case 2: /* fptan */
6328 gen_helper_fptan(cpu_env);
6329 break;
6330 case 3: /* fpatan */
6331 gen_helper_fpatan(cpu_env);
6332 break;
6333 case 4: /* fxtract */
6334 gen_helper_fxtract(cpu_env);
6335 break;
6336 case 5: /* fprem1 */
6337 gen_helper_fprem1(cpu_env);
6338 break;
6339 case 6: /* fdecstp */
6340 gen_helper_fdecstp(cpu_env);
6341 break;
6342 default:
6343 case 7: /* fincstp */
6344 gen_helper_fincstp(cpu_env);
6345 break;
6347 break;
6348 case 0x0f: /* grp d9/7 */
6349 switch(rm) {
6350 case 0: /* fprem */
6351 gen_helper_fprem(cpu_env);
6352 break;
6353 case 1: /* fyl2xp1 */
6354 gen_helper_fyl2xp1(cpu_env);
6355 break;
6356 case 2: /* fsqrt */
6357 gen_helper_fsqrt(cpu_env);
6358 break;
6359 case 3: /* fsincos */
6360 gen_helper_fsincos(cpu_env);
6361 break;
6362 case 5: /* fscale */
6363 gen_helper_fscale(cpu_env);
6364 break;
6365 case 4: /* frndint */
6366 gen_helper_frndint(cpu_env);
6367 break;
6368 case 6: /* fsin */
6369 gen_helper_fsin(cpu_env);
6370 break;
6371 default:
6372 case 7: /* fcos */
6373 gen_helper_fcos(cpu_env);
6374 break;
6376 break;
6377 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6378 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6379 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6380 {
6381 int op1;
6383 op1 = op & 7;
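/* op1 is the modrm reg field: 0=fadd 1=fmul 2=fcom 3=fcomp 4=fsub 5=fsubr 6=fdiv 7=fdivr; the 0x20+ (dc) forms target ST(i) instead of ST0 and the 0x30+ (de) forms also pop */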
6384 if (op >= 0x20) {
6385 gen_helper_fp_arith_STN_ST0(op1, opreg);
6386 if (op >= 0x30)
6387 gen_helper_fpop(cpu_env);
6388 } else {
6389 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6390 gen_helper_fp_arith_ST0_FT0(op1);
6391 }
6392 }
6393 break;
6394 case 0x02: /* fcom */
6395 case 0x22: /* fcom2, undocumented op */
6396 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6397 gen_helper_fcom_ST0_FT0(cpu_env);
6398 break;
6399 case 0x03: /* fcomp */
6400 case 0x23: /* fcomp3, undocumented op */
6401 case 0x32: /* fcomp5, undocumented op */
6402 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6403 gen_helper_fcom_ST0_FT0(cpu_env);
6404 gen_helper_fpop(cpu_env);
6405 break;
6406 case 0x15: /* da/5 */
6407 switch(rm) {
6408 case 1: /* fucompp */
6409 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6410 gen_helper_fucom_ST0_FT0(cpu_env);
6411 gen_helper_fpop(cpu_env);
6412 gen_helper_fpop(cpu_env);
6413 break;
6414 default:
6415 goto illegal_op;
6417 break;
6418 case 0x1c:
6419 switch(rm) {
6420 case 0: /* feni (287 only, just do nop here) */
6421 break;
6422 case 1: /* fdisi (287 only, just do nop here) */
6423 break;
6424 case 2: /* fclex */
6425 gen_helper_fclex(cpu_env);
6426 break;
6427 case 3: /* fninit */
6428 gen_helper_fninit(cpu_env);
6429 break;
6430 case 4: /* fsetpm (287 only, just do nop here) */
6431 break;
6432 default:
6433 goto illegal_op;
6435 break;
6436 case 0x1d: /* fucomi */
6437 if (!(s->cpuid_features & CPUID_CMOV)) {
6438 goto illegal_op;
6440 gen_update_cc_op(s);
6441 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6442 gen_helper_fucomi_ST0_FT0(cpu_env);
6443 set_cc_op(s, CC_OP_EFLAGS);
6444 break;
6445 case 0x1e: /* fcomi */
6446 if (!(s->cpuid_features & CPUID_CMOV)) {
6447 goto illegal_op;
6449 gen_update_cc_op(s);
6450 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6451 gen_helper_fcomi_ST0_FT0(cpu_env);
6452 set_cc_op(s, CC_OP_EFLAGS);
6453 break;
6454 case 0x28: /* ffree sti */
6455 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6456 break;
6457 case 0x2a: /* fst sti */
6458 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6459 break;
6460 case 0x2b: /* fstp sti */
6461 case 0x0b: /* fstp1 sti, undocumented op */
6462 case 0x3a: /* fstp8 sti, undocumented op */
6463 case 0x3b: /* fstp9 sti, undocumented op */
6464 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6465 gen_helper_fpop(cpu_env);
6466 break;
6467 case 0x2c: /* fucom st(i) */
6468 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6469 gen_helper_fucom_ST0_FT0(cpu_env);
6470 break;
6471 case 0x2d: /* fucomp st(i) */
6472 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6473 gen_helper_fucom_ST0_FT0(cpu_env);
6474 gen_helper_fpop(cpu_env);
6475 break;
6476 case 0x33: /* de/3 */
6477 switch(rm) {
6478 case 1: /* fcompp */
6479 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6480 gen_helper_fcom_ST0_FT0(cpu_env);
6481 gen_helper_fpop(cpu_env);
6482 gen_helper_fpop(cpu_env);
6483 break;
6484 default:
6485 goto illegal_op;
6487 break;
6488 case 0x38: /* ffreep sti, undocumented op */
6489 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6490 gen_helper_fpop(cpu_env);
6491 break;
6492 case 0x3c: /* df/4 */
6493 switch(rm) {
6494 case 0:
6495 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6496 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6497 gen_op_mov_reg_T0(OT_WORD, R_EAX);
6498 break;
6499 default:
6500 goto illegal_op;
6502 break;
6503 case 0x3d: /* fucomip */
6504 if (!(s->cpuid_features & CPUID_CMOV)) {
6505 goto illegal_op;
6507 gen_update_cc_op(s);
6508 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6509 gen_helper_fucomi_ST0_FT0(cpu_env);
6510 gen_helper_fpop(cpu_env);
6511 set_cc_op(s, CC_OP_EFLAGS);
6512 break;
6513 case 0x3e: /* fcomip */
6514 if (!(s->cpuid_features & CPUID_CMOV)) {
6515 goto illegal_op;
6517 gen_update_cc_op(s);
6518 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6519 gen_helper_fcomi_ST0_FT0(cpu_env);
6520 gen_helper_fpop(cpu_env);
6521 set_cc_op(s, CC_OP_EFLAGS);
6522 break;
6523 case 0x10 ... 0x13: /* fcmovxx */
6524 case 0x18 ... 0x1b:
6525 {
6526 int op1, l1;
6527 static const uint8_t fcmov_cc[8] = {
6528 (JCC_B << 1),
6529 (JCC_Z << 1),
6530 (JCC_BE << 1),
6531 (JCC_P << 1),
6532 };
6534 if (!(s->cpuid_features & CPUID_CMOV)) {
6535 goto illegal_op;
6536 }
6537 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
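/* bits 1:0 select the base condition (B/Z/BE/P); the inverted bit 3 becomes the jcc negate bit, so the branch below skips the fmov exactly when the move condition does not hold */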
6538 l1 = gen_new_label();
6539 gen_jcc1_noeob(s, op1, l1);
6540 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6541 gen_set_label(l1);
6542 }
6543 break;
6544 default:
6545 goto illegal_op;
6548 break;
6549 /************************/
6550 /* string ops */
6552 case 0xa4: /* movsS */
6553 case 0xa5:
6554 if ((b & 1) == 0)
6555 ot = OT_BYTE;
6556 else
6557 ot = dflag + OT_WORD;
6559 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6560 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6561 } else {
6562 gen_movs(s, ot);
6564 break;
6566 case 0xaa: /* stosS */
6567 case 0xab:
6568 if ((b & 1) == 0)
6569 ot = OT_BYTE;
6570 else
6571 ot = dflag + OT_WORD;
6573 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6574 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6575 } else {
6576 gen_stos(s, ot);
6578 break;
6579 case 0xac: /* lodsS */
6580 case 0xad:
6581 if ((b & 1) == 0)
6582 ot = OT_BYTE;
6583 else
6584 ot = dflag + OT_WORD;
6585 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6586 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6587 } else {
6588 gen_lods(s, ot);
6590 break;
6591 case 0xae: /* scasS */
6592 case 0xaf:
6593 if ((b & 1) == 0)
6594 ot = OT_BYTE;
6595 else
6596 ot = dflag + OT_WORD;
6597 if (prefixes & PREFIX_REPNZ) {
6598 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6599 } else if (prefixes & PREFIX_REPZ) {
6600 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6601 } else {
6602 gen_scas(s, ot);
6604 break;
6606 case 0xa6: /* cmpsS */
6607 case 0xa7:
6608 if ((b & 1) == 0)
6609 ot = OT_BYTE;
6610 else
6611 ot = dflag + OT_WORD;
6612 if (prefixes & PREFIX_REPNZ) {
6613 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6614 } else if (prefixes & PREFIX_REPZ) {
6615 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6616 } else {
6617 gen_cmps(s, ot);
6619 break;
6620 case 0x6c: /* insS */
6621 case 0x6d:
6622 if ((b & 1) == 0)
6623 ot = OT_BYTE;
6624 else
6625 ot = dflag ? OT_LONG : OT_WORD;
6626 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6627 gen_op_andl_T0_ffff();
6628 gen_check_io(s, ot, pc_start - s->cs_base,
6629 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6630 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6631 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6632 } else {
6633 gen_ins(s, ot);
6634 if (use_icount) {
6635 gen_jmp(s, s->pc - s->cs_base);
6638 break;
6639 case 0x6e: /* outsS */
6640 case 0x6f:
6641 if ((b & 1) == 0)
6642 ot = OT_BYTE;
6643 else
6644 ot = dflag ? OT_LONG : OT_WORD;
6645 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6646 gen_op_andl_T0_ffff();
6647 gen_check_io(s, ot, pc_start - s->cs_base,
6648 svm_is_rep(prefixes) | 4);
6649 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6650 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6651 } else {
6652 gen_outs(s, ot);
6653 if (use_icount) {
6654 gen_jmp(s, s->pc - s->cs_base);
6657 break;
6659 /************************/
6660 /* port I/O */
6662 case 0xe4:
6663 case 0xe5:
6664 if ((b & 1) == 0)
6665 ot = OT_BYTE;
6666 else
6667 ot = dflag ? OT_LONG : OT_WORD;
6668 val = cpu_ldub_code(env, s->pc++);
6669 gen_op_movl_T0_im(val);
6670 gen_check_io(s, ot, pc_start - s->cs_base,
6671 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6672 if (use_icount)
6673 gen_io_start();
6674 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6675 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6676 gen_op_mov_reg_T1(ot, R_EAX);
6677 if (use_icount) {
6678 gen_io_end();
6679 gen_jmp(s, s->pc - s->cs_base);
6681 break;
6682 case 0xe6:
6683 case 0xe7:
6684 if ((b & 1) == 0)
6685 ot = OT_BYTE;
6686 else
6687 ot = dflag ? OT_LONG : OT_WORD;
6688 val = cpu_ldub_code(env, s->pc++);
6689 gen_op_movl_T0_im(val);
6690 gen_check_io(s, ot, pc_start - s->cs_base,
6691 svm_is_rep(prefixes));
6692 gen_op_mov_TN_reg(ot, 1, R_EAX);
6694 if (use_icount)
6695 gen_io_start();
6696 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6697 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6698 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6699 if (use_icount) {
6700 gen_io_end();
6701 gen_jmp(s, s->pc - s->cs_base);
6703 break;
6704 case 0xec:
6705 case 0xed:
6706 if ((b & 1) == 0)
6707 ot = OT_BYTE;
6708 else
6709 ot = dflag ? OT_LONG : OT_WORD;
6710 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6711 gen_op_andl_T0_ffff();
6712 gen_check_io(s, ot, pc_start - s->cs_base,
6713 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6714 if (use_icount)
6715 gen_io_start();
6716 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6717 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6718 gen_op_mov_reg_T1(ot, R_EAX);
6719 if (use_icount) {
6720 gen_io_end();
6721 gen_jmp(s, s->pc - s->cs_base);
6723 break;
6724 case 0xee:
6725 case 0xef:
6726 if ((b & 1) == 0)
6727 ot = OT_BYTE;
6728 else
6729 ot = dflag ? OT_LONG : OT_WORD;
6730 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6731 gen_op_andl_T0_ffff();
6732 gen_check_io(s, ot, pc_start - s->cs_base,
6733 svm_is_rep(prefixes));
6734 gen_op_mov_TN_reg(ot, 1, R_EAX);
6736 if (use_icount)
6737 gen_io_start();
6738 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6739 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6740 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6741 if (use_icount) {
6742 gen_io_end();
6743 gen_jmp(s, s->pc - s->cs_base);
6745 break;
6747 /************************/
6748 /* control */
6749 case 0xc2: /* ret im */
6750 val = cpu_ldsw_code(env, s->pc);
6751 s->pc += 2;
6752 gen_pop_T0(s);
6753 if (CODE64(s) && s->dflag)
6754 s->dflag = 2;
6755 gen_stack_update(s, val + (2 << s->dflag));
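/* ESP moves by the size of the popped return address (2 << dflag bytes) plus the immediate count */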
6756 if (s->dflag == 0)
6757 gen_op_andl_T0_ffff();
6758 gen_op_jmp_T0();
6759 gen_eob(s);
6760 break;
6761 case 0xc3: /* ret */
6762 gen_pop_T0(s);
6763 gen_pop_update(s);
6764 if (s->dflag == 0)
6765 gen_op_andl_T0_ffff();
6766 gen_op_jmp_T0();
6767 gen_eob(s);
6768 break;
6769 case 0xca: /* lret im */
6770 val = cpu_ldsw_code(env, s->pc);
6771 s->pc += 2;
6772 do_lret:
6773 if (s->pe && !s->vm86) {
6774 gen_update_cc_op(s);
6775 gen_jmp_im(pc_start - s->cs_base);
6776 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
6777 tcg_const_i32(val));
6778 } else {
6779 gen_stack_A0(s);
6780 /* pop offset */
6781 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6782 if (s->dflag == 0)
6783 gen_op_andl_T0_ffff();
6784 /* NOTE: keeping EIP updated is not a problem in case of
6785 exception */
6786 gen_op_jmp_T0();
6787 /* pop selector */
6788 gen_op_addl_A0_im(2 << s->dflag);
6789 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6790 gen_op_movl_seg_T0_vm(R_CS);
6791 /* add stack offset */
6792 gen_stack_update(s, val + (4 << s->dflag));
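/* two stack slots (offset and selector) were popped, i.e. 2 * (2 << dflag) = 4 << dflag bytes, in addition to the immediate */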
6794 gen_eob(s);
6795 break;
6796 case 0xcb: /* lret */
6797 val = 0;
6798 goto do_lret;
6799 case 0xcf: /* iret */
6800 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6801 if (!s->pe) {
6802 /* real mode */
6803 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6804 set_cc_op(s, CC_OP_EFLAGS);
6805 } else if (s->vm86) {
6806 if (s->iopl != 3) {
6807 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6808 } else {
6809 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6810 set_cc_op(s, CC_OP_EFLAGS);
6812 } else {
6813 gen_update_cc_op(s);
6814 gen_jmp_im(pc_start - s->cs_base);
6815 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
6816 tcg_const_i32(s->pc - s->cs_base));
6817 set_cc_op(s, CC_OP_EFLAGS);
6819 gen_eob(s);
6820 break;
6821 case 0xe8: /* call im */
6823 if (dflag)
6824 tval = (int32_t)insn_get(env, s, OT_LONG);
6825 else
6826 tval = (int16_t)insn_get(env, s, OT_WORD);
6827 next_eip = s->pc - s->cs_base;
6828 tval += next_eip;
6829 if (s->dflag == 0)
6830 tval &= 0xffff;
6831 else if(!CODE64(s))
6832 tval &= 0xffffffff;
6833 gen_movtl_T0_im(next_eip);
6834 gen_push_T0(s);
6835 gen_jmp(s, tval);
6837 break;
6838 case 0x9a: /* lcall im */
6840 unsigned int selector, offset;
6842 if (CODE64(s))
6843 goto illegal_op;
6844 ot = dflag ? OT_LONG : OT_WORD;
6845 offset = insn_get(env, s, ot);
6846 selector = insn_get(env, s, OT_WORD);
6848 gen_op_movl_T0_im(selector);
6849 gen_op_movl_T1_imu(offset);
6851 goto do_lcall;
6852 case 0xe9: /* jmp im */
6853 if (dflag)
6854 tval = (int32_t)insn_get(env, s, OT_LONG);
6855 else
6856 tval = (int16_t)insn_get(env, s, OT_WORD);
6857 tval += s->pc - s->cs_base;
6858 if (s->dflag == 0)
6859 tval &= 0xffff;
6860 else if(!CODE64(s))
6861 tval &= 0xffffffff;
6862 gen_jmp(s, tval);
6863 break;
6864 case 0xea: /* ljmp im */
6866 unsigned int selector, offset;
6868 if (CODE64(s))
6869 goto illegal_op;
6870 ot = dflag ? OT_LONG : OT_WORD;
6871 offset = insn_get(env, s, ot);
6872 selector = insn_get(env, s, OT_WORD);
6874 gen_op_movl_T0_im(selector);
6875 gen_op_movl_T1_imu(offset);
6877 goto do_ljmp;
6878 case 0xeb: /* jmp Jb */
6879 tval = (int8_t)insn_get(env, s, OT_BYTE);
6880 tval += s->pc - s->cs_base;
6881 if (s->dflag == 0)
6882 tval &= 0xffff;
6883 gen_jmp(s, tval);
6884 break;
6885 case 0x70 ... 0x7f: /* jcc Jb */
6886 tval = (int8_t)insn_get(env, s, OT_BYTE);
6887 goto do_jcc;
6888 case 0x180 ... 0x18f: /* jcc Jv */
6889 if (dflag) {
6890 tval = (int32_t)insn_get(env, s, OT_LONG);
6891 } else {
6892 tval = (int16_t)insn_get(env, s, OT_WORD);
6894 do_jcc:
6895 next_eip = s->pc - s->cs_base;
6896 tval += next_eip;
6897 if (s->dflag == 0)
6898 tval &= 0xffff;
6899 gen_jcc(s, b, tval, next_eip);
6900 break;
6902 case 0x190 ... 0x19f: /* setcc Gv */
6903 modrm = cpu_ldub_code(env, s->pc++);
6904 gen_setcc1(s, b, cpu_T[0]);
6905 gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
6906 break;
6907 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6908 if (!(s->cpuid_features & CPUID_CMOV)) {
6909 goto illegal_op;
6911 ot = dflag + OT_WORD;
6912 modrm = cpu_ldub_code(env, s->pc++);
6913 reg = ((modrm >> 3) & 7) | rex_r;
6914 gen_cmovcc1(env, s, ot, b, modrm, reg);
6915 break;
6917 /************************/
6918 /* flags */
6919 case 0x9c: /* pushf */
6920 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6921 if (s->vm86 && s->iopl != 3) {
6922 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6923 } else {
6924 gen_update_cc_op(s);
6925 gen_helper_read_eflags(cpu_T[0], cpu_env);
6926 gen_push_T0(s);
6928 break;
6929 case 0x9d: /* popf */
6930 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6931 if (s->vm86 && s->iopl != 3) {
6932 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6933 } else {
6934 gen_pop_T0(s);
6935 if (s->cpl == 0) {
6936 if (s->dflag) {
6937 gen_helper_write_eflags(cpu_env, cpu_T[0],
6938 tcg_const_i32((TF_MASK | AC_MASK |
6939 ID_MASK | NT_MASK |
6940 IF_MASK |
6941 IOPL_MASK)));
6942 } else {
6943 gen_helper_write_eflags(cpu_env, cpu_T[0],
6944 tcg_const_i32((TF_MASK | AC_MASK |
6945 ID_MASK | NT_MASK |
6946 IF_MASK | IOPL_MASK)
6947 & 0xffff));
6949 } else {
6950 if (s->cpl <= s->iopl) {
6951 if (s->dflag) {
6952 gen_helper_write_eflags(cpu_env, cpu_T[0],
6953 tcg_const_i32((TF_MASK |
6954 AC_MASK |
6955 ID_MASK |
6956 NT_MASK |
6957 IF_MASK)));
6958 } else {
6959 gen_helper_write_eflags(cpu_env, cpu_T[0],
6960 tcg_const_i32((TF_MASK |
6961 AC_MASK |
6962 ID_MASK |
6963 NT_MASK |
6964 IF_MASK)
6965 & 0xffff));
6967 } else {
6968 if (s->dflag) {
6969 gen_helper_write_eflags(cpu_env, cpu_T[0],
6970 tcg_const_i32((TF_MASK | AC_MASK |
6971 ID_MASK | NT_MASK)));
6972 } else {
6973 gen_helper_write_eflags(cpu_env, cpu_T[0],
6974 tcg_const_i32((TF_MASK | AC_MASK |
6975 ID_MASK | NT_MASK)
6976 & 0xffff));
6980 gen_pop_update(s);
6981 set_cc_op(s, CC_OP_EFLAGS);
6982 /* abort translation because TF/AC flag may change */
6983 gen_jmp_im(s->pc - s->cs_base);
6984 gen_eob(s);
6986 break;
6987 case 0x9e: /* sahf */
6988 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6989 goto illegal_op;
6990 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6991 gen_compute_eflags(s);
6992 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6993 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6994 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
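/* cc_src now holds the preserved O flag plus S/Z/A/P/C taken from AH */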
6995 break;
6996 case 0x9f: /* lahf */
6997 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6998 goto illegal_op;
6999 gen_compute_eflags(s);
7000 /* Note: gen_compute_eflags() only gives the condition codes */
7001 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
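/* bit 1 of EFLAGS always reads as 1, hence the OR with 0x02 */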
7002 gen_op_mov_reg_T0(OT_BYTE, R_AH);
7003 break;
7004 case 0xf5: /* cmc */
7005 gen_compute_eflags(s);
7006 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
7007 break;
7008 case 0xf8: /* clc */
7009 gen_compute_eflags(s);
7010 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
7011 break;
7012 case 0xf9: /* stc */
7013 gen_compute_eflags(s);
7014 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
7015 break;
7016 case 0xfc: /* cld */
7017 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
7018 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
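/* df is stored as +1/-1 rather than as a flag bit so the string ops can turn it directly into a signed element-size increment */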
7019 break;
7020 case 0xfd: /* std */
7021 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
7022 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
7023 break;
7025 /************************/
7026 /* bit operations */
7027 case 0x1ba: /* bt/bts/btr/btc Gv, im */
7028 ot = dflag + OT_WORD;
7029 modrm = cpu_ldub_code(env, s->pc++);
7030 op = (modrm >> 3) & 7;
7031 mod = (modrm >> 6) & 3;
7032 rm = (modrm & 7) | REX_B(s);
7033 if (mod != 3) {
7034 s->rip_offset = 1;
7035 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7036 gen_op_ld_T0_A0(ot + s->mem_index);
7037 } else {
7038 gen_op_mov_TN_reg(ot, 0, rm);
7040 /* load shift */
7041 val = cpu_ldub_code(env, s->pc++);
7042 gen_op_movl_T1_im(val);
7043 if (op < 4)
7044 goto illegal_op;
7045 op -= 4;
7046 goto bt_op;
7047 case 0x1a3: /* bt Gv, Ev */
7048 op = 0;
7049 goto do_btx;
7050 case 0x1ab: /* bts */
7051 op = 1;
7052 goto do_btx;
7053 case 0x1b3: /* btr */
7054 op = 2;
7055 goto do_btx;
7056 case 0x1bb: /* btc */
7057 op = 3;
7058 do_btx:
7059 ot = dflag + OT_WORD;
7060 modrm = cpu_ldub_code(env, s->pc++);
7061 reg = ((modrm >> 3) & 7) | rex_r;
7062 mod = (modrm >> 6) & 3;
7063 rm = (modrm & 7) | REX_B(s);
7064 gen_op_mov_TN_reg(OT_LONG, 1, reg);
7065 if (mod != 3) {
7066 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7067 /* specific case: we need to add a displacement */
7068 gen_exts(ot, cpu_T[1]);
7069 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
7070 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
7071 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
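/* the sign-extended bit index, arithmetically shifted by log2(operand width), selects the word; scaling back to bytes gives the extra displacement */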
7072 gen_op_ld_T0_A0(ot + s->mem_index);
7073 } else {
7074 gen_op_mov_TN_reg(ot, 0, rm);
7075 }
7076 bt_op:
7077 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
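/* reduce the bit offset modulo the operand width; for memory operands the word displacement was already folded into A0 above */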
7078 switch(op) {
7079 case 0:
7080 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
7081 tcg_gen_movi_tl(cpu_cc_dst, 0);
7082 break;
7083 case 1:
7084 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7085 tcg_gen_movi_tl(cpu_tmp0, 1);
7086 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7087 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7088 break;
7089 case 2:
7090 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7091 tcg_gen_movi_tl(cpu_tmp0, 1);
7092 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7093 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
7094 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7095 break;
7096 default:
7097 case 3:
7098 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
7099 tcg_gen_movi_tl(cpu_tmp0, 1);
7100 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
7101 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
7102 break;
7104 set_cc_op(s, CC_OP_SARB + ot);
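/* the SAR cc ops recover CF from bit 0 of cc_src, which is exactly the tested bit */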
7105 if (op != 0) {
7106 if (mod != 3)
7107 gen_op_st_T0_A0(ot + s->mem_index);
7108 else
7109 gen_op_mov_reg_T0(ot, rm);
7110 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
7111 tcg_gen_movi_tl(cpu_cc_dst, 0);
7112 }
7113 break;
7114 case 0x1bc: /* bsf / tzcnt */
7115 case 0x1bd: /* bsr / lzcnt */
7116 ot = dflag + OT_WORD;
7117 modrm = cpu_ldub_code(env, s->pc++);
7118 reg = ((modrm >> 3) & 7) | rex_r;
7119 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7120 gen_extu(ot, cpu_T[0]);
7122 /* Note that lzcnt and tzcnt are in different extensions. */
7123 if ((prefixes & PREFIX_REPZ)
7124 && (b & 1
7125 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
7126 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
7127 int size = 8 << ot;
7128 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
7129 if (b & 1) {
7130 /* For lzcnt, reduce the target_ulong result by the
7131 number of zeros that we expect to find at the top. */
7132 gen_helper_clz(cpu_T[0], cpu_T[0]);
7133 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
7134 } else {
7135 /* For tzcnt, a zero input must return the operand size:
7136 force all bits outside the operand size to 1. */
7137 target_ulong mask = (target_ulong)-2 << (size - 1);
7138 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
7139 gen_helper_ctz(cpu_T[0], cpu_T[0]);
7141 /* For lzcnt/tzcnt, C and Z bits are defined and are
7142 related to the result. */
7143 gen_op_update1_cc();
7144 set_cc_op(s, CC_OP_BMILGB + ot);
7145 } else {
7146 /* For bsr/bsf, only the Z bit is defined and it is related
7147 to the input and not the result. */
7148 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
7149 set_cc_op(s, CC_OP_LOGICB + ot);
7150 if (b & 1) {
7151 /* For bsr, return the bit index of the first 1 bit,
7152 not the count of leading zeros. */
7153 gen_helper_clz(cpu_T[0], cpu_T[0]);
7154 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
7155 } else {
7156 gen_helper_ctz(cpu_T[0], cpu_T[0]);
7158 /* ??? The manual says that the output is undefined when the
7159 input is zero, but real hardware leaves it unchanged, and
7160 real programs appear to depend on that. */
7161 tcg_gen_movi_tl(cpu_tmp0, 0);
7162 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
7163 cpu_regs[reg], cpu_T[0]);
7165 gen_op_mov_reg_T0(ot, reg);
7166 break;
7167 /************************/
7168 /* bcd */
7169 case 0x27: /* daa */
7170 if (CODE64(s))
7171 goto illegal_op;
7172 gen_update_cc_op(s);
7173 gen_helper_daa(cpu_env);
7174 set_cc_op(s, CC_OP_EFLAGS);
7175 break;
7176 case 0x2f: /* das */
7177 if (CODE64(s))
7178 goto illegal_op;
7179 gen_update_cc_op(s);
7180 gen_helper_das(cpu_env);
7181 set_cc_op(s, CC_OP_EFLAGS);
7182 break;
7183 case 0x37: /* aaa */
7184 if (CODE64(s))
7185 goto illegal_op;
7186 gen_update_cc_op(s);
7187 gen_helper_aaa(cpu_env);
7188 set_cc_op(s, CC_OP_EFLAGS);
7189 break;
7190 case 0x3f: /* aas */
7191 if (CODE64(s))
7192 goto illegal_op;
7193 gen_update_cc_op(s);
7194 gen_helper_aas(cpu_env);
7195 set_cc_op(s, CC_OP_EFLAGS);
7196 break;
7197 case 0xd4: /* aam */
7198 if (CODE64(s))
7199 goto illegal_op;
7200 val = cpu_ldub_code(env, s->pc++);
7201 if (val == 0) {
7202 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
7203 } else {
7204 gen_helper_aam(cpu_env, tcg_const_i32(val));
7205 set_cc_op(s, CC_OP_LOGICB);
7207 break;
7208 case 0xd5: /* aad */
7209 if (CODE64(s))
7210 goto illegal_op;
7211 val = cpu_ldub_code(env, s->pc++);
7212 gen_helper_aad(cpu_env, tcg_const_i32(val));
7213 set_cc_op(s, CC_OP_LOGICB);
7214 break;
7215 /************************/
7216 /* misc */
7217 case 0x90: /* nop */
7218 /* XXX: correct lock test for all insn */
7219 if (prefixes & PREFIX_LOCK) {
7220 goto illegal_op;
7222 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
7223 if (REX_B(s)) {
7224 goto do_xchg_reg_eax;
7226 if (prefixes & PREFIX_REPZ) {
7227 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
7229 break;
7230 case 0x9b: /* fwait */
7231 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
7232 (HF_MP_MASK | HF_TS_MASK)) {
7233 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7234 } else {
7235 gen_update_cc_op(s);
7236 gen_jmp_im(pc_start - s->cs_base);
7237 gen_helper_fwait(cpu_env);
7239 break;
7240 case 0xcc: /* int3 */
7241 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
7242 break;
7243 case 0xcd: /* int N */
7244 val = cpu_ldub_code(env, s->pc++);
7245 if (s->vm86 && s->iopl != 3) {
7246 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7247 } else {
7248 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
7250 break;
7251 case 0xce: /* into */
7252 if (CODE64(s))
7253 goto illegal_op;
7254 gen_update_cc_op(s);
7255 gen_jmp_im(pc_start - s->cs_base);
7256 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
7257 break;
7258 #ifdef WANT_ICEBP
7259 case 0xf1: /* icebp (undocumented, exits to external debugger) */
7260 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
7261 #if 1
7262 gen_debug(s, pc_start - s->cs_base);
7263 #else
7264 /* start debug */
7265 tb_flush(env);
7266 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
7267 #endif
7268 break;
7269 #endif
7270 case 0xfa: /* cli */
7271 if (!s->vm86) {
7272 if (s->cpl <= s->iopl) {
7273 gen_helper_cli(cpu_env);
7274 } else {
7275 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7277 } else {
7278 if (s->iopl == 3) {
7279 gen_helper_cli(cpu_env);
7280 } else {
7281 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7284 break;
7285 case 0xfb: /* sti */
7286 if (!s->vm86) {
7287 if (s->cpl <= s->iopl) {
7288 gen_sti:
7289 gen_helper_sti(cpu_env);
7290 /* interrupts are enabled only after the insn following sti */
7291 /* if several sti instructions occur back to back, only the
7292 first one sets the interrupt-inhibit flag */
7293 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
7294 gen_helper_set_inhibit_irq(cpu_env);
7295 /* give a chance to handle pending irqs */
7296 gen_jmp_im(s->pc - s->cs_base);
7297 gen_eob(s);
7298 } else {
7299 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7301 } else {
7302 if (s->iopl == 3) {
7303 goto gen_sti;
7304 } else {
7305 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7308 break;
7309 case 0x62: /* bound */
7310 if (CODE64(s))
7311 goto illegal_op;
7312 ot = dflag ? OT_LONG : OT_WORD;
7313 modrm = cpu_ldub_code(env, s->pc++);
7314 reg = (modrm >> 3) & 7;
7315 mod = (modrm >> 6) & 3;
7316 if (mod == 3)
7317 goto illegal_op;
7318 gen_op_mov_TN_reg(ot, 0, reg);
7319 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7320 gen_jmp_im(pc_start - s->cs_base);
7321 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7322 if (ot == OT_WORD) {
7323 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
7324 } else {
7325 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
7327 break;
7328 case 0x1c8 ... 0x1cf: /* bswap reg */
7329 reg = (b & 7) | REX_B(s);
7330 #ifdef TARGET_X86_64
7331 if (dflag == 2) {
7332 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
7333 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
7334 gen_op_mov_reg_T0(OT_QUAD, reg);
7335 } else
7336 #endif
7338 gen_op_mov_TN_reg(OT_LONG, 0, reg);
7339 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7340 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
7341 gen_op_mov_reg_T0(OT_LONG, reg);
7343 break;
7344 case 0xd6: /* salc */
7345 if (CODE64(s))
7346 goto illegal_op;
7347 gen_compute_eflags_c(s, cpu_T[0]);
7348 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
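/* negating the 0/1 carry value leaves 0x00 or 0xff in AL */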
7349 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
7350 break;
7351 case 0xe0: /* loopnz */
7352 case 0xe1: /* loopz */
7353 case 0xe2: /* loop */
7354 case 0xe3: /* jecxz */
7356 int l1, l2, l3;
7358 tval = (int8_t)insn_get(env, s, OT_BYTE);
7359 next_eip = s->pc - s->cs_base;
7360 tval += next_eip;
7361 if (s->dflag == 0)
7362 tval &= 0xffff;
7364 l1 = gen_new_label();
7365 l2 = gen_new_label();
7366 l3 = gen_new_label();
7367 b &= 3;
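/* b now selects among loopnz/loopz/loop/jecxz (opcodes 0xe0..0xe3) */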
7368 switch(b) {
7369 case 0: /* loopnz */
7370 case 1: /* loopz */
7371 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7372 gen_op_jz_ecx(s->aflag, l3);
7373 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7374 break;
7375 case 2: /* loop */
7376 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7377 gen_op_jnz_ecx(s->aflag, l1);
7378 break;
7379 default:
7380 case 3: /* jcxz */
7381 gen_op_jz_ecx(s->aflag, l1);
7382 break;
7385 gen_set_label(l3);
7386 gen_jmp_im(next_eip);
7387 tcg_gen_br(l2);
7389 gen_set_label(l1);
7390 gen_jmp_im(tval);
7391 gen_set_label(l2);
7392 gen_eob(s);
7394 break;
7395 case 0x130: /* wrmsr */
7396 case 0x132: /* rdmsr */
7397 if (s->cpl != 0) {
7398 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7399 } else {
7400 gen_update_cc_op(s);
7401 gen_jmp_im(pc_start - s->cs_base);
7402 if (b & 2) {
7403 gen_helper_rdmsr(cpu_env);
7404 } else {
7405 gen_helper_wrmsr(cpu_env);
7408 break;
7409 case 0x131: /* rdtsc */
7410 gen_update_cc_op(s);
7411 gen_jmp_im(pc_start - s->cs_base);
7412 if (use_icount)
7413 gen_io_start();
7414 gen_helper_rdtsc(cpu_env);
7415 if (use_icount) {
7416 gen_io_end();
7417 gen_jmp(s, s->pc - s->cs_base);
7419 break;
7420 case 0x133: /* rdpmc */
7421 gen_update_cc_op(s);
7422 gen_jmp_im(pc_start - s->cs_base);
7423 gen_helper_rdpmc(cpu_env);
7424 break;
7425 case 0x134: /* sysenter */
7426 /* On Intel CPUs, SYSENTER remains valid in 64-bit mode */
7427 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7428 goto illegal_op;
7429 if (!s->pe) {
7430 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7431 } else {
7432 gen_update_cc_op(s);
7433 gen_jmp_im(pc_start - s->cs_base);
7434 gen_helper_sysenter(cpu_env);
7435 gen_eob(s);
7437 break;
7438 case 0x135: /* sysexit */
7439 /* On Intel CPUs, SYSEXIT remains valid in 64-bit mode */
7440 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7441 goto illegal_op;
7442 if (!s->pe) {
7443 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7444 } else {
7445 gen_update_cc_op(s);
7446 gen_jmp_im(pc_start - s->cs_base);
7447 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
7448 gen_eob(s);
7450 break;
7451 #ifdef TARGET_X86_64
7452 case 0x105: /* syscall */
7453 /* XXX: is it usable in real mode? */
7454 gen_update_cc_op(s);
7455 gen_jmp_im(pc_start - s->cs_base);
7456 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7457 gen_eob(s);
7458 break;
7459 case 0x107: /* sysret */
7460 if (!s->pe) {
7461 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7462 } else {
7463 gen_update_cc_op(s);
7464 gen_jmp_im(pc_start - s->cs_base);
7465 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
7466 /* condition codes are modified only in long mode */
7467 if (s->lma) {
7468 set_cc_op(s, CC_OP_EFLAGS);
7470 gen_eob(s);
7472 break;
7473 #endif
7474 case 0x1a2: /* cpuid */
7475 gen_update_cc_op(s);
7476 gen_jmp_im(pc_start - s->cs_base);
7477 gen_helper_cpuid(cpu_env);
7478 break;
7479 case 0xf4: /* hlt */
7480 if (s->cpl != 0) {
7481 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7482 } else {
7483 gen_update_cc_op(s);
7484 gen_jmp_im(pc_start - s->cs_base);
7485 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7486 s->is_jmp = DISAS_TB_JUMP;
7488 break;
7489 case 0x100:
7490 modrm = cpu_ldub_code(env, s->pc++);
7491 mod = (modrm >> 6) & 3;
7492 op = (modrm >> 3) & 7;
7493 switch(op) {
7494 case 0: /* sldt */
7495 if (!s->pe || s->vm86)
7496 goto illegal_op;
7497 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7498 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7499 ot = OT_WORD;
7500 if (mod == 3)
7501 ot += s->dflag;
7502 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7503 break;
7504 case 2: /* lldt */
7505 if (!s->pe || s->vm86)
7506 goto illegal_op;
7507 if (s->cpl != 0) {
7508 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7509 } else {
7510 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7511 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7512 gen_jmp_im(pc_start - s->cs_base);
7513 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7514 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7516 break;
7517 case 1: /* str */
7518 if (!s->pe || s->vm86)
7519 goto illegal_op;
7520 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7521 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7522 ot = OT_WORD;
7523 if (mod == 3)
7524 ot += s->dflag;
7525 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7526 break;
7527 case 3: /* ltr */
7528 if (!s->pe || s->vm86)
7529 goto illegal_op;
7530 if (s->cpl != 0) {
7531 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7532 } else {
7533 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7534 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7535 gen_jmp_im(pc_start - s->cs_base);
7536 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7537 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7539 break;
7540 case 4: /* verr */
7541 case 5: /* verw */
7542 if (!s->pe || s->vm86)
7543 goto illegal_op;
7544 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7545 gen_update_cc_op(s);
7546 if (op == 4) {
7547 gen_helper_verr(cpu_env, cpu_T[0]);
7548 } else {
7549 gen_helper_verw(cpu_env, cpu_T[0]);
7551 set_cc_op(s, CC_OP_EFLAGS);
7552 break;
7553 default:
7554 goto illegal_op;
7556 break;
7557 case 0x101:
7558 modrm = cpu_ldub_code(env, s->pc++);
7559 mod = (modrm >> 6) & 3;
7560 op = (modrm >> 3) & 7;
7561 rm = modrm & 7;
7562 switch(op) {
7563 case 0: /* sgdt */
7564 if (mod == 3)
7565 goto illegal_op;
7566 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7567 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7568 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7569 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7570 gen_add_A0_im(s, 2);
7571 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7572 if (!s->dflag)
7573 gen_op_andl_T0_im(0xffffff);
7574 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7575 break;
7576 case 1:
7577 if (mod == 3) {
7578 switch (rm) {
7579 case 0: /* monitor */
7580 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7581 s->cpl != 0)
7582 goto illegal_op;
7583 gen_update_cc_op(s);
7584 gen_jmp_im(pc_start - s->cs_base);
7585 #ifdef TARGET_X86_64
7586 if (s->aflag == 2) {
7587 gen_op_movq_A0_reg(R_EAX);
7588 } else
7589 #endif
7591 gen_op_movl_A0_reg(R_EAX);
7592 if (s->aflag == 0)
7593 gen_op_andl_A0_ffff();
7595 gen_add_A0_ds_seg(s);
7596 gen_helper_monitor(cpu_env, cpu_A0);
7597 break;
7598 case 1: /* mwait */
7599 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7600 s->cpl != 0)
7601 goto illegal_op;
7602 gen_update_cc_op(s);
7603 gen_jmp_im(pc_start - s->cs_base);
7604 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7605 gen_eob(s);
7606 break;
7607 case 2: /* clac */
7608 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7609 s->cpl != 0) {
7610 goto illegal_op;
7612 gen_helper_clac(cpu_env);
7613 gen_jmp_im(s->pc - s->cs_base);
7614 gen_eob(s);
7615 break;
7616 case 3: /* stac */
7617 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7618 s->cpl != 0) {
7619 goto illegal_op;
7621 gen_helper_stac(cpu_env);
7622 gen_jmp_im(s->pc - s->cs_base);
7623 gen_eob(s);
7624 break;
7625 default:
7626 goto illegal_op;
7628 } else { /* sidt */
7629 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7630 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7631 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7632 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7633 gen_add_A0_im(s, 2);
7634 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7635 if (!s->dflag)
7636 gen_op_andl_T0_im(0xffffff);
7637 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7639 break;
7640 case 2: /* lgdt */
7641 case 3: /* lidt */
7642 if (mod == 3) {
7643 gen_update_cc_op(s);
7644 gen_jmp_im(pc_start - s->cs_base);
7645 switch(rm) {
7646 case 0: /* VMRUN */
7647 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7648 goto illegal_op;
7649 if (s->cpl != 0) {
7650 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7651 break;
7652 } else {
7653 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
7654 tcg_const_i32(s->pc - pc_start));
7655 tcg_gen_exit_tb(0);
7656 s->is_jmp = DISAS_TB_JUMP;
7658 break;
7659 case 1: /* VMMCALL */
7660 if (!(s->flags & HF_SVME_MASK))
7661 goto illegal_op;
7662 gen_helper_vmmcall(cpu_env);
7663 break;
7664 case 2: /* VMLOAD */
7665 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7666 goto illegal_op;
7667 if (s->cpl != 0) {
7668 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7669 break;
7670 } else {
7671 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
7673 break;
7674 case 3: /* VMSAVE */
7675 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7676 goto illegal_op;
7677 if (s->cpl != 0) {
7678 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7679 break;
7680 } else {
7681 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
7683 break;
7684 case 4: /* STGI */
7685 if ((!(s->flags & HF_SVME_MASK) &&
7686 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7687 !s->pe)
7688 goto illegal_op;
7689 if (s->cpl != 0) {
7690 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7691 break;
7692 } else {
7693 gen_helper_stgi(cpu_env);
7695 break;
7696 case 5: /* CLGI */
7697 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7698 goto illegal_op;
7699 if (s->cpl != 0) {
7700 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7701 break;
7702 } else {
7703 gen_helper_clgi(cpu_env);
7705 break;
7706 case 6: /* SKINIT */
7707 if ((!(s->flags & HF_SVME_MASK) &&
7708 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7709 !s->pe)
7710 goto illegal_op;
7711 gen_helper_skinit(cpu_env);
7712 break;
7713 case 7: /* INVLPGA */
7714 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7715 goto illegal_op;
7716 if (s->cpl != 0) {
7717 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7718 break;
7719 } else {
7720 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
7722 break;
7723 default:
7724 goto illegal_op;
7726 } else if (s->cpl != 0) {
7727 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7728 } else {
7729 gen_svm_check_intercept(s, pc_start,
7730 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7731 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7732 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7733 gen_add_A0_im(s, 2);
7734 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7735 if (!s->dflag)
7736 gen_op_andl_T0_im(0xffffff);
7737 if (op == 2) {
7738 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7739 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7740 } else {
7741 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7742 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7745 break;
7746 case 4: /* smsw */
7747 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7748 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7749 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7750 #else
7751 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7752 #endif
7753 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
7754 break;
7755 case 6: /* lmsw */
7756 if (s->cpl != 0) {
7757 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7758 } else {
7759 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7760 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7761 gen_helper_lmsw(cpu_env, cpu_T[0]);
7762 gen_jmp_im(s->pc - s->cs_base);
7763 gen_eob(s);
7765 break;
7766 case 7:
7767 if (mod != 3) { /* invlpg */
7768 if (s->cpl != 0) {
7769 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7770 } else {
7771 gen_update_cc_op(s);
7772 gen_jmp_im(pc_start - s->cs_base);
7773 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7774 gen_helper_invlpg(cpu_env, cpu_A0);
7775 gen_jmp_im(s->pc - s->cs_base);
7776 gen_eob(s);
7778 } else {
7779 switch (rm) {
7780 case 0: /* swapgs */
7781 #ifdef TARGET_X86_64
7782 if (CODE64(s)) {
7783 if (s->cpl != 0) {
7784 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7785 } else {
7786 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7787 offsetof(CPUX86State,segs[R_GS].base));
7788 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7789 offsetof(CPUX86State,kernelgsbase));
7790 tcg_gen_st_tl(cpu_T[1], cpu_env,
7791 offsetof(CPUX86State,segs[R_GS].base));
7792 tcg_gen_st_tl(cpu_T[0], cpu_env,
7793 offsetof(CPUX86State,kernelgsbase));
7795 } else
7796 #endif
7798 goto illegal_op;
7800 break;
7801 case 1: /* rdtscp */
7802 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7803 goto illegal_op;
7804 gen_update_cc_op(s);
7805 gen_jmp_im(pc_start - s->cs_base);
7806 if (use_icount)
7807 gen_io_start();
7808 gen_helper_rdtscp(cpu_env);
7809 if (use_icount) {
7810 gen_io_end();
7811 gen_jmp(s, s->pc - s->cs_base);
7813 break;
7814 default:
7815 goto illegal_op;
7818 break;
7819 default:
7820 goto illegal_op;
7822 break;
7823 case 0x108: /* invd */
7824 case 0x109: /* wbinvd */
7825 if (s->cpl != 0) {
7826 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7827 } else {
7828 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7829 /* nothing to do */
7831 break;
7832 case 0x63: /* arpl or movslS (x86_64) */
7833 #ifdef TARGET_X86_64
7834 if (CODE64(s)) {
7835 int d_ot;
7836 /* d_ot is the size of destination */
7837 d_ot = dflag + OT_WORD;
7839 modrm = cpu_ldub_code(env, s->pc++);
7840 reg = ((modrm >> 3) & 7) | rex_r;
7841 mod = (modrm >> 6) & 3;
7842 rm = (modrm & 7) | REX_B(s);
7844 if (mod == 3) {
7845 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7846 /* sign extend */
7847 if (d_ot == OT_QUAD)
7848 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7849 gen_op_mov_reg_T0(d_ot, reg);
7850 } else {
7851 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7852 if (d_ot == OT_QUAD) {
7853 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7854 } else {
7855 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7857 gen_op_mov_reg_T0(d_ot, reg);
7859 } else
7860 #endif
7862 int label1;
7863 TCGv t0, t1, t2, a0;
7865 if (!s->pe || s->vm86)
7866 goto illegal_op;
7867 t0 = tcg_temp_local_new();
7868 t1 = tcg_temp_local_new();
7869 t2 = tcg_temp_local_new();
7870 ot = OT_WORD;
7871 modrm = cpu_ldub_code(env, s->pc++);
7872 reg = (modrm >> 3) & 7;
7873 mod = (modrm >> 6) & 3;
7874 rm = modrm & 7;
7875 if (mod != 3) {
7876 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7877 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7878 a0 = tcg_temp_local_new();
7879 tcg_gen_mov_tl(a0, cpu_A0);
7880 } else {
7881 gen_op_mov_v_reg(ot, t0, rm);
7882 TCGV_UNUSED(a0);
7884 gen_op_mov_v_reg(ot, t1, reg);
7885 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7886 tcg_gen_andi_tl(t1, t1, 3);
7887 tcg_gen_movi_tl(t2, 0);
7888 label1 = gen_new_label();
7889 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7890 tcg_gen_andi_tl(t0, t0, ~3);
7891 tcg_gen_or_tl(t0, t0, t1);
7892 tcg_gen_movi_tl(t2, CC_Z);
7893 gen_set_label(label1);
7894 if (mod != 3) {
7895 gen_op_st_v(ot + s->mem_index, t0, a0);
7896 tcg_temp_free(a0);
7897 } else {
7898 gen_op_mov_reg_v(ot, rm, t0);
7900 gen_compute_eflags(s);
7901 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7902 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7903 tcg_temp_free(t0);
7904 tcg_temp_free(t1);
7905 tcg_temp_free(t2);
7907 break;
7908 case 0x102: /* lar */
7909 case 0x103: /* lsl */
7911 int label1;
7912 TCGv t0;
7913 if (!s->pe || s->vm86)
7914 goto illegal_op;
7915 ot = dflag ? OT_LONG : OT_WORD;
7916 modrm = cpu_ldub_code(env, s->pc++);
7917 reg = ((modrm >> 3) & 7) | rex_r;
7918 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7919 t0 = tcg_temp_local_new();
7920 gen_update_cc_op(s);
7921 if (b == 0x102) {
7922 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7923 } else {
7924 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7926 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7927 label1 = gen_new_label();
7928 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7929 gen_op_mov_reg_v(ot, reg, t0);
7930 gen_set_label(label1);
7931 set_cc_op(s, CC_OP_EFLAGS);
7932 tcg_temp_free(t0);
7934 break;
7935 case 0x118:
7936 modrm = cpu_ldub_code(env, s->pc++);
7937 mod = (modrm >> 6) & 3;
7938 op = (modrm >> 3) & 7;
7939 switch(op) {
7940 case 0: /* prefetchnta */
7941 case 1: /* prefetcht0 */
7942 case 2: /* prefetcht1 */
7943 case 3: /* prefetcht2 */
7944 if (mod == 3)
7945 goto illegal_op;
7946 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7947 /* nothing more to do */
7948 break;
7949 default: /* nop (multi byte) */
7950 gen_nop_modrm(env, s, modrm);
7951 break;
7953 break;
7954 case 0x119 ... 0x11f: /* nop (multi byte) */
7955 modrm = cpu_ldub_code(env, s->pc++);
7956 gen_nop_modrm(env, s, modrm);
7957 break;
7958 case 0x120: /* mov reg, crN */
7959 case 0x122: /* mov crN, reg */
7960 if (s->cpl != 0) {
7961 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7962 } else {
7963 modrm = cpu_ldub_code(env, s->pc++);
7964 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7965 * AMD documentation (24594.pdf) and testing of
7966 * intel 386 and 486 processors all show that the mod bits
7967 * are assumed to be 1's, regardless of actual values.
7968 */
7969 rm = (modrm & 7) | REX_B(s);
7970 reg = ((modrm >> 3) & 7) | rex_r;
7971 if (CODE64(s))
7972 ot = OT_QUAD;
7973 else
7974 ot = OT_LONG;
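/* AMD's CR8 legacy alias: with the CR8LEG CPUID bit, a LOCK-prefixed mov to/from CR0 actually addresses CR8 (the TPR) */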
7975 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7976 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7977 reg = 8;
7978 }
7979 switch(reg) {
7980 case 0:
7981 case 2:
7982 case 3:
7983 case 4:
7984 case 8:
7985 gen_update_cc_op(s);
7986 gen_jmp_im(pc_start - s->cs_base);
7987 if (b & 2) {
7988 gen_op_mov_TN_reg(ot, 0, rm);
7989 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7990 cpu_T[0]);
7991 gen_jmp_im(s->pc - s->cs_base);
7992 gen_eob(s);
7993 } else {
7994 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7995 gen_op_mov_reg_T0(ot, rm);
7997 break;
7998 default:
7999 goto illegal_op;
8002 break;
8003 case 0x121: /* mov reg, drN */
8004 case 0x123: /* mov drN, reg */
8005 if (s->cpl != 0) {
8006 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
8007 } else {
8008 modrm = cpu_ldub_code(env, s->pc++);
8009 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
8010 * AMD documentation (24594.pdf) and testing of
8011 * intel 386 and 486 processors all show that the mod bits
8012 * are assumed to be 1's, regardless of actual values.
8013 */
8014 rm = (modrm & 7) | REX_B(s);
8015 reg = ((modrm >> 3) & 7) | rex_r;
8016 if (CODE64(s))
8017 ot = OT_QUAD;
8018 else
8019 ot = OT_LONG;
8020 /* XXX: do it dynamically with CR4.DE bit */
8021 if (reg == 4 || reg == 5 || reg >= 8)
8022 goto illegal_op;
8023 if (b & 2) {
8024 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
8025 gen_op_mov_TN_reg(ot, 0, rm);
8026 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
8027 gen_jmp_im(s->pc - s->cs_base);
8028 gen_eob(s);
8029 } else {
8030 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
8031 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
8032 gen_op_mov_reg_T0(ot, rm);
8035 break;
8036 case 0x106: /* clts */
8037 if (s->cpl != 0) {
8038 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
8039 } else {
8040 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
8041 gen_helper_clts(cpu_env);
8042 /* abort block because static cpu state changed */
8043 gen_jmp_im(s->pc - s->cs_base);
8044 gen_eob(s);
8046 break;
8047 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
8048 case 0x1c3: /* MOVNTI reg, mem */
8049 if (!(s->cpuid_features & CPUID_SSE2))
8050 goto illegal_op;
8051 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
8052 modrm = cpu_ldub_code(env, s->pc++);
8053 mod = (modrm >> 6) & 3;
8054 if (mod == 3)
8055 goto illegal_op;
8056 reg = ((modrm >> 3) & 7) | rex_r;
8057 /* generate a generic store */
8058 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
8059 break;
8060 case 0x1ae:
8061 modrm = cpu_ldub_code(env, s->pc++);
8062 mod = (modrm >> 6) & 3;
8063 op = (modrm >> 3) & 7;
8064 switch(op) {
8065 case 0: /* fxsave */
8066 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
8067 (s->prefix & PREFIX_LOCK))
8068 goto illegal_op;
8069 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
8070 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8071 break;
8073 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8074 gen_update_cc_op(s);
8075 gen_jmp_im(pc_start - s->cs_base);
8076 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
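/* the i32 argument tells the helper whether the REX.W (fxsave64) layout is in use rather than the legacy 32-bit one */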
8077 break;
8078 case 1: /* fxrstor */
8079 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
8080 (s->prefix & PREFIX_LOCK))
8081 goto illegal_op;
8082 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
8083 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8084 break;
8086 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8087 gen_update_cc_op(s);
8088 gen_jmp_im(pc_start - s->cs_base);
8089 gen_helper_fxrstor(cpu_env, cpu_A0,
8090 tcg_const_i32((s->dflag == 2)));
8091 break;
8092 case 2: /* ldmxcsr */
8093 case 3: /* stmxcsr */
8094 if (s->flags & HF_TS_MASK) {
8095 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
8096 break;
8098 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
8099 mod == 3)
8100 goto illegal_op;
8101 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8102 if (op == 2) {
8103 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
8104 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
8105 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
8106 } else {
8107 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
8108 gen_op_st_T0_A0(OT_LONG + s->mem_index);
8110 break;
8111 case 5: /* lfence */
8112 case 6: /* mfence */
8113 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
8114 goto illegal_op;
8115 break;
8116 case 7: /* sfence / clflush */
8117 if ((modrm & 0xc7) == 0xc0) {
8118 /* sfence */
8119 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
8120 if (!(s->cpuid_features & CPUID_SSE))
8121 goto illegal_op;
8122 } else {
8123 /* clflush */
8124 if (!(s->cpuid_features & CPUID_CLFLUSH))
8125 goto illegal_op;
8126 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8128 break;
8129 default:
8130 goto illegal_op;
8132 break;
8133 case 0x10d: /* 3DNow! prefetch(w) */
8134 modrm = cpu_ldub_code(env, s->pc++);
8135 mod = (modrm >> 6) & 3;
8136 if (mod == 3)
8137 goto illegal_op;
8138 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
8139 /* ignore for now */
8140 break;
8141 case 0x1aa: /* rsm */
8142 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
8143 if (!(s->flags & HF_SMM_MASK))
8144 goto illegal_op;
8145 gen_update_cc_op(s);
8146 gen_jmp_im(s->pc - s->cs_base);
8147 gen_helper_rsm(cpu_env);
8148 gen_eob(s);
8149 break;
8150 case 0x1b8: /* SSE4.2 popcnt */
8151 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
8152 PREFIX_REPZ)
8153 goto illegal_op;
8154 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
8155 goto illegal_op;
8157 modrm = cpu_ldub_code(env, s->pc++);
8158 reg = ((modrm >> 3) & 7) | rex_r;
8160 if (s->prefix & PREFIX_DATA)
8161 ot = OT_WORD;
8162 else if (s->dflag != 2)
8163 ot = OT_LONG;
8164 else
8165 ot = OT_QUAD;
8167 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
8168 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
8169 gen_op_mov_reg_T0(ot, reg);
8171 set_cc_op(s, CC_OP_EFLAGS);
8172 break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
        /* fall through */
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x179:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(env, s, b, pc_start, rex_r);
        break;
    default:
        goto illegal_op;
    }
    /* release the lock taken for a LOCK prefix, if any */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
}
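
/* Register the fixed TCG globals: the env pointer, the lazy condition-code
   state (cc_op/cc_dst/cc_src/cc_src2) and one TCG global per guest
   general-purpose register, each backed by its slot in CPUX86State. */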
void optimize_flags_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");

#ifdef TARGET_X86_64
    cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "rax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "rcx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "rdx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "rbx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "rsp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "rbp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "rsi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "rdi");
    cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[8]), "r8");
    cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
                                         offsetof(CPUX86State, regs[9]), "r9");
    cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[10]), "r10");
    cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[11]), "r11");
    cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[12]), "r12");
    cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[13]), "r13");
    cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[14]), "r14");
    cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
                                          offsetof(CPUX86State, regs[15]), "r15");
#else
    cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EAX]), "eax");
    cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ECX]), "ecx");
    cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDX]), "edx");
    cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBX]), "ebx");
    cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESP]), "esp");
    cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EBP]), "ebp");
    cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_ESI]), "esi");
    cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
                                             offsetof(CPUX86State, regs[R_EDI]), "edi");
#endif
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(X86CPU *cpu,
                                                  TranslationBlock *tb,
                                                  bool search_pc)
{
    CPUState *cs = CPU(cpu);
    CPUX86State *env = &cpu->env;
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = cs->singlestep_enabled;
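    /* Condition codes are evaluated lazily: start the TB not knowing how
       the flags were last computed, and track whether cc_op still needs
       to be written back to the CPU state. */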
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
    }
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
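    /* Direct block chaining cannot be used if control must return to the
       main loop after every instruction (TF or gdb single-stepping, or
       interrupts inhibited by the previous instruction). */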
    dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif

    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();
    cpu_cc_srcT = tcg_temp_local_new();
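
    /* Stop translating before the opcode buffer fills up: gen_opc_end
       leaves headroom so the ops of one more instruction always fit. */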
    gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
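    /* CF_COUNT_MASK carries the icount budget for this TB; if none was
       requested, allow the maximum number of instructions. */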
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_tb_start();
    for(;;) {
        if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
            QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr &&
                    !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
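        /* When search_pc is set, record for each TCG op the guest PC and
           cc_op of the instruction that produced it, so that
           restore_state_to_opc() can rebuild the CPU state at a fault. */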
        if (search_pc) {
            j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    tcg_ctx.gen_opc_instr_start[lj++] = 0;
            }
            tcg_ctx.gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            tcg_ctx.gen_opc_instr_start[lj] = 1;
            tcg_ctx.gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(env, dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* in single-step mode, generate only one instruction and then an
           exception */
        /* if IRQs were inhibited with HF_INHIBIT_IRQ_MASK, clear the flag
           and abort the translation to give the IRQs a chance to be
           taken */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if the translation grows too long, stop generation as well */
        if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_tb_end(tb, num_insns);
    *tcg_ctx.gen_opc_ptr = INDEX_op_end;
    /* make sure we fill in the last slots */
    if (search_pc) {
        j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
        lj++;
        while (lj <= j)
            tcg_ctx.gen_opc_instr_start[lj++] = 0;
    }

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
}

void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
}
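
/* Called after a fault inside a TB retranslated with search_pc: use the
   metadata recorded above to restore eip and, when it was statically
   known, cc_op for the faulting instruction. */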
void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for (i = 0; i <= pc_pos; i++) {
            if (tcg_ctx.gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
                         tcg_ctx.gen_opc_pc[i]);
            }
        }
        qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }
#endif
    env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}