target-i386: add helper functions to get other flags
target-i386/translate.c
1 /*
2 * i386 translation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
26 #include "cpu.h"
27 #include "disas/disas.h"
28 #include "tcg-op.h"
30 #include "helper.h"
31 #define GEN_HELPER 1
32 #include "helper.h"
34 #define PREFIX_REPZ 0x01
35 #define PREFIX_REPNZ 0x02
36 #define PREFIX_LOCK 0x04
37 #define PREFIX_DATA 0x08
38 #define PREFIX_ADR 0x10
40 #ifdef TARGET_X86_64
41 #define CODE64(s) ((s)->code64)
42 #define REX_X(s) ((s)->rex_x)
43 #define REX_B(s) ((s)->rex_b)
44 #else
45 #define CODE64(s) 0
46 #define REX_X(s) 0
47 #define REX_B(s) 0
48 #endif
50 //#define MACRO_TEST 1
52 /* global register indexes */
53 static TCGv_ptr cpu_env;
54 static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst;
55 static TCGv_i32 cpu_cc_op;
56 static TCGv cpu_regs[CPU_NB_REGS];
57 /* local temps */
58 static TCGv cpu_T[2], cpu_T3;
59 /* local register indexes (only used inside old micro ops) */
60 static TCGv cpu_tmp0, cpu_tmp4;
61 static TCGv_ptr cpu_ptr0, cpu_ptr1;
62 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
63 static TCGv_i64 cpu_tmp1_i64;
64 static TCGv cpu_tmp5;
66 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
68 #include "exec/gen-icount.h"
70 #ifdef TARGET_X86_64
71 static int x86_64_hregs;
72 #endif
74 typedef struct DisasContext {
75 /* current insn context */
76 int override; /* -1 if no override */
77 int prefix;
78 int aflag, dflag;
79 target_ulong pc; /* pc = eip + cs_base */
80 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
81 static state change (stop translation) */
82 /* current block context */
83 target_ulong cs_base; /* base of CS segment */
84 int pe; /* protected mode */
85 int code32; /* 32 bit code segment */
86 #ifdef TARGET_X86_64
87 int lma; /* long mode active */
88 int code64; /* 64 bit code segment */
89 int rex_x, rex_b;
90 #endif
91 int ss32; /* 32 bit stack segment */
92 CCOp cc_op; /* current CC operation */
93 bool cc_op_dirty;
94 int addseg; /* non zero if either DS/ES/SS have a non zero base */
95 int f_st; /* currently unused */
96 int vm86; /* vm86 mode */
97 int cpl;
98 int iopl;
99 int tf; /* TF cpu flag */
100 int singlestep_enabled; /* "hardware" single step enabled */
101 int jmp_opt; /* use direct block chaining for direct jumps */
102 int mem_index; /* select memory access functions */
103 uint64_t flags; /* all execution flags */
104 struct TranslationBlock *tb;
105 int popl_esp_hack; /* for correct popl with esp base handling */
106 int rip_offset; /* only used in x86_64, but left for simplicity */
107 int cpuid_features;
108 int cpuid_ext_features;
109 int cpuid_ext2_features;
110 int cpuid_ext3_features;
111 int cpuid_7_0_ebx_features;
112 } DisasContext;
114 static void gen_eob(DisasContext *s);
115 static void gen_jmp(DisasContext *s, target_ulong eip);
116 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
118 /* i386 arith/logic operations */
119 enum {
120 OP_ADDL,
121 OP_ORL,
122 OP_ADCL,
123 OP_SBBL,
124 OP_ANDL,
125 OP_SUBL,
126 OP_XORL,
127 OP_CMPL,
130 /* i386 shift ops */
131 enum {
132 OP_ROL,
133 OP_ROR,
134 OP_RCL,
135 OP_RCR,
136 OP_SHL,
137 OP_SHR,
138 OP_SHL1, /* undocumented */
139 OP_SAR = 7,
142 enum {
143 JCC_O,
144 JCC_B,
145 JCC_Z,
146 JCC_BE,
147 JCC_S,
148 JCC_P,
149 JCC_L,
150 JCC_LE,
153 /* operand size */
154 enum {
155 OT_BYTE = 0,
156 OT_WORD,
157 OT_LONG,
158 OT_QUAD,
161 enum {
162 /* I386 int registers */
163 OR_EAX, /* MUST be even numbered */
164 OR_ECX,
165 OR_EDX,
166 OR_EBX,
167 OR_ESP,
168 OR_EBP,
169 OR_ESI,
170 OR_EDI,
172 OR_TMP0 = 16, /* temporary operand register */
173 OR_TMP1,
174 OR_A0, /* temporary register used when doing address evaluation */
177 static void set_cc_op(DisasContext *s, CCOp op)
179 if (s->cc_op != op) {
180 s->cc_op = op;
181 /* The DYNAMIC setting is translator only, and should never be
182 stored. Thus we always consider it clean. */
183 s->cc_op_dirty = (op != CC_OP_DYNAMIC);
187 static void gen_update_cc_op(DisasContext *s)
189 if (s->cc_op_dirty) {
190 tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
191 s->cc_op_dirty = false;
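/* For illustration, the lazy EFLAGS scheme at work: an arithmetic op
   only records its operands/result plus a CC_OP_* tag and defers the
   actual flags computation.  A minimal sketch of the pattern used
   throughout this file, for an ADD of operand size 'ot':

       tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
       tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);   // saved operand
       tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);   // saved result
       set_cc_op(s, CC_OP_ADDB + ot);          // how to recompute flags

   gen_update_cc_op() then spills s->cc_op into the cpu_cc_op global
   only when a helper or a branch actually needs the flags. */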
195 static inline void gen_op_movl_T0_0(void)
197 tcg_gen_movi_tl(cpu_T[0], 0);
200 static inline void gen_op_movl_T0_im(int32_t val)
202 tcg_gen_movi_tl(cpu_T[0], val);
205 static inline void gen_op_movl_T0_imu(uint32_t val)
207 tcg_gen_movi_tl(cpu_T[0], val);
210 static inline void gen_op_movl_T1_im(int32_t val)
212 tcg_gen_movi_tl(cpu_T[1], val);
215 static inline void gen_op_movl_T1_imu(uint32_t val)
217 tcg_gen_movi_tl(cpu_T[1], val);
220 static inline void gen_op_movl_A0_im(uint32_t val)
222 tcg_gen_movi_tl(cpu_A0, val);
225 #ifdef TARGET_X86_64
226 static inline void gen_op_movq_A0_im(int64_t val)
228 tcg_gen_movi_tl(cpu_A0, val);
230 #endif
232 static inline void gen_movtl_T0_im(target_ulong val)
234 tcg_gen_movi_tl(cpu_T[0], val);
237 static inline void gen_movtl_T1_im(target_ulong val)
239 tcg_gen_movi_tl(cpu_T[1], val);
242 static inline void gen_op_andl_T0_ffff(void)
244 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
247 static inline void gen_op_andl_T0_im(uint32_t val)
249 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
252 static inline void gen_op_movl_T0_T1(void)
254 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
257 static inline void gen_op_andl_A0_ffff(void)
259 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
262 #ifdef TARGET_X86_64
264 #define NB_OP_SIZES 4
266 #else /* !TARGET_X86_64 */
268 #define NB_OP_SIZES 3
270 #endif /* !TARGET_X86_64 */
272 #if defined(HOST_WORDS_BIGENDIAN)
273 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
274 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
275 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
276 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
277 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
278 #else
279 #define REG_B_OFFSET 0
280 #define REG_H_OFFSET 1
281 #define REG_W_OFFSET 0
282 #define REG_L_OFFSET 0
283 #define REG_LH_OFFSET 4
284 #endif
286 /* In instruction encodings for byte register accesses the
287 * register number usually indicates "low 8 bits of register N";
288 * however there are some special cases where N 4..7 indicates
289 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
290 * true for this special case, false otherwise.
292 static inline bool byte_reg_is_xH(int reg)
294 if (reg < 4) {
295 return false;
297 #ifdef TARGET_X86_64
298 if (reg >= 8 || x86_64_hregs) {
299 return false;
301 #endif
302 return true;
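/* Example: with no REX prefix, a byte operand with reg = 5 names CH,
   i.e. bits 15..8 of ECX (reg - 4 = 1), so this returns true.  In
   64-bit mode, any REX prefix (x86_64_hregs) makes reg = 5 name BPL,
   the low byte of RBP, and the function returns false. */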
305 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
307 switch(ot) {
308 case OT_BYTE:
309 if (!byte_reg_is_xH(reg)) {
310 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
311 } else {
312 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
314 break;
315 case OT_WORD:
316 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
317 break;
318 default: /* XXX this shouldn't be reached; abort? */
319 case OT_LONG:
320 /* For x86_64, this sets the higher half of register to zero.
321 For i386, this is equivalent to a mov. */
322 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
323 break;
324 #ifdef TARGET_X86_64
325 case OT_QUAD:
326 tcg_gen_mov_tl(cpu_regs[reg], t0);
327 break;
328 #endif
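/* Sketch of what the OT_BYTE case above does for a low-byte write
   such as "mov al, ...": the deposit replaces only bits 7..0 and
   keeps the rest of the register intact, avoiding an explicit
   mask-and-or sequence:

       tcg_gen_deposit_tl(cpu_regs[R_EAX], cpu_regs[R_EAX], t0, 0, 8);

   The xH case instead writes bits 15..8 of register reg - 4 (AH..BH). */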
332 static inline void gen_op_mov_reg_T0(int ot, int reg)
334 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
337 static inline void gen_op_mov_reg_T1(int ot, int reg)
339 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
342 static inline void gen_op_mov_reg_A0(int size, int reg)
344 switch(size) {
345 case OT_BYTE:
346 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
347 break;
348 default: /* XXX this shouldn't be reached; abort? */
349 case OT_WORD:
350 /* For x86_64, this sets the higher half of register to zero.
351 For i386, this is equivalent to a mov. */
352 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
353 break;
354 #ifdef TARGET_X86_64
355 case OT_LONG:
356 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
357 break;
358 #endif
362 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
364 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
365 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
366 tcg_gen_ext8u_tl(t0, t0);
367 } else {
368 tcg_gen_mov_tl(t0, cpu_regs[reg]);
372 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
374 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
377 static inline void gen_op_movl_A0_reg(int reg)
379 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
382 static inline void gen_op_addl_A0_im(int32_t val)
384 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
385 #ifdef TARGET_X86_64
386 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
387 #endif
390 #ifdef TARGET_X86_64
391 static inline void gen_op_addq_A0_im(int64_t val)
393 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
395 #endif
397 static void gen_add_A0_im(DisasContext *s, int val)
399 #ifdef TARGET_X86_64
400 if (CODE64(s))
401 gen_op_addq_A0_im(val);
402 else
403 #endif
404 gen_op_addl_A0_im(val);
407 static inline void gen_op_addl_T0_T1(void)
409 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
412 static inline void gen_op_jmp_T0(void)
414 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
417 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
419 switch(size) {
420 case OT_BYTE:
421 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
422 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
423 break;
424 case OT_WORD:
425 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
426 /* For x86_64, this sets the higher half of register to zero.
427 For i386, this is equivalent to a nop. */
428 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
429 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
430 break;
431 #ifdef TARGET_X86_64
432 case OT_LONG:
433 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
434 break;
435 #endif
439 static inline void gen_op_add_reg_T0(int size, int reg)
441 switch(size) {
442 case OT_BYTE:
443 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
444 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
445 break;
446 case OT_WORD:
447 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
448 /* For x86_64, this sets the higher half of register to zero.
449 For i386, this is equivalent to a nop. */
450 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
451 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
452 break;
453 #ifdef TARGET_X86_64
454 case OT_LONG:
455 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
456 break;
457 #endif
461 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
463 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
464 if (shift != 0)
465 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
466 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
467 /* For x86_64, this sets the higher half of register to zero.
468 For i386, this is equivalent to a nop. */
469 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
472 static inline void gen_op_movl_A0_seg(int reg)
474 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
477 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
479 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
480 #ifdef TARGET_X86_64
481 if (CODE64(s)) {
482 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
483 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
484 } else {
485 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
486 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
488 #else
489 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
490 #endif
493 #ifdef TARGET_X86_64
494 static inline void gen_op_movq_A0_seg(int reg)
496 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
499 static inline void gen_op_addq_A0_seg(int reg)
501 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
502 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
505 static inline void gen_op_movq_A0_reg(int reg)
507 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
510 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
512 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
513 if (shift != 0)
514 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
515 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
517 #endif
519 static inline void gen_op_lds_T0_A0(int idx)
521 int mem_index = (idx >> 2) - 1;
522 switch(idx & 3) {
523 case OT_BYTE:
524 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
525 break;
526 case OT_WORD:
527 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
528 break;
529 default:
530 case OT_LONG:
531 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
532 break;
536 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
538 int mem_index = (idx >> 2) - 1;
539 switch(idx & 3) {
540 case OT_BYTE:
541 tcg_gen_qemu_ld8u(t0, a0, mem_index);
542 break;
543 case OT_WORD:
544 tcg_gen_qemu_ld16u(t0, a0, mem_index);
545 break;
546 case OT_LONG:
547 tcg_gen_qemu_ld32u(t0, a0, mem_index);
548 break;
549 default:
550 case OT_QUAD:
551 /* Should never happen on 32-bit targets. */
552 #ifdef TARGET_X86_64
553 tcg_gen_qemu_ld64(t0, a0, mem_index);
554 #endif
555 break;
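/* The 'idx' parameter packs two values: bits 1..0 hold the operand
   size (OT_BYTE..OT_QUAD) and the upper bits hold the softmmu memory
   index biased by one (s->mem_index is stored already shifted), which
   is why callers pass "ot + s->mem_index" and the decode above is
   "(idx >> 2) - 1". */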
559 /* XXX: always use ldu or lds */
560 static inline void gen_op_ld_T0_A0(int idx)
562 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
565 static inline void gen_op_ldu_T0_A0(int idx)
567 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
570 static inline void gen_op_ld_T1_A0(int idx)
572 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
575 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
577 int mem_index = (idx >> 2) - 1;
578 switch(idx & 3) {
579 case OT_BYTE:
580 tcg_gen_qemu_st8(t0, a0, mem_index);
581 break;
582 case OT_WORD:
583 tcg_gen_qemu_st16(t0, a0, mem_index);
584 break;
585 case OT_LONG:
586 tcg_gen_qemu_st32(t0, a0, mem_index);
587 break;
588 default:
589 case OT_QUAD:
590 /* Should never happen on 32-bit targets. */
591 #ifdef TARGET_X86_64
592 tcg_gen_qemu_st64(t0, a0, mem_index);
593 #endif
594 break;
598 static inline void gen_op_st_T0_A0(int idx)
600 gen_op_st_v(idx, cpu_T[0], cpu_A0);
603 static inline void gen_op_st_T1_A0(int idx)
605 gen_op_st_v(idx, cpu_T[1], cpu_A0);
608 static inline void gen_jmp_im(target_ulong pc)
610 tcg_gen_movi_tl(cpu_tmp0, pc);
611 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
614 static inline void gen_string_movl_A0_ESI(DisasContext *s)
616 int override;
618 override = s->override;
619 #ifdef TARGET_X86_64
620 if (s->aflag == 2) {
621 if (override >= 0) {
622 gen_op_movq_A0_seg(override);
623 gen_op_addq_A0_reg_sN(0, R_ESI);
624 } else {
625 gen_op_movq_A0_reg(R_ESI);
627 } else
628 #endif
629 if (s->aflag) {
630 /* 32 bit address */
631 if (s->addseg && override < 0)
632 override = R_DS;
633 if (override >= 0) {
634 gen_op_movl_A0_seg(override);
635 gen_op_addl_A0_reg_sN(0, R_ESI);
636 } else {
637 gen_op_movl_A0_reg(R_ESI);
639 } else {
640 /* 16 bit address, always override */
641 if (override < 0)
642 override = R_DS;
643 gen_op_movl_A0_reg(R_ESI);
644 gen_op_andl_A0_ffff();
645 gen_op_addl_A0_seg(s, override);
649 static inline void gen_string_movl_A0_EDI(DisasContext *s)
651 #ifdef TARGET_X86_64
652 if (s->aflag == 2) {
653 gen_op_movq_A0_reg(R_EDI);
654 } else
655 #endif
656 if (s->aflag) {
657 if (s->addseg) {
658 gen_op_movl_A0_seg(R_ES);
659 gen_op_addl_A0_reg_sN(0, R_EDI);
660 } else {
661 gen_op_movl_A0_reg(R_EDI);
663 } else {
664 gen_op_movl_A0_reg(R_EDI);
665 gen_op_andl_A0_ffff();
666 gen_op_addl_A0_seg(s, R_ES);
670 static inline void gen_op_movl_T0_Dshift(int ot)
672 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
673 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
676 static TCGv gen_ext_tl(TCGv dst, TCGv src, int size, bool sign)
678 switch (size) {
679 case OT_BYTE:
680 if (sign) {
681 tcg_gen_ext8s_tl(dst, src);
682 } else {
683 tcg_gen_ext8u_tl(dst, src);
685 return dst;
686 case OT_WORD:
687 if (sign) {
688 tcg_gen_ext16s_tl(dst, src);
689 } else {
690 tcg_gen_ext16u_tl(dst, src);
692 return dst;
693 #ifdef TARGET_X86_64
694 case OT_LONG:
695 if (sign) {
696 tcg_gen_ext32s_tl(dst, src);
697 } else {
698 tcg_gen_ext32u_tl(dst, src);
700 return dst;
701 #endif
702 default:
703 return src;
707 static void gen_extu(int ot, TCGv reg)
709 gen_ext_tl(reg, reg, ot, false);
712 static void gen_exts(int ot, TCGv reg)
714 gen_ext_tl(reg, reg, ot, true);
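/* Usage note: gen_extu(OT_WORD, reg) is the idiom for "keep only the
   low 16 bits"; e.g. gen_op_jnz_ecx below uses it so that a 16-bit
   address size tests CX rather than the whole of ECX. */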
717 static inline void gen_op_jnz_ecx(int size, int label1)
719 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
720 gen_extu(size + 1, cpu_tmp0);
721 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
724 static inline void gen_op_jz_ecx(int size, int label1)
726 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
727 gen_extu(size + 1, cpu_tmp0);
728 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
731 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
733 switch (ot) {
734 case OT_BYTE:
735 gen_helper_inb(v, n);
736 break;
737 case OT_WORD:
738 gen_helper_inw(v, n);
739 break;
740 case OT_LONG:
741 gen_helper_inl(v, n);
742 break;
746 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
748 switch (ot) {
749 case OT_BYTE:
750 gen_helper_outb(v, n);
751 break;
752 case OT_WORD:
753 gen_helper_outw(v, n);
754 break;
755 case OT_LONG:
756 gen_helper_outl(v, n);
757 break;
761 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
762 uint32_t svm_flags)
764 int state_saved;
765 target_ulong next_eip;
767 state_saved = 0;
768 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
769 gen_update_cc_op(s);
770 gen_jmp_im(cur_eip);
771 state_saved = 1;
772 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
773 switch (ot) {
774 case OT_BYTE:
775 gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
776 break;
777 case OT_WORD:
778 gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
779 break;
780 case OT_LONG:
781 gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
782 break;
785 if(s->flags & HF_SVMI_MASK) {
786 if (!state_saved) {
787 gen_update_cc_op(s);
788 gen_jmp_im(cur_eip);
790 svm_flags |= (1 << (4 + ot));
791 next_eip = s->pc - s->cs_base;
792 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
793 gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
794 tcg_const_i32(svm_flags),
795 tcg_const_i32(next_eip - cur_eip));
799 static inline void gen_movs(DisasContext *s, int ot)
801 gen_string_movl_A0_ESI(s);
802 gen_op_ld_T0_A0(ot + s->mem_index);
803 gen_string_movl_A0_EDI(s);
804 gen_op_st_T0_A0(ot + s->mem_index);
805 gen_op_movl_T0_Dshift(ot);
806 gen_op_add_reg_T0(s->aflag, R_ESI);
807 gen_op_add_reg_T0(s->aflag, R_EDI);
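/* Worked example of the string increment: gen_op_movl_T0_Dshift loads
   env->df, which QEMU keeps as +1 or -1 according to EFLAGS.DF, and
   shifts it left by the operand size.  For a doubleword MOVS with DF
   clear this yields +4, so ESI and EDI each advance by four bytes;
   with DF set it yields -4 and the copy runs backwards. */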
810 static void gen_op_update1_cc(void)
812 tcg_gen_discard_tl(cpu_cc_src);
813 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
816 static void gen_op_update2_cc(void)
818 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
819 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
822 static inline void gen_op_cmpl_T0_T1_cc(void)
824 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
825 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
828 static inline void gen_op_testl_T0_T1_cc(void)
830 tcg_gen_discard_tl(cpu_cc_src);
831 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
834 static void gen_op_update_neg_cc(void)
836 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
837 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
840 /* compute eflags.C to reg */
841 static void gen_compute_eflags_c(DisasContext *s, TCGv reg)
843 gen_update_cc_op(s);
844 gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_env, cpu_cc_op);
845 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
848 /* compute all eflags to reg */
849 static void gen_compute_eflags(DisasContext *s, TCGv reg)
851 gen_update_cc_op(s);
852 gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_env, cpu_cc_op);
853 if (TCGV_EQUAL(reg, cpu_cc_src)) {
854 tcg_gen_discard_tl(cpu_cc_dst);
855 set_cc_op(s, CC_OP_EFLAGS);
857 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
860 /* compute eflags.P to reg */
861 static void gen_compute_eflags_p(DisasContext *s, TCGv reg)
863 gen_compute_eflags(s, reg);
864 tcg_gen_shri_tl(reg, reg, 2);
865 tcg_gen_andi_tl(reg, reg, 1);
868 /* compute eflags.S to reg */
869 static void gen_compute_eflags_s(DisasContext *s, TCGv reg)
871 gen_compute_eflags(s, reg);
872 tcg_gen_shri_tl(reg, reg, 7);
873 tcg_gen_andi_tl(reg, reg, 1);
876 /* compute eflags.O to reg */
877 static void gen_compute_eflags_o(DisasContext *s, TCGv reg)
879 gen_compute_eflags(s, reg);
880 tcg_gen_shri_tl(reg, reg, 11);
881 tcg_gen_andi_tl(reg, reg, 1);
884 /* compute eflags.Z to reg */
885 static void gen_compute_eflags_z(DisasContext *s, TCGv reg)
887 gen_compute_eflags(s, reg);
888 tcg_gen_shri_tl(reg, reg, 6);
889 tcg_gen_andi_tl(reg, reg, 1);
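/* The shift counts above follow the EFLAGS bit layout: CF is bit 0
   (extracted separately by gen_compute_eflags_c), PF is bit 2, ZF is
   bit 6, SF is bit 7 and OF is bit 11, hence the shifts by 2, 6, 7
   and 11. */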
892 static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
894 switch(jcc_op) {
895 case JCC_O:
896 gen_compute_eflags_o(s, cpu_T[0]);
897 break;
898 case JCC_B:
899 gen_compute_eflags_c(s, cpu_T[0]);
900 break;
901 case JCC_Z:
902 gen_compute_eflags_z(s, cpu_T[0]);
903 break;
904 case JCC_BE:
905 gen_compute_eflags(s, cpu_tmp0);
906 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
907 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
908 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
909 break;
910 case JCC_S:
911 gen_compute_eflags_s(s, cpu_T[0]);
912 break;
913 case JCC_P:
914 gen_compute_eflags_p(s, cpu_T[0]);
915 break;
916 case JCC_L:
917 gen_compute_eflags(s, cpu_tmp0);
918 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
919 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
920 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
921 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
922 break;
923 default:
924 case JCC_LE:
925 gen_compute_eflags(s, cpu_tmp0);
926 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
927 tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
928 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
929 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
930 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
931 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
932 break;
936 /* return true if setcc_slow is not needed (WARNING: must be kept in
937 sync with gen_jcc1) */
938 static int is_fast_jcc_case(DisasContext *s, int b)
940 int jcc_op;
941 jcc_op = (b >> 1) & 7;
942 switch(s->cc_op) {
943 /* we optimize the cmp/jcc case */
944 case CC_OP_SUBB:
945 case CC_OP_SUBW:
946 case CC_OP_SUBL:
947 case CC_OP_SUBQ:
948 if (jcc_op == JCC_O || jcc_op == JCC_P)
949 goto slow_jcc;
950 break;
952 /* some jumps are easy to compute */
953 case CC_OP_ADDB:
954 case CC_OP_ADDW:
955 case CC_OP_ADDL:
956 case CC_OP_ADDQ:
958 case CC_OP_LOGICB:
959 case CC_OP_LOGICW:
960 case CC_OP_LOGICL:
961 case CC_OP_LOGICQ:
963 case CC_OP_INCB:
964 case CC_OP_INCW:
965 case CC_OP_INCL:
966 case CC_OP_INCQ:
968 case CC_OP_DECB:
969 case CC_OP_DECW:
970 case CC_OP_DECL:
971 case CC_OP_DECQ:
973 case CC_OP_SHLB:
974 case CC_OP_SHLW:
975 case CC_OP_SHLL:
976 case CC_OP_SHLQ:
977 if (jcc_op != JCC_Z && jcc_op != JCC_S)
978 goto slow_jcc;
979 break;
980 default:
981 slow_jcc:
982 return 0;
984 return 1;
987 /* generate a conditional jump to label 'l1' according to jump opcode
988 value 'b'. In the fast case, T0 is guaranteed not to be used. */
989 static inline void gen_jcc1(DisasContext *s, int b, int l1)
991 int inv, jcc_op, size, cond;
992 TCGv t0;
994 inv = b & 1;
995 jcc_op = (b >> 1) & 7;
997 switch (s->cc_op) {
998 /* we optimize the cmp/jcc case */
999 case CC_OP_SUBB:
1000 case CC_OP_SUBW:
1001 case CC_OP_SUBL:
1002 case CC_OP_SUBQ:
1004 size = s->cc_op - CC_OP_SUBB;
1005 switch(jcc_op) {
1006 case JCC_Z:
1007 fast_jcc_z:
1008 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_dst, size, false);
1009 tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
1010 break;
1011 case JCC_S:
1012 fast_jcc_s:
1013 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_dst, size, true);
1014 tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, t0, 0, l1);
1015 break;
1017 case JCC_B:
1018 cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
1019 goto fast_jcc_b;
1020 case JCC_BE:
1021 cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
1022 fast_jcc_b:
1023 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1024 gen_extu(size, cpu_tmp4);
1025 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
1026 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1027 break;
1029 case JCC_L:
1030 cond = inv ? TCG_COND_GE : TCG_COND_LT;
1031 goto fast_jcc_l;
1032 case JCC_LE:
1033 cond = inv ? TCG_COND_GT : TCG_COND_LE;
1034 fast_jcc_l:
1035 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1036 gen_exts(size, cpu_tmp4);
1037 t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
1038 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1039 break;
1041 default:
1042 goto slow_jcc;
1044 break;
1046 /* some jumps are easy to compute */
1047 case CC_OP_ADDB:
1048 case CC_OP_ADDW:
1049 case CC_OP_ADDL:
1050 case CC_OP_ADDQ:
1052 case CC_OP_ADCB:
1053 case CC_OP_ADCW:
1054 case CC_OP_ADCL:
1055 case CC_OP_ADCQ:
1057 case CC_OP_SBBB:
1058 case CC_OP_SBBW:
1059 case CC_OP_SBBL:
1060 case CC_OP_SBBQ:
1062 case CC_OP_LOGICB:
1063 case CC_OP_LOGICW:
1064 case CC_OP_LOGICL:
1065 case CC_OP_LOGICQ:
1067 case CC_OP_INCB:
1068 case CC_OP_INCW:
1069 case CC_OP_INCL:
1070 case CC_OP_INCQ:
1072 case CC_OP_DECB:
1073 case CC_OP_DECW:
1074 case CC_OP_DECL:
1075 case CC_OP_DECQ:
1077 case CC_OP_SHLB:
1078 case CC_OP_SHLW:
1079 case CC_OP_SHLL:
1080 case CC_OP_SHLQ:
1082 case CC_OP_SARB:
1083 case CC_OP_SARW:
1084 case CC_OP_SARL:
1085 case CC_OP_SARQ:
1086 switch(jcc_op) {
1087 case JCC_Z:
1088 size = (s->cc_op - CC_OP_ADDB) & 3;
1089 goto fast_jcc_z;
1090 case JCC_S:
1091 size = (s->cc_op - CC_OP_ADDB) & 3;
1092 goto fast_jcc_s;
1093 default:
1094 goto slow_jcc;
1096 break;
1097 default:
1098 slow_jcc:
1099 gen_setcc_slow_T0(s, jcc_op);
1100 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
1101 cpu_T[0], 0, l1);
1102 break;
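/* Illustration of the fast path above: after "cmp a, b" the state is
   CC_OP_SUBB + size with cc_dst = a - b and cc_src = b, so a following
   JB (jcc_op == JCC_B, unsigned below) reduces to one comparison of
   the original operands, roughly:

       tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);  // recover a
       tcg_gen_brcond_tl(TCG_COND_LTU, cpu_tmp4, t0, l1); // a < b

   where both values have first been truncated to the operand size
   (gen_extu / gen_ext_tl above). */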
1106 /* XXX: does not work with gdbstub "ice" single step - not a
1107 serious problem */
1108 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1110 int l1, l2;
1112 l1 = gen_new_label();
1113 l2 = gen_new_label();
1114 gen_op_jnz_ecx(s->aflag, l1);
1115 gen_set_label(l2);
1116 gen_jmp_tb(s, next_eip, 1);
1117 gen_set_label(l1);
1118 return l2;
1121 static inline void gen_stos(DisasContext *s, int ot)
1123 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1124 gen_string_movl_A0_EDI(s);
1125 gen_op_st_T0_A0(ot + s->mem_index);
1126 gen_op_movl_T0_Dshift(ot);
1127 gen_op_add_reg_T0(s->aflag, R_EDI);
1130 static inline void gen_lods(DisasContext *s, int ot)
1132 gen_string_movl_A0_ESI(s);
1133 gen_op_ld_T0_A0(ot + s->mem_index);
1134 gen_op_mov_reg_T0(ot, R_EAX);
1135 gen_op_movl_T0_Dshift(ot);
1136 gen_op_add_reg_T0(s->aflag, R_ESI);
1139 static inline void gen_scas(DisasContext *s, int ot)
1141 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1142 gen_string_movl_A0_EDI(s);
1143 gen_op_ld_T1_A0(ot + s->mem_index);
1144 gen_op_cmpl_T0_T1_cc();
1145 gen_op_movl_T0_Dshift(ot);
1146 gen_op_add_reg_T0(s->aflag, R_EDI);
1147 set_cc_op(s, CC_OP_SUBB + ot);
1150 static inline void gen_cmps(DisasContext *s, int ot)
1152 gen_string_movl_A0_ESI(s);
1153 gen_op_ld_T0_A0(ot + s->mem_index);
1154 gen_string_movl_A0_EDI(s);
1155 gen_op_ld_T1_A0(ot + s->mem_index);
1156 gen_op_cmpl_T0_T1_cc();
1157 gen_op_movl_T0_Dshift(ot);
1158 gen_op_add_reg_T0(s->aflag, R_ESI);
1159 gen_op_add_reg_T0(s->aflag, R_EDI);
1160 set_cc_op(s, CC_OP_SUBB + ot);
1163 static inline void gen_ins(DisasContext *s, int ot)
1165 if (use_icount)
1166 gen_io_start();
1167 gen_string_movl_A0_EDI(s);
1168 /* Note: we must do this dummy write first to be restartable in
1169 case of page fault. */
1170 gen_op_movl_T0_0();
1171 gen_op_st_T0_A0(ot + s->mem_index);
1172 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1173 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1174 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1175 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1176 gen_op_st_T0_A0(ot + s->mem_index);
1177 gen_op_movl_T0_Dshift(ot);
1178 gen_op_add_reg_T0(s->aflag, R_EDI);
1179 if (use_icount)
1180 gen_io_end();
1183 static inline void gen_outs(DisasContext *s, int ot)
1185 if (use_icount)
1186 gen_io_start();
1187 gen_string_movl_A0_ESI(s);
1188 gen_op_ld_T0_A0(ot + s->mem_index);
1190 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1191 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1192 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1193 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1194 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1196 gen_op_movl_T0_Dshift(ot);
1197 gen_op_add_reg_T0(s->aflag, R_ESI);
1198 if (use_icount)
1199 gen_io_end();
1202 /* same method as Valgrind: we generate jumps to the current or next
1203 instruction */
1204 #define GEN_REPZ(op) \
1205 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1206 target_ulong cur_eip, target_ulong next_eip) \
1208 int l2;\
1209 gen_update_cc_op(s); \
1210 l2 = gen_jz_ecx_string(s, next_eip); \
1211 gen_ ## op(s, ot); \
1212 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1213 /* a loop would cause two single step exceptions if ECX = 1 \
1214 before rep string_insn */ \
1215 if (!s->jmp_opt) \
1216 gen_op_jz_ecx(s->aflag, l2); \
1217 gen_jmp(s, cur_eip); \
1220 #define GEN_REPZ2(op) \
1221 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1222 target_ulong cur_eip, \
1223 target_ulong next_eip, \
1224 int nz) \
1226 int l2;\
1227 gen_update_cc_op(s); \
1228 l2 = gen_jz_ecx_string(s, next_eip); \
1229 gen_ ## op(s, ot); \
1230 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1231 gen_update_cc_op(s); \
1232 gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2); \
1233 if (!s->jmp_opt) \
1234 gen_op_jz_ecx(s->aflag, l2); \
1235 gen_jmp(s, cur_eip); \
1236 set_cc_op(s, CC_OP_DYNAMIC); \
1239 GEN_REPZ(movs)
1240 GEN_REPZ(stos)
1241 GEN_REPZ(lods)
1242 GEN_REPZ(ins)
1243 GEN_REPZ(outs)
1244 GEN_REPZ2(scas)
1245 GEN_REPZ2(cmps)
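/* For reference, GEN_REPZ(movs) expands to roughly:

       static inline void gen_repz_movs(DisasContext *s, int ot,
                                        target_ulong cur_eip,
                                        target_ulong next_eip)
       {
           int l2;
           gen_update_cc_op(s);
           l2 = gen_jz_ecx_string(s, next_eip);    // done if ECX == 0
           gen_movs(s, ot);                        // one iteration
           gen_op_add_reg_im(s->aflag, R_ECX, -1);
           if (!s->jmp_opt)
               gen_op_jz_ecx(s->aflag, l2);
           gen_jmp(s, cur_eip);                    // jump back to the insn
       }

   i.e. each translated iteration performs one string op and then jumps
   back to the REP instruction itself, as the Valgrind comment says. */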
1247 static void gen_helper_fp_arith_ST0_FT0(int op)
1249 switch (op) {
1250 case 0:
1251 gen_helper_fadd_ST0_FT0(cpu_env);
1252 break;
1253 case 1:
1254 gen_helper_fmul_ST0_FT0(cpu_env);
1255 break;
1256 case 2:
1257 gen_helper_fcom_ST0_FT0(cpu_env);
1258 break;
1259 case 3:
1260 gen_helper_fcom_ST0_FT0(cpu_env);
1261 break;
1262 case 4:
1263 gen_helper_fsub_ST0_FT0(cpu_env);
1264 break;
1265 case 5:
1266 gen_helper_fsubr_ST0_FT0(cpu_env);
1267 break;
1268 case 6:
1269 gen_helper_fdiv_ST0_FT0(cpu_env);
1270 break;
1271 case 7:
1272 gen_helper_fdivr_ST0_FT0(cpu_env);
1273 break;
1277 /* NOTE the exception in "r" op ordering */
1278 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1280 TCGv_i32 tmp = tcg_const_i32(opreg);
1281 switch (op) {
1282 case 0:
1283 gen_helper_fadd_STN_ST0(cpu_env, tmp);
1284 break;
1285 case 1:
1286 gen_helper_fmul_STN_ST0(cpu_env, tmp);
1287 break;
1288 case 4:
1289 gen_helper_fsubr_STN_ST0(cpu_env, tmp);
1290 break;
1291 case 5:
1292 gen_helper_fsub_STN_ST0(cpu_env, tmp);
1293 break;
1294 case 6:
1295 gen_helper_fdivr_STN_ST0(cpu_env, tmp);
1296 break;
1297 case 7:
1298 gen_helper_fdiv_STN_ST0(cpu_env, tmp);
1299 break;
1303 /* if d == OR_TMP0, it means memory operand (address in A0) */
1304 static void gen_op(DisasContext *s1, int op, int ot, int d)
1306 if (d != OR_TMP0) {
1307 gen_op_mov_TN_reg(ot, 0, d);
1308 } else {
1309 gen_op_ld_T0_A0(ot + s1->mem_index);
1311 switch(op) {
1312 case OP_ADCL:
1313 gen_compute_eflags_c(s1, cpu_tmp4);
1314 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1315 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1316 if (d != OR_TMP0)
1317 gen_op_mov_reg_T0(ot, d);
1318 else
1319 gen_op_st_T0_A0(ot + s1->mem_index);
1320 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1321 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1322 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1323 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1324 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1325 set_cc_op(s1, CC_OP_DYNAMIC);
1326 break;
1327 case OP_SBBL:
1328 gen_compute_eflags_c(s1, cpu_tmp4);
1329 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1330 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1331 if (d != OR_TMP0)
1332 gen_op_mov_reg_T0(ot, d);
1333 else
1334 gen_op_st_T0_A0(ot + s1->mem_index);
1335 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1336 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1337 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1338 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1339 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1340 set_cc_op(s1, CC_OP_DYNAMIC);
1341 break;
1342 case OP_ADDL:
1343 gen_op_addl_T0_T1();
1344 if (d != OR_TMP0)
1345 gen_op_mov_reg_T0(ot, d);
1346 else
1347 gen_op_st_T0_A0(ot + s1->mem_index);
1348 gen_op_update2_cc();
1349 set_cc_op(s1, CC_OP_ADDB + ot);
1350 break;
1351 case OP_SUBL:
1352 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1353 if (d != OR_TMP0)
1354 gen_op_mov_reg_T0(ot, d);
1355 else
1356 gen_op_st_T0_A0(ot + s1->mem_index);
1357 gen_op_update2_cc();
1358 set_cc_op(s1, CC_OP_SUBB + ot);
1359 break;
1360 default:
1361 case OP_ANDL:
1362 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1363 if (d != OR_TMP0)
1364 gen_op_mov_reg_T0(ot, d);
1365 else
1366 gen_op_st_T0_A0(ot + s1->mem_index);
1367 gen_op_update1_cc();
1368 set_cc_op(s1, CC_OP_LOGICB + ot);
1369 break;
1370 case OP_ORL:
1371 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1372 if (d != OR_TMP0)
1373 gen_op_mov_reg_T0(ot, d);
1374 else
1375 gen_op_st_T0_A0(ot + s1->mem_index);
1376 gen_op_update1_cc();
1377 set_cc_op(s1, CC_OP_LOGICB + ot);
1378 break;
1379 case OP_XORL:
1380 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1381 if (d != OR_TMP0)
1382 gen_op_mov_reg_T0(ot, d);
1383 else
1384 gen_op_st_T0_A0(ot + s1->mem_index);
1385 gen_op_update1_cc();
1386 set_cc_op(s1, CC_OP_LOGICB + ot);
1387 break;
1388 case OP_CMPL:
1389 gen_op_cmpl_T0_T1_cc();
1390 set_cc_op(s1, CC_OP_SUBB + ot);
1391 break;
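/* Note on the OP_ADCL/OP_SBBL cases above: the CC_OP_* enumeration
   places the ADC (resp. SBB) variants exactly four entries after the
   ADD (resp. SUB) ones, so shifting the carry-in left by two and
   adding it to CC_OP_ADDB + ot (or CC_OP_SUBB + ot) selects the plain
   or the carry variant at run time.  Since cpu_cc_op then depends on
   data, the translator-side state must be CC_OP_DYNAMIC. */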
1395 /* if d == OR_TMP0, it means memory operand (address in A0) */
1396 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1398 if (d != OR_TMP0)
1399 gen_op_mov_TN_reg(ot, 0, d);
1400 else
1401 gen_op_ld_T0_A0(ot + s1->mem_index);
1402 gen_compute_eflags_c(s1, cpu_cc_src);
1403 if (c > 0) {
1404 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1405 set_cc_op(s1, CC_OP_INCB + ot);
1406 } else {
1407 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1408 set_cc_op(s1, CC_OP_DECB + ot);
1410 if (d != OR_TMP0)
1411 gen_op_mov_reg_T0(ot, d);
1412 else
1413 gen_op_st_T0_A0(ot + s1->mem_index);
1414 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1417 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1418 int is_right, int is_arith)
1420 target_ulong mask;
1421 int shift_label;
1422 TCGv t0, t1, t2;
1424 if (ot == OT_QUAD) {
1425 mask = 0x3f;
1426 } else {
1427 mask = 0x1f;
1430 /* load */
1431 if (op1 == OR_TMP0) {
1432 gen_op_ld_T0_A0(ot + s->mem_index);
1433 } else {
1434 gen_op_mov_TN_reg(ot, 0, op1);
1437 t0 = tcg_temp_local_new();
1438 t1 = tcg_temp_local_new();
1439 t2 = tcg_temp_local_new();
1441 tcg_gen_andi_tl(t2, cpu_T[1], mask);
1443 if (is_right) {
1444 if (is_arith) {
1445 gen_exts(ot, cpu_T[0]);
1446 tcg_gen_mov_tl(t0, cpu_T[0]);
1447 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
1448 } else {
1449 gen_extu(ot, cpu_T[0]);
1450 tcg_gen_mov_tl(t0, cpu_T[0]);
1451 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
1453 } else {
1454 tcg_gen_mov_tl(t0, cpu_T[0]);
1455 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
1458 /* store */
1459 if (op1 == OR_TMP0) {
1460 gen_op_st_T0_A0(ot + s->mem_index);
1461 } else {
1462 gen_op_mov_reg_T0(ot, op1);
1465 /* update eflags */
1466 gen_update_cc_op(s);
1468 tcg_gen_mov_tl(t1, cpu_T[0]);
1470 shift_label = gen_new_label();
1471 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
1473 tcg_gen_addi_tl(t2, t2, -1);
1474 tcg_gen_mov_tl(cpu_cc_dst, t1);
1476 if (is_right) {
1477 if (is_arith) {
1478 tcg_gen_sar_tl(cpu_cc_src, t0, t2);
1479 } else {
1480 tcg_gen_shr_tl(cpu_cc_src, t0, t2);
1482 } else {
1483 tcg_gen_shl_tl(cpu_cc_src, t0, t2);
1486 if (is_right) {
1487 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1488 } else {
1489 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1492 gen_set_label(shift_label);
1493 set_cc_op(s, CC_OP_DYNAMIC); /* cannot predict flags after */
1495 tcg_temp_free(t0);
1496 tcg_temp_free(t1);
1497 tcg_temp_free(t2);
1500 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1501 int is_right, int is_arith)
1503 int mask;
1505 if (ot == OT_QUAD)
1506 mask = 0x3f;
1507 else
1508 mask = 0x1f;
1510 /* load */
1511 if (op1 == OR_TMP0)
1512 gen_op_ld_T0_A0(ot + s->mem_index);
1513 else
1514 gen_op_mov_TN_reg(ot, 0, op1);
1516 op2 &= mask;
1517 if (op2 != 0) {
1518 if (is_right) {
1519 if (is_arith) {
1520 gen_exts(ot, cpu_T[0]);
1521 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1522 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1523 } else {
1524 gen_extu(ot, cpu_T[0]);
1525 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1526 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1528 } else {
1529 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1530 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1534 /* store */
1535 if (op1 == OR_TMP0)
1536 gen_op_st_T0_A0(ot + s->mem_index);
1537 else
1538 gen_op_mov_reg_T0(ot, op1);
1540 /* update eflags if non zero shift */
1541 if (op2 != 0) {
1542 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1543 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1544 set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
1548 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1550 if (arg2 >= 0)
1551 tcg_gen_shli_tl(ret, arg1, arg2);
1552 else
1553 tcg_gen_shri_tl(ret, arg1, -arg2);
1556 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1557 int is_right)
1559 target_ulong mask;
1560 int label1, label2, data_bits;
1561 TCGv t0, t1, t2, a0;
1563 /* XXX: inefficient, but we must use local temps */
1564 t0 = tcg_temp_local_new();
1565 t1 = tcg_temp_local_new();
1566 t2 = tcg_temp_local_new();
1567 a0 = tcg_temp_local_new();
1569 if (ot == OT_QUAD)
1570 mask = 0x3f;
1571 else
1572 mask = 0x1f;
1574 /* load */
1575 if (op1 == OR_TMP0) {
1576 tcg_gen_mov_tl(a0, cpu_A0);
1577 gen_op_ld_v(ot + s->mem_index, t0, a0);
1578 } else {
1579 gen_op_mov_v_reg(ot, t0, op1);
1582 tcg_gen_mov_tl(t1, cpu_T[1]);
1584 tcg_gen_andi_tl(t1, t1, mask);
1586 /* Must test zero case to avoid using undefined behaviour in TCG
1587 shifts. */
1588 label1 = gen_new_label();
1589 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1591 if (ot <= OT_WORD)
1592 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1593 else
1594 tcg_gen_mov_tl(cpu_tmp0, t1);
1596 gen_extu(ot, t0);
1597 tcg_gen_mov_tl(t2, t0);
1599 data_bits = 8 << ot;
1600 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1601 fix TCG definition) */
1602 if (is_right) {
1603 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1604 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1605 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1606 } else {
1607 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1608 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1609 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1611 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1613 gen_set_label(label1);
1614 /* store */
1615 if (op1 == OR_TMP0) {
1616 gen_op_st_v(ot + s->mem_index, t0, a0);
1617 } else {
1618 gen_op_mov_reg_v(ot, op1, t0);
1621 /* update eflags; it is needed most of the time anyway, so do it always. */
1622 gen_compute_eflags(s, cpu_cc_src);
1623 assert(s->cc_op == CC_OP_EFLAGS);
1625 label2 = gen_new_label();
1626 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1628 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1629 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1630 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1631 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1632 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1633 if (is_right) {
1634 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1636 tcg_gen_andi_tl(t0, t0, CC_C);
1637 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1639 gen_set_label(label2);
1641 tcg_temp_free(t0);
1642 tcg_temp_free(t1);
1643 tcg_temp_free(t2);
1644 tcg_temp_free(a0);
1647 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1648 int is_right)
1650 int mask;
1651 int data_bits;
1652 TCGv t0, t1, a0;
1654 /* XXX: inefficient, but we must use local temps */
1655 t0 = tcg_temp_local_new();
1656 t1 = tcg_temp_local_new();
1657 a0 = tcg_temp_local_new();
1659 if (ot == OT_QUAD)
1660 mask = 0x3f;
1661 else
1662 mask = 0x1f;
1664 /* load */
1665 if (op1 == OR_TMP0) {
1666 tcg_gen_mov_tl(a0, cpu_A0);
1667 gen_op_ld_v(ot + s->mem_index, t0, a0);
1668 } else {
1669 gen_op_mov_v_reg(ot, t0, op1);
1672 gen_extu(ot, t0);
1673 tcg_gen_mov_tl(t1, t0);
1675 op2 &= mask;
1676 data_bits = 8 << ot;
1677 if (op2 != 0) {
1678 int shift = op2 & ((1 << (3 + ot)) - 1);
1679 if (is_right) {
1680 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1681 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1683 else {
1684 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1685 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1687 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1690 /* store */
1691 if (op1 == OR_TMP0) {
1692 gen_op_st_v(ot + s->mem_index, t0, a0);
1693 } else {
1694 gen_op_mov_reg_v(ot, op1, t0);
1697 if (op2 != 0) {
1698 /* update eflags */
1699 gen_compute_eflags(s, cpu_cc_src);
1700 assert(s->cc_op == CC_OP_EFLAGS);
1702 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1703 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1704 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1705 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1706 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1707 if (is_right) {
1708 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1710 tcg_gen_andi_tl(t0, t0, CC_C);
1711 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1714 tcg_temp_free(t0);
1715 tcg_temp_free(t1);
1716 tcg_temp_free(a0);
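/* Both rotate variants implement the usual identity
   rol(x, n) == (x << n) | (x >> (width - n)) (mirrored for ror); the
   zero-count case is skipped or branched around because a TCG shift by
   the full operand width is undefined behaviour. */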
1719 /* XXX: add faster immediate = 1 case */
1720 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1721 int is_right)
1723 gen_update_cc_op(s);
1724 gen_compute_eflags(s, cpu_cc_src);
1725 assert(s->cc_op == CC_OP_EFLAGS);
1727 /* load */
1728 if (op1 == OR_TMP0)
1729 gen_op_ld_T0_A0(ot + s->mem_index);
1730 else
1731 gen_op_mov_TN_reg(ot, 0, op1);
1733 if (is_right) {
1734 switch (ot) {
1735 case OT_BYTE:
1736 gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1737 break;
1738 case OT_WORD:
1739 gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1740 break;
1741 case OT_LONG:
1742 gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1743 break;
1744 #ifdef TARGET_X86_64
1745 case OT_QUAD:
1746 gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1747 break;
1748 #endif
1750 } else {
1751 switch (ot) {
1752 case OT_BYTE:
1753 gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1754 break;
1755 case OT_WORD:
1756 gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1757 break;
1758 case OT_LONG:
1759 gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1760 break;
1761 #ifdef TARGET_X86_64
1762 case OT_QUAD:
1763 gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
1764 break;
1765 #endif
1768 /* store */
1769 if (op1 == OR_TMP0)
1770 gen_op_st_T0_A0(ot + s->mem_index);
1771 else
1772 gen_op_mov_reg_T0(ot, op1);
1775 /* XXX: add faster immediate case */
1776 static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
1777 int is_right)
1779 int label1, label2, data_bits;
1780 target_ulong mask;
1781 TCGv t0, t1, t2, a0;
1783 t0 = tcg_temp_local_new();
1784 t1 = tcg_temp_local_new();
1785 t2 = tcg_temp_local_new();
1786 a0 = tcg_temp_local_new();
1788 if (ot == OT_QUAD)
1789 mask = 0x3f;
1790 else
1791 mask = 0x1f;
1793 /* load */
1794 if (op1 == OR_TMP0) {
1795 tcg_gen_mov_tl(a0, cpu_A0);
1796 gen_op_ld_v(ot + s->mem_index, t0, a0);
1797 } else {
1798 gen_op_mov_v_reg(ot, t0, op1);
1801 tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);
1803 tcg_gen_mov_tl(t1, cpu_T[1]);
1804 tcg_gen_mov_tl(t2, cpu_T3);
1806 /* Must test zero case to avoid using undefined behaviour in TCG
1807 shifts. */
1808 label1 = gen_new_label();
1809 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1811 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1812 if (ot == OT_WORD) {
1813 /* Note: we implement the Intel behaviour for shift count > 16 */
1814 if (is_right) {
1815 tcg_gen_andi_tl(t0, t0, 0xffff);
1816 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1817 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1818 tcg_gen_ext32u_tl(t0, t0);
1820 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1822 /* only needed if count > 16, but a test would complicate things */
1823 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1824 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1826 tcg_gen_shr_tl(t0, t0, t2);
1828 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1829 } else {
1830 /* XXX: not optimal */
1831 tcg_gen_andi_tl(t0, t0, 0xffff);
1832 tcg_gen_shli_tl(t1, t1, 16);
1833 tcg_gen_or_tl(t1, t1, t0);
1834 tcg_gen_ext32u_tl(t1, t1);
1836 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1837 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1838 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1839 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1841 tcg_gen_shl_tl(t0, t0, t2);
1842 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1843 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1844 tcg_gen_or_tl(t0, t0, t1);
1846 } else {
1847 data_bits = 8 << ot;
1848 if (is_right) {
1849 if (ot == OT_LONG)
1850 tcg_gen_ext32u_tl(t0, t0);
1852 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1854 tcg_gen_shr_tl(t0, t0, t2);
1855 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1856 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1857 tcg_gen_or_tl(t0, t0, t1);
1859 } else {
1860 if (ot == OT_LONG)
1861 tcg_gen_ext32u_tl(t1, t1);
1863 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1865 tcg_gen_shl_tl(t0, t0, t2);
1866 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1867 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1868 tcg_gen_or_tl(t0, t0, t1);
1871 tcg_gen_mov_tl(t1, cpu_tmp4);
1873 gen_set_label(label1);
1874 /* store */
1875 if (op1 == OR_TMP0) {
1876 gen_op_st_v(ot + s->mem_index, t0, a0);
1877 } else {
1878 gen_op_mov_reg_v(ot, op1, t0);
1881 /* update eflags */
1882 gen_update_cc_op(s);
1884 label2 = gen_new_label();
1885 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
1887 tcg_gen_mov_tl(cpu_cc_src, t1);
1888 tcg_gen_mov_tl(cpu_cc_dst, t0);
1889 if (is_right) {
1890 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1891 } else {
1892 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1894 gen_set_label(label2);
1895 set_cc_op(s, CC_OP_DYNAMIC); /* cannot predict flags after */
1897 tcg_temp_free(t0);
1898 tcg_temp_free(t1);
1899 tcg_temp_free(t2);
1900 tcg_temp_free(a0);
1903 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1905 if (s != OR_TMP1)
1906 gen_op_mov_TN_reg(ot, 1, s);
1907 switch(op) {
1908 case OP_ROL:
1909 gen_rot_rm_T1(s1, ot, d, 0);
1910 break;
1911 case OP_ROR:
1912 gen_rot_rm_T1(s1, ot, d, 1);
1913 break;
1914 case OP_SHL:
1915 case OP_SHL1:
1916 gen_shift_rm_T1(s1, ot, d, 0, 0);
1917 break;
1918 case OP_SHR:
1919 gen_shift_rm_T1(s1, ot, d, 1, 0);
1920 break;
1921 case OP_SAR:
1922 gen_shift_rm_T1(s1, ot, d, 1, 1);
1923 break;
1924 case OP_RCL:
1925 gen_rotc_rm_T1(s1, ot, d, 0);
1926 break;
1927 case OP_RCR:
1928 gen_rotc_rm_T1(s1, ot, d, 1);
1929 break;
1933 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
1935 switch(op) {
1936 case OP_ROL:
1937 gen_rot_rm_im(s1, ot, d, c, 0);
1938 break;
1939 case OP_ROR:
1940 gen_rot_rm_im(s1, ot, d, c, 1);
1941 break;
1942 case OP_SHL:
1943 case OP_SHL1:
1944 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1945 break;
1946 case OP_SHR:
1947 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1948 break;
1949 case OP_SAR:
1950 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1951 break;
1952 default:
1953 /* currently not optimized */
1954 gen_op_movl_T1_im(c);
1955 gen_shift(s1, op, ot, d, OR_TMP1);
1956 break;
1960 static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm,
1961 int *reg_ptr, int *offset_ptr)
1963 target_long disp;
1964 int havesib;
1965 int base;
1966 int index;
1967 int scale;
1968 int opreg;
1969 int mod, rm, code, override, must_add_seg;
1971 override = s->override;
1972 must_add_seg = s->addseg;
1973 if (override >= 0)
1974 must_add_seg = 1;
1975 mod = (modrm >> 6) & 3;
1976 rm = modrm & 7;
1978 if (s->aflag) {
1980 havesib = 0;
1981 base = rm;
1982 index = 0;
1983 scale = 0;
1985 if (base == 4) {
1986 havesib = 1;
1987 code = cpu_ldub_code(env, s->pc++);
1988 scale = (code >> 6) & 3;
1989 index = ((code >> 3) & 7) | REX_X(s);
1990 base = (code & 7);
1992 base |= REX_B(s);
1994 switch (mod) {
1995 case 0:
1996 if ((base & 7) == 5) {
1997 base = -1;
1998 disp = (int32_t)cpu_ldl_code(env, s->pc);
1999 s->pc += 4;
2000 if (CODE64(s) && !havesib) {
2001 disp += s->pc + s->rip_offset;
2003 } else {
2004 disp = 0;
2006 break;
2007 case 1:
2008 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2009 break;
2010 default:
2011 case 2:
2012 disp = (int32_t)cpu_ldl_code(env, s->pc);
2013 s->pc += 4;
2014 break;
2017 if (base >= 0) {
2018 /* for correct popl handling with esp */
2019 if (base == 4 && s->popl_esp_hack)
2020 disp += s->popl_esp_hack;
2021 #ifdef TARGET_X86_64
2022 if (s->aflag == 2) {
2023 gen_op_movq_A0_reg(base);
2024 if (disp != 0) {
2025 gen_op_addq_A0_im(disp);
2027 } else
2028 #endif
2030 gen_op_movl_A0_reg(base);
2031 if (disp != 0)
2032 gen_op_addl_A0_im(disp);
2034 } else {
2035 #ifdef TARGET_X86_64
2036 if (s->aflag == 2) {
2037 gen_op_movq_A0_im(disp);
2038 } else
2039 #endif
2041 gen_op_movl_A0_im(disp);
2044 /* index == 4 means no index */
2045 if (havesib && (index != 4)) {
2046 #ifdef TARGET_X86_64
2047 if (s->aflag == 2) {
2048 gen_op_addq_A0_reg_sN(scale, index);
2049 } else
2050 #endif
2052 gen_op_addl_A0_reg_sN(scale, index);
2055 if (must_add_seg) {
2056 if (override < 0) {
2057 if (base == R_EBP || base == R_ESP)
2058 override = R_SS;
2059 else
2060 override = R_DS;
2062 #ifdef TARGET_X86_64
2063 if (s->aflag == 2) {
2064 gen_op_addq_A0_seg(override);
2065 } else
2066 #endif
2068 gen_op_addl_A0_seg(s, override);
2071 } else {
2072 switch (mod) {
2073 case 0:
2074 if (rm == 6) {
2075 disp = cpu_lduw_code(env, s->pc);
2076 s->pc += 2;
2077 gen_op_movl_A0_im(disp);
2078 rm = 0; /* avoid SS override */
2079 goto no_rm;
2080 } else {
2081 disp = 0;
2083 break;
2084 case 1:
2085 disp = (int8_t)cpu_ldub_code(env, s->pc++);
2086 break;
2087 default:
2088 case 2:
2089 disp = cpu_lduw_code(env, s->pc);
2090 s->pc += 2;
2091 break;
2093 switch(rm) {
2094 case 0:
2095 gen_op_movl_A0_reg(R_EBX);
2096 gen_op_addl_A0_reg_sN(0, R_ESI);
2097 break;
2098 case 1:
2099 gen_op_movl_A0_reg(R_EBX);
2100 gen_op_addl_A0_reg_sN(0, R_EDI);
2101 break;
2102 case 2:
2103 gen_op_movl_A0_reg(R_EBP);
2104 gen_op_addl_A0_reg_sN(0, R_ESI);
2105 break;
2106 case 3:
2107 gen_op_movl_A0_reg(R_EBP);
2108 gen_op_addl_A0_reg_sN(0, R_EDI);
2109 break;
2110 case 4:
2111 gen_op_movl_A0_reg(R_ESI);
2112 break;
2113 case 5:
2114 gen_op_movl_A0_reg(R_EDI);
2115 break;
2116 case 6:
2117 gen_op_movl_A0_reg(R_EBP);
2118 break;
2119 default:
2120 case 7:
2121 gen_op_movl_A0_reg(R_EBX);
2122 break;
2124 if (disp != 0)
2125 gen_op_addl_A0_im(disp);
2126 gen_op_andl_A0_ffff();
2127 no_rm:
2128 if (must_add_seg) {
2129 if (override < 0) {
2130 if (rm == 2 || rm == 3 || rm == 6)
2131 override = R_SS;
2132 else
2133 override = R_DS;
2135 gen_op_addl_A0_seg(s, override);
2139 opreg = OR_A0;
2140 disp = 0;
2141 *reg_ptr = opreg;
2142 *offset_ptr = disp;
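/* Worked example of the 32-bit decode above: modrm = 0x44 gives
   mod = 1, rm = 4, so a SIB byte follows; with SIB = 0x98 (scale = 2,
   index = 3, base = 0) and disp8 = 0x10, the effective address built
   in A0 is EAX + EBX * 4 + 0x10, plus the DS base when a segment base
   must be added. */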
2145 static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
2147 int mod, rm, base, code;
2149 mod = (modrm >> 6) & 3;
2150 if (mod == 3)
2151 return;
2152 rm = modrm & 7;
2154 if (s->aflag) {
2156 base = rm;
2158 if (base == 4) {
2159 code = cpu_ldub_code(env, s->pc++);
2160 base = (code & 7);
2163 switch (mod) {
2164 case 0:
2165 if (base == 5) {
2166 s->pc += 4;
2168 break;
2169 case 1:
2170 s->pc++;
2171 break;
2172 default:
2173 case 2:
2174 s->pc += 4;
2175 break;
2177 } else {
2178 switch (mod) {
2179 case 0:
2180 if (rm == 6) {
2181 s->pc += 2;
2183 break;
2184 case 1:
2185 s->pc++;
2186 break;
2187 default:
2188 case 2:
2189 s->pc += 2;
2190 break;
2195 /* used for LEA and MOV AX, mem */
2196 static void gen_add_A0_ds_seg(DisasContext *s)
2198 int override, must_add_seg;
2199 must_add_seg = s->addseg;
2200 override = R_DS;
2201 if (s->override >= 0) {
2202 override = s->override;
2203 must_add_seg = 1;
2205 if (must_add_seg) {
2206 #ifdef TARGET_X86_64
2207 if (CODE64(s)) {
2208 gen_op_addq_A0_seg(override);
2209 } else
2210 #endif
2212 gen_op_addl_A0_seg(s, override);
2217 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2218 OR_TMP0 */
2219 static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
2220 int ot, int reg, int is_store)
2222 int mod, rm, opreg, disp;
2224 mod = (modrm >> 6) & 3;
2225 rm = (modrm & 7) | REX_B(s);
2226 if (mod == 3) {
2227 if (is_store) {
2228 if (reg != OR_TMP0)
2229 gen_op_mov_TN_reg(ot, 0, reg);
2230 gen_op_mov_reg_T0(ot, rm);
2231 } else {
2232 gen_op_mov_TN_reg(ot, 0, rm);
2233 if (reg != OR_TMP0)
2234 gen_op_mov_reg_T0(ot, reg);
2236 } else {
2237 gen_lea_modrm(env, s, modrm, &opreg, &disp);
2238 if (is_store) {
2239 if (reg != OR_TMP0)
2240 gen_op_mov_TN_reg(ot, 0, reg);
2241 gen_op_st_T0_A0(ot + s->mem_index);
2242 } else {
2243 gen_op_ld_T0_A0(ot + s->mem_index);
2244 if (reg != OR_TMP0)
2245 gen_op_mov_reg_T0(ot, reg);
2250 static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, int ot)
2252 uint32_t ret;
2254 switch(ot) {
2255 case OT_BYTE:
2256 ret = cpu_ldub_code(env, s->pc);
2257 s->pc++;
2258 break;
2259 case OT_WORD:
2260 ret = cpu_lduw_code(env, s->pc);
2261 s->pc += 2;
2262 break;
2263 default:
2264 case OT_LONG:
2265 ret = cpu_ldl_code(env, s->pc);
2266 s->pc += 4;
2267 break;
2269 return ret;
2272 static inline int insn_const_size(unsigned int ot)
2274 if (ot <= OT_LONG)
2275 return 1 << ot;
2276 else
2277 return 4;
2280 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2282 TranslationBlock *tb;
2283 target_ulong pc;
2285 pc = s->cs_base + eip;
2286 tb = s->tb;
2287 /* NOTE: we handle the case where the TB spans two pages here */
2288 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2289 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2290 /* jump to same page: we can use a direct jump */
2291 tcg_gen_goto_tb(tb_num);
2292 gen_jmp_im(eip);
2293 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
2294 } else {
2295 /* jump to another page: currently not optimized */
2296 gen_jmp_im(eip);
2297 gen_eob(s);
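/* The "(tcg_target_long)tb + tb_num" value works because
   TranslationBlock pointers are sufficiently aligned that the low bits
   are free to encode which of the two goto_tb slots was taken; the
   execution loop uses this to patch the direct jump and chain the two
   blocks.  Cross-page jumps fall back to gen_eob() since the target
   page's mapping may change between executions. */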
2301 static inline void gen_jcc(DisasContext *s, int b,
2302 target_ulong val, target_ulong next_eip)
2304 int l1, l2;
2306 gen_update_cc_op(s);
2307 if (s->jmp_opt) {
2308 l1 = gen_new_label();
2309 gen_jcc1(s, b, l1);
2310 set_cc_op(s, CC_OP_DYNAMIC);
2312 gen_goto_tb(s, 0, next_eip);
2314 gen_set_label(l1);
2315 gen_goto_tb(s, 1, val);
2316 s->is_jmp = DISAS_TB_JUMP;
2317 } else {
2319 l1 = gen_new_label();
2320 l2 = gen_new_label();
2321 gen_jcc1(s, b, l1);
2322 set_cc_op(s, CC_OP_DYNAMIC);
2324 gen_jmp_im(next_eip);
2325 tcg_gen_br(l2);
2327 gen_set_label(l1);
2328 gen_jmp_im(val);
2329 gen_set_label(l2);
2330 gen_eob(s);
2334 static void gen_setcc(DisasContext *s, int b)
2336 int inv, jcc_op, l1;
2337 TCGv t0;
2339 if (is_fast_jcc_case(s, b)) {
2340 /* nominal case: we use a jump */
2341 /* XXX: make it faster by adding new instructions in TCG */
2342 t0 = tcg_temp_local_new();
2343 tcg_gen_movi_tl(t0, 0);
2344 l1 = gen_new_label();
2345 gen_jcc1(s, b ^ 1, l1);
2346 tcg_gen_movi_tl(t0, 1);
2347 gen_set_label(l1);
2348 tcg_gen_mov_tl(cpu_T[0], t0);
2349 tcg_temp_free(t0);
2350 } else {
2351 /* slow case: it is more efficient not to generate a jump,
2352 although it is questionable whether this optimization is
2353 worthwhile */
2354 inv = b & 1;
2355 jcc_op = (b >> 1) & 7;
2356 gen_setcc_slow_T0(s, jcc_op);
2357 if (inv) {
2358 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2363 static inline void gen_op_movl_T0_seg(int seg_reg)
2365 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2366 offsetof(CPUX86State,segs[seg_reg].selector));
2369 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2371 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2372 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2373 offsetof(CPUX86State,segs[seg_reg].selector));
2374 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2375 tcg_gen_st_tl(cpu_T[0], cpu_env,
2376 offsetof(CPUX86State,segs[seg_reg].base));
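/* This implements the real/vm86-mode rule that a segment base is the
   selector shifted left by four: e.g. loading DS with 0x1234 sets
   segs[R_DS].base to 0x12340, which later address computations add to
   the 16-bit offset. */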
2379 /* move T0 to seg_reg and compute if the CPU state may change. Never
2380 call this function with seg_reg == R_CS */
2381 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2383 if (s->pe && !s->vm86) {
2384 /* XXX: optimize by finding processor state dynamically */
2385 gen_update_cc_op(s);
2386 gen_jmp_im(cur_eip);
2387 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2388 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2389 /* abort translation because the addseg value may change or
2390    because ss32 may change. For R_SS, translation must always
2391    stop, as special handling is needed to inhibit hardware
2392    interrupts for the next instruction */
2393 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2394 s->is_jmp = DISAS_TB_JUMP;
2395 } else {
2396 gen_op_movl_seg_T0_vm(seg_reg);
2397 if (seg_reg == R_SS)
2398 s->is_jmp = DISAS_TB_JUMP;
2402 static inline int svm_is_rep(int prefixes)
2404 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
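/* (the 8 returned for a rep prefix becomes bit 3, the REP bit, of the
   EXITINFO1-style parameter that the SVM intercept helpers expect) */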
2407 static inline void
2408 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2409 uint32_t type, uint64_t param)
2411 /* no SVM activated; fast case */
2412 if (likely(!(s->flags & HF_SVMI_MASK)))
2413 return;
2414 gen_update_cc_op(s);
2415 gen_jmp_im(pc_start - s->cs_base);
2416 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2417 tcg_const_i64(param));
2420 static inline void
2421 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2423 gen_svm_check_intercept_param(s, pc_start, type, 0);
2426 static inline void gen_stack_update(DisasContext *s, int addend)
2428 #ifdef TARGET_X86_64
2429 if (CODE64(s)) {
2430 gen_op_add_reg_im(2, R_ESP, addend);
2431 } else
2432 #endif
2433 if (s->ss32) {
2434 gen_op_add_reg_im(1, R_ESP, addend);
2435 } else {
2436 gen_op_add_reg_im(0, R_ESP, addend);
2440 /* generate a push. It depends on ss32, addseg and dflag */
2441 static void gen_push_T0(DisasContext *s)
2443 #ifdef TARGET_X86_64
2444 if (CODE64(s)) {
2445 gen_op_movq_A0_reg(R_ESP);
2446 if (s->dflag) {
2447 gen_op_addq_A0_im(-8);
2448 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2449 } else {
2450 gen_op_addq_A0_im(-2);
2451 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2453 gen_op_mov_reg_A0(2, R_ESP);
2454 } else
2455 #endif
2457 gen_op_movl_A0_reg(R_ESP);
2458 if (!s->dflag)
2459 gen_op_addl_A0_im(-2);
2460 else
2461 gen_op_addl_A0_im(-4);
2462 if (s->ss32) {
2463 if (s->addseg) {
2464 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2465 gen_op_addl_A0_seg(s, R_SS);
2467 } else {
2468 gen_op_andl_A0_ffff();
2469 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2470 gen_op_addl_A0_seg(s, R_SS);
2472 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2473 if (s->ss32 && !s->addseg)
2474 gen_op_mov_reg_A0(1, R_ESP);
2475 else
2476 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2480 /* generate a push from T1; like gen_push_T0 it depends on ss32,
2481    addseg and dflag. Slower, only used for call Ev */
2482 static void gen_push_T1(DisasContext *s)
2484 #ifdef TARGET_X86_64
2485 if (CODE64(s)) {
2486 gen_op_movq_A0_reg(R_ESP);
2487 if (s->dflag) {
2488 gen_op_addq_A0_im(-8);
2489 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2490 } else {
2491 gen_op_addq_A0_im(-2);
2492 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2494 gen_op_mov_reg_A0(2, R_ESP);
2495 } else
2496 #endif
2498 gen_op_movl_A0_reg(R_ESP);
2499 if (!s->dflag)
2500 gen_op_addl_A0_im(-2);
2501 else
2502 gen_op_addl_A0_im(-4);
2503 if (s->ss32) {
2504 if (s->addseg) {
2505 gen_op_addl_A0_seg(s, R_SS);
2507 } else {
2508 gen_op_andl_A0_ffff();
2509 gen_op_addl_A0_seg(s, R_SS);
2511 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2513 if (s->ss32 && !s->addseg)
2514 gen_op_mov_reg_A0(1, R_ESP);
2515 else
2516 gen_stack_update(s, (-2) << s->dflag);
2520 /* two step pop is necessary for precise exceptions */
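/* (the load in gen_pop_T0() may fault; ESP is only adjusted afterwards
   by gen_pop_update(), so a faulting POP leaves ESP unchanged) */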
2521 static void gen_pop_T0(DisasContext *s)
2523 #ifdef TARGET_X86_64
2524 if (CODE64(s)) {
2525 gen_op_movq_A0_reg(R_ESP);
2526 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2527 } else
2528 #endif
2530 gen_op_movl_A0_reg(R_ESP);
2531 if (s->ss32) {
2532 if (s->addseg)
2533 gen_op_addl_A0_seg(s, R_SS);
2534 } else {
2535 gen_op_andl_A0_ffff();
2536 gen_op_addl_A0_seg(s, R_SS);
2538 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2542 static void gen_pop_update(DisasContext *s)
2544 #ifdef TARGET_X86_64
2545 if (CODE64(s) && s->dflag) {
2546 gen_stack_update(s, 8);
2547 } else
2548 #endif
2550 gen_stack_update(s, 2 << s->dflag);
2554 static void gen_stack_A0(DisasContext *s)
2556 gen_op_movl_A0_reg(R_ESP);
2557 if (!s->ss32)
2558 gen_op_andl_A0_ffff();
2559 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2560 if (s->addseg)
2561 gen_op_addl_A0_seg(s, R_SS);
2564 /* NOTE: wrap-around of ESP in 16-bit mode is not fully handled */
2565 static void gen_pusha(DisasContext *s)
2567 int i;
2568 gen_op_movl_A0_reg(R_ESP);
2569 gen_op_addl_A0_im(-16 << s->dflag);
2570 if (!s->ss32)
2571 gen_op_andl_A0_ffff();
2572 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2573 if (s->addseg)
2574 gen_op_addl_A0_seg(s, R_SS);
2575 for (i = 0; i < 8; i++) {
2576 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2577 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2578 gen_op_addl_A0_im(2 << s->dflag);
2580 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2583 /* NOTE: wrap-around of ESP in 16-bit mode is not fully handled */
2584 static void gen_popa(DisasContext *s)
2586 int i;
2587 gen_op_movl_A0_reg(R_ESP);
2588 if (!s->ss32)
2589 gen_op_andl_A0_ffff();
2590 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2591 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2592 if (s->addseg)
2593 gen_op_addl_A0_seg(s, R_SS);
2594 for (i = 0; i < 8; i++) {
2595 /* ESP is not reloaded */
2596 if (i != 3) {
2597 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2598 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2600 gen_op_addl_A0_im(2 << s->dflag);
2602 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2605 static void gen_enter(DisasContext *s, int esp_addend, int level)
2607 int ot, opsize;
2609 level &= 0x1f;
2610 #ifdef TARGET_X86_64
2611 if (CODE64(s)) {
2612 ot = s->dflag ? OT_QUAD : OT_WORD;
2613 opsize = 1 << ot;
2615 gen_op_movl_A0_reg(R_ESP);
2616 gen_op_addq_A0_im(-opsize);
2617 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2619 /* push bp */
2620 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2621 gen_op_st_T0_A0(ot + s->mem_index);
2622 if (level) {
2623 /* XXX: must save state */
2624 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2625 tcg_const_i32((ot == OT_QUAD)),
2626 cpu_T[1]);
2628 gen_op_mov_reg_T1(ot, R_EBP);
2629 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2630 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2631 } else
2632 #endif
2634 ot = s->dflag + OT_WORD;
2635 opsize = 2 << s->dflag;
2637 gen_op_movl_A0_reg(R_ESP);
2638 gen_op_addl_A0_im(-opsize);
2639 if (!s->ss32)
2640 gen_op_andl_A0_ffff();
2641 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2642 if (s->addseg)
2643 gen_op_addl_A0_seg(s, R_SS);
2644 /* push bp */
2645 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2646 gen_op_st_T0_A0(ot + s->mem_index);
2647 if (level) {
2648 /* XXX: must save state */
2649 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2650 tcg_const_i32(s->dflag),
2651 cpu_T[1]);
2653 gen_op_mov_reg_T1(ot, R_EBP);
2654 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2655 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
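/* (both branches above implement ENTER: push EBP, let the enter_level
   helper copy the outer frame pointers for nesting levels > 0, load the
   new frame pointer into EBP and lower ESP past the local storage) */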
2659 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2661 gen_update_cc_op(s);
2662 gen_jmp_im(cur_eip);
2663 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2664 s->is_jmp = DISAS_TB_JUMP;
2667 /* an interrupt is different from an exception because of the
2668 privilege checks */
2669 static void gen_interrupt(DisasContext *s, int intno,
2670 target_ulong cur_eip, target_ulong next_eip)
2672 gen_update_cc_op(s);
2673 gen_jmp_im(cur_eip);
2674 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2675 tcg_const_i32(next_eip - cur_eip));
2676 s->is_jmp = DISAS_TB_JUMP;
2679 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2681 gen_update_cc_op(s);
2682 gen_jmp_im(cur_eip);
2683 gen_helper_debug(cpu_env);
2684 s->is_jmp = DISAS_TB_JUMP;
2687 /* generate a generic end of block. Trace exception is also generated
2688 if needed */
2689 static void gen_eob(DisasContext *s)
2691 gen_update_cc_op(s);
2692 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2693 gen_helper_reset_inhibit_irq(cpu_env);
2695 if (s->tb->flags & HF_RF_MASK) {
2696 gen_helper_reset_rf(cpu_env);
2698 if (s->singlestep_enabled) {
2699 gen_helper_debug(cpu_env);
2700 } else if (s->tf) {
2701 gen_helper_single_step(cpu_env);
2702 } else {
2703 tcg_gen_exit_tb(0);
2705 s->is_jmp = DISAS_TB_JUMP;
2708 /* generate a jump to eip. No segment change must happen before this,
2709    as a direct call to the next block may occur */
2710 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2712 if (s->jmp_opt) {
2713 gen_update_cc_op(s);
2714 gen_goto_tb(s, tb_num, eip);
2715 s->is_jmp = DISAS_TB_JUMP;
2716 } else {
2717 gen_jmp_im(eip);
2718 gen_eob(s);
2722 static void gen_jmp(DisasContext *s, target_ulong eip)
2724 gen_jmp_tb(s, eip, 0);
2727 static inline void gen_ldq_env_A0(int idx, int offset)
2729 int mem_index = (idx >> 2) - 1;
2730 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2731 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2734 static inline void gen_stq_env_A0(int idx, int offset)
2736 int mem_index = (idx >> 2) - 1;
2737 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2738 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2741 static inline void gen_ldo_env_A0(int idx, int offset)
2743 int mem_index = (idx >> 2) - 1;
2744 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2745 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2746 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2747 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2748 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2751 static inline void gen_sto_env_A0(int idx, int offset)
2753 int mem_index = (idx >> 2) - 1;
2754 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2755 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2756 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2757 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2758 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2761 static inline void gen_op_movo(int d_offset, int s_offset)
2763 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2764 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2765 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2766 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2769 static inline void gen_op_movq(int d_offset, int s_offset)
2771 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2772 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2775 static inline void gen_op_movl(int d_offset, int s_offset)
2777 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2778 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2781 static inline void gen_op_movq_env_0(int d_offset)
2783 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2784 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
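/* naming convention for the SSE helper pointer types below: the suffix
   encodes the signature, return value first (0 = void, i = i32,
   l = i64), then the arguments (e = env pointer, p = pointer to an
   MMX/XMM register, i = i32, l = i64, t = target_ulong) */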
2787 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2788 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2789 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2790 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2791 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2792 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2793 TCGv_i32 val);
2794 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2795 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2796 TCGv val);
2798 #define SSE_SPECIAL ((void *)1)
2799 #define SSE_DUMMY ((void *)2)
2801 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2802 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2803 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
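/* sse_op_table1 is indexed by the opcode byte that follows 0x0f; the
   four columns select the variant by mandatory prefix: none, 0x66,
   0xf3, 0xf2 (the b1 value computed in gen_sse()). SSE_SPECIAL entries
   are decoded by hand, SSE_DUMMY marks opcodes that are handled before
   the table dispatch (emms/femms). */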
2805 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2806 /* 3DNow! extensions */
2807 [0x0e] = { SSE_DUMMY }, /* femms */
2808 [0x0f] = { SSE_DUMMY }, /* pf... */
2809 /* pure SSE operations */
2810 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2811 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2812 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2813 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2814 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2815 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2816 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2817 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2819 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2820 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2821 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2822 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2823 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2824 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2825 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2826 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2827 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2828 [0x51] = SSE_FOP(sqrt),
2829 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2830 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2831 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2832 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2833 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2834 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2835 [0x58] = SSE_FOP(add),
2836 [0x59] = SSE_FOP(mul),
2837 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2838 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2839 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2840 [0x5c] = SSE_FOP(sub),
2841 [0x5d] = SSE_FOP(min),
2842 [0x5e] = SSE_FOP(div),
2843 [0x5f] = SSE_FOP(max),
2845 [0xc2] = SSE_FOP(cmpeq),
2846 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2847 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2849 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2850 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2852 /* MMX ops and their SSE extensions */
2853 [0x60] = MMX_OP2(punpcklbw),
2854 [0x61] = MMX_OP2(punpcklwd),
2855 [0x62] = MMX_OP2(punpckldq),
2856 [0x63] = MMX_OP2(packsswb),
2857 [0x64] = MMX_OP2(pcmpgtb),
2858 [0x65] = MMX_OP2(pcmpgtw),
2859 [0x66] = MMX_OP2(pcmpgtl),
2860 [0x67] = MMX_OP2(packuswb),
2861 [0x68] = MMX_OP2(punpckhbw),
2862 [0x69] = MMX_OP2(punpckhwd),
2863 [0x6a] = MMX_OP2(punpckhdq),
2864 [0x6b] = MMX_OP2(packssdw),
2865 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2866 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2867 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2868 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2869 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2870 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2871 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2872 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2873 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2874 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2875 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2876 [0x74] = MMX_OP2(pcmpeqb),
2877 [0x75] = MMX_OP2(pcmpeqw),
2878 [0x76] = MMX_OP2(pcmpeql),
2879 [0x77] = { SSE_DUMMY }, /* emms */
2880 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2881 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2882 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2883 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2884 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2885 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2886 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2887 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2888 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2889 [0xd1] = MMX_OP2(psrlw),
2890 [0xd2] = MMX_OP2(psrld),
2891 [0xd3] = MMX_OP2(psrlq),
2892 [0xd4] = MMX_OP2(paddq),
2893 [0xd5] = MMX_OP2(pmullw),
2894 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2895 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2896 [0xd8] = MMX_OP2(psubusb),
2897 [0xd9] = MMX_OP2(psubusw),
2898 [0xda] = MMX_OP2(pminub),
2899 [0xdb] = MMX_OP2(pand),
2900 [0xdc] = MMX_OP2(paddusb),
2901 [0xdd] = MMX_OP2(paddusw),
2902 [0xde] = MMX_OP2(pmaxub),
2903 [0xdf] = MMX_OP2(pandn),
2904 [0xe0] = MMX_OP2(pavgb),
2905 [0xe1] = MMX_OP2(psraw),
2906 [0xe2] = MMX_OP2(psrad),
2907 [0xe3] = MMX_OP2(pavgw),
2908 [0xe4] = MMX_OP2(pmulhuw),
2909 [0xe5] = MMX_OP2(pmulhw),
2910 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2911 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2912 [0xe8] = MMX_OP2(psubsb),
2913 [0xe9] = MMX_OP2(psubsw),
2914 [0xea] = MMX_OP2(pminsw),
2915 [0xeb] = MMX_OP2(por),
2916 [0xec] = MMX_OP2(paddsb),
2917 [0xed] = MMX_OP2(paddsw),
2918 [0xee] = MMX_OP2(pmaxsw),
2919 [0xef] = MMX_OP2(pxor),
2920 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2921 [0xf1] = MMX_OP2(psllw),
2922 [0xf2] = MMX_OP2(pslld),
2923 [0xf3] = MMX_OP2(psllq),
2924 [0xf4] = MMX_OP2(pmuludq),
2925 [0xf5] = MMX_OP2(pmaddwd),
2926 [0xf6] = MMX_OP2(psadbw),
2927 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2928 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2929 [0xf8] = MMX_OP2(psubb),
2930 [0xf9] = MMX_OP2(psubw),
2931 [0xfa] = MMX_OP2(psubl),
2932 [0xfb] = MMX_OP2(psubq),
2933 [0xfc] = MMX_OP2(paddb),
2934 [0xfd] = MMX_OP2(paddw),
2935 [0xfe] = MMX_OP2(paddl),
2938 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2939 [0 + 2] = MMX_OP2(psrlw),
2940 [0 + 4] = MMX_OP2(psraw),
2941 [0 + 6] = MMX_OP2(psllw),
2942 [8 + 2] = MMX_OP2(psrld),
2943 [8 + 4] = MMX_OP2(psrad),
2944 [8 + 6] = MMX_OP2(pslld),
2945 [16 + 2] = MMX_OP2(psrlq),
2946 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2947 [16 + 6] = MMX_OP2(psllq),
2948 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
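/* sse_op_table2: immediate-count shifts (opcode groups 12/13/14 at
   0x71/0x72/0x73); indexed by 8 * (opcode - 0x71) plus the /r field of
   the modrm byte */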
2951 static const SSEFunc_0_epi sse_op_table3ai[] = {
2952 gen_helper_cvtsi2ss,
2953 gen_helper_cvtsi2sd
2956 #ifdef TARGET_X86_64
2957 static const SSEFunc_0_epl sse_op_table3aq[] = {
2958 gen_helper_cvtsq2ss,
2959 gen_helper_cvtsq2sd
2961 #endif
2963 static const SSEFunc_i_ep sse_op_table3bi[] = {
2964 gen_helper_cvttss2si,
2965 gen_helper_cvtss2si,
2966 gen_helper_cvttsd2si,
2967 gen_helper_cvtsd2si
2970 #ifdef TARGET_X86_64
2971 static const SSEFunc_l_ep sse_op_table3bq[] = {
2972 gen_helper_cvttss2sq,
2973 gen_helper_cvtss2sq,
2974 gen_helper_cvttsd2sq,
2975 gen_helper_cvtsd2sq
2977 #endif
2979 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2980 SSE_FOP(cmpeq),
2981 SSE_FOP(cmplt),
2982 SSE_FOP(cmple),
2983 SSE_FOP(cmpunord),
2984 SSE_FOP(cmpneq),
2985 SSE_FOP(cmpnlt),
2986 SSE_FOP(cmpnle),
2987 SSE_FOP(cmpord),
2990 static const SSEFunc_0_epp sse_op_table5[256] = {
2991 [0x0c] = gen_helper_pi2fw,
2992 [0x0d] = gen_helper_pi2fd,
2993 [0x1c] = gen_helper_pf2iw,
2994 [0x1d] = gen_helper_pf2id,
2995 [0x8a] = gen_helper_pfnacc,
2996 [0x8e] = gen_helper_pfpnacc,
2997 [0x90] = gen_helper_pfcmpge,
2998 [0x94] = gen_helper_pfmin,
2999 [0x96] = gen_helper_pfrcp,
3000 [0x97] = gen_helper_pfrsqrt,
3001 [0x9a] = gen_helper_pfsub,
3002 [0x9e] = gen_helper_pfadd,
3003 [0xa0] = gen_helper_pfcmpgt,
3004 [0xa4] = gen_helper_pfmax,
3005 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3006 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3007 [0xaa] = gen_helper_pfsubr,
3008 [0xae] = gen_helper_pfacc,
3009 [0xb0] = gen_helper_pfcmpeq,
3010 [0xb4] = gen_helper_pfmul,
3011 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3012 [0xb7] = gen_helper_pmulhrw_mmx,
3013 [0xbb] = gen_helper_pswapd,
3014 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
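/* sse_op_table5: 3DNow! operations (0x0f 0x0f modrm ... suffix),
   indexed by the one-byte opcode suffix that follows the operands */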
3017 struct SSEOpHelper_epp {
3018 SSEFunc_0_epp op[2];
3019 uint32_t ext_mask;
3022 struct SSEOpHelper_eppi {
3023 SSEFunc_0_eppi op[2];
3024 uint32_t ext_mask;
3027 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3028 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3029 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3030 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
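/* sse_op_table6/7 cover the three-byte opcode maps 0x0f38 (no
   immediate) and 0x0f3a (one immediate byte); ext_mask is matched
   against the guest CPUID feature bits before the operation is
   accepted */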
3032 static const struct SSEOpHelper_epp sse_op_table6[256] = {
3033 [0x00] = SSSE3_OP(pshufb),
3034 [0x01] = SSSE3_OP(phaddw),
3035 [0x02] = SSSE3_OP(phaddd),
3036 [0x03] = SSSE3_OP(phaddsw),
3037 [0x04] = SSSE3_OP(pmaddubsw),
3038 [0x05] = SSSE3_OP(phsubw),
3039 [0x06] = SSSE3_OP(phsubd),
3040 [0x07] = SSSE3_OP(phsubsw),
3041 [0x08] = SSSE3_OP(psignb),
3042 [0x09] = SSSE3_OP(psignw),
3043 [0x0a] = SSSE3_OP(psignd),
3044 [0x0b] = SSSE3_OP(pmulhrsw),
3045 [0x10] = SSE41_OP(pblendvb),
3046 [0x14] = SSE41_OP(blendvps),
3047 [0x15] = SSE41_OP(blendvpd),
3048 [0x17] = SSE41_OP(ptest),
3049 [0x1c] = SSSE3_OP(pabsb),
3050 [0x1d] = SSSE3_OP(pabsw),
3051 [0x1e] = SSSE3_OP(pabsd),
3052 [0x20] = SSE41_OP(pmovsxbw),
3053 [0x21] = SSE41_OP(pmovsxbd),
3054 [0x22] = SSE41_OP(pmovsxbq),
3055 [0x23] = SSE41_OP(pmovsxwd),
3056 [0x24] = SSE41_OP(pmovsxwq),
3057 [0x25] = SSE41_OP(pmovsxdq),
3058 [0x28] = SSE41_OP(pmuldq),
3059 [0x29] = SSE41_OP(pcmpeqq),
3060 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3061 [0x2b] = SSE41_OP(packusdw),
3062 [0x30] = SSE41_OP(pmovzxbw),
3063 [0x31] = SSE41_OP(pmovzxbd),
3064 [0x32] = SSE41_OP(pmovzxbq),
3065 [0x33] = SSE41_OP(pmovzxwd),
3066 [0x34] = SSE41_OP(pmovzxwq),
3067 [0x35] = SSE41_OP(pmovzxdq),
3068 [0x37] = SSE42_OP(pcmpgtq),
3069 [0x38] = SSE41_OP(pminsb),
3070 [0x39] = SSE41_OP(pminsd),
3071 [0x3a] = SSE41_OP(pminuw),
3072 [0x3b] = SSE41_OP(pminud),
3073 [0x3c] = SSE41_OP(pmaxsb),
3074 [0x3d] = SSE41_OP(pmaxsd),
3075 [0x3e] = SSE41_OP(pmaxuw),
3076 [0x3f] = SSE41_OP(pmaxud),
3077 [0x40] = SSE41_OP(pmulld),
3078 [0x41] = SSE41_OP(phminposuw),
3081 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
3082 [0x08] = SSE41_OP(roundps),
3083 [0x09] = SSE41_OP(roundpd),
3084 [0x0a] = SSE41_OP(roundss),
3085 [0x0b] = SSE41_OP(roundsd),
3086 [0x0c] = SSE41_OP(blendps),
3087 [0x0d] = SSE41_OP(blendpd),
3088 [0x0e] = SSE41_OP(pblendw),
3089 [0x0f] = SSSE3_OP(palignr),
3090 [0x14] = SSE41_SPECIAL, /* pextrb */
3091 [0x15] = SSE41_SPECIAL, /* pextrw */
3092 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3093 [0x17] = SSE41_SPECIAL, /* extractps */
3094 [0x20] = SSE41_SPECIAL, /* pinsrb */
3095 [0x21] = SSE41_SPECIAL, /* insertps */
3096 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3097 [0x40] = SSE41_OP(dpps),
3098 [0x41] = SSE41_OP(dppd),
3099 [0x42] = SSE41_OP(mpsadbw),
3100 [0x60] = SSE42_OP(pcmpestrm),
3101 [0x61] = SSE42_OP(pcmpestri),
3102 [0x62] = SSE42_OP(pcmpistrm),
3103 [0x63] = SSE42_OP(pcmpistri),
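/* decode and emit one MMX/SSE instruction. b is the opcode byte after
   the 0x0f escape (the 0x100 marker added by disas_insn is masked off
   below); the mandatory prefix, if any, selects the table column b1. */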
3106 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
3107 target_ulong pc_start, int rex_r)
3109 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3110 int modrm, mod, rm, reg, reg_addr, offset_addr;
3111 SSEFunc_0_epp sse_fn_epp;
3112 SSEFunc_0_eppi sse_fn_eppi;
3113 SSEFunc_0_ppi sse_fn_ppi;
3114 SSEFunc_0_eppt sse_fn_eppt;
3116 b &= 0xff;
3117 if (s->prefix & PREFIX_DATA)
3118 b1 = 1;
3119 else if (s->prefix & PREFIX_REPZ)
3120 b1 = 2;
3121 else if (s->prefix & PREFIX_REPNZ)
3122 b1 = 3;
3123 else
3124 b1 = 0;
3125 sse_fn_epp = sse_op_table1[b][b1];
3126 if (!sse_fn_epp) {
3127 goto illegal_op;
3129 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3130 is_xmm = 1;
3131 } else {
3132 if (b1 == 0) {
3133 /* MMX case */
3134 is_xmm = 0;
3135 } else {
3136 is_xmm = 1;
3139 /* simple MMX/SSE operation */
3140 if (s->flags & HF_TS_MASK) {
3141 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3142 return;
3144 if (s->flags & HF_EM_MASK) {
3145 illegal_op:
3146 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3147 return;
3149 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3150 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3151 goto illegal_op;
3152 if (b == 0x0e) {
3153 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3154 goto illegal_op;
3155 /* femms */
3156 gen_helper_emms(cpu_env);
3157 return;
3159 if (b == 0x77) {
3160 /* emms */
3161 gen_helper_emms(cpu_env);
3162 return;
3164 /* prepare MMX state (XXX: optimize by storing fpstt and fptags in
3165 the static cpu state) */
3166 if (!is_xmm) {
3167 gen_helper_enter_mmx(cpu_env);
3170 modrm = cpu_ldub_code(env, s->pc++);
3171 reg = ((modrm >> 3) & 7);
3172 if (is_xmm)
3173 reg |= rex_r;
3174 mod = (modrm >> 6) & 3;
3175 if (sse_fn_epp == SSE_SPECIAL) {
3176 b |= (b1 << 8);
3177 switch(b) {
3178 case 0x0e7: /* movntq */
3179 if (mod == 3)
3180 goto illegal_op;
3181 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3182 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3183 break;
3184 case 0x1e7: /* movntdq */
3185 case 0x02b: /* movntps */
3186 case 0x12b: /* movntpd */
3187 if (mod == 3)
3188 goto illegal_op;
3189 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3190 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3191 break;
3192 case 0x3f0: /* lddqu */
3193 if (mod == 3)
3194 goto illegal_op;
3195 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3196 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3197 break;
3198 case 0x22b: /* movntss */
3199 case 0x32b: /* movntsd */
3200 if (mod == 3)
3201 goto illegal_op;
3202 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3203 if (b1 & 1) {
3204 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3205 xmm_regs[reg]));
3206 } else {
3207 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3208 xmm_regs[reg].XMM_L(0)));
3209 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3211 break;
3212 case 0x6e: /* movd mm, ea */
3213 #ifdef TARGET_X86_64
3214 if (s->dflag == 2) {
3215 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3216 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3217 } else
3218 #endif
3220 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3221 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3222 offsetof(CPUX86State,fpregs[reg].mmx));
3223 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3224 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3226 break;
3227 case 0x16e: /* movd xmm, ea */
3228 #ifdef TARGET_X86_64
3229 if (s->dflag == 2) {
3230 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 0);
3231 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3232 offsetof(CPUX86State,xmm_regs[reg]));
3233 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3234 } else
3235 #endif
3237 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 0);
3238 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3239 offsetof(CPUX86State,xmm_regs[reg]));
3240 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3241 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3243 break;
3244 case 0x6f: /* movq mm, ea */
3245 if (mod != 3) {
3246 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3247 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3248 } else {
3249 rm = (modrm & 7);
3250 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3251 offsetof(CPUX86State,fpregs[rm].mmx));
3252 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3253 offsetof(CPUX86State,fpregs[reg].mmx));
3255 break;
3256 case 0x010: /* movups */
3257 case 0x110: /* movupd */
3258 case 0x028: /* movaps */
3259 case 0x128: /* movapd */
3260 case 0x16f: /* movdqa xmm, ea */
3261 case 0x26f: /* movdqu xmm, ea */
3262 if (mod != 3) {
3263 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3264 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3265 } else {
3266 rm = (modrm & 7) | REX_B(s);
3267 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3268 offsetof(CPUX86State,xmm_regs[rm]));
3270 break;
3271 case 0x210: /* movss xmm, ea */
3272 if (mod != 3) {
3273 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3274 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3275 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3276 gen_op_movl_T0_0();
3277 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3278 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3279 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3280 } else {
3281 rm = (modrm & 7) | REX_B(s);
3282 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3283 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3285 break;
3286 case 0x310: /* movsd xmm, ea */
3287 if (mod != 3) {
3288 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3289 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3290 gen_op_movl_T0_0();
3291 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3292 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3293 } else {
3294 rm = (modrm & 7) | REX_B(s);
3295 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3296 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3298 break;
3299 case 0x012: /* movlps */
3300 case 0x112: /* movlpd */
3301 if (mod != 3) {
3302 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3303 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3304 } else {
3305 /* movhlps */
3306 rm = (modrm & 7) | REX_B(s);
3307 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3308 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3310 break;
3311 case 0x212: /* movsldup */
3312 if (mod != 3) {
3313 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3314 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3315 } else {
3316 rm = (modrm & 7) | REX_B(s);
3317 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3318 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3319 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3320 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3322 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3323 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3324 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3325 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3326 break;
3327 case 0x312: /* movddup */
3328 if (mod != 3) {
3329 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3330 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3331 } else {
3332 rm = (modrm & 7) | REX_B(s);
3333 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3334 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3336 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3337 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3338 break;
3339 case 0x016: /* movhps */
3340 case 0x116: /* movhpd */
3341 if (mod != 3) {
3342 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3343 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3344 } else {
3345 /* movlhps */
3346 rm = (modrm & 7) | REX_B(s);
3347 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3348 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3350 break;
3351 case 0x216: /* movshdup */
3352 if (mod != 3) {
3353 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3354 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3355 } else {
3356 rm = (modrm & 7) | REX_B(s);
3357 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3358 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3359 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3360 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3362 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3363 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3364 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3365 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3366 break;
3367 case 0x178:
3368 case 0x378:
3370 int bit_index, field_length;
3372 if (b1 == 1 && reg != 0)
3373 goto illegal_op;
3374 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3375 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3376 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3377 offsetof(CPUX86State,xmm_regs[reg]));
3378 if (b1 == 1)
3379 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3380 tcg_const_i32(bit_index),
3381 tcg_const_i32(field_length));
3382 else
3383 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3384 tcg_const_i32(bit_index),
3385 tcg_const_i32(field_length));
3387 break;
3388 case 0x7e: /* movd ea, mm */
3389 #ifdef TARGET_X86_64
3390 if (s->dflag == 2) {
3391 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3392 offsetof(CPUX86State,fpregs[reg].mmx));
3393 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3394 } else
3395 #endif
3397 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3398 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3399 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3401 break;
3402 case 0x17e: /* movd ea, xmm */
3403 #ifdef TARGET_X86_64
3404 if (s->dflag == 2) {
3405 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3406 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3407 gen_ldst_modrm(env, s, modrm, OT_QUAD, OR_TMP0, 1);
3408 } else
3409 #endif
3411 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3412 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3413 gen_ldst_modrm(env, s, modrm, OT_LONG, OR_TMP0, 1);
3415 break;
3416 case 0x27e: /* movq xmm, ea */
3417 if (mod != 3) {
3418 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3419 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3420 } else {
3421 rm = (modrm & 7) | REX_B(s);
3422 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3423 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3425 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3426 break;
3427 case 0x7f: /* movq ea, mm */
3428 if (mod != 3) {
3429 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3430 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3431 } else {
3432 rm = (modrm & 7);
3433 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3434 offsetof(CPUX86State,fpregs[reg].mmx));
3436 break;
3437 case 0x011: /* movups */
3438 case 0x111: /* movupd */
3439 case 0x029: /* movaps */
3440 case 0x129: /* movapd */
3441 case 0x17f: /* movdqa ea, xmm */
3442 case 0x27f: /* movdqu ea, xmm */
3443 if (mod != 3) {
3444 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3445 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3446 } else {
3447 rm = (modrm & 7) | REX_B(s);
3448 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3449 offsetof(CPUX86State,xmm_regs[reg]));
3451 break;
3452 case 0x211: /* movss ea, xmm */
3453 if (mod != 3) {
3454 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3455 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3456 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3457 } else {
3458 rm = (modrm & 7) | REX_B(s);
3459 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3460 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3462 break;
3463 case 0x311: /* movsd ea, xmm */
3464 if (mod != 3) {
3465 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3466 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3467 } else {
3468 rm = (modrm & 7) | REX_B(s);
3469 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3470 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3472 break;
3473 case 0x013: /* movlps */
3474 case 0x113: /* movlpd */
3475 if (mod != 3) {
3476 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3477 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3478 } else {
3479 goto illegal_op;
3481 break;
3482 case 0x017: /* movhps */
3483 case 0x117: /* movhpd */
3484 if (mod != 3) {
3485 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3486 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3487 } else {
3488 goto illegal_op;
3490 break;
3491 case 0x71: /* shift mm, im */
3492 case 0x72:
3493 case 0x73:
3494 case 0x171: /* shift xmm, im */
3495 case 0x172:
3496 case 0x173:
3497 if (b1 >= 2) {
3498 goto illegal_op;
3500 val = cpu_ldub_code(env, s->pc++);
3501 if (is_xmm) {
3502 gen_op_movl_T0_im(val);
3503 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3504 gen_op_movl_T0_0();
3505 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3506 op1_offset = offsetof(CPUX86State,xmm_t0);
3507 } else {
3508 gen_op_movl_T0_im(val);
3509 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3510 gen_op_movl_T0_0();
3511 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3512 op1_offset = offsetof(CPUX86State,mmx_t0);
3514 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3515 (((modrm >> 3)) & 7)][b1];
3516 if (!sse_fn_epp) {
3517 goto illegal_op;
3519 if (is_xmm) {
3520 rm = (modrm & 7) | REX_B(s);
3521 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3522 } else {
3523 rm = (modrm & 7);
3524 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3526 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3527 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3528 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3529 break;
3530 case 0x050: /* movmskps */
3531 rm = (modrm & 7) | REX_B(s);
3532 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3533 offsetof(CPUX86State,xmm_regs[rm]));
3534 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3535 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3536 gen_op_mov_reg_T0(OT_LONG, reg);
3537 break;
3538 case 0x150: /* movmskpd */
3539 rm = (modrm & 7) | REX_B(s);
3540 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3541 offsetof(CPUX86State,xmm_regs[rm]));
3542 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3543 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3544 gen_op_mov_reg_T0(OT_LONG, reg);
3545 break;
3546 case 0x02a: /* cvtpi2ps */
3547 case 0x12a: /* cvtpi2pd */
3548 gen_helper_enter_mmx(cpu_env);
3549 if (mod != 3) {
3550 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3551 op2_offset = offsetof(CPUX86State,mmx_t0);
3552 gen_ldq_env_A0(s->mem_index, op2_offset);
3553 } else {
3554 rm = (modrm & 7);
3555 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3557 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3558 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3559 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3560 switch(b >> 8) {
3561 case 0x0:
3562 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3563 break;
3564 default:
3565 case 0x1:
3566 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3567 break;
3569 break;
3570 case 0x22a: /* cvtsi2ss */
3571 case 0x32a: /* cvtsi2sd */
3572 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3573 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3574 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3575 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3576 if (ot == OT_LONG) {
3577 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3578 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3579 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3580 } else {
3581 #ifdef TARGET_X86_64
3582 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3583 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3584 #else
3585 goto illegal_op;
3586 #endif
3588 break;
3589 case 0x02c: /* cvttps2pi */
3590 case 0x12c: /* cvttpd2pi */
3591 case 0x02d: /* cvtps2pi */
3592 case 0x12d: /* cvtpd2pi */
3593 gen_helper_enter_mmx(cpu_env);
3594 if (mod != 3) {
3595 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3596 op2_offset = offsetof(CPUX86State,xmm_t0);
3597 gen_ldo_env_A0(s->mem_index, op2_offset);
3598 } else {
3599 rm = (modrm & 7) | REX_B(s);
3600 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3602 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3603 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3604 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3605 switch(b) {
3606 case 0x02c:
3607 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3608 break;
3609 case 0x12c:
3610 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3611 break;
3612 case 0x02d:
3613 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3614 break;
3615 case 0x12d:
3616 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3617 break;
3619 break;
3620 case 0x22c: /* cvttss2si */
3621 case 0x32c: /* cvttsd2si */
3622 case 0x22d: /* cvtss2si */
3623 case 0x32d: /* cvtsd2si */
3624 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3625 if (mod != 3) {
3626 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3627 if ((b >> 8) & 1) {
3628 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3629 } else {
3630 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3631 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3633 op2_offset = offsetof(CPUX86State,xmm_t0);
3634 } else {
3635 rm = (modrm & 7) | REX_B(s);
3636 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3638 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3639 if (ot == OT_LONG) {
3640 SSEFunc_i_ep sse_fn_i_ep =
3641 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3642 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3643 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3644 } else {
3645 #ifdef TARGET_X86_64
3646 SSEFunc_l_ep sse_fn_l_ep =
3647 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3648 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3649 #else
3650 goto illegal_op;
3651 #endif
3653 gen_op_mov_reg_T0(ot, reg);
3654 break;
3655 case 0xc4: /* pinsrw */
3656 case 0x1c4:
3657 s->rip_offset = 1;
3658 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
3659 val = cpu_ldub_code(env, s->pc++);
3660 if (b1) {
3661 val &= 7;
3662 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3663 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3664 } else {
3665 val &= 3;
3666 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3667 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3669 break;
3670 case 0xc5: /* pextrw */
3671 case 0x1c5:
3672 if (mod != 3)
3673 goto illegal_op;
3674 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3675 val = cpu_ldub_code(env, s->pc++);
3676 if (b1) {
3677 val &= 7;
3678 rm = (modrm & 7) | REX_B(s);
3679 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3680 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3681 } else {
3682 val &= 3;
3683 rm = (modrm & 7);
3684 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3685 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3687 reg = ((modrm >> 3) & 7) | rex_r;
3688 gen_op_mov_reg_T0(ot, reg);
3689 break;
3690 case 0x1d6: /* movq ea, xmm */
3691 if (mod != 3) {
3692 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3693 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3694 } else {
3695 rm = (modrm & 7) | REX_B(s);
3696 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3697 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3698 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3700 break;
3701 case 0x2d6: /* movq2dq */
3702 gen_helper_enter_mmx(cpu_env);
3703 rm = (modrm & 7);
3704 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3705 offsetof(CPUX86State,fpregs[rm].mmx));
3706 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3707 break;
3708 case 0x3d6: /* movdq2q */
3709 gen_helper_enter_mmx(cpu_env);
3710 rm = (modrm & 7) | REX_B(s);
3711 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3712 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3713 break;
3714 case 0xd7: /* pmovmskb */
3715 case 0x1d7:
3716 if (mod != 3)
3717 goto illegal_op;
3718 if (b1) {
3719 rm = (modrm & 7) | REX_B(s);
3720 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3721 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3722 } else {
3723 rm = (modrm & 7);
3724 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3725 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3727 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3728 reg = ((modrm >> 3) & 7) | rex_r;
3729 gen_op_mov_reg_T0(OT_LONG, reg);
3730 break;
3731 case 0x138:
3732 if (s->prefix & PREFIX_REPNZ)
3733 goto crc32;
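/* fall through */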
3734 case 0x038:
3735 b = modrm;
3736 modrm = cpu_ldub_code(env, s->pc++);
3737 rm = modrm & 7;
3738 reg = ((modrm >> 3) & 7) | rex_r;
3739 mod = (modrm >> 6) & 3;
3740 if (b1 >= 2) {
3741 goto illegal_op;
3744 sse_fn_epp = sse_op_table6[b].op[b1];
3745 if (!sse_fn_epp) {
3746 goto illegal_op;
3748 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3749 goto illegal_op;
3751 if (b1) {
3752 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3753 if (mod == 3) {
3754 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3755 } else {
3756 op2_offset = offsetof(CPUX86State,xmm_t0);
3757 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3758 switch (b) {
3759 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3760 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3761 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3762 gen_ldq_env_A0(s->mem_index, op2_offset +
3763 offsetof(XMMReg, XMM_Q(0)));
3764 break;
3765 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3766 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3767 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3768 (s->mem_index >> 2) - 1);
3769 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3770 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3771 offsetof(XMMReg, XMM_L(0)));
3772 break;
3773 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3774 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3775 (s->mem_index >> 2) - 1);
3776 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3777 offsetof(XMMReg, XMM_W(0)));
3778 break;
3779 case 0x2a: /* movntdqa */
3780 gen_ldo_env_A0(s->mem_index, op1_offset);
3781 return;
3782 default:
3783 gen_ldo_env_A0(s->mem_index, op2_offset);
3786 } else {
3787 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3788 if (mod == 3) {
3789 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3790 } else {
3791 op2_offset = offsetof(CPUX86State,mmx_t0);
3792 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3793 gen_ldq_env_A0(s->mem_index, op2_offset);
3796 if (sse_fn_epp == SSE_SPECIAL) {
3797 goto illegal_op;
3800 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3801 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3802 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
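/* ptest (0x17) computes ZF/CF in the helper, which leaves the result
   in CC_SRC; switch to CC_OP_EFLAGS so the flags are taken from there */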
3804 if (b == 0x17) {
3805 set_cc_op(s, CC_OP_EFLAGS);
3807 break;
3808 case 0x338: /* crc32 */
3809 crc32:
3810 b = modrm;
3811 modrm = cpu_ldub_code(env, s->pc++);
3812 reg = ((modrm >> 3) & 7) | rex_r;
3814 if (b != 0xf0 && b != 0xf1)
3815 goto illegal_op;
3816 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3817 goto illegal_op;
3819 if (b == 0xf0) {
3820 ot = OT_BYTE;
3821 } else if (b == 0xf1 && s->dflag != 2) {
3822 if (s->prefix & PREFIX_DATA)
3823 ot = OT_WORD;
3824 else
3825 ot = OT_LONG;
3826 } else
3827 ot = OT_QUAD;
3829 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3830 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3831 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3832 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3833 cpu_T[0], tcg_const_i32(8 << ot));
3835 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3836 gen_op_mov_reg_T0(ot, reg);
3837 break;
3838 case 0x03a:
3839 case 0x13a:
3840 b = modrm;
3841 modrm = cpu_ldub_code(env, s->pc++);
3842 rm = modrm & 7;
3843 reg = ((modrm >> 3) & 7) | rex_r;
3844 mod = (modrm >> 6) & 3;
3845 if (b1 >= 2) {
3846 goto illegal_op;
3849 sse_fn_eppi = sse_op_table7[b].op[b1];
3850 if (!sse_fn_eppi) {
3851 goto illegal_op;
3853 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3854 goto illegal_op;
3856 if (sse_fn_eppi == SSE_SPECIAL) {
3857 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3858 rm = (modrm & 7) | REX_B(s);
3859 if (mod != 3)
3860 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3861 reg = ((modrm >> 3) & 7) | rex_r;
3862 val = cpu_ldub_code(env, s->pc++);
3863 switch (b) {
3864 case 0x14: /* pextrb */
3865 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3866 xmm_regs[reg].XMM_B(val & 15)));
3867 if (mod == 3)
3868 gen_op_mov_reg_T0(ot, rm);
3869 else
3870 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3871 (s->mem_index >> 2) - 1);
3872 break;
3873 case 0x15: /* pextrw */
3874 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3875 xmm_regs[reg].XMM_W(val & 7)));
3876 if (mod == 3)
3877 gen_op_mov_reg_T0(ot, rm);
3878 else
3879 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3880 (s->mem_index >> 2) - 1);
3881 break;
3882 case 0x16:
3883 if (ot == OT_LONG) { /* pextrd */
3884 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3885 offsetof(CPUX86State,
3886 xmm_regs[reg].XMM_L(val & 3)));
3887 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3888 if (mod == 3)
3889 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3890 else
3891 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3892 (s->mem_index >> 2) - 1);
3893 } else { /* pextrq */
3894 #ifdef TARGET_X86_64
3895 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3896 offsetof(CPUX86State,
3897 xmm_regs[reg].XMM_Q(val & 1)));
3898 if (mod == 3)
3899 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3900 else
3901 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3902 (s->mem_index >> 2) - 1);
3903 #else
3904 goto illegal_op;
3905 #endif
3907 break;
3908 case 0x17: /* extractps */
3909 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3910 xmm_regs[reg].XMM_L(val & 3)));
3911 if (mod == 3)
3912 gen_op_mov_reg_T0(ot, rm);
3913 else
3914 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3915 (s->mem_index >> 2) - 1);
3916 break;
3917 case 0x20: /* pinsrb */
3918 if (mod == 3)
3919 gen_op_mov_TN_reg(OT_LONG, 0, rm);
3920 else
3921 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
3922 (s->mem_index >> 2) - 1);
3923 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
3924 xmm_regs[reg].XMM_B(val & 15)));
3925 break;
3926 case 0x21: /* insertps */
3927 if (mod == 3) {
3928 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3929 offsetof(CPUX86State,xmm_regs[rm]
3930 .XMM_L((val >> 6) & 3)));
3931 } else {
3932 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3933 (s->mem_index >> 2) - 1);
3934 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3936 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3937 offsetof(CPUX86State,xmm_regs[reg]
3938 .XMM_L((val >> 4) & 3)));
3939 if ((val >> 0) & 1)
3940 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3941 cpu_env, offsetof(CPUX86State,
3942 xmm_regs[reg].XMM_L(0)));
3943 if ((val >> 1) & 1)
3944 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3945 cpu_env, offsetof(CPUX86State,
3946 xmm_regs[reg].XMM_L(1)));
3947 if ((val >> 2) & 1)
3948 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3949 cpu_env, offsetof(CPUX86State,
3950 xmm_regs[reg].XMM_L(2)));
3951 if ((val >> 3) & 1)
3952 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3953 cpu_env, offsetof(CPUX86State,
3954 xmm_regs[reg].XMM_L(3)));
3955 break;
3956 case 0x22:
3957 if (ot == OT_LONG) { /* pinsrd */
3958 if (mod == 3)
3959 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
3960 else
3961 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3962 (s->mem_index >> 2) - 1);
3963 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3964 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3965 offsetof(CPUX86State,
3966 xmm_regs[reg].XMM_L(val & 3)));
3967 } else { /* pinsrq */
3968 #ifdef TARGET_X86_64
3969 if (mod == 3)
3970 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3971 else
3972 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
3973 (s->mem_index >> 2) - 1);
3974 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3975 offsetof(CPUX86State,
3976 xmm_regs[reg].XMM_Q(val & 1)));
3977 #else
3978 goto illegal_op;
3979 #endif
3981 break;
3983 return;
3986 if (b1) {
3987 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3988 if (mod == 3) {
3989 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3990 } else {
3991 op2_offset = offsetof(CPUX86State,xmm_t0);
3992 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
3993 gen_ldo_env_A0(s->mem_index, op2_offset);
3995 } else {
3996 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3997 if (mod == 3) {
3998 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3999 } else {
4000 op2_offset = offsetof(CPUX86State,mmx_t0);
4001 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4002 gen_ldq_env_A0(s->mem_index, op2_offset);
4005 val = cpu_ldub_code(env, s->pc++);
4007 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4008 set_cc_op(s, CC_OP_EFLAGS);
4010 if (s->dflag == 2)
4011 /* The helper must use entire 64-bit gp registers */
4012 val |= 1 << 8;
4015 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4016 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4017 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4018 break;
4019 default:
4020 goto illegal_op;
4022 } else {
4023 /* generic MMX or SSE operation */
4024 switch(b) {
4025 case 0x70: /* pshufx insn */
4026 case 0xc6: /* pshufx insn */
4027 case 0xc2: /* compare insns */
4028 s->rip_offset = 1;
4029 break;
4030 default:
4031 break;
4033 if (is_xmm) {
4034 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4035 if (mod != 3) {
4036 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4037 op2_offset = offsetof(CPUX86State,xmm_t0);
4038 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4039 b == 0xc2)) {
4040 /* specific case for SSE single instructions */
4041 if (b1 == 2) {
4042 /* 32 bit access */
4043 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4044 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4045 } else {
4046 /* 64 bit access */
4047 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4049 } else {
4050 gen_ldo_env_A0(s->mem_index, op2_offset);
4052 } else {
4053 rm = (modrm & 7) | REX_B(s);
4054 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4056 } else {
4057 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4058 if (mod != 3) {
4059 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4060 op2_offset = offsetof(CPUX86State,mmx_t0);
4061 gen_ldq_env_A0(s->mem_index, op2_offset);
4062 } else {
4063 rm = (modrm & 7);
4064 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4067 switch(b) {
4068 case 0x0f: /* 3DNow! data insns */
4069 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4070 goto illegal_op;
4071 val = cpu_ldub_code(env, s->pc++);
4072 sse_fn_epp = sse_op_table5[val];
4073 if (!sse_fn_epp) {
4074 goto illegal_op;
4076 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4077 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4078 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4079 break;
4080 case 0x70: /* pshufx insn */
4081 case 0xc6: /* pshufx insn */
4082 val = cpu_ldub_code(env, s->pc++);
4083 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4084 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4085 /* XXX: introduce a new table? */
4086 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4087 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4088 break;
4089 case 0xc2:
4090 /* compare insns */
4091 val = cpu_ldub_code(env, s->pc++);
4092 if (val >= 8)
4093 goto illegal_op;
4094 sse_fn_epp = sse_op_table4[val][b1];
4096 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4097 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4098 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4099 break;
4100 case 0xf7:
4101 /* maskmov : we must prepare A0 */
4102 if (mod != 3)
4103 goto illegal_op;
4104 #ifdef TARGET_X86_64
4105 if (s->aflag == 2) {
4106 gen_op_movq_A0_reg(R_EDI);
4107 } else
4108 #endif
4110 gen_op_movl_A0_reg(R_EDI);
4111 if (s->aflag == 0)
4112 gen_op_andl_A0_ffff();
4114 gen_add_A0_ds_seg(s);
4116 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4117 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4118 /* XXX: introduce a new table? */
4119 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4120 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4121 break;
4122 default:
4123 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4124 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4125 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4126 break;
4128 if (b == 0x2e || b == 0x2f) {
4129 set_cc_op(s, CC_OP_EFLAGS);
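/* ([u]comiss/[u]comisd above also compute their flags in the helper
   and leave them in CC_SRC, hence the switch to CC_OP_EFLAGS) */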
4134 /* convert one instruction. s->is_jmp is set if the translation must
4135 be stopped. Return the next pc value */
4136 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4137 target_ulong pc_start)
4139 int b, prefixes, aflag, dflag;
4140 int shift, ot;
4141 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4142 target_ulong next_eip, tval;
4143 int rex_w, rex_r;
4145 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4146 tcg_gen_debug_insn_start(pc_start);
4148 s->pc = pc_start;
4149 prefixes = 0;
4150 aflag = s->code32;
4151 dflag = s->code32;
4152 s->override = -1;
4153 rex_w = -1;
4154 rex_r = 0;
4155 #ifdef TARGET_X86_64
4156 s->rex_x = 0;
4157 s->rex_b = 0;
4158 x86_64_hregs = 0;
4159 #endif
4160 s->rip_offset = 0; /* for relative ip address */
4161 next_byte:
4162 b = cpu_ldub_code(env, s->pc);
4163 s->pc++;
4164 /* check prefixes */
4165 #ifdef TARGET_X86_64
4166 if (CODE64(s)) {
4167 switch (b) {
4168 case 0xf3:
4169 prefixes |= PREFIX_REPZ;
4170 goto next_byte;
4171 case 0xf2:
4172 prefixes |= PREFIX_REPNZ;
4173 goto next_byte;
4174 case 0xf0:
4175 prefixes |= PREFIX_LOCK;
4176 goto next_byte;
4177 case 0x2e:
4178 s->override = R_CS;
4179 goto next_byte;
4180 case 0x36:
4181 s->override = R_SS;
4182 goto next_byte;
4183 case 0x3e:
4184 s->override = R_DS;
4185 goto next_byte;
4186 case 0x26:
4187 s->override = R_ES;
4188 goto next_byte;
4189 case 0x64:
4190 s->override = R_FS;
4191 goto next_byte;
4192 case 0x65:
4193 s->override = R_GS;
4194 goto next_byte;
4195 case 0x66:
4196 prefixes |= PREFIX_DATA;
4197 goto next_byte;
4198 case 0x67:
4199 prefixes |= PREFIX_ADR;
4200 goto next_byte;
4201 case 0x40 ... 0x4f:
4202 /* REX prefix */
4203 rex_w = (b >> 3) & 1;
4204 rex_r = (b & 0x4) << 1;
4205 s->rex_x = (b & 0x2) << 2;
4206 REX_B(s) = (b & 0x1) << 3;
4207 x86_64_hregs = 1; /* select uniform byte register addressing */
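/* REX is 0100WRXB. The R/X/B bits are pre-shifted to bit position 3
   (value 8) so they can simply be OR'ed into the ModRM reg, SIB index
   and base/rm fields below; e.g. REX.R with reg field 1 selects r9. */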
4208 goto next_byte;
4209 }
4210 if (rex_w == 1) {
4211 /* 0x66 is ignored if rex.w is set */
4212 dflag = 2;
4213 } else {
4214 if (prefixes & PREFIX_DATA)
4215 dflag ^= 1;
4216 }
4217 if (!(prefixes & PREFIX_ADR))
4218 aflag = 2;
4219 } else
4220 #endif
4221 {
4222 switch (b) {
4223 case 0xf3:
4224 prefixes |= PREFIX_REPZ;
4225 goto next_byte;
4226 case 0xf2:
4227 prefixes |= PREFIX_REPNZ;
4228 goto next_byte;
4229 case 0xf0:
4230 prefixes |= PREFIX_LOCK;
4231 goto next_byte;
4232 case 0x2e:
4233 s->override = R_CS;
4234 goto next_byte;
4235 case 0x36:
4236 s->override = R_SS;
4237 goto next_byte;
4238 case 0x3e:
4239 s->override = R_DS;
4240 goto next_byte;
4241 case 0x26:
4242 s->override = R_ES;
4243 goto next_byte;
4244 case 0x64:
4245 s->override = R_FS;
4246 goto next_byte;
4247 case 0x65:
4248 s->override = R_GS;
4249 goto next_byte;
4250 case 0x66:
4251 prefixes |= PREFIX_DATA;
4252 goto next_byte;
4253 case 0x67:
4254 prefixes |= PREFIX_ADR;
4255 goto next_byte;
4256 }
4257 if (prefixes & PREFIX_DATA)
4258 dflag ^= 1;
4259 if (prefixes & PREFIX_ADR)
4260 aflag ^= 1;
4261 }
4263 s->prefix = prefixes;
4264 s->aflag = aflag;
4265 s->dflag = dflag;
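/* dflag/aflag encode the effective operand/address size:
   0 = 16-bit, 1 = 32-bit, 2 = 64-bit. Most cases below derive the
   operand type as "ot = dflag + OT_WORD". */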
4267 /* lock generation */
4268 if (prefixes & PREFIX_LOCK)
4269 gen_helper_lock();
4271 /* now check op code */
4272 reswitch:
4273 switch(b) {
4274 case 0x0f:
4275 /**************************/
4276 /* extended op code */
4277 b = cpu_ldub_code(env, s->pc++) | 0x100;
4278 goto reswitch;
4280 /**************************/
4281 /* arith & logic */
4282 case 0x00 ... 0x05:
4283 case 0x08 ... 0x0d:
4284 case 0x10 ... 0x15:
4285 case 0x18 ... 0x1d:
4286 case 0x20 ... 0x25:
4287 case 0x28 ... 0x2d:
4288 case 0x30 ... 0x35:
4289 case 0x38 ... 0x3d:
4291 int op, f, val;
4292 op = (b >> 3) & 7;
4293 f = (b >> 1) & 3;
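/* Classic arith opcode layout: bits 5:3 select the operation
   (add, or, adc, sbb, and, sub, xor, cmp) and bits 2:1 the form
   (0: Ev,Gv  1: Gv,Ev  2: AL/eAX,imm). E.g. b = 0x29 decodes as
   op = 5 (sub), f = 0, i.e. "sub Ev, Gv". */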
4295 if ((b & 1) == 0)
4296 ot = OT_BYTE;
4297 else
4298 ot = dflag + OT_WORD;
4300 switch(f) {
4301 case 0: /* OP Ev, Gv */
4302 modrm = cpu_ldub_code(env, s->pc++);
4303 reg = ((modrm >> 3) & 7) | rex_r;
4304 mod = (modrm >> 6) & 3;
4305 rm = (modrm & 7) | REX_B(s);
4306 if (mod != 3) {
4307 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4308 opreg = OR_TMP0;
4309 } else if (op == OP_XORL && rm == reg) {
4310 xor_zero:
4311 /* xor reg, reg optimisation */
4312 gen_op_movl_T0_0();
4313 set_cc_op(s, CC_OP_LOGICB + ot);
4314 gen_op_mov_reg_T0(ot, reg);
4315 gen_op_update1_cc();
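/* "xor reg,reg" is the canonical zeroing idiom: the register is never
   actually read, and CC_OP_LOGIC on a zero cc_dst yields ZF=PF=1 and
   CF=OF=SF=0, matching hardware. */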
4316 break;
4317 } else {
4318 opreg = rm;
4320 gen_op_mov_TN_reg(ot, 1, reg);
4321 gen_op(s, op, ot, opreg);
4322 break;
4323 case 1: /* OP Gv, Ev */
4324 modrm = cpu_ldub_code(env, s->pc++);
4325 mod = (modrm >> 6) & 3;
4326 reg = ((modrm >> 3) & 7) | rex_r;
4327 rm = (modrm & 7) | REX_B(s);
4328 if (mod != 3) {
4329 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4330 gen_op_ld_T1_A0(ot + s->mem_index);
4331 } else if (op == OP_XORL && rm == reg) {
4332 goto xor_zero;
4333 } else {
4334 gen_op_mov_TN_reg(ot, 1, rm);
4336 gen_op(s, op, ot, reg);
4337 break;
4338 case 2: /* OP A, Iv */
4339 val = insn_get(env, s, ot);
4340 gen_op_movl_T1_im(val);
4341 gen_op(s, op, ot, OR_EAX);
4342 break;
4345 break;
4347 case 0x82:
4348 if (CODE64(s))
4349 goto illegal_op;
4350 case 0x80: /* GRP1 */
4351 case 0x81:
4352 case 0x83:
4354 int val;
4356 if ((b & 1) == 0)
4357 ot = OT_BYTE;
4358 else
4359 ot = dflag + OT_WORD;
4361 modrm = cpu_ldub_code(env, s->pc++);
4362 mod = (modrm >> 6) & 3;
4363 rm = (modrm & 7) | REX_B(s);
4364 op = (modrm >> 3) & 7;
4366 if (mod != 3) {
4367 if (b == 0x83)
4368 s->rip_offset = 1;
4369 else
4370 s->rip_offset = insn_const_size(ot);
4371 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4372 opreg = OR_TMP0;
4373 } else {
4374 opreg = rm;
4377 switch(b) {
4378 default:
4379 case 0x80:
4380 case 0x81:
4381 case 0x82:
4382 val = insn_get(env, s, ot);
4383 break;
4384 case 0x83:
4385 val = (int8_t)insn_get(env, s, OT_BYTE);
4386 break;
4388 gen_op_movl_T1_im(val);
4389 gen_op(s, op, ot, opreg);
4391 break;
4393 /**************************/
4394 /* inc, dec, and other misc arith */
4395 case 0x40 ... 0x47: /* inc Gv */
4396 ot = dflag ? OT_LONG : OT_WORD;
4397 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4398 break;
4399 case 0x48 ... 0x4f: /* dec Gv */
4400 ot = dflag ? OT_LONG : OT_WORD;
4401 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4402 break;
4403 case 0xf6: /* GRP3 */
4404 case 0xf7:
4405 if ((b & 1) == 0)
4406 ot = OT_BYTE;
4407 else
4408 ot = dflag + OT_WORD;
4410 modrm = cpu_ldub_code(env, s->pc++);
4411 mod = (modrm >> 6) & 3;
4412 rm = (modrm & 7) | REX_B(s);
4413 op = (modrm >> 3) & 7;
4414 if (mod != 3) {
4415 if (op == 0)
4416 s->rip_offset = insn_const_size(ot);
4417 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4418 gen_op_ld_T0_A0(ot + s->mem_index);
4419 } else {
4420 gen_op_mov_TN_reg(ot, 0, rm);
4423 switch(op) {
4424 case 0: /* test */
4425 val = insn_get(env, s, ot);
4426 gen_op_movl_T1_im(val);
4427 gen_op_testl_T0_T1_cc();
4428 set_cc_op(s, CC_OP_LOGICB + ot);
4429 break;
4430 case 2: /* not */
4431 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4432 if (mod != 3) {
4433 gen_op_st_T0_A0(ot + s->mem_index);
4434 } else {
4435 gen_op_mov_reg_T0(ot, rm);
4437 break;
4438 case 3: /* neg */
4439 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4440 if (mod != 3) {
4441 gen_op_st_T0_A0(ot + s->mem_index);
4442 } else {
4443 gen_op_mov_reg_T0(ot, rm);
4445 gen_op_update_neg_cc();
4446 set_cc_op(s, CC_OP_SUBB + ot);
4447 break;
4448 case 4: /* mul */
4449 switch(ot) {
4450 case OT_BYTE:
4451 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4452 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4453 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4454 /* XXX: use 32 bit mul which could be faster */
4455 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4456 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4457 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4458 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4459 set_cc_op(s, CC_OP_MULB);
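/* For MUL the lazy flags are: cc_dst = low half of the product,
   cc_src = high half; CC_OP_MUL* later derives CF = OF =
   (high half != 0). For the byte form the mask above keeps AH. */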
4460 break;
4461 case OT_WORD:
4462 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4463 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4464 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4465 /* XXX: use 32 bit mul which could be faster */
4466 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4467 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4468 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4469 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4470 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4471 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4472 set_cc_op(s, CC_OP_MULW);
4473 break;
4474 default:
4475 case OT_LONG:
4476 #ifdef TARGET_X86_64
4477 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4478 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4479 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4480 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4481 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4482 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4483 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4484 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4485 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4486 #else
4488 TCGv_i64 t0, t1;
4489 t0 = tcg_temp_new_i64();
4490 t1 = tcg_temp_new_i64();
4491 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4492 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4493 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4494 tcg_gen_mul_i64(t0, t0, t1);
4495 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4496 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4497 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4498 tcg_gen_shri_i64(t0, t0, 32);
4499 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4500 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4501 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4503 #endif
4504 set_cc_op(s, CC_OP_MULL);
4505 break;
4506 #ifdef TARGET_X86_64
4507 case OT_QUAD:
4508 gen_helper_mulq_EAX_T0(cpu_env, cpu_T[0]);
4509 set_cc_op(s, CC_OP_MULQ);
4510 break;
4511 #endif
4513 break;
4514 case 5: /* imul */
4515 switch(ot) {
4516 case OT_BYTE:
4517 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4518 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4519 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4520 /* XXX: use 32 bit mul which could be faster */
4521 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4522 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4523 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4524 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4525 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4526 set_cc_op(s, CC_OP_MULB);
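/* For IMUL, cc_src = product - sign_extend(low half): it is non-zero
   exactly when the signed result does not fit in the destination
   half, which is the CF = OF condition. */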
4527 break;
4528 case OT_WORD:
4529 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4530 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4531 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4532 /* XXX: use 32 bit mul which could be faster */
4533 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4534 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4535 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4536 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4537 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4538 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4539 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4540 set_cc_op(s, CC_OP_MULW);
4541 break;
4542 default:
4543 case OT_LONG:
4544 #ifdef TARGET_X86_64
4545 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4546 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4547 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4548 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4549 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4550 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4551 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4552 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4553 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4554 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4555 #else
4557 TCGv_i64 t0, t1;
4558 t0 = tcg_temp_new_i64();
4559 t1 = tcg_temp_new_i64();
4560 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4561 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4562 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4563 tcg_gen_mul_i64(t0, t0, t1);
4564 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4565 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4566 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4567 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4568 tcg_gen_shri_i64(t0, t0, 32);
4569 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4570 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4571 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4573 #endif
4574 set_cc_op(s, CC_OP_MULL);
4575 break;
4576 #ifdef TARGET_X86_64
4577 case OT_QUAD:
4578 gen_helper_imulq_EAX_T0(cpu_env, cpu_T[0]);
4579 set_cc_op(s, CC_OP_MULQ);
4580 break;
4581 #endif
4583 break;
4584 case 6: /* div */
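/* Division can raise #DE, so EIP is synced with gen_jmp_im before
   each helper call; an exception raised inside the helper then
   reports the address of the div instruction itself. */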
4585 switch(ot) {
4586 case OT_BYTE:
4587 gen_jmp_im(pc_start - s->cs_base);
4588 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4589 break;
4590 case OT_WORD:
4591 gen_jmp_im(pc_start - s->cs_base);
4592 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4593 break;
4594 default:
4595 case OT_LONG:
4596 gen_jmp_im(pc_start - s->cs_base);
4597 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4598 break;
4599 #ifdef TARGET_X86_64
4600 case OT_QUAD:
4601 gen_jmp_im(pc_start - s->cs_base);
4602 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4603 break;
4604 #endif
4606 break;
4607 case 7: /* idiv */
4608 switch(ot) {
4609 case OT_BYTE:
4610 gen_jmp_im(pc_start - s->cs_base);
4611 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4612 break;
4613 case OT_WORD:
4614 gen_jmp_im(pc_start - s->cs_base);
4615 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4616 break;
4617 default:
4618 case OT_LONG:
4619 gen_jmp_im(pc_start - s->cs_base);
4620 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4621 break;
4622 #ifdef TARGET_X86_64
4623 case OT_QUAD:
4624 gen_jmp_im(pc_start - s->cs_base);
4625 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4626 break;
4627 #endif
4629 break;
4630 default:
4631 goto illegal_op;
4633 break;
4635 case 0xfe: /* GRP4 */
4636 case 0xff: /* GRP5 */
4637 if ((b & 1) == 0)
4638 ot = OT_BYTE;
4639 else
4640 ot = dflag + OT_WORD;
4642 modrm = cpu_ldub_code(env, s->pc++);
4643 mod = (modrm >> 6) & 3;
4644 rm = (modrm & 7) | REX_B(s);
4645 op = (modrm >> 3) & 7;
4646 if (op >= 2 && b == 0xfe) {
4647 goto illegal_op;
4649 if (CODE64(s)) {
4650 if (op == 2 || op == 4) {
4651 /* operand size for jumps is 64 bit */
4652 ot = OT_QUAD;
4653 } else if (op == 3 || op == 5) {
4654 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4655 } else if (op == 6) {
4656 /* default push size is 64 bit */
4657 ot = dflag ? OT_QUAD : OT_WORD;
4660 if (mod != 3) {
4661 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4662 if (op >= 2 && op != 3 && op != 5)
4663 gen_op_ld_T0_A0(ot + s->mem_index);
4664 } else {
4665 gen_op_mov_TN_reg(ot, 0, rm);
4668 switch(op) {
4669 case 0: /* inc Ev */
4670 if (mod != 3)
4671 opreg = OR_TMP0;
4672 else
4673 opreg = rm;
4674 gen_inc(s, ot, opreg, 1);
4675 break;
4676 case 1: /* dec Ev */
4677 if (mod != 3)
4678 opreg = OR_TMP0;
4679 else
4680 opreg = rm;
4681 gen_inc(s, ot, opreg, -1);
4682 break;
4683 case 2: /* call Ev */
4684 /* XXX: optimize if memory (no 'and' is necessary) */
4685 if (s->dflag == 0)
4686 gen_op_andl_T0_ffff();
4687 next_eip = s->pc - s->cs_base;
4688 gen_movtl_T1_im(next_eip);
4689 gen_push_T1(s);
4690 gen_op_jmp_T0();
4691 gen_eob(s);
4692 break;
4693 case 3: /* lcall Ev */
4694 gen_op_ld_T1_A0(ot + s->mem_index);
4695 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4696 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4697 do_lcall:
4698 if (s->pe && !s->vm86) {
4699 gen_update_cc_op(s);
4700 gen_jmp_im(pc_start - s->cs_base);
4701 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4702 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4703 tcg_const_i32(dflag),
4704 tcg_const_i32(s->pc - pc_start));
4705 } else {
4706 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4707 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4708 tcg_const_i32(dflag),
4709 tcg_const_i32(s->pc - s->cs_base));
4711 gen_eob(s);
4712 break;
4713 case 4: /* jmp Ev */
4714 if (s->dflag == 0)
4715 gen_op_andl_T0_ffff();
4716 gen_op_jmp_T0();
4717 gen_eob(s);
4718 break;
4719 case 5: /* ljmp Ev */
4720 gen_op_ld_T1_A0(ot + s->mem_index);
4721 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4722 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4723 do_ljmp:
4724 if (s->pe && !s->vm86) {
4725 gen_update_cc_op(s);
4726 gen_jmp_im(pc_start - s->cs_base);
4727 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4728 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4729 tcg_const_i32(s->pc - pc_start));
4730 } else {
4731 gen_op_movl_seg_T0_vm(R_CS);
4732 gen_op_movl_T0_T1();
4733 gen_op_jmp_T0();
4735 gen_eob(s);
4736 break;
4737 case 6: /* push Ev */
4738 gen_push_T0(s);
4739 break;
4740 default:
4741 goto illegal_op;
4743 break;
4745 case 0x84: /* test Ev, Gv */
4746 case 0x85:
4747 if ((b & 1) == 0)
4748 ot = OT_BYTE;
4749 else
4750 ot = dflag + OT_WORD;
4752 modrm = cpu_ldub_code(env, s->pc++);
4753 reg = ((modrm >> 3) & 7) | rex_r;
4755 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4756 gen_op_mov_TN_reg(ot, 1, reg);
4757 gen_op_testl_T0_T1_cc();
4758 set_cc_op(s, CC_OP_LOGICB + ot);
4759 break;
4761 case 0xa8: /* test eAX, Iv */
4762 case 0xa9:
4763 if ((b & 1) == 0)
4764 ot = OT_BYTE;
4765 else
4766 ot = dflag + OT_WORD;
4767 val = insn_get(env, s, ot);
4769 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4770 gen_op_movl_T1_im(val);
4771 gen_op_testl_T0_T1_cc();
4772 set_cc_op(s, CC_OP_LOGICB + ot);
4773 break;
4775 case 0x98: /* CWDE/CBW */
4776 #ifdef TARGET_X86_64
4777 if (dflag == 2) {
4778 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4779 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4780 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4781 } else
4782 #endif
4783 if (dflag == 1) {
4784 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4785 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4786 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4787 } else {
4788 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4789 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4790 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4792 break;
4793 case 0x99: /* CDQ/CWD */
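/* CWD/CDQ/CQO replicate the sign bit of rAX into rDX; an arithmetic
   shift right by (width - 1) does exactly that, e.g. for CDQ,
   EAX = 0x80000000 gives EDX = 0xffffffff. */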
4794 #ifdef TARGET_X86_64
4795 if (dflag == 2) {
4796 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4797 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4798 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4799 } else
4800 #endif
4801 if (dflag == 1) {
4802 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4803 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4804 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4805 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4806 } else {
4807 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4808 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4809 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4810 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4812 break;
4813 case 0x1af: /* imul Gv, Ev */
4814 case 0x69: /* imul Gv, Ev, I */
4815 case 0x6b:
4816 ot = dflag + OT_WORD;
4817 modrm = cpu_ldub_code(env, s->pc++);
4818 reg = ((modrm >> 3) & 7) | rex_r;
4819 if (b == 0x69)
4820 s->rip_offset = insn_const_size(ot);
4821 else if (b == 0x6b)
4822 s->rip_offset = 1;
4823 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4824 if (b == 0x69) {
4825 val = insn_get(env, s, ot);
4826 gen_op_movl_T1_im(val);
4827 } else if (b == 0x6b) {
4828 val = (int8_t)insn_get(env, s, OT_BYTE);
4829 gen_op_movl_T1_im(val);
4830 } else {
4831 gen_op_mov_TN_reg(ot, 1, reg);
4834 #ifdef TARGET_X86_64
4835 if (ot == OT_QUAD) {
4836 gen_helper_imulq_T0_T1(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
4837 } else
4838 #endif
4839 if (ot == OT_LONG) {
4840 #ifdef TARGET_X86_64
4841 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4842 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4843 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4844 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4845 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4846 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4847 #else
4849 TCGv_i64 t0, t1;
4850 t0 = tcg_temp_new_i64();
4851 t1 = tcg_temp_new_i64();
4852 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4853 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4854 tcg_gen_mul_i64(t0, t0, t1);
4855 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4856 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4857 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4858 tcg_gen_shri_i64(t0, t0, 32);
4859 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4860 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4862 #endif
4863 } else {
4864 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4865 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4866 /* XXX: use 32 bit mul which could be faster */
4867 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4868 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4869 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4870 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4872 gen_op_mov_reg_T0(ot, reg);
4873 set_cc_op(s, CC_OP_MULB + ot);
4874 break;
4875 case 0x1c0:
4876 case 0x1c1: /* xadd Ev, Gv */
4877 if ((b & 1) == 0)
4878 ot = OT_BYTE;
4879 else
4880 ot = dflag + OT_WORD;
4881 modrm = cpu_ldub_code(env, s->pc++);
4882 reg = ((modrm >> 3) & 7) | rex_r;
4883 mod = (modrm >> 6) & 3;
4884 if (mod == 3) {
4885 rm = (modrm & 7) | REX_B(s);
4886 gen_op_mov_TN_reg(ot, 0, reg);
4887 gen_op_mov_TN_reg(ot, 1, rm);
4888 gen_op_addl_T0_T1();
4889 gen_op_mov_reg_T1(ot, reg);
4890 gen_op_mov_reg_T0(ot, rm);
4891 } else {
4892 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4893 gen_op_mov_TN_reg(ot, 0, reg);
4894 gen_op_ld_T1_A0(ot + s->mem_index);
4895 gen_op_addl_T0_T1();
4896 gen_op_st_T0_A0(ot + s->mem_index);
4897 gen_op_mov_reg_T1(ot, reg);
4899 gen_op_update2_cc();
4900 set_cc_op(s, CC_OP_ADDB + ot);
4901 break;
4902 case 0x1b0:
4903 case 0x1b1: /* cmpxchg Ev, Gv */
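/* cmpxchg: compute t2 = EAX - dest for the flags (CC_OP_SUB), then
   branch: if equal, store the source (t1) into dest; otherwise load
   dest into EAX. The memory form stores on both paths so a faulting
   store is taken before EAX is clobbered (see the note below). */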
4905 int label1, label2;
4906 TCGv t0, t1, t2, a0;
4908 if ((b & 1) == 0)
4909 ot = OT_BYTE;
4910 else
4911 ot = dflag + OT_WORD;
4912 modrm = cpu_ldub_code(env, s->pc++);
4913 reg = ((modrm >> 3) & 7) | rex_r;
4914 mod = (modrm >> 6) & 3;
4915 t0 = tcg_temp_local_new();
4916 t1 = tcg_temp_local_new();
4917 t2 = tcg_temp_local_new();
4918 a0 = tcg_temp_local_new();
4919 gen_op_mov_v_reg(ot, t1, reg);
4920 if (mod == 3) {
4921 rm = (modrm & 7) | REX_B(s);
4922 gen_op_mov_v_reg(ot, t0, rm);
4923 } else {
4924 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4925 tcg_gen_mov_tl(a0, cpu_A0);
4926 gen_op_ld_v(ot + s->mem_index, t0, a0);
4927 rm = 0; /* avoid warning */
4929 label1 = gen_new_label();
4930 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
4931 gen_extu(ot, t2);
4932 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4933 label2 = gen_new_label();
4934 if (mod == 3) {
4935 gen_op_mov_reg_v(ot, R_EAX, t0);
4936 tcg_gen_br(label2);
4937 gen_set_label(label1);
4938 gen_op_mov_reg_v(ot, rm, t1);
4939 } else {
4940 /* perform no-op store cycle like physical cpu; must be
4941 before changing accumulator to ensure idempotency if
4942 the store faults and the instruction is restarted */
4943 gen_op_st_v(ot + s->mem_index, t0, a0);
4944 gen_op_mov_reg_v(ot, R_EAX, t0);
4945 tcg_gen_br(label2);
4946 gen_set_label(label1);
4947 gen_op_st_v(ot + s->mem_index, t1, a0);
4949 gen_set_label(label2);
4950 tcg_gen_mov_tl(cpu_cc_src, t0);
4951 tcg_gen_mov_tl(cpu_cc_dst, t2);
4952 set_cc_op(s, CC_OP_SUBB + ot);
4953 tcg_temp_free(t0);
4954 tcg_temp_free(t1);
4955 tcg_temp_free(t2);
4956 tcg_temp_free(a0);
4958 break;
4959 case 0x1c7: /* cmpxchg8b */
4960 modrm = cpu_ldub_code(env, s->pc++);
4961 mod = (modrm >> 6) & 3;
4962 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4963 goto illegal_op;
4964 #ifdef TARGET_X86_64
4965 if (dflag == 2) {
4966 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4967 goto illegal_op;
4968 gen_jmp_im(pc_start - s->cs_base);
4969 gen_update_cc_op(s);
4970 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4971 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
4972 } else
4973 #endif
4975 if (!(s->cpuid_features & CPUID_CX8))
4976 goto illegal_op;
4977 gen_jmp_im(pc_start - s->cs_base);
4978 gen_update_cc_op(s);
4979 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
4980 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
4982 set_cc_op(s, CC_OP_EFLAGS);
4983 break;
4985 /**************************/
4986 /* push/pop */
4987 case 0x50 ... 0x57: /* push */
4988 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4989 gen_push_T0(s);
4990 break;
4991 case 0x58 ... 0x5f: /* pop */
4992 if (CODE64(s)) {
4993 ot = dflag ? OT_QUAD : OT_WORD;
4994 } else {
4995 ot = dflag + OT_WORD;
4997 gen_pop_T0(s);
4998 /* NOTE: order is important for pop %sp */
4999 gen_pop_update(s);
5000 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5001 break;
5002 case 0x60: /* pusha */
5003 if (CODE64(s))
5004 goto illegal_op;
5005 gen_pusha(s);
5006 break;
5007 case 0x61: /* popa */
5008 if (CODE64(s))
5009 goto illegal_op;
5010 gen_popa(s);
5011 break;
5012 case 0x68: /* push Iv */
5013 case 0x6a:
5014 if (CODE64(s)) {
5015 ot = dflag ? OT_QUAD : OT_WORD;
5016 } else {
5017 ot = dflag + OT_WORD;
5019 if (b == 0x68)
5020 val = insn_get(env, s, ot);
5021 else
5022 val = (int8_t)insn_get(env, s, OT_BYTE);
5023 gen_op_movl_T0_im(val);
5024 gen_push_T0(s);
5025 break;
5026 case 0x8f: /* pop Ev */
5027 if (CODE64(s)) {
5028 ot = dflag ? OT_QUAD : OT_WORD;
5029 } else {
5030 ot = dflag + OT_WORD;
5032 modrm = cpu_ldub_code(env, s->pc++);
5033 mod = (modrm >> 6) & 3;
5034 gen_pop_T0(s);
5035 if (mod == 3) {
5036 /* NOTE: order is important for pop %sp */
5037 gen_pop_update(s);
5038 rm = (modrm & 7) | REX_B(s);
5039 gen_op_mov_reg_T0(ot, rm);
5040 } else {
5041 /* NOTE: order is important too for MMU exceptions */
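/* popl_esp_hack: hardware computes the destination EA of "pop Ev"
   with the already-incremented ESP; since gen_pop_update only runs
   afterwards, gen_lea_modrm compensates by adding this value to
   ESP-based displacements. */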
5042 s->popl_esp_hack = 1 << ot;
5043 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5044 s->popl_esp_hack = 0;
5045 gen_pop_update(s);
5047 break;
5048 case 0xc8: /* enter */
5050 int level;
5051 val = cpu_lduw_code(env, s->pc);
5052 s->pc += 2;
5053 level = cpu_ldub_code(env, s->pc++);
5054 gen_enter(s, val, level);
5056 break;
5057 case 0xc9: /* leave */
5058 /* XXX: exception not precise (ESP is updated before potential exception) */
5059 if (CODE64(s)) {
5060 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5061 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5062 } else if (s->ss32) {
5063 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5064 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5065 } else {
5066 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5067 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5069 gen_pop_T0(s);
5070 if (CODE64(s)) {
5071 ot = dflag ? OT_QUAD : OT_WORD;
5072 } else {
5073 ot = dflag + OT_WORD;
5075 gen_op_mov_reg_T0(ot, R_EBP);
5076 gen_pop_update(s);
5077 break;
5078 case 0x06: /* push es */
5079 case 0x0e: /* push cs */
5080 case 0x16: /* push ss */
5081 case 0x1e: /* push ds */
5082 if (CODE64(s))
5083 goto illegal_op;
5084 gen_op_movl_T0_seg(b >> 3);
5085 gen_push_T0(s);
5086 break;
5087 case 0x1a0: /* push fs */
5088 case 0x1a8: /* push gs */
5089 gen_op_movl_T0_seg((b >> 3) & 7);
5090 gen_push_T0(s);
5091 break;
5092 case 0x07: /* pop es */
5093 case 0x17: /* pop ss */
5094 case 0x1f: /* pop ds */
5095 if (CODE64(s))
5096 goto illegal_op;
5097 reg = b >> 3;
5098 gen_pop_T0(s);
5099 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5100 gen_pop_update(s);
5101 if (reg == R_SS) {
5102 /* if reg == SS, inhibit interrupts/trace. */
5103 /* If several instructions disable interrupts, only the
5104 _first_ does it */
5105 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5106 gen_helper_set_inhibit_irq(cpu_env);
5107 s->tf = 0;
5109 if (s->is_jmp) {
5110 gen_jmp_im(s->pc - s->cs_base);
5111 gen_eob(s);
5113 break;
5114 case 0x1a1: /* pop fs */
5115 case 0x1a9: /* pop gs */
5116 gen_pop_T0(s);
5117 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5118 gen_pop_update(s);
5119 if (s->is_jmp) {
5120 gen_jmp_im(s->pc - s->cs_base);
5121 gen_eob(s);
5123 break;
5125 /**************************/
5126 /* mov */
5127 case 0x88:
5128 case 0x89: /* mov Gv, Ev */
5129 if ((b & 1) == 0)
5130 ot = OT_BYTE;
5131 else
5132 ot = dflag + OT_WORD;
5133 modrm = cpu_ldub_code(env, s->pc++);
5134 reg = ((modrm >> 3) & 7) | rex_r;
5136 /* generate a generic store */
5137 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5138 break;
5139 case 0xc6:
5140 case 0xc7: /* mov Ev, Iv */
5141 if ((b & 1) == 0)
5142 ot = OT_BYTE;
5143 else
5144 ot = dflag + OT_WORD;
5145 modrm = cpu_ldub_code(env, s->pc++);
5146 mod = (modrm >> 6) & 3;
5147 if (mod != 3) {
5148 s->rip_offset = insn_const_size(ot);
5149 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5151 val = insn_get(env, s, ot);
5152 gen_op_movl_T0_im(val);
5153 if (mod != 3)
5154 gen_op_st_T0_A0(ot + s->mem_index);
5155 else
5156 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5157 break;
5158 case 0x8a:
5159 case 0x8b: /* mov Ev, Gv */
5160 if ((b & 1) == 0)
5161 ot = OT_BYTE;
5162 else
5163 ot = OT_WORD + dflag;
5164 modrm = cpu_ldub_code(env, s->pc++);
5165 reg = ((modrm >> 3) & 7) | rex_r;
5167 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5168 gen_op_mov_reg_T0(ot, reg);
5169 break;
5170 case 0x8e: /* mov seg, Gv */
5171 modrm = cpu_ldub_code(env, s->pc++);
5172 reg = (modrm >> 3) & 7;
5173 if (reg >= 6 || reg == R_CS)
5174 goto illegal_op;
5175 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
5176 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5177 if (reg == R_SS) {
5178 /* if reg == SS, inhibit interrupts/trace */
5179 /* If several instructions disable interrupts, only the
5180 _first_ does it */
5181 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5182 gen_helper_set_inhibit_irq(cpu_env);
5183 s->tf = 0;
5185 if (s->is_jmp) {
5186 gen_jmp_im(s->pc - s->cs_base);
5187 gen_eob(s);
5189 break;
5190 case 0x8c: /* mov Gv, seg */
5191 modrm = cpu_ldub_code(env, s->pc++);
5192 reg = (modrm >> 3) & 7;
5193 mod = (modrm >> 6) & 3;
5194 if (reg >= 6)
5195 goto illegal_op;
5196 gen_op_movl_T0_seg(reg);
5197 if (mod == 3)
5198 ot = OT_WORD + dflag;
5199 else
5200 ot = OT_WORD;
5201 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5202 break;
5204 case 0x1b6: /* movzbS Gv, Eb */
5205 case 0x1b7: /* movzwS Gv, Eb */
5206 case 0x1be: /* movsbS Gv, Eb */
5207 case 0x1bf: /* movswS Gv, Eb */
5209 int d_ot;
5210 /* d_ot is the size of destination */
5211 d_ot = dflag + OT_WORD;
5212 /* ot is the size of source */
5213 ot = (b & 1) + OT_BYTE;
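/* Bit 0 of the opcode selects the source width (byte/word) and bit 3
   (0x08) sign vs. zero extension: 0fb6/0fb7 = movzx, 0fbe/0fbf =
   movsx, hence the "switch(ot | (b & 8))" below. */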
5214 modrm = cpu_ldub_code(env, s->pc++);
5215 reg = ((modrm >> 3) & 7) | rex_r;
5216 mod = (modrm >> 6) & 3;
5217 rm = (modrm & 7) | REX_B(s);
5219 if (mod == 3) {
5220 gen_op_mov_TN_reg(ot, 0, rm);
5221 switch(ot | (b & 8)) {
5222 case OT_BYTE:
5223 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5224 break;
5225 case OT_BYTE | 8:
5226 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5227 break;
5228 case OT_WORD:
5229 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5230 break;
5231 default:
5232 case OT_WORD | 8:
5233 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5234 break;
5236 gen_op_mov_reg_T0(d_ot, reg);
5237 } else {
5238 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5239 if (b & 8) {
5240 gen_op_lds_T0_A0(ot + s->mem_index);
5241 } else {
5242 gen_op_ldu_T0_A0(ot + s->mem_index);
5244 gen_op_mov_reg_T0(d_ot, reg);
5247 break;
5249 case 0x8d: /* lea */
5250 ot = dflag + OT_WORD;
5251 modrm = cpu_ldub_code(env, s->pc++);
5252 mod = (modrm >> 6) & 3;
5253 if (mod == 3)
5254 goto illegal_op;
5255 reg = ((modrm >> 3) & 7) | rex_r;
5256 /* we must ensure that no segment is added */
5257 s->override = -1;
5258 val = s->addseg;
5259 s->addseg = 0;
5260 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5261 s->addseg = val;
5262 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5263 break;
5265 case 0xa0: /* mov EAX, Ov */
5266 case 0xa1:
5267 case 0xa2: /* mov Ov, EAX */
5268 case 0xa3:
5270 target_ulong offset_addr;
5272 if ((b & 1) == 0)
5273 ot = OT_BYTE;
5274 else
5275 ot = dflag + OT_WORD;
5276 #ifdef TARGET_X86_64
5277 if (s->aflag == 2) {
5278 offset_addr = cpu_ldq_code(env, s->pc);
5279 s->pc += 8;
5280 gen_op_movq_A0_im(offset_addr);
5281 } else
5282 #endif
5284 if (s->aflag) {
5285 offset_addr = insn_get(env, s, OT_LONG);
5286 } else {
5287 offset_addr = insn_get(env, s, OT_WORD);
5289 gen_op_movl_A0_im(offset_addr);
5291 gen_add_A0_ds_seg(s);
5292 if ((b & 2) == 0) {
5293 gen_op_ld_T0_A0(ot + s->mem_index);
5294 gen_op_mov_reg_T0(ot, R_EAX);
5295 } else {
5296 gen_op_mov_TN_reg(ot, 0, R_EAX);
5297 gen_op_st_T0_A0(ot + s->mem_index);
5300 break;
5301 case 0xd7: /* xlat */
5302 #ifdef TARGET_X86_64
5303 if (s->aflag == 2) {
5304 gen_op_movq_A0_reg(R_EBX);
5305 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5306 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5307 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5308 } else
5309 #endif
5311 gen_op_movl_A0_reg(R_EBX);
5312 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5313 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5314 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5315 if (s->aflag == 0)
5316 gen_op_andl_A0_ffff();
5317 else
5318 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5320 gen_add_A0_ds_seg(s);
5321 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5322 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5323 break;
5324 case 0xb0 ... 0xb7: /* mov R, Ib */
5325 val = insn_get(env, s, OT_BYTE);
5326 gen_op_movl_T0_im(val);
5327 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5328 break;
5329 case 0xb8 ... 0xbf: /* mov R, Iv */
5330 #ifdef TARGET_X86_64
5331 if (dflag == 2) {
5332 uint64_t tmp;
5333 /* 64 bit case */
5334 tmp = cpu_ldq_code(env, s->pc);
5335 s->pc += 8;
5336 reg = (b & 7) | REX_B(s);
5337 gen_movtl_T0_im(tmp);
5338 gen_op_mov_reg_T0(OT_QUAD, reg);
5339 } else
5340 #endif
5342 ot = dflag ? OT_LONG : OT_WORD;
5343 val = insn_get(env, s, ot);
5344 reg = (b & 7) | REX_B(s);
5345 gen_op_movl_T0_im(val);
5346 gen_op_mov_reg_T0(ot, reg);
5348 break;
5350 case 0x91 ... 0x97: /* xchg R, EAX */
5351 do_xchg_reg_eax:
5352 ot = dflag + OT_WORD;
5353 reg = (b & 7) | REX_B(s);
5354 rm = R_EAX;
5355 goto do_xchg_reg;
5356 case 0x86:
5357 case 0x87: /* xchg Ev, Gv */
5358 if ((b & 1) == 0)
5359 ot = OT_BYTE;
5360 else
5361 ot = dflag + OT_WORD;
5362 modrm = cpu_ldub_code(env, s->pc++);
5363 reg = ((modrm >> 3) & 7) | rex_r;
5364 mod = (modrm >> 6) & 3;
5365 if (mod == 3) {
5366 rm = (modrm & 7) | REX_B(s);
5367 do_xchg_reg:
5368 gen_op_mov_TN_reg(ot, 0, reg);
5369 gen_op_mov_TN_reg(ot, 1, rm);
5370 gen_op_mov_reg_T0(ot, rm);
5371 gen_op_mov_reg_T1(ot, reg);
5372 } else {
5373 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5374 gen_op_mov_TN_reg(ot, 0, reg);
5375 /* for xchg, lock is implicit */
5376 if (!(prefixes & PREFIX_LOCK))
5377 gen_helper_lock();
5378 gen_op_ld_T1_A0(ot + s->mem_index);
5379 gen_op_st_T0_A0(ot + s->mem_index);
5380 if (!(prefixes & PREFIX_LOCK))
5381 gen_helper_unlock();
5382 gen_op_mov_reg_T1(ot, reg);
5384 break;
5385 case 0xc4: /* les Gv */
5386 if (CODE64(s))
5387 goto illegal_op;
5388 op = R_ES;
5389 goto do_lxx;
5390 case 0xc5: /* lds Gv */
5391 if (CODE64(s))
5392 goto illegal_op;
5393 op = R_DS;
5394 goto do_lxx;
5395 case 0x1b2: /* lss Gv */
5396 op = R_SS;
5397 goto do_lxx;
5398 case 0x1b4: /* lfs Gv */
5399 op = R_FS;
5400 goto do_lxx;
5401 case 0x1b5: /* lgs Gv */
5402 op = R_GS;
5403 do_lxx:
5404 ot = dflag ? OT_LONG : OT_WORD;
5405 modrm = cpu_ldub_code(env, s->pc++);
5406 reg = ((modrm >> 3) & 7) | rex_r;
5407 mod = (modrm >> 6) & 3;
5408 if (mod == 3)
5409 goto illegal_op;
5410 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5411 gen_op_ld_T1_A0(ot + s->mem_index);
5412 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5413 /* load the segment first to handle exceptions properly */
5414 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5415 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5416 /* then put the data */
5417 gen_op_mov_reg_T1(ot, reg);
5418 if (s->is_jmp) {
5419 gen_jmp_im(s->pc - s->cs_base);
5420 gen_eob(s);
5422 break;
5424 /************************/
5425 /* shifts */
5426 case 0xc0:
5427 case 0xc1:
5428 /* shift Ev,Ib */
5429 shift = 2;
5430 grp2:
5432 if ((b & 1) == 0)
5433 ot = OT_BYTE;
5434 else
5435 ot = dflag + OT_WORD;
5437 modrm = cpu_ldub_code(env, s->pc++);
5438 mod = (modrm >> 6) & 3;
5439 op = (modrm >> 3) & 7;
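/* GRP2: the ModRM reg field selects the shift/rotate:
   0=rol 1=ror 2=rcl 3=rcr 4=shl 5=shr 6=shl (undocumented alias)
   7=sar. */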
5441 if (mod != 3) {
5442 if (shift == 2) {
5443 s->rip_offset = 1;
5445 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5446 opreg = OR_TMP0;
5447 } else {
5448 opreg = (modrm & 7) | REX_B(s);
5451 /* simpler op */
5452 if (shift == 0) {
5453 gen_shift(s, op, ot, opreg, OR_ECX);
5454 } else {
5455 if (shift == 2) {
5456 shift = cpu_ldub_code(env, s->pc++);
5458 gen_shifti(s, op, ot, opreg, shift);
5461 break;
5462 case 0xd0:
5463 case 0xd1:
5464 /* shift Ev,1 */
5465 shift = 1;
5466 goto grp2;
5467 case 0xd2:
5468 case 0xd3:
5469 /* shift Ev,cl */
5470 shift = 0;
5471 goto grp2;
5473 case 0x1a4: /* shld imm */
5474 op = 0;
5475 shift = 1;
5476 goto do_shiftd;
5477 case 0x1a5: /* shld cl */
5478 op = 0;
5479 shift = 0;
5480 goto do_shiftd;
5481 case 0x1ac: /* shrd imm */
5482 op = 1;
5483 shift = 1;
5484 goto do_shiftd;
5485 case 0x1ad: /* shrd cl */
5486 op = 1;
5487 shift = 0;
5488 do_shiftd:
5489 ot = dflag + OT_WORD;
5490 modrm = cpu_ldub_code(env, s->pc++);
5491 mod = (modrm >> 6) & 3;
5492 rm = (modrm & 7) | REX_B(s);
5493 reg = ((modrm >> 3) & 7) | rex_r;
5494 if (mod != 3) {
5495 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5496 opreg = OR_TMP0;
5497 } else {
5498 opreg = rm;
5500 gen_op_mov_TN_reg(ot, 1, reg);
5502 if (shift) {
5503 val = cpu_ldub_code(env, s->pc++);
5504 tcg_gen_movi_tl(cpu_T3, val);
5505 } else {
5506 tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
5508 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5509 break;
5511 /************************/
5512 /* floats */
5513 case 0xd8 ... 0xdf:
5514 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5515 /* if CR0.EM or CR0.TS is set, generate an FPU exception */
5516 /* XXX: what to do if illegal op? */
5517 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5518 break;
5520 modrm = cpu_ldub_code(env, s->pc++);
5521 mod = (modrm >> 6) & 3;
5522 rm = modrm & 7;
5523 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
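/* The 6-bit FPU op index combines the low opcode bits with the ModRM
   reg field: op = ((b & 7) << 3) | reg. E.g. d9 /5 gives op = 0x0d,
   the constant-load group handled below. */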
5524 if (mod != 3) {
5525 /* memory op */
5526 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
5527 switch(op) {
5528 case 0x00 ... 0x07: /* fxxxs */
5529 case 0x10 ... 0x17: /* fixxxl */
5530 case 0x20 ... 0x27: /* fxxxl */
5531 case 0x30 ... 0x37: /* fixxx */
5533 int op1;
5534 op1 = op & 7;
5536 switch(op >> 4) {
5537 case 0:
5538 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5539 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5540 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5541 break;
5542 case 1:
5543 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5544 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5545 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5546 break;
5547 case 2:
5548 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5549 (s->mem_index >> 2) - 1);
5550 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5551 break;
5552 case 3:
5553 default:
5554 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5555 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5556 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5557 break;
5560 gen_helper_fp_arith_ST0_FT0(op1);
5561 if (op1 == 3) {
5562 /* fcomp needs pop */
5563 gen_helper_fpop(cpu_env);
5566 break;
5567 case 0x08: /* flds */
5568 case 0x0a: /* fsts */
5569 case 0x0b: /* fstps */
5570 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5571 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5572 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5573 switch(op & 7) {
5574 case 0:
5575 switch(op >> 4) {
5576 case 0:
5577 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5578 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5579 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5580 break;
5581 case 1:
5582 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5583 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5584 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5585 break;
5586 case 2:
5587 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5588 (s->mem_index >> 2) - 1);
5589 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5590 break;
5591 case 3:
5592 default:
5593 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5594 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5595 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5596 break;
5598 break;
5599 case 1:
5600 /* XXX: the corresponding CPUID bit must be tested! */
5601 switch(op >> 4) {
5602 case 1:
5603 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5604 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5605 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5606 break;
5607 case 2:
5608 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5609 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5610 (s->mem_index >> 2) - 1);
5611 break;
5612 case 3:
5613 default:
5614 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5615 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5616 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5617 break;
5619 gen_helper_fpop(cpu_env);
5620 break;
5621 default:
5622 switch(op >> 4) {
5623 case 0:
5624 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5625 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5626 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5627 break;
5628 case 1:
5629 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5630 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5631 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5632 break;
5633 case 2:
5634 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5635 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5636 (s->mem_index >> 2) - 1);
5637 break;
5638 case 3:
5639 default:
5640 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5641 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5642 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5643 break;
5645 if ((op & 7) == 3)
5646 gen_helper_fpop(cpu_env);
5647 break;
5649 break;
5650 case 0x0c: /* fldenv mem */
5651 gen_update_cc_op(s);
5652 gen_jmp_im(pc_start - s->cs_base);
5653 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5654 break;
5655 case 0x0d: /* fldcw mem */
5656 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5657 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5658 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5659 break;
5660 case 0x0e: /* fnstenv mem */
5661 gen_update_cc_op(s);
5662 gen_jmp_im(pc_start - s->cs_base);
5663 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5664 break;
5665 case 0x0f: /* fnstcw mem */
5666 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5667 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5668 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5669 break;
5670 case 0x1d: /* fldt mem */
5671 gen_update_cc_op(s);
5672 gen_jmp_im(pc_start - s->cs_base);
5673 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5674 break;
5675 case 0x1f: /* fstpt mem */
5676 gen_update_cc_op(s);
5677 gen_jmp_im(pc_start - s->cs_base);
5678 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5679 gen_helper_fpop(cpu_env);
5680 break;
5681 case 0x2c: /* frstor mem */
5682 gen_update_cc_op(s);
5683 gen_jmp_im(pc_start - s->cs_base);
5684 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5685 break;
5686 case 0x2e: /* fnsave mem */
5687 gen_update_cc_op(s);
5688 gen_jmp_im(pc_start - s->cs_base);
5689 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(s->dflag));
5690 break;
5691 case 0x2f: /* fnstsw mem */
5692 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5693 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5694 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5695 break;
5696 case 0x3c: /* fbld */
5697 gen_update_cc_op(s);
5698 gen_jmp_im(pc_start - s->cs_base);
5699 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5700 break;
5701 case 0x3e: /* fbstp */
5702 gen_update_cc_op(s);
5703 gen_jmp_im(pc_start - s->cs_base);
5704 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5705 gen_helper_fpop(cpu_env);
5706 break;
5707 case 0x3d: /* fildll */
5708 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5709 (s->mem_index >> 2) - 1);
5710 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5711 break;
5712 case 0x3f: /* fistpll */
5713 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5714 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5715 (s->mem_index >> 2) - 1);
5716 gen_helper_fpop(cpu_env);
5717 break;
5718 default:
5719 goto illegal_op;
5721 } else {
5722 /* register float ops */
5723 opreg = rm;
5725 switch(op) {
5726 case 0x08: /* fld sti */
5727 gen_helper_fpush(cpu_env);
5728 gen_helper_fmov_ST0_STN(cpu_env,
5729 tcg_const_i32((opreg + 1) & 7));
5730 break;
5731 case 0x09: /* fxchg sti */
5732 case 0x29: /* fxchg4 sti, undocumented op */
5733 case 0x39: /* fxchg7 sti, undocumented op */
5734 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5735 break;
5736 case 0x0a: /* grp d9/2 */
5737 switch(rm) {
5738 case 0: /* fnop */
5739 /* check exceptions (FreeBSD FPU probe) */
5740 gen_update_cc_op(s);
5741 gen_jmp_im(pc_start - s->cs_base);
5742 gen_helper_fwait(cpu_env);
5743 break;
5744 default:
5745 goto illegal_op;
5747 break;
5748 case 0x0c: /* grp d9/4 */
5749 switch(rm) {
5750 case 0: /* fchs */
5751 gen_helper_fchs_ST0(cpu_env);
5752 break;
5753 case 1: /* fabs */
5754 gen_helper_fabs_ST0(cpu_env);
5755 break;
5756 case 4: /* ftst */
5757 gen_helper_fldz_FT0(cpu_env);
5758 gen_helper_fcom_ST0_FT0(cpu_env);
5759 break;
5760 case 5: /* fxam */
5761 gen_helper_fxam_ST0(cpu_env);
5762 break;
5763 default:
5764 goto illegal_op;
5766 break;
5767 case 0x0d: /* grp d9/5 */
5769 switch(rm) {
5770 case 0:
5771 gen_helper_fpush(cpu_env);
5772 gen_helper_fld1_ST0(cpu_env);
5773 break;
5774 case 1:
5775 gen_helper_fpush(cpu_env);
5776 gen_helper_fldl2t_ST0(cpu_env);
5777 break;
5778 case 2:
5779 gen_helper_fpush(cpu_env);
5780 gen_helper_fldl2e_ST0(cpu_env);
5781 break;
5782 case 3:
5783 gen_helper_fpush(cpu_env);
5784 gen_helper_fldpi_ST0(cpu_env);
5785 break;
5786 case 4:
5787 gen_helper_fpush(cpu_env);
5788 gen_helper_fldlg2_ST0(cpu_env);
5789 break;
5790 case 5:
5791 gen_helper_fpush(cpu_env);
5792 gen_helper_fldln2_ST0(cpu_env);
5793 break;
5794 case 6:
5795 gen_helper_fpush(cpu_env);
5796 gen_helper_fldz_ST0(cpu_env);
5797 break;
5798 default:
5799 goto illegal_op;
5802 break;
5803 case 0x0e: /* grp d9/6 */
5804 switch(rm) {
5805 case 0: /* f2xm1 */
5806 gen_helper_f2xm1(cpu_env);
5807 break;
5808 case 1: /* fyl2x */
5809 gen_helper_fyl2x(cpu_env);
5810 break;
5811 case 2: /* fptan */
5812 gen_helper_fptan(cpu_env);
5813 break;
5814 case 3: /* fpatan */
5815 gen_helper_fpatan(cpu_env);
5816 break;
5817 case 4: /* fxtract */
5818 gen_helper_fxtract(cpu_env);
5819 break;
5820 case 5: /* fprem1 */
5821 gen_helper_fprem1(cpu_env);
5822 break;
5823 case 6: /* fdecstp */
5824 gen_helper_fdecstp(cpu_env);
5825 break;
5826 default:
5827 case 7: /* fincstp */
5828 gen_helper_fincstp(cpu_env);
5829 break;
5831 break;
5832 case 0x0f: /* grp d9/7 */
5833 switch(rm) {
5834 case 0: /* fprem */
5835 gen_helper_fprem(cpu_env);
5836 break;
5837 case 1: /* fyl2xp1 */
5838 gen_helper_fyl2xp1(cpu_env);
5839 break;
5840 case 2: /* fsqrt */
5841 gen_helper_fsqrt(cpu_env);
5842 break;
5843 case 3: /* fsincos */
5844 gen_helper_fsincos(cpu_env);
5845 break;
5846 case 5: /* fscale */
5847 gen_helper_fscale(cpu_env);
5848 break;
5849 case 4: /* frndint */
5850 gen_helper_frndint(cpu_env);
5851 break;
5852 case 6: /* fsin */
5853 gen_helper_fsin(cpu_env);
5854 break;
5855 default:
5856 case 7: /* fcos */
5857 gen_helper_fcos(cpu_env);
5858 break;
5860 break;
5861 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5862 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5863 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5865 int op1;
5867 op1 = op & 7;
5868 if (op >= 0x20) {
5869 gen_helper_fp_arith_STN_ST0(op1, opreg);
5870 if (op >= 0x30)
5871 gen_helper_fpop(cpu_env);
5872 } else {
5873 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5874 gen_helper_fp_arith_ST0_FT0(op1);
5877 break;
5878 case 0x02: /* fcom */
5879 case 0x22: /* fcom2, undocumented op */
5880 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5881 gen_helper_fcom_ST0_FT0(cpu_env);
5882 break;
5883 case 0x03: /* fcomp */
5884 case 0x23: /* fcomp3, undocumented op */
5885 case 0x32: /* fcomp5, undocumented op */
5886 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5887 gen_helper_fcom_ST0_FT0(cpu_env);
5888 gen_helper_fpop(cpu_env);
5889 break;
5890 case 0x15: /* da/5 */
5891 switch(rm) {
5892 case 1: /* fucompp */
5893 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5894 gen_helper_fucom_ST0_FT0(cpu_env);
5895 gen_helper_fpop(cpu_env);
5896 gen_helper_fpop(cpu_env);
5897 break;
5898 default:
5899 goto illegal_op;
5901 break;
5902 case 0x1c:
5903 switch(rm) {
5904 case 0: /* feni (287 only, just do nop here) */
5905 break;
5906 case 1: /* fdisi (287 only, just do nop here) */
5907 break;
5908 case 2: /* fclex */
5909 gen_helper_fclex(cpu_env);
5910 break;
5911 case 3: /* fninit */
5912 gen_helper_fninit(cpu_env);
5913 break;
5914 case 4: /* fsetpm (287 only, just do nop here) */
5915 break;
5916 default:
5917 goto illegal_op;
5919 break;
5920 case 0x1d: /* fucomi */
5921 gen_update_cc_op(s);
5922 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5923 gen_helper_fucomi_ST0_FT0(cpu_env);
5924 set_cc_op(s, CC_OP_EFLAGS);
5925 break;
5926 case 0x1e: /* fcomi */
5927 gen_update_cc_op(s);
5928 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5929 gen_helper_fcomi_ST0_FT0(cpu_env);
5930 set_cc_op(s, CC_OP_EFLAGS);
5931 break;
5932 case 0x28: /* ffree sti */
5933 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5934 break;
5935 case 0x2a: /* fst sti */
5936 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
5937 break;
5938 case 0x2b: /* fstp sti */
5939 case 0x0b: /* fstp1 sti, undocumented op */
5940 case 0x3a: /* fstp8 sti, undocumented op */
5941 case 0x3b: /* fstp9 sti, undocumented op */
5942 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
5943 gen_helper_fpop(cpu_env);
5944 break;
5945 case 0x2c: /* fucom st(i) */
5946 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5947 gen_helper_fucom_ST0_FT0(cpu_env);
5948 break;
5949 case 0x2d: /* fucomp st(i) */
5950 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5951 gen_helper_fucom_ST0_FT0(cpu_env);
5952 gen_helper_fpop(cpu_env);
5953 break;
5954 case 0x33: /* de/3 */
5955 switch(rm) {
5956 case 1: /* fcompp */
5957 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
5958 gen_helper_fcom_ST0_FT0(cpu_env);
5959 gen_helper_fpop(cpu_env);
5960 gen_helper_fpop(cpu_env);
5961 break;
5962 default:
5963 goto illegal_op;
5965 break;
5966 case 0x38: /* ffreep sti, undocumented op */
5967 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
5968 gen_helper_fpop(cpu_env);
5969 break;
5970 case 0x3c: /* df/4 */
5971 switch(rm) {
5972 case 0:
5973 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5974 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5975 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5976 break;
5977 default:
5978 goto illegal_op;
5980 break;
5981 case 0x3d: /* fucomip */
5982 gen_update_cc_op(s);
5983 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5984 gen_helper_fucomi_ST0_FT0(cpu_env);
5985 gen_helper_fpop(cpu_env);
5986 set_cc_op(s, CC_OP_EFLAGS);
5987 break;
5988 case 0x3e: /* fcomip */
5989 gen_update_cc_op(s);
5990 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
5991 gen_helper_fcomi_ST0_FT0(cpu_env);
5992 gen_helper_fpop(cpu_env);
5993 set_cc_op(s, CC_OP_EFLAGS);
5994 break;
5995 case 0x10 ... 0x13: /* fcmovxx */
5996 case 0x18 ... 0x1b:
5998 int op1, l1;
5999 static const uint8_t fcmov_cc[8] = {
6000 (JCC_B << 1),
6001 (JCC_Z << 1),
6002 (JCC_BE << 1),
6003 (JCC_P << 1),
6004 };
6005 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
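/* The low two opcode bits select the condition via fcmov_cc, and
   opcode bit 3 distinguishes fcmovcc from fcmovncc. The "^ 1" inverts
   the test because gen_jcc1 branches *over* the fmov when the move
   should not happen. */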
6006 l1 = gen_new_label();
6007 gen_jcc1(s, op1, l1);
6008 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6009 gen_set_label(l1);
6011 break;
6012 default:
6013 goto illegal_op;
6016 break;
6017 /************************/
6018 /* string ops */
6020 case 0xa4: /* movsS */
6021 case 0xa5:
6022 if ((b & 1) == 0)
6023 ot = OT_BYTE;
6024 else
6025 ot = dflag + OT_WORD;
6027 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6028 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6029 } else {
6030 gen_movs(s, ot);
6032 break;
6034 case 0xaa: /* stosS */
6035 case 0xab:
6036 if ((b & 1) == 0)
6037 ot = OT_BYTE;
6038 else
6039 ot = dflag + OT_WORD;
6041 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6042 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6043 } else {
6044 gen_stos(s, ot);
6046 break;
6047 case 0xac: /* lodsS */
6048 case 0xad:
6049 if ((b & 1) == 0)
6050 ot = OT_BYTE;
6051 else
6052 ot = dflag + OT_WORD;
6053 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6054 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6055 } else {
6056 gen_lods(s, ot);
6058 break;
6059 case 0xae: /* scasS */
6060 case 0xaf:
6061 if ((b & 1) == 0)
6062 ot = OT_BYTE;
6063 else
6064 ot = dflag + OT_WORD;
6065 if (prefixes & PREFIX_REPNZ) {
6066 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6067 } else if (prefixes & PREFIX_REPZ) {
6068 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6069 } else {
6070 gen_scas(s, ot);
6072 break;
6074 case 0xa6: /* cmpsS */
6075 case 0xa7:
6076 if ((b & 1) == 0)
6077 ot = OT_BYTE;
6078 else
6079 ot = dflag + OT_WORD;
6080 if (prefixes & PREFIX_REPNZ) {
6081 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6082 } else if (prefixes & PREFIX_REPZ) {
6083 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6084 } else {
6085 gen_cmps(s, ot);
6087 break;
6088 case 0x6c: /* insS */
6089 case 0x6d:
6090 if ((b & 1) == 0)
6091 ot = OT_BYTE;
6092 else
6093 ot = dflag ? OT_LONG : OT_WORD;
6094 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6095 gen_op_andl_T0_ffff();
6096 gen_check_io(s, ot, pc_start - s->cs_base,
6097 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6098 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6099 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6100 } else {
6101 gen_ins(s, ot);
6102 if (use_icount) {
6103 gen_jmp(s, s->pc - s->cs_base);
6106 break;
6107 case 0x6e: /* outsS */
6108 case 0x6f:
6109 if ((b & 1) == 0)
6110 ot = OT_BYTE;
6111 else
6112 ot = dflag ? OT_LONG : OT_WORD;
6113 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6114 gen_op_andl_T0_ffff();
6115 gen_check_io(s, ot, pc_start - s->cs_base,
6116 svm_is_rep(prefixes) | 4);
6117 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6118 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6119 } else {
6120 gen_outs(s, ot);
6121 if (use_icount) {
6122 gen_jmp(s, s->pc - s->cs_base);
6125 break;
6127 /************************/
6128 /* port I/O */
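/* With -icount, device I/O must happen at a deterministic point in
   the instruction stream: gen_io_start/gen_io_end bracket the access
   and the TB is terminated right after (gen_jmp), keeping the
   instruction counter exact. */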
6130 case 0xe4:
6131 case 0xe5:
6132 if ((b & 1) == 0)
6133 ot = OT_BYTE;
6134 else
6135 ot = dflag ? OT_LONG : OT_WORD;
6136 val = cpu_ldub_code(env, s->pc++);
6137 gen_op_movl_T0_im(val);
6138 gen_check_io(s, ot, pc_start - s->cs_base,
6139 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6140 if (use_icount)
6141 gen_io_start();
6142 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6143 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6144 gen_op_mov_reg_T1(ot, R_EAX);
6145 if (use_icount) {
6146 gen_io_end();
6147 gen_jmp(s, s->pc - s->cs_base);
6149 break;
6150 case 0xe6:
6151 case 0xe7:
6152 if ((b & 1) == 0)
6153 ot = OT_BYTE;
6154 else
6155 ot = dflag ? OT_LONG : OT_WORD;
6156 val = cpu_ldub_code(env, s->pc++);
6157 gen_op_movl_T0_im(val);
6158 gen_check_io(s, ot, pc_start - s->cs_base,
6159 svm_is_rep(prefixes));
6160 gen_op_mov_TN_reg(ot, 1, R_EAX);
6162 if (use_icount)
6163 gen_io_start();
6164 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6165 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6166 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6167 if (use_icount) {
6168 gen_io_end();
6169 gen_jmp(s, s->pc - s->cs_base);
6171 break;
6172 case 0xec:
6173 case 0xed:
6174 if ((b & 1) == 0)
6175 ot = OT_BYTE;
6176 else
6177 ot = dflag ? OT_LONG : OT_WORD;
6178 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6179 gen_op_andl_T0_ffff();
6180 gen_check_io(s, ot, pc_start - s->cs_base,
6181 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6182 if (use_icount)
6183 gen_io_start();
6184 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6185 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6186 gen_op_mov_reg_T1(ot, R_EAX);
6187 if (use_icount) {
6188 gen_io_end();
6189 gen_jmp(s, s->pc - s->cs_base);
6191 break;
6192 case 0xee:
6193 case 0xef:
6194 if ((b & 1) == 0)
6195 ot = OT_BYTE;
6196 else
6197 ot = dflag ? OT_LONG : OT_WORD;
6198 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6199 gen_op_andl_T0_ffff();
6200 gen_check_io(s, ot, pc_start - s->cs_base,
6201 svm_is_rep(prefixes));
6202 gen_op_mov_TN_reg(ot, 1, R_EAX);
6204 if (use_icount)
6205 gen_io_start();
6206 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6207 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6208 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6209 if (use_icount) {
6210 gen_io_end();
6211 gen_jmp(s, s->pc - s->cs_base);
6213 break;
6215 /************************/
6216 /* control */
6217 case 0xc2: /* ret im */
6218 val = cpu_ldsw_code(env, s->pc);
6219 s->pc += 2;
6220 gen_pop_T0(s);
6221 if (CODE64(s) && s->dflag)
6222 s->dflag = 2;
6223 gen_stack_update(s, val + (2 << s->dflag));
6224 if (s->dflag == 0)
6225 gen_op_andl_T0_ffff();
6226 gen_op_jmp_T0();
6227 gen_eob(s);
6228 break;
6229 case 0xc3: /* ret */
6230 gen_pop_T0(s);
6231 gen_pop_update(s);
6232 if (s->dflag == 0)
6233 gen_op_andl_T0_ffff();
6234 gen_op_jmp_T0();
6235 gen_eob(s);
6236 break;
6237 case 0xca: /* lret im */
6238 val = cpu_ldsw_code(env, s->pc);
6239 s->pc += 2;
6240 do_lret:
6241 if (s->pe && !s->vm86) {
6242 gen_update_cc_op(s);
6243 gen_jmp_im(pc_start - s->cs_base);
6244 gen_helper_lret_protected(cpu_env, tcg_const_i32(s->dflag),
6245 tcg_const_i32(val));
6246 } else {
6247 gen_stack_A0(s);
6248 /* pop offset */
6249 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6250 if (s->dflag == 0)
6251 gen_op_andl_T0_ffff();
6252 /* NOTE: keeping EIP updated is not a problem in case of
6253 exception */
6254 gen_op_jmp_T0();
6255 /* pop selector */
6256 gen_op_addl_A0_im(2 << s->dflag);
6257 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6258 gen_op_movl_seg_T0_vm(R_CS);
6259 /* add stack offset */
6260 gen_stack_update(s, val + (4 << s->dflag));
6262 gen_eob(s);
6263 break;
6264 case 0xcb: /* lret */
6265 val = 0;
6266 goto do_lret;
6267 case 0xcf: /* iret */
6268 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6269 if (!s->pe) {
6270 /* real mode */
6271 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6272 set_cc_op(s, CC_OP_EFLAGS);
6273 } else if (s->vm86) {
6274 if (s->iopl != 3) {
6275 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6276 } else {
6277 gen_helper_iret_real(cpu_env, tcg_const_i32(s->dflag));
6278 set_cc_op(s, CC_OP_EFLAGS);
6280 } else {
6281 gen_update_cc_op(s);
6282 gen_jmp_im(pc_start - s->cs_base);
6283 gen_helper_iret_protected(cpu_env, tcg_const_i32(s->dflag),
6284 tcg_const_i32(s->pc - s->cs_base));
6285 set_cc_op(s, CC_OP_EFLAGS);
6287 gen_eob(s);
6288 break;
6289 case 0xe8: /* call im */
6291 if (dflag)
6292 tval = (int32_t)insn_get(env, s, OT_LONG);
6293 else
6294 tval = (int16_t)insn_get(env, s, OT_WORD);
6295 next_eip = s->pc - s->cs_base;
6296 tval += next_eip;
6297 if (s->dflag == 0)
6298 tval &= 0xffff;
6299 else if(!CODE64(s))
6300 tval &= 0xffffffff;
6301 gen_movtl_T0_im(next_eip);
6302 gen_push_T0(s);
6303 gen_jmp(s, tval);
6305 break;
6306 case 0x9a: /* lcall im */
6308 unsigned int selector, offset;
6310 if (CODE64(s))
6311 goto illegal_op;
6312 ot = dflag ? OT_LONG : OT_WORD;
6313 offset = insn_get(env, s, ot);
6314 selector = insn_get(env, s, OT_WORD);
6316 gen_op_movl_T0_im(selector);
6317 gen_op_movl_T1_imu(offset);
6318 }
6319 goto do_lcall;
6320 case 0xe9: /* jmp im */
6321 if (dflag)
6322 tval = (int32_t)insn_get(env, s, OT_LONG);
6323 else
6324 tval = (int16_t)insn_get(env, s, OT_WORD);
6325 tval += s->pc - s->cs_base;
6326 if (s->dflag == 0)
6327 tval &= 0xffff;
6328 else if(!CODE64(s))
6329 tval &= 0xffffffff;
6330 gen_jmp(s, tval);
6331 break;
6332 case 0xea: /* ljmp im */
6333 {
6334 unsigned int selector, offset;
6336 if (CODE64(s))
6337 goto illegal_op;
6338 ot = dflag ? OT_LONG : OT_WORD;
6339 offset = insn_get(env, s, ot);
6340 selector = insn_get(env, s, OT_WORD);
6342 gen_op_movl_T0_im(selector);
6343 gen_op_movl_T1_imu(offset);
6344 }
6345 goto do_ljmp;
6346 case 0xeb: /* jmp Jb */
6347 tval = (int8_t)insn_get(env, s, OT_BYTE);
6348 tval += s->pc - s->cs_base;
6349 if (s->dflag == 0)
6350 tval &= 0xffff;
6351 gen_jmp(s, tval);
6352 break;
6353 case 0x70 ... 0x7f: /* jcc Jb */
6354 tval = (int8_t)insn_get(env, s, OT_BYTE);
6355 goto do_jcc;
6356 case 0x180 ... 0x18f: /* jcc Jv */
6357 if (dflag) {
6358 tval = (int32_t)insn_get(env, s, OT_LONG);
6359 } else {
6360 tval = (int16_t)insn_get(env, s, OT_WORD);
6361 }
6362 do_jcc:
6363 next_eip = s->pc - s->cs_base;
6364 tval += next_eip;
6365 if (s->dflag == 0)
6366 tval &= 0xffff;
6367 gen_jcc(s, b, tval, next_eip);
6368 break;
6370 case 0x190 ... 0x19f: /* setcc Gv */
6371 modrm = cpu_ldub_code(env, s->pc++);
6372 gen_setcc(s, b);
6373 gen_ldst_modrm(env, s, modrm, OT_BYTE, OR_TMP0, 1);
6374 break;
6375 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6376 {
6377 int l1;
6378 TCGv t0;
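/* cmov is implemented as a conditional branch around a plain
   move.  NOTE: in 64-bit mode a 32-bit cmov zero-extends the
   destination even when the move itself is not performed, hence
   the unconditional ext32u below */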
6380 ot = dflag + OT_WORD;
6381 modrm = cpu_ldub_code(env, s->pc++);
6382 reg = ((modrm >> 3) & 7) | rex_r;
6383 mod = (modrm >> 6) & 3;
6384 t0 = tcg_temp_local_new();
6385 if (mod != 3) {
6386 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6387 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6388 } else {
6389 rm = (modrm & 7) | REX_B(s);
6390 gen_op_mov_v_reg(ot, t0, rm);
6391 }
6392 #ifdef TARGET_X86_64
6393 if (ot == OT_LONG) {
6394 /* XXX: specific Intel behaviour ? */
6395 l1 = gen_new_label();
6396 gen_jcc1(s, b ^ 1, l1);
6397 tcg_gen_mov_tl(cpu_regs[reg], t0);
6398 gen_set_label(l1);
6399 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
6400 } else
6401 #endif
6402 {
6403 l1 = gen_new_label();
6404 gen_jcc1(s, b ^ 1, l1);
6405 gen_op_mov_reg_v(ot, reg, t0);
6406 gen_set_label(l1);
6407 }
6408 tcg_temp_free(t0);
6409 }
6410 break;
6412 /************************/
6413 /* flags */
6414 case 0x9c: /* pushf */
6415 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6416 if (s->vm86 && s->iopl != 3) {
6417 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6418 } else {
6419 gen_update_cc_op(s);
6420 gen_helper_read_eflags(cpu_T[0], cpu_env);
6421 gen_push_T0(s);
6422 }
6423 break;
6424 case 0x9d: /* popf */
6425 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6426 if (s->vm86 && s->iopl != 3) {
6427 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6428 } else {
6429 gen_pop_T0(s);
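/* the set of writable eflags depends on privilege: CPL 0 may
   also change IF and IOPL, 0 < CPL <= IOPL may change IF but
   not IOPL, otherwise neither; a 16 bit operand size truncates
   the mask to the low 16 bits */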
6430 if (s->cpl == 0) {
6431 if (s->dflag) {
6432 gen_helper_write_eflags(cpu_env, cpu_T[0],
6433 tcg_const_i32((TF_MASK | AC_MASK |
6434 ID_MASK | NT_MASK |
6435 IF_MASK |
6436 IOPL_MASK)));
6437 } else {
6438 gen_helper_write_eflags(cpu_env, cpu_T[0],
6439 tcg_const_i32((TF_MASK | AC_MASK |
6440 ID_MASK | NT_MASK |
6441 IF_MASK | IOPL_MASK)
6442 & 0xffff));
6443 }
6444 } else {
6445 if (s->cpl <= s->iopl) {
6446 if (s->dflag) {
6447 gen_helper_write_eflags(cpu_env, cpu_T[0],
6448 tcg_const_i32((TF_MASK |
6449 AC_MASK |
6450 ID_MASK |
6451 NT_MASK |
6452 IF_MASK)));
6453 } else {
6454 gen_helper_write_eflags(cpu_env, cpu_T[0],
6455 tcg_const_i32((TF_MASK |
6456 AC_MASK |
6457 ID_MASK |
6458 NT_MASK |
6459 IF_MASK)
6460 & 0xffff));
6461 }
6462 } else {
6463 if (s->dflag) {
6464 gen_helper_write_eflags(cpu_env, cpu_T[0],
6465 tcg_const_i32((TF_MASK | AC_MASK |
6466 ID_MASK | NT_MASK)));
6467 } else {
6468 gen_helper_write_eflags(cpu_env, cpu_T[0],
6469 tcg_const_i32((TF_MASK | AC_MASK |
6470 ID_MASK | NT_MASK)
6471 & 0xffff));
6472 }
6473 }
6474 }
6475 gen_pop_update(s);
6476 set_cc_op(s, CC_OP_EFLAGS);
6477 /* abort translation because TF/AC flag may change */
6478 gen_jmp_im(s->pc - s->cs_base);
6479 gen_eob(s);
6480 }
6481 break;
6482 case 0x9e: /* sahf */
6483 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6484 goto illegal_op;
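/* AH only holds SF, ZF, AF, PF and CF; OF lives above the low
   byte, so the computed OF is kept from the full flags value
   and merged with the bits taken from AH */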
6485 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6486 gen_compute_eflags(s, cpu_cc_src);
6487 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6488 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6489 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6490 break;
6491 case 0x9f: /* lahf */
6492 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6493 goto illegal_op;
6494 gen_compute_eflags(s, cpu_T[0]);
6495 /* Note: gen_compute_eflags() only gives the condition codes */
6496 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6497 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6498 break;
6499 case 0xf5: /* cmc */
6500 gen_compute_eflags(s, cpu_cc_src);
6501 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6502 break;
6503 case 0xf8: /* clc */
6504 gen_compute_eflags(s, cpu_cc_src);
6505 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6506 break;
6507 case 0xf9: /* stc */
6508 gen_compute_eflags(s, cpu_cc_src);
6509 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6510 break;
6511 case 0xfc: /* cld */
6512 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6513 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6514 break;
6515 case 0xfd: /* std */
6516 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6517 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6518 break;
6520 /************************/
6521 /* bit operations */
6522 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6523 ot = dflag + OT_WORD;
6524 modrm = cpu_ldub_code(env, s->pc++);
6525 op = (modrm >> 3) & 7;
6526 mod = (modrm >> 6) & 3;
6527 rm = (modrm & 7) | REX_B(s);
6528 if (mod != 3) {
6529 s->rip_offset = 1;
6530 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6531 gen_op_ld_T0_A0(ot + s->mem_index);
6532 } else {
6533 gen_op_mov_TN_reg(ot, 0, rm);
6534 }
6535 /* load shift */
6536 val = cpu_ldub_code(env, s->pc++);
6537 gen_op_movl_T1_im(val);
6538 if (op < 4)
6539 goto illegal_op;
6540 op -= 4;
6541 goto bt_op;
6542 case 0x1a3: /* bt Gv, Ev */
6543 op = 0;
6544 goto do_btx;
6545 case 0x1ab: /* bts */
6546 op = 1;
6547 goto do_btx;
6548 case 0x1b3: /* btr */
6549 op = 2;
6550 goto do_btx;
6551 case 0x1bb: /* btc */
6552 op = 3;
6553 do_btx:
6554 ot = dflag + OT_WORD;
6555 modrm = cpu_ldub_code(env, s->pc++);
6556 reg = ((modrm >> 3) & 7) | rex_r;
6557 mod = (modrm >> 6) & 3;
6558 rm = (modrm & 7) | REX_B(s);
6559 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6560 if (mod != 3) {
6561 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6562 /* specific case: we need to add a displacement */
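/* the register bit index may select a bit outside the addressed
   operand: the signed word index (T1 >> (3 + ot)) scaled to
   bytes (<< ot) is added to the address, e.g. bt dword [m], 35
   accesses bit 3 of m + 4 */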
6563 gen_exts(ot, cpu_T[1]);
6564 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6565 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6566 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6567 gen_op_ld_T0_A0(ot + s->mem_index);
6568 } else {
6569 gen_op_mov_TN_reg(ot, 0, rm);
6570 }
6571 bt_op:
6572 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6573 switch(op) {
6574 case 0:
6575 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6576 tcg_gen_movi_tl(cpu_cc_dst, 0);
6577 break;
6578 case 1:
6579 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6580 tcg_gen_movi_tl(cpu_tmp0, 1);
6581 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6582 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6583 break;
6584 case 2:
6585 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6586 tcg_gen_movi_tl(cpu_tmp0, 1);
6587 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6588 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6589 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6590 break;
6591 default:
6592 case 3:
6593 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6594 tcg_gen_movi_tl(cpu_tmp0, 1);
6595 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6596 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6597 break;
6598 }
6599 set_cc_op(s, CC_OP_SARB + ot);
6600 if (op != 0) {
6601 if (mod != 3)
6602 gen_op_st_T0_A0(ot + s->mem_index);
6603 else
6604 gen_op_mov_reg_T0(ot, rm);
6605 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6606 tcg_gen_movi_tl(cpu_cc_dst, 0);
6607 }
6608 break;
6609 case 0x1bc: /* bsf */
6610 case 0x1bd: /* bsr */
6611 {
6612 int label1;
6613 TCGv t0;
6615 ot = dflag + OT_WORD;
6616 modrm = cpu_ldub_code(env, s->pc++);
6617 reg = ((modrm >> 3) & 7) | rex_r;
6618 gen_ldst_modrm(env, s,modrm, ot, OR_TMP0, 0);
6619 gen_extu(ot, cpu_T[0]);
6620 t0 = tcg_temp_local_new();
6621 tcg_gen_mov_tl(t0, cpu_T[0]);
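/* with a REPZ prefix and the ABM CPUID bit, 0f bd is lzcnt
   instead of bsr.  For plain bsf/bsr, cpu_cc_dst provides ZF:
   it stays 0 (ZF set) when the source is zero, in which case
   the destination register is left unchanged */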
6622 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6623 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
6624 switch(ot) {
6625 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6626 tcg_const_i32(16)); break;
6627 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6628 tcg_const_i32(32)); break;
6629 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6630 tcg_const_i32(64)); break;
6631 }
6632 gen_op_mov_reg_T0(ot, reg);
6633 } else {
6634 label1 = gen_new_label();
6635 tcg_gen_movi_tl(cpu_cc_dst, 0);
6636 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6637 if (b & 1) {
6638 gen_helper_bsr(cpu_T[0], t0);
6639 } else {
6640 gen_helper_bsf(cpu_T[0], t0);
6641 }
6642 gen_op_mov_reg_T0(ot, reg);
6643 tcg_gen_movi_tl(cpu_cc_dst, 1);
6644 gen_set_label(label1);
6645 tcg_gen_discard_tl(cpu_cc_src);
6646 set_cc_op(s, CC_OP_LOGICB + ot);
6647 }
6648 tcg_temp_free(t0);
6649 }
6650 break;
6651 /************************/
6652 /* bcd */
6653 case 0x27: /* daa */
6654 if (CODE64(s))
6655 goto illegal_op;
6656 gen_update_cc_op(s);
6657 gen_helper_daa(cpu_env);
6658 set_cc_op(s, CC_OP_EFLAGS);
6659 break;
6660 case 0x2f: /* das */
6661 if (CODE64(s))
6662 goto illegal_op;
6663 gen_update_cc_op(s);
6664 gen_helper_das(cpu_env);
6665 set_cc_op(s, CC_OP_EFLAGS);
6666 break;
6667 case 0x37: /* aaa */
6668 if (CODE64(s))
6669 goto illegal_op;
6670 gen_update_cc_op(s);
6671 gen_helper_aaa(cpu_env);
6672 set_cc_op(s, CC_OP_EFLAGS);
6673 break;
6674 case 0x3f: /* aas */
6675 if (CODE64(s))
6676 goto illegal_op;
6677 gen_update_cc_op(s);
6678 gen_helper_aas(cpu_env);
6679 set_cc_op(s, CC_OP_EFLAGS);
6680 break;
6681 case 0xd4: /* aam */
6682 if (CODE64(s))
6683 goto illegal_op;
6684 val = cpu_ldub_code(env, s->pc++);
6685 if (val == 0) {
6686 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6687 } else {
6688 gen_helper_aam(cpu_env, tcg_const_i32(val));
6689 set_cc_op(s, CC_OP_LOGICB);
6690 }
6691 break;
6692 case 0xd5: /* aad */
6693 if (CODE64(s))
6694 goto illegal_op;
6695 val = cpu_ldub_code(env, s->pc++);
6696 gen_helper_aad(cpu_env, tcg_const_i32(val));
6697 set_cc_op(s, CC_OP_LOGICB);
6698 break;
6699 /************************/
6700 /* misc */
6701 case 0x90: /* nop */
6702 /* XXX: correct lock test for all insn */
6703 if (prefixes & PREFIX_LOCK) {
6704 goto illegal_op;
6705 }
6706 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6707 if (REX_B(s)) {
6708 goto do_xchg_reg_eax;
6709 }
6710 if (prefixes & PREFIX_REPZ) {
6711 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6712 }
6713 break;
6714 case 0x9b: /* fwait */
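/* fwait raises #NM only when both CR0.MP and CR0.TS are set;
   otherwise the helper delivers any pending x87 exception */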
6715 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6716 (HF_MP_MASK | HF_TS_MASK)) {
6717 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6718 } else {
6719 gen_update_cc_op(s);
6720 gen_jmp_im(pc_start - s->cs_base);
6721 gen_helper_fwait(cpu_env);
6722 }
6723 break;
6724 case 0xcc: /* int3 */
6725 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6726 break;
6727 case 0xcd: /* int N */
6728 val = cpu_ldub_code(env, s->pc++);
6729 if (s->vm86 && s->iopl != 3) {
6730 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6731 } else {
6732 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6733 }
6734 break;
6735 case 0xce: /* into */
6736 if (CODE64(s))
6737 goto illegal_op;
6738 gen_update_cc_op(s);
6739 gen_jmp_im(pc_start - s->cs_base);
6740 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6741 break;
6742 #ifdef WANT_ICEBP
6743 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6744 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6745 #if 1
6746 gen_debug(s, pc_start - s->cs_base);
6747 #else
6748 /* start debug */
6749 tb_flush(env);
6750 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6751 #endif
6752 break;
6753 #endif
6754 case 0xfa: /* cli */
6755 if (!s->vm86) {
6756 if (s->cpl <= s->iopl) {
6757 gen_helper_cli(cpu_env);
6758 } else {
6759 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6760 }
6761 } else {
6762 if (s->iopl == 3) {
6763 gen_helper_cli(cpu_env);
6764 } else {
6765 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6766 }
6767 }
6768 break;
6769 case 0xfb: /* sti */
6770 if (!s->vm86) {
6771 if (s->cpl <= s->iopl) {
6772 gen_sti:
6773 gen_helper_sti(cpu_env);
6774 /* interrupts are recognized only after the insn following sti */
6775 /* if several inhibiting insns occur in a row, only the
6776 _first_ one arms the inhibit flag */
6777 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6778 gen_helper_set_inhibit_irq(cpu_env);
6779 /* give a chance to handle pending irqs */
6780 gen_jmp_im(s->pc - s->cs_base);
6781 gen_eob(s);
6782 } else {
6783 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6784 }
6785 } else {
6786 if (s->iopl == 3) {
6787 goto gen_sti;
6788 } else {
6789 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6790 }
6791 }
6792 break;
6793 case 0x62: /* bound */
6794 if (CODE64(s))
6795 goto illegal_op;
6796 ot = dflag ? OT_LONG : OT_WORD;
6797 modrm = cpu_ldub_code(env, s->pc++);
6798 reg = (modrm >> 3) & 7;
6799 mod = (modrm >> 6) & 3;
6800 if (mod == 3)
6801 goto illegal_op;
6802 gen_op_mov_TN_reg(ot, 0, reg);
6803 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
6804 gen_jmp_im(pc_start - s->cs_base);
6805 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6806 if (ot == OT_WORD) {
6807 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6808 } else {
6809 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6810 }
6811 break;
6812 case 0x1c8 ... 0x1cf: /* bswap reg */
6813 reg = (b & 7) | REX_B(s);
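/* NOTE: bswap of a 16-bit register is undefined on real CPUs;
   everything that is not a 64-bit operand takes the 32-bit
   path below */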
6814 #ifdef TARGET_X86_64
6815 if (dflag == 2) {
6816 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6817 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6818 gen_op_mov_reg_T0(OT_QUAD, reg);
6819 } else
6820 #endif
6821 {
6822 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6823 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6824 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6825 gen_op_mov_reg_T0(OT_LONG, reg);
6826 }
6827 break;
6828 case 0xd6: /* salc */
6829 if (CODE64(s))
6830 goto illegal_op;
6831 gen_compute_eflags_c(s, cpu_T[0]);
6832 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6833 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6834 break;
6835 case 0xe0: /* loopnz */
6836 case 0xe1: /* loopz */
6837 case 0xe2: /* loop */
6838 case 0xe3: /* jecxz */
6839 {
6840 int l1, l2, l3;
6842 tval = (int8_t)insn_get(env, s, OT_BYTE);
6843 next_eip = s->pc - s->cs_base;
6844 tval += next_eip;
6845 if (s->dflag == 0)
6846 tval &= 0xffff;
6848 l1 = gen_new_label();
6849 l2 = gen_new_label();
6850 l3 = gen_new_label();
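/* l1 is reached when the branch is taken (jump to tval), l3
   when it is not (fall through to next_eip), and l2 joins the
   two paths; aflag selects CX, ECX or RCX for the count tests */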
6851 b &= 3;
6852 switch(b) {
6853 case 0: /* loopnz */
6854 case 1: /* loopz */
6855 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6856 gen_op_jz_ecx(s->aflag, l3);
6857 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6858 break;
6859 case 2: /* loop */
6860 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6861 gen_op_jnz_ecx(s->aflag, l1);
6862 break;
6863 default:
6864 case 3: /* jcxz */
6865 gen_op_jz_ecx(s->aflag, l1);
6866 break;
6867 }
6869 gen_set_label(l3);
6870 gen_jmp_im(next_eip);
6871 tcg_gen_br(l2);
6873 gen_set_label(l1);
6874 gen_jmp_im(tval);
6875 gen_set_label(l2);
6876 gen_eob(s);
6877 }
6878 break;
6879 case 0x130: /* wrmsr */
6880 case 0x132: /* rdmsr */
6881 if (s->cpl != 0) {
6882 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6883 } else {
6884 gen_update_cc_op(s);
6885 gen_jmp_im(pc_start - s->cs_base);
6886 if (b & 2) {
6887 gen_helper_rdmsr(cpu_env);
6888 } else {
6889 gen_helper_wrmsr(cpu_env);
6890 }
6891 }
6892 break;
6893 case 0x131: /* rdtsc */
6894 gen_update_cc_op(s);
6895 gen_jmp_im(pc_start - s->cs_base);
6896 if (use_icount)
6897 gen_io_start();
6898 gen_helper_rdtsc(cpu_env);
6899 if (use_icount) {
6900 gen_io_end();
6901 gen_jmp(s, s->pc - s->cs_base);
6902 }
6903 break;
6904 case 0x133: /* rdpmc */
6905 gen_update_cc_op(s);
6906 gen_jmp_im(pc_start - s->cs_base);
6907 gen_helper_rdpmc(cpu_env);
6908 break;
6909 case 0x134: /* sysenter */
6910 /* on Intel CPUs, SYSENTER is also valid in 64-bit mode */
6911 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6912 goto illegal_op;
6913 if (!s->pe) {
6914 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6915 } else {
6916 gen_update_cc_op(s);
6917 gen_jmp_im(pc_start - s->cs_base);
6918 gen_helper_sysenter(cpu_env);
6919 gen_eob(s);
6920 }
6921 break;
6922 case 0x135: /* sysexit */
6923 /* on Intel CPUs, SYSEXIT is also valid in 64-bit mode */
6924 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6925 goto illegal_op;
6926 if (!s->pe) {
6927 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6928 } else {
6929 gen_update_cc_op(s);
6930 gen_jmp_im(pc_start - s->cs_base);
6931 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag));
6932 gen_eob(s);
6933 }
6934 break;
6935 #ifdef TARGET_X86_64
6936 case 0x105: /* syscall */
6937 /* XXX: is it usable in real mode ? */
6938 gen_update_cc_op(s);
6939 gen_jmp_im(pc_start - s->cs_base);
6940 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
6941 gen_eob(s);
6942 break;
6943 case 0x107: /* sysret */
6944 if (!s->pe) {
6945 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6946 } else {
6947 gen_update_cc_op(s);
6948 gen_jmp_im(pc_start - s->cs_base);
6949 gen_helper_sysret(cpu_env, tcg_const_i32(s->dflag));
6950 /* condition codes are modified only in long mode */
6951 if (s->lma) {
6952 set_cc_op(s, CC_OP_EFLAGS);
6953 }
6954 gen_eob(s);
6955 }
6956 break;
6957 #endif
6958 case 0x1a2: /* cpuid */
6959 gen_update_cc_op(s);
6960 gen_jmp_im(pc_start - s->cs_base);
6961 gen_helper_cpuid(cpu_env);
6962 break;
6963 case 0xf4: /* hlt */
6964 if (s->cpl != 0) {
6965 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6966 } else {
6967 gen_update_cc_op(s);
6968 gen_jmp_im(pc_start - s->cs_base);
6969 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
6970 s->is_jmp = DISAS_TB_JUMP;
6971 }
6972 break;
6973 case 0x100:
6974 modrm = cpu_ldub_code(env, s->pc++);
6975 mod = (modrm >> 6) & 3;
6976 op = (modrm >> 3) & 7;
6977 switch(op) {
6978 case 0: /* sldt */
6979 if (!s->pe || s->vm86)
6980 goto illegal_op;
6981 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
6982 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
6983 ot = OT_WORD;
6984 if (mod == 3)
6985 ot += s->dflag;
6986 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
6987 break;
6988 case 2: /* lldt */
6989 if (!s->pe || s->vm86)
6990 goto illegal_op;
6991 if (s->cpl != 0) {
6992 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6993 } else {
6994 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
6995 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
6996 gen_jmp_im(pc_start - s->cs_base);
6997 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6998 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
6999 }
7000 break;
7001 case 1: /* str */
7002 if (!s->pe || s->vm86)
7003 goto illegal_op;
7004 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7005 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7006 ot = OT_WORD;
7007 if (mod == 3)
7008 ot += s->dflag;
7009 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7010 break;
7011 case 3: /* ltr */
7012 if (!s->pe || s->vm86)
7013 goto illegal_op;
7014 if (s->cpl != 0) {
7015 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7016 } else {
7017 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7018 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7019 gen_jmp_im(pc_start - s->cs_base);
7020 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7021 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7022 }
7023 break;
7024 case 4: /* verr */
7025 case 5: /* verw */
7026 if (!s->pe || s->vm86)
7027 goto illegal_op;
7028 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7029 gen_update_cc_op(s);
7030 if (op == 4) {
7031 gen_helper_verr(cpu_env, cpu_T[0]);
7032 } else {
7033 gen_helper_verw(cpu_env, cpu_T[0]);
7034 }
7035 set_cc_op(s, CC_OP_EFLAGS);
7036 break;
7037 default:
7038 goto illegal_op;
7039 }
7040 break;
7041 case 0x101:
7042 modrm = cpu_ldub_code(env, s->pc++);
7043 mod = (modrm >> 6) & 3;
7044 op = (modrm >> 3) & 7;
7045 rm = modrm & 7;
7046 switch(op) {
7047 case 0: /* sgdt */
7048 if (mod == 3)
7049 goto illegal_op;
7050 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7051 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7052 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7053 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7054 gen_add_A0_im(s, 2);
7055 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7056 if (!s->dflag)
7057 gen_op_andl_T0_im(0xffffff);
7058 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7059 break;
7060 case 1:
7061 if (mod == 3) {
7062 switch (rm) {
7063 case 0: /* monitor */
7064 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7065 s->cpl != 0)
7066 goto illegal_op;
7067 gen_update_cc_op(s);
7068 gen_jmp_im(pc_start - s->cs_base);
7069 #ifdef TARGET_X86_64
7070 if (s->aflag == 2) {
7071 gen_op_movq_A0_reg(R_EAX);
7072 } else
7073 #endif
7074 {
7075 gen_op_movl_A0_reg(R_EAX);
7076 if (s->aflag == 0)
7077 gen_op_andl_A0_ffff();
7078 }
7079 gen_add_A0_ds_seg(s);
7080 gen_helper_monitor(cpu_env, cpu_A0);
7081 break;
7082 case 1: /* mwait */
7083 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7084 s->cpl != 0)
7085 goto illegal_op;
7086 gen_update_cc_op(s);
7087 gen_jmp_im(pc_start - s->cs_base);
7088 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7089 gen_eob(s);
7090 break;
7091 case 2: /* clac */
7092 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7093 s->cpl != 0) {
7094 goto illegal_op;
7095 }
7096 gen_helper_clac(cpu_env);
7097 gen_jmp_im(s->pc - s->cs_base);
7098 gen_eob(s);
7099 break;
7100 case 3: /* stac */
7101 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7102 s->cpl != 0) {
7103 goto illegal_op;
7104 }
7105 gen_helper_stac(cpu_env);
7106 gen_jmp_im(s->pc - s->cs_base);
7107 gen_eob(s);
7108 break;
7109 default:
7110 goto illegal_op;
7111 }
7112 } else { /* sidt */
7113 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7114 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7115 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7116 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7117 gen_add_A0_im(s, 2);
7118 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7119 if (!s->dflag)
7120 gen_op_andl_T0_im(0xffffff);
7121 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7122 }
7123 break;
7124 case 2: /* lgdt */
7125 case 3: /* lidt */
7126 if (mod == 3) {
7127 gen_update_cc_op(s);
7128 gen_jmp_im(pc_start - s->cs_base);
7129 switch(rm) {
7130 case 0: /* VMRUN */
7131 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7132 goto illegal_op;
7133 if (s->cpl != 0) {
7134 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7135 break;
7136 } else {
7137 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag),
7138 tcg_const_i32(s->pc - pc_start));
7139 tcg_gen_exit_tb(0);
7140 s->is_jmp = DISAS_TB_JUMP;
7141 }
7142 break;
7143 case 1: /* VMMCALL */
7144 if (!(s->flags & HF_SVME_MASK))
7145 goto illegal_op;
7146 gen_helper_vmmcall(cpu_env);
7147 break;
7148 case 2: /* VMLOAD */
7149 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7150 goto illegal_op;
7151 if (s->cpl != 0) {
7152 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7153 break;
7154 } else {
7155 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag));
7156 }
7157 break;
7158 case 3: /* VMSAVE */
7159 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7160 goto illegal_op;
7161 if (s->cpl != 0) {
7162 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7163 break;
7164 } else {
7165 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag));
7166 }
7167 break;
7168 case 4: /* STGI */
7169 if ((!(s->flags & HF_SVME_MASK) &&
7170 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7171 !s->pe)
7172 goto illegal_op;
7173 if (s->cpl != 0) {
7174 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7175 break;
7176 } else {
7177 gen_helper_stgi(cpu_env);
7178 }
7179 break;
7180 case 5: /* CLGI */
7181 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7182 goto illegal_op;
7183 if (s->cpl != 0) {
7184 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7185 break;
7186 } else {
7187 gen_helper_clgi(cpu_env);
7188 }
7189 break;
7190 case 6: /* SKINIT */
7191 if ((!(s->flags & HF_SVME_MASK) &&
7192 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7193 !s->pe)
7194 goto illegal_op;
7195 gen_helper_skinit(cpu_env);
7196 break;
7197 case 7: /* INVLPGA */
7198 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7199 goto illegal_op;
7200 if (s->cpl != 0) {
7201 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7202 break;
7203 } else {
7204 gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag));
7205 }
7206 break;
7207 default:
7208 goto illegal_op;
7209 }
7210 } else if (s->cpl != 0) {
7211 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7212 } else {
7213 gen_svm_check_intercept(s, pc_start,
7214 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7215 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7216 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7217 gen_add_A0_im(s, 2);
7218 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7219 if (!s->dflag)
7220 gen_op_andl_T0_im(0xffffff);
7221 if (op == 2) {
7222 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7223 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7224 } else {
7225 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7226 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7227 }
7228 }
7229 break;
7230 case 4: /* smsw */
7231 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
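/* smsw only reads the low 32 bits of CR0, so on a big-endian
   host the 32-bit load from the 64-bit field needs a +4 offset */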
7232 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7233 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7234 #else
7235 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7236 #endif
7237 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 1);
7238 break;
7239 case 6: /* lmsw */
7240 if (s->cpl != 0) {
7241 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7242 } else {
7243 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7244 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7245 gen_helper_lmsw(cpu_env, cpu_T[0]);
7246 gen_jmp_im(s->pc - s->cs_base);
7247 gen_eob(s);
7248 }
7249 break;
7250 case 7:
7251 if (mod != 3) { /* invlpg */
7252 if (s->cpl != 0) {
7253 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7254 } else {
7255 gen_update_cc_op(s);
7256 gen_jmp_im(pc_start - s->cs_base);
7257 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7258 gen_helper_invlpg(cpu_env, cpu_A0);
7259 gen_jmp_im(s->pc - s->cs_base);
7260 gen_eob(s);
7261 }
7262 } else {
7263 switch (rm) {
7264 case 0: /* swapgs */
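/* swapgs exchanges the GS base with the KernelGSbase MSR value;
   only valid in 64-bit mode at CPL 0 */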
7265 #ifdef TARGET_X86_64
7266 if (CODE64(s)) {
7267 if (s->cpl != 0) {
7268 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7269 } else {
7270 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7271 offsetof(CPUX86State,segs[R_GS].base));
7272 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7273 offsetof(CPUX86State,kernelgsbase));
7274 tcg_gen_st_tl(cpu_T[1], cpu_env,
7275 offsetof(CPUX86State,segs[R_GS].base));
7276 tcg_gen_st_tl(cpu_T[0], cpu_env,
7277 offsetof(CPUX86State,kernelgsbase));
7278 }
7279 } else
7280 #endif
7281 {
7282 goto illegal_op;
7283 }
7284 break;
7285 case 1: /* rdtscp */
7286 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7287 goto illegal_op;
7288 gen_update_cc_op(s);
7289 gen_jmp_im(pc_start - s->cs_base);
7290 if (use_icount)
7291 gen_io_start();
7292 gen_helper_rdtscp(cpu_env);
7293 if (use_icount) {
7294 gen_io_end();
7295 gen_jmp(s, s->pc - s->cs_base);
7296 }
7297 break;
7298 default:
7299 goto illegal_op;
7300 }
7301 }
7302 break;
7303 default:
7304 goto illegal_op;
7305 }
7306 break;
7307 case 0x108: /* invd */
7308 case 0x109: /* wbinvd */
7309 if (s->cpl != 0) {
7310 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7311 } else {
7312 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7313 /* nothing to do */
7314 }
7315 break;
7316 case 0x63: /* arpl or movslS (x86_64) */
7317 #ifdef TARGET_X86_64
7318 if (CODE64(s)) {
7319 int d_ot;
7320 /* d_ot is the size of destination */
7321 d_ot = dflag + OT_WORD;
7323 modrm = cpu_ldub_code(env, s->pc++);
7324 reg = ((modrm >> 3) & 7) | rex_r;
7325 mod = (modrm >> 6) & 3;
7326 rm = (modrm & 7) | REX_B(s);
7328 if (mod == 3) {
7329 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7330 /* sign extend */
7331 if (d_ot == OT_QUAD)
7332 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7333 gen_op_mov_reg_T0(d_ot, reg);
7334 } else {
7335 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7336 if (d_ot == OT_QUAD) {
7337 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7338 } else {
7339 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7341 gen_op_mov_reg_T0(d_ot, reg);
7342 }
7343 } else
7344 #endif
7345 {
7346 int label1;
7347 TCGv t0, t1, t2, a0;
7349 if (!s->pe || s->vm86)
7350 goto illegal_op;
7351 t0 = tcg_temp_local_new();
7352 t1 = tcg_temp_local_new();
7353 t2 = tcg_temp_local_new();
7354 ot = OT_WORD;
7355 modrm = cpu_ldub_code(env, s->pc++);
7356 reg = (modrm >> 3) & 7;
7357 mod = (modrm >> 6) & 3;
7358 rm = modrm & 7;
7359 if (mod != 3) {
7360 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7361 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7362 a0 = tcg_temp_local_new();
7363 tcg_gen_mov_tl(a0, cpu_A0);
7364 } else {
7365 gen_op_mov_v_reg(ot, t0, rm);
7366 TCGV_UNUSED(a0);
7367 }
7368 gen_op_mov_v_reg(ot, t1, reg);
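/* arpl: if the RPL (low 2 bits) of the destination selector t0
   is below the RPL of the source t1, raise it to t1's and set
   ZF, otherwise clear ZF; t2 holds the CC_Z bit that is merged
   into cc_src below */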
7369 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7370 tcg_gen_andi_tl(t1, t1, 3);
7371 tcg_gen_movi_tl(t2, 0);
7372 label1 = gen_new_label();
7373 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7374 tcg_gen_andi_tl(t0, t0, ~3);
7375 tcg_gen_or_tl(t0, t0, t1);
7376 tcg_gen_movi_tl(t2, CC_Z);
7377 gen_set_label(label1);
7378 if (mod != 3) {
7379 gen_op_st_v(ot + s->mem_index, t0, a0);
7380 tcg_temp_free(a0);
7381 } else {
7382 gen_op_mov_reg_v(ot, rm, t0);
7383 }
7384 gen_compute_eflags(s, cpu_cc_src);
7385 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7386 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7387 tcg_temp_free(t0);
7388 tcg_temp_free(t1);
7389 tcg_temp_free(t2);
7390 }
7391 break;
7392 case 0x102: /* lar */
7393 case 0x103: /* lsl */
7394 {
7395 int label1;
7396 TCGv t0;
7397 if (!s->pe || s->vm86)
7398 goto illegal_op;
7399 ot = dflag ? OT_LONG : OT_WORD;
7400 modrm = cpu_ldub_code(env, s->pc++);
7401 reg = ((modrm >> 3) & 7) | rex_r;
7402 gen_ldst_modrm(env, s, modrm, OT_WORD, OR_TMP0, 0);
7403 t0 = tcg_temp_local_new();
7404 gen_update_cc_op(s);
7405 if (b == 0x102) {
7406 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7407 } else {
7408 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7409 }
7410 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7411 label1 = gen_new_label();
7412 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7413 gen_op_mov_reg_v(ot, reg, t0);
7414 gen_set_label(label1);
7415 set_cc_op(s, CC_OP_EFLAGS);
7416 tcg_temp_free(t0);
7417 }
7418 break;
7419 case 0x118:
7420 modrm = cpu_ldub_code(env, s->pc++);
7421 mod = (modrm >> 6) & 3;
7422 op = (modrm >> 3) & 7;
7423 switch(op) {
7424 case 0: /* prefetchnta */
7425 case 1: /* prefetcht0 */
7426 case 2: /* prefetcht1 */
7427 case 3: /* prefetcht2 */
7428 if (mod == 3)
7429 goto illegal_op;
7430 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7431 /* nothing more to do */
7432 break;
7433 default: /* nop (multi byte) */
7434 gen_nop_modrm(env, s, modrm);
7435 break;
7436 }
7437 break;
7438 case 0x119 ... 0x11f: /* nop (multi byte) */
7439 modrm = cpu_ldub_code(env, s->pc++);
7440 gen_nop_modrm(env, s, modrm);
7441 break;
7442 case 0x120: /* mov reg, crN */
7443 case 0x122: /* mov crN, reg */
7444 if (s->cpl != 0) {
7445 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7446 } else {
7447 modrm = cpu_ldub_code(env, s->pc++);
7448 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7449 * AMD documentation (24594.pdf) and testing of
7450 * intel 386 and 486 processors all show that the mod bits
7451 * are assumed to be 1's, regardless of actual values.
7452 */
7453 rm = (modrm & 7) | REX_B(s);
7454 reg = ((modrm >> 3) & 7) | rex_r;
7455 if (CODE64(s))
7456 ot = OT_QUAD;
7457 else
7458 ot = OT_LONG;
7459 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7460 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7461 reg = 8;
7462 }
7463 switch(reg) {
7464 case 0:
7465 case 2:
7466 case 3:
7467 case 4:
7468 case 8:
7469 gen_update_cc_op(s);
7470 gen_jmp_im(pc_start - s->cs_base);
7471 if (b & 2) {
7472 gen_op_mov_TN_reg(ot, 0, rm);
7473 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7474 cpu_T[0]);
7475 gen_jmp_im(s->pc - s->cs_base);
7476 gen_eob(s);
7477 } else {
7478 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7479 gen_op_mov_reg_T0(ot, rm);
7480 }
7481 break;
7482 default:
7483 goto illegal_op;
7484 }
7485 }
7486 break;
7487 case 0x121: /* mov reg, drN */
7488 case 0x123: /* mov drN, reg */
7489 if (s->cpl != 0) {
7490 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7491 } else {
7492 modrm = cpu_ldub_code(env, s->pc++);
7493 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7494 * AMD documentation (24594.pdf) and testing of
7495 * intel 386 and 486 processors all show that the mod bits
7496 * are assumed to be 1's, regardless of actual values.
7497 */
7498 rm = (modrm & 7) | REX_B(s);
7499 reg = ((modrm >> 3) & 7) | rex_r;
7500 if (CODE64(s))
7501 ot = OT_QUAD;
7502 else
7503 ot = OT_LONG;
7504 /* XXX: do it dynamically with CR4.DE bit */
7505 if (reg == 4 || reg == 5 || reg >= 8)
7506 goto illegal_op;
7507 if (b & 2) {
7508 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7509 gen_op_mov_TN_reg(ot, 0, rm);
7510 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7511 gen_jmp_im(s->pc - s->cs_base);
7512 gen_eob(s);
7513 } else {
7514 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7515 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7516 gen_op_mov_reg_T0(ot, rm);
7517 }
7518 }
7519 break;
7520 case 0x106: /* clts */
7521 if (s->cpl != 0) {
7522 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7523 } else {
7524 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7525 gen_helper_clts(cpu_env);
7526 /* abort block because static cpu state changed */
7527 gen_jmp_im(s->pc - s->cs_base);
7528 gen_eob(s);
7529 }
7530 break;
7531 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7532 case 0x1c3: /* MOVNTI reg, mem */
7533 if (!(s->cpuid_features & CPUID_SSE2))
7534 goto illegal_op;
7535 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7536 modrm = cpu_ldub_code(env, s->pc++);
7537 mod = (modrm >> 6) & 3;
7538 if (mod == 3)
7539 goto illegal_op;
7540 reg = ((modrm >> 3) & 7) | rex_r;
7541 /* generate a generic store */
7542 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7543 break;
7544 case 0x1ae:
7545 modrm = cpu_ldub_code(env, s->pc++);
7546 mod = (modrm >> 6) & 3;
7547 op = (modrm >> 3) & 7;
7548 switch(op) {
7549 case 0: /* fxsave */
7550 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7551 (s->prefix & PREFIX_LOCK))
7552 goto illegal_op;
7553 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7554 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7555 break;
7556 }
7557 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7558 gen_update_cc_op(s);
7559 gen_jmp_im(pc_start - s->cs_base);
7560 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32((s->dflag == 2)));
7561 break;
7562 case 1: /* fxrstor */
7563 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7564 (s->prefix & PREFIX_LOCK))
7565 goto illegal_op;
7566 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7567 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7568 break;
7569 }
7570 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7571 gen_update_cc_op(s);
7572 gen_jmp_im(pc_start - s->cs_base);
7573 gen_helper_fxrstor(cpu_env, cpu_A0,
7574 tcg_const_i32((s->dflag == 2)));
7575 break;
7576 case 2: /* ldmxcsr */
7577 case 3: /* stmxcsr */
7578 if (s->flags & HF_TS_MASK) {
7579 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7580 break;
7581 }
7582 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7583 mod == 3)
7584 goto illegal_op;
7585 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7586 if (op == 2) {
7587 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7588 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7589 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7590 } else {
7591 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7592 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7593 }
7594 break;
7595 case 5: /* lfence */
7596 case 6: /* mfence */
7597 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7598 goto illegal_op;
7599 break;
7600 case 7: /* sfence / clflush */
7601 if ((modrm & 0xc7) == 0xc0) {
7602 /* sfence */
7603 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7604 if (!(s->cpuid_features & CPUID_SSE))
7605 goto illegal_op;
7606 } else {
7607 /* clflush */
7608 if (!(s->cpuid_features & CPUID_CLFLUSH))
7609 goto illegal_op;
7610 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7611 }
7612 break;
7613 default:
7614 goto illegal_op;
7615 }
7616 break;
7617 case 0x10d: /* 3DNow! prefetch(w) */
7618 modrm = cpu_ldub_code(env, s->pc++);
7619 mod = (modrm >> 6) & 3;
7620 if (mod == 3)
7621 goto illegal_op;
7622 gen_lea_modrm(env, s, modrm, &reg_addr, &offset_addr);
7623 /* ignore for now */
7624 break;
7625 case 0x1aa: /* rsm */
7626 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7627 if (!(s->flags & HF_SMM_MASK))
7628 goto illegal_op;
7629 gen_update_cc_op(s);
7630 gen_jmp_im(s->pc - s->cs_base);
7631 gen_helper_rsm(cpu_env);
7632 gen_eob(s);
7633 break;
7634 case 0x1b8: /* SSE4.2 popcnt */
7635 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7636 PREFIX_REPZ)
7637 goto illegal_op;
7638 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7639 goto illegal_op;
7641 modrm = cpu_ldub_code(env, s->pc++);
7642 reg = ((modrm >> 3) & 7) | rex_r;
7644 if (s->prefix & PREFIX_DATA)
7645 ot = OT_WORD;
7646 else if (s->dflag != 2)
7647 ot = OT_LONG;
7648 else
7649 ot = OT_QUAD;
7651 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7652 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7653 gen_op_mov_reg_T0(ot, reg);
7655 set_cc_op(s, CC_OP_EFLAGS);
7656 break;
7657 case 0x10e ... 0x10f:
7658 /* 3DNow! instructions, ignore prefixes */
7659 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7660 case 0x110 ... 0x117:
7661 case 0x128 ... 0x12f:
7662 case 0x138 ... 0x13a:
7663 case 0x150 ... 0x179:
7664 case 0x17c ... 0x17f:
7665 case 0x1c2:
7666 case 0x1c4 ... 0x1c6:
7667 case 0x1d0 ... 0x1fe:
7668 gen_sse(env, s, b, pc_start, rex_r);
7669 break;
7670 default:
7671 goto illegal_op;
7672 }
7673 /* lock generation */
7674 if (s->prefix & PREFIX_LOCK)
7675 gen_helper_unlock();
7676 return s->pc;
7677 illegal_op:
7678 if (s->prefix & PREFIX_LOCK)
7679 gen_helper_unlock();
7680 /* XXX: ensure that no lock was generated */
7681 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7682 return s->pc;
7683 }
7685 void optimize_flags_init(void)
7686 {
7687 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7688 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7689 offsetof(CPUX86State, cc_op), "cc_op");
7690 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7691 "cc_src");
7692 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7693 "cc_dst");
7695 #ifdef TARGET_X86_64
7696 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7697 offsetof(CPUX86State, regs[R_EAX]), "rax");
7698 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7699 offsetof(CPUX86State, regs[R_ECX]), "rcx");
7700 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7701 offsetof(CPUX86State, regs[R_EDX]), "rdx");
7702 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7703 offsetof(CPUX86State, regs[R_EBX]), "rbx");
7704 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7705 offsetof(CPUX86State, regs[R_ESP]), "rsp");
7706 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7707 offsetof(CPUX86State, regs[R_EBP]), "rbp");
7708 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7709 offsetof(CPUX86State, regs[R_ESI]), "rsi");
7710 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7711 offsetof(CPUX86State, regs[R_EDI]), "rdi");
7712 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7713 offsetof(CPUX86State, regs[8]), "r8");
7714 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7715 offsetof(CPUX86State, regs[9]), "r9");
7716 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7717 offsetof(CPUX86State, regs[10]), "r10");
7718 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7719 offsetof(CPUX86State, regs[11]), "r11");
7720 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7721 offsetof(CPUX86State, regs[12]), "r12");
7722 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7723 offsetof(CPUX86State, regs[13]), "r13");
7724 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7725 offsetof(CPUX86State, regs[14]), "r14");
7726 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7727 offsetof(CPUX86State, regs[15]), "r15");
7728 #else
7729 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7730 offsetof(CPUX86State, regs[R_EAX]), "eax");
7731 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7732 offsetof(CPUX86State, regs[R_ECX]), "ecx");
7733 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7734 offsetof(CPUX86State, regs[R_EDX]), "edx");
7735 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7736 offsetof(CPUX86State, regs[R_EBX]), "ebx");
7737 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7738 offsetof(CPUX86State, regs[R_ESP]), "esp");
7739 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7740 offsetof(CPUX86State, regs[R_EBP]), "ebp");
7741 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7742 offsetof(CPUX86State, regs[R_ESI]), "esi");
7743 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7744 offsetof(CPUX86State, regs[R_EDI]), "edi");
7745 #endif
7747 /* register helpers */
7748 #define GEN_HELPER 2
7749 #include "helper.h"
7750 }
7752 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7753 basic block 'tb'. If search_pc is TRUE, also generate PC
7754 information for each intermediate instruction. */
7755 static inline void gen_intermediate_code_internal(CPUX86State *env,
7756 TranslationBlock *tb,
7757 int search_pc)
7758 {
7759 DisasContext dc1, *dc = &dc1;
7760 target_ulong pc_ptr;
7761 uint16_t *gen_opc_end;
7762 CPUBreakpoint *bp;
7763 int j, lj;
7764 uint64_t flags;
7765 target_ulong pc_start;
7766 target_ulong cs_base;
7767 int num_insns;
7768 int max_insns;
7770 /* generate intermediate code */
7771 pc_start = tb->pc;
7772 cs_base = tb->cs_base;
7773 flags = tb->flags;
7775 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7776 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7777 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7778 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7779 dc->f_st = 0;
7780 dc->vm86 = (flags >> VM_SHIFT) & 1;
7781 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7782 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7783 dc->tf = (flags >> TF_SHIFT) & 1;
7784 dc->singlestep_enabled = env->singlestep_enabled;
7785 dc->cc_op = CC_OP_DYNAMIC;
7786 dc->cc_op_dirty = false;
7787 dc->cs_base = cs_base;
7788 dc->tb = tb;
7789 dc->popl_esp_hack = 0;
7790 /* select memory access functions */
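/* mem_index selects the MMU mode for the softmmu load/store
   ops; it stays 0 for user-mode (direct) accesses */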
7791 dc->mem_index = 0;
7792 if (flags & HF_SOFTMMU_MASK) {
7793 dc->mem_index = (cpu_mmu_index(env) + 1) << 2;
7794 }
7795 dc->cpuid_features = env->cpuid_features;
7796 dc->cpuid_ext_features = env->cpuid_ext_features;
7797 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7798 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7799 dc->cpuid_7_0_ebx_features = env->cpuid_7_0_ebx_features;
7800 #ifdef TARGET_X86_64
7801 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7802 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7803 #endif
7804 dc->flags = flags;
7805 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7806 (flags & HF_INHIBIT_IRQ_MASK)
7807 #ifndef CONFIG_SOFTMMU
7808 || (flags & HF_SOFTMMU_MASK)
7809 #endif
7810 );
7811 #if 0
7812 /* check addseg logic */
7813 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7814 printf("ERROR addseg\n");
7815 #endif
7817 cpu_T[0] = tcg_temp_new();
7818 cpu_T[1] = tcg_temp_new();
7819 cpu_A0 = tcg_temp_new();
7820 cpu_T3 = tcg_temp_new();
7822 cpu_tmp0 = tcg_temp_new();
7823 cpu_tmp1_i64 = tcg_temp_new_i64();
7824 cpu_tmp2_i32 = tcg_temp_new_i32();
7825 cpu_tmp3_i32 = tcg_temp_new_i32();
7826 cpu_tmp4 = tcg_temp_new();
7827 cpu_tmp5 = tcg_temp_new();
7828 cpu_ptr0 = tcg_temp_new_ptr();
7829 cpu_ptr1 = tcg_temp_new_ptr();
7831 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
7833 dc->is_jmp = DISAS_NEXT;
7834 pc_ptr = pc_start;
7835 lj = -1;
7836 num_insns = 0;
7837 max_insns = tb->cflags & CF_COUNT_MASK;
7838 if (max_insns == 0)
7839 max_insns = CF_COUNT_MASK;
7841 gen_icount_start();
7842 for(;;) {
7843 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7844 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7845 if (bp->pc == pc_ptr &&
7846 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7847 gen_debug(dc, pc_ptr - dc->cs_base);
7848 break;
7849 }
7850 }
7851 }
7852 if (search_pc) {
7853 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7854 if (lj < j) {
7855 lj++;
7856 while (lj < j)
7857 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7858 }
7859 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
7860 gen_opc_cc_op[lj] = dc->cc_op;
7861 tcg_ctx.gen_opc_instr_start[lj] = 1;
7862 tcg_ctx.gen_opc_icount[lj] = num_insns;
7863 }
7864 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7865 gen_io_start();
7867 pc_ptr = disas_insn(env, dc, pc_ptr);
7868 num_insns++;
7869 /* stop translation if indicated */
7870 if (dc->is_jmp)
7871 break;
7872 /* in single step mode, we generate only one instruction and
7873 then raise an exception */
7874 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7875 the flag and abort the translation to give the irqs a
7876 chance to happen */
7877 if (dc->tf || dc->singlestep_enabled ||
7878 (flags & HF_INHIBIT_IRQ_MASK)) {
7879 gen_jmp_im(pc_ptr - dc->cs_base);
7880 gen_eob(dc);
7881 break;
7882 }
7883 /* if too long translation, stop generation too */
7884 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
7885 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7886 num_insns >= max_insns) {
7887 gen_jmp_im(pc_ptr - dc->cs_base);
7888 gen_eob(dc);
7889 break;
7890 }
7891 if (singlestep) {
7892 gen_jmp_im(pc_ptr - dc->cs_base);
7893 gen_eob(dc);
7894 break;
7895 }
7896 }
7897 if (tb->cflags & CF_LAST_IO)
7898 gen_io_end();
7899 gen_icount_end(tb, num_insns);
7900 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
7901 /* don't forget to fill in the values for the last instruction */
7902 if (search_pc) {
7903 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7904 lj++;
7905 while (lj <= j)
7906 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7907 }
7909 #ifdef DEBUG_DISAS
7910 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7911 int disas_flags;
7912 qemu_log("----------------\n");
7913 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7914 #ifdef TARGET_X86_64
7915 if (dc->code64)
7916 disas_flags = 2;
7917 else
7918 #endif
7919 disas_flags = !dc->code32;
7920 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
7921 qemu_log("\n");
7922 }
7923 #endif
7925 if (!search_pc) {
7926 tb->size = pc_ptr - pc_start;
7927 tb->icount = num_insns;
7928 }
7929 }
7931 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
7932 {
7933 gen_intermediate_code_internal(env, tb, 0);
7934 }
7936 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
7937 {
7938 gen_intermediate_code_internal(env, tb, 1);
7939 }
7941 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
7942 {
7943 int cc_op;
7944 #ifdef DEBUG_DISAS
7945 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
7946 int i;
7947 qemu_log("RESTORE:\n");
7948 for(i = 0;i <= pc_pos; i++) {
7949 if (tcg_ctx.gen_opc_instr_start[i]) {
7950 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
7951 tcg_ctx.gen_opc_pc[i]);
7954 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
7955 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
7956 (uint32_t)tb->cs_base);
7957 }
7958 #endif
7959 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
7960 cc_op = gen_opc_cc_op[pc_pos];
7961 if (cc_op != CC_OP_DYNAMIC)
7962 env->cc_op = cc_op;
7963 }