1 /*
2 * i386 translation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>. */
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
26 #include "cpu.h"
27 #include "disas.h"
28 #include "tcg-op.h"
30 #include "helper.h"
31 #define GEN_HELPER 1
32 #include "helper.h"
34 #define PREFIX_REPZ 0x01
35 #define PREFIX_REPNZ 0x02
36 #define PREFIX_LOCK 0x04
37 #define PREFIX_DATA 0x08
38 #define PREFIX_ADR 0x10
40 #ifdef TARGET_X86_64
41 #define CODE64(s) ((s)->code64)
42 #define REX_X(s) ((s)->rex_x)
43 #define REX_B(s) ((s)->rex_b)
44 #else
45 #define CODE64(s) 0
46 #define REX_X(s) 0
47 #define REX_B(s) 0
48 #endif
50 //#define MACRO_TEST 1
52 /* global register indexes */
53 static TCGv_ptr cpu_env;
54 static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
55 static TCGv_i32 cpu_cc_op;
56 static TCGv cpu_regs[CPU_NB_REGS];
57 /* local temps */
58 static TCGv cpu_T[2], cpu_T3;
59 /* local register indexes (only used inside old micro ops) */
60 static TCGv cpu_tmp0, cpu_tmp4;
61 static TCGv_ptr cpu_ptr0, cpu_ptr1;
62 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
63 static TCGv_i64 cpu_tmp1_i64;
64 static TCGv cpu_tmp5;
66 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
68 #include "gen-icount.h"
70 #ifdef TARGET_X86_64
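/* set while decoding when a REX prefix is present: byte operands 4..7 then
   select the uniform byte registers SPL/BPL/SIL/DIL instead of AH/CH/DH/BH
   (see byte_reg_is_xH below) */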
71 static int x86_64_hregs;
72 #endif
74 typedef struct DisasContext {
75 /* current insn context */
76 int override; /* -1 if no override */
77 int prefix;
78 int aflag, dflag;
79 target_ulong pc; /* pc = eip + cs_base */
80 int is_jmp; /* 1 means jump (stop translation), 2 means CPU
81 static state change (stop translation) */
82 /* current block context */
83 target_ulong cs_base; /* base of CS segment */
84 int pe; /* protected mode */
85 int code32; /* 32 bit code segment */
86 #ifdef TARGET_X86_64
87 int lma; /* long mode active */
88 int code64; /* 64 bit code segment */
89 int rex_x, rex_b;
90 #endif
91 int ss32; /* 32 bit stack segment */
92 int cc_op; /* current CC operation */
93 int addseg; /* non zero if either DS/ES/SS have a non zero base */
94 int f_st; /* currently unused */
95 int vm86; /* vm86 mode */
96 int cpl;
97 int iopl;
98 int tf; /* TF cpu flag */
99 int singlestep_enabled; /* "hardware" single step enabled */
100 int jmp_opt; /* use direct block chaining for direct jumps */
101 int mem_index; /* select memory access functions */
102 uint64_t flags; /* all execution flags */
103 struct TranslationBlock *tb;
104 int popl_esp_hack; /* for correct popl with esp base handling */
105 int rip_offset; /* only used in x86_64, but left for simplicity */
106 int cpuid_features;
107 int cpuid_ext_features;
108 int cpuid_ext2_features;
109 int cpuid_ext3_features;
110 } DisasContext;
112 static void gen_eob(DisasContext *s);
113 static void gen_jmp(DisasContext *s, target_ulong eip);
114 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
116 /* i386 arith/logic operations */
117 enum {
118 OP_ADDL,
119 OP_ORL,
120 OP_ADCL,
121 OP_SBBL,
122 OP_ANDL,
123 OP_SUBL,
124 OP_XORL,
125 OP_CMPL,
128 /* i386 shift ops */
129 enum {
130 OP_ROL,
131 OP_ROR,
132 OP_RCL,
133 OP_RCR,
134 OP_SHL,
135 OP_SHR,
136 OP_SHL1, /* undocumented */
137 OP_SAR = 7,
140 enum {
141 JCC_O,
142 JCC_B,
143 JCC_Z,
144 JCC_BE,
145 JCC_S,
146 JCC_P,
147 JCC_L,
148 JCC_LE,
151 /* operand size */
152 enum {
153 OT_BYTE = 0,
154 OT_WORD,
155 OT_LONG,
156 OT_QUAD,
159 enum {
160 /* I386 int registers */
161 OR_EAX, /* MUST be even numbered */
162 OR_ECX,
163 OR_EDX,
164 OR_EBX,
165 OR_ESP,
166 OR_EBP,
167 OR_ESI,
168 OR_EDI,
170 OR_TMP0 = 16, /* temporary operand register */
171 OR_TMP1,
172 OR_A0, /* temporary register used when doing address evaluation */
175 static inline void gen_op_movl_T0_0(void)
177 tcg_gen_movi_tl(cpu_T[0], 0);
180 static inline void gen_op_movl_T0_im(int32_t val)
182 tcg_gen_movi_tl(cpu_T[0], val);
185 static inline void gen_op_movl_T0_imu(uint32_t val)
187 tcg_gen_movi_tl(cpu_T[0], val);
190 static inline void gen_op_movl_T1_im(int32_t val)
192 tcg_gen_movi_tl(cpu_T[1], val);
195 static inline void gen_op_movl_T1_imu(uint32_t val)
197 tcg_gen_movi_tl(cpu_T[1], val);
200 static inline void gen_op_movl_A0_im(uint32_t val)
202 tcg_gen_movi_tl(cpu_A0, val);
205 #ifdef TARGET_X86_64
206 static inline void gen_op_movq_A0_im(int64_t val)
208 tcg_gen_movi_tl(cpu_A0, val);
210 #endif
212 static inline void gen_movtl_T0_im(target_ulong val)
214 tcg_gen_movi_tl(cpu_T[0], val);
217 static inline void gen_movtl_T1_im(target_ulong val)
219 tcg_gen_movi_tl(cpu_T[1], val);
222 static inline void gen_op_andl_T0_ffff(void)
224 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
227 static inline void gen_op_andl_T0_im(uint32_t val)
229 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
232 static inline void gen_op_movl_T0_T1(void)
234 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
237 static inline void gen_op_andl_A0_ffff(void)
239 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
242 #ifdef TARGET_X86_64
244 #define NB_OP_SIZES 4
246 #else /* !TARGET_X86_64 */
248 #define NB_OP_SIZES 3
250 #endif /* !TARGET_X86_64 */
252 #if defined(HOST_WORDS_BIGENDIAN)
253 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
254 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
255 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
256 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
257 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
258 #else
259 #define REG_B_OFFSET 0
260 #define REG_H_OFFSET 1
261 #define REG_W_OFFSET 0
262 #define REG_L_OFFSET 0
263 #define REG_LH_OFFSET 4
264 #endif
266 /* In instruction encodings for byte register accesses the
267 * register number usually indicates "low 8 bits of register N";
268 * however there are some special cases where N 4..7 indicates
269 * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
270 * true for this special case, false otherwise. */
272 static inline bool byte_reg_is_xH(int reg)
274 if (reg < 4) {
275 return false;
277 #ifdef TARGET_X86_64
278 if (reg >= 8 || x86_64_hregs) {
279 return false;
281 #endif
282 return true;
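/* store t0 into guest register 'reg' with operand size 'ot': byte and word
   stores only modify the low bits of the register (via deposit), 32 bit
   stores zero-extend to 64 bits on x86_64 */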
285 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
287 switch(ot) {
288 case OT_BYTE:
289 if (!byte_reg_is_xH(reg)) {
290 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
291 } else {
292 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
294 break;
295 case OT_WORD:
296 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
297 break;
298 default: /* XXX this shouldn't be reached; abort? */
299 case OT_LONG:
300 /* For x86_64, this sets the higher half of register to zero.
301 For i386, this is equivalent to a mov. */
302 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
303 break;
304 #ifdef TARGET_X86_64
305 case OT_QUAD:
306 tcg_gen_mov_tl(cpu_regs[reg], t0);
307 break;
308 #endif
312 static inline void gen_op_mov_reg_T0(int ot, int reg)
314 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
317 static inline void gen_op_mov_reg_T1(int ot, int reg)
319 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
322 static inline void gen_op_mov_reg_A0(int size, int reg)
324 switch(size) {
325 case 0:
326 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
327 break;
328 default: /* XXX this shouldn't be reached; abort? */
329 case 1:
330 /* For x86_64, this sets the higher half of register to zero.
331 For i386, this is equivalent to a mov. */
332 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
333 break;
334 #ifdef TARGET_X86_64
335 case 2:
336 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
337 break;
338 #endif
342 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
344 if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
345 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
346 tcg_gen_ext8u_tl(t0, t0);
347 } else {
348 tcg_gen_mov_tl(t0, cpu_regs[reg]);
352 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
354 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
357 static inline void gen_op_movl_A0_reg(int reg)
359 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
362 static inline void gen_op_addl_A0_im(int32_t val)
364 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
365 #ifdef TARGET_X86_64
366 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
367 #endif
370 #ifdef TARGET_X86_64
371 static inline void gen_op_addq_A0_im(int64_t val)
373 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
375 #endif
377 static void gen_add_A0_im(DisasContext *s, int val)
379 #ifdef TARGET_X86_64
380 if (CODE64(s))
381 gen_op_addq_A0_im(val);
382 else
383 #endif
384 gen_op_addl_A0_im(val);
387 static inline void gen_op_addl_T0_T1(void)
389 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
392 static inline void gen_op_jmp_T0(void)
394 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
397 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
399 switch(size) {
400 case 0:
401 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
402 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
403 break;
404 case 1:
405 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
406 /* For x86_64, this sets the higher half of register to zero.
407 For i386, this is equivalent to a nop. */
408 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
409 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
410 break;
411 #ifdef TARGET_X86_64
412 case 2:
413 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
414 break;
415 #endif
419 static inline void gen_op_add_reg_T0(int size, int reg)
421 switch(size) {
422 case 0:
423 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
424 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
425 break;
426 case 1:
427 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
428 /* For x86_64, this sets the higher half of register to zero.
429 For i386, this is equivalent to a nop. */
430 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
431 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
432 break;
433 #ifdef TARGET_X86_64
434 case 2:
435 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
436 break;
437 #endif
441 static inline void gen_op_set_cc_op(int32_t val)
443 tcg_gen_movi_i32(cpu_cc_op, val);
446 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
448 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
449 if (shift != 0)
450 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
451 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
452 /* For x86_64, this sets the higher half of register to zero.
453 For i386, this is equivalent to a nop. */
454 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
457 static inline void gen_op_movl_A0_seg(int reg)
459 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
462 static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
464 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
465 #ifdef TARGET_X86_64
466 if (CODE64(s)) {
467 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
468 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
469 } else {
470 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
471 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
473 #else
474 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
475 #endif
478 #ifdef TARGET_X86_64
479 static inline void gen_op_movq_A0_seg(int reg)
481 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
484 static inline void gen_op_addq_A0_seg(int reg)
486 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
487 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
490 static inline void gen_op_movq_A0_reg(int reg)
492 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
495 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
497 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
498 if (shift != 0)
499 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
500 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
502 #endif
504 static inline void gen_op_lds_T0_A0(int idx)
506 int mem_index = (idx >> 2) - 1;
507 switch(idx & 3) {
508 case 0:
509 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
510 break;
511 case 1:
512 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
513 break;
514 default:
515 case 2:
516 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
517 break;
521 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
523 int mem_index = (idx >> 2) - 1;
524 switch(idx & 3) {
525 case 0:
526 tcg_gen_qemu_ld8u(t0, a0, mem_index);
527 break;
528 case 1:
529 tcg_gen_qemu_ld16u(t0, a0, mem_index);
530 break;
531 case 2:
532 tcg_gen_qemu_ld32u(t0, a0, mem_index);
533 break;
534 default:
535 case 3:
536 /* Should never happen on 32-bit targets. */
537 #ifdef TARGET_X86_64
538 tcg_gen_qemu_ld64(t0, a0, mem_index);
539 #endif
540 break;
544 /* XXX: always use ldu or lds */
545 static inline void gen_op_ld_T0_A0(int idx)
547 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
550 static inline void gen_op_ldu_T0_A0(int idx)
552 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
555 static inline void gen_op_ld_T1_A0(int idx)
557 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
560 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
562 int mem_index = (idx >> 2) - 1;
563 switch(idx & 3) {
564 case 0:
565 tcg_gen_qemu_st8(t0, a0, mem_index);
566 break;
567 case 1:
568 tcg_gen_qemu_st16(t0, a0, mem_index);
569 break;
570 case 2:
571 tcg_gen_qemu_st32(t0, a0, mem_index);
572 break;
573 default:
574 case 3:
575 /* Should never happen on 32-bit targets. */
576 #ifdef TARGET_X86_64
577 tcg_gen_qemu_st64(t0, a0, mem_index);
578 #endif
579 break;
583 static inline void gen_op_st_T0_A0(int idx)
585 gen_op_st_v(idx, cpu_T[0], cpu_A0);
588 static inline void gen_op_st_T1_A0(int idx)
590 gen_op_st_v(idx, cpu_T[1], cpu_A0);
593 static inline void gen_jmp_im(target_ulong pc)
595 tcg_gen_movi_tl(cpu_tmp0, pc);
596 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
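/* compute the source address of a string operation into A0: seg:ESI with the
   current address size, honouring segment overrides (DS by default) */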
599 static inline void gen_string_movl_A0_ESI(DisasContext *s)
601 int override;
603 override = s->override;
604 #ifdef TARGET_X86_64
605 if (s->aflag == 2) {
606 if (override >= 0) {
607 gen_op_movq_A0_seg(override);
608 gen_op_addq_A0_reg_sN(0, R_ESI);
609 } else {
610 gen_op_movq_A0_reg(R_ESI);
612 } else
613 #endif
614 if (s->aflag) {
615 /* 32 bit address */
616 if (s->addseg && override < 0)
617 override = R_DS;
618 if (override >= 0) {
619 gen_op_movl_A0_seg(override);
620 gen_op_addl_A0_reg_sN(0, R_ESI);
621 } else {
622 gen_op_movl_A0_reg(R_ESI);
624 } else {
625 /* 16 bit address, always override */
626 if (override < 0)
627 override = R_DS;
628 gen_op_movl_A0_reg(R_ESI);
629 gen_op_andl_A0_ffff();
630 gen_op_addl_A0_seg(s, override);
634 static inline void gen_string_movl_A0_EDI(DisasContext *s)
636 #ifdef TARGET_X86_64
637 if (s->aflag == 2) {
638 gen_op_movq_A0_reg(R_EDI);
639 } else
640 #endif
641 if (s->aflag) {
642 if (s->addseg) {
643 gen_op_movl_A0_seg(R_ES);
644 gen_op_addl_A0_reg_sN(0, R_EDI);
645 } else {
646 gen_op_movl_A0_reg(R_EDI);
648 } else {
649 gen_op_movl_A0_reg(R_EDI);
650 gen_op_andl_A0_ffff();
651 gen_op_addl_A0_seg(s, R_ES);
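/* load the per-iteration index increment into T0: df (stored as +1/-1)
   shifted left by the operand size, giving +/-1, 2, 4 or 8 */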
655 static inline void gen_op_movl_T0_Dshift(int ot)
657 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
658 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
661 static void gen_extu(int ot, TCGv reg)
663 switch(ot) {
664 case OT_BYTE:
665 tcg_gen_ext8u_tl(reg, reg);
666 break;
667 case OT_WORD:
668 tcg_gen_ext16u_tl(reg, reg);
669 break;
670 case OT_LONG:
671 tcg_gen_ext32u_tl(reg, reg);
672 break;
673 default:
674 break;
678 static void gen_exts(int ot, TCGv reg)
680 switch(ot) {
681 case OT_BYTE:
682 tcg_gen_ext8s_tl(reg, reg);
683 break;
684 case OT_WORD:
685 tcg_gen_ext16s_tl(reg, reg);
686 break;
687 case OT_LONG:
688 tcg_gen_ext32s_tl(reg, reg);
689 break;
690 default:
691 break;
695 static inline void gen_op_jnz_ecx(int size, int label1)
697 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
698 gen_extu(size + 1, cpu_tmp0);
699 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
702 static inline void gen_op_jz_ecx(int size, int label1)
704 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
705 gen_extu(size + 1, cpu_tmp0);
706 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
709 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
711 switch (ot) {
712 case 0: gen_helper_inb(v, n); break;
713 case 1: gen_helper_inw(v, n); break;
714 case 2: gen_helper_inl(v, n); break;
719 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
721 switch (ot) {
722 case 0: gen_helper_outb(v, n); break;
723 case 1: gen_helper_outw(v, n); break;
724 case 2: gen_helper_outl(v, n); break;
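/* generate the protection checks needed before an I/O access: the TSS I/O
   permission bitmap when CPL > IOPL or in vm86 mode, and the SVM I/O
   intercept bitmap when running under SVM */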
729 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
730 uint32_t svm_flags)
732 int state_saved;
733 target_ulong next_eip;
735 state_saved = 0;
736 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
737 if (s->cc_op != CC_OP_DYNAMIC)
738 gen_op_set_cc_op(s->cc_op);
739 gen_jmp_im(cur_eip);
740 state_saved = 1;
741 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
742 switch (ot) {
743 case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
744 case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
745 case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
748 if(s->flags & HF_SVMI_MASK) {
749 if (!state_saved) {
750 if (s->cc_op != CC_OP_DYNAMIC)
751 gen_op_set_cc_op(s->cc_op);
752 gen_jmp_im(cur_eip);
754 svm_flags |= (1 << (4 + ot));
755 next_eip = s->pc - s->cs_base;
756 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
757 gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
758 tcg_const_i32(next_eip - cur_eip));
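/* MOVS: load one element from seg:ESI, store it to ES:EDI and advance both
   index registers by the direction flag increment */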
762 static inline void gen_movs(DisasContext *s, int ot)
764 gen_string_movl_A0_ESI(s);
765 gen_op_ld_T0_A0(ot + s->mem_index);
766 gen_string_movl_A0_EDI(s);
767 gen_op_st_T0_A0(ot + s->mem_index);
768 gen_op_movl_T0_Dshift(ot);
769 gen_op_add_reg_T0(s->aflag, R_ESI);
770 gen_op_add_reg_T0(s->aflag, R_EDI);
773 static inline void gen_update_cc_op(DisasContext *s)
775 if (s->cc_op != CC_OP_DYNAMIC) {
776 gen_op_set_cc_op(s->cc_op);
777 s->cc_op = CC_OP_DYNAMIC;
781 static void gen_op_update1_cc(void)
783 tcg_gen_discard_tl(cpu_cc_src);
784 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
787 static void gen_op_update2_cc(void)
789 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
790 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
793 static inline void gen_op_cmpl_T0_T1_cc(void)
795 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
796 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
799 static inline void gen_op_testl_T0_T1_cc(void)
801 tcg_gen_discard_tl(cpu_cc_src);
802 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
805 static void gen_op_update_neg_cc(void)
807 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
808 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
811 /* compute eflags.C to reg */
812 static void gen_compute_eflags_c(TCGv reg)
814 gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
815 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
818 /* compute all eflags into 'reg' (the caller usually passes cc_src) */
819 static void gen_compute_eflags(TCGv reg)
821 gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
822 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
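/* slow path for SETcc/Jcc: compute condition 'jcc_op' into T0 (0 or 1)
   from the full eflags value */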
825 static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
827 if (s->cc_op != CC_OP_DYNAMIC)
828 gen_op_set_cc_op(s->cc_op);
829 switch(jcc_op) {
830 case JCC_O:
831 gen_compute_eflags(cpu_T[0]);
832 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
833 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
834 break;
835 case JCC_B:
836 gen_compute_eflags_c(cpu_T[0]);
837 break;
838 case JCC_Z:
839 gen_compute_eflags(cpu_T[0]);
840 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
841 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
842 break;
843 case JCC_BE:
844 gen_compute_eflags(cpu_tmp0);
845 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
846 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
847 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
848 break;
849 case JCC_S:
850 gen_compute_eflags(cpu_T[0]);
851 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
852 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
853 break;
854 case JCC_P:
855 gen_compute_eflags(cpu_T[0]);
856 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
857 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
858 break;
859 case JCC_L:
860 gen_compute_eflags(cpu_tmp0);
861 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
862 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
863 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
864 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
865 break;
866 default:
867 case JCC_LE:
868 gen_compute_eflags(cpu_tmp0);
869 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
870 tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
871 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
872 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
873 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
874 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
875 break;
879 /* return true if setcc_slow is not needed (WARNING: must be kept in
880 sync with gen_jcc1) */
881 static int is_fast_jcc_case(DisasContext *s, int b)
883 int jcc_op;
884 jcc_op = (b >> 1) & 7;
885 switch(s->cc_op) {
886 /* we optimize the cmp/jcc case */
887 case CC_OP_SUBB:
888 case CC_OP_SUBW:
889 case CC_OP_SUBL:
890 case CC_OP_SUBQ:
891 if (jcc_op == JCC_O || jcc_op == JCC_P)
892 goto slow_jcc;
893 break;
895 /* some jumps are easy to compute */
896 case CC_OP_ADDB:
897 case CC_OP_ADDW:
898 case CC_OP_ADDL:
899 case CC_OP_ADDQ:
901 case CC_OP_LOGICB:
902 case CC_OP_LOGICW:
903 case CC_OP_LOGICL:
904 case CC_OP_LOGICQ:
906 case CC_OP_INCB:
907 case CC_OP_INCW:
908 case CC_OP_INCL:
909 case CC_OP_INCQ:
911 case CC_OP_DECB:
912 case CC_OP_DECW:
913 case CC_OP_DECL:
914 case CC_OP_DECQ:
916 case CC_OP_SHLB:
917 case CC_OP_SHLW:
918 case CC_OP_SHLL:
919 case CC_OP_SHLQ:
920 if (jcc_op != JCC_Z && jcc_op != JCC_S)
921 goto slow_jcc;
922 break;
923 default:
924 slow_jcc:
925 return 0;
927 return 1;
930 /* generate a conditional jump to label 'l1' according to jump opcode
931 value 'b'. In the fast case, T0 is guaranteed not to be used. */
932 static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
934 int inv, jcc_op, size, cond;
935 TCGv t0;
937 inv = b & 1;
938 jcc_op = (b >> 1) & 7;
940 switch(cc_op) {
941 /* we optimize the cmp/jcc case */
942 case CC_OP_SUBB:
943 case CC_OP_SUBW:
944 case CC_OP_SUBL:
945 case CC_OP_SUBQ:
947 size = cc_op - CC_OP_SUBB;
948 switch(jcc_op) {
949 case JCC_Z:
950 fast_jcc_z:
951 switch(size) {
952 case 0:
953 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
954 t0 = cpu_tmp0;
955 break;
956 case 1:
957 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
958 t0 = cpu_tmp0;
959 break;
960 #ifdef TARGET_X86_64
961 case 2:
962 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
963 t0 = cpu_tmp0;
964 break;
965 #endif
966 default:
967 t0 = cpu_cc_dst;
968 break;
970 tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
971 break;
972 case JCC_S:
973 fast_jcc_s:
974 switch(size) {
975 case 0:
976 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
977 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
978 0, l1);
979 break;
980 case 1:
981 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
982 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
983 0, l1);
984 break;
985 #ifdef TARGET_X86_64
986 case 2:
987 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
988 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
989 0, l1);
990 break;
991 #endif
992 default:
993 tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
994 0, l1);
995 break;
997 break;
999 case JCC_B:
1000 cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
1001 goto fast_jcc_b;
1002 case JCC_BE:
1003 cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
1004 fast_jcc_b:
1005 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1006 switch(size) {
1007 case 0:
1008 t0 = cpu_tmp0;
1009 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
1010 tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
1011 break;
1012 case 1:
1013 t0 = cpu_tmp0;
1014 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
1015 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
1016 break;
1017 #ifdef TARGET_X86_64
1018 case 2:
1019 t0 = cpu_tmp0;
1020 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
1021 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
1022 break;
1023 #endif
1024 default:
1025 t0 = cpu_cc_src;
1026 break;
1028 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1029 break;
1031 case JCC_L:
1032 cond = inv ? TCG_COND_GE : TCG_COND_LT;
1033 goto fast_jcc_l;
1034 case JCC_LE:
1035 cond = inv ? TCG_COND_GT : TCG_COND_LE;
1036 fast_jcc_l:
1037 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1038 switch(size) {
1039 case 0:
1040 t0 = cpu_tmp0;
1041 tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
1042 tcg_gen_ext8s_tl(t0, cpu_cc_src);
1043 break;
1044 case 1:
1045 t0 = cpu_tmp0;
1046 tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
1047 tcg_gen_ext16s_tl(t0, cpu_cc_src);
1048 break;
1049 #ifdef TARGET_X86_64
1050 case 2:
1051 t0 = cpu_tmp0;
1052 tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
1053 tcg_gen_ext32s_tl(t0, cpu_cc_src);
1054 break;
1055 #endif
1056 default:
1057 t0 = cpu_cc_src;
1058 break;
1060 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1061 break;
1063 default:
1064 goto slow_jcc;
1066 break;
1068 /* some jumps are easy to compute */
1069 case CC_OP_ADDB:
1070 case CC_OP_ADDW:
1071 case CC_OP_ADDL:
1072 case CC_OP_ADDQ:
1074 case CC_OP_ADCB:
1075 case CC_OP_ADCW:
1076 case CC_OP_ADCL:
1077 case CC_OP_ADCQ:
1079 case CC_OP_SBBB:
1080 case CC_OP_SBBW:
1081 case CC_OP_SBBL:
1082 case CC_OP_SBBQ:
1084 case CC_OP_LOGICB:
1085 case CC_OP_LOGICW:
1086 case CC_OP_LOGICL:
1087 case CC_OP_LOGICQ:
1089 case CC_OP_INCB:
1090 case CC_OP_INCW:
1091 case CC_OP_INCL:
1092 case CC_OP_INCQ:
1094 case CC_OP_DECB:
1095 case CC_OP_DECW:
1096 case CC_OP_DECL:
1097 case CC_OP_DECQ:
1099 case CC_OP_SHLB:
1100 case CC_OP_SHLW:
1101 case CC_OP_SHLL:
1102 case CC_OP_SHLQ:
1104 case CC_OP_SARB:
1105 case CC_OP_SARW:
1106 case CC_OP_SARL:
1107 case CC_OP_SARQ:
1108 switch(jcc_op) {
1109 case JCC_Z:
1110 size = (cc_op - CC_OP_ADDB) & 3;
1111 goto fast_jcc_z;
1112 case JCC_S:
1113 size = (cc_op - CC_OP_ADDB) & 3;
1114 goto fast_jcc_s;
1115 default:
1116 goto slow_jcc;
1118 break;
1119 default:
1120 slow_jcc:
1121 gen_setcc_slow_T0(s, jcc_op);
1122 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
1123 cpu_T[0], 0, l1);
1124 break;
1128 /* XXX: does not work with gdbstub "ice" single step - not a
1129 serious problem */
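/* if ECX is zero, leave the string loop by jumping to next_eip; otherwise
   fall through to the string operation. Returns the exit label so that
   callers can branch to it when the loop terminates. */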
1130 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1132 int l1, l2;
1134 l1 = gen_new_label();
1135 l2 = gen_new_label();
1136 gen_op_jnz_ecx(s->aflag, l1);
1137 gen_set_label(l2);
1138 gen_jmp_tb(s, next_eip, 1);
1139 gen_set_label(l1);
1140 return l2;
1143 static inline void gen_stos(DisasContext *s, int ot)
1145 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1146 gen_string_movl_A0_EDI(s);
1147 gen_op_st_T0_A0(ot + s->mem_index);
1148 gen_op_movl_T0_Dshift(ot);
1149 gen_op_add_reg_T0(s->aflag, R_EDI);
1152 static inline void gen_lods(DisasContext *s, int ot)
1154 gen_string_movl_A0_ESI(s);
1155 gen_op_ld_T0_A0(ot + s->mem_index);
1156 gen_op_mov_reg_T0(ot, R_EAX);
1157 gen_op_movl_T0_Dshift(ot);
1158 gen_op_add_reg_T0(s->aflag, R_ESI);
1161 static inline void gen_scas(DisasContext *s, int ot)
1163 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1164 gen_string_movl_A0_EDI(s);
1165 gen_op_ld_T1_A0(ot + s->mem_index);
1166 gen_op_cmpl_T0_T1_cc();
1167 gen_op_movl_T0_Dshift(ot);
1168 gen_op_add_reg_T0(s->aflag, R_EDI);
1171 static inline void gen_cmps(DisasContext *s, int ot)
1173 gen_string_movl_A0_ESI(s);
1174 gen_op_ld_T0_A0(ot + s->mem_index);
1175 gen_string_movl_A0_EDI(s);
1176 gen_op_ld_T1_A0(ot + s->mem_index);
1177 gen_op_cmpl_T0_T1_cc();
1178 gen_op_movl_T0_Dshift(ot);
1179 gen_op_add_reg_T0(s->aflag, R_ESI);
1180 gen_op_add_reg_T0(s->aflag, R_EDI);
1183 static inline void gen_ins(DisasContext *s, int ot)
1185 if (use_icount)
1186 gen_io_start();
1187 gen_string_movl_A0_EDI(s);
1188 /* Note: we must do this dummy write first to be restartable in
1189 case of page fault. */
1190 gen_op_movl_T0_0();
1191 gen_op_st_T0_A0(ot + s->mem_index);
1192 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1193 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1194 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1195 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1196 gen_op_st_T0_A0(ot + s->mem_index);
1197 gen_op_movl_T0_Dshift(ot);
1198 gen_op_add_reg_T0(s->aflag, R_EDI);
1199 if (use_icount)
1200 gen_io_end();
1203 static inline void gen_outs(DisasContext *s, int ot)
1205 if (use_icount)
1206 gen_io_start();
1207 gen_string_movl_A0_ESI(s);
1208 gen_op_ld_T0_A0(ot + s->mem_index);
1210 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1211 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1212 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1213 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1214 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1216 gen_op_movl_T0_Dshift(ot);
1217 gen_op_add_reg_T0(s->aflag, R_ESI);
1218 if (use_icount)
1219 gen_io_end();
1222 /* same method as Valgrind: we generate jumps to the current or next
1223 instruction */
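/* GEN_REPZ expands to the REP handler for MOVS/STOS/LODS/INS/OUTS: one
   iteration per executed block, decrementing ECX and looping back via
   gen_jmp. GEN_REPZ2 is the CMPS/SCAS variant which also re-tests ZF after
   each iteration so that REPZ/REPNZ stop when the comparison result no
   longer matches 'nz'. */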
1224 #define GEN_REPZ(op) \
1225 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1226 target_ulong cur_eip, target_ulong next_eip) \
1228 int l2;\
1229 gen_update_cc_op(s); \
1230 l2 = gen_jz_ecx_string(s, next_eip); \
1231 gen_ ## op(s, ot); \
1232 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1233 /* a loop would cause two single step exceptions if ECX = 1 \
1234 before rep string_insn */ \
1235 if (!s->jmp_opt) \
1236 gen_op_jz_ecx(s->aflag, l2); \
1237 gen_jmp(s, cur_eip); \
1240 #define GEN_REPZ2(op) \
1241 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1242 target_ulong cur_eip, \
1243 target_ulong next_eip, \
1244 int nz) \
1246 int l2;\
1247 gen_update_cc_op(s); \
1248 l2 = gen_jz_ecx_string(s, next_eip); \
1249 gen_ ## op(s, ot); \
1250 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1251 gen_op_set_cc_op(CC_OP_SUBB + ot); \
1252 gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \
1253 if (!s->jmp_opt) \
1254 gen_op_jz_ecx(s->aflag, l2); \
1255 gen_jmp(s, cur_eip); \
1258 GEN_REPZ(movs)
1259 GEN_REPZ(stos)
1260 GEN_REPZ(lods)
1261 GEN_REPZ(ins)
1262 GEN_REPZ(outs)
1263 GEN_REPZ2(scas)
1264 GEN_REPZ2(cmps)
1266 static void gen_helper_fp_arith_ST0_FT0(int op)
1268 switch (op) {
1269 case 0: gen_helper_fadd_ST0_FT0(); break;
1270 case 1: gen_helper_fmul_ST0_FT0(); break;
1271 case 2: gen_helper_fcom_ST0_FT0(); break;
1272 case 3: gen_helper_fcom_ST0_FT0(); break;
1273 case 4: gen_helper_fsub_ST0_FT0(); break;
1274 case 5: gen_helper_fsubr_ST0_FT0(); break;
1275 case 6: gen_helper_fdiv_ST0_FT0(); break;
1276 case 7: gen_helper_fdivr_ST0_FT0(); break;
1280 /* NOTE the exception in "r" op ordering */
1281 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1283 TCGv_i32 tmp = tcg_const_i32(opreg);
1284 switch (op) {
1285 case 0: gen_helper_fadd_STN_ST0(tmp); break;
1286 case 1: gen_helper_fmul_STN_ST0(tmp); break;
1287 case 4: gen_helper_fsubr_STN_ST0(tmp); break;
1288 case 5: gen_helper_fsub_STN_ST0(tmp); break;
1289 case 6: gen_helper_fdivr_STN_ST0(tmp); break;
1290 case 7: gen_helper_fdiv_STN_ST0(tmp); break;
1294 /* if d == OR_TMP0, it means memory operand (address in A0) */
1295 static void gen_op(DisasContext *s1, int op, int ot, int d)
1297 if (d != OR_TMP0) {
1298 gen_op_mov_TN_reg(ot, 0, d);
1299 } else {
1300 gen_op_ld_T0_A0(ot + s1->mem_index);
1302 switch(op) {
1303 case OP_ADCL:
1304 if (s1->cc_op != CC_OP_DYNAMIC)
1305 gen_op_set_cc_op(s1->cc_op);
1306 gen_compute_eflags_c(cpu_tmp4);
1307 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1308 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1309 if (d != OR_TMP0)
1310 gen_op_mov_reg_T0(ot, d);
1311 else
1312 gen_op_st_T0_A0(ot + s1->mem_index);
1313 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1314 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1315 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1316 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1317 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1318 s1->cc_op = CC_OP_DYNAMIC;
1319 break;
1320 case OP_SBBL:
1321 if (s1->cc_op != CC_OP_DYNAMIC)
1322 gen_op_set_cc_op(s1->cc_op);
1323 gen_compute_eflags_c(cpu_tmp4);
1324 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1325 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1326 if (d != OR_TMP0)
1327 gen_op_mov_reg_T0(ot, d);
1328 else
1329 gen_op_st_T0_A0(ot + s1->mem_index);
1330 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1331 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1332 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1333 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1334 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1335 s1->cc_op = CC_OP_DYNAMIC;
1336 break;
1337 case OP_ADDL:
1338 gen_op_addl_T0_T1();
1339 if (d != OR_TMP0)
1340 gen_op_mov_reg_T0(ot, d);
1341 else
1342 gen_op_st_T0_A0(ot + s1->mem_index);
1343 gen_op_update2_cc();
1344 s1->cc_op = CC_OP_ADDB + ot;
1345 break;
1346 case OP_SUBL:
1347 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1348 if (d != OR_TMP0)
1349 gen_op_mov_reg_T0(ot, d);
1350 else
1351 gen_op_st_T0_A0(ot + s1->mem_index);
1352 gen_op_update2_cc();
1353 s1->cc_op = CC_OP_SUBB + ot;
1354 break;
1355 default:
1356 case OP_ANDL:
1357 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1358 if (d != OR_TMP0)
1359 gen_op_mov_reg_T0(ot, d);
1360 else
1361 gen_op_st_T0_A0(ot + s1->mem_index);
1362 gen_op_update1_cc();
1363 s1->cc_op = CC_OP_LOGICB + ot;
1364 break;
1365 case OP_ORL:
1366 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1367 if (d != OR_TMP0)
1368 gen_op_mov_reg_T0(ot, d);
1369 else
1370 gen_op_st_T0_A0(ot + s1->mem_index);
1371 gen_op_update1_cc();
1372 s1->cc_op = CC_OP_LOGICB + ot;
1373 break;
1374 case OP_XORL:
1375 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1376 if (d != OR_TMP0)
1377 gen_op_mov_reg_T0(ot, d);
1378 else
1379 gen_op_st_T0_A0(ot + s1->mem_index);
1380 gen_op_update1_cc();
1381 s1->cc_op = CC_OP_LOGICB + ot;
1382 break;
1383 case OP_CMPL:
1384 gen_op_cmpl_T0_T1_cc();
1385 s1->cc_op = CC_OP_SUBB + ot;
1386 break;
1390 /* if d == OR_TMP0, it means memory operand (address in A0) */
1391 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1393 if (d != OR_TMP0)
1394 gen_op_mov_TN_reg(ot, 0, d);
1395 else
1396 gen_op_ld_T0_A0(ot + s1->mem_index);
1397 if (s1->cc_op != CC_OP_DYNAMIC)
1398 gen_op_set_cc_op(s1->cc_op);
1399 if (c > 0) {
1400 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1401 s1->cc_op = CC_OP_INCB + ot;
1402 } else {
1403 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1404 s1->cc_op = CC_OP_DECB + ot;
1406 if (d != OR_TMP0)
1407 gen_op_mov_reg_T0(ot, d);
1408 else
1409 gen_op_st_T0_A0(ot + s1->mem_index);
1410 gen_compute_eflags_c(cpu_cc_src);
1411 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
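/* SHL/SHR/SAR with a variable count taken from T1. The flags must be left
   unchanged when the masked count is zero, so the flag update is guarded by
   a run-time branch and cc_op becomes CC_OP_DYNAMIC. */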
1414 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1415 int is_right, int is_arith)
1417 target_ulong mask;
1418 int shift_label;
1419 TCGv t0, t1, t2;
1421 if (ot == OT_QUAD) {
1422 mask = 0x3f;
1423 } else {
1424 mask = 0x1f;
1427 /* load */
1428 if (op1 == OR_TMP0) {
1429 gen_op_ld_T0_A0(ot + s->mem_index);
1430 } else {
1431 gen_op_mov_TN_reg(ot, 0, op1);
1434 t0 = tcg_temp_local_new();
1435 t1 = tcg_temp_local_new();
1436 t2 = tcg_temp_local_new();
1438 tcg_gen_andi_tl(t2, cpu_T[1], mask);
1440 if (is_right) {
1441 if (is_arith) {
1442 gen_exts(ot, cpu_T[0]);
1443 tcg_gen_mov_tl(t0, cpu_T[0]);
1444 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
1445 } else {
1446 gen_extu(ot, cpu_T[0]);
1447 tcg_gen_mov_tl(t0, cpu_T[0]);
1448 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
1450 } else {
1451 tcg_gen_mov_tl(t0, cpu_T[0]);
1452 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
1455 /* store */
1456 if (op1 == OR_TMP0) {
1457 gen_op_st_T0_A0(ot + s->mem_index);
1458 } else {
1459 gen_op_mov_reg_T0(ot, op1);
1462 /* update eflags if non zero shift */
1463 if (s->cc_op != CC_OP_DYNAMIC) {
1464 gen_op_set_cc_op(s->cc_op);
1467 tcg_gen_mov_tl(t1, cpu_T[0]);
1469 shift_label = gen_new_label();
1470 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
1472 tcg_gen_addi_tl(t2, t2, -1);
1473 tcg_gen_mov_tl(cpu_cc_dst, t1);
1475 if (is_right) {
1476 if (is_arith) {
1477 tcg_gen_sar_tl(cpu_cc_src, t0, t2);
1478 } else {
1479 tcg_gen_shr_tl(cpu_cc_src, t0, t2);
1481 } else {
1482 tcg_gen_shl_tl(cpu_cc_src, t0, t2);
1485 if (is_right) {
1486 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1487 } else {
1488 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1491 gen_set_label(shift_label);
1492 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1494 tcg_temp_free(t0);
1495 tcg_temp_free(t1);
1496 tcg_temp_free(t2);
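/* immediate-count version of the above; the zero count case is known at
   translation time, so no run-time branch is needed */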
1499 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1500 int is_right, int is_arith)
1502 int mask;
1504 if (ot == OT_QUAD)
1505 mask = 0x3f;
1506 else
1507 mask = 0x1f;
1509 /* load */
1510 if (op1 == OR_TMP0)
1511 gen_op_ld_T0_A0(ot + s->mem_index);
1512 else
1513 gen_op_mov_TN_reg(ot, 0, op1);
1515 op2 &= mask;
1516 if (op2 != 0) {
1517 if (is_right) {
1518 if (is_arith) {
1519 gen_exts(ot, cpu_T[0]);
1520 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1521 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1522 } else {
1523 gen_extu(ot, cpu_T[0]);
1524 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1525 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1527 } else {
1528 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1529 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1533 /* store */
1534 if (op1 == OR_TMP0)
1535 gen_op_st_T0_A0(ot + s->mem_index);
1536 else
1537 gen_op_mov_reg_T0(ot, op1);
1539 /* update eflags if non zero shift */
1540 if (op2 != 0) {
1541 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1542 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1543 if (is_right)
1544 s->cc_op = CC_OP_SARB + ot;
1545 else
1546 s->cc_op = CC_OP_SHLB + ot;
1550 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1552 if (arg2 >= 0)
1553 tcg_gen_shli_tl(ret, arg1, arg2);
1554 else
1555 tcg_gen_shri_tl(ret, arg1, -arg2);
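/* ROL/ROR with a variable count from T1, built from two shifts ORed
   together; CF and OF are only recomputed when the masked count is
   non-zero */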
1558 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1559 int is_right)
1561 target_ulong mask;
1562 int label1, label2, data_bits;
1563 TCGv t0, t1, t2, a0;
1565 /* XXX: inefficient, but we must use local temps */
1566 t0 = tcg_temp_local_new();
1567 t1 = tcg_temp_local_new();
1568 t2 = tcg_temp_local_new();
1569 a0 = tcg_temp_local_new();
1571 if (ot == OT_QUAD)
1572 mask = 0x3f;
1573 else
1574 mask = 0x1f;
1576 /* load */
1577 if (op1 == OR_TMP0) {
1578 tcg_gen_mov_tl(a0, cpu_A0);
1579 gen_op_ld_v(ot + s->mem_index, t0, a0);
1580 } else {
1581 gen_op_mov_v_reg(ot, t0, op1);
1584 tcg_gen_mov_tl(t1, cpu_T[1]);
1586 tcg_gen_andi_tl(t1, t1, mask);
1588 /* Must test zero case to avoid using undefined behaviour in TCG
1589 shifts. */
1590 label1 = gen_new_label();
1591 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1593 if (ot <= OT_WORD)
1594 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1595 else
1596 tcg_gen_mov_tl(cpu_tmp0, t1);
1598 gen_extu(ot, t0);
1599 tcg_gen_mov_tl(t2, t0);
1601 data_bits = 8 << ot;
1602 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1603 fix TCG definition) */
1604 if (is_right) {
1605 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1606 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1607 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1608 } else {
1609 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1610 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1611 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1613 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1615 gen_set_label(label1);
1616 /* store */
1617 if (op1 == OR_TMP0) {
1618 gen_op_st_v(ot + s->mem_index, t0, a0);
1619 } else {
1620 gen_op_mov_reg_v(ot, op1, t0);
1623 /* update eflags */
1624 if (s->cc_op != CC_OP_DYNAMIC)
1625 gen_op_set_cc_op(s->cc_op);
1627 label2 = gen_new_label();
1628 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1630 gen_compute_eflags(cpu_cc_src);
1631 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1632 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1633 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1634 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1635 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1636 if (is_right) {
1637 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1639 tcg_gen_andi_tl(t0, t0, CC_C);
1640 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1642 tcg_gen_discard_tl(cpu_cc_dst);
1643 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1645 gen_set_label(label2);
1646 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1648 tcg_temp_free(t0);
1649 tcg_temp_free(t1);
1650 tcg_temp_free(t2);
1651 tcg_temp_free(a0);
1654 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1655 int is_right)
1657 int mask;
1658 int data_bits;
1659 TCGv t0, t1, a0;
1661 /* XXX: inefficient, but we must use local temps */
1662 t0 = tcg_temp_local_new();
1663 t1 = tcg_temp_local_new();
1664 a0 = tcg_temp_local_new();
1666 if (ot == OT_QUAD)
1667 mask = 0x3f;
1668 else
1669 mask = 0x1f;
1671 /* load */
1672 if (op1 == OR_TMP0) {
1673 tcg_gen_mov_tl(a0, cpu_A0);
1674 gen_op_ld_v(ot + s->mem_index, t0, a0);
1675 } else {
1676 gen_op_mov_v_reg(ot, t0, op1);
1679 gen_extu(ot, t0);
1680 tcg_gen_mov_tl(t1, t0);
1682 op2 &= mask;
1683 data_bits = 8 << ot;
1684 if (op2 != 0) {
1685 int shift = op2 & ((1 << (3 + ot)) - 1);
1686 if (is_right) {
1687 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1688 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1690 else {
1691 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1692 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1694 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1697 /* store */
1698 if (op1 == OR_TMP0) {
1699 gen_op_st_v(ot + s->mem_index, t0, a0);
1700 } else {
1701 gen_op_mov_reg_v(ot, op1, t0);
1704 if (op2 != 0) {
1705 /* update eflags */
1706 if (s->cc_op != CC_OP_DYNAMIC)
1707 gen_op_set_cc_op(s->cc_op);
1709 gen_compute_eflags(cpu_cc_src);
1710 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1711 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1712 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1713 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1714 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1715 if (is_right) {
1716 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1718 tcg_gen_andi_tl(t0, t0, CC_C);
1719 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1721 tcg_gen_discard_tl(cpu_cc_dst);
1722 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1723 s->cc_op = CC_OP_EFLAGS;
1726 tcg_temp_free(t0);
1727 tcg_temp_free(t1);
1728 tcg_temp_free(a0);
1731 /* XXX: add faster immediate = 1 case */
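/* RCL/RCR (rotate through carry) are done in helpers; the flags recorded by
   the helper in cc_tmp are committed afterwards unless cc_tmp was left at -1,
   meaning the flags were not modified */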
1732 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1733 int is_right)
1735 int label1;
1737 if (s->cc_op != CC_OP_DYNAMIC)
1738 gen_op_set_cc_op(s->cc_op);
1740 /* load */
1741 if (op1 == OR_TMP0)
1742 gen_op_ld_T0_A0(ot + s->mem_index);
1743 else
1744 gen_op_mov_TN_reg(ot, 0, op1);
1746 if (is_right) {
1747 switch (ot) {
1748 case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1749 case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1750 case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1751 #ifdef TARGET_X86_64
1752 case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1753 #endif
1755 } else {
1756 switch (ot) {
1757 case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1758 case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1759 case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1760 #ifdef TARGET_X86_64
1761 case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1762 #endif
1765 /* store */
1766 if (op1 == OR_TMP0)
1767 gen_op_st_T0_A0(ot + s->mem_index);
1768 else
1769 gen_op_mov_reg_T0(ot, op1);
1771 /* update eflags */
1772 label1 = gen_new_label();
1773 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);
1775 tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
1776 tcg_gen_discard_tl(cpu_cc_dst);
1777 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1779 gen_set_label(label1);
1780 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1783 /* XXX: add faster immediate case */
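/* SHLD/SHRD double precision shifts: the second operand is in T1 and the
   shift count in T3; for 16 bit operands the Intel behaviour is used for
   counts larger than 16 */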
1784 static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
1785 int is_right)
1787 int label1, label2, data_bits;
1788 target_ulong mask;
1789 TCGv t0, t1, t2, a0;
1791 t0 = tcg_temp_local_new();
1792 t1 = tcg_temp_local_new();
1793 t2 = tcg_temp_local_new();
1794 a0 = tcg_temp_local_new();
1796 if (ot == OT_QUAD)
1797 mask = 0x3f;
1798 else
1799 mask = 0x1f;
1801 /* load */
1802 if (op1 == OR_TMP0) {
1803 tcg_gen_mov_tl(a0, cpu_A0);
1804 gen_op_ld_v(ot + s->mem_index, t0, a0);
1805 } else {
1806 gen_op_mov_v_reg(ot, t0, op1);
1809 tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);
1811 tcg_gen_mov_tl(t1, cpu_T[1]);
1812 tcg_gen_mov_tl(t2, cpu_T3);
1814 /* Must test zero case to avoid using undefined behaviour in TCG
1815 shifts. */
1816 label1 = gen_new_label();
1817 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1819 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1820 if (ot == OT_WORD) {
1821 /* Note: we implement the Intel behaviour for shift count > 16 */
1822 if (is_right) {
1823 tcg_gen_andi_tl(t0, t0, 0xffff);
1824 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1825 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1826 tcg_gen_ext32u_tl(t0, t0);
1828 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1830 /* only needed if count > 16, but a test would complicate the code */
1831 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1832 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1834 tcg_gen_shr_tl(t0, t0, t2);
1836 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1837 } else {
1838 /* XXX: not optimal */
1839 tcg_gen_andi_tl(t0, t0, 0xffff);
1840 tcg_gen_shli_tl(t1, t1, 16);
1841 tcg_gen_or_tl(t1, t1, t0);
1842 tcg_gen_ext32u_tl(t1, t1);
1844 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1845 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1846 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1847 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1849 tcg_gen_shl_tl(t0, t0, t2);
1850 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1851 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1852 tcg_gen_or_tl(t0, t0, t1);
1854 } else {
1855 data_bits = 8 << ot;
1856 if (is_right) {
1857 if (ot == OT_LONG)
1858 tcg_gen_ext32u_tl(t0, t0);
1860 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1862 tcg_gen_shr_tl(t0, t0, t2);
1863 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1864 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1865 tcg_gen_or_tl(t0, t0, t1);
1867 } else {
1868 if (ot == OT_LONG)
1869 tcg_gen_ext32u_tl(t1, t1);
1871 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1873 tcg_gen_shl_tl(t0, t0, t2);
1874 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1875 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1876 tcg_gen_or_tl(t0, t0, t1);
1879 tcg_gen_mov_tl(t1, cpu_tmp4);
1881 gen_set_label(label1);
1882 /* store */
1883 if (op1 == OR_TMP0) {
1884 gen_op_st_v(ot + s->mem_index, t0, a0);
1885 } else {
1886 gen_op_mov_reg_v(ot, op1, t0);
1889 /* update eflags */
1890 if (s->cc_op != CC_OP_DYNAMIC)
1891 gen_op_set_cc_op(s->cc_op);
1893 label2 = gen_new_label();
1894 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
1896 tcg_gen_mov_tl(cpu_cc_src, t1);
1897 tcg_gen_mov_tl(cpu_cc_dst, t0);
1898 if (is_right) {
1899 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1900 } else {
1901 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1903 gen_set_label(label2);
1904 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1906 tcg_temp_free(t0);
1907 tcg_temp_free(t1);
1908 tcg_temp_free(t2);
1909 tcg_temp_free(a0);
1912 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1914 if (s != OR_TMP1)
1915 gen_op_mov_TN_reg(ot, 1, s);
1916 switch(op) {
1917 case OP_ROL:
1918 gen_rot_rm_T1(s1, ot, d, 0);
1919 break;
1920 case OP_ROR:
1921 gen_rot_rm_T1(s1, ot, d, 1);
1922 break;
1923 case OP_SHL:
1924 case OP_SHL1:
1925 gen_shift_rm_T1(s1, ot, d, 0, 0);
1926 break;
1927 case OP_SHR:
1928 gen_shift_rm_T1(s1, ot, d, 1, 0);
1929 break;
1930 case OP_SAR:
1931 gen_shift_rm_T1(s1, ot, d, 1, 1);
1932 break;
1933 case OP_RCL:
1934 gen_rotc_rm_T1(s1, ot, d, 0);
1935 break;
1936 case OP_RCR:
1937 gen_rotc_rm_T1(s1, ot, d, 1);
1938 break;
1942 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
1944 switch(op) {
1945 case OP_ROL:
1946 gen_rot_rm_im(s1, ot, d, c, 0);
1947 break;
1948 case OP_ROR:
1949 gen_rot_rm_im(s1, ot, d, c, 1);
1950 break;
1951 case OP_SHL:
1952 case OP_SHL1:
1953 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1954 break;
1955 case OP_SHR:
1956 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1957 break;
1958 case OP_SAR:
1959 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1960 break;
1961 default:
1962 /* currently not optimized */
1963 gen_op_movl_T1_im(c);
1964 gen_shift(s1, op, ot, d, OR_TMP1);
1965 break;
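/* decode a ModRM (and optional SIB) memory operand and leave its effective
   address in A0, adding the segment base when addseg or an explicit override
   requires it */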
1969 static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
1971 target_long disp;
1972 int havesib;
1973 int base;
1974 int index;
1975 int scale;
1976 int opreg;
1977 int mod, rm, code, override, must_add_seg;
1979 override = s->override;
1980 must_add_seg = s->addseg;
1981 if (override >= 0)
1982 must_add_seg = 1;
1983 mod = (modrm >> 6) & 3;
1984 rm = modrm & 7;
1986 if (s->aflag) {
1988 havesib = 0;
1989 base = rm;
1990 index = 0;
1991 scale = 0;
1993 if (base == 4) {
1994 havesib = 1;
1995 code = ldub_code(s->pc++);
1996 scale = (code >> 6) & 3;
1997 index = ((code >> 3) & 7) | REX_X(s);
1998 base = (code & 7);
2000 base |= REX_B(s);
2002 switch (mod) {
2003 case 0:
2004 if ((base & 7) == 5) {
2005 base = -1;
2006 disp = (int32_t)ldl_code(s->pc);
2007 s->pc += 4;
2008 if (CODE64(s) && !havesib) {
2009 disp += s->pc + s->rip_offset;
2011 } else {
2012 disp = 0;
2014 break;
2015 case 1:
2016 disp = (int8_t)ldub_code(s->pc++);
2017 break;
2018 default:
2019 case 2:
2020 disp = (int32_t)ldl_code(s->pc);
2021 s->pc += 4;
2022 break;
2025 if (base >= 0) {
2026 /* for correct popl handling with esp */
2027 if (base == 4 && s->popl_esp_hack)
2028 disp += s->popl_esp_hack;
2029 #ifdef TARGET_X86_64
2030 if (s->aflag == 2) {
2031 gen_op_movq_A0_reg(base);
2032 if (disp != 0) {
2033 gen_op_addq_A0_im(disp);
2035 } else
2036 #endif
2038 gen_op_movl_A0_reg(base);
2039 if (disp != 0)
2040 gen_op_addl_A0_im(disp);
2042 } else {
2043 #ifdef TARGET_X86_64
2044 if (s->aflag == 2) {
2045 gen_op_movq_A0_im(disp);
2046 } else
2047 #endif
2049 gen_op_movl_A0_im(disp);
2052 /* index == 4 means no index */
2053 if (havesib && (index != 4)) {
2054 #ifdef TARGET_X86_64
2055 if (s->aflag == 2) {
2056 gen_op_addq_A0_reg_sN(scale, index);
2057 } else
2058 #endif
2060 gen_op_addl_A0_reg_sN(scale, index);
2063 if (must_add_seg) {
2064 if (override < 0) {
2065 if (base == R_EBP || base == R_ESP)
2066 override = R_SS;
2067 else
2068 override = R_DS;
2070 #ifdef TARGET_X86_64
2071 if (s->aflag == 2) {
2072 gen_op_addq_A0_seg(override);
2073 } else
2074 #endif
2076 gen_op_addl_A0_seg(s, override);
2079 } else {
2080 switch (mod) {
2081 case 0:
2082 if (rm == 6) {
2083 disp = lduw_code(s->pc);
2084 s->pc += 2;
2085 gen_op_movl_A0_im(disp);
2086 rm = 0; /* avoid SS override */
2087 goto no_rm;
2088 } else {
2089 disp = 0;
2091 break;
2092 case 1:
2093 disp = (int8_t)ldub_code(s->pc++);
2094 break;
2095 default:
2096 case 2:
2097 disp = lduw_code(s->pc);
2098 s->pc += 2;
2099 break;
2101 switch(rm) {
2102 case 0:
2103 gen_op_movl_A0_reg(R_EBX);
2104 gen_op_addl_A0_reg_sN(0, R_ESI);
2105 break;
2106 case 1:
2107 gen_op_movl_A0_reg(R_EBX);
2108 gen_op_addl_A0_reg_sN(0, R_EDI);
2109 break;
2110 case 2:
2111 gen_op_movl_A0_reg(R_EBP);
2112 gen_op_addl_A0_reg_sN(0, R_ESI);
2113 break;
2114 case 3:
2115 gen_op_movl_A0_reg(R_EBP);
2116 gen_op_addl_A0_reg_sN(0, R_EDI);
2117 break;
2118 case 4:
2119 gen_op_movl_A0_reg(R_ESI);
2120 break;
2121 case 5:
2122 gen_op_movl_A0_reg(R_EDI);
2123 break;
2124 case 6:
2125 gen_op_movl_A0_reg(R_EBP);
2126 break;
2127 default:
2128 case 7:
2129 gen_op_movl_A0_reg(R_EBX);
2130 break;
2132 if (disp != 0)
2133 gen_op_addl_A0_im(disp);
2134 gen_op_andl_A0_ffff();
2135 no_rm:
2136 if (must_add_seg) {
2137 if (override < 0) {
2138 if (rm == 2 || rm == 3 || rm == 6)
2139 override = R_SS;
2140 else
2141 override = R_DS;
2143 gen_op_addl_A0_seg(s, override);
2147 opreg = OR_A0;
2148 disp = 0;
2149 *reg_ptr = opreg;
2150 *offset_ptr = disp;
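/* skip the memory operand of a ModRM encoding without generating any code;
   used when the operand only has to be decoded, not accessed (e.g. hinting
   NOPs) */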
2153 static void gen_nop_modrm(DisasContext *s, int modrm)
2155 int mod, rm, base, code;
2157 mod = (modrm >> 6) & 3;
2158 if (mod == 3)
2159 return;
2160 rm = modrm & 7;
2162 if (s->aflag) {
2164 base = rm;
2166 if (base == 4) {
2167 code = ldub_code(s->pc++);
2168 base = (code & 7);
2171 switch (mod) {
2172 case 0:
2173 if (base == 5) {
2174 s->pc += 4;
2176 break;
2177 case 1:
2178 s->pc++;
2179 break;
2180 default:
2181 case 2:
2182 s->pc += 4;
2183 break;
2185 } else {
2186 switch (mod) {
2187 case 0:
2188 if (rm == 6) {
2189 s->pc += 2;
2191 break;
2192 case 1:
2193 s->pc++;
2194 break;
2195 default:
2196 case 2:
2197 s->pc += 2;
2198 break;
2203 /* used for LEA and MOV AX, mem */
2204 static void gen_add_A0_ds_seg(DisasContext *s)
2206 int override, must_add_seg;
2207 must_add_seg = s->addseg;
2208 override = R_DS;
2209 if (s->override >= 0) {
2210 override = s->override;
2211 must_add_seg = 1;
2213 if (must_add_seg) {
2214 #ifdef TARGET_X86_64
2215 if (CODE64(s)) {
2216 gen_op_addq_A0_seg(override);
2217 } else
2218 #endif
2220 gen_op_addl_A0_seg(s, override);
2225 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2226 OR_TMP0 */
2227 static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
2229 int mod, rm, opreg, disp;
2231 mod = (modrm >> 6) & 3;
2232 rm = (modrm & 7) | REX_B(s);
2233 if (mod == 3) {
2234 if (is_store) {
2235 if (reg != OR_TMP0)
2236 gen_op_mov_TN_reg(ot, 0, reg);
2237 gen_op_mov_reg_T0(ot, rm);
2238 } else {
2239 gen_op_mov_TN_reg(ot, 0, rm);
2240 if (reg != OR_TMP0)
2241 gen_op_mov_reg_T0(ot, reg);
2243 } else {
2244 gen_lea_modrm(s, modrm, &opreg, &disp);
2245 if (is_store) {
2246 if (reg != OR_TMP0)
2247 gen_op_mov_TN_reg(ot, 0, reg);
2248 gen_op_st_T0_A0(ot + s->mem_index);
2249 } else {
2250 gen_op_ld_T0_A0(ot + s->mem_index);
2251 if (reg != OR_TMP0)
2252 gen_op_mov_reg_T0(ot, reg);
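/* fetch an immediate operand of size 'ot' from the instruction stream and
   advance s->pc */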
2257 static inline uint32_t insn_get(DisasContext *s, int ot)
2259 uint32_t ret;
2261 switch(ot) {
2262 case OT_BYTE:
2263 ret = ldub_code(s->pc);
2264 s->pc++;
2265 break;
2266 case OT_WORD:
2267 ret = lduw_code(s->pc);
2268 s->pc += 2;
2269 break;
2270 default:
2271 case OT_LONG:
2272 ret = ldl_code(s->pc);
2273 s->pc += 4;
2274 break;
2276 return ret;
2279 static inline int insn_const_size(unsigned int ot)
2281 if (ot <= OT_LONG)
2282 return 1 << ot;
2283 else
2284 return 4;
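/* end the block with a jump to 'eip', using direct TB chaining when the
   destination stays within the pages covered by the current TB, and a full
   gen_eob otherwise */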
2287 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2289 TranslationBlock *tb;
2290 target_ulong pc;
2292 pc = s->cs_base + eip;
2293 tb = s->tb;
2294 /* NOTE: we handle the case where the TB spans two pages here */
2295 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2296 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2297 /* jump to same page: we can use a direct jump */
2298 tcg_gen_goto_tb(tb_num);
2299 gen_jmp_im(eip);
2300 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
2301 } else {
2302 /* jump to another page: currently not optimized */
2303 gen_jmp_im(eip);
2304 gen_eob(s);
2308 static inline void gen_jcc(DisasContext *s, int b,
2309 target_ulong val, target_ulong next_eip)
2311 int l1, l2, cc_op;
2313 cc_op = s->cc_op;
2314 gen_update_cc_op(s);
2315 if (s->jmp_opt) {
2316 l1 = gen_new_label();
2317 gen_jcc1(s, cc_op, b, l1);
2319 gen_goto_tb(s, 0, next_eip);
2321 gen_set_label(l1);
2322 gen_goto_tb(s, 1, val);
2323 s->is_jmp = DISAS_TB_JUMP;
2324 } else {
2326 l1 = gen_new_label();
2327 l2 = gen_new_label();
2328 gen_jcc1(s, cc_op, b, l1);
2330 gen_jmp_im(next_eip);
2331 tcg_gen_br(l2);
2333 gen_set_label(l1);
2334 gen_jmp_im(val);
2335 gen_set_label(l2);
2336 gen_eob(s);
2340 static void gen_setcc(DisasContext *s, int b)
2342 int inv, jcc_op, l1;
2343 TCGv t0;
2345 if (is_fast_jcc_case(s, b)) {
2346 /* nominal case: we use a jump */
2347 /* XXX: make it faster by adding new instructions in TCG */
2348 t0 = tcg_temp_local_new();
2349 tcg_gen_movi_tl(t0, 0);
2350 l1 = gen_new_label();
2351 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2352 tcg_gen_movi_tl(t0, 1);
2353 gen_set_label(l1);
2354 tcg_gen_mov_tl(cpu_T[0], t0);
2355 tcg_temp_free(t0);
2356 } else {
2357 /* slow case: it is more efficient not to generate a jump,
2358 although it is questionable whether this optimization is
2359 worthwhile */
2360 inv = b & 1;
2361 jcc_op = (b >> 1) & 7;
2362 gen_setcc_slow_T0(s, jcc_op);
2363 if (inv) {
2364 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2369 static inline void gen_op_movl_T0_seg(int seg_reg)
2371 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2372 offsetof(CPUX86State,segs[seg_reg].selector));
2375 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2377 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2378 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2379 offsetof(CPUX86State,segs[seg_reg].selector));
2380 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2381 tcg_gen_st_tl(cpu_T[0], cpu_env,
2382 offsetof(CPUX86State,segs[seg_reg].base));
2385 /* move T0 to seg_reg and compute if the CPU state may change. Never
2386 call this function with seg_reg == R_CS */
2387 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2389 if (s->pe && !s->vm86) {
2390 /* XXX: optimize by finding processor state dynamically */
2391 if (s->cc_op != CC_OP_DYNAMIC)
2392 gen_op_set_cc_op(s->cc_op);
2393 gen_jmp_im(cur_eip);
2394 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2395 gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
2396 /* abort translation because the addseg value may change or
2397 because ss32 may change. For R_SS, translation must always
2398 stop, as special handling is needed to disable hardware
2399 interrupts for the next instruction */
2400 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2401 s->is_jmp = DISAS_TB_JUMP;
2402 } else {
2403 gen_op_movl_seg_T0_vm(seg_reg);
2404 if (seg_reg == R_SS)
2405 s->is_jmp = DISAS_TB_JUMP;
2409 static inline int svm_is_rep(int prefixes)
2411 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2414 static inline void
2415 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2416 uint32_t type, uint64_t param)
2418 /* no SVM activated; fast case */
2419 if (likely(!(s->flags & HF_SVMI_MASK)))
2420 return;
2421 if (s->cc_op != CC_OP_DYNAMIC)
2422 gen_op_set_cc_op(s->cc_op);
2423 gen_jmp_im(pc_start - s->cs_base);
2424 gen_helper_svm_check_intercept_param(tcg_const_i32(type),
2425 tcg_const_i64(param));
2428 static inline void
2429 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2431 gen_svm_check_intercept_param(s, pc_start, type, 0);
2434 static inline void gen_stack_update(DisasContext *s, int addend)
2436 #ifdef TARGET_X86_64
2437 if (CODE64(s)) {
2438 gen_op_add_reg_im(2, R_ESP, addend);
2439 } else
2440 #endif
2441 if (s->ss32) {
2442 gen_op_add_reg_im(1, R_ESP, addend);
2443 } else {
2444 gen_op_add_reg_im(0, R_ESP, addend);
2448 /* generate a push. It depends on ss32, addseg and dflag */
2449 static void gen_push_T0(DisasContext *s)
2451 #ifdef TARGET_X86_64
2452 if (CODE64(s)) {
2453 gen_op_movq_A0_reg(R_ESP);
2454 if (s->dflag) {
2455 gen_op_addq_A0_im(-8);
2456 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2457 } else {
2458 gen_op_addq_A0_im(-2);
2459 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2461 gen_op_mov_reg_A0(2, R_ESP);
2462 } else
2463 #endif
2465 gen_op_movl_A0_reg(R_ESP);
2466 if (!s->dflag)
2467 gen_op_addl_A0_im(-2);
2468 else
2469 gen_op_addl_A0_im(-4);
2470 if (s->ss32) {
2471 if (s->addseg) {
2472 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2473 gen_op_addl_A0_seg(s, R_SS);
2475 } else {
2476 gen_op_andl_A0_ffff();
2477 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2478 gen_op_addl_A0_seg(s, R_SS);
2480 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2481 if (s->ss32 && !s->addseg)
2482 gen_op_mov_reg_A0(1, R_ESP);
2483 else
2484 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
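/* Rough sketch of what the code above emits for the common flat 32-bit
 * stack case (ss32 && !addseg); illustrative pseudo-ops, not actual TCG:
 *
 *     A0   = ESP - (dflag ? 4 : 2);   // pre-decrement into A0
 *     [A0] = T0;                      // store with operand size dflag + 1
 *     ESP  = A0;                      // commit the new stack pointer
 *
 * The addseg and 16-bit paths differ only in adding the SS base and in
 * writing ESP back through T1 (with 16-bit masking when !ss32). */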
2488 /* generate a push. It depends on ss32, addseg and dflag */
2489 /* slower version for T1, only used for call Ev */
2490 static void gen_push_T1(DisasContext *s)
2492 #ifdef TARGET_X86_64
2493 if (CODE64(s)) {
2494 gen_op_movq_A0_reg(R_ESP);
2495 if (s->dflag) {
2496 gen_op_addq_A0_im(-8);
2497 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2498 } else {
2499 gen_op_addq_A0_im(-2);
2500 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2502 gen_op_mov_reg_A0(2, R_ESP);
2503 } else
2504 #endif
2506 gen_op_movl_A0_reg(R_ESP);
2507 if (!s->dflag)
2508 gen_op_addl_A0_im(-2);
2509 else
2510 gen_op_addl_A0_im(-4);
2511 if (s->ss32) {
2512 if (s->addseg) {
2513 gen_op_addl_A0_seg(s, R_SS);
2515 } else {
2516 gen_op_andl_A0_ffff();
2517 gen_op_addl_A0_seg(s, R_SS);
2519 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2521 if (s->ss32 && !s->addseg)
2522 gen_op_mov_reg_A0(1, R_ESP);
2523 else
2524 gen_stack_update(s, (-2) << s->dflag);
2528 /* a two-step pop is necessary for precise exceptions */
2529 static void gen_pop_T0(DisasContext *s)
2531 #ifdef TARGET_X86_64
2532 if (CODE64(s)) {
2533 gen_op_movq_A0_reg(R_ESP);
2534 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2535 } else
2536 #endif
2538 gen_op_movl_A0_reg(R_ESP);
2539 if (s->ss32) {
2540 if (s->addseg)
2541 gen_op_addl_A0_seg(s, R_SS);
2542 } else {
2543 gen_op_andl_A0_ffff();
2544 gen_op_addl_A0_seg(s, R_SS);
2546 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2550 static void gen_pop_update(DisasContext *s)
2552 #ifdef TARGET_X86_64
2553 if (CODE64(s) && s->dflag) {
2554 gen_stack_update(s, 8);
2555 } else
2556 #endif
2558 gen_stack_update(s, 2 << s->dflag);
2562 static void gen_stack_A0(DisasContext *s)
2564 gen_op_movl_A0_reg(R_ESP);
2565 if (!s->ss32)
2566 gen_op_andl_A0_ffff();
2567 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2568 if (s->addseg)
2569 gen_op_addl_A0_seg(s, R_SS);
2572 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2573 static void gen_pusha(DisasContext *s)
2575 int i;
2576 gen_op_movl_A0_reg(R_ESP);
2577 gen_op_addl_A0_im(-16 << s->dflag);
2578 if (!s->ss32)
2579 gen_op_andl_A0_ffff();
2580 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2581 if (s->addseg)
2582 gen_op_addl_A0_seg(s, R_SS);
2583 for(i = 0;i < 8; i++) {
2584 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2585 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2586 gen_op_addl_A0_im(2 << s->dflag);
2588 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2591 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2592 static void gen_popa(DisasContext *s)
2594 int i;
2595 gen_op_movl_A0_reg(R_ESP);
2596 if (!s->ss32)
2597 gen_op_andl_A0_ffff();
2598 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2599 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2600 if (s->addseg)
2601 gen_op_addl_A0_seg(s, R_SS);
2602 for(i = 0;i < 8; i++) {
2603 /* ESP is not reloaded */
2604 if (i != 3) {
2605 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2606 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2608 gen_op_addl_A0_im(2 << s->dflag);
2610 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2613 static void gen_enter(DisasContext *s, int esp_addend, int level)
2615 int ot, opsize;
2617 level &= 0x1f;
2618 #ifdef TARGET_X86_64
2619 if (CODE64(s)) {
2620 ot = s->dflag ? OT_QUAD : OT_WORD;
2621 opsize = 1 << ot;
2623 gen_op_movl_A0_reg(R_ESP);
2624 gen_op_addq_A0_im(-opsize);
2625 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2627 /* push bp */
2628 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2629 gen_op_st_T0_A0(ot + s->mem_index);
2630 if (level) {
2631 /* XXX: must save state */
2632 gen_helper_enter64_level(tcg_const_i32(level),
2633 tcg_const_i32((ot == OT_QUAD)),
2634 cpu_T[1]);
2636 gen_op_mov_reg_T1(ot, R_EBP);
2637 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2638 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2639 } else
2640 #endif
2642 ot = s->dflag + OT_WORD;
2643 opsize = 2 << s->dflag;
2645 gen_op_movl_A0_reg(R_ESP);
2646 gen_op_addl_A0_im(-opsize);
2647 if (!s->ss32)
2648 gen_op_andl_A0_ffff();
2649 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2650 if (s->addseg)
2651 gen_op_addl_A0_seg(s, R_SS);
2652 /* push bp */
2653 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2654 gen_op_st_T0_A0(ot + s->mem_index);
2655 if (level) {
2656 /* XXX: must save state */
2657 gen_helper_enter_level(tcg_const_i32(level),
2658 tcg_const_i32(s->dflag),
2659 cpu_T[1]);
2661 gen_op_mov_reg_T1(ot, R_EBP);
2662 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2663 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
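/* Net-effect sketch for the 32-bit path with level == 0 (illustrative;
 * esp_addend is the first immediate of ENTER imm16, imm8):
 *
 *     ESP -= opsize;            // make room
 *     [SS:ESP] = EBP;           // push the old frame pointer
 *     EBP  = ESP;               // T1 above becomes the new frame pointer
 *     ESP  = EBP - esp_addend;  // reserve the local frame
 *
 * Non-zero nesting levels additionally copy `level` saved frame pointers
 * via gen_helper_enter_level()/gen_helper_enter64_level(). */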
2667 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2669 if (s->cc_op != CC_OP_DYNAMIC)
2670 gen_op_set_cc_op(s->cc_op);
2671 gen_jmp_im(cur_eip);
2672 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2673 s->is_jmp = DISAS_TB_JUMP;
2676 /* an interrupt is different from an exception because of the
2677 privilege checks */
2678 static void gen_interrupt(DisasContext *s, int intno,
2679 target_ulong cur_eip, target_ulong next_eip)
2681 if (s->cc_op != CC_OP_DYNAMIC)
2682 gen_op_set_cc_op(s->cc_op);
2683 gen_jmp_im(cur_eip);
2684 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2685 tcg_const_i32(next_eip - cur_eip));
2686 s->is_jmp = DISAS_TB_JUMP;
2689 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2691 if (s->cc_op != CC_OP_DYNAMIC)
2692 gen_op_set_cc_op(s->cc_op);
2693 gen_jmp_im(cur_eip);
2694 gen_helper_debug();
2695 s->is_jmp = DISAS_TB_JUMP;
2698 /* generate a generic end of block. A trace exception is also generated
2699 if needed */
2700 static void gen_eob(DisasContext *s)
2702 if (s->cc_op != CC_OP_DYNAMIC)
2703 gen_op_set_cc_op(s->cc_op);
2704 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2705 gen_helper_reset_inhibit_irq();
2707 if (s->tb->flags & HF_RF_MASK) {
2708 gen_helper_reset_rf();
2710 if (s->singlestep_enabled) {
2711 gen_helper_debug();
2712 } else if (s->tf) {
2713 gen_helper_single_step();
2714 } else {
2715 tcg_gen_exit_tb(0);
2717 s->is_jmp = DISAS_TB_JUMP;
2720 /* generate a jump to eip. No segment change must happen before this, as a
2721 direct jump to the next block may occur */
2722 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2724 if (s->jmp_opt) {
2725 gen_update_cc_op(s);
2726 gen_goto_tb(s, tb_num, eip);
2727 s->is_jmp = DISAS_TB_JUMP;
2728 } else {
2729 gen_jmp_im(eip);
2730 gen_eob(s);
2734 static void gen_jmp(DisasContext *s, target_ulong eip)
2736 gen_jmp_tb(s, eip, 0);
2739 static inline void gen_ldq_env_A0(int idx, int offset)
2741 int mem_index = (idx >> 2) - 1;
2742 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2743 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2746 static inline void gen_stq_env_A0(int idx, int offset)
2748 int mem_index = (idx >> 2) - 1;
2749 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2750 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2753 static inline void gen_ldo_env_A0(int idx, int offset)
2755 int mem_index = (idx >> 2) - 1;
2756 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2757 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2758 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2759 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2760 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2763 static inline void gen_sto_env_A0(int idx, int offset)
2765 int mem_index = (idx >> 2) - 1;
2766 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2767 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2768 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2769 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2770 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2773 static inline void gen_op_movo(int d_offset, int s_offset)
2775 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2776 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2777 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2778 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2781 static inline void gen_op_movq(int d_offset, int s_offset)
2783 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2784 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2787 static inline void gen_op_movl(int d_offset, int s_offset)
2789 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2790 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2793 static inline void gen_op_movq_env_0(int d_offset)
2795 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2796 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2799 typedef void (*SSEFunc_i_p)(TCGv_i32 val, TCGv_ptr reg);
2800 typedef void (*SSEFunc_l_p)(TCGv_i64 val, TCGv_ptr reg);
2801 typedef void (*SSEFunc_0_pi)(TCGv_ptr reg, TCGv_i32 val);
2802 typedef void (*SSEFunc_0_pl)(TCGv_ptr reg, TCGv_i64 val);
2803 typedef void (*SSEFunc_0_pp)(TCGv_ptr reg_a, TCGv_ptr reg_b);
2804 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2805 typedef void (*SSEFunc_0_ppt)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv val);
2807 #define SSE_SPECIAL ((void *)1)
2808 #define SSE_DUMMY ((void *)2)
2810 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2811 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2812 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
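/* Expansion example (illustrative only): MMX_OP2(paddb) yields
 *     { gen_helper_paddb_mmx, gen_helper_paddb_xmm }
 * and SSE_FOP(add) yields
 *     { gen_helper_addps, gen_helper_addpd, gen_helper_addss, gen_helper_addsd }
 * matching the four prefix columns (none, 0x66, 0xF3, 0xF2) used to index
 * sse_op_table1 below. */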
2814 static const SSEFunc_0_pp sse_op_table1[256][4] = {
2815 /* 3DNow! extensions */
2816 [0x0e] = { SSE_DUMMY }, /* femms */
2817 [0x0f] = { SSE_DUMMY }, /* pf... */
2818 /* pure SSE operations */
2819 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2820 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2821 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2822 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2823 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2824 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2825 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2826 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2828 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2829 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2830 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2831 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2832 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2833 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2834 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2835 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2836 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2837 [0x51] = SSE_FOP(sqrt),
2838 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2839 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2840 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2841 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2842 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2843 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2844 [0x58] = SSE_FOP(add),
2845 [0x59] = SSE_FOP(mul),
2846 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2847 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2848 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2849 [0x5c] = SSE_FOP(sub),
2850 [0x5d] = SSE_FOP(min),
2851 [0x5e] = SSE_FOP(div),
2852 [0x5f] = SSE_FOP(max),
2854 [0xc2] = SSE_FOP(cmpeq),
2855 [0xc6] = { (SSEFunc_0_pp)gen_helper_shufps,
2856 (SSEFunc_0_pp)gen_helper_shufpd }, /* XXX: casts */
2858 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2859 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2861 /* MMX ops and their SSE extensions */
2862 [0x60] = MMX_OP2(punpcklbw),
2863 [0x61] = MMX_OP2(punpcklwd),
2864 [0x62] = MMX_OP2(punpckldq),
2865 [0x63] = MMX_OP2(packsswb),
2866 [0x64] = MMX_OP2(pcmpgtb),
2867 [0x65] = MMX_OP2(pcmpgtw),
2868 [0x66] = MMX_OP2(pcmpgtl),
2869 [0x67] = MMX_OP2(packuswb),
2870 [0x68] = MMX_OP2(punpckhbw),
2871 [0x69] = MMX_OP2(punpckhwd),
2872 [0x6a] = MMX_OP2(punpckhdq),
2873 [0x6b] = MMX_OP2(packssdw),
2874 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2875 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2876 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2877 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2878 [0x70] = { (SSEFunc_0_pp)gen_helper_pshufw_mmx,
2879 (SSEFunc_0_pp)gen_helper_pshufd_xmm,
2880 (SSEFunc_0_pp)gen_helper_pshufhw_xmm,
2881 (SSEFunc_0_pp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2882 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2883 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2884 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2885 [0x74] = MMX_OP2(pcmpeqb),
2886 [0x75] = MMX_OP2(pcmpeqw),
2887 [0x76] = MMX_OP2(pcmpeql),
2888 [0x77] = { SSE_DUMMY }, /* emms */
2889 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2890 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2891 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2892 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2893 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2894 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2895 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2896 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2897 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2898 [0xd1] = MMX_OP2(psrlw),
2899 [0xd2] = MMX_OP2(psrld),
2900 [0xd3] = MMX_OP2(psrlq),
2901 [0xd4] = MMX_OP2(paddq),
2902 [0xd5] = MMX_OP2(pmullw),
2903 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2904 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2905 [0xd8] = MMX_OP2(psubusb),
2906 [0xd9] = MMX_OP2(psubusw),
2907 [0xda] = MMX_OP2(pminub),
2908 [0xdb] = MMX_OP2(pand),
2909 [0xdc] = MMX_OP2(paddusb),
2910 [0xdd] = MMX_OP2(paddusw),
2911 [0xde] = MMX_OP2(pmaxub),
2912 [0xdf] = MMX_OP2(pandn),
2913 [0xe0] = MMX_OP2(pavgb),
2914 [0xe1] = MMX_OP2(psraw),
2915 [0xe2] = MMX_OP2(psrad),
2916 [0xe3] = MMX_OP2(pavgw),
2917 [0xe4] = MMX_OP2(pmulhuw),
2918 [0xe5] = MMX_OP2(pmulhw),
2919 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2920 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2921 [0xe8] = MMX_OP2(psubsb),
2922 [0xe9] = MMX_OP2(psubsw),
2923 [0xea] = MMX_OP2(pminsw),
2924 [0xeb] = MMX_OP2(por),
2925 [0xec] = MMX_OP2(paddsb),
2926 [0xed] = MMX_OP2(paddsw),
2927 [0xee] = MMX_OP2(pmaxsw),
2928 [0xef] = MMX_OP2(pxor),
2929 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2930 [0xf1] = MMX_OP2(psllw),
2931 [0xf2] = MMX_OP2(pslld),
2932 [0xf3] = MMX_OP2(psllq),
2933 [0xf4] = MMX_OP2(pmuludq),
2934 [0xf5] = MMX_OP2(pmaddwd),
2935 [0xf6] = MMX_OP2(psadbw),
2936 [0xf7] = { (SSEFunc_0_pp)gen_helper_maskmov_mmx,
2937 (SSEFunc_0_pp)gen_helper_maskmov_xmm }, /* XXX: casts */
2938 [0xf8] = MMX_OP2(psubb),
2939 [0xf9] = MMX_OP2(psubw),
2940 [0xfa] = MMX_OP2(psubl),
2941 [0xfb] = MMX_OP2(psubq),
2942 [0xfc] = MMX_OP2(paddb),
2943 [0xfd] = MMX_OP2(paddw),
2944 [0xfe] = MMX_OP2(paddl),
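/* Lookup example (illustrative): for the instruction 66 0F 58 /r (ADDPD),
 * gen_sse() is entered with b = 0x58 and, because of the 0x66 prefix,
 * b1 = 1, so sse_op_table1[0x58][1] picks gen_helper_addpd out of the
 * SSE_FOP(add) row above. */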
2947 static const SSEFunc_0_pp sse_op_table2[3 * 8][2] = {
2948 [0 + 2] = MMX_OP2(psrlw),
2949 [0 + 4] = MMX_OP2(psraw),
2950 [0 + 6] = MMX_OP2(psllw),
2951 [8 + 2] = MMX_OP2(psrld),
2952 [8 + 4] = MMX_OP2(psrad),
2953 [8 + 6] = MMX_OP2(pslld),
2954 [16 + 2] = MMX_OP2(psrlq),
2955 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2956 [16 + 6] = MMX_OP2(psllq),
2957 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
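/* Index sketch (illustrative): gen_sse() consults this table as
 *     sse_op_table2[((b - 1) & 3) * 8 + ((modrm >> 3) & 7)][b1]
 * e.g. 0F 71 /2 ib (PSRLW mm, imm8) gives ((0x71 - 1) & 3) * 8 + 2 == 2,
 * the MMX_OP2(psrlw) entry, while 66 0F 73 /3 ib (PSRLDQ) lands on
 * 16 + 3, which only has an _xmm helper. */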
2960 static const SSEFunc_0_pi sse_op_table3ai[] = {
2961 gen_helper_cvtsi2ss,
2962 gen_helper_cvtsi2sd
2965 #ifdef TARGET_X86_64
2966 static const SSEFunc_0_pl sse_op_table3aq[] = {
2967 gen_helper_cvtsq2ss,
2968 gen_helper_cvtsq2sd
2970 #endif
2972 static const SSEFunc_i_p sse_op_table3bi[] = {
2973 gen_helper_cvttss2si,
2974 gen_helper_cvtss2si,
2975 gen_helper_cvttsd2si,
2976 gen_helper_cvtsd2si
2979 #ifdef TARGET_X86_64
2980 static const SSEFunc_l_p sse_op_table3bq[] = {
2981 gen_helper_cvttss2sq,
2982 gen_helper_cvtss2sq,
2983 gen_helper_cvttsd2sq,
2984 gen_helper_cvtsd2sq
2986 #endif
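/* Selection sketch (illustrative): gen_sse() indexes these conversion
 * tables as sse_op_table3ai/aq[(b >> 8) & 1] and
 * sse_op_table3bi/bq[((b >> 7) & 2) | (b & 1)], so F3 0F 2C (CVTTSS2SI)
 * maps to index 0 and F2 0F 2D (CVTSD2SI) to index 3; the *aq/*bq variants
 * are the 64-bit register forms used when dflag == 2. */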
2988 static const SSEFunc_0_pp sse_op_table4[8][4] = {
2989 SSE_FOP(cmpeq),
2990 SSE_FOP(cmplt),
2991 SSE_FOP(cmple),
2992 SSE_FOP(cmpunord),
2993 SSE_FOP(cmpneq),
2994 SSE_FOP(cmpnlt),
2995 SSE_FOP(cmpnle),
2996 SSE_FOP(cmpord),
2999 static const SSEFunc_0_pp sse_op_table5[256] = {
3000 [0x0c] = gen_helper_pi2fw,
3001 [0x0d] = gen_helper_pi2fd,
3002 [0x1c] = gen_helper_pf2iw,
3003 [0x1d] = gen_helper_pf2id,
3004 [0x8a] = gen_helper_pfnacc,
3005 [0x8e] = gen_helper_pfpnacc,
3006 [0x90] = gen_helper_pfcmpge,
3007 [0x94] = gen_helper_pfmin,
3008 [0x96] = gen_helper_pfrcp,
3009 [0x97] = gen_helper_pfrsqrt,
3010 [0x9a] = gen_helper_pfsub,
3011 [0x9e] = gen_helper_pfadd,
3012 [0xa0] = gen_helper_pfcmpgt,
3013 [0xa4] = gen_helper_pfmax,
3014 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
3015 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3016 [0xaa] = gen_helper_pfsubr,
3017 [0xae] = gen_helper_pfacc,
3018 [0xb0] = gen_helper_pfcmpeq,
3019 [0xb4] = gen_helper_pfmul,
3020 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3021 [0xb7] = gen_helper_pmulhrw_mmx,
3022 [0xbb] = gen_helper_pswapd,
3023 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3026 struct SSEOpHelper_pp {
3027 SSEFunc_0_pp op[2];
3028 uint32_t ext_mask;
3031 struct SSEOpHelper_ppi {
3032 SSEFunc_0_ppi op[2];
3033 uint32_t ext_mask;
3036 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3037 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3038 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3039 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
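/* Expansion example (illustrative): SSE41_OP(ptest) yields
 *     { { NULL, gen_helper_ptest_xmm }, CPUID_EXT_SSE41 }
 * i.e. no MMX form, an XMM helper in the 0x66 column, and an ext_mask that
 * gen_sse() checks against s->cpuid_ext_features before emitting the call. */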
3041 static const struct SSEOpHelper_pp sse_op_table6[256] = {
3042 [0x00] = SSSE3_OP(pshufb),
3043 [0x01] = SSSE3_OP(phaddw),
3044 [0x02] = SSSE3_OP(phaddd),
3045 [0x03] = SSSE3_OP(phaddsw),
3046 [0x04] = SSSE3_OP(pmaddubsw),
3047 [0x05] = SSSE3_OP(phsubw),
3048 [0x06] = SSSE3_OP(phsubd),
3049 [0x07] = SSSE3_OP(phsubsw),
3050 [0x08] = SSSE3_OP(psignb),
3051 [0x09] = SSSE3_OP(psignw),
3052 [0x0a] = SSSE3_OP(psignd),
3053 [0x0b] = SSSE3_OP(pmulhrsw),
3054 [0x10] = SSE41_OP(pblendvb),
3055 [0x14] = SSE41_OP(blendvps),
3056 [0x15] = SSE41_OP(blendvpd),
3057 [0x17] = SSE41_OP(ptest),
3058 [0x1c] = SSSE3_OP(pabsb),
3059 [0x1d] = SSSE3_OP(pabsw),
3060 [0x1e] = SSSE3_OP(pabsd),
3061 [0x20] = SSE41_OP(pmovsxbw),
3062 [0x21] = SSE41_OP(pmovsxbd),
3063 [0x22] = SSE41_OP(pmovsxbq),
3064 [0x23] = SSE41_OP(pmovsxwd),
3065 [0x24] = SSE41_OP(pmovsxwq),
3066 [0x25] = SSE41_OP(pmovsxdq),
3067 [0x28] = SSE41_OP(pmuldq),
3068 [0x29] = SSE41_OP(pcmpeqq),
3069 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3070 [0x2b] = SSE41_OP(packusdw),
3071 [0x30] = SSE41_OP(pmovzxbw),
3072 [0x31] = SSE41_OP(pmovzxbd),
3073 [0x32] = SSE41_OP(pmovzxbq),
3074 [0x33] = SSE41_OP(pmovzxwd),
3075 [0x34] = SSE41_OP(pmovzxwq),
3076 [0x35] = SSE41_OP(pmovzxdq),
3077 [0x37] = SSE42_OP(pcmpgtq),
3078 [0x38] = SSE41_OP(pminsb),
3079 [0x39] = SSE41_OP(pminsd),
3080 [0x3a] = SSE41_OP(pminuw),
3081 [0x3b] = SSE41_OP(pminud),
3082 [0x3c] = SSE41_OP(pmaxsb),
3083 [0x3d] = SSE41_OP(pmaxsd),
3084 [0x3e] = SSE41_OP(pmaxuw),
3085 [0x3f] = SSE41_OP(pmaxud),
3086 [0x40] = SSE41_OP(pmulld),
3087 [0x41] = SSE41_OP(phminposuw),
3090 static const struct SSEOpHelper_ppi sse_op_table7[256] = {
3091 [0x08] = SSE41_OP(roundps),
3092 [0x09] = SSE41_OP(roundpd),
3093 [0x0a] = SSE41_OP(roundss),
3094 [0x0b] = SSE41_OP(roundsd),
3095 [0x0c] = SSE41_OP(blendps),
3096 [0x0d] = SSE41_OP(blendpd),
3097 [0x0e] = SSE41_OP(pblendw),
3098 [0x0f] = SSSE3_OP(palignr),
3099 [0x14] = SSE41_SPECIAL, /* pextrb */
3100 [0x15] = SSE41_SPECIAL, /* pextrw */
3101 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3102 [0x17] = SSE41_SPECIAL, /* extractps */
3103 [0x20] = SSE41_SPECIAL, /* pinsrb */
3104 [0x21] = SSE41_SPECIAL, /* insertps */
3105 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3106 [0x40] = SSE41_OP(dpps),
3107 [0x41] = SSE41_OP(dppd),
3108 [0x42] = SSE41_OP(mpsadbw),
3109 [0x60] = SSE42_OP(pcmpestrm),
3110 [0x61] = SSE42_OP(pcmpestri),
3111 [0x62] = SSE42_OP(pcmpistrm),
3112 [0x63] = SSE42_OP(pcmpistri),
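/* Dispatch example (illustrative): three-byte opcodes 0F 38 xx go through
 * sse_op_table6 and 0F 3A xx ib through sse_op_table7, indexed by the third
 * opcode byte; e.g. 66 0F 3A 0F /r ib (PALIGNR xmm) selects
 * sse_op_table7[0x0f].op[1], i.e. gen_helper_palignr_xmm, after the
 * CPUID_EXT_SSSE3 bit in ext_mask has been verified. */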
3115 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3117 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3118 int modrm, mod, rm, reg, reg_addr, offset_addr;
3119 SSEFunc_0_pp sse_fn_pp;
3120 SSEFunc_0_ppi sse_fn_ppi;
3121 SSEFunc_0_ppt sse_fn_ppt;
3123 b &= 0xff;
3124 if (s->prefix & PREFIX_DATA)
3125 b1 = 1;
3126 else if (s->prefix & PREFIX_REPZ)
3127 b1 = 2;
3128 else if (s->prefix & PREFIX_REPNZ)
3129 b1 = 3;
3130 else
3131 b1 = 0;
3132 sse_fn_pp = sse_op_table1[b][b1];
3133 if (!sse_fn_pp) {
3134 goto illegal_op;
3136 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3137 is_xmm = 1;
3138 } else {
3139 if (b1 == 0) {
3140 /* MMX case */
3141 is_xmm = 0;
3142 } else {
3143 is_xmm = 1;
3146 /* simple MMX/SSE operation */
3147 if (s->flags & HF_TS_MASK) {
3148 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3149 return;
3151 if (s->flags & HF_EM_MASK) {
3152 illegal_op:
3153 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3154 return;
3156 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3157 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3158 goto illegal_op;
3159 if (b == 0x0e) {
3160 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3161 goto illegal_op;
3162 /* femms */
3163 gen_helper_emms();
3164 return;
3166 if (b == 0x77) {
3167 /* emms */
3168 gen_helper_emms();
3169 return;
3171 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3172 the static cpu state) */
3173 if (!is_xmm) {
3174 gen_helper_enter_mmx();
3177 modrm = ldub_code(s->pc++);
3178 reg = ((modrm >> 3) & 7);
3179 if (is_xmm)
3180 reg |= rex_r;
3181 mod = (modrm >> 6) & 3;
3182 if (sse_fn_pp == SSE_SPECIAL) {
3183 b |= (b1 << 8);
3184 switch(b) {
3185 case 0x0e7: /* movntq */
3186 if (mod == 3)
3187 goto illegal_op;
3188 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3189 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3190 break;
3191 case 0x1e7: /* movntdq */
3192 case 0x02b: /* movntps */
3193 case 0x12b: /* movntpd */
3194 if (mod == 3)
3195 goto illegal_op;
3196 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3197 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3198 break;
3199 case 0x3f0: /* lddqu */
3200 if (mod == 3)
3201 goto illegal_op;
3202 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3203 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3204 break;
3205 case 0x22b: /* movntss */
3206 case 0x32b: /* movntsd */
3207 if (mod == 3)
3208 goto illegal_op;
3209 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3210 if (b1 & 1) {
3211 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3212 xmm_regs[reg]));
3213 } else {
3214 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3215 xmm_regs[reg].XMM_L(0)));
3216 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3218 break;
3219 case 0x6e: /* movd mm, ea */
3220 #ifdef TARGET_X86_64
3221 if (s->dflag == 2) {
3222 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3223 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3224 } else
3225 #endif
3227 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3228 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3229 offsetof(CPUX86State,fpregs[reg].mmx));
3230 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3231 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3233 break;
3234 case 0x16e: /* movd xmm, ea */
3235 #ifdef TARGET_X86_64
3236 if (s->dflag == 2) {
3237 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3238 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3239 offsetof(CPUX86State,xmm_regs[reg]));
3240 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3241 } else
3242 #endif
3244 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3245 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3246 offsetof(CPUX86State,xmm_regs[reg]));
3247 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3248 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3250 break;
3251 case 0x6f: /* movq mm, ea */
3252 if (mod != 3) {
3253 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3254 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3255 } else {
3256 rm = (modrm & 7);
3257 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3258 offsetof(CPUX86State,fpregs[rm].mmx));
3259 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3260 offsetof(CPUX86State,fpregs[reg].mmx));
3262 break;
3263 case 0x010: /* movups */
3264 case 0x110: /* movupd */
3265 case 0x028: /* movaps */
3266 case 0x128: /* movapd */
3267 case 0x16f: /* movdqa xmm, ea */
3268 case 0x26f: /* movdqu xmm, ea */
3269 if (mod != 3) {
3270 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3271 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3272 } else {
3273 rm = (modrm & 7) | REX_B(s);
3274 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3275 offsetof(CPUX86State,xmm_regs[rm]));
3277 break;
3278 case 0x210: /* movss xmm, ea */
3279 if (mod != 3) {
3280 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3281 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3282 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3283 gen_op_movl_T0_0();
3284 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3285 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3286 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3287 } else {
3288 rm = (modrm & 7) | REX_B(s);
3289 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3290 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3292 break;
3293 case 0x310: /* movsd xmm, ea */
3294 if (mod != 3) {
3295 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3296 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3297 gen_op_movl_T0_0();
3298 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3299 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3300 } else {
3301 rm = (modrm & 7) | REX_B(s);
3302 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3303 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3305 break;
3306 case 0x012: /* movlps */
3307 case 0x112: /* movlpd */
3308 if (mod != 3) {
3309 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3310 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3311 } else {
3312 /* movhlps */
3313 rm = (modrm & 7) | REX_B(s);
3314 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3315 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3317 break;
3318 case 0x212: /* movsldup */
3319 if (mod != 3) {
3320 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3321 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3322 } else {
3323 rm = (modrm & 7) | REX_B(s);
3324 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3325 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3326 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3327 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3329 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3330 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3331 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3332 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3333 break;
3334 case 0x312: /* movddup */
3335 if (mod != 3) {
3336 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3337 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3338 } else {
3339 rm = (modrm & 7) | REX_B(s);
3340 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3341 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3343 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3344 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3345 break;
3346 case 0x016: /* movhps */
3347 case 0x116: /* movhpd */
3348 if (mod != 3) {
3349 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3350 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3351 } else {
3352 /* movlhps */
3353 rm = (modrm & 7) | REX_B(s);
3354 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3355 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3357 break;
3358 case 0x216: /* movshdup */
3359 if (mod != 3) {
3360 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3361 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3362 } else {
3363 rm = (modrm & 7) | REX_B(s);
3364 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3365 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3366 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3367 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3369 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3370 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3371 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3372 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3373 break;
3374 case 0x178:
3375 case 0x378:
3377 int bit_index, field_length;
3379 if (b1 == 1 && reg != 0)
3380 goto illegal_op;
3381 field_length = ldub_code(s->pc++) & 0x3F;
3382 bit_index = ldub_code(s->pc++) & 0x3F;
3383 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3384 offsetof(CPUX86State,xmm_regs[reg]));
3385 if (b1 == 1)
3386 gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
3387 tcg_const_i32(field_length));
3388 else
3389 gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
3390 tcg_const_i32(field_length));
3392 break;
3393 case 0x7e: /* movd ea, mm */
3394 #ifdef TARGET_X86_64
3395 if (s->dflag == 2) {
3396 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3397 offsetof(CPUX86State,fpregs[reg].mmx));
3398 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3399 } else
3400 #endif
3402 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3403 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3404 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3406 break;
3407 case 0x17e: /* movd ea, xmm */
3408 #ifdef TARGET_X86_64
3409 if (s->dflag == 2) {
3410 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3411 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3412 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3413 } else
3414 #endif
3416 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3417 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3418 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3420 break;
3421 case 0x27e: /* movq xmm, ea */
3422 if (mod != 3) {
3423 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3424 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3425 } else {
3426 rm = (modrm & 7) | REX_B(s);
3427 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3428 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3430 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3431 break;
3432 case 0x7f: /* movq ea, mm */
3433 if (mod != 3) {
3434 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3435 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3436 } else {
3437 rm = (modrm & 7);
3438 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3439 offsetof(CPUX86State,fpregs[reg].mmx));
3441 break;
3442 case 0x011: /* movups */
3443 case 0x111: /* movupd */
3444 case 0x029: /* movaps */
3445 case 0x129: /* movapd */
3446 case 0x17f: /* movdqa ea, xmm */
3447 case 0x27f: /* movdqu ea, xmm */
3448 if (mod != 3) {
3449 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3450 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3451 } else {
3452 rm = (modrm & 7) | REX_B(s);
3453 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3454 offsetof(CPUX86State,xmm_regs[reg]));
3456 break;
3457 case 0x211: /* movss ea, xmm */
3458 if (mod != 3) {
3459 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3460 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3461 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3462 } else {
3463 rm = (modrm & 7) | REX_B(s);
3464 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3465 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3467 break;
3468 case 0x311: /* movsd ea, xmm */
3469 if (mod != 3) {
3470 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3471 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3472 } else {
3473 rm = (modrm & 7) | REX_B(s);
3474 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3475 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3477 break;
3478 case 0x013: /* movlps */
3479 case 0x113: /* movlpd */
3480 if (mod != 3) {
3481 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3482 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3483 } else {
3484 goto illegal_op;
3486 break;
3487 case 0x017: /* movhps */
3488 case 0x117: /* movhpd */
3489 if (mod != 3) {
3490 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3491 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3492 } else {
3493 goto illegal_op;
3495 break;
3496 case 0x71: /* shift mm, im */
3497 case 0x72:
3498 case 0x73:
3499 case 0x171: /* shift xmm, im */
3500 case 0x172:
3501 case 0x173:
3502 if (b1 >= 2) {
3503 goto illegal_op;
3505 val = ldub_code(s->pc++);
3506 if (is_xmm) {
3507 gen_op_movl_T0_im(val);
3508 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3509 gen_op_movl_T0_0();
3510 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3511 op1_offset = offsetof(CPUX86State,xmm_t0);
3512 } else {
3513 gen_op_movl_T0_im(val);
3514 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3515 gen_op_movl_T0_0();
3516 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3517 op1_offset = offsetof(CPUX86State,mmx_t0);
3519 sse_fn_pp = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
3520 if (!sse_fn_pp) {
3521 goto illegal_op;
3523 if (is_xmm) {
3524 rm = (modrm & 7) | REX_B(s);
3525 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3526 } else {
3527 rm = (modrm & 7);
3528 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3530 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3531 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3532 sse_fn_pp(cpu_ptr0, cpu_ptr1);
3533 break;
3534 case 0x050: /* movmskps */
3535 rm = (modrm & 7) | REX_B(s);
3536 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3537 offsetof(CPUX86State,xmm_regs[rm]));
3538 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
3539 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3540 gen_op_mov_reg_T0(OT_LONG, reg);
3541 break;
3542 case 0x150: /* movmskpd */
3543 rm = (modrm & 7) | REX_B(s);
3544 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3545 offsetof(CPUX86State,xmm_regs[rm]));
3546 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
3547 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3548 gen_op_mov_reg_T0(OT_LONG, reg);
3549 break;
3550 case 0x02a: /* cvtpi2ps */
3551 case 0x12a: /* cvtpi2pd */
3552 gen_helper_enter_mmx();
3553 if (mod != 3) {
3554 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3555 op2_offset = offsetof(CPUX86State,mmx_t0);
3556 gen_ldq_env_A0(s->mem_index, op2_offset);
3557 } else {
3558 rm = (modrm & 7);
3559 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3561 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3562 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3563 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3564 switch(b >> 8) {
3565 case 0x0:
3566 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
3567 break;
3568 default:
3569 case 0x1:
3570 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
3571 break;
3573 break;
3574 case 0x22a: /* cvtsi2ss */
3575 case 0x32a: /* cvtsi2sd */
3576 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3577 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3578 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3579 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3580 if (ot == OT_LONG) {
3581 SSEFunc_0_pi sse_fn_pi = sse_op_table3ai[(b >> 8) & 1];
3582 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3583 sse_fn_pi(cpu_ptr0, cpu_tmp2_i32);
3584 } else {
3585 #ifdef TARGET_X86_64
3586 SSEFunc_0_pl sse_fn_pl = sse_op_table3aq[(b >> 8) & 1];
3587 sse_fn_pl(cpu_ptr0, cpu_T[0]);
3588 #else
3589 goto illegal_op;
3590 #endif
3592 break;
3593 case 0x02c: /* cvttps2pi */
3594 case 0x12c: /* cvttpd2pi */
3595 case 0x02d: /* cvtps2pi */
3596 case 0x12d: /* cvtpd2pi */
3597 gen_helper_enter_mmx();
3598 if (mod != 3) {
3599 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3600 op2_offset = offsetof(CPUX86State,xmm_t0);
3601 gen_ldo_env_A0(s->mem_index, op2_offset);
3602 } else {
3603 rm = (modrm & 7) | REX_B(s);
3604 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3606 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3607 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3608 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3609 switch(b) {
3610 case 0x02c:
3611 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
3612 break;
3613 case 0x12c:
3614 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
3615 break;
3616 case 0x02d:
3617 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
3618 break;
3619 case 0x12d:
3620 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
3621 break;
3623 break;
3624 case 0x22c: /* cvttss2si */
3625 case 0x32c: /* cvttsd2si */
3626 case 0x22d: /* cvtss2si */
3627 case 0x32d: /* cvtsd2si */
3628 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3629 if (mod != 3) {
3630 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3631 if ((b >> 8) & 1) {
3632 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3633 } else {
3634 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3635 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3637 op2_offset = offsetof(CPUX86State,xmm_t0);
3638 } else {
3639 rm = (modrm & 7) | REX_B(s);
3640 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3642 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3643 if (ot == OT_LONG) {
3644 SSEFunc_i_p sse_fn_i_p =
3645 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3646 sse_fn_i_p(cpu_tmp2_i32, cpu_ptr0);
3647 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3648 } else {
3649 #ifdef TARGET_X86_64
3650 SSEFunc_l_p sse_fn_l_p =
3651 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3652 sse_fn_l_p(cpu_T[0], cpu_ptr0);
3653 #else
3654 goto illegal_op;
3655 #endif
3657 gen_op_mov_reg_T0(ot, reg);
3658 break;
3659 case 0xc4: /* pinsrw */
3660 case 0x1c4:
3661 s->rip_offset = 1;
3662 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3663 val = ldub_code(s->pc++);
3664 if (b1) {
3665 val &= 7;
3666 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3667 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3668 } else {
3669 val &= 3;
3670 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3671 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3673 break;
3674 case 0xc5: /* pextrw */
3675 case 0x1c5:
3676 if (mod != 3)
3677 goto illegal_op;
3678 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3679 val = ldub_code(s->pc++);
3680 if (b1) {
3681 val &= 7;
3682 rm = (modrm & 7) | REX_B(s);
3683 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3684 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3685 } else {
3686 val &= 3;
3687 rm = (modrm & 7);
3688 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3689 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3691 reg = ((modrm >> 3) & 7) | rex_r;
3692 gen_op_mov_reg_T0(ot, reg);
3693 break;
3694 case 0x1d6: /* movq ea, xmm */
3695 if (mod != 3) {
3696 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3697 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3698 } else {
3699 rm = (modrm & 7) | REX_B(s);
3700 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3701 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3702 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3704 break;
3705 case 0x2d6: /* movq2dq */
3706 gen_helper_enter_mmx();
3707 rm = (modrm & 7);
3708 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3709 offsetof(CPUX86State,fpregs[rm].mmx));
3710 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3711 break;
3712 case 0x3d6: /* movdq2q */
3713 gen_helper_enter_mmx();
3714 rm = (modrm & 7) | REX_B(s);
3715 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3716 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3717 break;
3718 case 0xd7: /* pmovmskb */
3719 case 0x1d7:
3720 if (mod != 3)
3721 goto illegal_op;
3722 if (b1) {
3723 rm = (modrm & 7) | REX_B(s);
3724 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3725 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
3726 } else {
3727 rm = (modrm & 7);
3728 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3729 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
3731 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3732 reg = ((modrm >> 3) & 7) | rex_r;
3733 gen_op_mov_reg_T0(OT_LONG, reg);
3734 break;
3735 case 0x138:
3736 if (s->prefix & PREFIX_REPNZ)
3737 goto crc32;
3738 case 0x038:
3739 b = modrm;
3740 modrm = ldub_code(s->pc++);
3741 rm = modrm & 7;
3742 reg = ((modrm >> 3) & 7) | rex_r;
3743 mod = (modrm >> 6) & 3;
3744 if (b1 >= 2) {
3745 goto illegal_op;
3748 sse_fn_pp = sse_op_table6[b].op[b1];
3749 if (!sse_fn_pp) {
3750 goto illegal_op;
3752 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3753 goto illegal_op;
3755 if (b1) {
3756 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3757 if (mod == 3) {
3758 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3759 } else {
3760 op2_offset = offsetof(CPUX86State,xmm_t0);
3761 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3762 switch (b) {
3763 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3764 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3765 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3766 gen_ldq_env_A0(s->mem_index, op2_offset +
3767 offsetof(XMMReg, XMM_Q(0)));
3768 break;
3769 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3770 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3771 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3772 (s->mem_index >> 2) - 1);
3773 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3774 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3775 offsetof(XMMReg, XMM_L(0)));
3776 break;
3777 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3778 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3779 (s->mem_index >> 2) - 1);
3780 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3781 offsetof(XMMReg, XMM_W(0)));
3782 break;
3783 case 0x2a: /* movntdqa */
3784 gen_ldo_env_A0(s->mem_index, op1_offset);
3785 return;
3786 default:
3787 gen_ldo_env_A0(s->mem_index, op2_offset);
3790 } else {
3791 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3792 if (mod == 3) {
3793 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3794 } else {
3795 op2_offset = offsetof(CPUX86State,mmx_t0);
3796 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3797 gen_ldq_env_A0(s->mem_index, op2_offset);
3800 if (sse_fn_pp == SSE_SPECIAL) {
3801 goto illegal_op;
3804 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3805 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3806 sse_fn_pp(cpu_ptr0, cpu_ptr1);
3808 if (b == 0x17)
3809 s->cc_op = CC_OP_EFLAGS;
3810 break;
3811 case 0x338: /* crc32 */
3812 crc32:
3813 b = modrm;
3814 modrm = ldub_code(s->pc++);
3815 reg = ((modrm >> 3) & 7) | rex_r;
3817 if (b != 0xf0 && b != 0xf1)
3818 goto illegal_op;
3819 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3820 goto illegal_op;
3822 if (b == 0xf0)
3823 ot = OT_BYTE;
3824 else if (b == 0xf1 && s->dflag != 2)
3825 if (s->prefix & PREFIX_DATA)
3826 ot = OT_WORD;
3827 else
3828 ot = OT_LONG;
3829 else
3830 ot = OT_QUAD;
3832 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3833 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3834 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3835 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3836 cpu_T[0], tcg_const_i32(8 << ot));
3838 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3839 gen_op_mov_reg_T0(ot, reg);
3840 break;
3841 case 0x03a:
3842 case 0x13a:
3843 b = modrm;
3844 modrm = ldub_code(s->pc++);
3845 rm = modrm & 7;
3846 reg = ((modrm >> 3) & 7) | rex_r;
3847 mod = (modrm >> 6) & 3;
3848 if (b1 >= 2) {
3849 goto illegal_op;
3852 sse_fn_ppi = sse_op_table7[b].op[b1];
3853 if (!sse_fn_ppi) {
3854 goto illegal_op;
3856 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3857 goto illegal_op;
3859 if (sse_fn_ppi == SSE_SPECIAL) {
3860 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3861 rm = (modrm & 7) | REX_B(s);
3862 if (mod != 3)
3863 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3864 reg = ((modrm >> 3) & 7) | rex_r;
3865 val = ldub_code(s->pc++);
3866 switch (b) {
3867 case 0x14: /* pextrb */
3868 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3869 xmm_regs[reg].XMM_B(val & 15)));
3870 if (mod == 3)
3871 gen_op_mov_reg_T0(ot, rm);
3872 else
3873 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3874 (s->mem_index >> 2) - 1);
3875 break;
3876 case 0x15: /* pextrw */
3877 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3878 xmm_regs[reg].XMM_W(val & 7)));
3879 if (mod == 3)
3880 gen_op_mov_reg_T0(ot, rm);
3881 else
3882 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3883 (s->mem_index >> 2) - 1);
3884 break;
3885 case 0x16:
3886 if (ot == OT_LONG) { /* pextrd */
3887 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3888 offsetof(CPUX86State,
3889 xmm_regs[reg].XMM_L(val & 3)));
3890 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3891 if (mod == 3)
3892 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3893 else
3894 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3895 (s->mem_index >> 2) - 1);
3896 } else { /* pextrq */
3897 #ifdef TARGET_X86_64
3898 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3899 offsetof(CPUX86State,
3900 xmm_regs[reg].XMM_Q(val & 1)));
3901 if (mod == 3)
3902 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3903 else
3904 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3905 (s->mem_index >> 2) - 1);
3906 #else
3907 goto illegal_op;
3908 #endif
3910 break;
3911 case 0x17: /* extractps */
3912 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3913 xmm_regs[reg].XMM_L(val & 3)));
3914 if (mod == 3)
3915 gen_op_mov_reg_T0(ot, rm);
3916 else
3917 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3918 (s->mem_index >> 2) - 1);
3919 break;
3920 case 0x20: /* pinsrb */
3921 if (mod == 3)
3922 gen_op_mov_TN_reg(OT_LONG, 0, rm);
3923 else
3924 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
3925 (s->mem_index >> 2) - 1);
3926 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
3927 xmm_regs[reg].XMM_B(val & 15)));
3928 break;
3929 case 0x21: /* insertps */
3930 if (mod == 3) {
3931 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3932 offsetof(CPUX86State,xmm_regs[rm]
3933 .XMM_L((val >> 6) & 3)));
3934 } else {
3935 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3936 (s->mem_index >> 2) - 1);
3937 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3939 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3940 offsetof(CPUX86State,xmm_regs[reg]
3941 .XMM_L((val >> 4) & 3)));
3942 if ((val >> 0) & 1)
3943 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3944 cpu_env, offsetof(CPUX86State,
3945 xmm_regs[reg].XMM_L(0)));
3946 if ((val >> 1) & 1)
3947 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3948 cpu_env, offsetof(CPUX86State,
3949 xmm_regs[reg].XMM_L(1)));
3950 if ((val >> 2) & 1)
3951 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3952 cpu_env, offsetof(CPUX86State,
3953 xmm_regs[reg].XMM_L(2)));
3954 if ((val >> 3) & 1)
3955 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3956 cpu_env, offsetof(CPUX86State,
3957 xmm_regs[reg].XMM_L(3)));
3958 break;
3959 case 0x22:
3960 if (ot == OT_LONG) { /* pinsrd */
3961 if (mod == 3)
3962 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
3963 else
3964 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3965 (s->mem_index >> 2) - 1);
3966 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3967 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3968 offsetof(CPUX86State,
3969 xmm_regs[reg].XMM_L(val & 3)));
3970 } else { /* pinsrq */
3971 #ifdef TARGET_X86_64
3972 if (mod == 3)
3973 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3974 else
3975 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
3976 (s->mem_index >> 2) - 1);
3977 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3978 offsetof(CPUX86State,
3979 xmm_regs[reg].XMM_Q(val & 1)));
3980 #else
3981 goto illegal_op;
3982 #endif
3984 break;
3986 return;
3989 if (b1) {
3990 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3991 if (mod == 3) {
3992 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3993 } else {
3994 op2_offset = offsetof(CPUX86State,xmm_t0);
3995 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3996 gen_ldo_env_A0(s->mem_index, op2_offset);
3998 } else {
3999 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4000 if (mod == 3) {
4001 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4002 } else {
4003 op2_offset = offsetof(CPUX86State,mmx_t0);
4004 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4005 gen_ldq_env_A0(s->mem_index, op2_offset);
4008 val = ldub_code(s->pc++);
4010 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4011 s->cc_op = CC_OP_EFLAGS;
4013 if (s->dflag == 2)
4014 /* The helper must use entire 64-bit gp registers */
4015 val |= 1 << 8;
4018 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4019 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4020 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4021 break;
4022 default:
4023 goto illegal_op;
4025 } else {
4026 /* generic MMX or SSE operation */
4027 switch(b) {
4028 case 0x70: /* pshufx insn */
4029 case 0xc6: /* pshufx insn */
4030 case 0xc2: /* compare insns */
4031 s->rip_offset = 1;
4032 break;
4033 default:
4034 break;
4036 if (is_xmm) {
4037 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4038 if (mod != 3) {
4039 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4040 op2_offset = offsetof(CPUX86State,xmm_t0);
4041 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4042 b == 0xc2)) {
4043 /* specific case for SSE single instructions */
4044 if (b1 == 2) {
4045 /* 32 bit access */
4046 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4047 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4048 } else {
4049 /* 64 bit access */
4050 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4052 } else {
4053 gen_ldo_env_A0(s->mem_index, op2_offset);
4055 } else {
4056 rm = (modrm & 7) | REX_B(s);
4057 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4059 } else {
4060 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4061 if (mod != 3) {
4062 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4063 op2_offset = offsetof(CPUX86State,mmx_t0);
4064 gen_ldq_env_A0(s->mem_index, op2_offset);
4065 } else {
4066 rm = (modrm & 7);
4067 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4070 switch(b) {
4071 case 0x0f: /* 3DNow! data insns */
4072 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4073 goto illegal_op;
4074 val = ldub_code(s->pc++);
4075 sse_fn_pp = sse_op_table5[val];
4076 if (!sse_fn_pp) {
4077 goto illegal_op;
4079 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4080 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4081 sse_fn_pp(cpu_ptr0, cpu_ptr1);
4082 break;
4083 case 0x70: /* pshufx insn */
4084 case 0xc6: /* pshufx insn */
4085 val = ldub_code(s->pc++);
4086 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4087 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4088 /* XXX: introduce a new table? */
4089 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_pp;
4090 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4091 break;
4092 case 0xc2:
4093 /* compare insns */
4094 val = ldub_code(s->pc++);
4095 if (val >= 8)
4096 goto illegal_op;
4097 sse_fn_pp = sse_op_table4[val][b1];
4099 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4100 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4101 sse_fn_pp(cpu_ptr0, cpu_ptr1);
4102 break;
4103 case 0xf7:
4104 /* maskmov: we must prepare A0 */
4105 if (mod != 3)
4106 goto illegal_op;
4107 #ifdef TARGET_X86_64
4108 if (s->aflag == 2) {
4109 gen_op_movq_A0_reg(R_EDI);
4110 } else
4111 #endif
4113 gen_op_movl_A0_reg(R_EDI);
4114 if (s->aflag == 0)
4115 gen_op_andl_A0_ffff();
4117 gen_add_A0_ds_seg(s);
4119 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4120 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4121 /* XXX: introduce a new table? */
4122 sse_fn_ppt = (SSEFunc_0_ppt)sse_fn_pp;
4123 sse_fn_ppt(cpu_ptr0, cpu_ptr1, cpu_A0);
4124 break;
4125 default:
4126 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4127 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4128 sse_fn_pp(cpu_ptr0, cpu_ptr1);
4129 break;
4131 if (b == 0x2e || b == 0x2f) {
4132 s->cc_op = CC_OP_EFLAGS;
4137 /* convert one instruction. s->is_jmp is set if the translation must
4138 be stopped. Return the next pc value */
4139 static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
4141 int b, prefixes, aflag, dflag;
4142 int shift, ot;
4143 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4144 target_ulong next_eip, tval;
4145 int rex_w, rex_r;
4147 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
4148 tcg_gen_debug_insn_start(pc_start);
4149 s->pc = pc_start;
4150 prefixes = 0;
4151 aflag = s->code32;
4152 dflag = s->code32;
4153 s->override = -1;
4154 rex_w = -1;
4155 rex_r = 0;
4156 #ifdef TARGET_X86_64
4157 s->rex_x = 0;
4158 s->rex_b = 0;
4159 x86_64_hregs = 0;
4160 #endif
4161 s->rip_offset = 0; /* for relative ip address */
4162 next_byte:
4163 b = ldub_code(s->pc);
4164 s->pc++;
4165 /* check prefixes */
4166 #ifdef TARGET_X86_64
4167 if (CODE64(s)) {
4168 switch (b) {
4169 case 0xf3:
4170 prefixes |= PREFIX_REPZ;
4171 goto next_byte;
4172 case 0xf2:
4173 prefixes |= PREFIX_REPNZ;
4174 goto next_byte;
4175 case 0xf0:
4176 prefixes |= PREFIX_LOCK;
4177 goto next_byte;
4178 case 0x2e:
4179 s->override = R_CS;
4180 goto next_byte;
4181 case 0x36:
4182 s->override = R_SS;
4183 goto next_byte;
4184 case 0x3e:
4185 s->override = R_DS;
4186 goto next_byte;
4187 case 0x26:
4188 s->override = R_ES;
4189 goto next_byte;
4190 case 0x64:
4191 s->override = R_FS;
4192 goto next_byte;
4193 case 0x65:
4194 s->override = R_GS;
4195 goto next_byte;
4196 case 0x66:
4197 prefixes |= PREFIX_DATA;
4198 goto next_byte;
4199 case 0x67:
4200 prefixes |= PREFIX_ADR;
4201 goto next_byte;
4202 case 0x40 ... 0x4f:
4203 /* REX prefix */
4204 rex_w = (b >> 3) & 1;
4205 rex_r = (b & 0x4) << 1;
4206 s->rex_x = (b & 0x2) << 2;
4207 REX_B(s) = (b & 0x1) << 3;
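/* The REX bits are stored pre-shifted (R -> bit 3 of "reg", X -> bit 3 of
   the SIB index, B -> bit 3 of rm/base) so they can simply be OR'ed into
   the 3-bit fields decoded from the ModRM/SIB bytes; e.g. a 0x44 prefix
   (REX.R) makes the "reg" field select r8-r15. */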
4208 x86_64_hregs = 1; /* select uniform byte register addressing */
4209 goto next_byte;
4211 if (rex_w == 1) {
4212 /* 0x66 is ignored if rex.w is set */
4213 dflag = 2;
4214 } else {
4215 if (prefixes & PREFIX_DATA)
4216 dflag ^= 1;
4218 if (!(prefixes & PREFIX_ADR))
4219 aflag = 2;
4220 } else
4221 #endif
4223 switch (b) {
4224 case 0xf3:
4225 prefixes |= PREFIX_REPZ;
4226 goto next_byte;
4227 case 0xf2:
4228 prefixes |= PREFIX_REPNZ;
4229 goto next_byte;
4230 case 0xf0:
4231 prefixes |= PREFIX_LOCK;
4232 goto next_byte;
4233 case 0x2e:
4234 s->override = R_CS;
4235 goto next_byte;
4236 case 0x36:
4237 s->override = R_SS;
4238 goto next_byte;
4239 case 0x3e:
4240 s->override = R_DS;
4241 goto next_byte;
4242 case 0x26:
4243 s->override = R_ES;
4244 goto next_byte;
4245 case 0x64:
4246 s->override = R_FS;
4247 goto next_byte;
4248 case 0x65:
4249 s->override = R_GS;
4250 goto next_byte;
4251 case 0x66:
4252 prefixes |= PREFIX_DATA;
4253 goto next_byte;
4254 case 0x67:
4255 prefixes |= PREFIX_ADR;
4256 goto next_byte;
4258 if (prefixes & PREFIX_DATA)
4259 dflag ^= 1;
4260 if (prefixes & PREFIX_ADR)
4261 aflag ^= 1;
4264 s->prefix = prefixes;
4265 s->aflag = aflag;
4266 s->dflag = dflag;
4268 /* lock generation */
4269 if (prefixes & PREFIX_LOCK)
4270 gen_helper_lock();
4272 /* now check op code */
4273 reswitch:
4274 switch(b) {
4275 case 0x0f:
4276 /**************************/
4277 /* extended op code */
4278 b = ldub_code(s->pc++) | 0x100;
4279 goto reswitch;
4281 /**************************/
4282 /* arith & logic */
4283 case 0x00 ... 0x05:
4284 case 0x08 ... 0x0d:
4285 case 0x10 ... 0x15:
4286 case 0x18 ... 0x1d:
4287 case 0x20 ... 0x25:
4288 case 0x28 ... 0x2d:
4289 case 0x30 ... 0x35:
4290 case 0x38 ... 0x3d:
4292 int op, f, val;
4293 op = (b >> 3) & 7;
4294 f = (b >> 1) & 3;
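/* In this opcode block bits 5:3 of the opcode select the ALU operation
   (ADD, OR, ADC, SBB, AND, SUB, XOR, CMP), bits 2:1 select the operand
   form handled below (0: Ev,Gv  1: Gv,Ev  2: AL/eAX,Iv) and bit 0
   selects byte vs. full operand size. */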
4296 if ((b & 1) == 0)
4297 ot = OT_BYTE;
4298 else
4299 ot = dflag + OT_WORD;
4301 switch(f) {
4302 case 0: /* OP Ev, Gv */
4303 modrm = ldub_code(s->pc++);
4304 reg = ((modrm >> 3) & 7) | rex_r;
4305 mod = (modrm >> 6) & 3;
4306 rm = (modrm & 7) | REX_B(s);
4307 if (mod != 3) {
4308 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4309 opreg = OR_TMP0;
4310 } else if (op == OP_XORL && rm == reg) {
4311 xor_zero:
4312 /* xor reg, reg optimisation */
4313 gen_op_movl_T0_0();
4314 s->cc_op = CC_OP_LOGICB + ot;
4315 gen_op_mov_reg_T0(ot, reg);
4316 gen_op_update1_cc();
4317 break;
4318 } else {
4319 opreg = rm;
4321 gen_op_mov_TN_reg(ot, 1, reg);
4322 gen_op(s, op, ot, opreg);
4323 break;
4324 case 1: /* OP Gv, Ev */
4325 modrm = ldub_code(s->pc++);
4326 mod = (modrm >> 6) & 3;
4327 reg = ((modrm >> 3) & 7) | rex_r;
4328 rm = (modrm & 7) | REX_B(s);
4329 if (mod != 3) {
4330 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4331 gen_op_ld_T1_A0(ot + s->mem_index);
4332 } else if (op == OP_XORL && rm == reg) {
4333 goto xor_zero;
4334 } else {
4335 gen_op_mov_TN_reg(ot, 1, rm);
4337 gen_op(s, op, ot, reg);
4338 break;
4339 case 2: /* OP A, Iv */
4340 val = insn_get(s, ot);
4341 gen_op_movl_T1_im(val);
4342 gen_op(s, op, ot, OR_EAX);
4343 break;
4346 break;
4348 case 0x82:
4349 if (CODE64(s))
4350 goto illegal_op;
4351 case 0x80: /* GRP1 */
4352 case 0x81:
4353 case 0x83:
4355 int val;
4357 if ((b & 1) == 0)
4358 ot = OT_BYTE;
4359 else
4360 ot = dflag + OT_WORD;
4362 modrm = ldub_code(s->pc++);
4363 mod = (modrm >> 6) & 3;
4364 rm = (modrm & 7) | REX_B(s);
4365 op = (modrm >> 3) & 7;
4367 if (mod != 3) {
4368 if (b == 0x83)
4369 s->rip_offset = 1;
4370 else
4371 s->rip_offset = insn_const_size(ot);
4372 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4373 opreg = OR_TMP0;
4374 } else {
4375 opreg = rm;
4378 switch(b) {
4379 default:
4380 case 0x80:
4381 case 0x81:
4382 case 0x82:
4383 val = insn_get(s, ot);
4384 break;
4385 case 0x83:
4386 val = (int8_t)insn_get(s, OT_BYTE);
4387 break;
4389 gen_op_movl_T1_im(val);
4390 gen_op(s, op, ot, opreg);
4392 break;
4394 /**************************/
4395 /* inc, dec, and other misc arith */
4396 case 0x40 ... 0x47: /* inc Gv */
4397 ot = dflag ? OT_LONG : OT_WORD;
4398 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4399 break;
4400 case 0x48 ... 0x4f: /* dec Gv */
4401 ot = dflag ? OT_LONG : OT_WORD;
4402 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4403 break;
4404 case 0xf6: /* GRP3 */
4405 case 0xf7:
4406 if ((b & 1) == 0)
4407 ot = OT_BYTE;
4408 else
4409 ot = dflag + OT_WORD;
4411 modrm = ldub_code(s->pc++);
4412 mod = (modrm >> 6) & 3;
4413 rm = (modrm & 7) | REX_B(s);
4414 op = (modrm >> 3) & 7;
4415 if (mod != 3) {
4416 if (op == 0)
4417 s->rip_offset = insn_const_size(ot);
4418 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4419 gen_op_ld_T0_A0(ot + s->mem_index);
4420 } else {
4421 gen_op_mov_TN_reg(ot, 0, rm);
4424 switch(op) {
4425 case 0: /* test */
4426 val = insn_get(s, ot);
4427 gen_op_movl_T1_im(val);
4428 gen_op_testl_T0_T1_cc();
4429 s->cc_op = CC_OP_LOGICB + ot;
4430 break;
4431 case 2: /* not */
4432 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4433 if (mod != 3) {
4434 gen_op_st_T0_A0(ot + s->mem_index);
4435 } else {
4436 gen_op_mov_reg_T0(ot, rm);
4438 break;
4439 case 3: /* neg */
4440 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4441 if (mod != 3) {
4442 gen_op_st_T0_A0(ot + s->mem_index);
4443 } else {
4444 gen_op_mov_reg_T0(ot, rm);
4446 gen_op_update_neg_cc();
4447 s->cc_op = CC_OP_SUBB + ot;
4448 break;
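/* For mul/imul the flags are computed lazily: cc_dst holds the low part
   of the product and cc_src the information needed to derive CF/OF
   (the high part for mul, the difference between the result and its
   sign extension for imul); CF/OF are set iff cc_src is non-zero. */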
4449 case 4: /* mul */
4450 switch(ot) {
4451 case OT_BYTE:
4452 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4453 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4454 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4455 /* XXX: use 32 bit mul which could be faster */
4456 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4457 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4458 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4459 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4460 s->cc_op = CC_OP_MULB;
4461 break;
4462 case OT_WORD:
4463 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4464 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4465 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4466 /* XXX: use 32 bit mul which could be faster */
4467 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4468 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4469 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4470 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4471 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4472 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4473 s->cc_op = CC_OP_MULW;
4474 break;
4475 default:
4476 case OT_LONG:
4477 #ifdef TARGET_X86_64
4478 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4479 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4480 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4481 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4482 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4483 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4484 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4485 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4486 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4487 #else
4489 TCGv_i64 t0, t1;
4490 t0 = tcg_temp_new_i64();
4491 t1 = tcg_temp_new_i64();
4492 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4493 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4494 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4495 tcg_gen_mul_i64(t0, t0, t1);
4496 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4497 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4498 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4499 tcg_gen_shri_i64(t0, t0, 32);
4500 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4501 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4502 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4504 #endif
4505 s->cc_op = CC_OP_MULL;
4506 break;
4507 #ifdef TARGET_X86_64
4508 case OT_QUAD:
4509 gen_helper_mulq_EAX_T0(cpu_T[0]);
4510 s->cc_op = CC_OP_MULQ;
4511 break;
4512 #endif
4514 break;
4515 case 5: /* imul */
4516 switch(ot) {
4517 case OT_BYTE:
4518 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4519 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4520 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4521 /* XXX: use 32 bit mul which could be faster */
4522 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4523 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4524 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4525 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4526 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4527 s->cc_op = CC_OP_MULB;
4528 break;
4529 case OT_WORD:
4530 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4531 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4532 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4533 /* XXX: use 32 bit mul which could be faster */
4534 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4535 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4536 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4537 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4538 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4539 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4540 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4541 s->cc_op = CC_OP_MULW;
4542 break;
4543 default:
4544 case OT_LONG:
4545 #ifdef TARGET_X86_64
4546 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4547 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4548 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4549 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4550 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4551 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4552 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4553 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4554 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4555 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4556 #else
4558 TCGv_i64 t0, t1;
4559 t0 = tcg_temp_new_i64();
4560 t1 = tcg_temp_new_i64();
4561 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4562 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4563 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4564 tcg_gen_mul_i64(t0, t0, t1);
4565 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4566 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4567 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4568 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4569 tcg_gen_shri_i64(t0, t0, 32);
4570 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4571 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4572 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4574 #endif
4575 s->cc_op = CC_OP_MULL;
4576 break;
4577 #ifdef TARGET_X86_64
4578 case OT_QUAD:
4579 gen_helper_imulq_EAX_T0(cpu_T[0]);
4580 s->cc_op = CC_OP_MULQ;
4581 break;
4582 #endif
4584 break;
4585 case 6: /* div */
4586 switch(ot) {
4587 case OT_BYTE:
4588 gen_jmp_im(pc_start - s->cs_base);
4589 gen_helper_divb_AL(cpu_T[0]);
4590 break;
4591 case OT_WORD:
4592 gen_jmp_im(pc_start - s->cs_base);
4593 gen_helper_divw_AX(cpu_T[0]);
4594 break;
4595 default:
4596 case OT_LONG:
4597 gen_jmp_im(pc_start - s->cs_base);
4598 gen_helper_divl_EAX(cpu_T[0]);
4599 break;
4600 #ifdef TARGET_X86_64
4601 case OT_QUAD:
4602 gen_jmp_im(pc_start - s->cs_base);
4603 gen_helper_divq_EAX(cpu_T[0]);
4604 break;
4605 #endif
4607 break;
4608 case 7: /* idiv */
4609 switch(ot) {
4610 case OT_BYTE:
4611 gen_jmp_im(pc_start - s->cs_base);
4612 gen_helper_idivb_AL(cpu_T[0]);
4613 break;
4614 case OT_WORD:
4615 gen_jmp_im(pc_start - s->cs_base);
4616 gen_helper_idivw_AX(cpu_T[0]);
4617 break;
4618 default:
4619 case OT_LONG:
4620 gen_jmp_im(pc_start - s->cs_base);
4621 gen_helper_idivl_EAX(cpu_T[0]);
4622 break;
4623 #ifdef TARGET_X86_64
4624 case OT_QUAD:
4625 gen_jmp_im(pc_start - s->cs_base);
4626 gen_helper_idivq_EAX(cpu_T[0]);
4627 break;
4628 #endif
4630 break;
4631 default:
4632 goto illegal_op;
4634 break;
4636 case 0xfe: /* GRP4 */
4637 case 0xff: /* GRP5 */
4638 if ((b & 1) == 0)
4639 ot = OT_BYTE;
4640 else
4641 ot = dflag + OT_WORD;
4643 modrm = ldub_code(s->pc++);
4644 mod = (modrm >> 6) & 3;
4645 rm = (modrm & 7) | REX_B(s);
4646 op = (modrm >> 3) & 7;
4647 if (op >= 2 && b == 0xfe) {
4648 goto illegal_op;
4650 if (CODE64(s)) {
4651 if (op == 2 || op == 4) {
4652 /* operand size for jumps is 64 bit */
4653 ot = OT_QUAD;
4654 } else if (op == 3 || op == 5) {
4655 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4656 } else if (op == 6) {
4657 /* default push size is 64 bit */
4658 ot = dflag ? OT_QUAD : OT_WORD;
4661 if (mod != 3) {
4662 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4663 if (op >= 2 && op != 3 && op != 5)
4664 gen_op_ld_T0_A0(ot + s->mem_index);
4665 } else {
4666 gen_op_mov_TN_reg(ot, 0, rm);
4669 switch(op) {
4670 case 0: /* inc Ev */
4671 if (mod != 3)
4672 opreg = OR_TMP0;
4673 else
4674 opreg = rm;
4675 gen_inc(s, ot, opreg, 1);
4676 break;
4677 case 1: /* dec Ev */
4678 if (mod != 3)
4679 opreg = OR_TMP0;
4680 else
4681 opreg = rm;
4682 gen_inc(s, ot, opreg, -1);
4683 break;
4684 case 2: /* call Ev */
4685 /* XXX: optimize if memory (no 'and' is necessary) */
4686 if (s->dflag == 0)
4687 gen_op_andl_T0_ffff();
4688 next_eip = s->pc - s->cs_base;
4689 gen_movtl_T1_im(next_eip);
4690 gen_push_T1(s);
4691 gen_op_jmp_T0();
4692 gen_eob(s);
4693 break;
4694 case 3: /* lcall Ev */
4695 gen_op_ld_T1_A0(ot + s->mem_index);
4696 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4697 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4698 do_lcall:
4699 if (s->pe && !s->vm86) {
4700 if (s->cc_op != CC_OP_DYNAMIC)
4701 gen_op_set_cc_op(s->cc_op);
4702 gen_jmp_im(pc_start - s->cs_base);
4703 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4704 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
4705 tcg_const_i32(dflag),
4706 tcg_const_i32(s->pc - pc_start));
4707 } else {
4708 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4709 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
4710 tcg_const_i32(dflag),
4711 tcg_const_i32(s->pc - s->cs_base));
4713 gen_eob(s);
4714 break;
4715 case 4: /* jmp Ev */
4716 if (s->dflag == 0)
4717 gen_op_andl_T0_ffff();
4718 gen_op_jmp_T0();
4719 gen_eob(s);
4720 break;
4721 case 5: /* ljmp Ev */
4722 gen_op_ld_T1_A0(ot + s->mem_index);
4723 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4724 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4725 do_ljmp:
4726 if (s->pe && !s->vm86) {
4727 if (s->cc_op != CC_OP_DYNAMIC)
4728 gen_op_set_cc_op(s->cc_op);
4729 gen_jmp_im(pc_start - s->cs_base);
4730 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4731 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
4732 tcg_const_i32(s->pc - pc_start));
4733 } else {
4734 gen_op_movl_seg_T0_vm(R_CS);
4735 gen_op_movl_T0_T1();
4736 gen_op_jmp_T0();
4738 gen_eob(s);
4739 break;
4740 case 6: /* push Ev */
4741 gen_push_T0(s);
4742 break;
4743 default:
4744 goto illegal_op;
4746 break;
4748 case 0x84: /* test Ev, Gv */
4749 case 0x85:
4750 if ((b & 1) == 0)
4751 ot = OT_BYTE;
4752 else
4753 ot = dflag + OT_WORD;
4755 modrm = ldub_code(s->pc++);
4756 reg = ((modrm >> 3) & 7) | rex_r;
4758 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4759 gen_op_mov_TN_reg(ot, 1, reg);
4760 gen_op_testl_T0_T1_cc();
4761 s->cc_op = CC_OP_LOGICB + ot;
4762 break;
4764 case 0xa8: /* test eAX, Iv */
4765 case 0xa9:
4766 if ((b & 1) == 0)
4767 ot = OT_BYTE;
4768 else
4769 ot = dflag + OT_WORD;
4770 val = insn_get(s, ot);
4772 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4773 gen_op_movl_T1_im(val);
4774 gen_op_testl_T0_T1_cc();
4775 s->cc_op = CC_OP_LOGICB + ot;
4776 break;
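/* 0x98 is CBW/CWDE/CDQE depending on the operand size: sign-extend
   AL into AX, AX into EAX, or (with REX.W) EAX into RAX. */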
4778 case 0x98: /* CWDE/CBW */
4779 #ifdef TARGET_X86_64
4780 if (dflag == 2) {
4781 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4782 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4783 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4784 } else
4785 #endif
4786 if (dflag == 1) {
4787 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4788 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4789 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4790 } else {
4791 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4792 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4793 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4795 break;
4796 case 0x99: /* CDQ/CWD */
4797 #ifdef TARGET_X86_64
4798 if (dflag == 2) {
4799 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4800 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4801 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4802 } else
4803 #endif
4804 if (dflag == 1) {
4805 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4806 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4807 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4808 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4809 } else {
4810 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4811 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4812 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4813 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4815 break;
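/* Two- and three-operand imul.  Overflow is detected by comparing the
   truncated product with its sign extension: cc_src = res - sext(res)
   is non-zero exactly when the full product did not fit, which makes
   the lazy flag code set CF and OF. */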
4816 case 0x1af: /* imul Gv, Ev */
4817 case 0x69: /* imul Gv, Ev, I */
4818 case 0x6b:
4819 ot = dflag + OT_WORD;
4820 modrm = ldub_code(s->pc++);
4821 reg = ((modrm >> 3) & 7) | rex_r;
4822 if (b == 0x69)
4823 s->rip_offset = insn_const_size(ot);
4824 else if (b == 0x6b)
4825 s->rip_offset = 1;
4826 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4827 if (b == 0x69) {
4828 val = insn_get(s, ot);
4829 gen_op_movl_T1_im(val);
4830 } else if (b == 0x6b) {
4831 val = (int8_t)insn_get(s, OT_BYTE);
4832 gen_op_movl_T1_im(val);
4833 } else {
4834 gen_op_mov_TN_reg(ot, 1, reg);
4837 #ifdef TARGET_X86_64
4838 if (ot == OT_QUAD) {
4839 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
4840 } else
4841 #endif
4842 if (ot == OT_LONG) {
4843 #ifdef TARGET_X86_64
4844 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4845 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4846 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4847 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4848 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4849 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4850 #else
4852 TCGv_i64 t0, t1;
4853 t0 = tcg_temp_new_i64();
4854 t1 = tcg_temp_new_i64();
4855 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4856 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4857 tcg_gen_mul_i64(t0, t0, t1);
4858 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4859 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4860 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4861 tcg_gen_shri_i64(t0, t0, 32);
4862 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4863 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4865 #endif
4866 } else {
4867 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4868 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4869 /* XXX: use 32 bit mul which could be faster */
4870 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4871 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4872 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4873 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4875 gen_op_mov_reg_T0(ot, reg);
4876 s->cc_op = CC_OP_MULB + ot;
4877 break;
4878 case 0x1c0:
4879 case 0x1c1: /* xadd Ev, Gv */
4880 if ((b & 1) == 0)
4881 ot = OT_BYTE;
4882 else
4883 ot = dflag + OT_WORD;
4884 modrm = ldub_code(s->pc++);
4885 reg = ((modrm >> 3) & 7) | rex_r;
4886 mod = (modrm >> 6) & 3;
4887 if (mod == 3) {
4888 rm = (modrm & 7) | REX_B(s);
4889 gen_op_mov_TN_reg(ot, 0, reg);
4890 gen_op_mov_TN_reg(ot, 1, rm);
4891 gen_op_addl_T0_T1();
4892 gen_op_mov_reg_T1(ot, reg);
4893 gen_op_mov_reg_T0(ot, rm);
4894 } else {
4895 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4896 gen_op_mov_TN_reg(ot, 0, reg);
4897 gen_op_ld_T1_A0(ot + s->mem_index);
4898 gen_op_addl_T0_T1();
4899 gen_op_st_T0_A0(ot + s->mem_index);
4900 gen_op_mov_reg_T1(ot, reg);
4902 gen_op_update2_cc();
4903 s->cc_op = CC_OP_ADDB + ot;
4904 break;
4905 case 0x1b0:
4906 case 0x1b1: /* cmpxchg Ev, Gv */
4908 int label1, label2;
4909 TCGv t0, t1, t2, a0;
4911 if ((b & 1) == 0)
4912 ot = OT_BYTE;
4913 else
4914 ot = dflag + OT_WORD;
4915 modrm = ldub_code(s->pc++);
4916 reg = ((modrm >> 3) & 7) | rex_r;
4917 mod = (modrm >> 6) & 3;
4918 t0 = tcg_temp_local_new();
4919 t1 = tcg_temp_local_new();
4920 t2 = tcg_temp_local_new();
4921 a0 = tcg_temp_local_new();
4922 gen_op_mov_v_reg(ot, t1, reg);
4923 if (mod == 3) {
4924 rm = (modrm & 7) | REX_B(s);
4925 gen_op_mov_v_reg(ot, t0, rm);
4926 } else {
4927 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4928 tcg_gen_mov_tl(a0, cpu_A0);
4929 gen_op_ld_v(ot + s->mem_index, t0, a0);
4930 rm = 0; /* avoid warning */
4932 label1 = gen_new_label();
4933 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
4934 gen_extu(ot, t2);
4935 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4936 label2 = gen_new_label();
4937 if (mod == 3) {
4938 gen_op_mov_reg_v(ot, R_EAX, t0);
4939 tcg_gen_br(label2);
4940 gen_set_label(label1);
4941 gen_op_mov_reg_v(ot, rm, t1);
4942 } else {
4943 /* perform no-op store cycle like physical cpu; must be
4944 before changing accumulator to ensure idempotency if
4945 the store faults and the instruction is restarted */
4946 gen_op_st_v(ot + s->mem_index, t0, a0);
4947 gen_op_mov_reg_v(ot, R_EAX, t0);
4948 tcg_gen_br(label2);
4949 gen_set_label(label1);
4950 gen_op_st_v(ot + s->mem_index, t1, a0);
4952 gen_set_label(label2);
4953 tcg_gen_mov_tl(cpu_cc_src, t0);
4954 tcg_gen_mov_tl(cpu_cc_dst, t2);
4955 s->cc_op = CC_OP_SUBB + ot;
4956 tcg_temp_free(t0);
4957 tcg_temp_free(t1);
4958 tcg_temp_free(t2);
4959 tcg_temp_free(a0);
4961 break;
4962 case 0x1c7: /* cmpxchg8b */
4963 modrm = ldub_code(s->pc++);
4964 mod = (modrm >> 6) & 3;
4965 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4966 goto illegal_op;
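/* cmpxchg8b needs a memory operand and ModRM reg field /1; with REX.W
   it becomes cmpxchg16b, which additionally requires CPUID.CX16. */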
4967 #ifdef TARGET_X86_64
4968 if (dflag == 2) {
4969 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4970 goto illegal_op;
4971 gen_jmp_im(pc_start - s->cs_base);
4972 if (s->cc_op != CC_OP_DYNAMIC)
4973 gen_op_set_cc_op(s->cc_op);
4974 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4975 gen_helper_cmpxchg16b(cpu_A0);
4976 } else
4977 #endif
4979 if (!(s->cpuid_features & CPUID_CX8))
4980 goto illegal_op;
4981 gen_jmp_im(pc_start - s->cs_base);
4982 if (s->cc_op != CC_OP_DYNAMIC)
4983 gen_op_set_cc_op(s->cc_op);
4984 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4985 gen_helper_cmpxchg8b(cpu_A0);
4987 s->cc_op = CC_OP_EFLAGS;
4988 break;
4990 /**************************/
4991 /* push/pop */
4992 case 0x50 ... 0x57: /* push */
4993 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4994 gen_push_T0(s);
4995 break;
4996 case 0x58 ... 0x5f: /* pop */
4997 if (CODE64(s)) {
4998 ot = dflag ? OT_QUAD : OT_WORD;
4999 } else {
5000 ot = dflag + OT_WORD;
5002 gen_pop_T0(s);
5003 /* NOTE: order is important for pop %sp */
5004 gen_pop_update(s);
5005 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
5006 break;
5007 case 0x60: /* pusha */
5008 if (CODE64(s))
5009 goto illegal_op;
5010 gen_pusha(s);
5011 break;
5012 case 0x61: /* popa */
5013 if (CODE64(s))
5014 goto illegal_op;
5015 gen_popa(s);
5016 break;
5017 case 0x68: /* push Iv */
5018 case 0x6a:
5019 if (CODE64(s)) {
5020 ot = dflag ? OT_QUAD : OT_WORD;
5021 } else {
5022 ot = dflag + OT_WORD;
5024 if (b == 0x68)
5025 val = insn_get(s, ot);
5026 else
5027 val = (int8_t)insn_get(s, OT_BYTE);
5028 gen_op_movl_T0_im(val);
5029 gen_push_T0(s);
5030 break;
5031 case 0x8f: /* pop Ev */
5032 if (CODE64(s)) {
5033 ot = dflag ? OT_QUAD : OT_WORD;
5034 } else {
5035 ot = dflag + OT_WORD;
5037 modrm = ldub_code(s->pc++);
5038 mod = (modrm >> 6) & 3;
5039 gen_pop_T0(s);
5040 if (mod == 3) {
5041 /* NOTE: order is important for pop %sp */
5042 gen_pop_update(s);
5043 rm = (modrm & 7) | REX_B(s);
5044 gen_op_mov_reg_T0(ot, rm);
5045 } else {
5046 /* NOTE: order is important too for MMU exceptions */
5047 s->popl_esp_hack = 1 << ot;
5048 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5049 s->popl_esp_hack = 0;
5050 gen_pop_update(s);
5052 break;
5053 case 0xc8: /* enter */
5055 int level;
5056 val = lduw_code(s->pc);
5057 s->pc += 2;
5058 level = ldub_code(s->pc++);
5059 gen_enter(s, val, level);
5061 break;
5062 case 0xc9: /* leave */
5063 /* XXX: exception not precise (ESP is updated before potential exception) */
5064 if (CODE64(s)) {
5065 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5066 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5067 } else if (s->ss32) {
5068 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5069 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5070 } else {
5071 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5072 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5074 gen_pop_T0(s);
5075 if (CODE64(s)) {
5076 ot = dflag ? OT_QUAD : OT_WORD;
5077 } else {
5078 ot = dflag + OT_WORD;
5080 gen_op_mov_reg_T0(ot, R_EBP);
5081 gen_pop_update(s);
5082 break;
5083 case 0x06: /* push es */
5084 case 0x0e: /* push cs */
5085 case 0x16: /* push ss */
5086 case 0x1e: /* push ds */
5087 if (CODE64(s))
5088 goto illegal_op;
5089 gen_op_movl_T0_seg(b >> 3);
5090 gen_push_T0(s);
5091 break;
5092 case 0x1a0: /* push fs */
5093 case 0x1a8: /* push gs */
5094 gen_op_movl_T0_seg((b >> 3) & 7);
5095 gen_push_T0(s);
5096 break;
5097 case 0x07: /* pop es */
5098 case 0x17: /* pop ss */
5099 case 0x1f: /* pop ds */
5100 if (CODE64(s))
5101 goto illegal_op;
5102 reg = b >> 3;
5103 gen_pop_T0(s);
5104 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5105 gen_pop_update(s);
5106 if (reg == R_SS) {
5107 /* if reg == SS, inhibit interrupts/trace. */
5108 /* If several instructions disable interrupts, only the
5109 _first_ does it */
5110 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5111 gen_helper_set_inhibit_irq();
5112 s->tf = 0;
5114 if (s->is_jmp) {
5115 gen_jmp_im(s->pc - s->cs_base);
5116 gen_eob(s);
5118 break;
5119 case 0x1a1: /* pop fs */
5120 case 0x1a9: /* pop gs */
5121 gen_pop_T0(s);
5122 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5123 gen_pop_update(s);
5124 if (s->is_jmp) {
5125 gen_jmp_im(s->pc - s->cs_base);
5126 gen_eob(s);
5128 break;
5130 /**************************/
5131 /* mov */
5132 case 0x88:
5133 case 0x89: /* mov Gv, Ev */
5134 if ((b & 1) == 0)
5135 ot = OT_BYTE;
5136 else
5137 ot = dflag + OT_WORD;
5138 modrm = ldub_code(s->pc++);
5139 reg = ((modrm >> 3) & 7) | rex_r;
5141 /* generate a generic store */
5142 gen_ldst_modrm(s, modrm, ot, reg, 1);
5143 break;
5144 case 0xc6:
5145 case 0xc7: /* mov Ev, Iv */
5146 if ((b & 1) == 0)
5147 ot = OT_BYTE;
5148 else
5149 ot = dflag + OT_WORD;
5150 modrm = ldub_code(s->pc++);
5151 mod = (modrm >> 6) & 3;
5152 if (mod != 3) {
5153 s->rip_offset = insn_const_size(ot);
5154 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5156 val = insn_get(s, ot);
5157 gen_op_movl_T0_im(val);
5158 if (mod != 3)
5159 gen_op_st_T0_A0(ot + s->mem_index);
5160 else
5161 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5162 break;
5163 case 0x8a:
5164 case 0x8b: /* mov Ev, Gv */
5165 if ((b & 1) == 0)
5166 ot = OT_BYTE;
5167 else
5168 ot = OT_WORD + dflag;
5169 modrm = ldub_code(s->pc++);
5170 reg = ((modrm >> 3) & 7) | rex_r;
5172 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5173 gen_op_mov_reg_T0(ot, reg);
5174 break;
5175 case 0x8e: /* mov seg, Gv */
5176 modrm = ldub_code(s->pc++);
5177 reg = (modrm >> 3) & 7;
5178 if (reg >= 6 || reg == R_CS)
5179 goto illegal_op;
5180 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5181 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5182 if (reg == R_SS) {
5183 /* if reg == SS, inhibit interrupts/trace */
5184 /* If several instructions disable interrupts, only the
5185 _first_ does it */
5186 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5187 gen_helper_set_inhibit_irq();
5188 s->tf = 0;
5190 if (s->is_jmp) {
5191 gen_jmp_im(s->pc - s->cs_base);
5192 gen_eob(s);
5194 break;
5195 case 0x8c: /* mov Gv, seg */
5196 modrm = ldub_code(s->pc++);
5197 reg = (modrm >> 3) & 7;
5198 mod = (modrm >> 6) & 3;
5199 if (reg >= 6)
5200 goto illegal_op;
5201 gen_op_movl_T0_seg(reg);
5202 if (mod == 3)
5203 ot = OT_WORD + dflag;
5204 else
5205 ot = OT_WORD;
5206 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5207 break;
5209 case 0x1b6: /* movzbS Gv, Eb */
5210 case 0x1b7: /* movzwS Gv, Ew */
5211 case 0x1be: /* movsbS Gv, Eb */
5212 case 0x1bf: /* movswS Gv, Ew */
5214 int d_ot;
5215 /* d_ot is the size of destination */
5216 d_ot = dflag + OT_WORD;
5217 /* ot is the size of source */
5218 ot = (b & 1) + OT_BYTE;
5219 modrm = ldub_code(s->pc++);
5220 reg = ((modrm >> 3) & 7) | rex_r;
5221 mod = (modrm >> 6) & 3;
5222 rm = (modrm & 7) | REX_B(s);
5224 if (mod == 3) {
5225 gen_op_mov_TN_reg(ot, 0, rm);
5226 switch(ot | (b & 8)) {
5227 case OT_BYTE:
5228 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5229 break;
5230 case OT_BYTE | 8:
5231 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5232 break;
5233 case OT_WORD:
5234 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5235 break;
5236 default:
5237 case OT_WORD | 8:
5238 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5239 break;
5241 gen_op_mov_reg_T0(d_ot, reg);
5242 } else {
5243 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5244 if (b & 8) {
5245 gen_op_lds_T0_A0(ot + s->mem_index);
5246 } else {
5247 gen_op_ldu_T0_A0(ot + s->mem_index);
5249 gen_op_mov_reg_T0(d_ot, reg);
5252 break;
5254 case 0x8d: /* lea */
5255 ot = dflag + OT_WORD;
5256 modrm = ldub_code(s->pc++);
5257 mod = (modrm >> 6) & 3;
5258 if (mod == 3)
5259 goto illegal_op;
5260 reg = ((modrm >> 3) & 7) | rex_r;
5261 /* we must ensure that no segment is added */
5262 s->override = -1;
5263 val = s->addseg;
5264 s->addseg = 0;
5265 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5266 s->addseg = val;
5267 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5268 break;
5270 case 0xa0: /* mov EAX, Ov */
5271 case 0xa1:
5272 case 0xa2: /* mov Ov, EAX */
5273 case 0xa3:
5275 target_ulong offset_addr;
5277 if ((b & 1) == 0)
5278 ot = OT_BYTE;
5279 else
5280 ot = dflag + OT_WORD;
5281 #ifdef TARGET_X86_64
5282 if (s->aflag == 2) {
5283 offset_addr = ldq_code(s->pc);
5284 s->pc += 8;
5285 gen_op_movq_A0_im(offset_addr);
5286 } else
5287 #endif
5289 if (s->aflag) {
5290 offset_addr = insn_get(s, OT_LONG);
5291 } else {
5292 offset_addr = insn_get(s, OT_WORD);
5294 gen_op_movl_A0_im(offset_addr);
5296 gen_add_A0_ds_seg(s);
5297 if ((b & 2) == 0) {
5298 gen_op_ld_T0_A0(ot + s->mem_index);
5299 gen_op_mov_reg_T0(ot, R_EAX);
5300 } else {
5301 gen_op_mov_TN_reg(ot, 0, R_EAX);
5302 gen_op_st_T0_A0(ot + s->mem_index);
5305 break;
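/* xlat: AL = [seg:(E/R)BX + unsigned AL], default segment DS
   (overridable by a segment prefix via gen_add_A0_ds_seg). */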
5306 case 0xd7: /* xlat */
5307 #ifdef TARGET_X86_64
5308 if (s->aflag == 2) {
5309 gen_op_movq_A0_reg(R_EBX);
5310 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5311 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5312 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5313 } else
5314 #endif
5316 gen_op_movl_A0_reg(R_EBX);
5317 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5318 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5319 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5320 if (s->aflag == 0)
5321 gen_op_andl_A0_ffff();
5322 else
5323 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5325 gen_add_A0_ds_seg(s);
5326 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5327 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5328 break;
5329 case 0xb0 ... 0xb7: /* mov R, Ib */
5330 val = insn_get(s, OT_BYTE);
5331 gen_op_movl_T0_im(val);
5332 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5333 break;
5334 case 0xb8 ... 0xbf: /* mov R, Iv */
5335 #ifdef TARGET_X86_64
5336 if (dflag == 2) {
5337 uint64_t tmp;
5338 /* 64 bit case */
5339 tmp = ldq_code(s->pc);
5340 s->pc += 8;
5341 reg = (b & 7) | REX_B(s);
5342 gen_movtl_T0_im(tmp);
5343 gen_op_mov_reg_T0(OT_QUAD, reg);
5344 } else
5345 #endif
5347 ot = dflag ? OT_LONG : OT_WORD;
5348 val = insn_get(s, ot);
5349 reg = (b & 7) | REX_B(s);
5350 gen_op_movl_T0_im(val);
5351 gen_op_mov_reg_T0(ot, reg);
5353 break;
5355 case 0x91 ... 0x97: /* xchg R, EAX */
5356 do_xchg_reg_eax:
5357 ot = dflag + OT_WORD;
5358 reg = (b & 7) | REX_B(s);
5359 rm = R_EAX;
5360 goto do_xchg_reg;
5361 case 0x86:
5362 case 0x87: /* xchg Ev, Gv */
5363 if ((b & 1) == 0)
5364 ot = OT_BYTE;
5365 else
5366 ot = dflag + OT_WORD;
5367 modrm = ldub_code(s->pc++);
5368 reg = ((modrm >> 3) & 7) | rex_r;
5369 mod = (modrm >> 6) & 3;
5370 if (mod == 3) {
5371 rm = (modrm & 7) | REX_B(s);
5372 do_xchg_reg:
5373 gen_op_mov_TN_reg(ot, 0, reg);
5374 gen_op_mov_TN_reg(ot, 1, rm);
5375 gen_op_mov_reg_T0(ot, rm);
5376 gen_op_mov_reg_T1(ot, reg);
5377 } else {
5378 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5379 gen_op_mov_TN_reg(ot, 0, reg);
5380 /* for xchg, lock is implicit */
5381 if (!(prefixes & PREFIX_LOCK))
5382 gen_helper_lock();
5383 gen_op_ld_T1_A0(ot + s->mem_index);
5384 gen_op_st_T0_A0(ot + s->mem_index);
5385 if (!(prefixes & PREFIX_LOCK))
5386 gen_helper_unlock();
5387 gen_op_mov_reg_T1(ot, reg);
5389 break;
5390 case 0xc4: /* les Gv */
5391 if (CODE64(s))
5392 goto illegal_op;
5393 op = R_ES;
5394 goto do_lxx;
5395 case 0xc5: /* lds Gv */
5396 if (CODE64(s))
5397 goto illegal_op;
5398 op = R_DS;
5399 goto do_lxx;
5400 case 0x1b2: /* lss Gv */
5401 op = R_SS;
5402 goto do_lxx;
5403 case 0x1b4: /* lfs Gv */
5404 op = R_FS;
5405 goto do_lxx;
5406 case 0x1b5: /* lgs Gv */
5407 op = R_GS;
5408 do_lxx:
5409 ot = dflag ? OT_LONG : OT_WORD;
5410 modrm = ldub_code(s->pc++);
5411 reg = ((modrm >> 3) & 7) | rex_r;
5412 mod = (modrm >> 6) & 3;
5413 if (mod == 3)
5414 goto illegal_op;
5415 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5416 gen_op_ld_T1_A0(ot + s->mem_index);
5417 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5418 /* load the segment first to handle exceptions properly */
5419 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5420 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5421 /* then put the data */
5422 gen_op_mov_reg_T1(ot, reg);
5423 if (s->is_jmp) {
5424 gen_jmp_im(s->pc - s->cs_base);
5425 gen_eob(s);
5427 break;
5429 /************************/
5430 /* shifts */
5431 case 0xc0:
5432 case 0xc1:
5433 /* shift Ev,Ib */
5434 shift = 2;
5435 grp2:
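/* common GRP2 shift handling: shift == 0 means the count is in CL,
   shift == 1 means a count of 1, shift == 2 means an imm8 count. */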
5437 if ((b & 1) == 0)
5438 ot = OT_BYTE;
5439 else
5440 ot = dflag + OT_WORD;
5442 modrm = ldub_code(s->pc++);
5443 mod = (modrm >> 6) & 3;
5444 op = (modrm >> 3) & 7;
5446 if (mod != 3) {
5447 if (shift == 2) {
5448 s->rip_offset = 1;
5450 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5451 opreg = OR_TMP0;
5452 } else {
5453 opreg = (modrm & 7) | REX_B(s);
5456 /* simpler op */
5457 if (shift == 0) {
5458 gen_shift(s, op, ot, opreg, OR_ECX);
5459 } else {
5460 if (shift == 2) {
5461 shift = ldub_code(s->pc++);
5463 gen_shifti(s, op, ot, opreg, shift);
5466 break;
5467 case 0xd0:
5468 case 0xd1:
5469 /* shift Ev,1 */
5470 shift = 1;
5471 goto grp2;
5472 case 0xd2:
5473 case 0xd3:
5474 /* shift Ev,cl */
5475 shift = 0;
5476 goto grp2;
5478 case 0x1a4: /* shld imm */
5479 op = 0;
5480 shift = 1;
5481 goto do_shiftd;
5482 case 0x1a5: /* shld cl */
5483 op = 0;
5484 shift = 0;
5485 goto do_shiftd;
5486 case 0x1ac: /* shrd imm */
5487 op = 1;
5488 shift = 1;
5489 goto do_shiftd;
5490 case 0x1ad: /* shrd cl */
5491 op = 1;
5492 shift = 0;
5493 do_shiftd:
5494 ot = dflag + OT_WORD;
5495 modrm = ldub_code(s->pc++);
5496 mod = (modrm >> 6) & 3;
5497 rm = (modrm & 7) | REX_B(s);
5498 reg = ((modrm >> 3) & 7) | rex_r;
5499 if (mod != 3) {
5500 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5501 opreg = OR_TMP0;
5502 } else {
5503 opreg = rm;
5505 gen_op_mov_TN_reg(ot, 1, reg);
5507 if (shift) {
5508 val = ldub_code(s->pc++);
5509 tcg_gen_movi_tl(cpu_T3, val);
5510 } else {
5511 tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
5513 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5514 break;
5516 /************************/
5517 /* floats */
5518 case 0xd8 ... 0xdf:
5519 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5520 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5521 /* XXX: what to do if illegal op ? */
5522 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5523 break;
5525 modrm = ldub_code(s->pc++);
5526 mod = (modrm >> 6) & 3;
5527 rm = modrm & 7;
5528 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
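/* pack the x87 operation into a 6-bit index: bits 5:3 come from the
   low bits of the D8..DF escape opcode, bits 2:0 from the ModRM reg
   field; the memory and register forms are then dispatched below. */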
5529 if (mod != 3) {
5530 /* memory op */
5531 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5532 switch(op) {
5533 case 0x00 ... 0x07: /* fxxxs */
5534 case 0x10 ... 0x17: /* fixxxl */
5535 case 0x20 ... 0x27: /* fxxxl */
5536 case 0x30 ... 0x37: /* fixxx */
5538 int op1;
5539 op1 = op & 7;
5541 switch(op >> 4) {
5542 case 0:
5543 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5544 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5545 gen_helper_flds_FT0(cpu_tmp2_i32);
5546 break;
5547 case 1:
5548 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5549 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5550 gen_helper_fildl_FT0(cpu_tmp2_i32);
5551 break;
5552 case 2:
5553 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5554 (s->mem_index >> 2) - 1);
5555 gen_helper_fldl_FT0(cpu_tmp1_i64);
5556 break;
5557 case 3:
5558 default:
5559 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5560 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5561 gen_helper_fildl_FT0(cpu_tmp2_i32);
5562 break;
5565 gen_helper_fp_arith_ST0_FT0(op1);
5566 if (op1 == 3) {
5567 /* fcomp needs pop */
5568 gen_helper_fpop();
5571 break;
5572 case 0x08: /* flds */
5573 case 0x0a: /* fsts */
5574 case 0x0b: /* fstps */
5575 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5576 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5577 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5578 switch(op & 7) {
5579 case 0:
5580 switch(op >> 4) {
5581 case 0:
5582 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5583 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5584 gen_helper_flds_ST0(cpu_tmp2_i32);
5585 break;
5586 case 1:
5587 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5588 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5589 gen_helper_fildl_ST0(cpu_tmp2_i32);
5590 break;
5591 case 2:
5592 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5593 (s->mem_index >> 2) - 1);
5594 gen_helper_fldl_ST0(cpu_tmp1_i64);
5595 break;
5596 case 3:
5597 default:
5598 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5599 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5600 gen_helper_fildl_ST0(cpu_tmp2_i32);
5601 break;
5603 break;
5604 case 1:
5605 /* XXX: the corresponding CPUID bit must be tested ! */
5606 switch(op >> 4) {
5607 case 1:
5608 gen_helper_fisttl_ST0(cpu_tmp2_i32);
5609 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5610 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5611 break;
5612 case 2:
5613 gen_helper_fisttll_ST0(cpu_tmp1_i64);
5614 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5615 (s->mem_index >> 2) - 1);
5616 break;
5617 case 3:
5618 default:
5619 gen_helper_fistt_ST0(cpu_tmp2_i32);
5620 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5621 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5622 break;
5624 gen_helper_fpop();
5625 break;
5626 default:
5627 switch(op >> 4) {
5628 case 0:
5629 gen_helper_fsts_ST0(cpu_tmp2_i32);
5630 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5631 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5632 break;
5633 case 1:
5634 gen_helper_fistl_ST0(cpu_tmp2_i32);
5635 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5636 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5637 break;
5638 case 2:
5639 gen_helper_fstl_ST0(cpu_tmp1_i64);
5640 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5641 (s->mem_index >> 2) - 1);
5642 break;
5643 case 3:
5644 default:
5645 gen_helper_fist_ST0(cpu_tmp2_i32);
5646 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5647 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5648 break;
5650 if ((op & 7) == 3)
5651 gen_helper_fpop();
5652 break;
5654 break;
5655 case 0x0c: /* fldenv mem */
5656 if (s->cc_op != CC_OP_DYNAMIC)
5657 gen_op_set_cc_op(s->cc_op);
5658 gen_jmp_im(pc_start - s->cs_base);
5659 gen_helper_fldenv(
5660 cpu_A0, tcg_const_i32(s->dflag));
5661 break;
5662 case 0x0d: /* fldcw mem */
5663 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5664 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5665 gen_helper_fldcw(cpu_tmp2_i32);
5666 break;
5667 case 0x0e: /* fnstenv mem */
5668 if (s->cc_op != CC_OP_DYNAMIC)
5669 gen_op_set_cc_op(s->cc_op);
5670 gen_jmp_im(pc_start - s->cs_base);
5671 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
5672 break;
5673 case 0x0f: /* fnstcw mem */
5674 gen_helper_fnstcw(cpu_tmp2_i32);
5675 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5676 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5677 break;
5678 case 0x1d: /* fldt mem */
5679 if (s->cc_op != CC_OP_DYNAMIC)
5680 gen_op_set_cc_op(s->cc_op);
5681 gen_jmp_im(pc_start - s->cs_base);
5682 gen_helper_fldt_ST0(cpu_A0);
5683 break;
5684 case 0x1f: /* fstpt mem */
5685 if (s->cc_op != CC_OP_DYNAMIC)
5686 gen_op_set_cc_op(s->cc_op);
5687 gen_jmp_im(pc_start - s->cs_base);
5688 gen_helper_fstt_ST0(cpu_A0);
5689 gen_helper_fpop();
5690 break;
5691 case 0x2c: /* frstor mem */
5692 if (s->cc_op != CC_OP_DYNAMIC)
5693 gen_op_set_cc_op(s->cc_op);
5694 gen_jmp_im(pc_start - s->cs_base);
5695 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
5696 break;
5697 case 0x2e: /* fnsave mem */
5698 if (s->cc_op != CC_OP_DYNAMIC)
5699 gen_op_set_cc_op(s->cc_op);
5700 gen_jmp_im(pc_start - s->cs_base);
5701 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
5702 break;
5703 case 0x2f: /* fnstsw mem */
5704 gen_helper_fnstsw(cpu_tmp2_i32);
5705 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5706 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5707 break;
5708 case 0x3c: /* fbld */
5709 if (s->cc_op != CC_OP_DYNAMIC)
5710 gen_op_set_cc_op(s->cc_op);
5711 gen_jmp_im(pc_start - s->cs_base);
5712 gen_helper_fbld_ST0(cpu_A0);
5713 break;
5714 case 0x3e: /* fbstp */
5715 if (s->cc_op != CC_OP_DYNAMIC)
5716 gen_op_set_cc_op(s->cc_op);
5717 gen_jmp_im(pc_start - s->cs_base);
5718 gen_helper_fbst_ST0(cpu_A0);
5719 gen_helper_fpop();
5720 break;
5721 case 0x3d: /* fildll */
5722 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5723 (s->mem_index >> 2) - 1);
5724 gen_helper_fildll_ST0(cpu_tmp1_i64);
5725 break;
5726 case 0x3f: /* fistpll */
5727 gen_helper_fistll_ST0(cpu_tmp1_i64);
5728 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5729 (s->mem_index >> 2) - 1);
5730 gen_helper_fpop();
5731 break;
5732 default:
5733 goto illegal_op;
5735 } else {
5736 /* register float ops */
5737 opreg = rm;
5739 switch(op) {
5740 case 0x08: /* fld sti */
5741 gen_helper_fpush();
5742 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
5743 break;
5744 case 0x09: /* fxchg sti */
5745 case 0x29: /* fxchg4 sti, undocumented op */
5746 case 0x39: /* fxchg7 sti, undocumented op */
5747 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
5748 break;
5749 case 0x0a: /* grp d9/2 */
5750 switch(rm) {
5751 case 0: /* fnop */
5752 /* check exceptions (FreeBSD FPU probe) */
5753 if (s->cc_op != CC_OP_DYNAMIC)
5754 gen_op_set_cc_op(s->cc_op);
5755 gen_jmp_im(pc_start - s->cs_base);
5756 gen_helper_fwait();
5757 break;
5758 default:
5759 goto illegal_op;
5761 break;
5762 case 0x0c: /* grp d9/4 */
5763 switch(rm) {
5764 case 0: /* fchs */
5765 gen_helper_fchs_ST0();
5766 break;
5767 case 1: /* fabs */
5768 gen_helper_fabs_ST0();
5769 break;
5770 case 4: /* ftst */
5771 gen_helper_fldz_FT0();
5772 gen_helper_fcom_ST0_FT0();
5773 break;
5774 case 5: /* fxam */
5775 gen_helper_fxam_ST0();
5776 break;
5777 default:
5778 goto illegal_op;
5780 break;
5781 case 0x0d: /* grp d9/5 */
5783 switch(rm) {
5784 case 0:
5785 gen_helper_fpush();
5786 gen_helper_fld1_ST0();
5787 break;
5788 case 1:
5789 gen_helper_fpush();
5790 gen_helper_fldl2t_ST0();
5791 break;
5792 case 2:
5793 gen_helper_fpush();
5794 gen_helper_fldl2e_ST0();
5795 break;
5796 case 3:
5797 gen_helper_fpush();
5798 gen_helper_fldpi_ST0();
5799 break;
5800 case 4:
5801 gen_helper_fpush();
5802 gen_helper_fldlg2_ST0();
5803 break;
5804 case 5:
5805 gen_helper_fpush();
5806 gen_helper_fldln2_ST0();
5807 break;
5808 case 6:
5809 gen_helper_fpush();
5810 gen_helper_fldz_ST0();
5811 break;
5812 default:
5813 goto illegal_op;
5816 break;
5817 case 0x0e: /* grp d9/6 */
5818 switch(rm) {
5819 case 0: /* f2xm1 */
5820 gen_helper_f2xm1();
5821 break;
5822 case 1: /* fyl2x */
5823 gen_helper_fyl2x();
5824 break;
5825 case 2: /* fptan */
5826 gen_helper_fptan();
5827 break;
5828 case 3: /* fpatan */
5829 gen_helper_fpatan();
5830 break;
5831 case 4: /* fxtract */
5832 gen_helper_fxtract();
5833 break;
5834 case 5: /* fprem1 */
5835 gen_helper_fprem1();
5836 break;
5837 case 6: /* fdecstp */
5838 gen_helper_fdecstp();
5839 break;
5840 default:
5841 case 7: /* fincstp */
5842 gen_helper_fincstp();
5843 break;
5845 break;
5846 case 0x0f: /* grp d9/7 */
5847 switch(rm) {
5848 case 0: /* fprem */
5849 gen_helper_fprem();
5850 break;
5851 case 1: /* fyl2xp1 */
5852 gen_helper_fyl2xp1();
5853 break;
5854 case 2: /* fsqrt */
5855 gen_helper_fsqrt();
5856 break;
5857 case 3: /* fsincos */
5858 gen_helper_fsincos();
5859 break;
5860 case 5: /* fscale */
5861 gen_helper_fscale();
5862 break;
5863 case 4: /* frndint */
5864 gen_helper_frndint();
5865 break;
5866 case 6: /* fsin */
5867 gen_helper_fsin();
5868 break;
5869 default:
5870 case 7: /* fcos */
5871 gen_helper_fcos();
5872 break;
5874 break;
5875 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5876 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5877 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5879 int op1;
5881 op1 = op & 7;
5882 if (op >= 0x20) {
5883 gen_helper_fp_arith_STN_ST0(op1, opreg);
5884 if (op >= 0x30)
5885 gen_helper_fpop();
5886 } else {
5887 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5888 gen_helper_fp_arith_ST0_FT0(op1);
5891 break;
5892 case 0x02: /* fcom */
5893 case 0x22: /* fcom2, undocumented op */
5894 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5895 gen_helper_fcom_ST0_FT0();
5896 break;
5897 case 0x03: /* fcomp */
5898 case 0x23: /* fcomp3, undocumented op */
5899 case 0x32: /* fcomp5, undocumented op */
5900 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5901 gen_helper_fcom_ST0_FT0();
5902 gen_helper_fpop();
5903 break;
5904 case 0x15: /* da/5 */
5905 switch(rm) {
5906 case 1: /* fucompp */
5907 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5908 gen_helper_fucom_ST0_FT0();
5909 gen_helper_fpop();
5910 gen_helper_fpop();
5911 break;
5912 default:
5913 goto illegal_op;
5915 break;
5916 case 0x1c:
5917 switch(rm) {
5918 case 0: /* feni (287 only, just do nop here) */
5919 break;
5920 case 1: /* fdisi (287 only, just do nop here) */
5921 break;
5922 case 2: /* fclex */
5923 gen_helper_fclex();
5924 break;
5925 case 3: /* fninit */
5926 gen_helper_fninit();
5927 break;
5928 case 4: /* fsetpm (287 only, just do nop here) */
5929 break;
5930 default:
5931 goto illegal_op;
5933 break;
5934 case 0x1d: /* fucomi */
5935 if (s->cc_op != CC_OP_DYNAMIC)
5936 gen_op_set_cc_op(s->cc_op);
5937 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5938 gen_helper_fucomi_ST0_FT0();
5939 s->cc_op = CC_OP_EFLAGS;
5940 break;
5941 case 0x1e: /* fcomi */
5942 if (s->cc_op != CC_OP_DYNAMIC)
5943 gen_op_set_cc_op(s->cc_op);
5944 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5945 gen_helper_fcomi_ST0_FT0();
5946 s->cc_op = CC_OP_EFLAGS;
5947 break;
5948 case 0x28: /* ffree sti */
5949 gen_helper_ffree_STN(tcg_const_i32(opreg));
5950 break;
5951 case 0x2a: /* fst sti */
5952 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5953 break;
5954 case 0x2b: /* fstp sti */
5955 case 0x0b: /* fstp1 sti, undocumented op */
5956 case 0x3a: /* fstp8 sti, undocumented op */
5957 case 0x3b: /* fstp9 sti, undocumented op */
5958 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5959 gen_helper_fpop();
5960 break;
5961 case 0x2c: /* fucom st(i) */
5962 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5963 gen_helper_fucom_ST0_FT0();
5964 break;
5965 case 0x2d: /* fucomp st(i) */
5966 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5967 gen_helper_fucom_ST0_FT0();
5968 gen_helper_fpop();
5969 break;
5970 case 0x33: /* de/3 */
5971 switch(rm) {
5972 case 1: /* fcompp */
5973 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5974 gen_helper_fcom_ST0_FT0();
5975 gen_helper_fpop();
5976 gen_helper_fpop();
5977 break;
5978 default:
5979 goto illegal_op;
5981 break;
5982 case 0x38: /* ffreep sti, undocumented op */
5983 gen_helper_ffree_STN(tcg_const_i32(opreg));
5984 gen_helper_fpop();
5985 break;
5986 case 0x3c: /* df/4 */
5987 switch(rm) {
5988 case 0:
5989 gen_helper_fnstsw(cpu_tmp2_i32);
5990 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5991 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5992 break;
5993 default:
5994 goto illegal_op;
5996 break;
5997 case 0x3d: /* fucomip */
5998 if (s->cc_op != CC_OP_DYNAMIC)
5999 gen_op_set_cc_op(s->cc_op);
6000 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6001 gen_helper_fucomi_ST0_FT0();
6002 gen_helper_fpop();
6003 s->cc_op = CC_OP_EFLAGS;
6004 break;
6005 case 0x3e: /* fcomip */
6006 if (s->cc_op != CC_OP_DYNAMIC)
6007 gen_op_set_cc_op(s->cc_op);
6008 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
6009 gen_helper_fcomi_ST0_FT0();
6010 gen_helper_fpop();
6011 s->cc_op = CC_OP_EFLAGS;
6012 break;
6013 case 0x10 ... 0x13: /* fcmovxx */
6014 case 0x18 ... 0x1b:
6016 int op1, l1;
6017 static const uint8_t fcmov_cc[8] = {
6018 (JCC_B << 1),
6019 (JCC_Z << 1),
6020 (JCC_BE << 1),
6021 (JCC_P << 1),
6022 };
6023 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
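/* fcmovcc is emitted as a conditional skip: gen_jcc1 branches past the
   register move when the inverted condition holds, so ST0 is only
   overwritten when the fcmov condition is true. */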
6024 l1 = gen_new_label();
6025 gen_jcc1(s, s->cc_op, op1, l1);
6026 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
6027 gen_set_label(l1);
6029 break;
6030 default:
6031 goto illegal_op;
6034 break;
6035 /************************/
6036 /* string ops */
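/* Each string instruction below has a rep/repz/repnz form, expanded by
   gen_repz_* into an explicit loop that re-checks (E)CX and ends the
   translation block, and a plain single-iteration form. */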
6038 case 0xa4: /* movsS */
6039 case 0xa5:
6040 if ((b & 1) == 0)
6041 ot = OT_BYTE;
6042 else
6043 ot = dflag + OT_WORD;
6045 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6046 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6047 } else {
6048 gen_movs(s, ot);
6050 break;
6052 case 0xaa: /* stosS */
6053 case 0xab:
6054 if ((b & 1) == 0)
6055 ot = OT_BYTE;
6056 else
6057 ot = dflag + OT_WORD;
6059 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6060 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6061 } else {
6062 gen_stos(s, ot);
6064 break;
6065 case 0xac: /* lodsS */
6066 case 0xad:
6067 if ((b & 1) == 0)
6068 ot = OT_BYTE;
6069 else
6070 ot = dflag + OT_WORD;
6071 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6072 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6073 } else {
6074 gen_lods(s, ot);
6076 break;
6077 case 0xae: /* scasS */
6078 case 0xaf:
6079 if ((b & 1) == 0)
6080 ot = OT_BYTE;
6081 else
6082 ot = dflag + OT_WORD;
6083 if (prefixes & PREFIX_REPNZ) {
6084 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6085 } else if (prefixes & PREFIX_REPZ) {
6086 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6087 } else {
6088 gen_scas(s, ot);
6089 s->cc_op = CC_OP_SUBB + ot;
6091 break;
6093 case 0xa6: /* cmpsS */
6094 case 0xa7:
6095 if ((b & 1) == 0)
6096 ot = OT_BYTE;
6097 else
6098 ot = dflag + OT_WORD;
6099 if (prefixes & PREFIX_REPNZ) {
6100 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6101 } else if (prefixes & PREFIX_REPZ) {
6102 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6103 } else {
6104 gen_cmps(s, ot);
6105 s->cc_op = CC_OP_SUBB + ot;
6107 break;
6108 case 0x6c: /* insS */
6109 case 0x6d:
6110 if ((b & 1) == 0)
6111 ot = OT_BYTE;
6112 else
6113 ot = dflag ? OT_LONG : OT_WORD;
6114 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6115 gen_op_andl_T0_ffff();
6116 gen_check_io(s, ot, pc_start - s->cs_base,
6117 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6118 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6119 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6120 } else {
6121 gen_ins(s, ot);
6122 if (use_icount) {
6123 gen_jmp(s, s->pc - s->cs_base);
6126 break;
6127 case 0x6e: /* outsS */
6128 case 0x6f:
6129 if ((b & 1) == 0)
6130 ot = OT_BYTE;
6131 else
6132 ot = dflag ? OT_LONG : OT_WORD;
6133 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6134 gen_op_andl_T0_ffff();
6135 gen_check_io(s, ot, pc_start - s->cs_base,
6136 svm_is_rep(prefixes) | 4);
6137 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6138 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6139 } else {
6140 gen_outs(s, ot);
6141 if (use_icount) {
6142 gen_jmp(s, s->pc - s->cs_base);
6145 break;
6147 /************************/
6148 /* port I/O */
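/* When icount is enabled, in/out are bracketed with gen_io_start() /
   gen_io_end() and the TB is ended afterwards, so that device access
   happens at a deterministic instruction count. */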
6150 case 0xe4:
6151 case 0xe5:
6152 if ((b & 1) == 0)
6153 ot = OT_BYTE;
6154 else
6155 ot = dflag ? OT_LONG : OT_WORD;
6156 val = ldub_code(s->pc++);
6157 gen_op_movl_T0_im(val);
6158 gen_check_io(s, ot, pc_start - s->cs_base,
6159 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6160 if (use_icount)
6161 gen_io_start();
6162 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6163 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6164 gen_op_mov_reg_T1(ot, R_EAX);
6165 if (use_icount) {
6166 gen_io_end();
6167 gen_jmp(s, s->pc - s->cs_base);
6169 break;
6170 case 0xe6:
6171 case 0xe7:
6172 if ((b & 1) == 0)
6173 ot = OT_BYTE;
6174 else
6175 ot = dflag ? OT_LONG : OT_WORD;
6176 val = ldub_code(s->pc++);
6177 gen_op_movl_T0_im(val);
6178 gen_check_io(s, ot, pc_start - s->cs_base,
6179 svm_is_rep(prefixes));
6180 gen_op_mov_TN_reg(ot, 1, R_EAX);
6182 if (use_icount)
6183 gen_io_start();
6184 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6185 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6186 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6187 if (use_icount) {
6188 gen_io_end();
6189 gen_jmp(s, s->pc - s->cs_base);
6191 break;
6192 case 0xec:
6193 case 0xed:
6194 if ((b & 1) == 0)
6195 ot = OT_BYTE;
6196 else
6197 ot = dflag ? OT_LONG : OT_WORD;
6198 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6199 gen_op_andl_T0_ffff();
6200 gen_check_io(s, ot, pc_start - s->cs_base,
6201 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6202 if (use_icount)
6203 gen_io_start();
6204 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6205 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6206 gen_op_mov_reg_T1(ot, R_EAX);
6207 if (use_icount) {
6208 gen_io_end();
6209 gen_jmp(s, s->pc - s->cs_base);
6211 break;
6212 case 0xee:
6213 case 0xef:
6214 if ((b & 1) == 0)
6215 ot = OT_BYTE;
6216 else
6217 ot = dflag ? OT_LONG : OT_WORD;
6218 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6219 gen_op_andl_T0_ffff();
6220 gen_check_io(s, ot, pc_start - s->cs_base,
6221 svm_is_rep(prefixes));
6222 gen_op_mov_TN_reg(ot, 1, R_EAX);
6224 if (use_icount)
6225 gen_io_start();
6226 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6227 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6228 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6229 if (use_icount) {
6230 gen_io_end();
6231 gen_jmp(s, s->pc - s->cs_base);
6233 break;
6235 /************************/
6236 /* control */
6237 case 0xc2: /* ret im */
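/* near ret imm16: pop the return address, then release imm16 extra
   bytes of stack (typically used to discard callee arguments). */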
6238 val = ldsw_code(s->pc);
6239 s->pc += 2;
6240 gen_pop_T0(s);
6241 if (CODE64(s) && s->dflag)
6242 s->dflag = 2;
6243 gen_stack_update(s, val + (2 << s->dflag));
6244 if (s->dflag == 0)
6245 gen_op_andl_T0_ffff();
6246 gen_op_jmp_T0();
6247 gen_eob(s);
6248 break;
6249 case 0xc3: /* ret */
6250 gen_pop_T0(s);
6251 gen_pop_update(s);
6252 if (s->dflag == 0)
6253 gen_op_andl_T0_ffff();
6254 gen_op_jmp_T0();
6255 gen_eob(s);
6256 break;
6257 case 0xca: /* lret im */
6258 val = ldsw_code(s->pc);
6259 s->pc += 2;
6260 do_lret:
6261 if (s->pe && !s->vm86) {
6262 if (s->cc_op != CC_OP_DYNAMIC)
6263 gen_op_set_cc_op(s->cc_op);
6264 gen_jmp_im(pc_start - s->cs_base);
6265 gen_helper_lret_protected(tcg_const_i32(s->dflag),
6266 tcg_const_i32(val));
6267 } else {
6268 gen_stack_A0(s);
6269 /* pop offset */
6270 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6271 if (s->dflag == 0)
6272 gen_op_andl_T0_ffff();
6273 /* NOTE: keeping EIP updated is not a problem in case of
6274 exception */
6275 gen_op_jmp_T0();
6276 /* pop selector */
6277 gen_op_addl_A0_im(2 << s->dflag);
6278 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6279 gen_op_movl_seg_T0_vm(R_CS);
6280 /* add stack offset */
6281 gen_stack_update(s, val + (4 << s->dflag));
6283 gen_eob(s);
6284 break;
6285 case 0xcb: /* lret */
6286 val = 0;
6287 goto do_lret;
6288 case 0xcf: /* iret */
6289 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6290 if (!s->pe) {
6291 /* real mode */
6292 gen_helper_iret_real(tcg_const_i32(s->dflag));
6293 s->cc_op = CC_OP_EFLAGS;
6294 } else if (s->vm86) {
6295 if (s->iopl != 3) {
6296 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6297 } else {
6298 gen_helper_iret_real(tcg_const_i32(s->dflag));
6299 s->cc_op = CC_OP_EFLAGS;
6301 } else {
6302 if (s->cc_op != CC_OP_DYNAMIC)
6303 gen_op_set_cc_op(s->cc_op);
6304 gen_jmp_im(pc_start - s->cs_base);
6305 gen_helper_iret_protected(tcg_const_i32(s->dflag),
6306 tcg_const_i32(s->pc - s->cs_base));
6307 s->cc_op = CC_OP_EFLAGS;
6309 gen_eob(s);
6310 break;
6311 case 0xe8: /* call im */
6313 if (dflag)
6314 tval = (int32_t)insn_get(s, OT_LONG);
6315 else
6316 tval = (int16_t)insn_get(s, OT_WORD);
6317 next_eip = s->pc - s->cs_base;
6318 tval += next_eip;
6319 if (s->dflag == 0)
6320 tval &= 0xffff;
6321 else if(!CODE64(s))
6322 tval &= 0xffffffff;
6323 gen_movtl_T0_im(next_eip);
6324 gen_push_T0(s);
6325 gen_jmp(s, tval);
6327 break;
6328 case 0x9a: /* lcall im */
6330 unsigned int selector, offset;
6332 if (CODE64(s))
6333 goto illegal_op;
6334 ot = dflag ? OT_LONG : OT_WORD;
6335 offset = insn_get(s, ot);
6336 selector = insn_get(s, OT_WORD);
6338 gen_op_movl_T0_im(selector);
6339 gen_op_movl_T1_imu(offset);
6341 goto do_lcall;
6342 case 0xe9: /* jmp im */
6343 if (dflag)
6344 tval = (int32_t)insn_get(s, OT_LONG);
6345 else
6346 tval = (int16_t)insn_get(s, OT_WORD);
6347 tval += s->pc - s->cs_base;
6348 if (s->dflag == 0)
6349 tval &= 0xffff;
6350 else if(!CODE64(s))
6351 tval &= 0xffffffff;
6352 gen_jmp(s, tval);
6353 break;
6354 case 0xea: /* ljmp im */
6356 unsigned int selector, offset;
6358 if (CODE64(s))
6359 goto illegal_op;
6360 ot = dflag ? OT_LONG : OT_WORD;
6361 offset = insn_get(s, ot);
6362 selector = insn_get(s, OT_WORD);
6364 gen_op_movl_T0_im(selector);
6365 gen_op_movl_T1_imu(offset);
6367 goto do_ljmp;
6368 case 0xeb: /* jmp Jb */
6369 tval = (int8_t)insn_get(s, OT_BYTE);
6370 tval += s->pc - s->cs_base;
6371 if (s->dflag == 0)
6372 tval &= 0xffff;
6373 gen_jmp(s, tval);
6374 break;
6375 case 0x70 ... 0x7f: /* jcc Jb */
6376 tval = (int8_t)insn_get(s, OT_BYTE);
6377 goto do_jcc;
6378 case 0x180 ... 0x18f: /* jcc Jv */
6379 if (dflag) {
6380 tval = (int32_t)insn_get(s, OT_LONG);
6381 } else {
6382 tval = (int16_t)insn_get(s, OT_WORD);
6384 do_jcc:
6385 next_eip = s->pc - s->cs_base;
6386 tval += next_eip;
6387 if (s->dflag == 0)
6388 tval &= 0xffff;
6389 gen_jcc(s, b, tval, next_eip);
6390 break;
6392 case 0x190 ... 0x19f: /* setcc Gv */
6393 modrm = ldub_code(s->pc++);
6394 gen_setcc(s, b);
6395 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6396 break;
6397 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6399 int l1;
6400 TCGv t0;
6402 ot = dflag + OT_WORD;
6403 modrm = ldub_code(s->pc++);
6404 reg = ((modrm >> 3) & 7) | rex_r;
6405 mod = (modrm >> 6) & 3;
6406 t0 = tcg_temp_local_new();
6407 if (mod != 3) {
6408 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6409 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6410 } else {
6411 rm = (modrm & 7) | REX_B(s);
6412 gen_op_mov_v_reg(ot, t0, rm);
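/* cmov is emitted as a branch over the move: jump to l1 when the inverted
   condition (b ^ 1) holds, so the destination register is only written when
   the cmov condition is true */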
6414 #ifdef TARGET_X86_64
6415 if (ot == OT_LONG) {
6416 /* XXX: specific Intel behaviour ? */
6417 l1 = gen_new_label();
6418 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6419 tcg_gen_mov_tl(cpu_regs[reg], t0);
6420 gen_set_label(l1);
6421 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
6422 } else
6423 #endif
6425 l1 = gen_new_label();
6426 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6427 gen_op_mov_reg_v(ot, reg, t0);
6428 gen_set_label(l1);
6430 tcg_temp_free(t0);
6432 break;
6434 /************************/
6435 /* flags */
6436 case 0x9c: /* pushf */
6437 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6438 if (s->vm86 && s->iopl != 3) {
6439 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6440 } else {
6441 if (s->cc_op != CC_OP_DYNAMIC)
6442 gen_op_set_cc_op(s->cc_op);
6443 gen_helper_read_eflags(cpu_T[0]);
6444 gen_push_T0(s);
6446 break;
6447 case 0x9d: /* popf */
6448 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6449 if (s->vm86 && s->iopl != 3) {
6450 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6451 } else {
6452 gen_pop_T0(s);
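/* the set of EFLAGS bits that popf may modify depends on privilege:
   CPL 0 may also change IOPL and IF, CPL <= IOPL may change IF,
   otherwise neither; a 16-bit popf only touches the low word */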
6453 if (s->cpl == 0) {
6454 if (s->dflag) {
6455 gen_helper_write_eflags(cpu_T[0],
6456 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
6457 } else {
6458 gen_helper_write_eflags(cpu_T[0],
6459 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
6461 } else {
6462 if (s->cpl <= s->iopl) {
6463 if (s->dflag) {
6464 gen_helper_write_eflags(cpu_T[0],
6465 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
6466 } else {
6467 gen_helper_write_eflags(cpu_T[0],
6468 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
6470 } else {
6471 if (s->dflag) {
6472 gen_helper_write_eflags(cpu_T[0],
6473 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
6474 } else {
6475 gen_helper_write_eflags(cpu_T[0],
6476 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
6480 gen_pop_update(s);
6481 s->cc_op = CC_OP_EFLAGS;
6482 /* abort translation because TF flag may change */
6483 gen_jmp_im(s->pc - s->cs_base);
6484 gen_eob(s);
6486 break;
6487 case 0x9e: /* sahf */
6488 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6489 goto illegal_op;
6490 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6491 if (s->cc_op != CC_OP_DYNAMIC)
6492 gen_op_set_cc_op(s->cc_op);
6493 gen_compute_eflags(cpu_cc_src);
6494 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6495 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6496 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6497 s->cc_op = CC_OP_EFLAGS;
6498 break;
6499 case 0x9f: /* lahf */
6500 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6501 goto illegal_op;
6502 if (s->cc_op != CC_OP_DYNAMIC)
6503 gen_op_set_cc_op(s->cc_op);
6504 gen_compute_eflags(cpu_T[0]);
6505 /* Note: gen_compute_eflags() only gives the condition codes */
6506 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6507 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6508 break;
6509 case 0xf5: /* cmc */
6510 if (s->cc_op != CC_OP_DYNAMIC)
6511 gen_op_set_cc_op(s->cc_op);
6512 gen_compute_eflags(cpu_cc_src);
6513 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6514 s->cc_op = CC_OP_EFLAGS;
6515 break;
6516 case 0xf8: /* clc */
6517 if (s->cc_op != CC_OP_DYNAMIC)
6518 gen_op_set_cc_op(s->cc_op);
6519 gen_compute_eflags(cpu_cc_src);
6520 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6521 s->cc_op = CC_OP_EFLAGS;
6522 break;
6523 case 0xf9: /* stc */
6524 if (s->cc_op != CC_OP_DYNAMIC)
6525 gen_op_set_cc_op(s->cc_op);
6526 gen_compute_eflags(cpu_cc_src);
6527 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6528 s->cc_op = CC_OP_EFLAGS;
6529 break;
6530 case 0xfc: /* cld */
6531 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6532 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6533 break;
6534 case 0xfd: /* std */
6535 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6536 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6537 break;
6539 /************************/
6540 /* bit operations */
6541 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6542 ot = dflag + OT_WORD;
6543 modrm = ldub_code(s->pc++);
6544 op = (modrm >> 3) & 7;
6545 mod = (modrm >> 6) & 3;
6546 rm = (modrm & 7) | REX_B(s);
6547 if (mod != 3) {
6548 s->rip_offset = 1;
6549 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6550 gen_op_ld_T0_A0(ot + s->mem_index);
6551 } else {
6552 gen_op_mov_TN_reg(ot, 0, rm);
6554 /* load shift */
6555 val = ldub_code(s->pc++);
6556 gen_op_movl_T1_im(val);
6557 if (op < 4)
6558 goto illegal_op;
6559 op -= 4;
6560 goto bt_op;
6561 case 0x1a3: /* bt Gv, Ev */
6562 op = 0;
6563 goto do_btx;
6564 case 0x1ab: /* bts */
6565 op = 1;
6566 goto do_btx;
6567 case 0x1b3: /* btr */
6568 op = 2;
6569 goto do_btx;
6570 case 0x1bb: /* btc */
6571 op = 3;
6572 do_btx:
6573 ot = dflag + OT_WORD;
6574 modrm = ldub_code(s->pc++);
6575 reg = ((modrm >> 3) & 7) | rex_r;
6576 mod = (modrm >> 6) & 3;
6577 rm = (modrm & 7) | REX_B(s);
6578 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6579 if (mod != 3) {
6580 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6581 /* specific case: add the word displacement selected by the bit index to A0 */
6582 gen_exts(ot, cpu_T[1]);
6583 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6584 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6585 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6586 gen_op_ld_T0_A0(ot + s->mem_index);
6587 } else {
6588 gen_op_mov_TN_reg(ot, 0, rm);
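/* common tail for bt/bts/btr/btc: mask the bit index to the operand width,
   shift the operand so the tested bit becomes CF (via cc_src), and for
   bts/btr/btc set, clear or toggle that bit in the operand */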
6590 bt_op:
6591 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6592 switch(op) {
6593 case 0:
6594 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6595 tcg_gen_movi_tl(cpu_cc_dst, 0);
6596 break;
6597 case 1:
6598 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6599 tcg_gen_movi_tl(cpu_tmp0, 1);
6600 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6601 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6602 break;
6603 case 2:
6604 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6605 tcg_gen_movi_tl(cpu_tmp0, 1);
6606 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6607 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6608 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6609 break;
6610 default:
6611 case 3:
6612 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6613 tcg_gen_movi_tl(cpu_tmp0, 1);
6614 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6615 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6616 break;
6618 s->cc_op = CC_OP_SARB + ot;
6619 if (op != 0) {
6620 if (mod != 3)
6621 gen_op_st_T0_A0(ot + s->mem_index);
6622 else
6623 gen_op_mov_reg_T0(ot, rm);
6624 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6625 tcg_gen_movi_tl(cpu_cc_dst, 0);
6627 break;
6628 case 0x1bc: /* bsf */
6629 case 0x1bd: /* bsr */
6631 int label1;
6632 TCGv t0;
6634 ot = dflag + OT_WORD;
6635 modrm = ldub_code(s->pc++);
6636 reg = ((modrm >> 3) & 7) | rex_r;
6637 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6638 gen_extu(ot, cpu_T[0]);
6639 t0 = tcg_temp_local_new();
6640 tcg_gen_mov_tl(t0, cpu_T[0]);
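/* with a REPZ prefix and the ABM feature, opcode 0f bd is LZCNT and a helper
   taking the operand width is used; otherwise classic BSF/BSR: when the
   source is zero, ZF is set (cc_dst stays 0) and the destination register is
   left unmodified */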
6641 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6642 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
6643 switch(ot) {
6644 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6645 tcg_const_i32(16)); break;
6646 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6647 tcg_const_i32(32)); break;
6648 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6649 tcg_const_i32(64)); break;
6651 gen_op_mov_reg_T0(ot, reg);
6652 } else {
6653 label1 = gen_new_label();
6654 tcg_gen_movi_tl(cpu_cc_dst, 0);
6655 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6656 if (b & 1) {
6657 gen_helper_bsr(cpu_T[0], t0);
6658 } else {
6659 gen_helper_bsf(cpu_T[0], t0);
6661 gen_op_mov_reg_T0(ot, reg);
6662 tcg_gen_movi_tl(cpu_cc_dst, 1);
6663 gen_set_label(label1);
6664 tcg_gen_discard_tl(cpu_cc_src);
6665 s->cc_op = CC_OP_LOGICB + ot;
6667 tcg_temp_free(t0);
6669 break;
6670 /************************/
6671 /* bcd */
6672 case 0x27: /* daa */
6673 if (CODE64(s))
6674 goto illegal_op;
6675 if (s->cc_op != CC_OP_DYNAMIC)
6676 gen_op_set_cc_op(s->cc_op);
6677 gen_helper_daa();
6678 s->cc_op = CC_OP_EFLAGS;
6679 break;
6680 case 0x2f: /* das */
6681 if (CODE64(s))
6682 goto illegal_op;
6683 if (s->cc_op != CC_OP_DYNAMIC)
6684 gen_op_set_cc_op(s->cc_op);
6685 gen_helper_das();
6686 s->cc_op = CC_OP_EFLAGS;
6687 break;
6688 case 0x37: /* aaa */
6689 if (CODE64(s))
6690 goto illegal_op;
6691 if (s->cc_op != CC_OP_DYNAMIC)
6692 gen_op_set_cc_op(s->cc_op);
6693 gen_helper_aaa();
6694 s->cc_op = CC_OP_EFLAGS;
6695 break;
6696 case 0x3f: /* aas */
6697 if (CODE64(s))
6698 goto illegal_op;
6699 if (s->cc_op != CC_OP_DYNAMIC)
6700 gen_op_set_cc_op(s->cc_op);
6701 gen_helper_aas();
6702 s->cc_op = CC_OP_EFLAGS;
6703 break;
6704 case 0xd4: /* aam */
6705 if (CODE64(s))
6706 goto illegal_op;
6707 val = ldub_code(s->pc++);
6708 if (val == 0) {
6709 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6710 } else {
6711 gen_helper_aam(tcg_const_i32(val));
6712 s->cc_op = CC_OP_LOGICB;
6714 break;
6715 case 0xd5: /* aad */
6716 if (CODE64(s))
6717 goto illegal_op;
6718 val = ldub_code(s->pc++);
6719 gen_helper_aad(tcg_const_i32(val));
6720 s->cc_op = CC_OP_LOGICB;
6721 break;
6722 /************************/
6723 /* misc */
6724 case 0x90: /* nop */
6725 /* XXX: correct lock test for all insn */
6726 if (prefixes & PREFIX_LOCK) {
6727 goto illegal_op;
6729 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6730 if (REX_B(s)) {
6731 goto do_xchg_reg_eax;
6733 if (prefixes & PREFIX_REPZ) {
6734 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6736 break;
6737 case 0x9b: /* fwait */
6738 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6739 (HF_MP_MASK | HF_TS_MASK)) {
6740 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6741 } else {
6742 if (s->cc_op != CC_OP_DYNAMIC)
6743 gen_op_set_cc_op(s->cc_op);
6744 gen_jmp_im(pc_start - s->cs_base);
6745 gen_helper_fwait();
6747 break;
6748 case 0xcc: /* int3 */
6749 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6750 break;
6751 case 0xcd: /* int N */
6752 val = ldub_code(s->pc++);
6753 if (s->vm86 && s->iopl != 3) {
6754 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6755 } else {
6756 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6758 break;
6759 case 0xce: /* into */
6760 if (CODE64(s))
6761 goto illegal_op;
6762 if (s->cc_op != CC_OP_DYNAMIC)
6763 gen_op_set_cc_op(s->cc_op);
6764 gen_jmp_im(pc_start - s->cs_base);
6765 gen_helper_into(tcg_const_i32(s->pc - pc_start));
6766 break;
6767 #ifdef WANT_ICEBP
6768 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6769 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6770 #if 1
6771 gen_debug(s, pc_start - s->cs_base);
6772 #else
6773 /* start debug */
6774 tb_flush(cpu_single_env);
6775 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6776 #endif
6777 break;
6778 #endif
6779 case 0xfa: /* cli */
6780 if (!s->vm86) {
6781 if (s->cpl <= s->iopl) {
6782 gen_helper_cli();
6783 } else {
6784 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6786 } else {
6787 if (s->iopl == 3) {
6788 gen_helper_cli();
6789 } else {
6790 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6793 break;
6794 case 0xfb: /* sti */
6795 if (!s->vm86) {
6796 if (s->cpl <= s->iopl) {
6797 gen_sti:
6798 gen_helper_sti();
6799 /* interrupts are enabled only after the first insn following sti */
6800 /* if several instructions in a row inhibit interrupts, only the
6801 _first_ one sets the inhibit flag */
6802 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6803 gen_helper_set_inhibit_irq();
6804 /* give a chance to handle pending irqs */
6805 gen_jmp_im(s->pc - s->cs_base);
6806 gen_eob(s);
6807 } else {
6808 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6810 } else {
6811 if (s->iopl == 3) {
6812 goto gen_sti;
6813 } else {
6814 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6817 break;
6818 case 0x62: /* bound */
6819 if (CODE64(s))
6820 goto illegal_op;
6821 ot = dflag ? OT_LONG : OT_WORD;
6822 modrm = ldub_code(s->pc++);
6823 reg = (modrm >> 3) & 7;
6824 mod = (modrm >> 6) & 3;
6825 if (mod == 3)
6826 goto illegal_op;
6827 gen_op_mov_TN_reg(ot, 0, reg);
6828 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6829 gen_jmp_im(pc_start - s->cs_base);
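/* the boundw/boundl helpers are expected to raise the BOUND range (#BR)
   exception when the index lies outside the two bounds at the memory
   operand */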
6830 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6831 if (ot == OT_WORD)
6832 gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
6833 else
6834 gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
6835 break;
6836 case 0x1c8 ... 0x1cf: /* bswap reg */
6837 reg = (b & 7) | REX_B(s);
6838 #ifdef TARGET_X86_64
6839 if (dflag == 2) {
6840 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6841 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6842 gen_op_mov_reg_T0(OT_QUAD, reg);
6843 } else
6844 #endif
6846 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6847 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6848 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6849 gen_op_mov_reg_T0(OT_LONG, reg);
6851 break;
6852 case 0xd6: /* salc */
6853 if (CODE64(s))
6854 goto illegal_op;
6855 if (s->cc_op != CC_OP_DYNAMIC)
6856 gen_op_set_cc_op(s->cc_op);
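/* undocumented salc: AL = CF ? 0xff : 0x00, obtained by negating the
   0/1 carry value */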
6857 gen_compute_eflags_c(cpu_T[0]);
6858 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6859 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6860 break;
6861 case 0xe0: /* loopnz */
6862 case 0xe1: /* loopz */
6863 case 0xe2: /* loop */
6864 case 0xe3: /* jecxz */
6866 int l1, l2, l3;
6868 tval = (int8_t)insn_get(s, OT_BYTE);
6869 next_eip = s->pc - s->cs_base;
6870 tval += next_eip;
6871 if (s->dflag == 0)
6872 tval &= 0xffff;
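/* l1: branch taken, continue at tval; l3: branch not taken, continue at
   next_eip; l2: common exit once EIP has been set */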
6874 l1 = gen_new_label();
6875 l2 = gen_new_label();
6876 l3 = gen_new_label();
6877 b &= 3;
6878 switch(b) {
6879 case 0: /* loopnz */
6880 case 1: /* loopz */
6881 if (s->cc_op != CC_OP_DYNAMIC)
6882 gen_op_set_cc_op(s->cc_op);
6883 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6884 gen_op_jz_ecx(s->aflag, l3);
6885 gen_compute_eflags(cpu_tmp0);
6886 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
6887 if (b == 0) {
6888 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
6889 } else {
6890 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
6892 break;
6893 case 2: /* loop */
6894 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6895 gen_op_jnz_ecx(s->aflag, l1);
6896 break;
6897 default:
6898 case 3: /* jcxz */
6899 gen_op_jz_ecx(s->aflag, l1);
6900 break;
6903 gen_set_label(l3);
6904 gen_jmp_im(next_eip);
6905 tcg_gen_br(l2);
6907 gen_set_label(l1);
6908 gen_jmp_im(tval);
6909 gen_set_label(l2);
6910 gen_eob(s);
6912 break;
6913 case 0x130: /* wrmsr */
6914 case 0x132: /* rdmsr */
6915 if (s->cpl != 0) {
6916 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6917 } else {
6918 if (s->cc_op != CC_OP_DYNAMIC)
6919 gen_op_set_cc_op(s->cc_op);
6920 gen_jmp_im(pc_start - s->cs_base);
6921 if (b & 2) {
6922 gen_helper_rdmsr();
6923 } else {
6924 gen_helper_wrmsr();
6927 break;
6928 case 0x131: /* rdtsc */
6929 if (s->cc_op != CC_OP_DYNAMIC)
6930 gen_op_set_cc_op(s->cc_op);
6931 gen_jmp_im(pc_start - s->cs_base);
6932 if (use_icount)
6933 gen_io_start();
6934 gen_helper_rdtsc();
6935 if (use_icount) {
6936 gen_io_end();
6937 gen_jmp(s, s->pc - s->cs_base);
6939 break;
6940 case 0x133: /* rdpmc */
6941 if (s->cc_op != CC_OP_DYNAMIC)
6942 gen_op_set_cc_op(s->cc_op);
6943 gen_jmp_im(pc_start - s->cs_base);
6944 gen_helper_rdpmc();
6945 break;
6946 case 0x134: /* sysenter */
6947 /* on Intel CPUs, SYSENTER is also valid in 64-bit mode */
6948 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6949 goto illegal_op;
6950 if (!s->pe) {
6951 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6952 } else {
6953 gen_update_cc_op(s);
6954 gen_jmp_im(pc_start - s->cs_base);
6955 gen_helper_sysenter();
6956 gen_eob(s);
6958 break;
6959 case 0x135: /* sysexit */
6960 /* on Intel CPUs, SYSEXIT is also valid in 64-bit mode */
6961 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6962 goto illegal_op;
6963 if (!s->pe) {
6964 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6965 } else {
6966 gen_update_cc_op(s);
6967 gen_jmp_im(pc_start - s->cs_base);
6968 gen_helper_sysexit(tcg_const_i32(dflag));
6969 gen_eob(s);
6971 break;
6972 #ifdef TARGET_X86_64
6973 case 0x105: /* syscall */
6974 /* XXX: is it usable in real mode ? */
6975 gen_update_cc_op(s);
6976 gen_jmp_im(pc_start - s->cs_base);
6977 gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
6978 gen_eob(s);
6979 break;
6980 case 0x107: /* sysret */
6981 if (!s->pe) {
6982 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6983 } else {
6984 gen_update_cc_op(s);
6985 gen_jmp_im(pc_start - s->cs_base);
6986 gen_helper_sysret(tcg_const_i32(s->dflag));
6987 /* condition codes are modified only in long mode */
6988 if (s->lma)
6989 s->cc_op = CC_OP_EFLAGS;
6990 gen_eob(s);
6992 break;
6993 #endif
6994 case 0x1a2: /* cpuid */
6995 if (s->cc_op != CC_OP_DYNAMIC)
6996 gen_op_set_cc_op(s->cc_op);
6997 gen_jmp_im(pc_start - s->cs_base);
6998 gen_helper_cpuid();
6999 break;
7000 case 0xf4: /* hlt */
7001 if (s->cpl != 0) {
7002 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7003 } else {
7004 if (s->cc_op != CC_OP_DYNAMIC)
7005 gen_op_set_cc_op(s->cc_op);
7006 gen_jmp_im(pc_start - s->cs_base);
7007 gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
7008 s->is_jmp = DISAS_TB_JUMP;
7010 break;
7011 case 0x100:
7012 modrm = ldub_code(s->pc++);
7013 mod = (modrm >> 6) & 3;
7014 op = (modrm >> 3) & 7;
7015 switch(op) {
7016 case 0: /* sldt */
7017 if (!s->pe || s->vm86)
7018 goto illegal_op;
7019 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7020 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7021 ot = OT_WORD;
7022 if (mod == 3)
7023 ot += s->dflag;
7024 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7025 break;
7026 case 2: /* lldt */
7027 if (!s->pe || s->vm86)
7028 goto illegal_op;
7029 if (s->cpl != 0) {
7030 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7031 } else {
7032 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7033 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7034 gen_jmp_im(pc_start - s->cs_base);
7035 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7036 gen_helper_lldt(cpu_tmp2_i32);
7038 break;
7039 case 1: /* str */
7040 if (!s->pe || s->vm86)
7041 goto illegal_op;
7042 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7043 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7044 ot = OT_WORD;
7045 if (mod == 3)
7046 ot += s->dflag;
7047 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7048 break;
7049 case 3: /* ltr */
7050 if (!s->pe || s->vm86)
7051 goto illegal_op;
7052 if (s->cpl != 0) {
7053 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7054 } else {
7055 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7056 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7057 gen_jmp_im(pc_start - s->cs_base);
7058 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7059 gen_helper_ltr(cpu_tmp2_i32);
7061 break;
7062 case 4: /* verr */
7063 case 5: /* verw */
7064 if (!s->pe || s->vm86)
7065 goto illegal_op;
7066 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7067 if (s->cc_op != CC_OP_DYNAMIC)
7068 gen_op_set_cc_op(s->cc_op);
7069 if (op == 4)
7070 gen_helper_verr(cpu_T[0]);
7071 else
7072 gen_helper_verw(cpu_T[0]);
7073 s->cc_op = CC_OP_EFLAGS;
7074 break;
7075 default:
7076 goto illegal_op;
7078 break;
7079 case 0x101:
7080 modrm = ldub_code(s->pc++);
7081 mod = (modrm >> 6) & 3;
7082 op = (modrm >> 3) & 7;
7083 rm = modrm & 7;
7084 switch(op) {
7085 case 0: /* sgdt */
7086 if (mod == 3)
7087 goto illegal_op;
7088 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7089 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7090 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7091 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7092 gen_add_A0_im(s, 2);
7093 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7094 if (!s->dflag)
7095 gen_op_andl_T0_im(0xffffff);
7096 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7097 break;
7098 case 1:
7099 if (mod == 3) {
7100 switch (rm) {
7101 case 0: /* monitor */
7102 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7103 s->cpl != 0)
7104 goto illegal_op;
7105 if (s->cc_op != CC_OP_DYNAMIC)
7106 gen_op_set_cc_op(s->cc_op);
7107 gen_jmp_im(pc_start - s->cs_base);
7108 #ifdef TARGET_X86_64
7109 if (s->aflag == 2) {
7110 gen_op_movq_A0_reg(R_EAX);
7111 } else
7112 #endif
7114 gen_op_movl_A0_reg(R_EAX);
7115 if (s->aflag == 0)
7116 gen_op_andl_A0_ffff();
7118 gen_add_A0_ds_seg(s);
7119 gen_helper_monitor(cpu_A0);
7120 break;
7121 case 1: /* mwait */
7122 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7123 s->cpl != 0)
7124 goto illegal_op;
7125 gen_update_cc_op(s);
7126 gen_jmp_im(pc_start - s->cs_base);
7127 gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
7128 gen_eob(s);
7129 break;
7130 default:
7131 goto illegal_op;
7133 } else { /* sidt */
7134 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7135 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7136 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7137 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7138 gen_add_A0_im(s, 2);
7139 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7140 if (!s->dflag)
7141 gen_op_andl_T0_im(0xffffff);
7142 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7144 break;
7145 case 2: /* lgdt */
7146 case 3: /* lidt */
7147 if (mod == 3) {
7148 if (s->cc_op != CC_OP_DYNAMIC)
7149 gen_op_set_cc_op(s->cc_op);
7150 gen_jmp_im(pc_start - s->cs_base);
7151 switch(rm) {
7152 case 0: /* VMRUN */
7153 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7154 goto illegal_op;
7155 if (s->cpl != 0) {
7156 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7157 break;
7158 } else {
7159 gen_helper_vmrun(tcg_const_i32(s->aflag),
7160 tcg_const_i32(s->pc - pc_start));
7161 tcg_gen_exit_tb(0);
7162 s->is_jmp = DISAS_TB_JUMP;
7164 break;
7165 case 1: /* VMMCALL */
7166 if (!(s->flags & HF_SVME_MASK))
7167 goto illegal_op;
7168 gen_helper_vmmcall();
7169 break;
7170 case 2: /* VMLOAD */
7171 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7172 goto illegal_op;
7173 if (s->cpl != 0) {
7174 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7175 break;
7176 } else {
7177 gen_helper_vmload(tcg_const_i32(s->aflag));
7179 break;
7180 case 3: /* VMSAVE */
7181 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7182 goto illegal_op;
7183 if (s->cpl != 0) {
7184 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7185 break;
7186 } else {
7187 gen_helper_vmsave(tcg_const_i32(s->aflag));
7189 break;
7190 case 4: /* STGI */
7191 if ((!(s->flags & HF_SVME_MASK) &&
7192 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7193 !s->pe)
7194 goto illegal_op;
7195 if (s->cpl != 0) {
7196 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7197 break;
7198 } else {
7199 gen_helper_stgi();
7201 break;
7202 case 5: /* CLGI */
7203 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7204 goto illegal_op;
7205 if (s->cpl != 0) {
7206 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7207 break;
7208 } else {
7209 gen_helper_clgi();
7211 break;
7212 case 6: /* SKINIT */
7213 if ((!(s->flags & HF_SVME_MASK) &&
7214 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7215 !s->pe)
7216 goto illegal_op;
7217 gen_helper_skinit();
7218 break;
7219 case 7: /* INVLPGA */
7220 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7221 goto illegal_op;
7222 if (s->cpl != 0) {
7223 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7224 break;
7225 } else {
7226 gen_helper_invlpga(tcg_const_i32(s->aflag));
7228 break;
7229 default:
7230 goto illegal_op;
7232 } else if (s->cpl != 0) {
7233 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7234 } else {
7235 gen_svm_check_intercept(s, pc_start,
7236 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
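/* load the pseudo-descriptor: 16-bit limit first, then the base;
   with a 16-bit operand size only the low 24 bits of the base are used */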
7237 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7238 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7239 gen_add_A0_im(s, 2);
7240 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7241 if (!s->dflag)
7242 gen_op_andl_T0_im(0xffffff);
7243 if (op == 2) {
7244 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7245 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7246 } else {
7247 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7248 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7251 break;
7252 case 4: /* smsw */
7253 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
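/* smsw only returns the low 32 bits of CR0; on big-endian hosts with a
   64-bit target_ulong those live at offset +4 within cr[0] */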
7254 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7255 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7256 #else
7257 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7258 #endif
7259 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
7260 break;
7261 case 6: /* lmsw */
7262 if (s->cpl != 0) {
7263 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7264 } else {
7265 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7266 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7267 gen_helper_lmsw(cpu_T[0]);
7268 gen_jmp_im(s->pc - s->cs_base);
7269 gen_eob(s);
7271 break;
7272 case 7:
7273 if (mod != 3) { /* invlpg */
7274 if (s->cpl != 0) {
7275 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7276 } else {
7277 if (s->cc_op != CC_OP_DYNAMIC)
7278 gen_op_set_cc_op(s->cc_op);
7279 gen_jmp_im(pc_start - s->cs_base);
7280 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7281 gen_helper_invlpg(cpu_A0);
7282 gen_jmp_im(s->pc - s->cs_base);
7283 gen_eob(s);
7285 } else {
7286 switch (rm) {
7287 case 0: /* swapgs */
7288 #ifdef TARGET_X86_64
7289 if (CODE64(s)) {
7290 if (s->cpl != 0) {
7291 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7292 } else {
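/* swapgs exchanges the current GS base with the kernelgsbase field
   (the KERNEL_GS_BASE MSR value) */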
7293 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7294 offsetof(CPUX86State,segs[R_GS].base));
7295 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7296 offsetof(CPUX86State,kernelgsbase));
7297 tcg_gen_st_tl(cpu_T[1], cpu_env,
7298 offsetof(CPUX86State,segs[R_GS].base));
7299 tcg_gen_st_tl(cpu_T[0], cpu_env,
7300 offsetof(CPUX86State,kernelgsbase));
7302 } else
7303 #endif
7305 goto illegal_op;
7307 break;
7308 case 1: /* rdtscp */
7309 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7310 goto illegal_op;
7311 if (s->cc_op != CC_OP_DYNAMIC)
7312 gen_op_set_cc_op(s->cc_op);
7313 gen_jmp_im(pc_start - s->cs_base);
7314 if (use_icount)
7315 gen_io_start();
7316 gen_helper_rdtscp();
7317 if (use_icount) {
7318 gen_io_end();
7319 gen_jmp(s, s->pc - s->cs_base);
7321 break;
7322 default:
7323 goto illegal_op;
7326 break;
7327 default:
7328 goto illegal_op;
7330 break;
7331 case 0x108: /* invd */
7332 case 0x109: /* wbinvd */
7333 if (s->cpl != 0) {
7334 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7335 } else {
7336 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7337 /* nothing to do */
7339 break;
7340 case 0x63: /* arpl, or movsxd (movslq) in x86_64 code */
7341 #ifdef TARGET_X86_64
7342 if (CODE64(s)) {
7343 int d_ot;
7344 /* d_ot is the size of the destination */
7345 d_ot = dflag + OT_WORD;
7347 modrm = ldub_code(s->pc++);
7348 reg = ((modrm >> 3) & 7) | rex_r;
7349 mod = (modrm >> 6) & 3;
7350 rm = (modrm & 7) | REX_B(s);
7352 if (mod == 3) {
7353 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7354 /* sign extend */
7355 if (d_ot == OT_QUAD)
7356 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7357 gen_op_mov_reg_T0(d_ot, reg);
7358 } else {
7359 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7360 if (d_ot == OT_QUAD) {
7361 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7362 } else {
7363 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7365 gen_op_mov_reg_T0(d_ot, reg);
7367 } else
7368 #endif
7370 int label1;
7371 TCGv t0, t1, t2, a0;
7373 if (!s->pe || s->vm86)
7374 goto illegal_op;
7375 t0 = tcg_temp_local_new();
7376 t1 = tcg_temp_local_new();
7377 t2 = tcg_temp_local_new();
7378 ot = OT_WORD;
7379 modrm = ldub_code(s->pc++);
7380 reg = (modrm >> 3) & 7;
7381 mod = (modrm >> 6) & 3;
7382 rm = modrm & 7;
7383 if (mod != 3) {
7384 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7385 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7386 a0 = tcg_temp_local_new();
7387 tcg_gen_mov_tl(a0, cpu_A0);
7388 } else {
7389 gen_op_mov_v_reg(ot, t0, rm);
7390 TCGV_UNUSED(a0);
7392 gen_op_mov_v_reg(ot, t1, reg);
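/* arpl: if the RPL (low two bits) of the destination selector is below the
   RPL of the source register, raise it and set ZF; t2 holds the ZF value
   that is merged into the computed EFLAGS below */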
7393 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7394 tcg_gen_andi_tl(t1, t1, 3);
7395 tcg_gen_movi_tl(t2, 0);
7396 label1 = gen_new_label();
7397 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7398 tcg_gen_andi_tl(t0, t0, ~3);
7399 tcg_gen_or_tl(t0, t0, t1);
7400 tcg_gen_movi_tl(t2, CC_Z);
7401 gen_set_label(label1);
7402 if (mod != 3) {
7403 gen_op_st_v(ot + s->mem_index, t0, a0);
7404 tcg_temp_free(a0);
7405 } else {
7406 gen_op_mov_reg_v(ot, rm, t0);
7408 if (s->cc_op != CC_OP_DYNAMIC)
7409 gen_op_set_cc_op(s->cc_op);
7410 gen_compute_eflags(cpu_cc_src);
7411 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7412 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7413 s->cc_op = CC_OP_EFLAGS;
7414 tcg_temp_free(t0);
7415 tcg_temp_free(t1);
7416 tcg_temp_free(t2);
7418 break;
7419 case 0x102: /* lar */
7420 case 0x103: /* lsl */
7422 int label1;
7423 TCGv t0;
7424 if (!s->pe || s->vm86)
7425 goto illegal_op;
7426 ot = dflag ? OT_LONG : OT_WORD;
7427 modrm = ldub_code(s->pc++);
7428 reg = ((modrm >> 3) & 7) | rex_r;
7429 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7430 t0 = tcg_temp_local_new();
7431 if (s->cc_op != CC_OP_DYNAMIC)
7432 gen_op_set_cc_op(s->cc_op);
7433 if (b == 0x102)
7434 gen_helper_lar(t0, cpu_T[0]);
7435 else
7436 gen_helper_lsl(t0, cpu_T[0]);
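/* the lar/lsl helpers report success by setting CC_Z in cpu_cc_src;
   the destination register is written only when that bit is set */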
7437 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7438 label1 = gen_new_label();
7439 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7440 gen_op_mov_reg_v(ot, reg, t0);
7441 gen_set_label(label1);
7442 s->cc_op = CC_OP_EFLAGS;
7443 tcg_temp_free(t0);
7445 break;
7446 case 0x118:
7447 modrm = ldub_code(s->pc++);
7448 mod = (modrm >> 6) & 3;
7449 op = (modrm >> 3) & 7;
7450 switch(op) {
7451 case 0: /* prefetchnta */
7452 case 1: /* prefetcht0 */
7453 case 2: /* prefetcht1 */
7454 case 3: /* prefetcht2 */
7455 if (mod == 3)
7456 goto illegal_op;
7457 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7458 /* nothing more to do */
7459 break;
7460 default: /* nop (multi byte) */
7461 gen_nop_modrm(s, modrm);
7462 break;
7464 break;
7465 case 0x119 ... 0x11f: /* nop (multi byte) */
7466 modrm = ldub_code(s->pc++);
7467 gen_nop_modrm(s, modrm);
7468 break;
7469 case 0x120: /* mov reg, crN */
7470 case 0x122: /* mov crN, reg */
7471 if (s->cpl != 0) {
7472 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7473 } else {
7474 modrm = ldub_code(s->pc++);
7475 if ((modrm & 0xc0) != 0xc0)
7476 goto illegal_op;
7477 rm = (modrm & 7) | REX_B(s);
7478 reg = ((modrm >> 3) & 7) | rex_r;
7479 if (CODE64(s))
7480 ot = OT_QUAD;
7481 else
7482 ot = OT_LONG;
7483 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7484 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7485 reg = 8;
7487 switch(reg) {
7488 case 0:
7489 case 2:
7490 case 3:
7491 case 4:
7492 case 8:
7493 if (s->cc_op != CC_OP_DYNAMIC)
7494 gen_op_set_cc_op(s->cc_op);
7495 gen_jmp_im(pc_start - s->cs_base);
7496 if (b & 2) {
7497 gen_op_mov_TN_reg(ot, 0, rm);
7498 gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
7499 gen_jmp_im(s->pc - s->cs_base);
7500 gen_eob(s);
7501 } else {
7502 gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
7503 gen_op_mov_reg_T0(ot, rm);
7505 break;
7506 default:
7507 goto illegal_op;
7510 break;
7511 case 0x121: /* mov reg, drN */
7512 case 0x123: /* mov drN, reg */
7513 if (s->cpl != 0) {
7514 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7515 } else {
7516 modrm = ldub_code(s->pc++);
7517 if ((modrm & 0xc0) != 0xc0)
7518 goto illegal_op;
7519 rm = (modrm & 7) | REX_B(s);
7520 reg = ((modrm >> 3) & 7) | rex_r;
7521 if (CODE64(s))
7522 ot = OT_QUAD;
7523 else
7524 ot = OT_LONG;
7525 /* XXX: do it dynamically with CR4.DE bit */
7526 if (reg == 4 || reg == 5 || reg >= 8)
7527 goto illegal_op;
7528 if (b & 2) {
7529 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7530 gen_op_mov_TN_reg(ot, 0, rm);
7531 gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
7532 gen_jmp_im(s->pc - s->cs_base);
7533 gen_eob(s);
7534 } else {
7535 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7536 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7537 gen_op_mov_reg_T0(ot, rm);
7540 break;
7541 case 0x106: /* clts */
7542 if (s->cpl != 0) {
7543 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7544 } else {
7545 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7546 gen_helper_clts();
7547 /* abort block because static cpu state changed */
7548 gen_jmp_im(s->pc - s->cs_base);
7549 gen_eob(s);
7551 break;
7552 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7553 case 0x1c3: /* MOVNTI reg, mem */
7554 if (!(s->cpuid_features & CPUID_SSE2))
7555 goto illegal_op;
7556 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7557 modrm = ldub_code(s->pc++);
7558 mod = (modrm >> 6) & 3;
7559 if (mod == 3)
7560 goto illegal_op;
7561 reg = ((modrm >> 3) & 7) | rex_r;
7562 /* generate a generic store */
7563 gen_ldst_modrm(s, modrm, ot, reg, 1);
7564 break;
7565 case 0x1ae:
7566 modrm = ldub_code(s->pc++);
7567 mod = (modrm >> 6) & 3;
7568 op = (modrm >> 3) & 7;
7569 switch(op) {
7570 case 0: /* fxsave */
7571 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7572 (s->prefix & PREFIX_LOCK))
7573 goto illegal_op;
7574 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7575 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7576 break;
7578 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7579 if (s->cc_op != CC_OP_DYNAMIC)
7580 gen_op_set_cc_op(s->cc_op);
7581 gen_jmp_im(pc_start - s->cs_base);
7582 gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
7583 break;
7584 case 1: /* fxrstor */
7585 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7586 (s->prefix & PREFIX_LOCK))
7587 goto illegal_op;
7588 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7589 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7590 break;
7592 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7593 if (s->cc_op != CC_OP_DYNAMIC)
7594 gen_op_set_cc_op(s->cc_op);
7595 gen_jmp_im(pc_start - s->cs_base);
7596 gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
7597 break;
7598 case 2: /* ldmxcsr */
7599 case 3: /* stmxcsr */
7600 if (s->flags & HF_TS_MASK) {
7601 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7602 break;
7604 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7605 mod == 3)
7606 goto illegal_op;
7607 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7608 if (op == 2) {
7609 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7610 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7611 gen_helper_ldmxcsr(cpu_tmp2_i32);
7612 } else {
7613 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7614 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7616 break;
7617 case 5: /* lfence */
7618 case 6: /* mfence */
7619 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7620 goto illegal_op;
7621 break;
7622 case 7: /* sfence / clflush */
7623 if ((modrm & 0xc7) == 0xc0) {
7624 /* sfence */
7625 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7626 if (!(s->cpuid_features & CPUID_SSE))
7627 goto illegal_op;
7628 } else {
7629 /* clflush */
7630 if (!(s->cpuid_features & CPUID_CLFLUSH))
7631 goto illegal_op;
7632 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7634 break;
7635 default:
7636 goto illegal_op;
7638 break;
7639 case 0x10d: /* 3DNow! prefetch(w) */
7640 modrm = ldub_code(s->pc++);
7641 mod = (modrm >> 6) & 3;
7642 if (mod == 3)
7643 goto illegal_op;
7644 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7645 /* ignore for now */
7646 break;
7647 case 0x1aa: /* rsm */
7648 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7649 if (!(s->flags & HF_SMM_MASK))
7650 goto illegal_op;
7651 gen_update_cc_op(s);
7652 gen_jmp_im(s->pc - s->cs_base);
7653 gen_helper_rsm();
7654 gen_eob(s);
7655 break;
7656 case 0x1b8: /* SSE4.2 popcnt */
7657 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7658 PREFIX_REPZ)
7659 goto illegal_op;
7660 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7661 goto illegal_op;
7663 modrm = ldub_code(s->pc++);
7664 reg = ((modrm >> 3) & 7);
7666 if (s->prefix & PREFIX_DATA)
7667 ot = OT_WORD;
7668 else if (s->dflag != 2)
7669 ot = OT_LONG;
7670 else
7671 ot = OT_QUAD;
7673 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
7674 gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
7675 gen_op_mov_reg_T0(ot, reg);
7677 s->cc_op = CC_OP_EFLAGS;
7678 break;
7679 case 0x10e ... 0x10f:
7680 /* 3DNow! instructions, ignore prefixes */
7681 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
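/* fall through to the common SSE/MMX decoder */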
7682 case 0x110 ... 0x117:
7683 case 0x128 ... 0x12f:
7684 case 0x138 ... 0x13a:
7685 case 0x150 ... 0x179:
7686 case 0x17c ... 0x17f:
7687 case 0x1c2:
7688 case 0x1c4 ... 0x1c6:
7689 case 0x1d0 ... 0x1fe:
7690 gen_sse(s, b, pc_start, rex_r);
7691 break;
7692 default:
7693 goto illegal_op;
7695 /* lock generation */
7696 if (s->prefix & PREFIX_LOCK)
7697 gen_helper_unlock();
7698 return s->pc;
7699 illegal_op:
7700 if (s->prefix & PREFIX_LOCK)
7701 gen_helper_unlock();
7702 /* XXX: ensure that no lock was generated */
7703 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7704 return s->pc;
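/* register the TCG globals backed by CPUX86State: the env pointer, the
   condition-code working variables and the guest general purpose registers */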
7707 void optimize_flags_init(void)
7709 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7710 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7711 offsetof(CPUX86State, cc_op), "cc_op");
7712 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7713 "cc_src");
7714 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7715 "cc_dst");
7716 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
7717 "cc_tmp");
7719 #ifdef TARGET_X86_64
7720 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7721 offsetof(CPUX86State, regs[R_EAX]), "rax");
7722 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7723 offsetof(CPUX86State, regs[R_ECX]), "rcx");
7724 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7725 offsetof(CPUX86State, regs[R_EDX]), "rdx");
7726 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7727 offsetof(CPUX86State, regs[R_EBX]), "rbx");
7728 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7729 offsetof(CPUX86State, regs[R_ESP]), "rsp");
7730 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7731 offsetof(CPUX86State, regs[R_EBP]), "rbp");
7732 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7733 offsetof(CPUX86State, regs[R_ESI]), "rsi");
7734 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7735 offsetof(CPUX86State, regs[R_EDI]), "rdi");
7736 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7737 offsetof(CPUX86State, regs[8]), "r8");
7738 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7739 offsetof(CPUX86State, regs[9]), "r9");
7740 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7741 offsetof(CPUX86State, regs[10]), "r10");
7742 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7743 offsetof(CPUX86State, regs[11]), "r11");
7744 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7745 offsetof(CPUX86State, regs[12]), "r12");
7746 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7747 offsetof(CPUX86State, regs[13]), "r13");
7748 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7749 offsetof(CPUX86State, regs[14]), "r14");
7750 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7751 offsetof(CPUX86State, regs[15]), "r15");
7752 #else
7753 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7754 offsetof(CPUX86State, regs[R_EAX]), "eax");
7755 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7756 offsetof(CPUX86State, regs[R_ECX]), "ecx");
7757 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7758 offsetof(CPUX86State, regs[R_EDX]), "edx");
7759 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7760 offsetof(CPUX86State, regs[R_EBX]), "ebx");
7761 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7762 offsetof(CPUX86State, regs[R_ESP]), "esp");
7763 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7764 offsetof(CPUX86State, regs[R_EBP]), "ebp");
7765 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7766 offsetof(CPUX86State, regs[R_ESI]), "esi");
7767 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7768 offsetof(CPUX86State, regs[R_EDI]), "edi");
7769 #endif
7771 /* register helpers */
7772 #define GEN_HELPER 2
7773 #include "helper.h"
7776 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7777 basic block 'tb'. If search_pc is TRUE, also generate PC
7778 information for each intermediate instruction. */
7779 static inline void gen_intermediate_code_internal(CPUX86State *env,
7780 TranslationBlock *tb,
7781 int search_pc)
7783 DisasContext dc1, *dc = &dc1;
7784 target_ulong pc_ptr;
7785 uint16_t *gen_opc_end;
7786 CPUBreakpoint *bp;
7787 int j, lj;
7788 uint64_t flags;
7789 target_ulong pc_start;
7790 target_ulong cs_base;
7791 int num_insns;
7792 int max_insns;
7794 /* generate intermediate code */
7795 pc_start = tb->pc;
7796 cs_base = tb->cs_base;
7797 flags = tb->flags;
7799 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7800 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7801 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7802 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7803 dc->f_st = 0;
7804 dc->vm86 = (flags >> VM_SHIFT) & 1;
7805 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7806 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7807 dc->tf = (flags >> TF_SHIFT) & 1;
7808 dc->singlestep_enabled = env->singlestep_enabled;
7809 dc->cc_op = CC_OP_DYNAMIC;
7810 dc->cs_base = cs_base;
7811 dc->tb = tb;
7812 dc->popl_esp_hack = 0;
7813 /* select memory access functions */
7814 dc->mem_index = 0;
7815 if (flags & HF_SOFTMMU_MASK) {
7816 if (dc->cpl == 3)
7817 dc->mem_index = 2 * 4;
7818 else
7819 dc->mem_index = 1 * 4;
7821 dc->cpuid_features = env->cpuid_features;
7822 dc->cpuid_ext_features = env->cpuid_ext_features;
7823 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7824 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7825 #ifdef TARGET_X86_64
7826 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7827 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7828 #endif
7829 dc->flags = flags;
7830 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7831 (flags & HF_INHIBIT_IRQ_MASK)
7832 #ifndef CONFIG_SOFTMMU
7833 || (flags & HF_SOFTMMU_MASK)
7834 #endif
7836 #if 0
7837 /* check addseg logic */
7838 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7839 printf("ERROR addseg\n");
7840 #endif
7842 cpu_T[0] = tcg_temp_new();
7843 cpu_T[1] = tcg_temp_new();
7844 cpu_A0 = tcg_temp_new();
7845 cpu_T3 = tcg_temp_new();
7847 cpu_tmp0 = tcg_temp_new();
7848 cpu_tmp1_i64 = tcg_temp_new_i64();
7849 cpu_tmp2_i32 = tcg_temp_new_i32();
7850 cpu_tmp3_i32 = tcg_temp_new_i32();
7851 cpu_tmp4 = tcg_temp_new();
7852 cpu_tmp5 = tcg_temp_new();
7853 cpu_ptr0 = tcg_temp_new_ptr();
7854 cpu_ptr1 = tcg_temp_new_ptr();
7856 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7858 dc->is_jmp = DISAS_NEXT;
7859 pc_ptr = pc_start;
7860 lj = -1;
7861 num_insns = 0;
7862 max_insns = tb->cflags & CF_COUNT_MASK;
7863 if (max_insns == 0)
7864 max_insns = CF_COUNT_MASK;
7866 gen_icount_start();
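/* translate one guest instruction per iteration until the block ends: an
   instruction that stops translation, single-step/TF or inhibited irqs, a
   full opcode buffer, nearly a page of guest code, or the icount budget */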
7867 for(;;) {
7868 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7869 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7870 if (bp->pc == pc_ptr &&
7871 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7872 gen_debug(dc, pc_ptr - dc->cs_base);
7873 break;
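/* when search_pc is set, record the guest PC, cc_op and instruction count
   for every generated op so the CPU state can later be restored from a
   host PC */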
7877 if (search_pc) {
7878 j = gen_opc_ptr - gen_opc_buf;
7879 if (lj < j) {
7880 lj++;
7881 while (lj < j)
7882 gen_opc_instr_start[lj++] = 0;
7884 gen_opc_pc[lj] = pc_ptr;
7885 gen_opc_cc_op[lj] = dc->cc_op;
7886 gen_opc_instr_start[lj] = 1;
7887 gen_opc_icount[lj] = num_insns;
7889 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7890 gen_io_start();
7892 pc_ptr = disas_insn(dc, pc_ptr);
7893 num_insns++;
7894 /* stop translation if indicated */
7895 if (dc->is_jmp)
7896 break;
7897 /* in single-step mode, we generate only one instruction and
7898 then generate an exception */
7899 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7900 the flag and abort the translation to give the irqs a
7901 chance to happen */
7902 if (dc->tf || dc->singlestep_enabled ||
7903 (flags & HF_INHIBIT_IRQ_MASK)) {
7904 gen_jmp_im(pc_ptr - dc->cs_base);
7905 gen_eob(dc);
7906 break;
7908 /* if the translation becomes too long, stop generation as well */
7909 if (gen_opc_ptr >= gen_opc_end ||
7910 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7911 num_insns >= max_insns) {
7912 gen_jmp_im(pc_ptr - dc->cs_base);
7913 gen_eob(dc);
7914 break;
7916 if (singlestep) {
7917 gen_jmp_im(pc_ptr - dc->cs_base);
7918 gen_eob(dc);
7919 break;
7922 if (tb->cflags & CF_LAST_IO)
7923 gen_io_end();
7924 gen_icount_end(tb, num_insns);
7925 *gen_opc_ptr = INDEX_op_end;
7926 /* make sure the last values are filled in */
7927 if (search_pc) {
7928 j = gen_opc_ptr - gen_opc_buf;
7929 lj++;
7930 while (lj <= j)
7931 gen_opc_instr_start[lj++] = 0;
7934 #ifdef DEBUG_DISAS
7935 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7936 int disas_flags;
7937 qemu_log("----------------\n");
7938 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7939 #ifdef TARGET_X86_64
7940 if (dc->code64)
7941 disas_flags = 2;
7942 else
7943 #endif
7944 disas_flags = !dc->code32;
7945 log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
7946 qemu_log("\n");
7948 #endif
7950 if (!search_pc) {
7951 tb->size = pc_ptr - pc_start;
7952 tb->icount = num_insns;
7956 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
7958 gen_intermediate_code_internal(env, tb, 0);
7961 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
7963 gen_intermediate_code_internal(env, tb, 1);
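/* restore_state_to_opc() maps the op index found for a faulting host PC back
   to the guest EIP and cc_op recorded during a search_pc translation */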
7966 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
7968 int cc_op;
7969 #ifdef DEBUG_DISAS
7970 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
7971 int i;
7972 qemu_log("RESTORE:\n");
7973 for(i = 0;i <= pc_pos; i++) {
7974 if (gen_opc_instr_start[i]) {
7975 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
7978 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
7979 pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
7980 (uint32_t)tb->cs_base);
7982 #endif
7983 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
7984 cc_op = gen_opc_cc_op[pc_pos];
7985 if (cc_op != CC_OP_DYNAMIC)
7986 env->cc_op = cc_op;