target-i386: Remove unused macros
[qemu-kvm.git] / target-i386 / translate.c
blob 8d696eaa619c5c0dbfab2813107d0ff99c36a327
1 /*
2 * i386 translation
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 #include <stdarg.h>
20 #include <stdlib.h>
21 #include <stdio.h>
22 #include <string.h>
23 #include <inttypes.h>
24 #include <signal.h>
26 #include "cpu.h"
27 #include "disas.h"
28 #include "tcg-op.h"
30 #include "helper.h"
31 #define GEN_HELPER 1
32 #include "helper.h"
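/* helper.h is included twice on purpose: the first include declares the
   helper function prototypes, while the second, with GEN_HELPER defined,
   expands the same DEF_HELPER entries into the gen_helper_*() wrappers
   that emit TCG call ops. */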
34 #define PREFIX_REPZ 0x01
35 #define PREFIX_REPNZ 0x02
36 #define PREFIX_LOCK 0x04
37 #define PREFIX_DATA 0x08
38 #define PREFIX_ADR 0x10
40 #ifdef TARGET_X86_64
41 #define X86_64_DEF(...) __VA_ARGS__
42 #define CODE64(s) ((s)->code64)
43 #define REX_X(s) ((s)->rex_x)
44 #define REX_B(s) ((s)->rex_b)
45 #else
46 #define X86_64_DEF(...)
47 #define CODE64(s) 0
48 #define REX_X(s) 0
49 #define REX_B(s) 0
50 #endif
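/* X86_64_DEF() pastes its arguments only when TARGET_X86_64 is defined,
   so REX-related conditions compile away entirely in 32-bit-only builds;
   CODE64/REX_X/REX_B likewise collapse to constant 0 there. */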
52 //#define MACRO_TEST 1
54 /* global register indexes */
55 static TCGv_ptr cpu_env;
56 static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
57 static TCGv_i32 cpu_cc_op;
58 static TCGv cpu_regs[CPU_NB_REGS];
59 /* local temps */
60 static TCGv cpu_T[2], cpu_T3;
61 /* local register indexes (only used inside old micro ops) */
62 static TCGv cpu_tmp0, cpu_tmp4;
63 static TCGv_ptr cpu_ptr0, cpu_ptr1;
64 static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
65 static TCGv_i64 cpu_tmp1_i64;
66 static TCGv cpu_tmp5;
68 static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];
70 #include "gen-icount.h"
72 #ifdef TARGET_X86_64
73 static int x86_64_hregs;
74 #endif
76 typedef struct DisasContext {
77 /* current insn context */
78 int override; /* -1 if no override */
79 int prefix;
80 int aflag, dflag;
81 target_ulong pc; /* pc = eip + cs_base */
82     int is_jmp; /* 1 means jump (stop translation), 2 means CPU
83 static state change (stop translation) */
84 /* current block context */
85 target_ulong cs_base; /* base of CS segment */
86 int pe; /* protected mode */
87 int code32; /* 32 bit code segment */
88 #ifdef TARGET_X86_64
89 int lma; /* long mode active */
90 int code64; /* 64 bit code segment */
91 int rex_x, rex_b;
92 #endif
93 int ss32; /* 32 bit stack segment */
94 int cc_op; /* current CC operation */
95     int addseg; /* non zero if any of DS/ES/SS has a non zero base */
96 int f_st; /* currently unused */
97 int vm86; /* vm86 mode */
98 int cpl;
99 int iopl;
100 int tf; /* TF cpu flag */
101 int singlestep_enabled; /* "hardware" single step enabled */
102 int jmp_opt; /* use direct block chaining for direct jumps */
103 int mem_index; /* select memory access functions */
104 uint64_t flags; /* all execution flags */
105 struct TranslationBlock *tb;
106 int popl_esp_hack; /* for correct popl with esp base handling */
107 int rip_offset; /* only used in x86_64, but left for simplicity */
108 int cpuid_features;
109 int cpuid_ext_features;
110 int cpuid_ext2_features;
111 int cpuid_ext3_features;
112 } DisasContext;
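/* One DisasContext is filled in per translated block: the "current insn
   context" fields are reset while decoding each instruction, while the
   "current block context" fields are derived once from the TB flags
   (CPL, segment attributes, TF, cpuid features, ...) and stay constant
   for the whole block. */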
114 static void gen_eob(DisasContext *s);
115 static void gen_jmp(DisasContext *s, target_ulong eip);
116 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
118 /* i386 arith/logic operations */
119 enum {
120 OP_ADDL,
121 OP_ORL,
122 OP_ADCL,
123 OP_SBBL,
124 OP_ANDL,
125 OP_SUBL,
126 OP_XORL,
127 OP_CMPL,
130 /* i386 shift ops */
131 enum {
132 OP_ROL,
133 OP_ROR,
134 OP_RCL,
135 OP_RCR,
136 OP_SHL,
137 OP_SHR,
138 OP_SHL1, /* undocumented */
139 OP_SAR = 7,
142 enum {
143 JCC_O,
144 JCC_B,
145 JCC_Z,
146 JCC_BE,
147 JCC_S,
148 JCC_P,
149 JCC_L,
150 JCC_LE,
153 /* operand size */
154 enum {
155 OT_BYTE = 0,
156 OT_WORD,
157 OT_LONG,
158 OT_QUAD,
161 enum {
162 /* I386 int registers */
163 OR_EAX, /* MUST be even numbered */
164 OR_ECX,
165 OR_EDX,
166 OR_EBX,
167 OR_ESP,
168 OR_EBP,
169 OR_ESI,
170 OR_EDI,
172 OR_TMP0 = 16, /* temporary operand register */
173 OR_TMP1,
174 OR_A0, /* temporary register used when doing address evaluation */
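/* OR_TMP0, OR_TMP1 and OR_A0 are pseudo register indexes, not
   architectural registers: generator functions taking a register operand
   use OR_TMP0 to mean "memory operand, address already in cpu_A0" (see
   gen_op() below) and OR_TMP1 to mean "value already in cpu_T[1]". */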
177 static inline void gen_op_movl_T0_0(void)
179 tcg_gen_movi_tl(cpu_T[0], 0);
182 static inline void gen_op_movl_T0_im(int32_t val)
184 tcg_gen_movi_tl(cpu_T[0], val);
187 static inline void gen_op_movl_T0_imu(uint32_t val)
189 tcg_gen_movi_tl(cpu_T[0], val);
192 static inline void gen_op_movl_T1_im(int32_t val)
194 tcg_gen_movi_tl(cpu_T[1], val);
197 static inline void gen_op_movl_T1_imu(uint32_t val)
199 tcg_gen_movi_tl(cpu_T[1], val);
202 static inline void gen_op_movl_A0_im(uint32_t val)
204 tcg_gen_movi_tl(cpu_A0, val);
207 #ifdef TARGET_X86_64
208 static inline void gen_op_movq_A0_im(int64_t val)
210 tcg_gen_movi_tl(cpu_A0, val);
212 #endif
214 static inline void gen_movtl_T0_im(target_ulong val)
216 tcg_gen_movi_tl(cpu_T[0], val);
219 static inline void gen_movtl_T1_im(target_ulong val)
221 tcg_gen_movi_tl(cpu_T[1], val);
224 static inline void gen_op_andl_T0_ffff(void)
226 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
229 static inline void gen_op_andl_T0_im(uint32_t val)
231 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
234 static inline void gen_op_movl_T0_T1(void)
236 tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
239 static inline void gen_op_andl_A0_ffff(void)
241 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
244 #ifdef TARGET_X86_64
246 #define NB_OP_SIZES 4
248 #else /* !TARGET_X86_64 */
250 #define NB_OP_SIZES 3
252 #endif /* !TARGET_X86_64 */
254 #if defined(HOST_WORDS_BIGENDIAN)
255 #define REG_B_OFFSET (sizeof(target_ulong) - 1)
256 #define REG_H_OFFSET (sizeof(target_ulong) - 2)
257 #define REG_W_OFFSET (sizeof(target_ulong) - 2)
258 #define REG_L_OFFSET (sizeof(target_ulong) - 4)
259 #define REG_LH_OFFSET (sizeof(target_ulong) - 8)
260 #else
261 #define REG_B_OFFSET 0
262 #define REG_H_OFFSET 1
263 #define REG_W_OFFSET 0
264 #define REG_L_OFFSET 0
265 #define REG_LH_OFFSET 4
266 #endif
268 static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
270 switch(ot) {
271 case OT_BYTE:
272 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
273 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
274 } else {
275 tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
277 break;
278 case OT_WORD:
279 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
280 break;
281 default: /* XXX this shouldn't be reached; abort? */
282 case OT_LONG:
283 /* For x86_64, this sets the higher half of register to zero.
284 For i386, this is equivalent to a mov. */
285 tcg_gen_ext32u_tl(cpu_regs[reg], t0);
286 break;
287 #ifdef TARGET_X86_64
288 case OT_QUAD:
289 tcg_gen_mov_tl(cpu_regs[reg], t0);
290 break;
291 #endif
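/* The OT_BYTE case above covers the two x86 byte-register encodings:
   indexes 0-3 (and, once a REX prefix selects x86_64_hregs, every index)
   name a low byte and are written with an 8-bit deposit at bit 0, while
   indexes 4-7 without REX name AH/CH/DH/BH, i.e. bits 8..15 of
   regs[reg - 4]. */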
295 static inline void gen_op_mov_reg_T0(int ot, int reg)
297 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
300 static inline void gen_op_mov_reg_T1(int ot, int reg)
302 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
305 static inline void gen_op_mov_reg_A0(int size, int reg)
307 switch(size) {
308 case 0:
309 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_A0, 0, 16);
310 break;
311 default: /* XXX this shouldn't be reached; abort? */
312 case 1:
313 /* For x86_64, this sets the higher half of register to zero.
314 For i386, this is equivalent to a mov. */
315 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
316 break;
317 #ifdef TARGET_X86_64
318 case 2:
319 tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
320 break;
321 #endif
325 static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
327 switch(ot) {
328 case OT_BYTE:
329 if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
330 goto std_case;
331 } else {
332 tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
333 tcg_gen_ext8u_tl(t0, t0);
335 break;
336 default:
337 std_case:
338 tcg_gen_mov_tl(t0, cpu_regs[reg]);
339 break;
343 static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
345 gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
348 static inline void gen_op_movl_A0_reg(int reg)
350 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
353 static inline void gen_op_addl_A0_im(int32_t val)
355 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
356 #ifdef TARGET_X86_64
357 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
358 #endif
361 #ifdef TARGET_X86_64
362 static inline void gen_op_addq_A0_im(int64_t val)
364 tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
366 #endif
368 static void gen_add_A0_im(DisasContext *s, int val)
370 #ifdef TARGET_X86_64
371 if (CODE64(s))
372 gen_op_addq_A0_im(val);
373 else
374 #endif
375 gen_op_addl_A0_im(val);
378 static inline void gen_op_addl_T0_T1(void)
380 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
383 static inline void gen_op_jmp_T0(void)
385 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, eip));
388 static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
390 switch(size) {
391 case 0:
392 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
393 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
394 break;
395 case 1:
396 tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
397 /* For x86_64, this sets the higher half of register to zero.
398 For i386, this is equivalent to a nop. */
399 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
400 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
401 break;
402 #ifdef TARGET_X86_64
403 case 2:
404 tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
405 break;
406 #endif
410 static inline void gen_op_add_reg_T0(int size, int reg)
412 switch(size) {
413 case 0:
414 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
415 tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0, 0, 16);
416 break;
417 case 1:
418 tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
419 /* For x86_64, this sets the higher half of register to zero.
420 For i386, this is equivalent to a nop. */
421 tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
422 tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
423 break;
424 #ifdef TARGET_X86_64
425 case 2:
426 tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
427 break;
428 #endif
432 static inline void gen_op_set_cc_op(int32_t val)
434 tcg_gen_movi_i32(cpu_cc_op, val);
437 static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
439 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
440 if (shift != 0)
441 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
442 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
443 /* For x86_64, this sets the higher half of register to zero.
444 For i386, this is equivalent to a nop. */
445 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
448 static inline void gen_op_movl_A0_seg(int reg)
450 tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
453 static inline void gen_op_addl_A0_seg(int reg)
455 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
456 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
457 #ifdef TARGET_X86_64
458 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
459 #endif
462 #ifdef TARGET_X86_64
463 static inline void gen_op_movq_A0_seg(int reg)
465 tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
468 static inline void gen_op_addq_A0_seg(int reg)
470 tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
471 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
474 static inline void gen_op_movq_A0_reg(int reg)
476 tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
479 static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
481 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
482 if (shift != 0)
483 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
484 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
486 #endif
488 static inline void gen_op_lds_T0_A0(int idx)
490 int mem_index = (idx >> 2) - 1;
491 switch(idx & 3) {
492 case 0:
493 tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
494 break;
495 case 1:
496 tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
497 break;
498 default:
499 case 2:
500 tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
501 break;
505 static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
507 int mem_index = (idx >> 2) - 1;
508 switch(idx & 3) {
509 case 0:
510 tcg_gen_qemu_ld8u(t0, a0, mem_index);
511 break;
512 case 1:
513 tcg_gen_qemu_ld16u(t0, a0, mem_index);
514 break;
515 case 2:
516 tcg_gen_qemu_ld32u(t0, a0, mem_index);
517 break;
518 default:
519 case 3:
520 /* Should never happen on 32-bit targets. */
521 #ifdef TARGET_X86_64
522 tcg_gen_qemu_ld64(t0, a0, mem_index);
523 #endif
524 break;
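/* For these load/store helpers the operand size (OT_*) is packed into
   the low two bits of 'idx' and the memory index used by the softmmu
   accessors sits in the bits above, which is why callers pass
   "ot + s->mem_index". */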
528 /* XXX: always use ldu or lds */
529 static inline void gen_op_ld_T0_A0(int idx)
531 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
534 static inline void gen_op_ldu_T0_A0(int idx)
536 gen_op_ld_v(idx, cpu_T[0], cpu_A0);
539 static inline void gen_op_ld_T1_A0(int idx)
541 gen_op_ld_v(idx, cpu_T[1], cpu_A0);
544 static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
546 int mem_index = (idx >> 2) - 1;
547 switch(idx & 3) {
548 case 0:
549 tcg_gen_qemu_st8(t0, a0, mem_index);
550 break;
551 case 1:
552 tcg_gen_qemu_st16(t0, a0, mem_index);
553 break;
554 case 2:
555 tcg_gen_qemu_st32(t0, a0, mem_index);
556 break;
557 default:
558 case 3:
559 /* Should never happen on 32-bit targets. */
560 #ifdef TARGET_X86_64
561 tcg_gen_qemu_st64(t0, a0, mem_index);
562 #endif
563 break;
567 static inline void gen_op_st_T0_A0(int idx)
569 gen_op_st_v(idx, cpu_T[0], cpu_A0);
572 static inline void gen_op_st_T1_A0(int idx)
574 gen_op_st_v(idx, cpu_T[1], cpu_A0);
577 static inline void gen_jmp_im(target_ulong pc)
579 tcg_gen_movi_tl(cpu_tmp0, pc);
580 tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, eip));
583 static inline void gen_string_movl_A0_ESI(DisasContext *s)
585 int override;
587 override = s->override;
588 #ifdef TARGET_X86_64
589 if (s->aflag == 2) {
590 if (override >= 0) {
591 gen_op_movq_A0_seg(override);
592 gen_op_addq_A0_reg_sN(0, R_ESI);
593 } else {
594 gen_op_movq_A0_reg(R_ESI);
596 } else
597 #endif
598 if (s->aflag) {
599 /* 32 bit address */
600 if (s->addseg && override < 0)
601 override = R_DS;
602 if (override >= 0) {
603 gen_op_movl_A0_seg(override);
604 gen_op_addl_A0_reg_sN(0, R_ESI);
605 } else {
606 gen_op_movl_A0_reg(R_ESI);
608 } else {
609             /* 16 bit address, always override */
610 if (override < 0)
611 override = R_DS;
612 gen_op_movl_A0_reg(R_ESI);
613 gen_op_andl_A0_ffff();
614 gen_op_addl_A0_seg(override);
618 static inline void gen_string_movl_A0_EDI(DisasContext *s)
620 #ifdef TARGET_X86_64
621 if (s->aflag == 2) {
622 gen_op_movq_A0_reg(R_EDI);
623 } else
624 #endif
625 if (s->aflag) {
626 if (s->addseg) {
627 gen_op_movl_A0_seg(R_ES);
628 gen_op_addl_A0_reg_sN(0, R_EDI);
629 } else {
630 gen_op_movl_A0_reg(R_EDI);
632 } else {
633 gen_op_movl_A0_reg(R_EDI);
634 gen_op_andl_A0_ffff();
635 gen_op_addl_A0_seg(R_ES);
639 static inline void gen_op_movl_T0_Dshift(int ot)
641 tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
642 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
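/* env->df holds +1 or -1 according to the direction flag, so shifting it
   left by the OT_* size code yields the signed per-iteration increment
   (1, 2, 4 or 8 bytes, negated when DF is set) that the string
   instructions add to ESI/EDI. */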
645 static void gen_extu(int ot, TCGv reg)
647 switch(ot) {
648 case OT_BYTE:
649 tcg_gen_ext8u_tl(reg, reg);
650 break;
651 case OT_WORD:
652 tcg_gen_ext16u_tl(reg, reg);
653 break;
654 case OT_LONG:
655 tcg_gen_ext32u_tl(reg, reg);
656 break;
657 default:
658 break;
662 static void gen_exts(int ot, TCGv reg)
664 switch(ot) {
665 case OT_BYTE:
666 tcg_gen_ext8s_tl(reg, reg);
667 break;
668 case OT_WORD:
669 tcg_gen_ext16s_tl(reg, reg);
670 break;
671 case OT_LONG:
672 tcg_gen_ext32s_tl(reg, reg);
673 break;
674 default:
675 break;
679 static inline void gen_op_jnz_ecx(int size, int label1)
681 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
682 gen_extu(size + 1, cpu_tmp0);
683 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
686 static inline void gen_op_jz_ecx(int size, int label1)
688 tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
689 gen_extu(size + 1, cpu_tmp0);
690 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
693 static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
695 switch (ot) {
696 case 0: gen_helper_inb(v, n); break;
697 case 1: gen_helper_inw(v, n); break;
698 case 2: gen_helper_inl(v, n); break;
703 static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
705 switch (ot) {
706 case 0: gen_helper_outb(v, n); break;
707 case 1: gen_helper_outw(v, n); break;
708 case 2: gen_helper_outl(v, n); break;
713 static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
714 uint32_t svm_flags)
716 int state_saved;
717 target_ulong next_eip;
719 state_saved = 0;
720 if (s->pe && (s->cpl > s->iopl || s->vm86)) {
721 if (s->cc_op != CC_OP_DYNAMIC)
722 gen_op_set_cc_op(s->cc_op);
723 gen_jmp_im(cur_eip);
724 state_saved = 1;
725 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
726 switch (ot) {
727 case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
728 case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
729 case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
732 if(s->flags & HF_SVMI_MASK) {
733 if (!state_saved) {
734 if (s->cc_op != CC_OP_DYNAMIC)
735 gen_op_set_cc_op(s->cc_op);
736 gen_jmp_im(cur_eip);
738 svm_flags |= (1 << (4 + ot));
739 next_eip = s->pc - s->cs_base;
740 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
741 gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
742 tcg_const_i32(next_eip - cur_eip));
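/* Two independent checks before an I/O instruction: in protected mode
   with CPL > IOPL (or in vm86 mode) the check_io helpers consult the TSS
   I/O permission bitmap and may raise an exception, and when SVM
   intercepts are active the IOIO intercept is evaluated as well. Both
   paths need EIP and cc_op to be up to date first, hence the
   gen_jmp_im()/gen_op_set_cc_op() calls. */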
746 static inline void gen_movs(DisasContext *s, int ot)
748 gen_string_movl_A0_ESI(s);
749 gen_op_ld_T0_A0(ot + s->mem_index);
750 gen_string_movl_A0_EDI(s);
751 gen_op_st_T0_A0(ot + s->mem_index);
752 gen_op_movl_T0_Dshift(ot);
753 gen_op_add_reg_T0(s->aflag, R_ESI);
754 gen_op_add_reg_T0(s->aflag, R_EDI);
757 static inline void gen_update_cc_op(DisasContext *s)
759 if (s->cc_op != CC_OP_DYNAMIC) {
760 gen_op_set_cc_op(s->cc_op);
761 s->cc_op = CC_OP_DYNAMIC;
765 static void gen_op_update1_cc(void)
767 tcg_gen_discard_tl(cpu_cc_src);
768 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
771 static void gen_op_update2_cc(void)
773 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
774 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
777 static inline void gen_op_cmpl_T0_T1_cc(void)
779 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
780 tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
783 static inline void gen_op_testl_T0_T1_cc(void)
785 tcg_gen_discard_tl(cpu_cc_src);
786 tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
789 static void gen_op_update_neg_cc(void)
791 tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
792 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
795 /* compute eflags.C to reg */
796 static void gen_compute_eflags_c(TCGv reg)
798 gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
799 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
802 /* compute all eflags to cc_src */
803 static void gen_compute_eflags(TCGv reg)
805 gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
806 tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
809 static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
811 if (s->cc_op != CC_OP_DYNAMIC)
812 gen_op_set_cc_op(s->cc_op);
813 switch(jcc_op) {
814 case JCC_O:
815 gen_compute_eflags(cpu_T[0]);
816 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
817 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
818 break;
819 case JCC_B:
820 gen_compute_eflags_c(cpu_T[0]);
821 break;
822 case JCC_Z:
823 gen_compute_eflags(cpu_T[0]);
824 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
825 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
826 break;
827 case JCC_BE:
828 gen_compute_eflags(cpu_tmp0);
829 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
830 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
831 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
832 break;
833 case JCC_S:
834 gen_compute_eflags(cpu_T[0]);
835 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
836 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
837 break;
838 case JCC_P:
839 gen_compute_eflags(cpu_T[0]);
840 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
841 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
842 break;
843 case JCC_L:
844 gen_compute_eflags(cpu_tmp0);
845 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
846 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
847 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
848 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
849 break;
850 default:
851 case JCC_LE:
852 gen_compute_eflags(cpu_tmp0);
853 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
854 tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
855 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
856 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
857 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
858 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
859 break;
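/* The shift counts above follow the EFLAGS bit layout: CF is bit 0,
   PF bit 2, ZF bit 6, SF bit 7 and OF bit 11, so each condition is
   extracted by shifting the computed flags right and masking bit 0. */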
863 /* return true if setcc_slow is not needed (WARNING: must be kept in
864 sync with gen_jcc1) */
865 static int is_fast_jcc_case(DisasContext *s, int b)
867 int jcc_op;
868 jcc_op = (b >> 1) & 7;
869 switch(s->cc_op) {
870 /* we optimize the cmp/jcc case */
871 case CC_OP_SUBB:
872 case CC_OP_SUBW:
873 case CC_OP_SUBL:
874 case CC_OP_SUBQ:
875 if (jcc_op == JCC_O || jcc_op == JCC_P)
876 goto slow_jcc;
877 break;
879 /* some jumps are easy to compute */
880 case CC_OP_ADDB:
881 case CC_OP_ADDW:
882 case CC_OP_ADDL:
883 case CC_OP_ADDQ:
885 case CC_OP_LOGICB:
886 case CC_OP_LOGICW:
887 case CC_OP_LOGICL:
888 case CC_OP_LOGICQ:
890 case CC_OP_INCB:
891 case CC_OP_INCW:
892 case CC_OP_INCL:
893 case CC_OP_INCQ:
895 case CC_OP_DECB:
896 case CC_OP_DECW:
897 case CC_OP_DECL:
898 case CC_OP_DECQ:
900 case CC_OP_SHLB:
901 case CC_OP_SHLW:
902 case CC_OP_SHLL:
903 case CC_OP_SHLQ:
904 if (jcc_op != JCC_Z && jcc_op != JCC_S)
905 goto slow_jcc;
906 break;
907 default:
908 slow_jcc:
909 return 0;
911 return 1;
914 /* generate a conditional jump to label 'l1' according to jump opcode
915    value 'b'. In the fast case, T0 is guaranteed not to be used. */
916 static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
918 int inv, jcc_op, size, cond;
919 TCGv t0;
921 inv = b & 1;
922 jcc_op = (b >> 1) & 7;
924 switch(cc_op) {
925 /* we optimize the cmp/jcc case */
926 case CC_OP_SUBB:
927 case CC_OP_SUBW:
928 case CC_OP_SUBL:
929 case CC_OP_SUBQ:
931 size = cc_op - CC_OP_SUBB;
932 switch(jcc_op) {
933 case JCC_Z:
934 fast_jcc_z:
935 switch(size) {
936 case 0:
937 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
938 t0 = cpu_tmp0;
939 break;
940 case 1:
941 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
942 t0 = cpu_tmp0;
943 break;
944 #ifdef TARGET_X86_64
945 case 2:
946 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
947 t0 = cpu_tmp0;
948 break;
949 #endif
950 default:
951 t0 = cpu_cc_dst;
952 break;
954 tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
955 break;
956 case JCC_S:
957 fast_jcc_s:
958 switch(size) {
959 case 0:
960 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
961 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
962 0, l1);
963 break;
964 case 1:
965 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
966 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
967 0, l1);
968 break;
969 #ifdef TARGET_X86_64
970 case 2:
971 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
972 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
973 0, l1);
974 break;
975 #endif
976 default:
977 tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
978 0, l1);
979 break;
981 break;
983 case JCC_B:
984 cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
985 goto fast_jcc_b;
986 case JCC_BE:
987 cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
988 fast_jcc_b:
989 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
990 switch(size) {
991 case 0:
992 t0 = cpu_tmp0;
993 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
994 tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
995 break;
996 case 1:
997 t0 = cpu_tmp0;
998 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
999 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
1000 break;
1001 #ifdef TARGET_X86_64
1002 case 2:
1003 t0 = cpu_tmp0;
1004 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
1005 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
1006 break;
1007 #endif
1008 default:
1009 t0 = cpu_cc_src;
1010 break;
1012 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1013 break;
1015 case JCC_L:
1016 cond = inv ? TCG_COND_GE : TCG_COND_LT;
1017 goto fast_jcc_l;
1018 case JCC_LE:
1019 cond = inv ? TCG_COND_GT : TCG_COND_LE;
1020 fast_jcc_l:
1021 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1022 switch(size) {
1023 case 0:
1024 t0 = cpu_tmp0;
1025 tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
1026 tcg_gen_ext8s_tl(t0, cpu_cc_src);
1027 break;
1028 case 1:
1029 t0 = cpu_tmp0;
1030 tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
1031 tcg_gen_ext16s_tl(t0, cpu_cc_src);
1032 break;
1033 #ifdef TARGET_X86_64
1034 case 2:
1035 t0 = cpu_tmp0;
1036 tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
1037 tcg_gen_ext32s_tl(t0, cpu_cc_src);
1038 break;
1039 #endif
1040 default:
1041 t0 = cpu_cc_src;
1042 break;
1044 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1045 break;
1047 default:
1048 goto slow_jcc;
1050 break;
1052 /* some jumps are easy to compute */
1053 case CC_OP_ADDB:
1054 case CC_OP_ADDW:
1055 case CC_OP_ADDL:
1056 case CC_OP_ADDQ:
1058 case CC_OP_ADCB:
1059 case CC_OP_ADCW:
1060 case CC_OP_ADCL:
1061 case CC_OP_ADCQ:
1063 case CC_OP_SBBB:
1064 case CC_OP_SBBW:
1065 case CC_OP_SBBL:
1066 case CC_OP_SBBQ:
1068 case CC_OP_LOGICB:
1069 case CC_OP_LOGICW:
1070 case CC_OP_LOGICL:
1071 case CC_OP_LOGICQ:
1073 case CC_OP_INCB:
1074 case CC_OP_INCW:
1075 case CC_OP_INCL:
1076 case CC_OP_INCQ:
1078 case CC_OP_DECB:
1079 case CC_OP_DECW:
1080 case CC_OP_DECL:
1081 case CC_OP_DECQ:
1083 case CC_OP_SHLB:
1084 case CC_OP_SHLW:
1085 case CC_OP_SHLL:
1086 case CC_OP_SHLQ:
1088 case CC_OP_SARB:
1089 case CC_OP_SARW:
1090 case CC_OP_SARL:
1091 case CC_OP_SARQ:
1092 switch(jcc_op) {
1093 case JCC_Z:
1094 size = (cc_op - CC_OP_ADDB) & 3;
1095 goto fast_jcc_z;
1096 case JCC_S:
1097 size = (cc_op - CC_OP_ADDB) & 3;
1098 goto fast_jcc_s;
1099 default:
1100 goto slow_jcc;
1102 break;
1103 default:
1104 slow_jcc:
1105 gen_setcc_slow_T0(s, jcc_op);
1106 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
1107 cpu_T[0], 0, l1);
1108 break;
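/* Summary of the fast paths above: after a SUB/CMP, cc_dst holds the
   result and cc_src the second operand, so Z and S are tested directly
   on a masked cc_dst, while B/BE and L/LE first rebuild the first
   operand as cc_dst + cc_src and then emit a single unsigned or signed
   brcond against cc_src. For the other "easy" cc_op groups only Z and S
   can be tested this way; everything else goes through
   gen_setcc_slow_T0(). */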
1112 /* XXX: does not work with gdbstub "ice" single step - not a
1113 serious problem */
1114 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1116 int l1, l2;
1118 l1 = gen_new_label();
1119 l2 = gen_new_label();
1120 gen_op_jnz_ecx(s->aflag, l1);
1121 gen_set_label(l2);
1122 gen_jmp_tb(s, next_eip, 1);
1123 gen_set_label(l1);
1124 return l2;
1127 static inline void gen_stos(DisasContext *s, int ot)
1129 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1130 gen_string_movl_A0_EDI(s);
1131 gen_op_st_T0_A0(ot + s->mem_index);
1132 gen_op_movl_T0_Dshift(ot);
1133 gen_op_add_reg_T0(s->aflag, R_EDI);
1136 static inline void gen_lods(DisasContext *s, int ot)
1138 gen_string_movl_A0_ESI(s);
1139 gen_op_ld_T0_A0(ot + s->mem_index);
1140 gen_op_mov_reg_T0(ot, R_EAX);
1141 gen_op_movl_T0_Dshift(ot);
1142 gen_op_add_reg_T0(s->aflag, R_ESI);
1145 static inline void gen_scas(DisasContext *s, int ot)
1147 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1148 gen_string_movl_A0_EDI(s);
1149 gen_op_ld_T1_A0(ot + s->mem_index);
1150 gen_op_cmpl_T0_T1_cc();
1151 gen_op_movl_T0_Dshift(ot);
1152 gen_op_add_reg_T0(s->aflag, R_EDI);
1155 static inline void gen_cmps(DisasContext *s, int ot)
1157 gen_string_movl_A0_ESI(s);
1158 gen_op_ld_T0_A0(ot + s->mem_index);
1159 gen_string_movl_A0_EDI(s);
1160 gen_op_ld_T1_A0(ot + s->mem_index);
1161 gen_op_cmpl_T0_T1_cc();
1162 gen_op_movl_T0_Dshift(ot);
1163 gen_op_add_reg_T0(s->aflag, R_ESI);
1164 gen_op_add_reg_T0(s->aflag, R_EDI);
1167 static inline void gen_ins(DisasContext *s, int ot)
1169 if (use_icount)
1170 gen_io_start();
1171 gen_string_movl_A0_EDI(s);
1172 /* Note: we must do this dummy write first to be restartable in
1173 case of page fault. */
1174 gen_op_movl_T0_0();
1175 gen_op_st_T0_A0(ot + s->mem_index);
1176 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1177 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1178 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1179 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1180 gen_op_st_T0_A0(ot + s->mem_index);
1181 gen_op_movl_T0_Dshift(ot);
1182 gen_op_add_reg_T0(s->aflag, R_EDI);
1183 if (use_icount)
1184 gen_io_end();
1187 static inline void gen_outs(DisasContext *s, int ot)
1189 if (use_icount)
1190 gen_io_start();
1191 gen_string_movl_A0_ESI(s);
1192 gen_op_ld_T0_A0(ot + s->mem_index);
1194 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1195 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1196 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1197 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1198 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1200 gen_op_movl_T0_Dshift(ot);
1201 gen_op_add_reg_T0(s->aflag, R_ESI);
1202 if (use_icount)
1203 gen_io_end();
1206 /* same method as Valgrind: we generate jumps to current or next
1207 instruction */
1208 #define GEN_REPZ(op) \
1209 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1210 target_ulong cur_eip, target_ulong next_eip) \
1212 int l2;\
1213 gen_update_cc_op(s); \
1214 l2 = gen_jz_ecx_string(s, next_eip); \
1215 gen_ ## op(s, ot); \
1216 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1217 /* a loop would cause two single step exceptions if ECX = 1 \
1218 before rep string_insn */ \
1219 if (!s->jmp_opt) \
1220 gen_op_jz_ecx(s->aflag, l2); \
1221 gen_jmp(s, cur_eip); \
1224 #define GEN_REPZ2(op) \
1225 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1226 target_ulong cur_eip, \
1227 target_ulong next_eip, \
1228 int nz) \
1230 int l2;\
1231 gen_update_cc_op(s); \
1232 l2 = gen_jz_ecx_string(s, next_eip); \
1233 gen_ ## op(s, ot); \
1234 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1235 gen_op_set_cc_op(CC_OP_SUBB + ot); \
1236 gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \
1237 if (!s->jmp_opt) \
1238 gen_op_jz_ecx(s->aflag, l2); \
1239 gen_jmp(s, cur_eip); \
1242 GEN_REPZ(movs)
1243 GEN_REPZ(stos)
1244 GEN_REPZ(lods)
1245 GEN_REPZ(ins)
1246 GEN_REPZ(outs)
1247 GEN_REPZ2(scas)
1248 GEN_REPZ2(cmps)
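/* Expansion of a REP-prefixed string instruction: jump straight to the
   next instruction when ECX is already zero, execute one iteration,
   decrement ECX, and for SCAS/CMPS (GEN_REPZ2) also test ZF against the
   REPZ/REPNZ condition; finally jump back to cur_eip so the whole
   sequence re-executes. When direct jumps are disabled (!jmp_opt) an
   extra ECX == 0 test is emitted first, as noted in the comment above. */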
1250 static void gen_helper_fp_arith_ST0_FT0(int op)
1252 switch (op) {
1253 case 0: gen_helper_fadd_ST0_FT0(); break;
1254 case 1: gen_helper_fmul_ST0_FT0(); break;
1255 case 2: gen_helper_fcom_ST0_FT0(); break;
1256 case 3: gen_helper_fcom_ST0_FT0(); break;
1257 case 4: gen_helper_fsub_ST0_FT0(); break;
1258 case 5: gen_helper_fsubr_ST0_FT0(); break;
1259 case 6: gen_helper_fdiv_ST0_FT0(); break;
1260 case 7: gen_helper_fdivr_ST0_FT0(); break;
1264 /* NOTE the exception in "r" op ordering */
1265 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1267 TCGv_i32 tmp = tcg_const_i32(opreg);
1268 switch (op) {
1269 case 0: gen_helper_fadd_STN_ST0(tmp); break;
1270 case 1: gen_helper_fmul_STN_ST0(tmp); break;
1271 case 4: gen_helper_fsubr_STN_ST0(tmp); break;
1272 case 5: gen_helper_fsub_STN_ST0(tmp); break;
1273 case 6: gen_helper_fdivr_STN_ST0(tmp); break;
1274 case 7: gen_helper_fdiv_STN_ST0(tmp); break;
1278 /* if d == OR_TMP0, it means memory operand (address in A0) */
1279 static void gen_op(DisasContext *s1, int op, int ot, int d)
1281 if (d != OR_TMP0) {
1282 gen_op_mov_TN_reg(ot, 0, d);
1283 } else {
1284 gen_op_ld_T0_A0(ot + s1->mem_index);
1286 switch(op) {
1287 case OP_ADCL:
1288 if (s1->cc_op != CC_OP_DYNAMIC)
1289 gen_op_set_cc_op(s1->cc_op);
1290 gen_compute_eflags_c(cpu_tmp4);
1291 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1292 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1293 if (d != OR_TMP0)
1294 gen_op_mov_reg_T0(ot, d);
1295 else
1296 gen_op_st_T0_A0(ot + s1->mem_index);
1297 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1298 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1299 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1300 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1301 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1302 s1->cc_op = CC_OP_DYNAMIC;
1303 break;
1304 case OP_SBBL:
1305 if (s1->cc_op != CC_OP_DYNAMIC)
1306 gen_op_set_cc_op(s1->cc_op);
1307 gen_compute_eflags_c(cpu_tmp4);
1308 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1309 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1310 if (d != OR_TMP0)
1311 gen_op_mov_reg_T0(ot, d);
1312 else
1313 gen_op_st_T0_A0(ot + s1->mem_index);
1314 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1315 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1316 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1317 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1318 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1319 s1->cc_op = CC_OP_DYNAMIC;
1320 break;
1321 case OP_ADDL:
1322 gen_op_addl_T0_T1();
1323 if (d != OR_TMP0)
1324 gen_op_mov_reg_T0(ot, d);
1325 else
1326 gen_op_st_T0_A0(ot + s1->mem_index);
1327 gen_op_update2_cc();
1328 s1->cc_op = CC_OP_ADDB + ot;
1329 break;
1330 case OP_SUBL:
1331 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1332 if (d != OR_TMP0)
1333 gen_op_mov_reg_T0(ot, d);
1334 else
1335 gen_op_st_T0_A0(ot + s1->mem_index);
1336 gen_op_update2_cc();
1337 s1->cc_op = CC_OP_SUBB + ot;
1338 break;
1339 default:
1340 case OP_ANDL:
1341 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1342 if (d != OR_TMP0)
1343 gen_op_mov_reg_T0(ot, d);
1344 else
1345 gen_op_st_T0_A0(ot + s1->mem_index);
1346 gen_op_update1_cc();
1347 s1->cc_op = CC_OP_LOGICB + ot;
1348 break;
1349 case OP_ORL:
1350 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1351 if (d != OR_TMP0)
1352 gen_op_mov_reg_T0(ot, d);
1353 else
1354 gen_op_st_T0_A0(ot + s1->mem_index);
1355 gen_op_update1_cc();
1356 s1->cc_op = CC_OP_LOGICB + ot;
1357 break;
1358 case OP_XORL:
1359 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1360 if (d != OR_TMP0)
1361 gen_op_mov_reg_T0(ot, d);
1362 else
1363 gen_op_st_T0_A0(ot + s1->mem_index);
1364 gen_op_update1_cc();
1365 s1->cc_op = CC_OP_LOGICB + ot;
1366 break;
1367 case OP_CMPL:
1368 gen_op_cmpl_T0_T1_cc();
1369 s1->cc_op = CC_OP_SUBB + ot;
1370 break;
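/* Note how ADC and SBB stay lazily evaluable even though they consume
   CF: the incoming carry is materialised with gen_compute_eflags_c(),
   and (carry << 2) added to CC_OP_ADDB/CC_OP_SUBB + ot selects the
   ADC/SBB row of the cc_op table at run time (the enum lays the ADC/SBB
   sizes directly after the ADD/SUB ones), so cc_op must be left as
   CC_OP_DYNAMIC at translation time. */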
1374 /* if d == OR_TMP0, it means memory operand (address in A0) */
1375 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1377 if (d != OR_TMP0)
1378 gen_op_mov_TN_reg(ot, 0, d);
1379 else
1380 gen_op_ld_T0_A0(ot + s1->mem_index);
1381 if (s1->cc_op != CC_OP_DYNAMIC)
1382 gen_op_set_cc_op(s1->cc_op);
1383 if (c > 0) {
1384 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1385 s1->cc_op = CC_OP_INCB + ot;
1386 } else {
1387 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1388 s1->cc_op = CC_OP_DECB + ot;
1390 if (d != OR_TMP0)
1391 gen_op_mov_reg_T0(ot, d);
1392 else
1393 gen_op_st_T0_A0(ot + s1->mem_index);
1394 gen_compute_eflags_c(cpu_cc_src);
1395 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
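/* INC and DEC must leave CF untouched, so the carry produced by the
   previous flag-setting operation is computed into cc_src just before
   cc_dst is overwritten with the new result; the CC_OP_INCB..CC_OP_DECQ
   modes then read CF back from cc_src while deriving the other flags
   from cc_dst. */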
1398 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1399 int is_right, int is_arith)
1401 target_ulong mask;
1402 int shift_label;
1403 TCGv t0, t1, t2;
1405 if (ot == OT_QUAD) {
1406 mask = 0x3f;
1407 } else {
1408 mask = 0x1f;
1411 /* load */
1412 if (op1 == OR_TMP0) {
1413 gen_op_ld_T0_A0(ot + s->mem_index);
1414 } else {
1415 gen_op_mov_TN_reg(ot, 0, op1);
1418 t0 = tcg_temp_local_new();
1419 t1 = tcg_temp_local_new();
1420 t2 = tcg_temp_local_new();
1422 tcg_gen_andi_tl(t2, cpu_T[1], mask);
1424 if (is_right) {
1425 if (is_arith) {
1426 gen_exts(ot, cpu_T[0]);
1427 tcg_gen_mov_tl(t0, cpu_T[0]);
1428 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], t2);
1429 } else {
1430 gen_extu(ot, cpu_T[0]);
1431 tcg_gen_mov_tl(t0, cpu_T[0]);
1432 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], t2);
1434 } else {
1435 tcg_gen_mov_tl(t0, cpu_T[0]);
1436 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], t2);
1439 /* store */
1440 if (op1 == OR_TMP0) {
1441 gen_op_st_T0_A0(ot + s->mem_index);
1442 } else {
1443 gen_op_mov_reg_T0(ot, op1);
1446 /* update eflags if non zero shift */
1447 if (s->cc_op != CC_OP_DYNAMIC) {
1448 gen_op_set_cc_op(s->cc_op);
1451 tcg_gen_mov_tl(t1, cpu_T[0]);
1453 shift_label = gen_new_label();
1454 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, shift_label);
1456 tcg_gen_addi_tl(t2, t2, -1);
1457 tcg_gen_mov_tl(cpu_cc_dst, t1);
1459 if (is_right) {
1460 if (is_arith) {
1461 tcg_gen_sar_tl(cpu_cc_src, t0, t2);
1462 } else {
1463 tcg_gen_shr_tl(cpu_cc_src, t0, t2);
1465 } else {
1466 tcg_gen_shl_tl(cpu_cc_src, t0, t2);
1469 if (is_right) {
1470 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1471 } else {
1472 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1475 gen_set_label(shift_label);
1476 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1478 tcg_temp_free(t0);
1479 tcg_temp_free(t1);
1480 tcg_temp_free(t2);
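/* A variable shift only updates the flags when the masked count is
   non-zero, and that count is unknown at translation time, so the flag
   update above is guarded by a run-time branch and cc_op has to be left
   as CC_OP_DYNAMIC. The immediate-count variant below can decide this
   statically. */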
1483 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1484 int is_right, int is_arith)
1486 int mask;
1488 if (ot == OT_QUAD)
1489 mask = 0x3f;
1490 else
1491 mask = 0x1f;
1493 /* load */
1494 if (op1 == OR_TMP0)
1495 gen_op_ld_T0_A0(ot + s->mem_index);
1496 else
1497 gen_op_mov_TN_reg(ot, 0, op1);
1499 op2 &= mask;
1500 if (op2 != 0) {
1501 if (is_right) {
1502 if (is_arith) {
1503 gen_exts(ot, cpu_T[0]);
1504 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1505 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1506 } else {
1507 gen_extu(ot, cpu_T[0]);
1508 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1509 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1511 } else {
1512 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1513 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1517 /* store */
1518 if (op1 == OR_TMP0)
1519 gen_op_st_T0_A0(ot + s->mem_index);
1520 else
1521 gen_op_mov_reg_T0(ot, op1);
1523 /* update eflags if non zero shift */
1524 if (op2 != 0) {
1525 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1526 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1527 if (is_right)
1528 s->cc_op = CC_OP_SARB + ot;
1529 else
1530 s->cc_op = CC_OP_SHLB + ot;
1534 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1536 if (arg2 >= 0)
1537 tcg_gen_shli_tl(ret, arg1, arg2);
1538 else
1539 tcg_gen_shri_tl(ret, arg1, -arg2);
1542 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1543 int is_right)
1545 target_ulong mask;
1546 int label1, label2, data_bits;
1547 TCGv t0, t1, t2, a0;
1549 /* XXX: inefficient, but we must use local temps */
1550 t0 = tcg_temp_local_new();
1551 t1 = tcg_temp_local_new();
1552 t2 = tcg_temp_local_new();
1553 a0 = tcg_temp_local_new();
1555 if (ot == OT_QUAD)
1556 mask = 0x3f;
1557 else
1558 mask = 0x1f;
1560 /* load */
1561 if (op1 == OR_TMP0) {
1562 tcg_gen_mov_tl(a0, cpu_A0);
1563 gen_op_ld_v(ot + s->mem_index, t0, a0);
1564 } else {
1565 gen_op_mov_v_reg(ot, t0, op1);
1568 tcg_gen_mov_tl(t1, cpu_T[1]);
1570 tcg_gen_andi_tl(t1, t1, mask);
1572 /* Must test zero case to avoid using undefined behaviour in TCG
1573 shifts. */
1574 label1 = gen_new_label();
1575 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1577 if (ot <= OT_WORD)
1578 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1579 else
1580 tcg_gen_mov_tl(cpu_tmp0, t1);
1582 gen_extu(ot, t0);
1583 tcg_gen_mov_tl(t2, t0);
1585 data_bits = 8 << ot;
1586 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1587 fix TCG definition) */
1588 if (is_right) {
1589 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1590 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1591 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1592 } else {
1593 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1594 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1595 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1597 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1599 gen_set_label(label1);
1600 /* store */
1601 if (op1 == OR_TMP0) {
1602 gen_op_st_v(ot + s->mem_index, t0, a0);
1603 } else {
1604 gen_op_mov_reg_v(ot, op1, t0);
1607 /* update eflags */
1608 if (s->cc_op != CC_OP_DYNAMIC)
1609 gen_op_set_cc_op(s->cc_op);
1611 label2 = gen_new_label();
1612 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1614 gen_compute_eflags(cpu_cc_src);
1615 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1616 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1617 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1618 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1619 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1620 if (is_right) {
1621 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1623 tcg_gen_andi_tl(t0, t0, CC_C);
1624 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1626 tcg_gen_discard_tl(cpu_cc_dst);
1627 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1629 gen_set_label(label2);
1630 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1632 tcg_temp_free(t0);
1633 tcg_temp_free(t1);
1634 tcg_temp_free(t2);
1635 tcg_temp_free(a0);
1638 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1639 int is_right)
1641 int mask;
1642 int data_bits;
1643 TCGv t0, t1, a0;
1645 /* XXX: inefficient, but we must use local temps */
1646 t0 = tcg_temp_local_new();
1647 t1 = tcg_temp_local_new();
1648 a0 = tcg_temp_local_new();
1650 if (ot == OT_QUAD)
1651 mask = 0x3f;
1652 else
1653 mask = 0x1f;
1655 /* load */
1656 if (op1 == OR_TMP0) {
1657 tcg_gen_mov_tl(a0, cpu_A0);
1658 gen_op_ld_v(ot + s->mem_index, t0, a0);
1659 } else {
1660 gen_op_mov_v_reg(ot, t0, op1);
1663 gen_extu(ot, t0);
1664 tcg_gen_mov_tl(t1, t0);
1666 op2 &= mask;
1667 data_bits = 8 << ot;
1668 if (op2 != 0) {
1669 int shift = op2 & ((1 << (3 + ot)) - 1);
1670 if (is_right) {
1671 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1672 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1674 else {
1675 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1676 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1678 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1681 /* store */
1682 if (op1 == OR_TMP0) {
1683 gen_op_st_v(ot + s->mem_index, t0, a0);
1684 } else {
1685 gen_op_mov_reg_v(ot, op1, t0);
1688 if (op2 != 0) {
1689 /* update eflags */
1690 if (s->cc_op != CC_OP_DYNAMIC)
1691 gen_op_set_cc_op(s->cc_op);
1693 gen_compute_eflags(cpu_cc_src);
1694 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1695 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1696 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1697 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1698 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1699 if (is_right) {
1700 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1702 tcg_gen_andi_tl(t0, t0, CC_C);
1703 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1705 tcg_gen_discard_tl(cpu_cc_dst);
1706 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1707 s->cc_op = CC_OP_EFLAGS;
1710 tcg_temp_free(t0);
1711 tcg_temp_free(t1);
1712 tcg_temp_free(a0);
1715 /* XXX: add faster immediate = 1 case */
1716 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1717 int is_right)
1719 int label1;
1721 if (s->cc_op != CC_OP_DYNAMIC)
1722 gen_op_set_cc_op(s->cc_op);
1724 /* load */
1725 if (op1 == OR_TMP0)
1726 gen_op_ld_T0_A0(ot + s->mem_index);
1727 else
1728 gen_op_mov_TN_reg(ot, 0, op1);
1730 if (is_right) {
1731 switch (ot) {
1732 case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1733 case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1734 case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1735 #ifdef TARGET_X86_64
1736 case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1737 #endif
1739 } else {
1740 switch (ot) {
1741 case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1742 case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1743 case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1744 #ifdef TARGET_X86_64
1745 case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1746 #endif
1749 /* store */
1750 if (op1 == OR_TMP0)
1751 gen_op_st_T0_A0(ot + s->mem_index);
1752 else
1753 gen_op_mov_reg_T0(ot, op1);
1755 /* update eflags */
1756 label1 = gen_new_label();
1757 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);
1759 tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
1760 tcg_gen_discard_tl(cpu_cc_dst);
1761 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1763 gen_set_label(label1);
1764 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1767 /* XXX: add faster immediate case */
1768 static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
1769 int is_right)
1771 int label1, label2, data_bits;
1772 target_ulong mask;
1773 TCGv t0, t1, t2, a0;
1775 t0 = tcg_temp_local_new();
1776 t1 = tcg_temp_local_new();
1777 t2 = tcg_temp_local_new();
1778 a0 = tcg_temp_local_new();
1780 if (ot == OT_QUAD)
1781 mask = 0x3f;
1782 else
1783 mask = 0x1f;
1785 /* load */
1786 if (op1 == OR_TMP0) {
1787 tcg_gen_mov_tl(a0, cpu_A0);
1788 gen_op_ld_v(ot + s->mem_index, t0, a0);
1789 } else {
1790 gen_op_mov_v_reg(ot, t0, op1);
1793 tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);
1795 tcg_gen_mov_tl(t1, cpu_T[1]);
1796 tcg_gen_mov_tl(t2, cpu_T3);
1798 /* Must test zero case to avoid using undefined behaviour in TCG
1799 shifts. */
1800 label1 = gen_new_label();
1801 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1803 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1804 if (ot == OT_WORD) {
1805 /* Note: we implement the Intel behaviour for shift count > 16 */
1806 if (is_right) {
1807 tcg_gen_andi_tl(t0, t0, 0xffff);
1808 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1809 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1810 tcg_gen_ext32u_tl(t0, t0);
1812 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1814             /* only needed if count > 16, but a test would complicate the code */
1815 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1816 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1818 tcg_gen_shr_tl(t0, t0, t2);
1820 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1821 } else {
1822 /* XXX: not optimal */
1823 tcg_gen_andi_tl(t0, t0, 0xffff);
1824 tcg_gen_shli_tl(t1, t1, 16);
1825 tcg_gen_or_tl(t1, t1, t0);
1826 tcg_gen_ext32u_tl(t1, t1);
1828 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1829 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1830 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1831 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1833 tcg_gen_shl_tl(t0, t0, t2);
1834 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1835 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1836 tcg_gen_or_tl(t0, t0, t1);
1838 } else {
1839 data_bits = 8 << ot;
1840 if (is_right) {
1841 if (ot == OT_LONG)
1842 tcg_gen_ext32u_tl(t0, t0);
1844 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1846 tcg_gen_shr_tl(t0, t0, t2);
1847 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1848 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1849 tcg_gen_or_tl(t0, t0, t1);
1851 } else {
1852 if (ot == OT_LONG)
1853 tcg_gen_ext32u_tl(t1, t1);
1855 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1857 tcg_gen_shl_tl(t0, t0, t2);
1858 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1859 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1860 tcg_gen_or_tl(t0, t0, t1);
1863 tcg_gen_mov_tl(t1, cpu_tmp4);
1865 gen_set_label(label1);
1866 /* store */
1867 if (op1 == OR_TMP0) {
1868 gen_op_st_v(ot + s->mem_index, t0, a0);
1869 } else {
1870 gen_op_mov_reg_v(ot, op1, t0);
1873 /* update eflags */
1874 if (s->cc_op != CC_OP_DYNAMIC)
1875 gen_op_set_cc_op(s->cc_op);
1877 label2 = gen_new_label();
1878 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
1880 tcg_gen_mov_tl(cpu_cc_src, t1);
1881 tcg_gen_mov_tl(cpu_cc_dst, t0);
1882 if (is_right) {
1883 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1884 } else {
1885 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1887 gen_set_label(label2);
1888 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1890 tcg_temp_free(t0);
1891 tcg_temp_free(t1);
1892 tcg_temp_free(t2);
1893 tcg_temp_free(a0);
1896 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1898 if (s != OR_TMP1)
1899 gen_op_mov_TN_reg(ot, 1, s);
1900 switch(op) {
1901 case OP_ROL:
1902 gen_rot_rm_T1(s1, ot, d, 0);
1903 break;
1904 case OP_ROR:
1905 gen_rot_rm_T1(s1, ot, d, 1);
1906 break;
1907 case OP_SHL:
1908 case OP_SHL1:
1909 gen_shift_rm_T1(s1, ot, d, 0, 0);
1910 break;
1911 case OP_SHR:
1912 gen_shift_rm_T1(s1, ot, d, 1, 0);
1913 break;
1914 case OP_SAR:
1915 gen_shift_rm_T1(s1, ot, d, 1, 1);
1916 break;
1917 case OP_RCL:
1918 gen_rotc_rm_T1(s1, ot, d, 0);
1919 break;
1920 case OP_RCR:
1921 gen_rotc_rm_T1(s1, ot, d, 1);
1922 break;
1926 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
1928 switch(op) {
1929 case OP_ROL:
1930 gen_rot_rm_im(s1, ot, d, c, 0);
1931 break;
1932 case OP_ROR:
1933 gen_rot_rm_im(s1, ot, d, c, 1);
1934 break;
1935 case OP_SHL:
1936 case OP_SHL1:
1937 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1938 break;
1939 case OP_SHR:
1940 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1941 break;
1942 case OP_SAR:
1943 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1944 break;
1945 default:
1946 /* currently not optimized */
1947 gen_op_movl_T1_im(c);
1948 gen_shift(s1, op, ot, d, OR_TMP1);
1949 break;
1953 static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
1955 target_long disp;
1956 int havesib;
1957 int base;
1958 int index;
1959 int scale;
1960 int opreg;
1961 int mod, rm, code, override, must_add_seg;
1963 override = s->override;
1964 must_add_seg = s->addseg;
1965 if (override >= 0)
1966 must_add_seg = 1;
1967 mod = (modrm >> 6) & 3;
1968 rm = modrm & 7;
1970 if (s->aflag) {
1972 havesib = 0;
1973 base = rm;
1974 index = 0;
1975 scale = 0;
1977 if (base == 4) {
1978 havesib = 1;
1979 code = ldub_code(s->pc++);
1980 scale = (code >> 6) & 3;
1981 index = ((code >> 3) & 7) | REX_X(s);
1982 base = (code & 7);
1984 base |= REX_B(s);
1986 switch (mod) {
1987 case 0:
1988 if ((base & 7) == 5) {
1989 base = -1;
1990 disp = (int32_t)ldl_code(s->pc);
1991 s->pc += 4;
1992 if (CODE64(s) && !havesib) {
1993 disp += s->pc + s->rip_offset;
1995 } else {
1996 disp = 0;
1998 break;
1999 case 1:
2000 disp = (int8_t)ldub_code(s->pc++);
2001 break;
2002 default:
2003 case 2:
2004 disp = (int32_t)ldl_code(s->pc);
2005 s->pc += 4;
2006 break;
2009 if (base >= 0) {
2010 /* for correct popl handling with esp */
2011 if (base == 4 && s->popl_esp_hack)
2012 disp += s->popl_esp_hack;
2013 #ifdef TARGET_X86_64
2014 if (s->aflag == 2) {
2015 gen_op_movq_A0_reg(base);
2016 if (disp != 0) {
2017 gen_op_addq_A0_im(disp);
2019 } else
2020 #endif
2022 gen_op_movl_A0_reg(base);
2023 if (disp != 0)
2024 gen_op_addl_A0_im(disp);
2026 } else {
2027 #ifdef TARGET_X86_64
2028 if (s->aflag == 2) {
2029 gen_op_movq_A0_im(disp);
2030 } else
2031 #endif
2033 gen_op_movl_A0_im(disp);
2036 /* index == 4 means no index */
2037 if (havesib && (index != 4)) {
2038 #ifdef TARGET_X86_64
2039 if (s->aflag == 2) {
2040 gen_op_addq_A0_reg_sN(scale, index);
2041 } else
2042 #endif
2044 gen_op_addl_A0_reg_sN(scale, index);
2047 if (must_add_seg) {
2048 if (override < 0) {
2049 if (base == R_EBP || base == R_ESP)
2050 override = R_SS;
2051 else
2052 override = R_DS;
2054 #ifdef TARGET_X86_64
2055 if (s->aflag == 2) {
2056 gen_op_addq_A0_seg(override);
2057 } else
2058 #endif
2060 gen_op_addl_A0_seg(override);
2063 } else {
2064 switch (mod) {
2065 case 0:
2066 if (rm == 6) {
2067 disp = lduw_code(s->pc);
2068 s->pc += 2;
2069 gen_op_movl_A0_im(disp);
2070 rm = 0; /* avoid SS override */
2071 goto no_rm;
2072 } else {
2073 disp = 0;
2075 break;
2076 case 1:
2077 disp = (int8_t)ldub_code(s->pc++);
2078 break;
2079 default:
2080 case 2:
2081 disp = lduw_code(s->pc);
2082 s->pc += 2;
2083 break;
2085 switch(rm) {
2086 case 0:
2087 gen_op_movl_A0_reg(R_EBX);
2088 gen_op_addl_A0_reg_sN(0, R_ESI);
2089 break;
2090 case 1:
2091 gen_op_movl_A0_reg(R_EBX);
2092 gen_op_addl_A0_reg_sN(0, R_EDI);
2093 break;
2094 case 2:
2095 gen_op_movl_A0_reg(R_EBP);
2096 gen_op_addl_A0_reg_sN(0, R_ESI);
2097 break;
2098 case 3:
2099 gen_op_movl_A0_reg(R_EBP);
2100 gen_op_addl_A0_reg_sN(0, R_EDI);
2101 break;
2102 case 4:
2103 gen_op_movl_A0_reg(R_ESI);
2104 break;
2105 case 5:
2106 gen_op_movl_A0_reg(R_EDI);
2107 break;
2108 case 6:
2109 gen_op_movl_A0_reg(R_EBP);
2110 break;
2111 default:
2112 case 7:
2113 gen_op_movl_A0_reg(R_EBX);
2114 break;
2116 if (disp != 0)
2117 gen_op_addl_A0_im(disp);
2118 gen_op_andl_A0_ffff();
2119 no_rm:
2120 if (must_add_seg) {
2121 if (override < 0) {
2122 if (rm == 2 || rm == 3 || rm == 6)
2123 override = R_SS;
2124 else
2125 override = R_DS;
2127 gen_op_addl_A0_seg(override);
2131 opreg = OR_A0;
2132 disp = 0;
2133 *reg_ptr = opreg;
2134 *offset_ptr = disp;
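/* gen_lea_modrm() leaves the effective address of a memory operand in
   cpu_A0: it decodes the optional SIB byte and displacement for 32/64
   bit addressing (or the fixed base/index register pairs for 16 bit
   addressing) and adds the segment base when needed, defaulting to SS
   for EBP/ESP-based forms and DS otherwise. */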
2137 static void gen_nop_modrm(DisasContext *s, int modrm)
2139 int mod, rm, base, code;
2141 mod = (modrm >> 6) & 3;
2142 if (mod == 3)
2143 return;
2144 rm = modrm & 7;
2146 if (s->aflag) {
2148 base = rm;
2150 if (base == 4) {
2151 code = ldub_code(s->pc++);
2152 base = (code & 7);
2155 switch (mod) {
2156 case 0:
2157 if (base == 5) {
2158 s->pc += 4;
2160 break;
2161 case 1:
2162 s->pc++;
2163 break;
2164 default:
2165 case 2:
2166 s->pc += 4;
2167 break;
2169 } else {
2170 switch (mod) {
2171 case 0:
2172 if (rm == 6) {
2173 s->pc += 2;
2175 break;
2176 case 1:
2177 s->pc++;
2178 break;
2179 default:
2180 case 2:
2181 s->pc += 2;
2182 break;
2187 /* used for LEA and MOV AX, mem */
2188 static void gen_add_A0_ds_seg(DisasContext *s)
2190 int override, must_add_seg;
2191 must_add_seg = s->addseg;
2192 override = R_DS;
2193 if (s->override >= 0) {
2194 override = s->override;
2195 must_add_seg = 1;
2197 if (must_add_seg) {
2198 #ifdef TARGET_X86_64
2199 if (CODE64(s)) {
2200 gen_op_addq_A0_seg(override);
2201 } else
2202 #endif
2204 gen_op_addl_A0_seg(override);
2209 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2210 OR_TMP0 */
2211 static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
2213 int mod, rm, opreg, disp;
2215 mod = (modrm >> 6) & 3;
2216 rm = (modrm & 7) | REX_B(s);
2217 if (mod == 3) {
2218 if (is_store) {
2219 if (reg != OR_TMP0)
2220 gen_op_mov_TN_reg(ot, 0, reg);
2221 gen_op_mov_reg_T0(ot, rm);
2222 } else {
2223 gen_op_mov_TN_reg(ot, 0, rm);
2224 if (reg != OR_TMP0)
2225 gen_op_mov_reg_T0(ot, reg);
2227 } else {
2228 gen_lea_modrm(s, modrm, &opreg, &disp);
2229 if (is_store) {
2230 if (reg != OR_TMP0)
2231 gen_op_mov_TN_reg(ot, 0, reg);
2232 gen_op_st_T0_A0(ot + s->mem_index);
2233 } else {
2234 gen_op_ld_T0_A0(ot + s->mem_index);
2235 if (reg != OR_TMP0)
2236 gen_op_mov_reg_T0(ot, reg);
2241 static inline uint32_t insn_get(DisasContext *s, int ot)
2243 uint32_t ret;
2245 switch(ot) {
2246 case OT_BYTE:
2247 ret = ldub_code(s->pc);
2248 s->pc++;
2249 break;
2250 case OT_WORD:
2251 ret = lduw_code(s->pc);
2252 s->pc += 2;
2253 break;
2254 default:
2255 case OT_LONG:
2256 ret = ldl_code(s->pc);
2257 s->pc += 4;
2258 break;
2260 return ret;
2263 static inline int insn_const_size(unsigned int ot)
2265 if (ot <= OT_LONG)
2266 return 1 << ot;
2267 else
2268 return 4;
2271 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2273 TranslationBlock *tb;
2274 target_ulong pc;
2276 pc = s->cs_base + eip;
2277 tb = s->tb;
2278 /* NOTE: we handle the case where the TB spans two pages here */
2279 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2280 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2281 /* jump to same page: we can use a direct jump */
2282 tcg_gen_goto_tb(tb_num);
2283 gen_jmp_im(eip);
2284 tcg_gen_exit_tb((tcg_target_long)tb + tb_num);
2285 } else {
2286 /* jump to another page: currently not optimized */
2287 gen_jmp_im(eip);
2288 gen_eob(s);
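/* Direct block chaining: when the target address stays on the same
   page(s) as the current TB, goto_tb/exit_tb are emitted with the TB
   pointer and slot number so the generated jump can later be patched to
   go straight to the next TB; otherwise EIP is simply updated and the
   block ends. */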
2292 static inline void gen_jcc(DisasContext *s, int b,
2293 target_ulong val, target_ulong next_eip)
2295 int l1, l2, cc_op;
2297 cc_op = s->cc_op;
2298 gen_update_cc_op(s);
2299 if (s->jmp_opt) {
2300 l1 = gen_new_label();
2301 gen_jcc1(s, cc_op, b, l1);
2303 gen_goto_tb(s, 0, next_eip);
2305 gen_set_label(l1);
2306 gen_goto_tb(s, 1, val);
2307 s->is_jmp = DISAS_TB_JUMP;
2308 } else {
2310 l1 = gen_new_label();
2311 l2 = gen_new_label();
2312 gen_jcc1(s, cc_op, b, l1);
2314 gen_jmp_im(next_eip);
2315 tcg_gen_br(l2);
2317 gen_set_label(l1);
2318 gen_jmp_im(val);
2319 gen_set_label(l2);
2320 gen_eob(s);
2324 static void gen_setcc(DisasContext *s, int b)
2326 int inv, jcc_op, l1;
2327 TCGv t0;
2329 if (is_fast_jcc_case(s, b)) {
2330 /* nominal case: we use a jump */
2331 /* XXX: make it faster by adding new instructions in TCG */
2332 t0 = tcg_temp_local_new();
2333 tcg_gen_movi_tl(t0, 0);
2334 l1 = gen_new_label();
2335 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2336 tcg_gen_movi_tl(t0, 1);
2337 gen_set_label(l1);
2338 tcg_gen_mov_tl(cpu_T[0], t0);
2339 tcg_temp_free(t0);
2340 } else {
2341 /* slow case: it is more efficient not to generate a jump,
2342        although it is questionable whether this optimization is
2343        worthwhile */
2344 inv = b & 1;
2345 jcc_op = (b >> 1) & 7;
2346 gen_setcc_slow_T0(s, jcc_op);
2347 if (inv) {
2348 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2353 static inline void gen_op_movl_T0_seg(int seg_reg)
2355 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2356 offsetof(CPUX86State,segs[seg_reg].selector));
2359 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2361 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2362 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2363 offsetof(CPUX86State,segs[seg_reg].selector));
2364 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2365 tcg_gen_st_tl(cpu_T[0], cpu_env,
2366 offsetof(CPUX86State,segs[seg_reg].base));
2369 /* move T0 to seg_reg and compute if the CPU state may change. Never
2370 call this function with seg_reg == R_CS */
2371 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2373 if (s->pe && !s->vm86) {
2374 /* XXX: optimize by finding processor state dynamically */
2375 if (s->cc_op != CC_OP_DYNAMIC)
2376 gen_op_set_cc_op(s->cc_op);
2377 gen_jmp_im(cur_eip);
2378 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2379 gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
2380 /* abort translation because the addseg value may change or
2381 because ss32 may change. For R_SS, translation must always
2382 stop, as special handling is needed to disable hardware
2383 interrupts for the next instruction */
2384 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2385 s->is_jmp = DISAS_TB_JUMP;
2386 } else {
2387 gen_op_movl_seg_T0_vm(seg_reg);
2388 if (seg_reg == R_SS)
2389 s->is_jmp = DISAS_TB_JUMP;
2393 static inline int svm_is_rep(int prefixes)
2395 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2398 static inline void
2399 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2400 uint32_t type, uint64_t param)
2402 /* no SVM activated; fast case */
2403 if (likely(!(s->flags & HF_SVMI_MASK)))
2404 return;
2405 if (s->cc_op != CC_OP_DYNAMIC)
2406 gen_op_set_cc_op(s->cc_op);
2407 gen_jmp_im(pc_start - s->cs_base);
2408 gen_helper_svm_check_intercept_param(tcg_const_i32(type),
2409 tcg_const_i64(param));
2412 static inline void
2413 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2415 gen_svm_check_intercept_param(s, pc_start, type, 0);
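/* Add 'addend' to ESP/RSP, using the stack width implied by CODE64 and ss32. */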
2418 static inline void gen_stack_update(DisasContext *s, int addend)
2420 #ifdef TARGET_X86_64
2421 if (CODE64(s)) {
2422 gen_op_add_reg_im(2, R_ESP, addend);
2423 } else
2424 #endif
2425 if (s->ss32) {
2426 gen_op_add_reg_im(1, R_ESP, addend);
2427 } else {
2428 gen_op_add_reg_im(0, R_ESP, addend);
2432 /* generate a push. It depends on ss32, addseg and dflag */
2433 static void gen_push_T0(DisasContext *s)
2435 #ifdef TARGET_X86_64
2436 if (CODE64(s)) {
2437 gen_op_movq_A0_reg(R_ESP);
2438 if (s->dflag) {
2439 gen_op_addq_A0_im(-8);
2440 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2441 } else {
2442 gen_op_addq_A0_im(-2);
2443 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2445 gen_op_mov_reg_A0(2, R_ESP);
2446 } else
2447 #endif
2449 gen_op_movl_A0_reg(R_ESP);
2450 if (!s->dflag)
2451 gen_op_addl_A0_im(-2);
2452 else
2453 gen_op_addl_A0_im(-4);
2454 if (s->ss32) {
2455 if (s->addseg) {
2456 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2457 gen_op_addl_A0_seg(R_SS);
2459 } else {
2460 gen_op_andl_A0_ffff();
2461 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2462 gen_op_addl_A0_seg(R_SS);
2464 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2465 if (s->ss32 && !s->addseg)
2466 gen_op_mov_reg_A0(1, R_ESP);
2467 else
2468 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2472 /* generate a push. It depends on ss32, addseg and dflag */
2473 /* slower version for T1, only used for call Ev */
2474 static void gen_push_T1(DisasContext *s)
2476 #ifdef TARGET_X86_64
2477 if (CODE64(s)) {
2478 gen_op_movq_A0_reg(R_ESP);
2479 if (s->dflag) {
2480 gen_op_addq_A0_im(-8);
2481 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2482 } else {
2483 gen_op_addq_A0_im(-2);
2484 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2486 gen_op_mov_reg_A0(2, R_ESP);
2487 } else
2488 #endif
2490 gen_op_movl_A0_reg(R_ESP);
2491 if (!s->dflag)
2492 gen_op_addl_A0_im(-2);
2493 else
2494 gen_op_addl_A0_im(-4);
2495 if (s->ss32) {
2496 if (s->addseg) {
2497 gen_op_addl_A0_seg(R_SS);
2499 } else {
2500 gen_op_andl_A0_ffff();
2501 gen_op_addl_A0_seg(R_SS);
2503 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2505 if (s->ss32 && !s->addseg)
2506 gen_op_mov_reg_A0(1, R_ESP);
2507 else
2508 gen_stack_update(s, (-2) << s->dflag);
2512 /* a two-step pop is necessary for precise exceptions */
2513 static void gen_pop_T0(DisasContext *s)
2515 #ifdef TARGET_X86_64
2516 if (CODE64(s)) {
2517 gen_op_movq_A0_reg(R_ESP);
2518 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2519 } else
2520 #endif
2522 gen_op_movl_A0_reg(R_ESP);
2523 if (s->ss32) {
2524 if (s->addseg)
2525 gen_op_addl_A0_seg(R_SS);
2526 } else {
2527 gen_op_andl_A0_ffff();
2528 gen_op_addl_A0_seg(R_SS);
2530 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2534 static void gen_pop_update(DisasContext *s)
2536 #ifdef TARGET_X86_64
2537 if (CODE64(s) && s->dflag) {
2538 gen_stack_update(s, 8);
2539 } else
2540 #endif
2542 gen_stack_update(s, 2 << s->dflag);
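/* Compute the current stack address in A0 (segmented if addseg), keeping the
   unsegmented stack offset in T1. */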
2546 static void gen_stack_A0(DisasContext *s)
2548 gen_op_movl_A0_reg(R_ESP);
2549 if (!s->ss32)
2550 gen_op_andl_A0_ffff();
2551 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2552 if (s->addseg)
2553 gen_op_addl_A0_seg(R_SS);
2556 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2557 static void gen_pusha(DisasContext *s)
2559 int i;
2560 gen_op_movl_A0_reg(R_ESP);
2561 gen_op_addl_A0_im(-16 << s->dflag);
2562 if (!s->ss32)
2563 gen_op_andl_A0_ffff();
2564 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2565 if (s->addseg)
2566 gen_op_addl_A0_seg(R_SS);
2567 for(i = 0;i < 8; i++) {
2568 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2569 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2570 gen_op_addl_A0_im(2 << s->dflag);
2572 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2575 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2576 static void gen_popa(DisasContext *s)
2578 int i;
2579 gen_op_movl_A0_reg(R_ESP);
2580 if (!s->ss32)
2581 gen_op_andl_A0_ffff();
2582 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2583 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2584 if (s->addseg)
2585 gen_op_addl_A0_seg(R_SS);
2586 for(i = 0;i < 8; i++) {
2587 /* ESP is not reloaded */
2588 if (i != 3) {
2589 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2590 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2592 gen_op_addl_A0_im(2 << s->dflag);
2594 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
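/* ENTER: build a stack frame of 'esp_addend' bytes with 'level' nesting
   levels; a nonzero level is handled by a helper. */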
2597 static void gen_enter(DisasContext *s, int esp_addend, int level)
2599 int ot, opsize;
2601 level &= 0x1f;
2602 #ifdef TARGET_X86_64
2603 if (CODE64(s)) {
2604 ot = s->dflag ? OT_QUAD : OT_WORD;
2605 opsize = 1 << ot;
2607 gen_op_movl_A0_reg(R_ESP);
2608 gen_op_addq_A0_im(-opsize);
2609 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2611 /* push bp */
2612 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2613 gen_op_st_T0_A0(ot + s->mem_index);
2614 if (level) {
2615 /* XXX: must save state */
2616 gen_helper_enter64_level(tcg_const_i32(level),
2617 tcg_const_i32((ot == OT_QUAD)),
2618 cpu_T[1]);
2620 gen_op_mov_reg_T1(ot, R_EBP);
2621 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2622 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2623 } else
2624 #endif
2626 ot = s->dflag + OT_WORD;
2627 opsize = 2 << s->dflag;
2629 gen_op_movl_A0_reg(R_ESP);
2630 gen_op_addl_A0_im(-opsize);
2631 if (!s->ss32)
2632 gen_op_andl_A0_ffff();
2633 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2634 if (s->addseg)
2635 gen_op_addl_A0_seg(R_SS);
2636 /* push bp */
2637 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2638 gen_op_st_T0_A0(ot + s->mem_index);
2639 if (level) {
2640 /* XXX: must save state */
2641 gen_helper_enter_level(tcg_const_i32(level),
2642 tcg_const_i32(s->dflag),
2643 cpu_T[1]);
2645 gen_op_mov_reg_T1(ot, R_EBP);
2646 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2647 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2651 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2653 if (s->cc_op != CC_OP_DYNAMIC)
2654 gen_op_set_cc_op(s->cc_op);
2655 gen_jmp_im(cur_eip);
2656 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2657 s->is_jmp = DISAS_TB_JUMP;
2660 /* an interrupt is different from an exception because of the
2661 privilege checks */
2662 static void gen_interrupt(DisasContext *s, int intno,
2663 target_ulong cur_eip, target_ulong next_eip)
2665 if (s->cc_op != CC_OP_DYNAMIC)
2666 gen_op_set_cc_op(s->cc_op);
2667 gen_jmp_im(cur_eip);
2668 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2669 tcg_const_i32(next_eip - cur_eip));
2670 s->is_jmp = DISAS_TB_JUMP;
2673 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2675 if (s->cc_op != CC_OP_DYNAMIC)
2676 gen_op_set_cc_op(s->cc_op);
2677 gen_jmp_im(cur_eip);
2678 gen_helper_debug();
2679 s->is_jmp = DISAS_TB_JUMP;
2682 /* generate a generic end of block. A trace exception is also generated
2683 if needed */
2684 static void gen_eob(DisasContext *s)
2686 if (s->cc_op != CC_OP_DYNAMIC)
2687 gen_op_set_cc_op(s->cc_op);
2688 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2689 gen_helper_reset_inhibit_irq();
2691 if (s->tb->flags & HF_RF_MASK) {
2692 gen_helper_reset_rf();
2694 if (s->singlestep_enabled) {
2695 gen_helper_debug();
2696 } else if (s->tf) {
2697 gen_helper_single_step();
2698 } else {
2699 tcg_gen_exit_tb(0);
2701 s->is_jmp = DISAS_TB_JUMP;
2704 /* generate a jump to eip. No segment change may happen before this, as a
2705 direct jump (block chaining) to the next block may occur */
2706 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2708 if (s->jmp_opt) {
2709 gen_update_cc_op(s);
2710 gen_goto_tb(s, tb_num, eip);
2711 s->is_jmp = DISAS_TB_JUMP;
2712 } else {
2713 gen_jmp_im(eip);
2714 gen_eob(s);
2718 static void gen_jmp(DisasContext *s, target_ulong eip)
2720 gen_jmp_tb(s, eip, 0);
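/* Helpers for moving 64-bit and 128-bit values between guest memory at A0 and
   CPUX86State fields; the MMU index is recovered from 'idx' as (idx >> 2) - 1. */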
2723 static inline void gen_ldq_env_A0(int idx, int offset)
2725 int mem_index = (idx >> 2) - 1;
2726 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2727 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2730 static inline void gen_stq_env_A0(int idx, int offset)
2732 int mem_index = (idx >> 2) - 1;
2733 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2734 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2737 static inline void gen_ldo_env_A0(int idx, int offset)
2739 int mem_index = (idx >> 2) - 1;
2740 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2741 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2742 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2743 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2744 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2747 static inline void gen_sto_env_A0(int idx, int offset)
2749 int mem_index = (idx >> 2) - 1;
2750 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2751 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2752 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2753 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2754 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2757 static inline void gen_op_movo(int d_offset, int s_offset)
2759 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2760 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2761 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2762 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2765 static inline void gen_op_movq(int d_offset, int s_offset)
2767 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2768 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2771 static inline void gen_op_movl(int d_offset, int s_offset)
2773 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2774 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2777 static inline void gen_op_movq_env_0(int d_offset)
2779 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2780 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
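/* SSE helper call signatures: the suffix letters encode the argument types
   (p = pointer into env, i = i32, l = i64, t = target-width value). */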
2783 typedef void (*SSEFunc_i_p)(TCGv_i32 val, TCGv_ptr reg);
2784 typedef void (*SSEFunc_l_p)(TCGv_i64 val, TCGv_ptr reg);
2785 typedef void (*SSEFunc_0_pi)(TCGv_ptr reg, TCGv_i32 val);
2786 typedef void (*SSEFunc_0_pl)(TCGv_ptr reg, TCGv_i64 val);
2787 typedef void (*SSEFunc_0_pp)(TCGv_ptr reg_a, TCGv_ptr reg_b);
2788 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2789 typedef void (*SSEFunc_0_ppt)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv val);
2791 #define SSE_SPECIAL ((void *)1)
2792 #define SSE_DUMMY ((void *)2)
2794 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2795 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2796 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
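/* sse_op_table1 is indexed by the opcode byte following 0x0f; the second index
   selects the variant by mandatory prefix: none, 0x66, 0xF3, 0xF2. */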
2798 static const SSEFunc_0_pp sse_op_table1[256][4] = {
2799 /* 3DNow! extensions */
2800 [0x0e] = { SSE_DUMMY }, /* femms */
2801 [0x0f] = { SSE_DUMMY }, /* pf... */
2802 /* pure SSE operations */
2803 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2804 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2805 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2806 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2807 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2808 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2809 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2810 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2812 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2813 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2814 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2815 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2816 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2817 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2818 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2819 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2820 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2821 [0x51] = SSE_FOP(sqrt),
2822 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2823 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2824 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2825 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2826 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2827 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2828 [0x58] = SSE_FOP(add),
2829 [0x59] = SSE_FOP(mul),
2830 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2831 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2832 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2833 [0x5c] = SSE_FOP(sub),
2834 [0x5d] = SSE_FOP(min),
2835 [0x5e] = SSE_FOP(div),
2836 [0x5f] = SSE_FOP(max),
2838 [0xc2] = SSE_FOP(cmpeq),
2839 [0xc6] = { (SSEFunc_0_pp)gen_helper_shufps,
2840 (SSEFunc_0_pp)gen_helper_shufpd }, /* XXX: casts */
2842 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2843 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2845 /* MMX ops and their SSE extensions */
2846 [0x60] = MMX_OP2(punpcklbw),
2847 [0x61] = MMX_OP2(punpcklwd),
2848 [0x62] = MMX_OP2(punpckldq),
2849 [0x63] = MMX_OP2(packsswb),
2850 [0x64] = MMX_OP2(pcmpgtb),
2851 [0x65] = MMX_OP2(pcmpgtw),
2852 [0x66] = MMX_OP2(pcmpgtl),
2853 [0x67] = MMX_OP2(packuswb),
2854 [0x68] = MMX_OP2(punpckhbw),
2855 [0x69] = MMX_OP2(punpckhwd),
2856 [0x6a] = MMX_OP2(punpckhdq),
2857 [0x6b] = MMX_OP2(packssdw),
2858 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2859 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2860 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2861 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2862 [0x70] = { (SSEFunc_0_pp)gen_helper_pshufw_mmx,
2863 (SSEFunc_0_pp)gen_helper_pshufd_xmm,
2864 (SSEFunc_0_pp)gen_helper_pshufhw_xmm,
2865 (SSEFunc_0_pp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2866 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2867 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2868 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2869 [0x74] = MMX_OP2(pcmpeqb),
2870 [0x75] = MMX_OP2(pcmpeqw),
2871 [0x76] = MMX_OP2(pcmpeql),
2872 [0x77] = { SSE_DUMMY }, /* emms */
2873 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2874 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2875 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2876 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2877 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2878 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2879 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2880 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2881 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2882 [0xd1] = MMX_OP2(psrlw),
2883 [0xd2] = MMX_OP2(psrld),
2884 [0xd3] = MMX_OP2(psrlq),
2885 [0xd4] = MMX_OP2(paddq),
2886 [0xd5] = MMX_OP2(pmullw),
2887 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq (store), movq2dq, movdq2q */
2888 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2889 [0xd8] = MMX_OP2(psubusb),
2890 [0xd9] = MMX_OP2(psubusw),
2891 [0xda] = MMX_OP2(pminub),
2892 [0xdb] = MMX_OP2(pand),
2893 [0xdc] = MMX_OP2(paddusb),
2894 [0xdd] = MMX_OP2(paddusw),
2895 [0xde] = MMX_OP2(pmaxub),
2896 [0xdf] = MMX_OP2(pandn),
2897 [0xe0] = MMX_OP2(pavgb),
2898 [0xe1] = MMX_OP2(psraw),
2899 [0xe2] = MMX_OP2(psrad),
2900 [0xe3] = MMX_OP2(pavgw),
2901 [0xe4] = MMX_OP2(pmulhuw),
2902 [0xe5] = MMX_OP2(pmulhw),
2903 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2904 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2905 [0xe8] = MMX_OP2(psubsb),
2906 [0xe9] = MMX_OP2(psubsw),
2907 [0xea] = MMX_OP2(pminsw),
2908 [0xeb] = MMX_OP2(por),
2909 [0xec] = MMX_OP2(paddsb),
2910 [0xed] = MMX_OP2(paddsw),
2911 [0xee] = MMX_OP2(pmaxsw),
2912 [0xef] = MMX_OP2(pxor),
2913 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2914 [0xf1] = MMX_OP2(psllw),
2915 [0xf2] = MMX_OP2(pslld),
2916 [0xf3] = MMX_OP2(psllq),
2917 [0xf4] = MMX_OP2(pmuludq),
2918 [0xf5] = MMX_OP2(pmaddwd),
2919 [0xf6] = MMX_OP2(psadbw),
2920 [0xf7] = { (SSEFunc_0_pp)gen_helper_maskmov_mmx,
2921 (SSEFunc_0_pp)gen_helper_maskmov_xmm }, /* XXX: casts */
2922 [0xf8] = MMX_OP2(psubb),
2923 [0xf9] = MMX_OP2(psubw),
2924 [0xfa] = MMX_OP2(psubl),
2925 [0xfb] = MMX_OP2(psubq),
2926 [0xfc] = MMX_OP2(paddb),
2927 [0xfd] = MMX_OP2(paddw),
2928 [0xfe] = MMX_OP2(paddl),
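/* Immediate shift group (opcodes 0x71-0x73): the first index is
   8 * (opcode - 0x71) + ModRM reg field, the second is 0 for MMX, 1 for XMM. */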
2931 static const SSEFunc_0_pp sse_op_table2[3 * 8][2] = {
2932 [0 + 2] = MMX_OP2(psrlw),
2933 [0 + 4] = MMX_OP2(psraw),
2934 [0 + 6] = MMX_OP2(psllw),
2935 [8 + 2] = MMX_OP2(psrld),
2936 [8 + 4] = MMX_OP2(psrad),
2937 [8 + 6] = MMX_OP2(pslld),
2938 [16 + 2] = MMX_OP2(psrlq),
2939 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2940 [16 + 6] = MMX_OP2(psllq),
2941 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
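/* Scalar conversion helpers: table3a converts an integer to an SSE scalar,
   table3b converts an SSE scalar to an integer; the 'q' variants take or
   return 64-bit integers and exist only on x86-64. */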
2944 static const SSEFunc_0_pi sse_op_table3ai[] = {
2945 gen_helper_cvtsi2ss,
2946 gen_helper_cvtsi2sd
2949 #ifdef TARGET_X86_64
2950 static const SSEFunc_0_pl sse_op_table3aq[] = {
2951 gen_helper_cvtsq2ss,
2952 gen_helper_cvtsq2sd
2954 #endif
2956 static const SSEFunc_i_p sse_op_table3bi[] = {
2957 gen_helper_cvttss2si,
2958 gen_helper_cvttsd2si,
2959 gen_helper_cvtss2si,
2960 gen_helper_cvtsd2si
2963 #ifdef TARGET_X86_64
2964 static const SSEFunc_l_p sse_op_table3bq[] = {
2965 gen_helper_cvttss2sq,
2966 gen_helper_cvttsd2sq,
2967 gen_helper_cvtss2sq,
2968 gen_helper_cvtsd2sq
2970 #endif
2972 static const SSEFunc_0_pp sse_op_table4[8][4] = {
2973 SSE_FOP(cmpeq),
2974 SSE_FOP(cmplt),
2975 SSE_FOP(cmple),
2976 SSE_FOP(cmpunord),
2977 SSE_FOP(cmpneq),
2978 SSE_FOP(cmpnlt),
2979 SSE_FOP(cmpnle),
2980 SSE_FOP(cmpord),
2983 static const SSEFunc_0_pp sse_op_table5[256] = {
2984 [0x0c] = gen_helper_pi2fw,
2985 [0x0d] = gen_helper_pi2fd,
2986 [0x1c] = gen_helper_pf2iw,
2987 [0x1d] = gen_helper_pf2id,
2988 [0x8a] = gen_helper_pfnacc,
2989 [0x8e] = gen_helper_pfpnacc,
2990 [0x90] = gen_helper_pfcmpge,
2991 [0x94] = gen_helper_pfmin,
2992 [0x96] = gen_helper_pfrcp,
2993 [0x97] = gen_helper_pfrsqrt,
2994 [0x9a] = gen_helper_pfsub,
2995 [0x9e] = gen_helper_pfadd,
2996 [0xa0] = gen_helper_pfcmpgt,
2997 [0xa4] = gen_helper_pfmax,
2998 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2999 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3000 [0xaa] = gen_helper_pfsubr,
3001 [0xae] = gen_helper_pfacc,
3002 [0xb0] = gen_helper_pfcmpeq,
3003 [0xb4] = gen_helper_pfmul,
3004 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3005 [0xb7] = gen_helper_pmulhrw_mmx,
3006 [0xbb] = gen_helper_pswapd,
3007 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
3010 struct SSEOpHelper_pp {
3011 SSEFunc_0_pp op[2];
3012 uint32_t ext_mask;
3015 struct SSEOpHelper_ppi {
3016 SSEFunc_0_ppi op[2];
3017 uint32_t ext_mask;
3020 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3021 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3022 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3023 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
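/* Tables 6 and 7 cover the three-byte 0x0f38 and 0x0f3a opcode spaces; each
   entry also records the CPUID feature bit that must be present for the
   instruction to be legal. */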
3025 static const struct SSEOpHelper_pp sse_op_table6[256] = {
3026 [0x00] = SSSE3_OP(pshufb),
3027 [0x01] = SSSE3_OP(phaddw),
3028 [0x02] = SSSE3_OP(phaddd),
3029 [0x03] = SSSE3_OP(phaddsw),
3030 [0x04] = SSSE3_OP(pmaddubsw),
3031 [0x05] = SSSE3_OP(phsubw),
3032 [0x06] = SSSE3_OP(phsubd),
3033 [0x07] = SSSE3_OP(phsubsw),
3034 [0x08] = SSSE3_OP(psignb),
3035 [0x09] = SSSE3_OP(psignw),
3036 [0x0a] = SSSE3_OP(psignd),
3037 [0x0b] = SSSE3_OP(pmulhrsw),
3038 [0x10] = SSE41_OP(pblendvb),
3039 [0x14] = SSE41_OP(blendvps),
3040 [0x15] = SSE41_OP(blendvpd),
3041 [0x17] = SSE41_OP(ptest),
3042 [0x1c] = SSSE3_OP(pabsb),
3043 [0x1d] = SSSE3_OP(pabsw),
3044 [0x1e] = SSSE3_OP(pabsd),
3045 [0x20] = SSE41_OP(pmovsxbw),
3046 [0x21] = SSE41_OP(pmovsxbd),
3047 [0x22] = SSE41_OP(pmovsxbq),
3048 [0x23] = SSE41_OP(pmovsxwd),
3049 [0x24] = SSE41_OP(pmovsxwq),
3050 [0x25] = SSE41_OP(pmovsxdq),
3051 [0x28] = SSE41_OP(pmuldq),
3052 [0x29] = SSE41_OP(pcmpeqq),
3053 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3054 [0x2b] = SSE41_OP(packusdw),
3055 [0x30] = SSE41_OP(pmovzxbw),
3056 [0x31] = SSE41_OP(pmovzxbd),
3057 [0x32] = SSE41_OP(pmovzxbq),
3058 [0x33] = SSE41_OP(pmovzxwd),
3059 [0x34] = SSE41_OP(pmovzxwq),
3060 [0x35] = SSE41_OP(pmovzxdq),
3061 [0x37] = SSE42_OP(pcmpgtq),
3062 [0x38] = SSE41_OP(pminsb),
3063 [0x39] = SSE41_OP(pminsd),
3064 [0x3a] = SSE41_OP(pminuw),
3065 [0x3b] = SSE41_OP(pminud),
3066 [0x3c] = SSE41_OP(pmaxsb),
3067 [0x3d] = SSE41_OP(pmaxsd),
3068 [0x3e] = SSE41_OP(pmaxuw),
3069 [0x3f] = SSE41_OP(pmaxud),
3070 [0x40] = SSE41_OP(pmulld),
3071 [0x41] = SSE41_OP(phminposuw),
3074 static const struct SSEOpHelper_ppi sse_op_table7[256] = {
3075 [0x08] = SSE41_OP(roundps),
3076 [0x09] = SSE41_OP(roundpd),
3077 [0x0a] = SSE41_OP(roundss),
3078 [0x0b] = SSE41_OP(roundsd),
3079 [0x0c] = SSE41_OP(blendps),
3080 [0x0d] = SSE41_OP(blendpd),
3081 [0x0e] = SSE41_OP(pblendw),
3082 [0x0f] = SSSE3_OP(palignr),
3083 [0x14] = SSE41_SPECIAL, /* pextrb */
3084 [0x15] = SSE41_SPECIAL, /* pextrw */
3085 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3086 [0x17] = SSE41_SPECIAL, /* extractps */
3087 [0x20] = SSE41_SPECIAL, /* pinsrb */
3088 [0x21] = SSE41_SPECIAL, /* insertps */
3089 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3090 [0x40] = SSE41_OP(dpps),
3091 [0x41] = SSE41_OP(dppd),
3092 [0x42] = SSE41_OP(mpsadbw),
3093 [0x60] = SSE42_OP(pcmpestrm),
3094 [0x61] = SSE42_OP(pcmpestri),
3095 [0x62] = SSE42_OP(pcmpistrm),
3096 [0x63] = SSE42_OP(pcmpistri),
3099 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3101 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3102 int modrm, mod, rm, reg, reg_addr, offset_addr;
3103 SSEFunc_0_pp sse_fn_pp;
3104 SSEFunc_0_ppi sse_fn_ppi;
3105 SSEFunc_0_ppt sse_fn_ppt;
3107 b &= 0xff;
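/* b1 selects the prefix variant: 0 = no prefix, 1 = 0x66, 2 = 0xF3, 3 = 0xF2. */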
3108 if (s->prefix & PREFIX_DATA)
3109 b1 = 1;
3110 else if (s->prefix & PREFIX_REPZ)
3111 b1 = 2;
3112 else if (s->prefix & PREFIX_REPNZ)
3113 b1 = 3;
3114 else
3115 b1 = 0;
3116 sse_fn_pp = sse_op_table1[b][b1];
3117 if (!sse_fn_pp) {
3118 goto illegal_op;
3120 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3121 is_xmm = 1;
3122 } else {
3123 if (b1 == 0) {
3124 /* MMX case */
3125 is_xmm = 0;
3126 } else {
3127 is_xmm = 1;
3130 /* simple MMX/SSE operation */
3131 if (s->flags & HF_TS_MASK) {
3132 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3133 return;
3135 if (s->flags & HF_EM_MASK) {
3136 illegal_op:
3137 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3138 return;
3140 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3141 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3142 goto illegal_op;
3143 if (b == 0x0e) {
3144 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3145 goto illegal_op;
3146 /* femms */
3147 gen_helper_emms();
3148 return;
3150 if (b == 0x77) {
3151 /* emms */
3152 gen_helper_emms();
3153 return;
3155 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3156 the static cpu state) */
3157 if (!is_xmm) {
3158 gen_helper_enter_mmx();
3161 modrm = ldub_code(s->pc++);
3162 reg = ((modrm >> 3) & 7);
3163 if (is_xmm)
3164 reg |= rex_r;
3165 mod = (modrm >> 6) & 3;
3166 if (sse_fn_pp == SSE_SPECIAL) {
3167 b |= (b1 << 8);
3168 switch(b) {
3169 case 0x0e7: /* movntq */
3170 if (mod == 3)
3171 goto illegal_op;
3172 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3173 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3174 break;
3175 case 0x1e7: /* movntdq */
3176 case 0x02b: /* movntps */
3177 case 0x12b: /* movntpd */
3178 if (mod == 3)
3179 goto illegal_op;
3180 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3181 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3182 break;
3183 case 0x3f0: /* lddqu */
3184 if (mod == 3)
3185 goto illegal_op;
3186 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3187 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3188 break;
3189 case 0x22b: /* movntss */
3190 case 0x32b: /* movntsd */
3191 if (mod == 3)
3192 goto illegal_op;
3193 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3194 if (b1 & 1) {
3195 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3196 xmm_regs[reg]));
3197 } else {
3198 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3199 xmm_regs[reg].XMM_L(0)));
3200 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3202 break;
3203 case 0x6e: /* movd mm, ea */
3204 #ifdef TARGET_X86_64
3205 if (s->dflag == 2) {
3206 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3207 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3208 } else
3209 #endif
3211 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3212 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3213 offsetof(CPUX86State,fpregs[reg].mmx));
3214 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3215 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3217 break;
3218 case 0x16e: /* movd xmm, ea */
3219 #ifdef TARGET_X86_64
3220 if (s->dflag == 2) {
3221 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3222 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3223 offsetof(CPUX86State,xmm_regs[reg]));
3224 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3225 } else
3226 #endif
3228 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3229 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3230 offsetof(CPUX86State,xmm_regs[reg]));
3231 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3232 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3234 break;
3235 case 0x6f: /* movq mm, ea */
3236 if (mod != 3) {
3237 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3238 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3239 } else {
3240 rm = (modrm & 7);
3241 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3242 offsetof(CPUX86State,fpregs[rm].mmx));
3243 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3244 offsetof(CPUX86State,fpregs[reg].mmx));
3246 break;
3247 case 0x010: /* movups */
3248 case 0x110: /* movupd */
3249 case 0x028: /* movaps */
3250 case 0x128: /* movapd */
3251 case 0x16f: /* movdqa xmm, ea */
3252 case 0x26f: /* movdqu xmm, ea */
3253 if (mod != 3) {
3254 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3255 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3256 } else {
3257 rm = (modrm & 7) | REX_B(s);
3258 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3259 offsetof(CPUX86State,xmm_regs[rm]));
3261 break;
3262 case 0x210: /* movss xmm, ea */
3263 if (mod != 3) {
3264 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3265 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3266 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3267 gen_op_movl_T0_0();
3268 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3269 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3270 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3271 } else {
3272 rm = (modrm & 7) | REX_B(s);
3273 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3274 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3276 break;
3277 case 0x310: /* movsd xmm, ea */
3278 if (mod != 3) {
3279 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3280 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3281 gen_op_movl_T0_0();
3282 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3283 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3284 } else {
3285 rm = (modrm & 7) | REX_B(s);
3286 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3287 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3289 break;
3290 case 0x012: /* movlps */
3291 case 0x112: /* movlpd */
3292 if (mod != 3) {
3293 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3294 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3295 } else {
3296 /* movhlps */
3297 rm = (modrm & 7) | REX_B(s);
3298 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3299 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3301 break;
3302 case 0x212: /* movsldup */
3303 if (mod != 3) {
3304 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3305 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3306 } else {
3307 rm = (modrm & 7) | REX_B(s);
3308 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3309 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3310 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3311 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3313 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3314 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3315 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3316 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3317 break;
3318 case 0x312: /* movddup */
3319 if (mod != 3) {
3320 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3321 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3322 } else {
3323 rm = (modrm & 7) | REX_B(s);
3324 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3325 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3327 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3328 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3329 break;
3330 case 0x016: /* movhps */
3331 case 0x116: /* movhpd */
3332 if (mod != 3) {
3333 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3334 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3335 } else {
3336 /* movlhps */
3337 rm = (modrm & 7) | REX_B(s);
3338 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3339 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3341 break;
3342 case 0x216: /* movshdup */
3343 if (mod != 3) {
3344 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3345 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3346 } else {
3347 rm = (modrm & 7) | REX_B(s);
3348 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3349 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3350 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3351 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3353 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3354 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3355 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3356 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3357 break;
3358 case 0x178:
3359 case 0x378:
3361 int bit_index, field_length;
3363 if (b1 == 1 && reg != 0)
3364 goto illegal_op;
3365 field_length = ldub_code(s->pc++) & 0x3F;
3366 bit_index = ldub_code(s->pc++) & 0x3F;
3367 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3368 offsetof(CPUX86State,xmm_regs[reg]));
3369 if (b1 == 1)
3370 gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
3371 tcg_const_i32(field_length));
3372 else
3373 gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
3374 tcg_const_i32(field_length));
3376 break;
3377 case 0x7e: /* movd ea, mm */
3378 #ifdef TARGET_X86_64
3379 if (s->dflag == 2) {
3380 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3381 offsetof(CPUX86State,fpregs[reg].mmx));
3382 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3383 } else
3384 #endif
3386 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3387 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3388 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3390 break;
3391 case 0x17e: /* movd ea, xmm */
3392 #ifdef TARGET_X86_64
3393 if (s->dflag == 2) {
3394 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3395 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3396 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3397 } else
3398 #endif
3400 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3401 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3402 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3404 break;
3405 case 0x27e: /* movq xmm, ea */
3406 if (mod != 3) {
3407 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3408 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3409 } else {
3410 rm = (modrm & 7) | REX_B(s);
3411 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3412 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3414 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3415 break;
3416 case 0x7f: /* movq ea, mm */
3417 if (mod != 3) {
3418 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3419 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3420 } else {
3421 rm = (modrm & 7);
3422 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3423 offsetof(CPUX86State,fpregs[reg].mmx));
3425 break;
3426 case 0x011: /* movups */
3427 case 0x111: /* movupd */
3428 case 0x029: /* movaps */
3429 case 0x129: /* movapd */
3430 case 0x17f: /* movdqa ea, xmm */
3431 case 0x27f: /* movdqu ea, xmm */
3432 if (mod != 3) {
3433 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3434 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3435 } else {
3436 rm = (modrm & 7) | REX_B(s);
3437 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3438 offsetof(CPUX86State,xmm_regs[reg]));
3440 break;
3441 case 0x211: /* movss ea, xmm */
3442 if (mod != 3) {
3443 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3444 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3445 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3446 } else {
3447 rm = (modrm & 7) | REX_B(s);
3448 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3449 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3451 break;
3452 case 0x311: /* movsd ea, xmm */
3453 if (mod != 3) {
3454 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3455 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3456 } else {
3457 rm = (modrm & 7) | REX_B(s);
3458 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3459 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3461 break;
3462 case 0x013: /* movlps */
3463 case 0x113: /* movlpd */
3464 if (mod != 3) {
3465 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3466 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3467 } else {
3468 goto illegal_op;
3470 break;
3471 case 0x017: /* movhps */
3472 case 0x117: /* movhpd */
3473 if (mod != 3) {
3474 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3475 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3476 } else {
3477 goto illegal_op;
3479 break;
3480 case 0x71: /* shift mm, im */
3481 case 0x72:
3482 case 0x73:
3483 case 0x171: /* shift xmm, im */
3484 case 0x172:
3485 case 0x173:
3486 if (b1 >= 2) {
3487 goto illegal_op;
3489 val = ldub_code(s->pc++);
3490 if (is_xmm) {
3491 gen_op_movl_T0_im(val);
3492 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3493 gen_op_movl_T0_0();
3494 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3495 op1_offset = offsetof(CPUX86State,xmm_t0);
3496 } else {
3497 gen_op_movl_T0_im(val);
3498 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3499 gen_op_movl_T0_0();
3500 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3501 op1_offset = offsetof(CPUX86State,mmx_t0);
3503 sse_fn_pp = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
3504 if (!sse_fn_pp) {
3505 goto illegal_op;
3507 if (is_xmm) {
3508 rm = (modrm & 7) | REX_B(s);
3509 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3510 } else {
3511 rm = (modrm & 7);
3512 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3514 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3515 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3516 sse_fn_pp(cpu_ptr0, cpu_ptr1);
3517 break;
3518 case 0x050: /* movmskps */
3519 rm = (modrm & 7) | REX_B(s);
3520 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3521 offsetof(CPUX86State,xmm_regs[rm]));
3522 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
3523 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3524 gen_op_mov_reg_T0(OT_LONG, reg);
3525 break;
3526 case 0x150: /* movmskpd */
3527 rm = (modrm & 7) | REX_B(s);
3528 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3529 offsetof(CPUX86State,xmm_regs[rm]));
3530 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
3531 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3532 gen_op_mov_reg_T0(OT_LONG, reg);
3533 break;
3534 case 0x02a: /* cvtpi2ps */
3535 case 0x12a: /* cvtpi2pd */
3536 gen_helper_enter_mmx();
3537 if (mod != 3) {
3538 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3539 op2_offset = offsetof(CPUX86State,mmx_t0);
3540 gen_ldq_env_A0(s->mem_index, op2_offset);
3541 } else {
3542 rm = (modrm & 7);
3543 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3545 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3546 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3547 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3548 switch(b >> 8) {
3549 case 0x0:
3550 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
3551 break;
3552 default:
3553 case 0x1:
3554 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
3555 break;
3557 break;
3558 case 0x22a: /* cvtsi2ss */
3559 case 0x32a: /* cvtsi2sd */
3560 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3561 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3562 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3563 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3564 if (ot == OT_LONG) {
3565 SSEFunc_0_pi sse_fn_pi = sse_op_table3ai[(b >> 8) - 2];
3566 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3567 sse_fn_pi(cpu_ptr0, cpu_tmp2_i32);
3568 } else {
3569 #ifdef TARGET_X86_64
3570 SSEFunc_0_pl sse_fn_pl = sse_op_table3aq[(b >> 8) - 2];
3571 sse_fn_pl(cpu_ptr0, cpu_T[0]);
3572 #else
3573 goto illegal_op;
3574 #endif
3576 break;
3577 case 0x02c: /* cvttps2pi */
3578 case 0x12c: /* cvttpd2pi */
3579 case 0x02d: /* cvtps2pi */
3580 case 0x12d: /* cvtpd2pi */
3581 gen_helper_enter_mmx();
3582 if (mod != 3) {
3583 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3584 op2_offset = offsetof(CPUX86State,xmm_t0);
3585 gen_ldo_env_A0(s->mem_index, op2_offset);
3586 } else {
3587 rm = (modrm & 7) | REX_B(s);
3588 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3590 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3591 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3592 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3593 switch(b) {
3594 case 0x02c:
3595 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
3596 break;
3597 case 0x12c:
3598 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
3599 break;
3600 case 0x02d:
3601 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
3602 break;
3603 case 0x12d:
3604 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
3605 break;
3607 break;
3608 case 0x22c: /* cvttss2si */
3609 case 0x32c: /* cvttsd2si */
3610 case 0x22d: /* cvtss2si */
3611 case 0x32d: /* cvtsd2si */
3612 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3613 if (mod != 3) {
3614 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3615 if ((b >> 8) & 1) {
3616 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3617 } else {
3618 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3619 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3621 op2_offset = offsetof(CPUX86State,xmm_t0);
3622 } else {
3623 rm = (modrm & 7) | REX_B(s);
3624 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3626 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3627 if (ot == OT_LONG) {
3628 SSEFunc_i_p sse_fn_i_p =
3629 sse_op_table3bi[(b >> 8) - 2 + (b & 1) * 2];
3630 sse_fn_i_p(cpu_tmp2_i32, cpu_ptr0);
3631 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3632 } else {
3633 #ifdef TARGET_X86_64
3634 SSEFunc_l_p sse_fn_l_p =
3635 sse_op_table3bq[(b >> 8) - 2 + (b & 1) * 2];
3636 sse_fn_l_p(cpu_T[0], cpu_ptr0);
3637 #else
3638 goto illegal_op;
3639 #endif
3641 gen_op_mov_reg_T0(ot, reg);
3642 break;
3643 case 0xc4: /* pinsrw */
3644 case 0x1c4:
3645 s->rip_offset = 1;
3646 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3647 val = ldub_code(s->pc++);
3648 if (b1) {
3649 val &= 7;
3650 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3651 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3652 } else {
3653 val &= 3;
3654 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3655 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3657 break;
3658 case 0xc5: /* pextrw */
3659 case 0x1c5:
3660 if (mod != 3)
3661 goto illegal_op;
3662 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3663 val = ldub_code(s->pc++);
3664 if (b1) {
3665 val &= 7;
3666 rm = (modrm & 7) | REX_B(s);
3667 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3668 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3669 } else {
3670 val &= 3;
3671 rm = (modrm & 7);
3672 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3673 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3675 reg = ((modrm >> 3) & 7) | rex_r;
3676 gen_op_mov_reg_T0(ot, reg);
3677 break;
3678 case 0x1d6: /* movq ea, xmm */
3679 if (mod != 3) {
3680 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3681 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3682 } else {
3683 rm = (modrm & 7) | REX_B(s);
3684 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3685 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3686 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3688 break;
3689 case 0x2d6: /* movq2dq */
3690 gen_helper_enter_mmx();
3691 rm = (modrm & 7);
3692 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3693 offsetof(CPUX86State,fpregs[rm].mmx));
3694 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3695 break;
3696 case 0x3d6: /* movdq2q */
3697 gen_helper_enter_mmx();
3698 rm = (modrm & 7) | REX_B(s);
3699 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3700 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3701 break;
3702 case 0xd7: /* pmovmskb */
3703 case 0x1d7:
3704 if (mod != 3)
3705 goto illegal_op;
3706 if (b1) {
3707 rm = (modrm & 7) | REX_B(s);
3708 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3709 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
3710 } else {
3711 rm = (modrm & 7);
3712 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3713 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
3715 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3716 reg = ((modrm >> 3) & 7) | rex_r;
3717 gen_op_mov_reg_T0(OT_LONG, reg);
3718 break;
3719 case 0x138:
3720 if (s->prefix & PREFIX_REPNZ)
3721 goto crc32;
3722 case 0x038:
3723 b = modrm;
3724 modrm = ldub_code(s->pc++);
3725 rm = modrm & 7;
3726 reg = ((modrm >> 3) & 7) | rex_r;
3727 mod = (modrm >> 6) & 3;
3728 if (b1 >= 2) {
3729 goto illegal_op;
3732 sse_fn_pp = sse_op_table6[b].op[b1];
3733 if (!sse_fn_pp) {
3734 goto illegal_op;
3736 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3737 goto illegal_op;
3739 if (b1) {
3740 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3741 if (mod == 3) {
3742 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3743 } else {
3744 op2_offset = offsetof(CPUX86State,xmm_t0);
3745 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3746 switch (b) {
3747 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3748 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3749 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3750 gen_ldq_env_A0(s->mem_index, op2_offset +
3751 offsetof(XMMReg, XMM_Q(0)));
3752 break;
3753 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3754 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3755 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3756 (s->mem_index >> 2) - 1);
3757 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3758 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3759 offsetof(XMMReg, XMM_L(0)));
3760 break;
3761 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3762 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3763 (s->mem_index >> 2) - 1);
3764 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3765 offsetof(XMMReg, XMM_W(0)));
3766 break;
3767 case 0x2a: /* movntdqa */
3768 gen_ldo_env_A0(s->mem_index, op1_offset);
3769 return;
3770 default:
3771 gen_ldo_env_A0(s->mem_index, op2_offset);
3774 } else {
3775 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3776 if (mod == 3) {
3777 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3778 } else {
3779 op2_offset = offsetof(CPUX86State,mmx_t0);
3780 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3781 gen_ldq_env_A0(s->mem_index, op2_offset);
3784 if (sse_fn_pp == SSE_SPECIAL) {
3785 goto illegal_op;
3788 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3789 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3790 sse_fn_pp(cpu_ptr0, cpu_ptr1);
3792 if (b == 0x17)
3793 s->cc_op = CC_OP_EFLAGS;
3794 break;
3795 case 0x338: /* crc32 */
3796 crc32:
3797 b = modrm;
3798 modrm = ldub_code(s->pc++);
3799 reg = ((modrm >> 3) & 7) | rex_r;
3801 if (b != 0xf0 && b != 0xf1)
3802 goto illegal_op;
3803 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3804 goto illegal_op;
3806 if (b == 0xf0)
3807 ot = OT_BYTE;
3808 else if (b == 0xf1 && s->dflag != 2)
3809 if (s->prefix & PREFIX_DATA)
3810 ot = OT_WORD;
3811 else
3812 ot = OT_LONG;
3813 else
3814 ot = OT_QUAD;
3816 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3817 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3818 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3819 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3820 cpu_T[0], tcg_const_i32(8 << ot));
3822 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3823 gen_op_mov_reg_T0(ot, reg);
3824 break;
3825 case 0x03a:
3826 case 0x13a:
3827 b = modrm;
3828 modrm = ldub_code(s->pc++);
3829 rm = modrm & 7;
3830 reg = ((modrm >> 3) & 7) | rex_r;
3831 mod = (modrm >> 6) & 3;
3832 if (b1 >= 2) {
3833 goto illegal_op;
3836 sse_fn_ppi = sse_op_table7[b].op[b1];
3837 if (!sse_fn_ppi) {
3838 goto illegal_op;
3840 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3841 goto illegal_op;
3843 if (sse_fn_ppi == SSE_SPECIAL) {
3844 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3845 rm = (modrm & 7) | REX_B(s);
3846 if (mod != 3)
3847 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3848 reg = ((modrm >> 3) & 7) | rex_r;
3849 val = ldub_code(s->pc++);
3850 switch (b) {
3851 case 0x14: /* pextrb */
3852 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3853 xmm_regs[reg].XMM_B(val & 15)));
3854 if (mod == 3)
3855 gen_op_mov_reg_T0(ot, rm);
3856 else
3857 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3858 (s->mem_index >> 2) - 1);
3859 break;
3860 case 0x15: /* pextrw */
3861 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3862 xmm_regs[reg].XMM_W(val & 7)));
3863 if (mod == 3)
3864 gen_op_mov_reg_T0(ot, rm);
3865 else
3866 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3867 (s->mem_index >> 2) - 1);
3868 break;
3869 case 0x16:
3870 if (ot == OT_LONG) { /* pextrd */
3871 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3872 offsetof(CPUX86State,
3873 xmm_regs[reg].XMM_L(val & 3)));
3874 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3875 if (mod == 3)
3876 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3877 else
3878 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3879 (s->mem_index >> 2) - 1);
3880 } else { /* pextrq */
3881 #ifdef TARGET_X86_64
3882 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3883 offsetof(CPUX86State,
3884 xmm_regs[reg].XMM_Q(val & 1)));
3885 if (mod == 3)
3886 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3887 else
3888 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3889 (s->mem_index >> 2) - 1);
3890 #else
3891 goto illegal_op;
3892 #endif
3894 break;
3895 case 0x17: /* extractps */
3896 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3897 xmm_regs[reg].XMM_L(val & 3)));
3898 if (mod == 3)
3899 gen_op_mov_reg_T0(ot, rm);
3900 else
3901 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3902 (s->mem_index >> 2) - 1);
3903 break;
3904 case 0x20: /* pinsrb */
3905 if (mod == 3)
3906 gen_op_mov_TN_reg(OT_LONG, 0, rm);
3907 else
3908 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
3909 (s->mem_index >> 2) - 1);
3910 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
3911 xmm_regs[reg].XMM_B(val & 15)));
3912 break;
3913 case 0x21: /* insertps */
3914 if (mod == 3) {
3915 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3916 offsetof(CPUX86State,xmm_regs[rm]
3917 .XMM_L((val >> 6) & 3)));
3918 } else {
3919 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3920 (s->mem_index >> 2) - 1);
3921 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3923 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3924 offsetof(CPUX86State,xmm_regs[reg]
3925 .XMM_L((val >> 4) & 3)));
3926 if ((val >> 0) & 1)
3927 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3928 cpu_env, offsetof(CPUX86State,
3929 xmm_regs[reg].XMM_L(0)));
3930 if ((val >> 1) & 1)
3931 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3932 cpu_env, offsetof(CPUX86State,
3933 xmm_regs[reg].XMM_L(1)));
3934 if ((val >> 2) & 1)
3935 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3936 cpu_env, offsetof(CPUX86State,
3937 xmm_regs[reg].XMM_L(2)));
3938 if ((val >> 3) & 1)
3939 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3940 cpu_env, offsetof(CPUX86State,
3941 xmm_regs[reg].XMM_L(3)));
3942 break;
3943 case 0x22:
3944 if (ot == OT_LONG) { /* pinsrd */
3945 if (mod == 3)
3946 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
3947 else
3948 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3949 (s->mem_index >> 2) - 1);
3950 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3951 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3952 offsetof(CPUX86State,
3953 xmm_regs[reg].XMM_L(val & 3)));
3954 } else { /* pinsrq */
3955 #ifdef TARGET_X86_64
3956 if (mod == 3)
3957 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3958 else
3959 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
3960 (s->mem_index >> 2) - 1);
3961 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3962 offsetof(CPUX86State,
3963 xmm_regs[reg].XMM_Q(val & 1)));
3964 #else
3965 goto illegal_op;
3966 #endif
3968 break;
3970 return;
3973 if (b1) {
3974 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3975 if (mod == 3) {
3976 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3977 } else {
3978 op2_offset = offsetof(CPUX86State,xmm_t0);
3979 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3980 gen_ldo_env_A0(s->mem_index, op2_offset);
3982 } else {
3983 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3984 if (mod == 3) {
3985 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3986 } else {
3987 op2_offset = offsetof(CPUX86State,mmx_t0);
3988 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3989 gen_ldq_env_A0(s->mem_index, op2_offset);
3992 val = ldub_code(s->pc++);
3994 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3995 s->cc_op = CC_OP_EFLAGS;
3997 if (s->dflag == 2)
3998 /* The helper must use entire 64-bit gp registers */
3999 val |= 1 << 8;
4002 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4003 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4004 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4005 break;
4006 default:
4007 goto illegal_op;
4009 } else {
4010 /* generic MMX or SSE operation */
4011 switch(b) {
4012 case 0x70: /* pshufx insn */
4013 case 0xc6: /* pshufx insn */
4014 case 0xc2: /* compare insns */
4015 s->rip_offset = 1;
4016 break;
4017 default:
4018 break;
4020 if (is_xmm) {
4021 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4022 if (mod != 3) {
4023 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4024 op2_offset = offsetof(CPUX86State,xmm_t0);
4025 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
4026 b == 0xc2)) {
4027 /* specific case for SSE single instructions */
4028 if (b1 == 2) {
4029 /* 32 bit access */
4030 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
4031 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4032 } else {
4033 /* 64 bit access */
4034 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4036 } else {
4037 gen_ldo_env_A0(s->mem_index, op2_offset);
4039 } else {
4040 rm = (modrm & 7) | REX_B(s);
4041 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4043 } else {
4044 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4045 if (mod != 3) {
4046 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4047 op2_offset = offsetof(CPUX86State,mmx_t0);
4048 gen_ldq_env_A0(s->mem_index, op2_offset);
4049 } else {
4050 rm = (modrm & 7);
4051 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4054 switch(b) {
4055 case 0x0f: /* 3DNow! data insns */
4056 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4057 goto illegal_op;
4058 val = ldub_code(s->pc++);
4059 sse_fn_pp = sse_op_table5[val];
4060 if (!sse_fn_pp) {
4061 goto illegal_op;
4063 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4064 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4065 sse_fn_pp(cpu_ptr0, cpu_ptr1);
4066 break;
4067 case 0x70: /* pshufx insn */
4068 case 0xc6: /* pshufx insn */
4069 val = ldub_code(s->pc++);
4070 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4071 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4072 /* XXX: introduce a new table? */
4073 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_pp;
4074 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4075 break;
4076 case 0xc2:
4077 /* compare insns */
4078 val = ldub_code(s->pc++);
4079 if (val >= 8)
4080 goto illegal_op;
4081 sse_fn_pp = sse_op_table4[val][b1];
4083 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4084 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4085 sse_fn_pp(cpu_ptr0, cpu_ptr1);
4086 break;
4087 case 0xf7:
4088 /* maskmov: we must prepare A0 (the implicit DS:rDI destination) */
4089 if (mod != 3)
4090 goto illegal_op;
4091 #ifdef TARGET_X86_64
4092 if (s->aflag == 2) {
4093 gen_op_movq_A0_reg(R_EDI);
4094 } else
4095 #endif
4097 gen_op_movl_A0_reg(R_EDI);
4098 if (s->aflag == 0)
4099 gen_op_andl_A0_ffff();
4101 gen_add_A0_ds_seg(s);
4103 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4104 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4105 /* XXX: introduce a new table? */
4106 sse_fn_ppt = (SSEFunc_0_ppt)sse_fn_pp;
4107 sse_fn_ppt(cpu_ptr0, cpu_ptr1, cpu_A0);
4108 break;
4109 default:
4110 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4111 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4112 sse_fn_pp(cpu_ptr0, cpu_ptr1);
4113 break;
4115 if (b == 0x2e || b == 0x2f) {
4116 s->cc_op = CC_OP_EFLAGS;
4121 /* convert one instruction. s->is_jmp is set if the translation must
4122 be stopped. Return the next pc value */
4123 static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
4125 int b, prefixes, aflag, dflag;
4126 int shift, ot;
4127 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4128 target_ulong next_eip, tval;
4129 int rex_w, rex_r;
4131 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
4132 tcg_gen_debug_insn_start(pc_start);
4133 s->pc = pc_start;
4134 prefixes = 0;
4135 aflag = s->code32;
4136 dflag = s->code32;
4137 s->override = -1;
4138 rex_w = -1;
4139 rex_r = 0;
4140 #ifdef TARGET_X86_64
4141 s->rex_x = 0;
4142 s->rex_b = 0;
4143 x86_64_hregs = 0;
4144 #endif
4145 s->rip_offset = 0; /* for relative ip address */
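/* Consume legacy prefixes (and REX bytes in 64-bit mode) one byte at a time
   until the opcode byte is reached. */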
4146 next_byte:
4147 b = ldub_code(s->pc);
4148 s->pc++;
4149 /* check prefixes */
4150 #ifdef TARGET_X86_64
4151 if (CODE64(s)) {
4152 switch (b) {
4153 case 0xf3:
4154 prefixes |= PREFIX_REPZ;
4155 goto next_byte;
4156 case 0xf2:
4157 prefixes |= PREFIX_REPNZ;
4158 goto next_byte;
4159 case 0xf0:
4160 prefixes |= PREFIX_LOCK;
4161 goto next_byte;
4162 case 0x2e:
4163 s->override = R_CS;
4164 goto next_byte;
4165 case 0x36:
4166 s->override = R_SS;
4167 goto next_byte;
4168 case 0x3e:
4169 s->override = R_DS;
4170 goto next_byte;
4171 case 0x26:
4172 s->override = R_ES;
4173 goto next_byte;
4174 case 0x64:
4175 s->override = R_FS;
4176 goto next_byte;
4177 case 0x65:
4178 s->override = R_GS;
4179 goto next_byte;
4180 case 0x66:
4181 prefixes |= PREFIX_DATA;
4182 goto next_byte;
4183 case 0x67:
4184 prefixes |= PREFIX_ADR;
4185 goto next_byte;
4186 case 0x40 ... 0x4f:
4187 /* REX prefix */
4188 rex_w = (b >> 3) & 1;
4189 rex_r = (b & 0x4) << 1;
4190 s->rex_x = (b & 0x2) << 2;
4191 REX_B(s) = (b & 0x1) << 3;
4192 x86_64_hregs = 1; /* select uniform byte register addressing */
4193 goto next_byte;
4194 }
4195 if (rex_w == 1) {
4196 /* 0x66 is ignored if rex.w is set */
4197 dflag = 2;
4198 } else {
4199 if (prefixes & PREFIX_DATA)
4200 dflag ^= 1;
4201 }
4202 if (!(prefixes & PREFIX_ADR))
4203 aflag = 2;
4204 } else
4205 #endif
4207 switch (b) {
4208 case 0xf3:
4209 prefixes |= PREFIX_REPZ;
4210 goto next_byte;
4211 case 0xf2:
4212 prefixes |= PREFIX_REPNZ;
4213 goto next_byte;
4214 case 0xf0:
4215 prefixes |= PREFIX_LOCK;
4216 goto next_byte;
4217 case 0x2e:
4218 s->override = R_CS;
4219 goto next_byte;
4220 case 0x36:
4221 s->override = R_SS;
4222 goto next_byte;
4223 case 0x3e:
4224 s->override = R_DS;
4225 goto next_byte;
4226 case 0x26:
4227 s->override = R_ES;
4228 goto next_byte;
4229 case 0x64:
4230 s->override = R_FS;
4231 goto next_byte;
4232 case 0x65:
4233 s->override = R_GS;
4234 goto next_byte;
4235 case 0x66:
4236 prefixes |= PREFIX_DATA;
4237 goto next_byte;
4238 case 0x67:
4239 prefixes |= PREFIX_ADR;
4240 goto next_byte;
4241 }
4242 if (prefixes & PREFIX_DATA)
4243 dflag ^= 1;
4244 if (prefixes & PREFIX_ADR)
4245 aflag ^= 1;
4246 }
4248 s->prefix = prefixes;
4249 s->aflag = aflag;
4250 s->dflag = dflag;
4252 /* lock generation */
4253 if (prefixes & PREFIX_LOCK)
4254 gen_helper_lock();
4256 /* now check op code */
4257 reswitch:
4258 switch(b) {
4259 case 0x0f:
4260 /**************************/
4261 /* extended op code */
4262 b = ldub_code(s->pc++) | 0x100;
4263 goto reswitch;
4265 /**************************/
4266 /* arith & logic */
4267 case 0x00 ... 0x05:
4268 case 0x08 ... 0x0d:
4269 case 0x10 ... 0x15:
4270 case 0x18 ... 0x1d:
4271 case 0x20 ... 0x25:
4272 case 0x28 ... 0x2d:
4273 case 0x30 ... 0x35:
4274 case 0x38 ... 0x3d:
4276 int op, f, val;
4277 op = (b >> 3) & 7;
4278 f = (b >> 1) & 3;
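/* op (opcode bits 3..5) selects add/or/adc/sbb/and/sub/xor/cmp;
   f (bits 1..2) selects the operand form: 0 = Ev,Gv  1 = Gv,Ev  2 = A,Iv */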
4280 if ((b & 1) == 0)
4281 ot = OT_BYTE;
4282 else
4283 ot = dflag + OT_WORD;
4285 switch(f) {
4286 case 0: /* OP Ev, Gv */
4287 modrm = ldub_code(s->pc++);
4288 reg = ((modrm >> 3) & 7) | rex_r;
4289 mod = (modrm >> 6) & 3;
4290 rm = (modrm & 7) | REX_B(s);
4291 if (mod != 3) {
4292 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4293 opreg = OR_TMP0;
4294 } else if (op == OP_XORL && rm == reg) {
4295 xor_zero:
4296 /* xor reg, reg optimisation */
4297 gen_op_movl_T0_0();
4298 s->cc_op = CC_OP_LOGICB + ot;
4299 gen_op_mov_reg_T0(ot, reg);
4300 gen_op_update1_cc();
4301 break;
4302 } else {
4303 opreg = rm;
4305 gen_op_mov_TN_reg(ot, 1, reg);
4306 gen_op(s, op, ot, opreg);
4307 break;
4308 case 1: /* OP Gv, Ev */
4309 modrm = ldub_code(s->pc++);
4310 mod = (modrm >> 6) & 3;
4311 reg = ((modrm >> 3) & 7) | rex_r;
4312 rm = (modrm & 7) | REX_B(s);
4313 if (mod != 3) {
4314 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4315 gen_op_ld_T1_A0(ot + s->mem_index);
4316 } else if (op == OP_XORL && rm == reg) {
4317 goto xor_zero;
4318 } else {
4319 gen_op_mov_TN_reg(ot, 1, rm);
4321 gen_op(s, op, ot, reg);
4322 break;
4323 case 2: /* OP A, Iv */
4324 val = insn_get(s, ot);
4325 gen_op_movl_T1_im(val);
4326 gen_op(s, op, ot, OR_EAX);
4327 break;
4330 break;
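/* GRP1: arith/logic with an immediate operand, the operation being selected
   by the ModRM reg field.  0x83 sign-extends an 8-bit immediate and 0x82 is
   a byte-form alias that is invalid in 64-bit mode. */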
4332 case 0x82:
4333 if (CODE64(s))
4334 goto illegal_op;
4335 case 0x80: /* GRP1 */
4336 case 0x81:
4337 case 0x83:
4339 int val;
4341 if ((b & 1) == 0)
4342 ot = OT_BYTE;
4343 else
4344 ot = dflag + OT_WORD;
4346 modrm = ldub_code(s->pc++);
4347 mod = (modrm >> 6) & 3;
4348 rm = (modrm & 7) | REX_B(s);
4349 op = (modrm >> 3) & 7;
4351 if (mod != 3) {
4352 if (b == 0x83)
4353 s->rip_offset = 1;
4354 else
4355 s->rip_offset = insn_const_size(ot);
4356 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4357 opreg = OR_TMP0;
4358 } else {
4359 opreg = rm;
4362 switch(b) {
4363 default:
4364 case 0x80:
4365 case 0x81:
4366 case 0x82:
4367 val = insn_get(s, ot);
4368 break;
4369 case 0x83:
4370 val = (int8_t)insn_get(s, OT_BYTE);
4371 break;
4373 gen_op_movl_T1_im(val);
4374 gen_op(s, op, ot, opreg);
4376 break;
4378 /**************************/
4379 /* inc, dec, and other misc arith */
4380 case 0x40 ... 0x47: /* inc Gv */
4381 ot = dflag ? OT_LONG : OT_WORD;
4382 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4383 break;
4384 case 0x48 ... 0x4f: /* dec Gv */
4385 ot = dflag ? OT_LONG : OT_WORD;
4386 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4387 break;
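/* GRP3: unary group (test/not/neg/mul/imul/div/idiv) selected by the ModRM
   reg field; the mul/div forms use the implicit AL/AX/EAX/RAX operand. */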
4388 case 0xf6: /* GRP3 */
4389 case 0xf7:
4390 if ((b & 1) == 0)
4391 ot = OT_BYTE;
4392 else
4393 ot = dflag + OT_WORD;
4395 modrm = ldub_code(s->pc++);
4396 mod = (modrm >> 6) & 3;
4397 rm = (modrm & 7) | REX_B(s);
4398 op = (modrm >> 3) & 7;
4399 if (mod != 3) {
4400 if (op == 0)
4401 s->rip_offset = insn_const_size(ot);
4402 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4403 gen_op_ld_T0_A0(ot + s->mem_index);
4404 } else {
4405 gen_op_mov_TN_reg(ot, 0, rm);
4408 switch(op) {
4409 case 0: /* test */
4410 val = insn_get(s, ot);
4411 gen_op_movl_T1_im(val);
4412 gen_op_testl_T0_T1_cc();
4413 s->cc_op = CC_OP_LOGICB + ot;
4414 break;
4415 case 2: /* not */
4416 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4417 if (mod != 3) {
4418 gen_op_st_T0_A0(ot + s->mem_index);
4419 } else {
4420 gen_op_mov_reg_T0(ot, rm);
4422 break;
4423 case 3: /* neg */
4424 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4425 if (mod != 3) {
4426 gen_op_st_T0_A0(ot + s->mem_index);
4427 } else {
4428 gen_op_mov_reg_T0(ot, rm);
4430 gen_op_update_neg_cc();
4431 s->cc_op = CC_OP_SUBB + ot;
4432 break;
4433 case 4: /* mul */
4434 switch(ot) {
4435 case OT_BYTE:
4436 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4437 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4438 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4439 /* XXX: use 32 bit mul which could be faster */
4440 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4441 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4442 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4443 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4444 s->cc_op = CC_OP_MULB;
4445 break;
4446 case OT_WORD:
4447 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4448 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4449 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4450 /* XXX: use 32 bit mul which could be faster */
4451 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4452 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4453 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4454 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4455 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4456 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4457 s->cc_op = CC_OP_MULW;
4458 break;
4459 default:
4460 case OT_LONG:
4461 #ifdef TARGET_X86_64
4462 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4463 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4464 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4465 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4466 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4467 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4468 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4469 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4470 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4471 #else
4473 TCGv_i64 t0, t1;
4474 t0 = tcg_temp_new_i64();
4475 t1 = tcg_temp_new_i64();
4476 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4477 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4478 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4479 tcg_gen_mul_i64(t0, t0, t1);
4480 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4481 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4482 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4483 tcg_gen_shri_i64(t0, t0, 32);
4484 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4485 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4486 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4488 #endif
4489 s->cc_op = CC_OP_MULL;
4490 break;
4491 #ifdef TARGET_X86_64
4492 case OT_QUAD:
4493 gen_helper_mulq_EAX_T0(cpu_T[0]);
4494 s->cc_op = CC_OP_MULQ;
4495 break;
4496 #endif
4498 break;
4499 case 5: /* imul */
4500 switch(ot) {
4501 case OT_BYTE:
4502 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4503 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4504 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4505 /* XXX: use 32 bit mul which could be faster */
4506 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4507 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4508 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4509 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4510 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4511 s->cc_op = CC_OP_MULB;
4512 break;
4513 case OT_WORD:
4514 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4515 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4516 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4517 /* XXX: use 32 bit mul which could be faster */
4518 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4519 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4520 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4521 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4522 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4523 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4524 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4525 s->cc_op = CC_OP_MULW;
4526 break;
4527 default:
4528 case OT_LONG:
4529 #ifdef TARGET_X86_64
4530 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4531 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4532 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4533 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4534 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4535 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4536 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4537 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4538 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4539 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4540 #else
4542 TCGv_i64 t0, t1;
4543 t0 = tcg_temp_new_i64();
4544 t1 = tcg_temp_new_i64();
4545 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4546 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4547 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4548 tcg_gen_mul_i64(t0, t0, t1);
4549 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4550 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4551 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4552 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4553 tcg_gen_shri_i64(t0, t0, 32);
4554 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4555 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4556 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4558 #endif
4559 s->cc_op = CC_OP_MULL;
4560 break;
4561 #ifdef TARGET_X86_64
4562 case OT_QUAD:
4563 gen_helper_imulq_EAX_T0(cpu_T[0]);
4564 s->cc_op = CC_OP_MULQ;
4565 break;
4566 #endif
4568 break;
4569 case 6: /* div */
4570 switch(ot) {
4571 case OT_BYTE:
4572 gen_jmp_im(pc_start - s->cs_base);
4573 gen_helper_divb_AL(cpu_T[0]);
4574 break;
4575 case OT_WORD:
4576 gen_jmp_im(pc_start - s->cs_base);
4577 gen_helper_divw_AX(cpu_T[0]);
4578 break;
4579 default:
4580 case OT_LONG:
4581 gen_jmp_im(pc_start - s->cs_base);
4582 gen_helper_divl_EAX(cpu_T[0]);
4583 break;
4584 #ifdef TARGET_X86_64
4585 case OT_QUAD:
4586 gen_jmp_im(pc_start - s->cs_base);
4587 gen_helper_divq_EAX(cpu_T[0]);
4588 break;
4589 #endif
4591 break;
4592 case 7: /* idiv */
4593 switch(ot) {
4594 case OT_BYTE:
4595 gen_jmp_im(pc_start - s->cs_base);
4596 gen_helper_idivb_AL(cpu_T[0]);
4597 break;
4598 case OT_WORD:
4599 gen_jmp_im(pc_start - s->cs_base);
4600 gen_helper_idivw_AX(cpu_T[0]);
4601 break;
4602 default:
4603 case OT_LONG:
4604 gen_jmp_im(pc_start - s->cs_base);
4605 gen_helper_idivl_EAX(cpu_T[0]);
4606 break;
4607 #ifdef TARGET_X86_64
4608 case OT_QUAD:
4609 gen_jmp_im(pc_start - s->cs_base);
4610 gen_helper_idivq_EAX(cpu_T[0]);
4611 break;
4612 #endif
4614 break;
4615 default:
4616 goto illegal_op;
4618 break;
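/* GRP4 (0xfe) only defines inc/dec; GRP5 (0xff) adds indirect call/jmp,
   their far variants and push, all selected by the ModRM reg field. */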
4620 case 0xfe: /* GRP4 */
4621 case 0xff: /* GRP5 */
4622 if ((b & 1) == 0)
4623 ot = OT_BYTE;
4624 else
4625 ot = dflag + OT_WORD;
4627 modrm = ldub_code(s->pc++);
4628 mod = (modrm >> 6) & 3;
4629 rm = (modrm & 7) | REX_B(s);
4630 op = (modrm >> 3) & 7;
4631 if (op >= 2 && b == 0xfe) {
4632 goto illegal_op;
4634 if (CODE64(s)) {
4635 if (op == 2 || op == 4) {
4636 /* operand size for jumps is 64 bit */
4637 ot = OT_QUAD;
4638 } else if (op == 3 || op == 5) {
4639 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4640 } else if (op == 6) {
4641 /* default push size is 64 bit */
4642 ot = dflag ? OT_QUAD : OT_WORD;
4645 if (mod != 3) {
4646 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4647 if (op >= 2 && op != 3 && op != 5)
4648 gen_op_ld_T0_A0(ot + s->mem_index);
4649 } else {
4650 gen_op_mov_TN_reg(ot, 0, rm);
4653 switch(op) {
4654 case 0: /* inc Ev */
4655 if (mod != 3)
4656 opreg = OR_TMP0;
4657 else
4658 opreg = rm;
4659 gen_inc(s, ot, opreg, 1);
4660 break;
4661 case 1: /* dec Ev */
4662 if (mod != 3)
4663 opreg = OR_TMP0;
4664 else
4665 opreg = rm;
4666 gen_inc(s, ot, opreg, -1);
4667 break;
4668 case 2: /* call Ev */
4669 /* XXX: optimize if memory (no 'and' is necessary) */
4670 if (s->dflag == 0)
4671 gen_op_andl_T0_ffff();
4672 next_eip = s->pc - s->cs_base;
4673 gen_movtl_T1_im(next_eip);
4674 gen_push_T1(s);
4675 gen_op_jmp_T0();
4676 gen_eob(s);
4677 break;
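/* far call/jmp through memory: the offset is loaded first, then the 16-bit
   selector stored just after it. */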
4678 case 3: /* lcall Ev */
4679 gen_op_ld_T1_A0(ot + s->mem_index);
4680 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4681 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4682 do_lcall:
4683 if (s->pe && !s->vm86) {
4684 if (s->cc_op != CC_OP_DYNAMIC)
4685 gen_op_set_cc_op(s->cc_op);
4686 gen_jmp_im(pc_start - s->cs_base);
4687 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4688 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
4689 tcg_const_i32(dflag),
4690 tcg_const_i32(s->pc - pc_start));
4691 } else {
4692 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4693 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
4694 tcg_const_i32(dflag),
4695 tcg_const_i32(s->pc - s->cs_base));
4697 gen_eob(s);
4698 break;
4699 case 4: /* jmp Ev */
4700 if (s->dflag == 0)
4701 gen_op_andl_T0_ffff();
4702 gen_op_jmp_T0();
4703 gen_eob(s);
4704 break;
4705 case 5: /* ljmp Ev */
4706 gen_op_ld_T1_A0(ot + s->mem_index);
4707 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4708 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4709 do_ljmp:
4710 if (s->pe && !s->vm86) {
4711 if (s->cc_op != CC_OP_DYNAMIC)
4712 gen_op_set_cc_op(s->cc_op);
4713 gen_jmp_im(pc_start - s->cs_base);
4714 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4715 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
4716 tcg_const_i32(s->pc - pc_start));
4717 } else {
4718 gen_op_movl_seg_T0_vm(R_CS);
4719 gen_op_movl_T0_T1();
4720 gen_op_jmp_T0();
4722 gen_eob(s);
4723 break;
4724 case 6: /* push Ev */
4725 gen_push_T0(s);
4726 break;
4727 default:
4728 goto illegal_op;
4730 break;
4732 case 0x84: /* test Ev, Gv */
4733 case 0x85:
4734 if ((b & 1) == 0)
4735 ot = OT_BYTE;
4736 else
4737 ot = dflag + OT_WORD;
4739 modrm = ldub_code(s->pc++);
4740 reg = ((modrm >> 3) & 7) | rex_r;
4742 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4743 gen_op_mov_TN_reg(ot, 1, reg);
4744 gen_op_testl_T0_T1_cc();
4745 s->cc_op = CC_OP_LOGICB + ot;
4746 break;
4748 case 0xa8: /* test eAX, Iv */
4749 case 0xa9:
4750 if ((b & 1) == 0)
4751 ot = OT_BYTE;
4752 else
4753 ot = dflag + OT_WORD;
4754 val = insn_get(s, ot);
4756 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4757 gen_op_movl_T1_im(val);
4758 gen_op_testl_T0_T1_cc();
4759 s->cc_op = CC_OP_LOGICB + ot;
4760 break;
4762 case 0x98: /* CWDE/CBW */
4763 #ifdef TARGET_X86_64
4764 if (dflag == 2) {
4765 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4766 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4767 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4768 } else
4769 #endif
4770 if (dflag == 1) {
4771 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4772 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4773 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4774 } else {
4775 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4776 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4777 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4779 break;
4780 case 0x99: /* CDQ/CWD */
4781 #ifdef TARGET_X86_64
4782 if (dflag == 2) {
4783 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4784 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4785 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4786 } else
4787 #endif
4788 if (dflag == 1) {
4789 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4790 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4791 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4792 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4793 } else {
4794 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4795 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4796 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4797 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4799 break;
4800 case 0x1af: /* imul Gv, Ev */
4801 case 0x69: /* imul Gv, Ev, I */
4802 case 0x6b:
4803 ot = dflag + OT_WORD;
4804 modrm = ldub_code(s->pc++);
4805 reg = ((modrm >> 3) & 7) | rex_r;
4806 if (b == 0x69)
4807 s->rip_offset = insn_const_size(ot);
4808 else if (b == 0x6b)
4809 s->rip_offset = 1;
4810 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4811 if (b == 0x69) {
4812 val = insn_get(s, ot);
4813 gen_op_movl_T1_im(val);
4814 } else if (b == 0x6b) {
4815 val = (int8_t)insn_get(s, OT_BYTE);
4816 gen_op_movl_T1_im(val);
4817 } else {
4818 gen_op_mov_TN_reg(ot, 1, reg);
4821 #ifdef TARGET_X86_64
4822 if (ot == OT_QUAD) {
4823 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
4824 } else
4825 #endif
4826 if (ot == OT_LONG) {
4827 #ifdef TARGET_X86_64
4828 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4829 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4830 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4831 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4832 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4833 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4834 #else
4836 TCGv_i64 t0, t1;
4837 t0 = tcg_temp_new_i64();
4838 t1 = tcg_temp_new_i64();
4839 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4840 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4841 tcg_gen_mul_i64(t0, t0, t1);
4842 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4843 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4844 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4845 tcg_gen_shri_i64(t0, t0, 32);
4846 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4847 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4849 #endif
4850 } else {
4851 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4852 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4853 /* XXX: use 32 bit mul which could be faster */
4854 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4855 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4856 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4857 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4859 gen_op_mov_reg_T0(ot, reg);
4860 s->cc_op = CC_OP_MULB + ot;
4861 break;
4862 case 0x1c0:
4863 case 0x1c1: /* xadd Ev, Gv */
4864 if ((b & 1) == 0)
4865 ot = OT_BYTE;
4866 else
4867 ot = dflag + OT_WORD;
4868 modrm = ldub_code(s->pc++);
4869 reg = ((modrm >> 3) & 7) | rex_r;
4870 mod = (modrm >> 6) & 3;
4871 if (mod == 3) {
4872 rm = (modrm & 7) | REX_B(s);
4873 gen_op_mov_TN_reg(ot, 0, reg);
4874 gen_op_mov_TN_reg(ot, 1, rm);
4875 gen_op_addl_T0_T1();
4876 gen_op_mov_reg_T1(ot, reg);
4877 gen_op_mov_reg_T0(ot, rm);
4878 } else {
4879 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4880 gen_op_mov_TN_reg(ot, 0, reg);
4881 gen_op_ld_T1_A0(ot + s->mem_index);
4882 gen_op_addl_T0_T1();
4883 gen_op_st_T0_A0(ot + s->mem_index);
4884 gen_op_mov_reg_T1(ot, reg);
4886 gen_op_update2_cc();
4887 s->cc_op = CC_OP_ADDB + ot;
4888 break;
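/* cmpxchg: if EAX equals the destination, the source register is stored;
   otherwise the destination value is loaded into EAX.  The flags are those
   of the EAX - destination comparison. */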
4889 case 0x1b0:
4890 case 0x1b1: /* cmpxchg Ev, Gv */
4892 int label1, label2;
4893 TCGv t0, t1, t2, a0;
4895 if ((b & 1) == 0)
4896 ot = OT_BYTE;
4897 else
4898 ot = dflag + OT_WORD;
4899 modrm = ldub_code(s->pc++);
4900 reg = ((modrm >> 3) & 7) | rex_r;
4901 mod = (modrm >> 6) & 3;
4902 t0 = tcg_temp_local_new();
4903 t1 = tcg_temp_local_new();
4904 t2 = tcg_temp_local_new();
4905 a0 = tcg_temp_local_new();
4906 gen_op_mov_v_reg(ot, t1, reg);
4907 if (mod == 3) {
4908 rm = (modrm & 7) | REX_B(s);
4909 gen_op_mov_v_reg(ot, t0, rm);
4910 } else {
4911 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4912 tcg_gen_mov_tl(a0, cpu_A0);
4913 gen_op_ld_v(ot + s->mem_index, t0, a0);
4914 rm = 0; /* avoid warning */
4916 label1 = gen_new_label();
4917 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
4918 gen_extu(ot, t2);
4919 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4920 label2 = gen_new_label();
4921 if (mod == 3) {
4922 gen_op_mov_reg_v(ot, R_EAX, t0);
4923 tcg_gen_br(label2);
4924 gen_set_label(label1);
4925 gen_op_mov_reg_v(ot, rm, t1);
4926 } else {
4927 /* perform no-op store cycle like physical cpu; must be
4928 before changing accumulator to ensure idempotency if
4929 the store faults and the instruction is restarted */
4930 gen_op_st_v(ot + s->mem_index, t0, a0);
4931 gen_op_mov_reg_v(ot, R_EAX, t0);
4932 tcg_gen_br(label2);
4933 gen_set_label(label1);
4934 gen_op_st_v(ot + s->mem_index, t1, a0);
4936 gen_set_label(label2);
4937 tcg_gen_mov_tl(cpu_cc_src, t0);
4938 tcg_gen_mov_tl(cpu_cc_dst, t2);
4939 s->cc_op = CC_OP_SUBB + ot;
4940 tcg_temp_free(t0);
4941 tcg_temp_free(t1);
4942 tcg_temp_free(t2);
4943 tcg_temp_free(a0);
4945 break;
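/* cmpxchg8b/cmpxchg16b: 64/128-bit compare-and-swap against EDX:EAX
   (RDX:RAX), done entirely in a helper; requires a memory operand with
   ModRM reg field == 1. */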
4946 case 0x1c7: /* cmpxchg8b */
4947 modrm = ldub_code(s->pc++);
4948 mod = (modrm >> 6) & 3;
4949 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4950 goto illegal_op;
4951 #ifdef TARGET_X86_64
4952 if (dflag == 2) {
4953 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4954 goto illegal_op;
4955 gen_jmp_im(pc_start - s->cs_base);
4956 if (s->cc_op != CC_OP_DYNAMIC)
4957 gen_op_set_cc_op(s->cc_op);
4958 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4959 gen_helper_cmpxchg16b(cpu_A0);
4960 } else
4961 #endif
4963 if (!(s->cpuid_features & CPUID_CX8))
4964 goto illegal_op;
4965 gen_jmp_im(pc_start - s->cs_base);
4966 if (s->cc_op != CC_OP_DYNAMIC)
4967 gen_op_set_cc_op(s->cc_op);
4968 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4969 gen_helper_cmpxchg8b(cpu_A0);
4971 s->cc_op = CC_OP_EFLAGS;
4972 break;
4974 /**************************/
4975 /* push/pop */
4976 case 0x50 ... 0x57: /* push */
4977 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4978 gen_push_T0(s);
4979 break;
4980 case 0x58 ... 0x5f: /* pop */
4981 if (CODE64(s)) {
4982 ot = dflag ? OT_QUAD : OT_WORD;
4983 } else {
4984 ot = dflag + OT_WORD;
4986 gen_pop_T0(s);
4987 /* NOTE: order is important for pop %sp */
4988 gen_pop_update(s);
4989 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
4990 break;
4991 case 0x60: /* pusha */
4992 if (CODE64(s))
4993 goto illegal_op;
4994 gen_pusha(s);
4995 break;
4996 case 0x61: /* popa */
4997 if (CODE64(s))
4998 goto illegal_op;
4999 gen_popa(s);
5000 break;
5001 case 0x68: /* push Iv */
5002 case 0x6a:
5003 if (CODE64(s)) {
5004 ot = dflag ? OT_QUAD : OT_WORD;
5005 } else {
5006 ot = dflag + OT_WORD;
5008 if (b == 0x68)
5009 val = insn_get(s, ot);
5010 else
5011 val = (int8_t)insn_get(s, OT_BYTE);
5012 gen_op_movl_T0_im(val);
5013 gen_push_T0(s);
5014 break;
5015 case 0x8f: /* pop Ev */
5016 if (CODE64(s)) {
5017 ot = dflag ? OT_QUAD : OT_WORD;
5018 } else {
5019 ot = dflag + OT_WORD;
5021 modrm = ldub_code(s->pc++);
5022 mod = (modrm >> 6) & 3;
5023 gen_pop_T0(s);
5024 if (mod == 3) {
5025 /* NOTE: order is important for pop %sp */
5026 gen_pop_update(s);
5027 rm = (modrm & 7) | REX_B(s);
5028 gen_op_mov_reg_T0(ot, rm);
5029 } else {
5030 /* NOTE: order is important too for MMU exceptions */
5031 s->popl_esp_hack = 1 << ot;
5032 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5033 s->popl_esp_hack = 0;
5034 gen_pop_update(s);
5036 break;
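/* enter: a 16-bit immediate frame size followed by an 8-bit nesting level */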
5037 case 0xc8: /* enter */
5039 int level;
5040 val = lduw_code(s->pc);
5041 s->pc += 2;
5042 level = ldub_code(s->pc++);
5043 gen_enter(s, val, level);
5045 break;
5046 case 0xc9: /* leave */
5047 /* XXX: exception not precise (ESP is updated before potential exception) */
5048 if (CODE64(s)) {
5049 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5050 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5051 } else if (s->ss32) {
5052 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5053 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5054 } else {
5055 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5056 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5058 gen_pop_T0(s);
5059 if (CODE64(s)) {
5060 ot = dflag ? OT_QUAD : OT_WORD;
5061 } else {
5062 ot = dflag + OT_WORD;
5064 gen_op_mov_reg_T0(ot, R_EBP);
5065 gen_pop_update(s);
5066 break;
5067 case 0x06: /* push es */
5068 case 0x0e: /* push cs */
5069 case 0x16: /* push ss */
5070 case 0x1e: /* push ds */
5071 if (CODE64(s))
5072 goto illegal_op;
5073 gen_op_movl_T0_seg(b >> 3);
5074 gen_push_T0(s);
5075 break;
5076 case 0x1a0: /* push fs */
5077 case 0x1a8: /* push gs */
5078 gen_op_movl_T0_seg((b >> 3) & 7);
5079 gen_push_T0(s);
5080 break;
5081 case 0x07: /* pop es */
5082 case 0x17: /* pop ss */
5083 case 0x1f: /* pop ds */
5084 if (CODE64(s))
5085 goto illegal_op;
5086 reg = b >> 3;
5087 gen_pop_T0(s);
5088 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5089 gen_pop_update(s);
5090 if (reg == R_SS) {
5091 /* if reg == SS, inhibit interrupts/trace. */
5092 /* If several instructions disable interrupts, only the
5093 _first_ does it */
5094 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5095 gen_helper_set_inhibit_irq();
5096 s->tf = 0;
5098 if (s->is_jmp) {
5099 gen_jmp_im(s->pc - s->cs_base);
5100 gen_eob(s);
5102 break;
5103 case 0x1a1: /* pop fs */
5104 case 0x1a9: /* pop gs */
5105 gen_pop_T0(s);
5106 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5107 gen_pop_update(s);
5108 if (s->is_jmp) {
5109 gen_jmp_im(s->pc - s->cs_base);
5110 gen_eob(s);
5112 break;
5114 /**************************/
5115 /* mov */
5116 case 0x88:
5117 case 0x89: /* mov Gv, Ev */
5118 if ((b & 1) == 0)
5119 ot = OT_BYTE;
5120 else
5121 ot = dflag + OT_WORD;
5122 modrm = ldub_code(s->pc++);
5123 reg = ((modrm >> 3) & 7) | rex_r;
5125 /* generate a generic store */
5126 gen_ldst_modrm(s, modrm, ot, reg, 1);
5127 break;
5128 case 0xc6:
5129 case 0xc7: /* mov Ev, Iv */
5130 if ((b & 1) == 0)
5131 ot = OT_BYTE;
5132 else
5133 ot = dflag + OT_WORD;
5134 modrm = ldub_code(s->pc++);
5135 mod = (modrm >> 6) & 3;
5136 if (mod != 3) {
5137 s->rip_offset = insn_const_size(ot);
5138 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5140 val = insn_get(s, ot);
5141 gen_op_movl_T0_im(val);
5142 if (mod != 3)
5143 gen_op_st_T0_A0(ot + s->mem_index);
5144 else
5145 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5146 break;
5147 case 0x8a:
5148 case 0x8b: /* mov Ev, Gv */
5149 if ((b & 1) == 0)
5150 ot = OT_BYTE;
5151 else
5152 ot = OT_WORD + dflag;
5153 modrm = ldub_code(s->pc++);
5154 reg = ((modrm >> 3) & 7) | rex_r;
5156 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5157 gen_op_mov_reg_T0(ot, reg);
5158 break;
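/* mov to a segment register: CS is not a valid destination; loading SS
   additionally inhibits interrupts and single-step for one instruction. */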
5159 case 0x8e: /* mov seg, Gv */
5160 modrm = ldub_code(s->pc++);
5161 reg = (modrm >> 3) & 7;
5162 if (reg >= 6 || reg == R_CS)
5163 goto illegal_op;
5164 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5165 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5166 if (reg == R_SS) {
5167 /* if reg == SS, inhibit interrupts/trace */
5168 /* If several instructions disable interrupts, only the
5169 _first_ does it */
5170 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5171 gen_helper_set_inhibit_irq();
5172 s->tf = 0;
5174 if (s->is_jmp) {
5175 gen_jmp_im(s->pc - s->cs_base);
5176 gen_eob(s);
5178 break;
5179 case 0x8c: /* mov Gv, seg */
5180 modrm = ldub_code(s->pc++);
5181 reg = (modrm >> 3) & 7;
5182 mod = (modrm >> 6) & 3;
5183 if (reg >= 6)
5184 goto illegal_op;
5185 gen_op_movl_T0_seg(reg);
5186 if (mod == 3)
5187 ot = OT_WORD + dflag;
5188 else
5189 ot = OT_WORD;
5190 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5191 break;
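/* movzx/movsx: opcode bit 0 gives the source width (byte/word) and bit 3
   selects sign extension; the destination uses the full operand size. */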
5193 case 0x1b6: /* movzbS Gv, Eb */
5194 case 0x1b7: /* movzwS Gv, Eb */
5195 case 0x1be: /* movsbS Gv, Eb */
5196 case 0x1bf: /* movswS Gv, Eb */
5198 int d_ot;
5199 /* d_ot is the size of destination */
5200 d_ot = dflag + OT_WORD;
5201 /* ot is the size of source */
5202 ot = (b & 1) + OT_BYTE;
5203 modrm = ldub_code(s->pc++);
5204 reg = ((modrm >> 3) & 7) | rex_r;
5205 mod = (modrm >> 6) & 3;
5206 rm = (modrm & 7) | REX_B(s);
5208 if (mod == 3) {
5209 gen_op_mov_TN_reg(ot, 0, rm);
5210 switch(ot | (b & 8)) {
5211 case OT_BYTE:
5212 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5213 break;
5214 case OT_BYTE | 8:
5215 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5216 break;
5217 case OT_WORD:
5218 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5219 break;
5220 default:
5221 case OT_WORD | 8:
5222 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5223 break;
5225 gen_op_mov_reg_T0(d_ot, reg);
5226 } else {
5227 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5228 if (b & 8) {
5229 gen_op_lds_T0_A0(ot + s->mem_index);
5230 } else {
5231 gen_op_ldu_T0_A0(ot + s->mem_index);
5233 gen_op_mov_reg_T0(d_ot, reg);
5236 break;
5238 case 0x8d: /* lea */
5239 ot = dflag + OT_WORD;
5240 modrm = ldub_code(s->pc++);
5241 mod = (modrm >> 6) & 3;
5242 if (mod == 3)
5243 goto illegal_op;
5244 reg = ((modrm >> 3) & 7) | rex_r;
5245 /* we must ensure that no segment is added */
5246 s->override = -1;
5247 val = s->addseg;
5248 s->addseg = 0;
5249 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5250 s->addseg = val;
5251 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5252 break;
5254 case 0xa0: /* mov EAX, Ov */
5255 case 0xa1:
5256 case 0xa2: /* mov Ov, EAX */
5257 case 0xa3:
5259 target_ulong offset_addr;
5261 if ((b & 1) == 0)
5262 ot = OT_BYTE;
5263 else
5264 ot = dflag + OT_WORD;
5265 #ifdef TARGET_X86_64
5266 if (s->aflag == 2) {
5267 offset_addr = ldq_code(s->pc);
5268 s->pc += 8;
5269 gen_op_movq_A0_im(offset_addr);
5270 } else
5271 #endif
5273 if (s->aflag) {
5274 offset_addr = insn_get(s, OT_LONG);
5275 } else {
5276 offset_addr = insn_get(s, OT_WORD);
5278 gen_op_movl_A0_im(offset_addr);
5280 gen_add_A0_ds_seg(s);
5281 if ((b & 2) == 0) {
5282 gen_op_ld_T0_A0(ot + s->mem_index);
5283 gen_op_mov_reg_T0(ot, R_EAX);
5284 } else {
5285 gen_op_mov_TN_reg(ot, 0, R_EAX);
5286 gen_op_st_T0_A0(ot + s->mem_index);
5289 break;
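/* xlat: AL is loaded from [rBX + unsigned AL] in the DS segment
   (subject to a segment override). */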
5290 case 0xd7: /* xlat */
5291 #ifdef TARGET_X86_64
5292 if (s->aflag == 2) {
5293 gen_op_movq_A0_reg(R_EBX);
5294 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5295 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5296 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5297 } else
5298 #endif
5300 gen_op_movl_A0_reg(R_EBX);
5301 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5302 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5303 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5304 if (s->aflag == 0)
5305 gen_op_andl_A0_ffff();
5306 else
5307 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5309 gen_add_A0_ds_seg(s);
5310 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5311 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5312 break;
5313 case 0xb0 ... 0xb7: /* mov R, Ib */
5314 val = insn_get(s, OT_BYTE);
5315 gen_op_movl_T0_im(val);
5316 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5317 break;
5318 case 0xb8 ... 0xbf: /* mov R, Iv */
5319 #ifdef TARGET_X86_64
5320 if (dflag == 2) {
5321 uint64_t tmp;
5322 /* 64 bit case */
5323 tmp = ldq_code(s->pc);
5324 s->pc += 8;
5325 reg = (b & 7) | REX_B(s);
5326 gen_movtl_T0_im(tmp);
5327 gen_op_mov_reg_T0(OT_QUAD, reg);
5328 } else
5329 #endif
5331 ot = dflag ? OT_LONG : OT_WORD;
5332 val = insn_get(s, ot);
5333 reg = (b & 7) | REX_B(s);
5334 gen_op_movl_T0_im(val);
5335 gen_op_mov_reg_T0(ot, reg);
5337 break;
5339 case 0x91 ... 0x97: /* xchg R, EAX */
5340 do_xchg_reg_eax:
5341 ot = dflag + OT_WORD;
5342 reg = (b & 7) | REX_B(s);
5343 rm = R_EAX;
5344 goto do_xchg_reg;
5345 case 0x86:
5346 case 0x87: /* xchg Ev, Gv */
5347 if ((b & 1) == 0)
5348 ot = OT_BYTE;
5349 else
5350 ot = dflag + OT_WORD;
5351 modrm = ldub_code(s->pc++);
5352 reg = ((modrm >> 3) & 7) | rex_r;
5353 mod = (modrm >> 6) & 3;
5354 if (mod == 3) {
5355 rm = (modrm & 7) | REX_B(s);
5356 do_xchg_reg:
5357 gen_op_mov_TN_reg(ot, 0, reg);
5358 gen_op_mov_TN_reg(ot, 1, rm);
5359 gen_op_mov_reg_T0(ot, rm);
5360 gen_op_mov_reg_T1(ot, reg);
5361 } else {
5362 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5363 gen_op_mov_TN_reg(ot, 0, reg);
5364 /* for xchg, lock is implicit */
5365 if (!(prefixes & PREFIX_LOCK))
5366 gen_helper_lock();
5367 gen_op_ld_T1_A0(ot + s->mem_index);
5368 gen_op_st_T0_A0(ot + s->mem_index);
5369 if (!(prefixes & PREFIX_LOCK))
5370 gen_helper_unlock();
5371 gen_op_mov_reg_T1(ot, reg);
5373 break;
5374 case 0xc4: /* les Gv */
5375 if (CODE64(s))
5376 goto illegal_op;
5377 op = R_ES;
5378 goto do_lxx;
5379 case 0xc5: /* lds Gv */
5380 if (CODE64(s))
5381 goto illegal_op;
5382 op = R_DS;
5383 goto do_lxx;
5384 case 0x1b2: /* lss Gv */
5385 op = R_SS;
5386 goto do_lxx;
5387 case 0x1b4: /* lfs Gv */
5388 op = R_FS;
5389 goto do_lxx;
5390 case 0x1b5: /* lgs Gv */
5391 op = R_GS;
5392 do_lxx:
5393 ot = dflag ? OT_LONG : OT_WORD;
5394 modrm = ldub_code(s->pc++);
5395 reg = ((modrm >> 3) & 7) | rex_r;
5396 mod = (modrm >> 6) & 3;
5397 if (mod == 3)
5398 goto illegal_op;
5399 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5400 gen_op_ld_T1_A0(ot + s->mem_index);
5401 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5402 /* load the segment first to handle exceptions properly */
5403 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5404 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5405 /* then put the data */
5406 gen_op_mov_reg_T1(ot, reg);
5407 if (s->is_jmp) {
5408 gen_jmp_im(s->pc - s->cs_base);
5409 gen_eob(s);
5411 break;
5413 /************************/
5414 /* shifts */
5415 case 0xc0:
5416 case 0xc1:
5417 /* shift Ev,Ib */
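/* 'shift' encodes the count source for the grp2 shifts:
   0 = CL, 1 = the constant 1, 2 = an immediate byte. */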
5418 shift = 2;
5419 grp2:
5421 if ((b & 1) == 0)
5422 ot = OT_BYTE;
5423 else
5424 ot = dflag + OT_WORD;
5426 modrm = ldub_code(s->pc++);
5427 mod = (modrm >> 6) & 3;
5428 op = (modrm >> 3) & 7;
5430 if (mod != 3) {
5431 if (shift == 2) {
5432 s->rip_offset = 1;
5434 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5435 opreg = OR_TMP0;
5436 } else {
5437 opreg = (modrm & 7) | REX_B(s);
5440 /* simpler op */
5441 if (shift == 0) {
5442 gen_shift(s, op, ot, opreg, OR_ECX);
5443 } else {
5444 if (shift == 2) {
5445 shift = ldub_code(s->pc++);
5447 gen_shifti(s, op, ot, opreg, shift);
5450 break;
5451 case 0xd0:
5452 case 0xd1:
5453 /* shift Ev,1 */
5454 shift = 1;
5455 goto grp2;
5456 case 0xd2:
5457 case 0xd3:
5458 /* shift Ev,cl */
5459 shift = 0;
5460 goto grp2;
5462 case 0x1a4: /* shld imm */
5463 op = 0;
5464 shift = 1;
5465 goto do_shiftd;
5466 case 0x1a5: /* shld cl */
5467 op = 0;
5468 shift = 0;
5469 goto do_shiftd;
5470 case 0x1ac: /* shrd imm */
5471 op = 1;
5472 shift = 1;
5473 goto do_shiftd;
5474 case 0x1ad: /* shrd cl */
5475 op = 1;
5476 shift = 0;
5477 do_shiftd:
5478 ot = dflag + OT_WORD;
5479 modrm = ldub_code(s->pc++);
5480 mod = (modrm >> 6) & 3;
5481 rm = (modrm & 7) | REX_B(s);
5482 reg = ((modrm >> 3) & 7) | rex_r;
5483 if (mod != 3) {
5484 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5485 opreg = OR_TMP0;
5486 } else {
5487 opreg = rm;
5489 gen_op_mov_TN_reg(ot, 1, reg);
5491 if (shift) {
5492 val = ldub_code(s->pc++);
5493 tcg_gen_movi_tl(cpu_T3, val);
5494 } else {
5495 tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
5497 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5498 break;
5500 /************************/
5501 /* floats */
5502 case 0xd8 ... 0xdf:
5503 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5504 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5505 /* XXX: what to do if illegal op? */
5506 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5507 break;
5509 modrm = ldub_code(s->pc++);
5510 mod = (modrm >> 6) & 3;
5511 rm = modrm & 7;
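/* combine the low 3 bits of the opcode (0xd8..0xdf) with the ModRM reg
   field into a 6-bit FPU operation index used by the switches below. */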
5512 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5513 if (mod != 3) {
5514 /* memory op */
5515 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5516 switch(op) {
5517 case 0x00 ... 0x07: /* fxxxs */
5518 case 0x10 ... 0x17: /* fixxxl */
5519 case 0x20 ... 0x27: /* fxxxl */
5520 case 0x30 ... 0x37: /* fixxx */
5522 int op1;
5523 op1 = op & 7;
5525 switch(op >> 4) {
5526 case 0:
5527 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5528 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5529 gen_helper_flds_FT0(cpu_tmp2_i32);
5530 break;
5531 case 1:
5532 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5533 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5534 gen_helper_fildl_FT0(cpu_tmp2_i32);
5535 break;
5536 case 2:
5537 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5538 (s->mem_index >> 2) - 1);
5539 gen_helper_fldl_FT0(cpu_tmp1_i64);
5540 break;
5541 case 3:
5542 default:
5543 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5544 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5545 gen_helper_fildl_FT0(cpu_tmp2_i32);
5546 break;
5549 gen_helper_fp_arith_ST0_FT0(op1);
5550 if (op1 == 3) {
5551 /* fcomp needs pop */
5552 gen_helper_fpop();
5555 break;
5556 case 0x08: /* flds */
5557 case 0x0a: /* fsts */
5558 case 0x0b: /* fstps */
5559 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5560 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5561 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5562 switch(op & 7) {
5563 case 0:
5564 switch(op >> 4) {
5565 case 0:
5566 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5567 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5568 gen_helper_flds_ST0(cpu_tmp2_i32);
5569 break;
5570 case 1:
5571 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5572 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5573 gen_helper_fildl_ST0(cpu_tmp2_i32);
5574 break;
5575 case 2:
5576 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5577 (s->mem_index >> 2) - 1);
5578 gen_helper_fldl_ST0(cpu_tmp1_i64);
5579 break;
5580 case 3:
5581 default:
5582 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5583 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5584 gen_helper_fildl_ST0(cpu_tmp2_i32);
5585 break;
5587 break;
5588 case 1:
5589 /* XXX: the corresponding CPUID bit must be tested! */
5590 switch(op >> 4) {
5591 case 1:
5592 gen_helper_fisttl_ST0(cpu_tmp2_i32);
5593 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5594 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5595 break;
5596 case 2:
5597 gen_helper_fisttll_ST0(cpu_tmp1_i64);
5598 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5599 (s->mem_index >> 2) - 1);
5600 break;
5601 case 3:
5602 default:
5603 gen_helper_fistt_ST0(cpu_tmp2_i32);
5604 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5605 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5606 break;
5608 gen_helper_fpop();
5609 break;
5610 default:
5611 switch(op >> 4) {
5612 case 0:
5613 gen_helper_fsts_ST0(cpu_tmp2_i32);
5614 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5615 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5616 break;
5617 case 1:
5618 gen_helper_fistl_ST0(cpu_tmp2_i32);
5619 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5620 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5621 break;
5622 case 2:
5623 gen_helper_fstl_ST0(cpu_tmp1_i64);
5624 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5625 (s->mem_index >> 2) - 1);
5626 break;
5627 case 3:
5628 default:
5629 gen_helper_fist_ST0(cpu_tmp2_i32);
5630 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5631 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5632 break;
5634 if ((op & 7) == 3)
5635 gen_helper_fpop();
5636 break;
5638 break;
5639 case 0x0c: /* fldenv mem */
5640 if (s->cc_op != CC_OP_DYNAMIC)
5641 gen_op_set_cc_op(s->cc_op);
5642 gen_jmp_im(pc_start - s->cs_base);
5643 gen_helper_fldenv(
5644 cpu_A0, tcg_const_i32(s->dflag));
5645 break;
5646 case 0x0d: /* fldcw mem */
5647 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5648 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5649 gen_helper_fldcw(cpu_tmp2_i32);
5650 break;
5651 case 0x0e: /* fnstenv mem */
5652 if (s->cc_op != CC_OP_DYNAMIC)
5653 gen_op_set_cc_op(s->cc_op);
5654 gen_jmp_im(pc_start - s->cs_base);
5655 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
5656 break;
5657 case 0x0f: /* fnstcw mem */
5658 gen_helper_fnstcw(cpu_tmp2_i32);
5659 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5660 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5661 break;
5662 case 0x1d: /* fldt mem */
5663 if (s->cc_op != CC_OP_DYNAMIC)
5664 gen_op_set_cc_op(s->cc_op);
5665 gen_jmp_im(pc_start - s->cs_base);
5666 gen_helper_fldt_ST0(cpu_A0);
5667 break;
5668 case 0x1f: /* fstpt mem */
5669 if (s->cc_op != CC_OP_DYNAMIC)
5670 gen_op_set_cc_op(s->cc_op);
5671 gen_jmp_im(pc_start - s->cs_base);
5672 gen_helper_fstt_ST0(cpu_A0);
5673 gen_helper_fpop();
5674 break;
5675 case 0x2c: /* frstor mem */
5676 if (s->cc_op != CC_OP_DYNAMIC)
5677 gen_op_set_cc_op(s->cc_op);
5678 gen_jmp_im(pc_start - s->cs_base);
5679 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
5680 break;
5681 case 0x2e: /* fnsave mem */
5682 if (s->cc_op != CC_OP_DYNAMIC)
5683 gen_op_set_cc_op(s->cc_op);
5684 gen_jmp_im(pc_start - s->cs_base);
5685 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
5686 break;
5687 case 0x2f: /* fnstsw mem */
5688 gen_helper_fnstsw(cpu_tmp2_i32);
5689 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5690 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5691 break;
5692 case 0x3c: /* fbld */
5693 if (s->cc_op != CC_OP_DYNAMIC)
5694 gen_op_set_cc_op(s->cc_op);
5695 gen_jmp_im(pc_start - s->cs_base);
5696 gen_helper_fbld_ST0(cpu_A0);
5697 break;
5698 case 0x3e: /* fbstp */
5699 if (s->cc_op != CC_OP_DYNAMIC)
5700 gen_op_set_cc_op(s->cc_op);
5701 gen_jmp_im(pc_start - s->cs_base);
5702 gen_helper_fbst_ST0(cpu_A0);
5703 gen_helper_fpop();
5704 break;
5705 case 0x3d: /* fildll */
5706 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5707 (s->mem_index >> 2) - 1);
5708 gen_helper_fildll_ST0(cpu_tmp1_i64);
5709 break;
5710 case 0x3f: /* fistpll */
5711 gen_helper_fistll_ST0(cpu_tmp1_i64);
5712 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5713 (s->mem_index >> 2) - 1);
5714 gen_helper_fpop();
5715 break;
5716 default:
5717 goto illegal_op;
5719 } else {
5720 /* register float ops */
5721 opreg = rm;
5723 switch(op) {
5724 case 0x08: /* fld sti */
5725 gen_helper_fpush();
5726 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
5727 break;
5728 case 0x09: /* fxchg sti */
5729 case 0x29: /* fxchg4 sti, undocumented op */
5730 case 0x39: /* fxchg7 sti, undocumented op */
5731 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
5732 break;
5733 case 0x0a: /* grp d9/2 */
5734 switch(rm) {
5735 case 0: /* fnop */
5736 /* check exceptions (FreeBSD FPU probe) */
5737 if (s->cc_op != CC_OP_DYNAMIC)
5738 gen_op_set_cc_op(s->cc_op);
5739 gen_jmp_im(pc_start - s->cs_base);
5740 gen_helper_fwait();
5741 break;
5742 default:
5743 goto illegal_op;
5745 break;
5746 case 0x0c: /* grp d9/4 */
5747 switch(rm) {
5748 case 0: /* fchs */
5749 gen_helper_fchs_ST0();
5750 break;
5751 case 1: /* fabs */
5752 gen_helper_fabs_ST0();
5753 break;
5754 case 4: /* ftst */
5755 gen_helper_fldz_FT0();
5756 gen_helper_fcom_ST0_FT0();
5757 break;
5758 case 5: /* fxam */
5759 gen_helper_fxam_ST0();
5760 break;
5761 default:
5762 goto illegal_op;
5764 break;
5765 case 0x0d: /* grp d9/5 */
5767 switch(rm) {
5768 case 0:
5769 gen_helper_fpush();
5770 gen_helper_fld1_ST0();
5771 break;
5772 case 1:
5773 gen_helper_fpush();
5774 gen_helper_fldl2t_ST0();
5775 break;
5776 case 2:
5777 gen_helper_fpush();
5778 gen_helper_fldl2e_ST0();
5779 break;
5780 case 3:
5781 gen_helper_fpush();
5782 gen_helper_fldpi_ST0();
5783 break;
5784 case 4:
5785 gen_helper_fpush();
5786 gen_helper_fldlg2_ST0();
5787 break;
5788 case 5:
5789 gen_helper_fpush();
5790 gen_helper_fldln2_ST0();
5791 break;
5792 case 6:
5793 gen_helper_fpush();
5794 gen_helper_fldz_ST0();
5795 break;
5796 default:
5797 goto illegal_op;
5800 break;
5801 case 0x0e: /* grp d9/6 */
5802 switch(rm) {
5803 case 0: /* f2xm1 */
5804 gen_helper_f2xm1();
5805 break;
5806 case 1: /* fyl2x */
5807 gen_helper_fyl2x();
5808 break;
5809 case 2: /* fptan */
5810 gen_helper_fptan();
5811 break;
5812 case 3: /* fpatan */
5813 gen_helper_fpatan();
5814 break;
5815 case 4: /* fxtract */
5816 gen_helper_fxtract();
5817 break;
5818 case 5: /* fprem1 */
5819 gen_helper_fprem1();
5820 break;
5821 case 6: /* fdecstp */
5822 gen_helper_fdecstp();
5823 break;
5824 default:
5825 case 7: /* fincstp */
5826 gen_helper_fincstp();
5827 break;
5829 break;
5830 case 0x0f: /* grp d9/7 */
5831 switch(rm) {
5832 case 0: /* fprem */
5833 gen_helper_fprem();
5834 break;
5835 case 1: /* fyl2xp1 */
5836 gen_helper_fyl2xp1();
5837 break;
5838 case 2: /* fsqrt */
5839 gen_helper_fsqrt();
5840 break;
5841 case 3: /* fsincos */
5842 gen_helper_fsincos();
5843 break;
5844 case 5: /* fscale */
5845 gen_helper_fscale();
5846 break;
5847 case 4: /* frndint */
5848 gen_helper_frndint();
5849 break;
5850 case 6: /* fsin */
5851 gen_helper_fsin();
5852 break;
5853 default:
5854 case 7: /* fcos */
5855 gen_helper_fcos();
5856 break;
5858 break;
5859 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5860 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5861 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5863 int op1;
5865 op1 = op & 7;
5866 if (op >= 0x20) {
5867 gen_helper_fp_arith_STN_ST0(op1, opreg);
5868 if (op >= 0x30)
5869 gen_helper_fpop();
5870 } else {
5871 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5872 gen_helper_fp_arith_ST0_FT0(op1);
5875 break;
5876 case 0x02: /* fcom */
5877 case 0x22: /* fcom2, undocumented op */
5878 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5879 gen_helper_fcom_ST0_FT0();
5880 break;
5881 case 0x03: /* fcomp */
5882 case 0x23: /* fcomp3, undocumented op */
5883 case 0x32: /* fcomp5, undocumented op */
5884 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5885 gen_helper_fcom_ST0_FT0();
5886 gen_helper_fpop();
5887 break;
5888 case 0x15: /* da/5 */
5889 switch(rm) {
5890 case 1: /* fucompp */
5891 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5892 gen_helper_fucom_ST0_FT0();
5893 gen_helper_fpop();
5894 gen_helper_fpop();
5895 break;
5896 default:
5897 goto illegal_op;
5899 break;
5900 case 0x1c:
5901 switch(rm) {
5902 case 0: /* feni (287 only, just do nop here) */
5903 break;
5904 case 1: /* fdisi (287 only, just do nop here) */
5905 break;
5906 case 2: /* fclex */
5907 gen_helper_fclex();
5908 break;
5909 case 3: /* fninit */
5910 gen_helper_fninit();
5911 break;
5912 case 4: /* fsetpm (287 only, just do nop here) */
5913 break;
5914 default:
5915 goto illegal_op;
5917 break;
5918 case 0x1d: /* fucomi */
5919 if (s->cc_op != CC_OP_DYNAMIC)
5920 gen_op_set_cc_op(s->cc_op);
5921 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5922 gen_helper_fucomi_ST0_FT0();
5923 s->cc_op = CC_OP_EFLAGS;
5924 break;
5925 case 0x1e: /* fcomi */
5926 if (s->cc_op != CC_OP_DYNAMIC)
5927 gen_op_set_cc_op(s->cc_op);
5928 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5929 gen_helper_fcomi_ST0_FT0();
5930 s->cc_op = CC_OP_EFLAGS;
5931 break;
5932 case 0x28: /* ffree sti */
5933 gen_helper_ffree_STN(tcg_const_i32(opreg));
5934 break;
5935 case 0x2a: /* fst sti */
5936 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5937 break;
5938 case 0x2b: /* fstp sti */
5939 case 0x0b: /* fstp1 sti, undocumented op */
5940 case 0x3a: /* fstp8 sti, undocumented op */
5941 case 0x3b: /* fstp9 sti, undocumented op */
5942 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5943 gen_helper_fpop();
5944 break;
5945 case 0x2c: /* fucom st(i) */
5946 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5947 gen_helper_fucom_ST0_FT0();
5948 break;
5949 case 0x2d: /* fucomp st(i) */
5950 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5951 gen_helper_fucom_ST0_FT0();
5952 gen_helper_fpop();
5953 break;
5954 case 0x33: /* de/3 */
5955 switch(rm) {
5956 case 1: /* fcompp */
5957 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5958 gen_helper_fcom_ST0_FT0();
5959 gen_helper_fpop();
5960 gen_helper_fpop();
5961 break;
5962 default:
5963 goto illegal_op;
5965 break;
5966 case 0x38: /* ffreep sti, undocumented op */
5967 gen_helper_ffree_STN(tcg_const_i32(opreg));
5968 gen_helper_fpop();
5969 break;
5970 case 0x3c: /* df/4 */
5971 switch(rm) {
5972 case 0:
5973 gen_helper_fnstsw(cpu_tmp2_i32);
5974 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5975 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5976 break;
5977 default:
5978 goto illegal_op;
5980 break;
5981 case 0x3d: /* fucomip */
5982 if (s->cc_op != CC_OP_DYNAMIC)
5983 gen_op_set_cc_op(s->cc_op);
5984 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5985 gen_helper_fucomi_ST0_FT0();
5986 gen_helper_fpop();
5987 s->cc_op = CC_OP_EFLAGS;
5988 break;
5989 case 0x3e: /* fcomip */
5990 if (s->cc_op != CC_OP_DYNAMIC)
5991 gen_op_set_cc_op(s->cc_op);
5992 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5993 gen_helper_fcomi_ST0_FT0();
5994 gen_helper_fpop();
5995 s->cc_op = CC_OP_EFLAGS;
5996 break;
5997 case 0x10 ... 0x13: /* fcmovxx */
5998 case 0x18 ... 0x1b:
6000 int op1, l1;
6001 static const uint8_t fcmov_cc[8] = {
6002 (JCC_B << 1),
6003 (JCC_Z << 1),
6004 (JCC_BE << 1),
6005 (JCC_P << 1),
6007 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6008 l1 = gen_new_label();
6009 gen_jcc1(s, s->cc_op, op1, l1);
6010 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
6011 gen_set_label(l1);
6013 break;
6014 default:
6015 goto illegal_op;
6018 break;
6019 /************************/
6020 /* string ops */
6022 case 0xa4: /* movsS */
6023 case 0xa5:
6024 if ((b & 1) == 0)
6025 ot = OT_BYTE;
6026 else
6027 ot = dflag + OT_WORD;
6029 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6030 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6031 } else {
6032 gen_movs(s, ot);
6034 break;
6036 case 0xaa: /* stosS */
6037 case 0xab:
6038 if ((b & 1) == 0)
6039 ot = OT_BYTE;
6040 else
6041 ot = dflag + OT_WORD;
6043 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6044 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6045 } else {
6046 gen_stos(s, ot);
6048 break;
6049 case 0xac: /* lodsS */
6050 case 0xad:
6051 if ((b & 1) == 0)
6052 ot = OT_BYTE;
6053 else
6054 ot = dflag + OT_WORD;
6055 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6056 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6057 } else {
6058 gen_lods(s, ot);
6060 break;
6061 case 0xae: /* scasS */
6062 case 0xaf:
6063 if ((b & 1) == 0)
6064 ot = OT_BYTE;
6065 else
6066 ot = dflag + OT_WORD;
6067 if (prefixes & PREFIX_REPNZ) {
6068 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6069 } else if (prefixes & PREFIX_REPZ) {
6070 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6071 } else {
6072 gen_scas(s, ot);
6073 s->cc_op = CC_OP_SUBB + ot;
6075 break;
6077 case 0xa6: /* cmpsS */
6078 case 0xa7:
6079 if ((b & 1) == 0)
6080 ot = OT_BYTE;
6081 else
6082 ot = dflag + OT_WORD;
6083 if (prefixes & PREFIX_REPNZ) {
6084 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6085 } else if (prefixes & PREFIX_REPZ) {
6086 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6087 } else {
6088 gen_cmps(s, ot);
6089 s->cc_op = CC_OP_SUBB + ot;
6091 break;
6092 case 0x6c: /* insS */
6093 case 0x6d:
6094 if ((b & 1) == 0)
6095 ot = OT_BYTE;
6096 else
6097 ot = dflag ? OT_LONG : OT_WORD;
6098 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6099 gen_op_andl_T0_ffff();
6100 gen_check_io(s, ot, pc_start - s->cs_base,
6101 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6102 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6103 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6104 } else {
6105 gen_ins(s, ot);
6106 if (use_icount) {
6107 gen_jmp(s, s->pc - s->cs_base);
6110 break;
6111 case 0x6e: /* outsS */
6112 case 0x6f:
6113 if ((b & 1) == 0)
6114 ot = OT_BYTE;
6115 else
6116 ot = dflag ? OT_LONG : OT_WORD;
6117 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6118 gen_op_andl_T0_ffff();
6119 gen_check_io(s, ot, pc_start - s->cs_base,
6120 svm_is_rep(prefixes) | 4);
6121 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6122 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6123 } else {
6124 gen_outs(s, ot);
6125 if (use_icount) {
6126 gen_jmp(s, s->pc - s->cs_base);
6129 break;
6131 /************************/
6132 /* port I/O */
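/* when icount is enabled, the I/O access is bracketed by gen_io_start()/
   gen_io_end() and the TB is ended with a jump so the executed-instruction
   count stays exact. */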
6134 case 0xe4:
6135 case 0xe5:
6136 if ((b & 1) == 0)
6137 ot = OT_BYTE;
6138 else
6139 ot = dflag ? OT_LONG : OT_WORD;
6140 val = ldub_code(s->pc++);
6141 gen_op_movl_T0_im(val);
6142 gen_check_io(s, ot, pc_start - s->cs_base,
6143 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6144 if (use_icount)
6145 gen_io_start();
6146 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6147 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6148 gen_op_mov_reg_T1(ot, R_EAX);
6149 if (use_icount) {
6150 gen_io_end();
6151 gen_jmp(s, s->pc - s->cs_base);
6153 break;
6154 case 0xe6:
6155 case 0xe7:
6156 if ((b & 1) == 0)
6157 ot = OT_BYTE;
6158 else
6159 ot = dflag ? OT_LONG : OT_WORD;
6160 val = ldub_code(s->pc++);
6161 gen_op_movl_T0_im(val);
6162 gen_check_io(s, ot, pc_start - s->cs_base,
6163 svm_is_rep(prefixes));
6164 gen_op_mov_TN_reg(ot, 1, R_EAX);
6166 if (use_icount)
6167 gen_io_start();
6168 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6169 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6170 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6171 if (use_icount) {
6172 gen_io_end();
6173 gen_jmp(s, s->pc - s->cs_base);
6175 break;
6176 case 0xec:
6177 case 0xed:
6178 if ((b & 1) == 0)
6179 ot = OT_BYTE;
6180 else
6181 ot = dflag ? OT_LONG : OT_WORD;
6182 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6183 gen_op_andl_T0_ffff();
6184 gen_check_io(s, ot, pc_start - s->cs_base,
6185 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6186 if (use_icount)
6187 gen_io_start();
6188 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6189 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6190 gen_op_mov_reg_T1(ot, R_EAX);
6191 if (use_icount) {
6192 gen_io_end();
6193 gen_jmp(s, s->pc - s->cs_base);
6195 break;
6196 case 0xee:
6197 case 0xef:
6198 if ((b & 1) == 0)
6199 ot = OT_BYTE;
6200 else
6201 ot = dflag ? OT_LONG : OT_WORD;
6202 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6203 gen_op_andl_T0_ffff();
6204 gen_check_io(s, ot, pc_start - s->cs_base,
6205 svm_is_rep(prefixes));
6206 gen_op_mov_TN_reg(ot, 1, R_EAX);
6208 if (use_icount)
6209 gen_io_start();
6210 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6211 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6212 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6213 if (use_icount) {
6214 gen_io_end();
6215 gen_jmp(s, s->pc - s->cs_base);
6217 break;
6219 /************************/
6220 /* control */
6221 case 0xc2: /* ret im */
6222 val = ldsw_code(s->pc);
6223 s->pc += 2;
6224 gen_pop_T0(s);
6225 if (CODE64(s) && s->dflag)
6226 s->dflag = 2;
6227 gen_stack_update(s, val + (2 << s->dflag));
6228 if (s->dflag == 0)
6229 gen_op_andl_T0_ffff();
6230 gen_op_jmp_T0();
6231 gen_eob(s);
6232 break;
6233 case 0xc3: /* ret */
6234 gen_pop_T0(s);
6235 gen_pop_update(s);
6236 if (s->dflag == 0)
6237 gen_op_andl_T0_ffff();
6238 gen_op_jmp_T0();
6239 gen_eob(s);
6240 break;
6241 case 0xca: /* lret im */
6242 val = ldsw_code(s->pc);
6243 s->pc += 2;
6244 do_lret:
6245 if (s->pe && !s->vm86) {
6246 if (s->cc_op != CC_OP_DYNAMIC)
6247 gen_op_set_cc_op(s->cc_op);
6248 gen_jmp_im(pc_start - s->cs_base);
6249 gen_helper_lret_protected(tcg_const_i32(s->dflag),
6250 tcg_const_i32(val));
6251 } else {
6252 gen_stack_A0(s);
6253 /* pop offset */
6254 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6255 if (s->dflag == 0)
6256 gen_op_andl_T0_ffff();
6257 /* NOTE: keeping EIP updated is not a problem in case of
6258 exception */
6259 gen_op_jmp_T0();
6260 /* pop selector */
6261 gen_op_addl_A0_im(2 << s->dflag);
6262 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6263 gen_op_movl_seg_T0_vm(R_CS);
6264 /* add stack offset */
6265 gen_stack_update(s, val + (4 << s->dflag));
6267 gen_eob(s);
6268 break;
6269 case 0xcb: /* lret */
6270 val = 0;
6271 goto do_lret;
6272 case 0xcf: /* iret */
6273 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6274 if (!s->pe) {
6275 /* real mode */
6276 gen_helper_iret_real(tcg_const_i32(s->dflag));
6277 s->cc_op = CC_OP_EFLAGS;
6278 } else if (s->vm86) {
6279 if (s->iopl != 3) {
6280 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6281 } else {
6282 gen_helper_iret_real(tcg_const_i32(s->dflag));
6283 s->cc_op = CC_OP_EFLAGS;
6285 } else {
6286 if (s->cc_op != CC_OP_DYNAMIC)
6287 gen_op_set_cc_op(s->cc_op);
6288 gen_jmp_im(pc_start - s->cs_base);
6289 gen_helper_iret_protected(tcg_const_i32(s->dflag),
6290 tcg_const_i32(s->pc - s->cs_base));
6291 s->cc_op = CC_OP_EFLAGS;
6293 gen_eob(s);
6294 break;
6295 case 0xe8: /* call im */
6297 if (dflag)
6298 tval = (int32_t)insn_get(s, OT_LONG);
6299 else
6300 tval = (int16_t)insn_get(s, OT_WORD);
6301 next_eip = s->pc - s->cs_base;
6302 tval += next_eip;
6303 if (s->dflag == 0)
6304 tval &= 0xffff;
6305 else if(!CODE64(s))
6306 tval &= 0xffffffff;
6307 gen_movtl_T0_im(next_eip);
6308 gen_push_T0(s);
6309 gen_jmp(s, tval);
6311 break;
6312 case 0x9a: /* lcall im */
6314 unsigned int selector, offset;
6316 if (CODE64(s))
6317 goto illegal_op;
6318 ot = dflag ? OT_LONG : OT_WORD;
6319 offset = insn_get(s, ot);
6320 selector = insn_get(s, OT_WORD);
6322 gen_op_movl_T0_im(selector);
6323 gen_op_movl_T1_imu(offset);
6325 goto do_lcall;
6326 case 0xe9: /* jmp im */
6327 if (dflag)
6328 tval = (int32_t)insn_get(s, OT_LONG);
6329 else
6330 tval = (int16_t)insn_get(s, OT_WORD);
6331 tval += s->pc - s->cs_base;
6332 if (s->dflag == 0)
6333 tval &= 0xffff;
6334 else if(!CODE64(s))
6335 tval &= 0xffffffff;
6336 gen_jmp(s, tval);
6337 break;
6338 case 0xea: /* ljmp im */
6340 unsigned int selector, offset;
6342 if (CODE64(s))
6343 goto illegal_op;
6344 ot = dflag ? OT_LONG : OT_WORD;
6345 offset = insn_get(s, ot);
6346 selector = insn_get(s, OT_WORD);
6348 gen_op_movl_T0_im(selector);
6349 gen_op_movl_T1_imu(offset);
6351 goto do_ljmp;
6352 case 0xeb: /* jmp Jb */
6353 tval = (int8_t)insn_get(s, OT_BYTE);
6354 tval += s->pc - s->cs_base;
6355 if (s->dflag == 0)
6356 tval &= 0xffff;
6357 gen_jmp(s, tval);
6358 break;
6359 case 0x70 ... 0x7f: /* jcc Jb */
6360 tval = (int8_t)insn_get(s, OT_BYTE);
6361 goto do_jcc;
6362 case 0x180 ... 0x18f: /* jcc Jv */
6363 if (dflag) {
6364 tval = (int32_t)insn_get(s, OT_LONG);
6365 } else {
6366 tval = (int16_t)insn_get(s, OT_WORD);
6368 do_jcc:
6369 next_eip = s->pc - s->cs_base;
6370 tval += next_eip;
6371 if (s->dflag == 0)
6372 tval &= 0xffff;
6373 gen_jcc(s, b, tval, next_eip);
6374 break;
6376 case 0x190 ... 0x19f: /* setcc Gv */
6377 modrm = ldub_code(s->pc++);
6378 gen_setcc(s, b);
6379 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6380 break;
6381 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6383 int l1;
6384 TCGv t0;
6386 ot = dflag + OT_WORD;
6387 modrm = ldub_code(s->pc++);
6388 reg = ((modrm >> 3) & 7) | rex_r;
6389 mod = (modrm >> 6) & 3;
6390 t0 = tcg_temp_local_new();
6391 if (mod != 3) {
6392 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6393 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6394 } else {
6395 rm = (modrm & 7) | REX_B(s);
6396 gen_op_mov_v_reg(ot, t0, rm);
6398 #ifdef TARGET_X86_64
6399 if (ot == OT_LONG) {
6400 /* XXX: specific Intel behaviour ? */
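/* the 32-bit destination is zero-extended below even when the
   condition is false, hence the question above about whether every
   vendor behaves this way */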
6401 l1 = gen_new_label();
6402 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6403 tcg_gen_mov_tl(cpu_regs[reg], t0);
6404 gen_set_label(l1);
6405 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
6406 } else
6407 #endif
6409 l1 = gen_new_label();
6410 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6411 gen_op_mov_reg_v(ot, reg, t0);
6412 gen_set_label(l1);
6414 tcg_temp_free(t0);
6416 break;
6418 /************************/
6419 /* flags */
6420 case 0x9c: /* pushf */
6421 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6422 if (s->vm86 && s->iopl != 3) {
6423 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6424 } else {
6425 if (s->cc_op != CC_OP_DYNAMIC)
6426 gen_op_set_cc_op(s->cc_op);
6427 gen_helper_read_eflags(cpu_T[0]);
6428 gen_push_T0(s);
6430 break;
6431 case 0x9d: /* popf */
6432 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6433 if (s->vm86 && s->iopl != 3) {
6434 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6435 } else {
6436 gen_pop_T0(s);
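/* which EFLAGS bits popf may write depends on privilege: at CPL 0
   both IOPL and IF can change, at CPL <= IOPL only IF can change,
   otherwise neither; TF, AC, ID and NT are always writable, and the
   16-bit form truncates the mask to the low word */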
6437 if (s->cpl == 0) {
6438 if (s->dflag) {
6439 gen_helper_write_eflags(cpu_T[0],
6440 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
6441 } else {
6442 gen_helper_write_eflags(cpu_T[0],
6443 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
6445 } else {
6446 if (s->cpl <= s->iopl) {
6447 if (s->dflag) {
6448 gen_helper_write_eflags(cpu_T[0],
6449 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
6450 } else {
6451 gen_helper_write_eflags(cpu_T[0],
6452 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
6454 } else {
6455 if (s->dflag) {
6456 gen_helper_write_eflags(cpu_T[0],
6457 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
6458 } else {
6459 gen_helper_write_eflags(cpu_T[0],
6460 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
6464 gen_pop_update(s);
6465 s->cc_op = CC_OP_EFLAGS;
6466 /* abort translation because TF flag may change */
6467 gen_jmp_im(s->pc - s->cs_base);
6468 gen_eob(s);
6470 break;
6471 case 0x9e: /* sahf */
6472 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6473 goto illegal_op;
6474 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6475 if (s->cc_op != CC_OP_DYNAMIC)
6476 gen_op_set_cc_op(s->cc_op);
6477 gen_compute_eflags(cpu_cc_src);
6478 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6479 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6480 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6481 s->cc_op = CC_OP_EFLAGS;
6482 break;
6483 case 0x9f: /* lahf */
6484 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6485 goto illegal_op;
6486 if (s->cc_op != CC_OP_DYNAMIC)
6487 gen_op_set_cc_op(s->cc_op);
6488 gen_compute_eflags(cpu_T[0]);
6489 /* Note: gen_compute_eflags() only gives the condition codes */
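/* bit 1 of EFLAGS always reads as 1, so it is or'ed in before the
   low byte (SF:ZF:0:AF:0:PF:1:CF) is copied into AH */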
6490 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6491 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6492 break;
6493 case 0xf5: /* cmc */
6494 if (s->cc_op != CC_OP_DYNAMIC)
6495 gen_op_set_cc_op(s->cc_op);
6496 gen_compute_eflags(cpu_cc_src);
6497 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6498 s->cc_op = CC_OP_EFLAGS;
6499 break;
6500 case 0xf8: /* clc */
6501 if (s->cc_op != CC_OP_DYNAMIC)
6502 gen_op_set_cc_op(s->cc_op);
6503 gen_compute_eflags(cpu_cc_src);
6504 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6505 s->cc_op = CC_OP_EFLAGS;
6506 break;
6507 case 0xf9: /* stc */
6508 if (s->cc_op != CC_OP_DYNAMIC)
6509 gen_op_set_cc_op(s->cc_op);
6510 gen_compute_eflags(cpu_cc_src);
6511 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6512 s->cc_op = CC_OP_EFLAGS;
6513 break;
6514 case 0xfc: /* cld */
6515 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6516 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6517 break;
6518 case 0xfd: /* std */
6519 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6520 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6521 break;
6523 /************************/
6524 /* bit operations */
6525 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6526 ot = dflag + OT_WORD;
6527 modrm = ldub_code(s->pc++);
6528 op = (modrm >> 3) & 7;
6529 mod = (modrm >> 6) & 3;
6530 rm = (modrm & 7) | REX_B(s);
6531 if (mod != 3) {
6532 s->rip_offset = 1;
6533 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6534 gen_op_ld_T0_A0(ot + s->mem_index);
6535 } else {
6536 gen_op_mov_TN_reg(ot, 0, rm);
6538 /* load shift */
6539 val = ldub_code(s->pc++);
6540 gen_op_movl_T1_im(val);
6541 if (op < 4)
6542 goto illegal_op;
6543 op -= 4;
6544 goto bt_op;
6545 case 0x1a3: /* bt Gv, Ev */
6546 op = 0;
6547 goto do_btx;
6548 case 0x1ab: /* bts */
6549 op = 1;
6550 goto do_btx;
6551 case 0x1b3: /* btr */
6552 op = 2;
6553 goto do_btx;
6554 case 0x1bb: /* btc */
6555 op = 3;
6556 do_btx:
6557 ot = dflag + OT_WORD;
6558 modrm = ldub_code(s->pc++);
6559 reg = ((modrm >> 3) & 7) | rex_r;
6560 mod = (modrm >> 6) & 3;
6561 rm = (modrm & 7) | REX_B(s);
6562 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6563 if (mod != 3) {
6564 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6565 /* specific case: we need to add a displacement */
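/* the sign-extended bit index in T1 may fall outside the addressed
   unit, so A0 is advanced by (bit_index >> (3 + ot)) << ot bytes,
   i.e. whole operand-sized units; the remaining low bits are masked
   off at bt_op below */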
6566 gen_exts(ot, cpu_T[1]);
6567 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6568 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6569 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6570 gen_op_ld_T0_A0(ot + s->mem_index);
6571 } else {
6572 gen_op_mov_TN_reg(ot, 0, rm);
6574 bt_op:
6575 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
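/* the bit offset was just reduced modulo the operand width; bt only
   shifts the selected bit into cpu_cc_src, while bts/btr/btc keep the
   original bit in cpu_tmp4 and then set, clear or toggle it in T0
   with a single-bit mask */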
6576 switch(op) {
6577 case 0:
6578 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6579 tcg_gen_movi_tl(cpu_cc_dst, 0);
6580 break;
6581 case 1:
6582 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6583 tcg_gen_movi_tl(cpu_tmp0, 1);
6584 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6585 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6586 break;
6587 case 2:
6588 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6589 tcg_gen_movi_tl(cpu_tmp0, 1);
6590 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6591 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6592 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6593 break;
6594 default:
6595 case 3:
6596 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6597 tcg_gen_movi_tl(cpu_tmp0, 1);
6598 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6599 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6600 break;
6602 s->cc_op = CC_OP_SARB + ot;
6603 if (op != 0) {
6604 if (mod != 3)
6605 gen_op_st_T0_A0(ot + s->mem_index);
6606 else
6607 gen_op_mov_reg_T0(ot, rm);
6608 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6609 tcg_gen_movi_tl(cpu_cc_dst, 0);
6611 break;
6612 case 0x1bc: /* bsf */
6613 case 0x1bd: /* bsr */
6615 int label1;
6616 TCGv t0;
6618 ot = dflag + OT_WORD;
6619 modrm = ldub_code(s->pc++);
6620 reg = ((modrm >> 3) & 7) | rex_r;
6621 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6622 gen_extu(ot, cpu_T[0]);
6623 t0 = tcg_temp_local_new();
6624 tcg_gen_mov_tl(t0, cpu_T[0]);
6625 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6626 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
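/* with a REPZ (F3) prefix and the ABM feature, the bsr opcode is
   treated as lzcnt; the helper takes the operand width in bits so
   the zero-input case is well defined */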
6627 switch(ot) {
6628 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6629 tcg_const_i32(16)); break;
6630 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6631 tcg_const_i32(32)); break;
6632 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6633 tcg_const_i32(64)); break;
6635 gen_op_mov_reg_T0(ot, reg);
6636 } else {
6637 label1 = gen_new_label();
6638 tcg_gen_movi_tl(cpu_cc_dst, 0);
6639 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6640 if (b & 1) {
6641 gen_helper_bsr(cpu_T[0], t0);
6642 } else {
6643 gen_helper_bsf(cpu_T[0], t0);
6645 gen_op_mov_reg_T0(ot, reg);
6646 tcg_gen_movi_tl(cpu_cc_dst, 1);
6647 gen_set_label(label1);
6648 tcg_gen_discard_tl(cpu_cc_src);
6649 s->cc_op = CC_OP_LOGICB + ot;
6651 tcg_temp_free(t0);
6653 break;
6654 /************************/
6655 /* bcd */
6656 case 0x27: /* daa */
6657 if (CODE64(s))
6658 goto illegal_op;
6659 if (s->cc_op != CC_OP_DYNAMIC)
6660 gen_op_set_cc_op(s->cc_op);
6661 gen_helper_daa();
6662 s->cc_op = CC_OP_EFLAGS;
6663 break;
6664 case 0x2f: /* das */
6665 if (CODE64(s))
6666 goto illegal_op;
6667 if (s->cc_op != CC_OP_DYNAMIC)
6668 gen_op_set_cc_op(s->cc_op);
6669 gen_helper_das();
6670 s->cc_op = CC_OP_EFLAGS;
6671 break;
6672 case 0x37: /* aaa */
6673 if (CODE64(s))
6674 goto illegal_op;
6675 if (s->cc_op != CC_OP_DYNAMIC)
6676 gen_op_set_cc_op(s->cc_op);
6677 gen_helper_aaa();
6678 s->cc_op = CC_OP_EFLAGS;
6679 break;
6680 case 0x3f: /* aas */
6681 if (CODE64(s))
6682 goto illegal_op;
6683 if (s->cc_op != CC_OP_DYNAMIC)
6684 gen_op_set_cc_op(s->cc_op);
6685 gen_helper_aas();
6686 s->cc_op = CC_OP_EFLAGS;
6687 break;
6688 case 0xd4: /* aam */
6689 if (CODE64(s))
6690 goto illegal_op;
6691 val = ldub_code(s->pc++);
6692 if (val == 0) {
6693 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6694 } else {
6695 gen_helper_aam(tcg_const_i32(val));
6696 s->cc_op = CC_OP_LOGICB;
6698 break;
6699 case 0xd5: /* aad */
6700 if (CODE64(s))
6701 goto illegal_op;
6702 val = ldub_code(s->pc++);
6703 gen_helper_aad(tcg_const_i32(val));
6704 s->cc_op = CC_OP_LOGICB;
6705 break;
6706 /************************/
6707 /* misc */
6708 case 0x90: /* nop */
6709 /* XXX: correct lock test for all insn */
6710 if (prefixes & PREFIX_LOCK) {
6711 goto illegal_op;
6713 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6714 if (REX_B(s)) {
6715 goto do_xchg_reg_eax;
6717 if (prefixes & PREFIX_REPZ) {
6718 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6720 break;
6721 case 0x9b: /* fwait */
6722 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6723 (HF_MP_MASK | HF_TS_MASK)) {
6724 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6725 } else {
6726 if (s->cc_op != CC_OP_DYNAMIC)
6727 gen_op_set_cc_op(s->cc_op);
6728 gen_jmp_im(pc_start - s->cs_base);
6729 gen_helper_fwait();
6731 break;
6732 case 0xcc: /* int3 */
6733 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6734 break;
6735 case 0xcd: /* int N */
6736 val = ldub_code(s->pc++);
6737 if (s->vm86 && s->iopl != 3) {
6738 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6739 } else {
6740 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6742 break;
6743 case 0xce: /* into */
6744 if (CODE64(s))
6745 goto illegal_op;
6746 if (s->cc_op != CC_OP_DYNAMIC)
6747 gen_op_set_cc_op(s->cc_op);
6748 gen_jmp_im(pc_start - s->cs_base);
6749 gen_helper_into(tcg_const_i32(s->pc - pc_start));
6750 break;
6751 #ifdef WANT_ICEBP
6752 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6753 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6754 #if 1
6755 gen_debug(s, pc_start - s->cs_base);
6756 #else
6757 /* start debug */
6758 tb_flush(cpu_single_env);
6759 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6760 #endif
6761 break;
6762 #endif
6763 case 0xfa: /* cli */
6764 if (!s->vm86) {
6765 if (s->cpl <= s->iopl) {
6766 gen_helper_cli();
6767 } else {
6768 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6770 } else {
6771 if (s->iopl == 3) {
6772 gen_helper_cli();
6773 } else {
6774 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6777 break;
6778 case 0xfb: /* sti */
6779 if (!s->vm86) {
6780 if (s->cpl <= s->iopl) {
6781 gen_sti:
6782 gen_helper_sti();
6783 /* interrupts are enabled only after the first insn following sti */
6784 /* If several consecutive instructions inhibit interrupts, only the
6785 _first_ one sets the inhibit flag */
6786 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6787 gen_helper_set_inhibit_irq();
6788 /* give a chance to handle pending irqs */
6789 gen_jmp_im(s->pc - s->cs_base);
6790 gen_eob(s);
6791 } else {
6792 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6794 } else {
6795 if (s->iopl == 3) {
6796 goto gen_sti;
6797 } else {
6798 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6801 break;
6802 case 0x62: /* bound */
6803 if (CODE64(s))
6804 goto illegal_op;
6805 ot = dflag ? OT_LONG : OT_WORD;
6806 modrm = ldub_code(s->pc++);
6807 reg = (modrm >> 3) & 7;
6808 mod = (modrm >> 6) & 3;
6809 if (mod == 3)
6810 goto illegal_op;
6811 gen_op_mov_TN_reg(ot, 0, reg);
6812 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6813 gen_jmp_im(pc_start - s->cs_base);
6814 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6815 if (ot == OT_WORD)
6816 gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
6817 else
6818 gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
6819 break;
6820 case 0x1c8 ... 0x1cf: /* bswap reg */
6821 reg = (b & 7) | REX_B(s);
6822 #ifdef TARGET_X86_64
6823 if (dflag == 2) {
6824 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6825 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6826 gen_op_mov_reg_T0(OT_QUAD, reg);
6827 } else
6828 #endif
6830 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6831 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6832 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6833 gen_op_mov_reg_T0(OT_LONG, reg);
6835 break;
6836 case 0xd6: /* salc */
6837 if (CODE64(s))
6838 goto illegal_op;
6839 if (s->cc_op != CC_OP_DYNAMIC)
6840 gen_op_set_cc_op(s->cc_op);
6841 gen_compute_eflags_c(cpu_T[0]);
6842 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6843 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6844 break;
6845 case 0xe0: /* loopnz */
6846 case 0xe1: /* loopz */
6847 case 0xe2: /* loop */
6848 case 0xe3: /* jecxz */
6850 int l1, l2, l3;
6852 tval = (int8_t)insn_get(s, OT_BYTE);
6853 next_eip = s->pc - s->cs_base;
6854 tval += next_eip;
6855 if (s->dflag == 0)
6856 tval &= 0xffff;
6858 l1 = gen_new_label();
6859 l2 = gen_new_label();
6860 l3 = gen_new_label();
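/* l1 is taken when the branch succeeds (EIP := target), the
   fall-through path and l3 set EIP to the next instruction, and l2
   joins both paths before gen_eob() ends the block */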
6861 b &= 3;
6862 switch(b) {
6863 case 0: /* loopnz */
6864 case 1: /* loopz */
6865 if (s->cc_op != CC_OP_DYNAMIC)
6866 gen_op_set_cc_op(s->cc_op);
6867 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6868 gen_op_jz_ecx(s->aflag, l3);
6869 gen_compute_eflags(cpu_tmp0);
6870 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
6871 if (b == 0) {
6872 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
6873 } else {
6874 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
6876 break;
6877 case 2: /* loop */
6878 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6879 gen_op_jnz_ecx(s->aflag, l1);
6880 break;
6881 default:
6882 case 3: /* jcxz */
6883 gen_op_jz_ecx(s->aflag, l1);
6884 break;
6887 gen_set_label(l3);
6888 gen_jmp_im(next_eip);
6889 tcg_gen_br(l2);
6891 gen_set_label(l1);
6892 gen_jmp_im(tval);
6893 gen_set_label(l2);
6894 gen_eob(s);
6896 break;
6897 case 0x130: /* wrmsr */
6898 case 0x132: /* rdmsr */
6899 if (s->cpl != 0) {
6900 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6901 } else {
6902 if (s->cc_op != CC_OP_DYNAMIC)
6903 gen_op_set_cc_op(s->cc_op);
6904 gen_jmp_im(pc_start - s->cs_base);
6905 if (b & 2) {
6906 gen_helper_rdmsr();
6907 } else {
6908 gen_helper_wrmsr();
6911 break;
6912 case 0x131: /* rdtsc */
6913 if (s->cc_op != CC_OP_DYNAMIC)
6914 gen_op_set_cc_op(s->cc_op);
6915 gen_jmp_im(pc_start - s->cs_base);
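/* with icount enabled the timer read is bracketed by
   gen_io_start()/gen_io_end() and the block ends right after it, so
   the instruction count stays exact (usual icount convention) */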
6916 if (use_icount)
6917 gen_io_start();
6918 gen_helper_rdtsc();
6919 if (use_icount) {
6920 gen_io_end();
6921 gen_jmp(s, s->pc - s->cs_base);
6923 break;
6924 case 0x133: /* rdpmc */
6925 if (s->cc_op != CC_OP_DYNAMIC)
6926 gen_op_set_cc_op(s->cc_op);
6927 gen_jmp_im(pc_start - s->cs_base);
6928 gen_helper_rdpmc();
6929 break;
6930 case 0x134: /* sysenter */
6931 /* In 64-bit mode SYSENTER is valid only on Intel CPUs */
6932 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6933 goto illegal_op;
6934 if (!s->pe) {
6935 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6936 } else {
6937 gen_update_cc_op(s);
6938 gen_jmp_im(pc_start - s->cs_base);
6939 gen_helper_sysenter();
6940 gen_eob(s);
6942 break;
6943 case 0x135: /* sysexit */
6944 /* In 64-bit mode SYSEXIT is valid only on Intel CPUs */
6945 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6946 goto illegal_op;
6947 if (!s->pe) {
6948 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6949 } else {
6950 gen_update_cc_op(s);
6951 gen_jmp_im(pc_start - s->cs_base);
6952 gen_helper_sysexit(tcg_const_i32(dflag));
6953 gen_eob(s);
6955 break;
6956 #ifdef TARGET_X86_64
6957 case 0x105: /* syscall */
6958 /* XXX: is it usable in real mode ? */
6959 gen_update_cc_op(s);
6960 gen_jmp_im(pc_start - s->cs_base);
6961 gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
6962 gen_eob(s);
6963 break;
6964 case 0x107: /* sysret */
6965 if (!s->pe) {
6966 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6967 } else {
6968 gen_update_cc_op(s);
6969 gen_jmp_im(pc_start - s->cs_base);
6970 gen_helper_sysret(tcg_const_i32(s->dflag));
6971 /* condition codes are modified only in long mode */
6972 if (s->lma)
6973 s->cc_op = CC_OP_EFLAGS;
6974 gen_eob(s);
6976 break;
6977 #endif
6978 case 0x1a2: /* cpuid */
6979 if (s->cc_op != CC_OP_DYNAMIC)
6980 gen_op_set_cc_op(s->cc_op);
6981 gen_jmp_im(pc_start - s->cs_base);
6982 gen_helper_cpuid();
6983 break;
6984 case 0xf4: /* hlt */
6985 if (s->cpl != 0) {
6986 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6987 } else {
6988 if (s->cc_op != CC_OP_DYNAMIC)
6989 gen_op_set_cc_op(s->cc_op);
6990 gen_jmp_im(pc_start - s->cs_base);
6991 gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
6992 s->is_jmp = DISAS_TB_JUMP;
6994 break;
6995 case 0x100:
6996 modrm = ldub_code(s->pc++);
6997 mod = (modrm >> 6) & 3;
6998 op = (modrm >> 3) & 7;
6999 switch(op) {
7000 case 0: /* sldt */
7001 if (!s->pe || s->vm86)
7002 goto illegal_op;
7003 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7004 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7005 ot = OT_WORD;
7006 if (mod == 3)
7007 ot += s->dflag;
7008 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7009 break;
7010 case 2: /* lldt */
7011 if (!s->pe || s->vm86)
7012 goto illegal_op;
7013 if (s->cpl != 0) {
7014 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7015 } else {
7016 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7017 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7018 gen_jmp_im(pc_start - s->cs_base);
7019 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7020 gen_helper_lldt(cpu_tmp2_i32);
7022 break;
7023 case 1: /* str */
7024 if (!s->pe || s->vm86)
7025 goto illegal_op;
7026 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7027 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7028 ot = OT_WORD;
7029 if (mod == 3)
7030 ot += s->dflag;
7031 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
7032 break;
7033 case 3: /* ltr */
7034 if (!s->pe || s->vm86)
7035 goto illegal_op;
7036 if (s->cpl != 0) {
7037 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7038 } else {
7039 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7040 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7041 gen_jmp_im(pc_start - s->cs_base);
7042 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7043 gen_helper_ltr(cpu_tmp2_i32);
7045 break;
7046 case 4: /* verr */
7047 case 5: /* verw */
7048 if (!s->pe || s->vm86)
7049 goto illegal_op;
7050 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7051 if (s->cc_op != CC_OP_DYNAMIC)
7052 gen_op_set_cc_op(s->cc_op);
7053 if (op == 4)
7054 gen_helper_verr(cpu_T[0]);
7055 else
7056 gen_helper_verw(cpu_T[0]);
7057 s->cc_op = CC_OP_EFLAGS;
7058 break;
7059 default:
7060 goto illegal_op;
7062 break;
7063 case 0x101:
7064 modrm = ldub_code(s->pc++);
7065 mod = (modrm >> 6) & 3;
7066 op = (modrm >> 3) & 7;
7067 rm = modrm & 7;
7068 switch(op) {
7069 case 0: /* sgdt */
7070 if (mod == 3)
7071 goto illegal_op;
7072 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7073 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7074 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7075 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7076 gen_add_A0_im(s, 2);
7077 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7078 if (!s->dflag)
7079 gen_op_andl_T0_im(0xffffff);
7080 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7081 break;
7082 case 1:
7083 if (mod == 3) {
7084 switch (rm) {
7085 case 0: /* monitor */
7086 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7087 s->cpl != 0)
7088 goto illegal_op;
7089 if (s->cc_op != CC_OP_DYNAMIC)
7090 gen_op_set_cc_op(s->cc_op);
7091 gen_jmp_im(pc_start - s->cs_base);
7092 #ifdef TARGET_X86_64
7093 if (s->aflag == 2) {
7094 gen_op_movq_A0_reg(R_EAX);
7095 } else
7096 #endif
7098 gen_op_movl_A0_reg(R_EAX);
7099 if (s->aflag == 0)
7100 gen_op_andl_A0_ffff();
7102 gen_add_A0_ds_seg(s);
7103 gen_helper_monitor(cpu_A0);
7104 break;
7105 case 1: /* mwait */
7106 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7107 s->cpl != 0)
7108 goto illegal_op;
7109 gen_update_cc_op(s);
7110 gen_jmp_im(pc_start - s->cs_base);
7111 gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
7112 gen_eob(s);
7113 break;
7114 default:
7115 goto illegal_op;
7117 } else { /* sidt */
7118 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7119 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7120 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7121 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7122 gen_add_A0_im(s, 2);
7123 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7124 if (!s->dflag)
7125 gen_op_andl_T0_im(0xffffff);
7126 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7128 break;
7129 case 2: /* lgdt */
7130 case 3: /* lidt */
7131 if (mod == 3) {
7132 if (s->cc_op != CC_OP_DYNAMIC)
7133 gen_op_set_cc_op(s->cc_op);
7134 gen_jmp_im(pc_start - s->cs_base);
7135 switch(rm) {
7136 case 0: /* VMRUN */
7137 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7138 goto illegal_op;
7139 if (s->cpl != 0) {
7140 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7141 break;
7142 } else {
7143 gen_helper_vmrun(tcg_const_i32(s->aflag),
7144 tcg_const_i32(s->pc - pc_start));
7145 tcg_gen_exit_tb(0);
7146 s->is_jmp = DISAS_TB_JUMP;
7148 break;
7149 case 1: /* VMMCALL */
7150 if (!(s->flags & HF_SVME_MASK))
7151 goto illegal_op;
7152 gen_helper_vmmcall();
7153 break;
7154 case 2: /* VMLOAD */
7155 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7156 goto illegal_op;
7157 if (s->cpl != 0) {
7158 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7159 break;
7160 } else {
7161 gen_helper_vmload(tcg_const_i32(s->aflag));
7163 break;
7164 case 3: /* VMSAVE */
7165 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7166 goto illegal_op;
7167 if (s->cpl != 0) {
7168 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7169 break;
7170 } else {
7171 gen_helper_vmsave(tcg_const_i32(s->aflag));
7173 break;
7174 case 4: /* STGI */
7175 if ((!(s->flags & HF_SVME_MASK) &&
7176 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7177 !s->pe)
7178 goto illegal_op;
7179 if (s->cpl != 0) {
7180 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7181 break;
7182 } else {
7183 gen_helper_stgi();
7185 break;
7186 case 5: /* CLGI */
7187 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7188 goto illegal_op;
7189 if (s->cpl != 0) {
7190 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7191 break;
7192 } else {
7193 gen_helper_clgi();
7195 break;
7196 case 6: /* SKINIT */
7197 if ((!(s->flags & HF_SVME_MASK) &&
7198 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7199 !s->pe)
7200 goto illegal_op;
7201 gen_helper_skinit();
7202 break;
7203 case 7: /* INVLPGA */
7204 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7205 goto illegal_op;
7206 if (s->cpl != 0) {
7207 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7208 break;
7209 } else {
7210 gen_helper_invlpga(tcg_const_i32(s->aflag));
7212 break;
7213 default:
7214 goto illegal_op;
7216 } else if (s->cpl != 0) {
7217 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7218 } else {
7219 gen_svm_check_intercept(s, pc_start,
7220 op == 2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7221 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7222 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7223 gen_add_A0_im(s, 2);
7224 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7225 if (!s->dflag)
7226 gen_op_andl_T0_im(0xffffff);
7227 if (op == 2) {
7228 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7229 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7230 } else {
7231 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7232 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7235 break;
7236 case 4: /* smsw */
7237 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
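/* cr[0] is a target_ulong, so on a big-endian host of a 64-bit
   target its low 32 bits live at offset +4, hence the adjusted
   offsetof() below */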
7238 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7239 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7240 #else
7241 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7242 #endif
7243 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
7244 break;
7245 case 6: /* lmsw */
7246 if (s->cpl != 0) {
7247 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7248 } else {
7249 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7250 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7251 gen_helper_lmsw(cpu_T[0]);
7252 gen_jmp_im(s->pc - s->cs_base);
7253 gen_eob(s);
7255 break;
7256 case 7:
7257 if (mod != 3) { /* invlpg */
7258 if (s->cpl != 0) {
7259 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7260 } else {
7261 if (s->cc_op != CC_OP_DYNAMIC)
7262 gen_op_set_cc_op(s->cc_op);
7263 gen_jmp_im(pc_start - s->cs_base);
7264 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7265 gen_helper_invlpg(cpu_A0);
7266 gen_jmp_im(s->pc - s->cs_base);
7267 gen_eob(s);
7269 } else {
7270 switch (rm) {
7271 case 0: /* swapgs */
7272 #ifdef TARGET_X86_64
7273 if (CODE64(s)) {
7274 if (s->cpl != 0) {
7275 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7276 } else {
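/* swapgs exchanges the current GS segment base with the value held
   in the KERNEL_GS_BASE MSR (kernelgsbase) */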
7277 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7278 offsetof(CPUX86State,segs[R_GS].base));
7279 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7280 offsetof(CPUX86State,kernelgsbase));
7281 tcg_gen_st_tl(cpu_T[1], cpu_env,
7282 offsetof(CPUX86State,segs[R_GS].base));
7283 tcg_gen_st_tl(cpu_T[0], cpu_env,
7284 offsetof(CPUX86State,kernelgsbase));
7286 } else
7287 #endif
7289 goto illegal_op;
7291 break;
7292 case 1: /* rdtscp */
7293 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7294 goto illegal_op;
7295 if (s->cc_op != CC_OP_DYNAMIC)
7296 gen_op_set_cc_op(s->cc_op);
7297 gen_jmp_im(pc_start - s->cs_base);
7298 if (use_icount)
7299 gen_io_start();
7300 gen_helper_rdtscp();
7301 if (use_icount) {
7302 gen_io_end();
7303 gen_jmp(s, s->pc - s->cs_base);
7305 break;
7306 default:
7307 goto illegal_op;
7310 break;
7311 default:
7312 goto illegal_op;
7314 break;
7315 case 0x108: /* invd */
7316 case 0x109: /* wbinvd */
7317 if (s->cpl != 0) {
7318 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7319 } else {
7320 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7321 /* nothing to do */
7323 break;
7324 case 0x63: /* arpl or movsxd (x86_64) */
7325 #ifdef TARGET_X86_64
7326 if (CODE64(s)) {
7327 int d_ot;
7328 /* d_ot is the size of the destination */
7329 d_ot = dflag + OT_WORD;
7331 modrm = ldub_code(s->pc++);
7332 reg = ((modrm >> 3) & 7) | rex_r;
7333 mod = (modrm >> 6) & 3;
7334 rm = (modrm & 7) | REX_B(s);
7336 if (mod == 3) {
7337 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7338 /* sign extend */
7339 if (d_ot == OT_QUAD)
7340 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7341 gen_op_mov_reg_T0(d_ot, reg);
7342 } else {
7343 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7344 if (d_ot == OT_QUAD) {
7345 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7346 } else {
7347 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7349 gen_op_mov_reg_T0(d_ot, reg);
7351 } else
7352 #endif
7354 int label1;
7355 TCGv t0, t1, t2, a0;
7357 if (!s->pe || s->vm86)
7358 goto illegal_op;
7359 t0 = tcg_temp_local_new();
7360 t1 = tcg_temp_local_new();
7361 t2 = tcg_temp_local_new();
7362 ot = OT_WORD;
7363 modrm = ldub_code(s->pc++);
7364 reg = (modrm >> 3) & 7;
7365 mod = (modrm >> 6) & 3;
7366 rm = modrm & 7;
7367 if (mod != 3) {
7368 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7369 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7370 a0 = tcg_temp_local_new();
7371 tcg_gen_mov_tl(a0, cpu_A0);
7372 } else {
7373 gen_op_mov_v_reg(ot, t0, rm);
7374 TCGV_UNUSED(a0);
7376 gen_op_mov_v_reg(ot, t1, reg);
7377 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7378 tcg_gen_andi_tl(t1, t1, 3);
7379 tcg_gen_movi_tl(t2, 0);
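/* arpl: if the destination RPL (low two bits of t0) is below the
   source RPL in t1, raise it to the source RPL and record CC_Z in
   t2 so that ZF ends up set; otherwise ZF is cleared */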
7380 label1 = gen_new_label();
7381 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7382 tcg_gen_andi_tl(t0, t0, ~3);
7383 tcg_gen_or_tl(t0, t0, t1);
7384 tcg_gen_movi_tl(t2, CC_Z);
7385 gen_set_label(label1);
7386 if (mod != 3) {
7387 gen_op_st_v(ot + s->mem_index, t0, a0);
7388 tcg_temp_free(a0);
7389 } else {
7390 gen_op_mov_reg_v(ot, rm, t0);
7392 if (s->cc_op != CC_OP_DYNAMIC)
7393 gen_op_set_cc_op(s->cc_op);
7394 gen_compute_eflags(cpu_cc_src);
7395 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7396 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7397 s->cc_op = CC_OP_EFLAGS;
7398 tcg_temp_free(t0);
7399 tcg_temp_free(t1);
7400 tcg_temp_free(t2);
7402 break;
7403 case 0x102: /* lar */
7404 case 0x103: /* lsl */
7406 int label1;
7407 TCGv t0;
7408 if (!s->pe || s->vm86)
7409 goto illegal_op;
7410 ot = dflag ? OT_LONG : OT_WORD;
7411 modrm = ldub_code(s->pc++);
7412 reg = ((modrm >> 3) & 7) | rex_r;
7413 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7414 t0 = tcg_temp_local_new();
7415 if (s->cc_op != CC_OP_DYNAMIC)
7416 gen_op_set_cc_op(s->cc_op);
7417 if (b == 0x102)
7418 gen_helper_lar(t0, cpu_T[0]);
7419 else
7420 gen_helper_lsl(t0, cpu_T[0]);
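/* the lar/lsl helpers leave ZF in cpu_cc_src; the destination
   register is written only when ZF is set, i.e. when the selector
   could actually be loaded */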
7421 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7422 label1 = gen_new_label();
7423 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7424 gen_op_mov_reg_v(ot, reg, t0);
7425 gen_set_label(label1);
7426 s->cc_op = CC_OP_EFLAGS;
7427 tcg_temp_free(t0);
7429 break;
7430 case 0x118:
7431 modrm = ldub_code(s->pc++);
7432 mod = (modrm >> 6) & 3;
7433 op = (modrm >> 3) & 7;
7434 switch(op) {
7435 case 0: /* prefetchnta */
7436 case 1: /* prefetcht0 */
7437 case 2: /* prefetcht1 */
7438 case 3: /* prefetcht2 */
7439 if (mod == 3)
7440 goto illegal_op;
7441 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7442 /* nothing more to do */
7443 break;
7444 default: /* nop (multi byte) */
7445 gen_nop_modrm(s, modrm);
7446 break;
7448 break;
7449 case 0x119 ... 0x11f: /* nop (multi byte) */
7450 modrm = ldub_code(s->pc++);
7451 gen_nop_modrm(s, modrm);
7452 break;
7453 case 0x120: /* mov reg, crN */
7454 case 0x122: /* mov crN, reg */
7455 if (s->cpl != 0) {
7456 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7457 } else {
7458 modrm = ldub_code(s->pc++);
7459 if ((modrm & 0xc0) != 0xc0)
7460 goto illegal_op;
7461 rm = (modrm & 7) | REX_B(s);
7462 reg = ((modrm >> 3) & 7) | rex_r;
7463 if (CODE64(s))
7464 ot = OT_QUAD;
7465 else
7466 ot = OT_LONG;
7467 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7468 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7469 reg = 8;
7471 switch(reg) {
7472 case 0:
7473 case 2:
7474 case 3:
7475 case 4:
7476 case 8:
7477 if (s->cc_op != CC_OP_DYNAMIC)
7478 gen_op_set_cc_op(s->cc_op);
7479 gen_jmp_im(pc_start - s->cs_base);
7480 if (b & 2) {
7481 gen_op_mov_TN_reg(ot, 0, rm);
7482 gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
7483 gen_jmp_im(s->pc - s->cs_base);
7484 gen_eob(s);
7485 } else {
7486 gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
7487 gen_op_mov_reg_T0(ot, rm);
7489 break;
7490 default:
7491 goto illegal_op;
7494 break;
7495 case 0x121: /* mov reg, drN */
7496 case 0x123: /* mov drN, reg */
7497 if (s->cpl != 0) {
7498 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7499 } else {
7500 modrm = ldub_code(s->pc++);
7501 if ((modrm & 0xc0) != 0xc0)
7502 goto illegal_op;
7503 rm = (modrm & 7) | REX_B(s);
7504 reg = ((modrm >> 3) & 7) | rex_r;
7505 if (CODE64(s))
7506 ot = OT_QUAD;
7507 else
7508 ot = OT_LONG;
7509 /* XXX: do it dynamically with CR4.DE bit */
7510 if (reg == 4 || reg == 5 || reg >= 8)
7511 goto illegal_op;
7512 if (b & 2) {
7513 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7514 gen_op_mov_TN_reg(ot, 0, rm);
7515 gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
7516 gen_jmp_im(s->pc - s->cs_base);
7517 gen_eob(s);
7518 } else {
7519 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7520 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7521 gen_op_mov_reg_T0(ot, rm);
7524 break;
7525 case 0x106: /* clts */
7526 if (s->cpl != 0) {
7527 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7528 } else {
7529 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7530 gen_helper_clts();
7531 /* abort block because static cpu state changed */
7532 gen_jmp_im(s->pc - s->cs_base);
7533 gen_eob(s);
7535 break;
7536 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7537 case 0x1c3: /* MOVNTI reg, mem */
7538 if (!(s->cpuid_features & CPUID_SSE2))
7539 goto illegal_op;
7540 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7541 modrm = ldub_code(s->pc++);
7542 mod = (modrm >> 6) & 3;
7543 if (mod == 3)
7544 goto illegal_op;
7545 reg = ((modrm >> 3) & 7) | rex_r;
7546 /* generate a generic store */
7547 gen_ldst_modrm(s, modrm, ot, reg, 1);
7548 break;
7549 case 0x1ae:
7550 modrm = ldub_code(s->pc++);
7551 mod = (modrm >> 6) & 3;
7552 op = (modrm >> 3) & 7;
7553 switch(op) {
7554 case 0: /* fxsave */
7555 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7556 (s->prefix & PREFIX_LOCK))
7557 goto illegal_op;
7558 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7559 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7560 break;
7562 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7563 if (s->cc_op != CC_OP_DYNAMIC)
7564 gen_op_set_cc_op(s->cc_op);
7565 gen_jmp_im(pc_start - s->cs_base);
7566 gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
7567 break;
7568 case 1: /* fxrstor */
7569 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7570 (s->prefix & PREFIX_LOCK))
7571 goto illegal_op;
7572 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7573 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7574 break;
7576 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7577 if (s->cc_op != CC_OP_DYNAMIC)
7578 gen_op_set_cc_op(s->cc_op);
7579 gen_jmp_im(pc_start - s->cs_base);
7580 gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
7581 break;
7582 case 2: /* ldmxcsr */
7583 case 3: /* stmxcsr */
7584 if (s->flags & HF_TS_MASK) {
7585 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7586 break;
7588 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7589 mod == 3)
7590 goto illegal_op;
7591 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7592 if (op == 2) {
7593 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7594 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7595 gen_helper_ldmxcsr(cpu_tmp2_i32);
7596 } else {
7597 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7598 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7600 break;
7601 case 5: /* lfence */
7602 case 6: /* mfence */
7603 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7604 goto illegal_op;
7605 break;
7606 case 7: /* sfence / clflush */
7607 if ((modrm & 0xc7) == 0xc0) {
7608 /* sfence */
7609 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7610 if (!(s->cpuid_features & CPUID_SSE))
7611 goto illegal_op;
7612 } else {
7613 /* clflush */
7614 if (!(s->cpuid_features & CPUID_CLFLUSH))
7615 goto illegal_op;
7616 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7618 break;
7619 default:
7620 goto illegal_op;
7622 break;
7623 case 0x10d: /* 3DNow! prefetch(w) */
7624 modrm = ldub_code(s->pc++);
7625 mod = (modrm >> 6) & 3;
7626 if (mod == 3)
7627 goto illegal_op;
7628 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7629 /* ignore for now */
7630 break;
7631 case 0x1aa: /* rsm */
7632 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7633 if (!(s->flags & HF_SMM_MASK))
7634 goto illegal_op;
7635 gen_update_cc_op(s);
7636 gen_jmp_im(s->pc - s->cs_base);
7637 gen_helper_rsm();
7638 gen_eob(s);
7639 break;
7640 case 0x1b8: /* SSE4.2 popcnt */
7641 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7642 PREFIX_REPZ)
7643 goto illegal_op;
7644 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7645 goto illegal_op;
7647 modrm = ldub_code(s->pc++);
7648 reg = ((modrm >> 3) & 7);
7650 if (s->prefix & PREFIX_DATA)
7651 ot = OT_WORD;
7652 else if (s->dflag != 2)
7653 ot = OT_LONG;
7654 else
7655 ot = OT_QUAD;
7657 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
7658 gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
7659 gen_op_mov_reg_T0(ot, reg);
7661 s->cc_op = CC_OP_EFLAGS;
7662 break;
7663 case 0x10e ... 0x10f:
7664 /* 3DNow! instructions, ignore prefixes */
7665 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7666 case 0x110 ... 0x117:
7667 case 0x128 ... 0x12f:
7668 case 0x138 ... 0x13a:
7669 case 0x150 ... 0x179:
7670 case 0x17c ... 0x17f:
7671 case 0x1c2:
7672 case 0x1c4 ... 0x1c6:
7673 case 0x1d0 ... 0x1fe:
7674 gen_sse(s, b, pc_start, rex_r);
7675 break;
7676 default:
7677 goto illegal_op;
7679 /* lock generation */
7680 if (s->prefix & PREFIX_LOCK)
7681 gen_helper_unlock();
7682 return s->pc;
7683 illegal_op:
7684 if (s->prefix & PREFIX_LOCK)
7685 gen_helper_unlock();
7686 /* XXX: ensure that no lock was generated */
7687 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7688 return s->pc;
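/* optimize_flags_init() registers env, the condition-code fields and
   the general-purpose registers as TCG globals so the code generator
   can track them across micro-ops */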
7691 void optimize_flags_init(void)
7693 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7694 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7695 offsetof(CPUX86State, cc_op), "cc_op");
7696 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7697 "cc_src");
7698 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7699 "cc_dst");
7700 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_tmp),
7701 "cc_tmp");
7703 #ifdef TARGET_X86_64
7704 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7705 offsetof(CPUX86State, regs[R_EAX]), "rax");
7706 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7707 offsetof(CPUX86State, regs[R_ECX]), "rcx");
7708 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7709 offsetof(CPUX86State, regs[R_EDX]), "rdx");
7710 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7711 offsetof(CPUX86State, regs[R_EBX]), "rbx");
7712 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7713 offsetof(CPUX86State, regs[R_ESP]), "rsp");
7714 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7715 offsetof(CPUX86State, regs[R_EBP]), "rbp");
7716 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7717 offsetof(CPUX86State, regs[R_ESI]), "rsi");
7718 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7719 offsetof(CPUX86State, regs[R_EDI]), "rdi");
7720 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7721 offsetof(CPUX86State, regs[8]), "r8");
7722 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7723 offsetof(CPUX86State, regs[9]), "r9");
7724 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7725 offsetof(CPUX86State, regs[10]), "r10");
7726 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7727 offsetof(CPUX86State, regs[11]), "r11");
7728 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7729 offsetof(CPUX86State, regs[12]), "r12");
7730 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7731 offsetof(CPUX86State, regs[13]), "r13");
7732 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7733 offsetof(CPUX86State, regs[14]), "r14");
7734 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7735 offsetof(CPUX86State, regs[15]), "r15");
7736 #else
7737 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7738 offsetof(CPUX86State, regs[R_EAX]), "eax");
7739 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7740 offsetof(CPUX86State, regs[R_ECX]), "ecx");
7741 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7742 offsetof(CPUX86State, regs[R_EDX]), "edx");
7743 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7744 offsetof(CPUX86State, regs[R_EBX]), "ebx");
7745 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7746 offsetof(CPUX86State, regs[R_ESP]), "esp");
7747 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7748 offsetof(CPUX86State, regs[R_EBP]), "ebp");
7749 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7750 offsetof(CPUX86State, regs[R_ESI]), "esi");
7751 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7752 offsetof(CPUX86State, regs[R_EDI]), "edi");
7753 #endif
7755 /* register helpers */
7756 #define GEN_HELPER 2
7757 #include "helper.h"
7760 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7761 basic block 'tb'. If search_pc is TRUE, also generate PC
7762 information for each intermediate instruction. */
7763 static inline void gen_intermediate_code_internal(CPUX86State *env,
7764 TranslationBlock *tb,
7765 int search_pc)
7767 DisasContext dc1, *dc = &dc1;
7768 target_ulong pc_ptr;
7769 uint16_t *gen_opc_end;
7770 CPUBreakpoint *bp;
7771 int j, lj;
7772 uint64_t flags;
7773 target_ulong pc_start;
7774 target_ulong cs_base;
7775 int num_insns;
7776 int max_insns;
7778 /* generate intermediate code */
7779 pc_start = tb->pc;
7780 cs_base = tb->cs_base;
7781 flags = tb->flags;
7783 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7784 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7785 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7786 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7787 dc->f_st = 0;
7788 dc->vm86 = (flags >> VM_SHIFT) & 1;
7789 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7790 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7791 dc->tf = (flags >> TF_SHIFT) & 1;
7792 dc->singlestep_enabled = env->singlestep_enabled;
7793 dc->cc_op = CC_OP_DYNAMIC;
7794 dc->cs_base = cs_base;
7795 dc->tb = tb;
7796 dc->popl_esp_hack = 0;
7797 /* select memory access functions */
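/* mem_index stays 0 without softmmu and becomes 4 (kernel) or
   8 (CPL 3 user) otherwise; the factor of 4 leaves room for the
   operand size that each load/store site adds to it */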
7798 dc->mem_index = 0;
7799 if (flags & HF_SOFTMMU_MASK) {
7800 if (dc->cpl == 3)
7801 dc->mem_index = 2 * 4;
7802 else
7803 dc->mem_index = 1 * 4;
7805 dc->cpuid_features = env->cpuid_features;
7806 dc->cpuid_ext_features = env->cpuid_ext_features;
7807 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7808 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7809 #ifdef TARGET_X86_64
7810 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7811 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7812 #endif
7813 dc->flags = flags;
7814 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7815 (flags & HF_INHIBIT_IRQ_MASK)
7816 #ifndef CONFIG_SOFTMMU
7817 || (flags & HF_SOFTMMU_MASK)
7818 #endif
7820 #if 0
7821 /* check addseg logic */
7822 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7823 printf("ERROR addseg\n");
7824 #endif
7826 cpu_T[0] = tcg_temp_new();
7827 cpu_T[1] = tcg_temp_new();
7828 cpu_A0 = tcg_temp_new();
7829 cpu_T3 = tcg_temp_new();
7831 cpu_tmp0 = tcg_temp_new();
7832 cpu_tmp1_i64 = tcg_temp_new_i64();
7833 cpu_tmp2_i32 = tcg_temp_new_i32();
7834 cpu_tmp3_i32 = tcg_temp_new_i32();
7835 cpu_tmp4 = tcg_temp_new();
7836 cpu_tmp5 = tcg_temp_new();
7837 cpu_ptr0 = tcg_temp_new_ptr();
7838 cpu_ptr1 = tcg_temp_new_ptr();
7840 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7842 dc->is_jmp = DISAS_NEXT;
7843 pc_ptr = pc_start;
7844 lj = -1;
7845 num_insns = 0;
7846 max_insns = tb->cflags & CF_COUNT_MASK;
7847 if (max_insns == 0)
7848 max_insns = CF_COUNT_MASK;
7850 gen_icount_start();
7851 for(;;) {
7852 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7853 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7854 if (bp->pc == pc_ptr &&
7855 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7856 gen_debug(dc, pc_ptr - dc->cs_base);
7857 break;
7861 if (search_pc) {
7862 j = gen_opc_ptr - gen_opc_buf;
7863 if (lj < j) {
7864 lj++;
7865 while (lj < j)
7866 gen_opc_instr_start[lj++] = 0;
7868 gen_opc_pc[lj] = pc_ptr;
7869 gen_opc_cc_op[lj] = dc->cc_op;
7870 gen_opc_instr_start[lj] = 1;
7871 gen_opc_icount[lj] = num_insns;
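/* the gen_opc_* tables filled above map each generated op back to
   the guest eip and cc_op state; restore_state_to_opc() uses them to
   attribute a fault to a precise guest instruction */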
7873 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7874 gen_io_start();
7876 pc_ptr = disas_insn(dc, pc_ptr);
7877 num_insns++;
7878 /* stop translation if indicated */
7879 if (dc->is_jmp)
7880 break;
7881 /* if single step mode, we generate only one instruction and
7882 generate an exception */
7883 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7884 the flag and abort the translation to give the irqs a
7885 chance to happen */
7886 if (dc->tf || dc->singlestep_enabled ||
7887 (flags & HF_INHIBIT_IRQ_MASK)) {
7888 gen_jmp_im(pc_ptr - dc->cs_base);
7889 gen_eob(dc);
7890 break;
7892 /* if too long translation, stop generation too */
7893 if (gen_opc_ptr >= gen_opc_end ||
7894 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7895 num_insns >= max_insns) {
7896 gen_jmp_im(pc_ptr - dc->cs_base);
7897 gen_eob(dc);
7898 break;
7900 if (singlestep) {
7901 gen_jmp_im(pc_ptr - dc->cs_base);
7902 gen_eob(dc);
7903 break;
7906 if (tb->cflags & CF_LAST_IO)
7907 gen_io_end();
7908 gen_icount_end(tb, num_insns);
7909 *gen_opc_ptr = INDEX_op_end;
7910 /* make sure the last values are filled in */
7911 if (search_pc) {
7912 j = gen_opc_ptr - gen_opc_buf;
7913 lj++;
7914 while (lj <= j)
7915 gen_opc_instr_start[lj++] = 0;
7918 #ifdef DEBUG_DISAS
7919 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7920 int disas_flags;
7921 qemu_log("----------------\n");
7922 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7923 #ifdef TARGET_X86_64
7924 if (dc->code64)
7925 disas_flags = 2;
7926 else
7927 #endif
7928 disas_flags = !dc->code32;
7929 log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
7930 qemu_log("\n");
7932 #endif
7934 if (!search_pc) {
7935 tb->size = pc_ptr - pc_start;
7936 tb->icount = num_insns;
7940 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
7942 gen_intermediate_code_internal(env, tb, 0);
7945 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
7947 gen_intermediate_code_internal(env, tb, 1);
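/* called after a fault inside a translated block: pc_pos indexes the
   gen_opc_* tables filled by the search_pc pass, from which the guest
   eip and, when it was static, the cc_op value are restored */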
7950 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
7952 int cc_op;
7953 #ifdef DEBUG_DISAS
7954 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
7955 int i;
7956 qemu_log("RESTORE:\n");
7957 for(i = 0; i <= pc_pos; i++) {
7958 if (gen_opc_instr_start[i]) {
7959 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
7962 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
7963 pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
7964 (uint32_t)tb->cs_base);
7966 #endif
7967 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
7968 cc_op = gen_opc_cc_op[pc_pos];
7969 if (cc_op != CC_OP_DYNAMIC)
7970 env->cc_op = cc_op;