/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(...)  __VA_ARGS__
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#if 1
#define BUGGY_64(x) NULL
#endif
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(...)
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif
//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
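/* Store t0 into integer register 'reg' according to operand size 'ot':
   byte stores update the low byte (or AH/CH/DH/BH when no REX prefix is
   active), word stores update the low 16 bits, long stores zero-extend
   into the full register, and quad stores replace the whole register. */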
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    TCGv tmp;

    switch(ot) {
    case OT_BYTE:
        tmp = tcg_temp_new();
        tcg_gen_ext8u_tl(tmp, t0);
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xff);
            tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
        } else {
            tcg_gen_shli_tl(tmp, tmp, 8);
            tcg_gen_andi_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], ~0xff00);
            tcg_gen_or_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], tmp);
        }
        tcg_temp_free(tmp);
        break;
    case OT_WORD:
        tmp = tcg_temp_new();
        tcg_gen_ext16u_tl(tmp, t0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
        tcg_temp_free(tmp);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case OT_LONG:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case OT_QUAD:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    }
}
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}
static inline void gen_op_mov_reg_A0(int size, int reg)
{
    TCGv tmp;

    switch(size) {
    case 0:
        tmp = tcg_temp_new();
        tcg_gen_ext16u_tl(tmp, cpu_A0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], tmp);
        tcg_temp_free(tmp);
        break;
    default: /* XXX this shouldn't be reached; abort? */
    case 1:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], cpu_A0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_mov_tl(cpu_regs[reg], cpu_A0);
        break;
#endif
    }
}
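/* Load integer register 'reg' into t0; for byte operands, registers 4..7
   without a REX prefix select the high-byte registers AH/CH/DH/BH. */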
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            goto std_case;
        } else {
            tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
            tcg_gen_ext8u_tl(t0, t0);
        }
        break;
    default:
    std_case:
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
        break;
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}
static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
        break;
    case 1:
        tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_addi_tl(cpu_regs[reg], cpu_regs[reg], val);
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        tcg_gen_ext16u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_regs[reg], cpu_regs[reg], ~0xffff);
        tcg_gen_or_tl(cpu_regs[reg], cpu_regs[reg], cpu_tmp0);
        break;
    case 1:
        tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a nop. */
        tcg_gen_ext32u_tl(cpu_tmp0, cpu_tmp0);
        tcg_gen_mov_tl(cpu_regs[reg], cpu_tmp0);
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_add_tl(cpu_regs[reg], cpu_regs[reg], cpu_T[0]);
        break;
#endif
    }
}
static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif
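/* Memory access helpers: the 'idx' argument packs the operand size in its
   low two bits, with the remaining bits selecting the memory (MMU) index
   taken from s->mem_index. */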
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
}
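/* Compute into A0 the source address of a string operation: seg:ESI (or SI),
   honoring the address size and any segment override. */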
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);
    }
}
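/* Compute into A0 the destination address of a string operation: always
   ES:EDI (or DI), regardless of segment overrides. */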
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
    }
}
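/* T0 = DF << ot: the signed per-element step used to advance ESI/EDI in
   string instructions (env->df is kept as +1 or -1). */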
static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}
static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}
static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
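/* Dispatch to the byte/word/long I/O port helpers according to the
   operand size. */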
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}
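/* Generate the protection checks for IN/OUT/INS/OUTS: the TSS I/O permission
   bitmap check (in protected mode when CPL > IOPL, and always in vm86 mode),
   plus the SVM I/O intercept check when SVM is active. */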
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}

static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}
838 static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
840 if (s->cc_op != CC_OP_DYNAMIC)
841 gen_op_set_cc_op(s->cc_op);
842 switch(jcc_op) {
843 case JCC_O:
844 gen_compute_eflags(cpu_T[0]);
845 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
846 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
847 break;
848 case JCC_B:
849 gen_compute_eflags_c(cpu_T[0]);
850 break;
851 case JCC_Z:
852 gen_compute_eflags(cpu_T[0]);
853 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
854 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
855 break;
856 case JCC_BE:
857 gen_compute_eflags(cpu_tmp0);
858 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
859 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
860 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
861 break;
862 case JCC_S:
863 gen_compute_eflags(cpu_T[0]);
864 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
865 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
866 break;
867 case JCC_P:
868 gen_compute_eflags(cpu_T[0]);
869 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
870 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
871 break;
872 case JCC_L:
873 gen_compute_eflags(cpu_tmp0);
874 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
875 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
876 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
877 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
878 break;
879 default:
880 case JCC_LE:
881 gen_compute_eflags(cpu_tmp0);
882 tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
883 tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
884 tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
885 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
886 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
887 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
888 break;
892 /* return true if setcc_slow is not needed (WARNING: must be kept in
893 sync with gen_jcc1) */
894 static int is_fast_jcc_case(DisasContext *s, int b)
896 int jcc_op;
897 jcc_op = (b >> 1) & 7;
898 switch(s->cc_op) {
899 /* we optimize the cmp/jcc case */
900 case CC_OP_SUBB:
901 case CC_OP_SUBW:
902 case CC_OP_SUBL:
903 case CC_OP_SUBQ:
904 if (jcc_op == JCC_O || jcc_op == JCC_P)
905 goto slow_jcc;
906 break;
908 /* some jumps are easy to compute */
909 case CC_OP_ADDB:
910 case CC_OP_ADDW:
911 case CC_OP_ADDL:
912 case CC_OP_ADDQ:
914 case CC_OP_LOGICB:
915 case CC_OP_LOGICW:
916 case CC_OP_LOGICL:
917 case CC_OP_LOGICQ:
919 case CC_OP_INCB:
920 case CC_OP_INCW:
921 case CC_OP_INCL:
922 case CC_OP_INCQ:
924 case CC_OP_DECB:
925 case CC_OP_DECW:
926 case CC_OP_DECL:
927 case CC_OP_DECQ:
929 case CC_OP_SHLB:
930 case CC_OP_SHLW:
931 case CC_OP_SHLL:
932 case CC_OP_SHLQ:
933 if (jcc_op != JCC_Z && jcc_op != JCC_S)
934 goto slow_jcc;
935 break;
936 default:
937 slow_jcc:
938 return 0;
940 return 1;
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
945 static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
947 int inv, jcc_op, size, cond;
948 TCGv t0;
950 inv = b & 1;
951 jcc_op = (b >> 1) & 7;
953 switch(cc_op) {
954 /* we optimize the cmp/jcc case */
955 case CC_OP_SUBB:
956 case CC_OP_SUBW:
957 case CC_OP_SUBL:
958 case CC_OP_SUBQ:
960 size = cc_op - CC_OP_SUBB;
961 switch(jcc_op) {
962 case JCC_Z:
963 fast_jcc_z:
964 switch(size) {
965 case 0:
966 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
967 t0 = cpu_tmp0;
968 break;
969 case 1:
970 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
971 t0 = cpu_tmp0;
972 break;
973 #ifdef TARGET_X86_64
974 case 2:
975 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
976 t0 = cpu_tmp0;
977 break;
978 #endif
979 default:
980 t0 = cpu_cc_dst;
981 break;
983 tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
984 break;
985 case JCC_S:
986 fast_jcc_s:
987 switch(size) {
988 case 0:
989 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
990 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
991 0, l1);
992 break;
993 case 1:
994 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
995 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
996 0, l1);
997 break;
998 #ifdef TARGET_X86_64
999 case 2:
1000 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
1001 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
1002 0, l1);
1003 break;
1004 #endif
1005 default:
1006 tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
1007 0, l1);
1008 break;
1010 break;
1012 case JCC_B:
1013 cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
1014 goto fast_jcc_b;
1015 case JCC_BE:
1016 cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
1017 fast_jcc_b:
1018 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1019 switch(size) {
1020 case 0:
1021 t0 = cpu_tmp0;
1022 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
1023 tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
1024 break;
1025 case 1:
1026 t0 = cpu_tmp0;
1027 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
1028 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
1029 break;
1030 #ifdef TARGET_X86_64
1031 case 2:
1032 t0 = cpu_tmp0;
1033 tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
1034 tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
1035 break;
1036 #endif
1037 default:
1038 t0 = cpu_cc_src;
1039 break;
1041 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1042 break;
1044 case JCC_L:
1045 cond = inv ? TCG_COND_GE : TCG_COND_LT;
1046 goto fast_jcc_l;
1047 case JCC_LE:
1048 cond = inv ? TCG_COND_GT : TCG_COND_LE;
1049 fast_jcc_l:
1050 tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
1051 switch(size) {
1052 case 0:
1053 t0 = cpu_tmp0;
1054 tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
1055 tcg_gen_ext8s_tl(t0, cpu_cc_src);
1056 break;
1057 case 1:
1058 t0 = cpu_tmp0;
1059 tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
1060 tcg_gen_ext16s_tl(t0, cpu_cc_src);
1061 break;
1062 #ifdef TARGET_X86_64
1063 case 2:
1064 t0 = cpu_tmp0;
1065 tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
1066 tcg_gen_ext32s_tl(t0, cpu_cc_src);
1067 break;
1068 #endif
1069 default:
1070 t0 = cpu_cc_src;
1071 break;
1073 tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
1074 break;
1076 default:
1077 goto slow_jcc;
1079 break;
1081 /* some jumps are easy to compute */
1082 case CC_OP_ADDB:
1083 case CC_OP_ADDW:
1084 case CC_OP_ADDL:
1085 case CC_OP_ADDQ:
1087 case CC_OP_ADCB:
1088 case CC_OP_ADCW:
1089 case CC_OP_ADCL:
1090 case CC_OP_ADCQ:
1092 case CC_OP_SBBB:
1093 case CC_OP_SBBW:
1094 case CC_OP_SBBL:
1095 case CC_OP_SBBQ:
1097 case CC_OP_LOGICB:
1098 case CC_OP_LOGICW:
1099 case CC_OP_LOGICL:
1100 case CC_OP_LOGICQ:
1102 case CC_OP_INCB:
1103 case CC_OP_INCW:
1104 case CC_OP_INCL:
1105 case CC_OP_INCQ:
1107 case CC_OP_DECB:
1108 case CC_OP_DECW:
1109 case CC_OP_DECL:
1110 case CC_OP_DECQ:
1112 case CC_OP_SHLB:
1113 case CC_OP_SHLW:
1114 case CC_OP_SHLL:
1115 case CC_OP_SHLQ:
1117 case CC_OP_SARB:
1118 case CC_OP_SARW:
1119 case CC_OP_SARL:
1120 case CC_OP_SARQ:
1121 switch(jcc_op) {
1122 case JCC_Z:
1123 size = (cc_op - CC_OP_ADDB) & 3;
1124 goto fast_jcc_z;
1125 case JCC_S:
1126 size = (cc_op - CC_OP_ADDB) & 3;
1127 goto fast_jcc_s;
1128 default:
1129 goto slow_jcc;
1131 break;
1132 default:
1133 slow_jcc:
1134 gen_setcc_slow_T0(s, jcc_op);
1135 tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
1136 cpu_T[0], 0, l1);
1137 break;
1141 /* XXX: does not work with gdbstub "ice" single step - not a
1142 serious problem */
1143 static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
1145 int l1, l2;
1147 l1 = gen_new_label();
1148 l2 = gen_new_label();
1149 gen_op_jnz_ecx(s->aflag, l1);
1150 gen_set_label(l2);
1151 gen_jmp_tb(s, next_eip, 1);
1152 gen_set_label(l1);
1153 return l2;
1156 static inline void gen_stos(DisasContext *s, int ot)
1158 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1159 gen_string_movl_A0_EDI(s);
1160 gen_op_st_T0_A0(ot + s->mem_index);
1161 gen_op_movl_T0_Dshift(ot);
1162 gen_op_add_reg_T0(s->aflag, R_EDI);
1165 static inline void gen_lods(DisasContext *s, int ot)
1167 gen_string_movl_A0_ESI(s);
1168 gen_op_ld_T0_A0(ot + s->mem_index);
1169 gen_op_mov_reg_T0(ot, R_EAX);
1170 gen_op_movl_T0_Dshift(ot);
1171 gen_op_add_reg_T0(s->aflag, R_ESI);
1174 static inline void gen_scas(DisasContext *s, int ot)
1176 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
1177 gen_string_movl_A0_EDI(s);
1178 gen_op_ld_T1_A0(ot + s->mem_index);
1179 gen_op_cmpl_T0_T1_cc();
1180 gen_op_movl_T0_Dshift(ot);
1181 gen_op_add_reg_T0(s->aflag, R_EDI);
1184 static inline void gen_cmps(DisasContext *s, int ot)
1186 gen_string_movl_A0_ESI(s);
1187 gen_op_ld_T0_A0(ot + s->mem_index);
1188 gen_string_movl_A0_EDI(s);
1189 gen_op_ld_T1_A0(ot + s->mem_index);
1190 gen_op_cmpl_T0_T1_cc();
1191 gen_op_movl_T0_Dshift(ot);
1192 gen_op_add_reg_T0(s->aflag, R_ESI);
1193 gen_op_add_reg_T0(s->aflag, R_EDI);
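/* INS: read from the port in DX and store to ES:[EDI]. A dummy store is done
   first so the instruction can be restarted after a page fault; the sequence
   is bracketed with gen_io_start/gen_io_end when instruction counting is on. */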
1196 static inline void gen_ins(DisasContext *s, int ot)
1198 if (use_icount)
1199 gen_io_start();
1200 gen_string_movl_A0_EDI(s);
1201 /* Note: we must do this dummy write first to be restartable in
1202 case of page fault. */
1203 gen_op_movl_T0_0();
1204 gen_op_st_T0_A0(ot + s->mem_index);
1205 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1206 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1207 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1208 gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
1209 gen_op_st_T0_A0(ot + s->mem_index);
1210 gen_op_movl_T0_Dshift(ot);
1211 gen_op_add_reg_T0(s->aflag, R_EDI);
1212 if (use_icount)
1213 gen_io_end();
1216 static inline void gen_outs(DisasContext *s, int ot)
1218 if (use_icount)
1219 gen_io_start();
1220 gen_string_movl_A0_ESI(s);
1221 gen_op_ld_T0_A0(ot + s->mem_index);
1223 gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
1224 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
1225 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
1226 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
1227 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
1229 gen_op_movl_T0_Dshift(ot);
1230 gen_op_add_reg_T0(s->aflag, R_ESI);
1231 if (use_icount)
1232 gen_io_end();
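/* The GEN_REPZ/GEN_REPZ2 macros below expand to helpers such as
   gen_repz_movs(s, ot, cur_eip, next_eip): they emit a "jump out if ECX == 0"
   check, one iteration of the string op, an ECX decrement, and a jump back to
   the start of the instruction (plus a ZF test for SCAS/CMPS). */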
/* same method as Valgrind: we generate jumps to the current or next
   instruction */
1237 #define GEN_REPZ(op) \
1238 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1239 target_ulong cur_eip, target_ulong next_eip) \
1241 int l2;\
1242 gen_update_cc_op(s); \
1243 l2 = gen_jz_ecx_string(s, next_eip); \
1244 gen_ ## op(s, ot); \
1245 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1246 /* a loop would cause two single step exceptions if ECX = 1 \
1247 before rep string_insn */ \
1248 if (!s->jmp_opt) \
1249 gen_op_jz_ecx(s->aflag, l2); \
1250 gen_jmp(s, cur_eip); \
1253 #define GEN_REPZ2(op) \
1254 static inline void gen_repz_ ## op(DisasContext *s, int ot, \
1255 target_ulong cur_eip, \
1256 target_ulong next_eip, \
1257 int nz) \
1259 int l2;\
1260 gen_update_cc_op(s); \
1261 l2 = gen_jz_ecx_string(s, next_eip); \
1262 gen_ ## op(s, ot); \
1263 gen_op_add_reg_im(s->aflag, R_ECX, -1); \
1264 gen_op_set_cc_op(CC_OP_SUBB + ot); \
1265 gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2); \
1266 if (!s->jmp_opt) \
1267 gen_op_jz_ecx(s->aflag, l2); \
1268 gen_jmp(s, cur_eip); \
1271 GEN_REPZ(movs)
1272 GEN_REPZ(stos)
1273 GEN_REPZ(lods)
1274 GEN_REPZ(ins)
1275 GEN_REPZ(outs)
1276 GEN_REPZ2(scas)
1277 GEN_REPZ2(cmps)
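/* x87 arithmetic between ST0 and FT0, selected by the 3-bit 'op' field of
   the ESC opcode (FCOM and FCOMP both map to the compare helper here; the
   pop is handled by the caller). */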
1279 static void gen_helper_fp_arith_ST0_FT0(int op)
1281 switch (op) {
1282 case 0: gen_helper_fadd_ST0_FT0(); break;
1283 case 1: gen_helper_fmul_ST0_FT0(); break;
1284 case 2: gen_helper_fcom_ST0_FT0(); break;
1285 case 3: gen_helper_fcom_ST0_FT0(); break;
1286 case 4: gen_helper_fsub_ST0_FT0(); break;
1287 case 5: gen_helper_fsubr_ST0_FT0(); break;
1288 case 6: gen_helper_fdiv_ST0_FT0(); break;
1289 case 7: gen_helper_fdivr_ST0_FT0(); break;
1293 /* NOTE the exception in "r" op ordering */
1294 static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
1296 TCGv_i32 tmp = tcg_const_i32(opreg);
1297 switch (op) {
1298 case 0: gen_helper_fadd_STN_ST0(tmp); break;
1299 case 1: gen_helper_fmul_STN_ST0(tmp); break;
1300 case 4: gen_helper_fsubr_STN_ST0(tmp); break;
1301 case 5: gen_helper_fsub_STN_ST0(tmp); break;
1302 case 6: gen_helper_fdivr_STN_ST0(tmp); break;
1303 case 7: gen_helper_fdiv_STN_ST0(tmp); break;
1307 /* if d == OR_TMP0, it means memory operand (address in A0) */
1308 static void gen_op(DisasContext *s1, int op, int ot, int d)
1310 if (d != OR_TMP0) {
1311 gen_op_mov_TN_reg(ot, 0, d);
1312 } else {
1313 gen_op_ld_T0_A0(ot + s1->mem_index);
1315 switch(op) {
1316 case OP_ADCL:
1317 if (s1->cc_op != CC_OP_DYNAMIC)
1318 gen_op_set_cc_op(s1->cc_op);
1319 gen_compute_eflags_c(cpu_tmp4);
1320 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1321 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1322 if (d != OR_TMP0)
1323 gen_op_mov_reg_T0(ot, d);
1324 else
1325 gen_op_st_T0_A0(ot + s1->mem_index);
1326 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1327 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1328 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1329 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1330 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
1331 s1->cc_op = CC_OP_DYNAMIC;
1332 break;
1333 case OP_SBBL:
1334 if (s1->cc_op != CC_OP_DYNAMIC)
1335 gen_op_set_cc_op(s1->cc_op);
1336 gen_compute_eflags_c(cpu_tmp4);
1337 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1338 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
1339 if (d != OR_TMP0)
1340 gen_op_mov_reg_T0(ot, d);
1341 else
1342 gen_op_st_T0_A0(ot + s1->mem_index);
1343 tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
1344 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1345 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
1346 tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
1347 tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
1348 s1->cc_op = CC_OP_DYNAMIC;
1349 break;
1350 case OP_ADDL:
1351 gen_op_addl_T0_T1();
1352 if (d != OR_TMP0)
1353 gen_op_mov_reg_T0(ot, d);
1354 else
1355 gen_op_st_T0_A0(ot + s1->mem_index);
1356 gen_op_update2_cc();
1357 s1->cc_op = CC_OP_ADDB + ot;
1358 break;
1359 case OP_SUBL:
1360 tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1361 if (d != OR_TMP0)
1362 gen_op_mov_reg_T0(ot, d);
1363 else
1364 gen_op_st_T0_A0(ot + s1->mem_index);
1365 gen_op_update2_cc();
1366 s1->cc_op = CC_OP_SUBB + ot;
1367 break;
1368 default:
1369 case OP_ANDL:
1370 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1371 if (d != OR_TMP0)
1372 gen_op_mov_reg_T0(ot, d);
1373 else
1374 gen_op_st_T0_A0(ot + s1->mem_index);
1375 gen_op_update1_cc();
1376 s1->cc_op = CC_OP_LOGICB + ot;
1377 break;
1378 case OP_ORL:
1379 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1380 if (d != OR_TMP0)
1381 gen_op_mov_reg_T0(ot, d);
1382 else
1383 gen_op_st_T0_A0(ot + s1->mem_index);
1384 gen_op_update1_cc();
1385 s1->cc_op = CC_OP_LOGICB + ot;
1386 break;
1387 case OP_XORL:
1388 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1389 if (d != OR_TMP0)
1390 gen_op_mov_reg_T0(ot, d);
1391 else
1392 gen_op_st_T0_A0(ot + s1->mem_index);
1393 gen_op_update1_cc();
1394 s1->cc_op = CC_OP_LOGICB + ot;
1395 break;
1396 case OP_CMPL:
1397 gen_op_cmpl_T0_T1_cc();
1398 s1->cc_op = CC_OP_SUBB + ot;
1399 break;
1403 /* if d == OR_TMP0, it means memory operand (address in A0) */
1404 static void gen_inc(DisasContext *s1, int ot, int d, int c)
1406 if (d != OR_TMP0)
1407 gen_op_mov_TN_reg(ot, 0, d);
1408 else
1409 gen_op_ld_T0_A0(ot + s1->mem_index);
1410 if (s1->cc_op != CC_OP_DYNAMIC)
1411 gen_op_set_cc_op(s1->cc_op);
1412 if (c > 0) {
1413 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
1414 s1->cc_op = CC_OP_INCB + ot;
1415 } else {
1416 tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
1417 s1->cc_op = CC_OP_DECB + ot;
1419 if (d != OR_TMP0)
1420 gen_op_mov_reg_T0(ot, d);
1421 else
1422 gen_op_st_T0_A0(ot + s1->mem_index);
1423 gen_compute_eflags_c(cpu_cc_src);
1424 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
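/* SHL/SHR/SAR with a variable count taken from T1. The count is masked to
   5 (or 6) bits and the condition codes are only updated when the masked
   count is non-zero, hence the run-time branch and CC_OP_DYNAMIC. */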
1427 static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
1428 int is_right, int is_arith)
1430 target_ulong mask;
1431 int shift_label;
1432 TCGv t0, t1;
1434 if (ot == OT_QUAD)
1435 mask = 0x3f;
1436 else
1437 mask = 0x1f;
1439 /* load */
1440 if (op1 == OR_TMP0)
1441 gen_op_ld_T0_A0(ot + s->mem_index);
1442 else
1443 gen_op_mov_TN_reg(ot, 0, op1);
1445 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
1447 tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);
1449 if (is_right) {
1450 if (is_arith) {
1451 gen_exts(ot, cpu_T[0]);
1452 tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1453 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1454 } else {
1455 gen_extu(ot, cpu_T[0]);
1456 tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1457 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1459 } else {
1460 tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
1461 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
1464 /* store */
1465 if (op1 == OR_TMP0)
1466 gen_op_st_T0_A0(ot + s->mem_index);
1467 else
1468 gen_op_mov_reg_T0(ot, op1);
1470 /* update eflags if non zero shift */
1471 if (s->cc_op != CC_OP_DYNAMIC)
1472 gen_op_set_cc_op(s->cc_op);
1474 /* XXX: inefficient */
1475 t0 = tcg_temp_local_new();
1476 t1 = tcg_temp_local_new();
1478 tcg_gen_mov_tl(t0, cpu_T[0]);
1479 tcg_gen_mov_tl(t1, cpu_T3);
1481 shift_label = gen_new_label();
1482 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);
1484 tcg_gen_mov_tl(cpu_cc_src, t1);
1485 tcg_gen_mov_tl(cpu_cc_dst, t0);
1486 if (is_right)
1487 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1488 else
1489 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1491 gen_set_label(shift_label);
1492 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1494 tcg_temp_free(t0);
1495 tcg_temp_free(t1);
1498 static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
1499 int is_right, int is_arith)
1501 int mask;
1503 if (ot == OT_QUAD)
1504 mask = 0x3f;
1505 else
1506 mask = 0x1f;
1508 /* load */
1509 if (op1 == OR_TMP0)
1510 gen_op_ld_T0_A0(ot + s->mem_index);
1511 else
1512 gen_op_mov_TN_reg(ot, 0, op1);
1514 op2 &= mask;
1515 if (op2 != 0) {
1516 if (is_right) {
1517 if (is_arith) {
1518 gen_exts(ot, cpu_T[0]);
1519 tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1520 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
1521 } else {
1522 gen_extu(ot, cpu_T[0]);
1523 tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1524 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
1526 } else {
1527 tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
1528 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
1532 /* store */
1533 if (op1 == OR_TMP0)
1534 gen_op_st_T0_A0(ot + s->mem_index);
1535 else
1536 gen_op_mov_reg_T0(ot, op1);
1538 /* update eflags if non zero shift */
1539 if (op2 != 0) {
1540 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
1541 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
1542 if (is_right)
1543 s->cc_op = CC_OP_SARB + ot;
1544 else
1545 s->cc_op = CC_OP_SHLB + ot;
1549 static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
1551 if (arg2 >= 0)
1552 tcg_gen_shli_tl(ret, arg1, arg2);
1553 else
1554 tcg_gen_shri_tl(ret, arg1, -arg2);
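/* ROL/ROR with a variable count taken from T1: the rotate is composed from
   two shifts and an OR, and CF/OF are only updated for a non-zero count. */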
1557 static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
1558 int is_right)
1560 target_ulong mask;
1561 int label1, label2, data_bits;
1562 TCGv t0, t1, t2, a0;
1564 /* XXX: inefficient, but we must use local temps */
1565 t0 = tcg_temp_local_new();
1566 t1 = tcg_temp_local_new();
1567 t2 = tcg_temp_local_new();
1568 a0 = tcg_temp_local_new();
1570 if (ot == OT_QUAD)
1571 mask = 0x3f;
1572 else
1573 mask = 0x1f;
1575 /* load */
1576 if (op1 == OR_TMP0) {
1577 tcg_gen_mov_tl(a0, cpu_A0);
1578 gen_op_ld_v(ot + s->mem_index, t0, a0);
1579 } else {
1580 gen_op_mov_v_reg(ot, t0, op1);
1583 tcg_gen_mov_tl(t1, cpu_T[1]);
1585 tcg_gen_andi_tl(t1, t1, mask);
1587 /* Must test zero case to avoid using undefined behaviour in TCG
1588 shifts. */
1589 label1 = gen_new_label();
1590 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);
1592 if (ot <= OT_WORD)
1593 tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
1594 else
1595 tcg_gen_mov_tl(cpu_tmp0, t1);
1597 gen_extu(ot, t0);
1598 tcg_gen_mov_tl(t2, t0);
1600 data_bits = 8 << ot;
1601 /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
1602 fix TCG definition) */
1603 if (is_right) {
1604 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
1605 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1606 tcg_gen_shl_tl(t0, t0, cpu_tmp0);
1607 } else {
1608 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
1609 tcg_gen_subfi_tl(cpu_tmp0, data_bits, cpu_tmp0);
1610 tcg_gen_shr_tl(t0, t0, cpu_tmp0);
1612 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1614 gen_set_label(label1);
1615 /* store */
1616 if (op1 == OR_TMP0) {
1617 gen_op_st_v(ot + s->mem_index, t0, a0);
1618 } else {
1619 gen_op_mov_reg_v(ot, op1, t0);
1622 /* update eflags */
1623 if (s->cc_op != CC_OP_DYNAMIC)
1624 gen_op_set_cc_op(s->cc_op);
1626 label2 = gen_new_label();
1627 tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);
1629 gen_compute_eflags(cpu_cc_src);
1630 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1631 tcg_gen_xor_tl(cpu_tmp0, t2, t0);
1632 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1633 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1634 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1635 if (is_right) {
1636 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1638 tcg_gen_andi_tl(t0, t0, CC_C);
1639 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1641 tcg_gen_discard_tl(cpu_cc_dst);
1642 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1644 gen_set_label(label2);
1645 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1647 tcg_temp_free(t0);
1648 tcg_temp_free(t1);
1649 tcg_temp_free(t2);
1650 tcg_temp_free(a0);
1653 static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
1654 int is_right)
1656 int mask;
1657 int data_bits;
1658 TCGv t0, t1, a0;
1660 /* XXX: inefficient, but we must use local temps */
1661 t0 = tcg_temp_local_new();
1662 t1 = tcg_temp_local_new();
1663 a0 = tcg_temp_local_new();
1665 if (ot == OT_QUAD)
1666 mask = 0x3f;
1667 else
1668 mask = 0x1f;
1670 /* load */
1671 if (op1 == OR_TMP0) {
1672 tcg_gen_mov_tl(a0, cpu_A0);
1673 gen_op_ld_v(ot + s->mem_index, t0, a0);
1674 } else {
1675 gen_op_mov_v_reg(ot, t0, op1);
1678 gen_extu(ot, t0);
1679 tcg_gen_mov_tl(t1, t0);
1681 op2 &= mask;
1682 data_bits = 8 << ot;
1683 if (op2 != 0) {
1684 int shift = op2 & ((1 << (3 + ot)) - 1);
1685 if (is_right) {
1686 tcg_gen_shri_tl(cpu_tmp4, t0, shift);
1687 tcg_gen_shli_tl(t0, t0, data_bits - shift);
1689 else {
1690 tcg_gen_shli_tl(cpu_tmp4, t0, shift);
1691 tcg_gen_shri_tl(t0, t0, data_bits - shift);
1693 tcg_gen_or_tl(t0, t0, cpu_tmp4);
1696 /* store */
1697 if (op1 == OR_TMP0) {
1698 gen_op_st_v(ot + s->mem_index, t0, a0);
1699 } else {
1700 gen_op_mov_reg_v(ot, op1, t0);
1703 if (op2 != 0) {
1704 /* update eflags */
1705 if (s->cc_op != CC_OP_DYNAMIC)
1706 gen_op_set_cc_op(s->cc_op);
1708 gen_compute_eflags(cpu_cc_src);
1709 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
1710 tcg_gen_xor_tl(cpu_tmp0, t1, t0);
1711 tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
1712 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
1713 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
1714 if (is_right) {
1715 tcg_gen_shri_tl(t0, t0, data_bits - 1);
1717 tcg_gen_andi_tl(t0, t0, CC_C);
1718 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);
1720 tcg_gen_discard_tl(cpu_cc_dst);
1721 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1722 s->cc_op = CC_OP_EFLAGS;
1725 tcg_temp_free(t0);
1726 tcg_temp_free(t1);
1727 tcg_temp_free(a0);
1730 /* XXX: add faster immediate = 1 case */
1731 static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
1732 int is_right)
1734 int label1;
1736 if (s->cc_op != CC_OP_DYNAMIC)
1737 gen_op_set_cc_op(s->cc_op);
1739 /* load */
1740 if (op1 == OR_TMP0)
1741 gen_op_ld_T0_A0(ot + s->mem_index);
1742 else
1743 gen_op_mov_TN_reg(ot, 0, op1);
1745 if (is_right) {
1746 switch (ot) {
1747 case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1748 case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1749 case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1750 #ifdef TARGET_X86_64
1751 case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1752 #endif
1754 } else {
1755 switch (ot) {
1756 case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1757 case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1758 case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1759 #ifdef TARGET_X86_64
1760 case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
1761 #endif
1764 /* store */
1765 if (op1 == OR_TMP0)
1766 gen_op_st_T0_A0(ot + s->mem_index);
1767 else
1768 gen_op_mov_reg_T0(ot, op1);
1770 /* update eflags */
1771 label1 = gen_new_label();
1772 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);
1774 tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
1775 tcg_gen_discard_tl(cpu_cc_dst);
1776 tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
1778 gen_set_label(label1);
1779 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1782 /* XXX: add faster immediate case */
1783 static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
1784 int is_right)
1786 int label1, label2, data_bits;
1787 target_ulong mask;
1788 TCGv t0, t1, t2, a0;
1790 t0 = tcg_temp_local_new();
1791 t1 = tcg_temp_local_new();
1792 t2 = tcg_temp_local_new();
1793 a0 = tcg_temp_local_new();
1795 if (ot == OT_QUAD)
1796 mask = 0x3f;
1797 else
1798 mask = 0x1f;
1800 /* load */
1801 if (op1 == OR_TMP0) {
1802 tcg_gen_mov_tl(a0, cpu_A0);
1803 gen_op_ld_v(ot + s->mem_index, t0, a0);
1804 } else {
1805 gen_op_mov_v_reg(ot, t0, op1);
1808 tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);
1810 tcg_gen_mov_tl(t1, cpu_T[1]);
1811 tcg_gen_mov_tl(t2, cpu_T3);
1813 /* Must test zero case to avoid using undefined behaviour in TCG
1814 shifts. */
1815 label1 = gen_new_label();
1816 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
1818 tcg_gen_addi_tl(cpu_tmp5, t2, -1);
1819 if (ot == OT_WORD) {
1820 /* Note: we implement the Intel behaviour for shift count > 16 */
1821 if (is_right) {
1822 tcg_gen_andi_tl(t0, t0, 0xffff);
1823 tcg_gen_shli_tl(cpu_tmp0, t1, 16);
1824 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1825 tcg_gen_ext32u_tl(t0, t0);
1827 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
            /* only needed if count > 16, but a test would complicate things */
1830 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1831 tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);
1833 tcg_gen_shr_tl(t0, t0, t2);
1835 tcg_gen_or_tl(t0, t0, cpu_tmp0);
1836 } else {
1837 /* XXX: not optimal */
1838 tcg_gen_andi_tl(t0, t0, 0xffff);
1839 tcg_gen_shli_tl(t1, t1, 16);
1840 tcg_gen_or_tl(t1, t1, t0);
1841 tcg_gen_ext32u_tl(t1, t1);
1843 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1844 tcg_gen_subfi_tl(cpu_tmp0, 32, cpu_tmp5);
1845 tcg_gen_shr_tl(cpu_tmp5, t1, cpu_tmp0);
1846 tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp5);
1848 tcg_gen_shl_tl(t0, t0, t2);
1849 tcg_gen_subfi_tl(cpu_tmp5, 32, t2);
1850 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1851 tcg_gen_or_tl(t0, t0, t1);
1853 } else {
1854 data_bits = 8 << ot;
1855 if (is_right) {
1856 if (ot == OT_LONG)
1857 tcg_gen_ext32u_tl(t0, t0);
1859 tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);
1861 tcg_gen_shr_tl(t0, t0, t2);
1862 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1863 tcg_gen_shl_tl(t1, t1, cpu_tmp5);
1864 tcg_gen_or_tl(t0, t0, t1);
1866 } else {
1867 if (ot == OT_LONG)
1868 tcg_gen_ext32u_tl(t1, t1);
1870 tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
1872 tcg_gen_shl_tl(t0, t0, t2);
1873 tcg_gen_subfi_tl(cpu_tmp5, data_bits, t2);
1874 tcg_gen_shr_tl(t1, t1, cpu_tmp5);
1875 tcg_gen_or_tl(t0, t0, t1);
1878 tcg_gen_mov_tl(t1, cpu_tmp4);
1880 gen_set_label(label1);
1881 /* store */
1882 if (op1 == OR_TMP0) {
1883 gen_op_st_v(ot + s->mem_index, t0, a0);
1884 } else {
1885 gen_op_mov_reg_v(ot, op1, t0);
1888 /* update eflags */
1889 if (s->cc_op != CC_OP_DYNAMIC)
1890 gen_op_set_cc_op(s->cc_op);
1892 label2 = gen_new_label();
1893 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);
1895 tcg_gen_mov_tl(cpu_cc_src, t1);
1896 tcg_gen_mov_tl(cpu_cc_dst, t0);
1897 if (is_right) {
1898 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
1899 } else {
1900 tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
1902 gen_set_label(label2);
1903 s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
1905 tcg_temp_free(t0);
1906 tcg_temp_free(t1);
1907 tcg_temp_free(t2);
1908 tcg_temp_free(a0);
1911 static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
1913 if (s != OR_TMP1)
1914 gen_op_mov_TN_reg(ot, 1, s);
1915 switch(op) {
1916 case OP_ROL:
1917 gen_rot_rm_T1(s1, ot, d, 0);
1918 break;
1919 case OP_ROR:
1920 gen_rot_rm_T1(s1, ot, d, 1);
1921 break;
1922 case OP_SHL:
1923 case OP_SHL1:
1924 gen_shift_rm_T1(s1, ot, d, 0, 0);
1925 break;
1926 case OP_SHR:
1927 gen_shift_rm_T1(s1, ot, d, 1, 0);
1928 break;
1929 case OP_SAR:
1930 gen_shift_rm_T1(s1, ot, d, 1, 1);
1931 break;
1932 case OP_RCL:
1933 gen_rotc_rm_T1(s1, ot, d, 0);
1934 break;
1935 case OP_RCR:
1936 gen_rotc_rm_T1(s1, ot, d, 1);
1937 break;
1941 static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
1943 switch(op) {
1944 case OP_ROL:
1945 gen_rot_rm_im(s1, ot, d, c, 0);
1946 break;
1947 case OP_ROR:
1948 gen_rot_rm_im(s1, ot, d, c, 1);
1949 break;
1950 case OP_SHL:
1951 case OP_SHL1:
1952 gen_shift_rm_im(s1, ot, d, c, 0, 0);
1953 break;
1954 case OP_SHR:
1955 gen_shift_rm_im(s1, ot, d, c, 1, 0);
1956 break;
1957 case OP_SAR:
1958 gen_shift_rm_im(s1, ot, d, c, 1, 1);
1959 break;
1960 default:
1961 /* currently not optimized */
1962 gen_op_movl_T1_im(c);
1963 gen_shift(s1, op, ot, d, OR_TMP1);
1964 break;
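/* Decode the ModRM, SIB and displacement bytes of a memory operand and leave
   the effective address in A0, adding the segment base when required. */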
1968 static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
1970 target_long disp;
1971 int havesib;
1972 int base;
1973 int index;
1974 int scale;
1975 int opreg;
1976 int mod, rm, code, override, must_add_seg;
1978 override = s->override;
1979 must_add_seg = s->addseg;
1980 if (override >= 0)
1981 must_add_seg = 1;
1982 mod = (modrm >> 6) & 3;
1983 rm = modrm & 7;
1985 if (s->aflag) {
1987 havesib = 0;
1988 base = rm;
1989 index = 0;
1990 scale = 0;
1992 if (base == 4) {
1993 havesib = 1;
1994 code = ldub_code(s->pc++);
1995 scale = (code >> 6) & 3;
1996 index = ((code >> 3) & 7) | REX_X(s);
1997 base = (code & 7);
1999 base |= REX_B(s);
2001 switch (mod) {
2002 case 0:
2003 if ((base & 7) == 5) {
2004 base = -1;
2005 disp = (int32_t)ldl_code(s->pc);
2006 s->pc += 4;
2007 if (CODE64(s) && !havesib) {
2008 disp += s->pc + s->rip_offset;
2010 } else {
2011 disp = 0;
2013 break;
2014 case 1:
2015 disp = (int8_t)ldub_code(s->pc++);
2016 break;
2017 default:
2018 case 2:
2019 disp = ldl_code(s->pc);
2020 s->pc += 4;
2021 break;
2024 if (base >= 0) {
2025 /* for correct popl handling with esp */
2026 if (base == 4 && s->popl_esp_hack)
2027 disp += s->popl_esp_hack;
2028 #ifdef TARGET_X86_64
2029 if (s->aflag == 2) {
2030 gen_op_movq_A0_reg(base);
2031 if (disp != 0) {
2032 gen_op_addq_A0_im(disp);
2034 } else
2035 #endif
2037 gen_op_movl_A0_reg(base);
2038 if (disp != 0)
2039 gen_op_addl_A0_im(disp);
2041 } else {
2042 #ifdef TARGET_X86_64
2043 if (s->aflag == 2) {
2044 gen_op_movq_A0_im(disp);
2045 } else
2046 #endif
2048 gen_op_movl_A0_im(disp);
2051 /* index == 4 means no index */
2052 if (havesib && (index != 4)) {
2053 #ifdef TARGET_X86_64
2054 if (s->aflag == 2) {
2055 gen_op_addq_A0_reg_sN(scale, index);
2056 } else
2057 #endif
2059 gen_op_addl_A0_reg_sN(scale, index);
2062 if (must_add_seg) {
2063 if (override < 0) {
2064 if (base == R_EBP || base == R_ESP)
2065 override = R_SS;
2066 else
2067 override = R_DS;
2069 #ifdef TARGET_X86_64
2070 if (s->aflag == 2) {
2071 gen_op_addq_A0_seg(override);
2072 } else
2073 #endif
2075 gen_op_addl_A0_seg(override);
2078 } else {
2079 switch (mod) {
2080 case 0:
2081 if (rm == 6) {
2082 disp = lduw_code(s->pc);
2083 s->pc += 2;
2084 gen_op_movl_A0_im(disp);
2085 rm = 0; /* avoid SS override */
2086 goto no_rm;
2087 } else {
2088 disp = 0;
2090 break;
2091 case 1:
2092 disp = (int8_t)ldub_code(s->pc++);
2093 break;
2094 default:
2095 case 2:
2096 disp = lduw_code(s->pc);
2097 s->pc += 2;
2098 break;
2100 switch(rm) {
2101 case 0:
2102 gen_op_movl_A0_reg(R_EBX);
2103 gen_op_addl_A0_reg_sN(0, R_ESI);
2104 break;
2105 case 1:
2106 gen_op_movl_A0_reg(R_EBX);
2107 gen_op_addl_A0_reg_sN(0, R_EDI);
2108 break;
2109 case 2:
2110 gen_op_movl_A0_reg(R_EBP);
2111 gen_op_addl_A0_reg_sN(0, R_ESI);
2112 break;
2113 case 3:
2114 gen_op_movl_A0_reg(R_EBP);
2115 gen_op_addl_A0_reg_sN(0, R_EDI);
2116 break;
2117 case 4:
2118 gen_op_movl_A0_reg(R_ESI);
2119 break;
2120 case 5:
2121 gen_op_movl_A0_reg(R_EDI);
2122 break;
2123 case 6:
2124 gen_op_movl_A0_reg(R_EBP);
2125 break;
2126 default:
2127 case 7:
2128 gen_op_movl_A0_reg(R_EBX);
2129 break;
2131 if (disp != 0)
2132 gen_op_addl_A0_im(disp);
2133 gen_op_andl_A0_ffff();
2134 no_rm:
2135 if (must_add_seg) {
2136 if (override < 0) {
2137 if (rm == 2 || rm == 3 || rm == 6)
2138 override = R_SS;
2139 else
2140 override = R_DS;
2142 gen_op_addl_A0_seg(override);
2146 opreg = OR_A0;
2147 disp = 0;
2148 *reg_ptr = opreg;
2149 *offset_ptr = disp;
2152 static void gen_nop_modrm(DisasContext *s, int modrm)
2154 int mod, rm, base, code;
2156 mod = (modrm >> 6) & 3;
2157 if (mod == 3)
2158 return;
2159 rm = modrm & 7;
2161 if (s->aflag) {
2163 base = rm;
2165 if (base == 4) {
2166 code = ldub_code(s->pc++);
2167 base = (code & 7);
2170 switch (mod) {
2171 case 0:
2172 if (base == 5) {
2173 s->pc += 4;
2175 break;
2176 case 1:
2177 s->pc++;
2178 break;
2179 default:
2180 case 2:
2181 s->pc += 4;
2182 break;
2184 } else {
2185 switch (mod) {
2186 case 0:
2187 if (rm == 6) {
2188 s->pc += 2;
2190 break;
2191 case 1:
2192 s->pc++;
2193 break;
2194 default:
2195 case 2:
2196 s->pc += 2;
2197 break;
2202 /* used for LEA and MOV AX, mem */
2203 static void gen_add_A0_ds_seg(DisasContext *s)
2205 int override, must_add_seg;
2206 must_add_seg = s->addseg;
2207 override = R_DS;
2208 if (s->override >= 0) {
2209 override = s->override;
2210 must_add_seg = 1;
2212 if (must_add_seg) {
2213 #ifdef TARGET_X86_64
2214 if (CODE64(s)) {
2215 gen_op_addq_A0_seg(override);
2216 } else
2217 #endif
2219 gen_op_addl_A0_seg(override);
2224 /* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
2225 OR_TMP0 */
2226 static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
2228 int mod, rm, opreg, disp;
2230 mod = (modrm >> 6) & 3;
2231 rm = (modrm & 7) | REX_B(s);
2232 if (mod == 3) {
2233 if (is_store) {
2234 if (reg != OR_TMP0)
2235 gen_op_mov_TN_reg(ot, 0, reg);
2236 gen_op_mov_reg_T0(ot, rm);
2237 } else {
2238 gen_op_mov_TN_reg(ot, 0, rm);
2239 if (reg != OR_TMP0)
2240 gen_op_mov_reg_T0(ot, reg);
2242 } else {
2243 gen_lea_modrm(s, modrm, &opreg, &disp);
2244 if (is_store) {
2245 if (reg != OR_TMP0)
2246 gen_op_mov_TN_reg(ot, 0, reg);
2247 gen_op_st_T0_A0(ot + s->mem_index);
2248 } else {
2249 gen_op_ld_T0_A0(ot + s->mem_index);
2250 if (reg != OR_TMP0)
2251 gen_op_mov_reg_T0(ot, reg);
2256 static inline uint32_t insn_get(DisasContext *s, int ot)
2258 uint32_t ret;
2260 switch(ot) {
2261 case OT_BYTE:
2262 ret = ldub_code(s->pc);
2263 s->pc++;
2264 break;
2265 case OT_WORD:
2266 ret = lduw_code(s->pc);
2267 s->pc += 2;
2268 break;
2269 default:
2270 case OT_LONG:
2271 ret = ldl_code(s->pc);
2272 s->pc += 4;
2273 break;
2275 return ret;
2278 static inline int insn_const_size(unsigned int ot)
2280 if (ot <= OT_LONG)
2281 return 1 << ot;
2282 else
2283 return 4;
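/* Jump to 'eip': use direct TB chaining when the target lies on the same
   guest page as the current TB (or the current instruction), otherwise store
   the new EIP and end the block. */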
2286 static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
2288 TranslationBlock *tb;
2289 target_ulong pc;
2291 pc = s->cs_base + eip;
2292 tb = s->tb;
2293 /* NOTE: we handle the case where the TB spans two pages here */
2294 if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
2295 (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
2296 /* jump to same page: we can use a direct jump */
2297 tcg_gen_goto_tb(tb_num);
2298 gen_jmp_im(eip);
2299 tcg_gen_exit_tb((long)tb + tb_num);
2300 } else {
2301 /* jump to another page: currently not optimized */
2302 gen_jmp_im(eip);
2303 gen_eob(s);
2307 static inline void gen_jcc(DisasContext *s, int b,
2308 target_ulong val, target_ulong next_eip)
2310 int l1, l2, cc_op;
2312 cc_op = s->cc_op;
2313 if (s->cc_op != CC_OP_DYNAMIC) {
2314 gen_op_set_cc_op(s->cc_op);
2315 s->cc_op = CC_OP_DYNAMIC;
2317 if (s->jmp_opt) {
2318 l1 = gen_new_label();
2319 gen_jcc1(s, cc_op, b, l1);
2321 gen_goto_tb(s, 0, next_eip);
2323 gen_set_label(l1);
2324 gen_goto_tb(s, 1, val);
2325 s->is_jmp = 3;
2326 } else {
2328 l1 = gen_new_label();
2329 l2 = gen_new_label();
2330 gen_jcc1(s, cc_op, b, l1);
2332 gen_jmp_im(next_eip);
2333 tcg_gen_br(l2);
2335 gen_set_label(l1);
2336 gen_jmp_im(val);
2337 gen_set_label(l2);
2338 gen_eob(s);
2342 static void gen_setcc(DisasContext *s, int b)
2344 int inv, jcc_op, l1;
2345 TCGv t0;
2347 if (is_fast_jcc_case(s, b)) {
2348 /* nominal case: we use a jump */
2349 /* XXX: make it faster by adding new instructions in TCG */
2350 t0 = tcg_temp_local_new();
2351 tcg_gen_movi_tl(t0, 0);
2352 l1 = gen_new_label();
2353 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2354 tcg_gen_movi_tl(t0, 1);
2355 gen_set_label(l1);
2356 tcg_gen_mov_tl(cpu_T[0], t0);
2357 tcg_temp_free(t0);
2358 } else {
        /* slow case: it is more efficient not to generate a jump,
           although it is questionable whether this optimization is
           worthwhile */
2362 inv = b & 1;
2363 jcc_op = (b >> 1) & 7;
2364 gen_setcc_slow_T0(s, jcc_op);
2365 if (inv) {
2366 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2371 static inline void gen_op_movl_T0_seg(int seg_reg)
2373 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2374 offsetof(CPUX86State,segs[seg_reg].selector));
2377 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2379 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2380 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2381 offsetof(CPUX86State,segs[seg_reg].selector));
2382 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2383 tcg_gen_st_tl(cpu_T[0], cpu_env,
2384 offsetof(CPUX86State,segs[seg_reg].base));
2387 /* move T0 to seg_reg and compute if the CPU state may change. Never
2388 call this function with seg_reg == R_CS */
2389 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2391 if (s->pe && !s->vm86) {
2392 /* XXX: optimize by finding processor state dynamically */
2393 if (s->cc_op != CC_OP_DYNAMIC)
2394 gen_op_set_cc_op(s->cc_op);
2395 gen_jmp_im(cur_eip);
2396 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2397 gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
2398 /* abort translation because the addseg value may change or
2399 because ss32 may change. For R_SS, translation must always
2400 stop as special handling is needed to inhibit hardware
2401 interrupts for the next instruction */
2402 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2403 s->is_jmp = 3;
2404 } else {
2405 gen_op_movl_seg_T0_vm(seg_reg);
2406 if (seg_reg == R_SS)
2407 s->is_jmp = 3;
2411 static inline int svm_is_rep(int prefixes)
2413 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2416 static inline void
2417 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2418 uint32_t type, uint64_t param)
2420 /* no SVM activated; fast case */
2421 if (likely(!(s->flags & HF_SVMI_MASK)))
2422 return;
2423 if (s->cc_op != CC_OP_DYNAMIC)
2424 gen_op_set_cc_op(s->cc_op);
2425 gen_jmp_im(pc_start - s->cs_base);
2426 gen_helper_svm_check_intercept_param(tcg_const_i32(type),
2427 tcg_const_i64(param));
2430 static inline void
2431 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2433 gen_svm_check_intercept_param(s, pc_start, type, 0);
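/* gen_stack_update below adjusts ESP by 'addend'; the first argument of
   gen_op_add_reg_im selects the register width: 2 = 64 bit (long mode),
   1 = 32 bit (ss32 set), 0 = 16 bit */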
2436 static inline void gen_stack_update(DisasContext *s, int addend)
2438 #ifdef TARGET_X86_64
2439 if (CODE64(s)) {
2440 gen_op_add_reg_im(2, R_ESP, addend);
2441 } else
2442 #endif
2443 if (s->ss32) {
2444 gen_op_add_reg_im(1, R_ESP, addend);
2445 } else {
2446 gen_op_add_reg_im(0, R_ESP, addend);
2450 /* generate a push. It depends on ss32, addseg and dflag */
2451 static void gen_push_T0(DisasContext *s)
2453 #ifdef TARGET_X86_64
2454 if (CODE64(s)) {
2455 gen_op_movq_A0_reg(R_ESP);
2456 if (s->dflag) {
2457 gen_op_addq_A0_im(-8);
2458 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2459 } else {
2460 gen_op_addq_A0_im(-2);
2461 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2463 gen_op_mov_reg_A0(2, R_ESP);
2464 } else
2465 #endif
2467 gen_op_movl_A0_reg(R_ESP);
2468 if (!s->dflag)
2469 gen_op_addl_A0_im(-2);
2470 else
2471 gen_op_addl_A0_im(-4);
2472 if (s->ss32) {
2473 if (s->addseg) {
2474 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2475 gen_op_addl_A0_seg(R_SS);
2477 } else {
2478 gen_op_andl_A0_ffff();
2479 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2480 gen_op_addl_A0_seg(R_SS);
2482 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2483 if (s->ss32 && !s->addseg)
2484 gen_op_mov_reg_A0(1, R_ESP);
2485 else
2486 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
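/* in the legacy path above, the new stack offset is kept in T1 (or left in A0
   when no segment base is added) so that ESP can be written back with the
   proper 16/32-bit width after the store */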
2490 /* generate a push. It depends on ss32, addseg and dflag */
2491 /* slower version for T1, only used for call Ev */
2492 static void gen_push_T1(DisasContext *s)
2494 #ifdef TARGET_X86_64
2495 if (CODE64(s)) {
2496 gen_op_movq_A0_reg(R_ESP);
2497 if (s->dflag) {
2498 gen_op_addq_A0_im(-8);
2499 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2500 } else {
2501 gen_op_addq_A0_im(-2);
2502 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2504 gen_op_mov_reg_A0(2, R_ESP);
2505 } else
2506 #endif
2508 gen_op_movl_A0_reg(R_ESP);
2509 if (!s->dflag)
2510 gen_op_addl_A0_im(-2);
2511 else
2512 gen_op_addl_A0_im(-4);
2513 if (s->ss32) {
2514 if (s->addseg) {
2515 gen_op_addl_A0_seg(R_SS);
2517 } else {
2518 gen_op_andl_A0_ffff();
2519 gen_op_addl_A0_seg(R_SS);
2521 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2523 if (s->ss32 && !s->addseg)
2524 gen_op_mov_reg_A0(1, R_ESP);
2525 else
2526 gen_stack_update(s, (-2) << s->dflag);
2530 /* two step pop is necessary for precise exceptions */
2531 static void gen_pop_T0(DisasContext *s)
2533 #ifdef TARGET_X86_64
2534 if (CODE64(s)) {
2535 gen_op_movq_A0_reg(R_ESP);
2536 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2537 } else
2538 #endif
2540 gen_op_movl_A0_reg(R_ESP);
2541 if (s->ss32) {
2542 if (s->addseg)
2543 gen_op_addl_A0_seg(R_SS);
2544 } else {
2545 gen_op_andl_A0_ffff();
2546 gen_op_addl_A0_seg(R_SS);
2548 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2552 static void gen_pop_update(DisasContext *s)
2554 #ifdef TARGET_X86_64
2555 if (CODE64(s) && s->dflag) {
2556 gen_stack_update(s, 8);
2557 } else
2558 #endif
2560 gen_stack_update(s, 2 << s->dflag);
2564 static void gen_stack_A0(DisasContext *s)
2566 gen_op_movl_A0_reg(R_ESP);
2567 if (!s->ss32)
2568 gen_op_andl_A0_ffff();
2569 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2570 if (s->addseg)
2571 gen_op_addl_A0_seg(R_SS);
2574 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2575 static void gen_pusha(DisasContext *s)
2577 int i;
2578 gen_op_movl_A0_reg(R_ESP);
2579 gen_op_addl_A0_im(-16 << s->dflag);
2580 if (!s->ss32)
2581 gen_op_andl_A0_ffff();
2582 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2583 if (s->addseg)
2584 gen_op_addl_A0_seg(R_SS);
2585 for(i = 0;i < 8; i++) {
2586 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2587 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2588 gen_op_addl_A0_im(2 << s->dflag);
2590 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2593 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2594 static void gen_popa(DisasContext *s)
2596 int i;
2597 gen_op_movl_A0_reg(R_ESP);
2598 if (!s->ss32)
2599 gen_op_andl_A0_ffff();
2600 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2601 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2602 if (s->addseg)
2603 gen_op_addl_A0_seg(R_SS);
2604 for(i = 0;i < 8; i++) {
2605 /* ESP is not reloaded */
2606 if (i != 3) {
2607 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2608 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2610 gen_op_addl_A0_im(2 << s->dflag);
2612 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
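/* gen_enter below implements ENTER: esp_addend is the byte count reserved
   for locals and 'level' the nesting level; the level is masked to 5 bits
   as specified by the instruction */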
2615 static void gen_enter(DisasContext *s, int esp_addend, int level)
2617 int ot, opsize;
2619 level &= 0x1f;
2620 #ifdef TARGET_X86_64
2621 if (CODE64(s)) {
2622 ot = s->dflag ? OT_QUAD : OT_WORD;
2623 opsize = 1 << ot;
2625 gen_op_movl_A0_reg(R_ESP);
2626 gen_op_addq_A0_im(-opsize);
2627 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2629 /* push bp */
2630 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2631 gen_op_st_T0_A0(ot + s->mem_index);
2632 if (level) {
2633 /* XXX: must save state */
2634 gen_helper_enter64_level(tcg_const_i32(level),
2635 tcg_const_i32((ot == OT_QUAD)),
2636 cpu_T[1]);
2638 gen_op_mov_reg_T1(ot, R_EBP);
2639 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2640 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2641 } else
2642 #endif
2644 ot = s->dflag + OT_WORD;
2645 opsize = 2 << s->dflag;
2647 gen_op_movl_A0_reg(R_ESP);
2648 gen_op_addl_A0_im(-opsize);
2649 if (!s->ss32)
2650 gen_op_andl_A0_ffff();
2651 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2652 if (s->addseg)
2653 gen_op_addl_A0_seg(R_SS);
2654 /* push bp */
2655 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2656 gen_op_st_T0_A0(ot + s->mem_index);
2657 if (level) {
2658 /* XXX: must save state */
2659 gen_helper_enter_level(tcg_const_i32(level),
2660 tcg_const_i32(s->dflag),
2661 cpu_T[1]);
2663 gen_op_mov_reg_T1(ot, R_EBP);
2664 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2665 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2669 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2671 if (s->cc_op != CC_OP_DYNAMIC)
2672 gen_op_set_cc_op(s->cc_op);
2673 gen_jmp_im(cur_eip);
2674 gen_helper_raise_exception(tcg_const_i32(trapno));
2675 s->is_jmp = 3;
2678 /* an interrupt is different from an exception because of the
2679 privilege checks */
2680 static void gen_interrupt(DisasContext *s, int intno,
2681 target_ulong cur_eip, target_ulong next_eip)
2683 if (s->cc_op != CC_OP_DYNAMIC)
2684 gen_op_set_cc_op(s->cc_op);
2685 gen_jmp_im(cur_eip);
2686 gen_helper_raise_interrupt(tcg_const_i32(intno),
2687 tcg_const_i32(next_eip - cur_eip));
2688 s->is_jmp = 3;
2691 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2693 if (s->cc_op != CC_OP_DYNAMIC)
2694 gen_op_set_cc_op(s->cc_op);
2695 gen_jmp_im(cur_eip);
2696 gen_helper_debug();
2697 s->is_jmp = 3;
2700 /* generate a generic end of block. Trace exception is also generated
2701 if needed */
2702 static void gen_eob(DisasContext *s)
2704 if (s->cc_op != CC_OP_DYNAMIC)
2705 gen_op_set_cc_op(s->cc_op);
2706 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2707 gen_helper_reset_inhibit_irq();
2709 if (s->tb->flags & HF_RF_MASK) {
2710 gen_helper_reset_rf();
2712 if (s->singlestep_enabled) {
2713 gen_helper_debug();
2714 } else if (s->tf) {
2715 gen_helper_single_step();
2716 } else {
2717 tcg_gen_exit_tb(0);
2719 s->is_jmp = 3;
2722 /* generate a jump to eip. No segment change may happen before this, as a
2723 direct jump to the next block may occur */
2724 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2726 if (s->jmp_opt) {
2727 if (s->cc_op != CC_OP_DYNAMIC) {
2728 gen_op_set_cc_op(s->cc_op);
2729 s->cc_op = CC_OP_DYNAMIC;
2731 gen_goto_tb(s, tb_num, eip);
2732 s->is_jmp = 3;
2733 } else {
2734 gen_jmp_im(eip);
2735 gen_eob(s);
2739 static void gen_jmp(DisasContext *s, target_ulong eip)
2741 gen_jmp_tb(s, eip, 0);
2744 static inline void gen_ldq_env_A0(int idx, int offset)
2746 int mem_index = (idx >> 2) - 1;
2747 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2748 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2751 static inline void gen_stq_env_A0(int idx, int offset)
2753 int mem_index = (idx >> 2) - 1;
2754 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2755 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2758 static inline void gen_ldo_env_A0(int idx, int offset)
2760 int mem_index = (idx >> 2) - 1;
2761 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2762 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2763 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2764 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2765 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2768 static inline void gen_sto_env_A0(int idx, int offset)
2770 int mem_index = (idx >> 2) - 1;
2771 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2772 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2773 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2774 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2775 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2778 static inline void gen_op_movo(int d_offset, int s_offset)
2780 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2781 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2782 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2783 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2786 static inline void gen_op_movq(int d_offset, int s_offset)
2788 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2789 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2792 static inline void gen_op_movl(int d_offset, int s_offset)
2794 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2795 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2798 static inline void gen_op_movq_env_0(int d_offset)
2800 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2801 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2804 #define SSE_SPECIAL ((void *)1)
2805 #define SSE_DUMMY ((void *)2)
2807 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2808 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2809 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2811 static void *sse_op_table1[256][4] = {
2812 /* 3DNow! extensions */
2813 [0x0e] = { SSE_DUMMY }, /* femms */
2814 [0x0f] = { SSE_DUMMY }, /* pf... */
2815 /* pure SSE operations */
2816 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2817 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2818 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2819 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2820 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2821 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2822 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2823 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2825 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2826 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2827 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2828 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2829 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2830 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2831 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2832 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2833 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2834 [0x51] = SSE_FOP(sqrt),
2835 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2836 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2837 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2838 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2839 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2840 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2841 [0x58] = SSE_FOP(add),
2842 [0x59] = SSE_FOP(mul),
2843 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2844 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2845 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2846 [0x5c] = SSE_FOP(sub),
2847 [0x5d] = SSE_FOP(min),
2848 [0x5e] = SSE_FOP(div),
2849 [0x5f] = SSE_FOP(max),
2851 [0xc2] = SSE_FOP(cmpeq),
2852 [0xc6] = { gen_helper_shufps, gen_helper_shufpd },
2854 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2855 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2857 /* MMX ops and their SSE extensions */
2858 [0x60] = MMX_OP2(punpcklbw),
2859 [0x61] = MMX_OP2(punpcklwd),
2860 [0x62] = MMX_OP2(punpckldq),
2861 [0x63] = MMX_OP2(packsswb),
2862 [0x64] = MMX_OP2(pcmpgtb),
2863 [0x65] = MMX_OP2(pcmpgtw),
2864 [0x66] = MMX_OP2(pcmpgtl),
2865 [0x67] = MMX_OP2(packuswb),
2866 [0x68] = MMX_OP2(punpckhbw),
2867 [0x69] = MMX_OP2(punpckhwd),
2868 [0x6a] = MMX_OP2(punpckhdq),
2869 [0x6b] = MMX_OP2(packssdw),
2870 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2871 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2872 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2873 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movdqu */
2874 [0x70] = { gen_helper_pshufw_mmx,
2875 gen_helper_pshufd_xmm,
2876 gen_helper_pshufhw_xmm,
2877 gen_helper_pshuflw_xmm },
2878 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2879 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2880 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2881 [0x74] = MMX_OP2(pcmpeqb),
2882 [0x75] = MMX_OP2(pcmpeqw),
2883 [0x76] = MMX_OP2(pcmpeql),
2884 [0x77] = { SSE_DUMMY }, /* emms */
2885 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2886 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2887 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2888 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2889 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, , movq */
2890 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2891 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2892 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2893 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2894 [0xd1] = MMX_OP2(psrlw),
2895 [0xd2] = MMX_OP2(psrld),
2896 [0xd3] = MMX_OP2(psrlq),
2897 [0xd4] = MMX_OP2(paddq),
2898 [0xd5] = MMX_OP2(pmullw),
2899 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2900 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2901 [0xd8] = MMX_OP2(psubusb),
2902 [0xd9] = MMX_OP2(psubusw),
2903 [0xda] = MMX_OP2(pminub),
2904 [0xdb] = MMX_OP2(pand),
2905 [0xdc] = MMX_OP2(paddusb),
2906 [0xdd] = MMX_OP2(paddusw),
2907 [0xde] = MMX_OP2(pmaxub),
2908 [0xdf] = MMX_OP2(pandn),
2909 [0xe0] = MMX_OP2(pavgb),
2910 [0xe1] = MMX_OP2(psraw),
2911 [0xe2] = MMX_OP2(psrad),
2912 [0xe3] = MMX_OP2(pavgw),
2913 [0xe4] = MMX_OP2(pmulhuw),
2914 [0xe5] = MMX_OP2(pmulhw),
2915 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2916 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2917 [0xe8] = MMX_OP2(psubsb),
2918 [0xe9] = MMX_OP2(psubsw),
2919 [0xea] = MMX_OP2(pminsw),
2920 [0xeb] = MMX_OP2(por),
2921 [0xec] = MMX_OP2(paddsb),
2922 [0xed] = MMX_OP2(paddsw),
2923 [0xee] = MMX_OP2(pmaxsw),
2924 [0xef] = MMX_OP2(pxor),
2925 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2926 [0xf1] = MMX_OP2(psllw),
2927 [0xf2] = MMX_OP2(pslld),
2928 [0xf3] = MMX_OP2(psllq),
2929 [0xf4] = MMX_OP2(pmuludq),
2930 [0xf5] = MMX_OP2(pmaddwd),
2931 [0xf6] = MMX_OP2(psadbw),
2932 [0xf7] = MMX_OP2(maskmov),
2933 [0xf8] = MMX_OP2(psubb),
2934 [0xf9] = MMX_OP2(psubw),
2935 [0xfa] = MMX_OP2(psubl),
2936 [0xfb] = MMX_OP2(psubq),
2937 [0xfc] = MMX_OP2(paddb),
2938 [0xfd] = MMX_OP2(paddw),
2939 [0xfe] = MMX_OP2(paddl),
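/* sse_op_table1 is indexed by the second opcode byte and by the mandatory
   prefix: column 0 = no prefix (MMX/ps forms), 1 = 0x66 (pd), 2 = 0xF3 (ss),
   3 = 0xF2 (sd).  For example 0x66 0x0f 0x58 (addpd) selects
   sse_op_table1[0x58][1] = gen_helper_addpd.  SSE_SPECIAL entries are
   decoded explicitly in gen_sse(); SSE_DUMMY marks femms/emms, which only
   need gen_helper_emms() */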
2942 static void *sse_op_table2[3 * 8][2] = {
2943 [0 + 2] = MMX_OP2(psrlw),
2944 [0 + 4] = MMX_OP2(psraw),
2945 [0 + 6] = MMX_OP2(psllw),
2946 [8 + 2] = MMX_OP2(psrld),
2947 [8 + 4] = MMX_OP2(psrad),
2948 [8 + 6] = MMX_OP2(pslld),
2949 [16 + 2] = MMX_OP2(psrlq),
2950 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2951 [16 + 6] = MMX_OP2(psllq),
2952 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
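/* sse_op_table2 handles the immediate shift group (opcodes 0x71/0x72/0x73):
   the row is (opcode - 0x71) * 8 plus the modrm reg field, the column again
   selects the MMX vs XMM helper.  For example 0x66 0x0f 0x73 /2
   (psrlq xmm, imm8) selects sse_op_table2[16 + 2][1] = gen_helper_psrlq_xmm */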
2955 static void *sse_op_table3[4 * 3] = {
2956 gen_helper_cvtsi2ss,
2957 gen_helper_cvtsi2sd,
2958 X86_64_ONLY(gen_helper_cvtsq2ss),
2959 X86_64_ONLY(gen_helper_cvtsq2sd),
2961 gen_helper_cvttss2si,
2962 gen_helper_cvttsd2si,
2963 X86_64_ONLY(gen_helper_cvttss2sq),
2964 X86_64_ONLY(gen_helper_cvttsd2sq),
2966 gen_helper_cvtss2si,
2967 gen_helper_cvtsd2si,
2968 X86_64_ONLY(gen_helper_cvtss2sq),
2969 X86_64_ONLY(gen_helper_cvtsd2sq),
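/* sse_op_table3 holds the scalar int<->float conversions in rows of four:
   { ss, sd, 64-bit ss, 64-bit sd } for cvtsi2*, cvtt*2si and cvt*2si
   respectively; the 64-bit source/destination forms only exist on x86_64 */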
2972 static void *sse_op_table4[8][4] = {
2973 SSE_FOP(cmpeq),
2974 SSE_FOP(cmplt),
2975 SSE_FOP(cmple),
2976 SSE_FOP(cmpunord),
2977 SSE_FOP(cmpneq),
2978 SSE_FOP(cmpnlt),
2979 SSE_FOP(cmpnle),
2980 SSE_FOP(cmpord),
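/* sse_op_table4 maps the imm8 predicate (0..7) of cmpps/cmppd/cmpss/cmpsd
   to the corresponding comparison helper; values >= 8 are rejected as an
   illegal opcode.  For example predicate 1 with no prefix gives
   gen_helper_cmpltps */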
2983 static void *sse_op_table5[256] = {
2984 [0x0c] = gen_helper_pi2fw,
2985 [0x0d] = gen_helper_pi2fd,
2986 [0x1c] = gen_helper_pf2iw,
2987 [0x1d] = gen_helper_pf2id,
2988 [0x8a] = gen_helper_pfnacc,
2989 [0x8e] = gen_helper_pfpnacc,
2990 [0x90] = gen_helper_pfcmpge,
2991 [0x94] = gen_helper_pfmin,
2992 [0x96] = gen_helper_pfrcp,
2993 [0x97] = gen_helper_pfrsqrt,
2994 [0x9a] = gen_helper_pfsub,
2995 [0x9e] = gen_helper_pfadd,
2996 [0xa0] = gen_helper_pfcmpgt,
2997 [0xa4] = gen_helper_pfmax,
2998 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2999 [0xa7] = gen_helper_movq, /* pfrsqit1 */
3000 [0xaa] = gen_helper_pfsubr,
3001 [0xae] = gen_helper_pfacc,
3002 [0xb0] = gen_helper_pfcmpeq,
3003 [0xb4] = gen_helper_pfmul,
3004 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3005 [0xb7] = gen_helper_pmulhrw_mmx,
3006 [0xbb] = gen_helper_pswapd,
3007 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
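/* sse_op_table5 is indexed by the 3DNow! suffix byte that follows the
   modrm operands of opcode 0x0f 0x0f */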
3010 struct sse_op_helper_s {
3011 void *op[2]; uint32_t ext_mask;
3013 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3014 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3015 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3016 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3017 static struct sse_op_helper_s sse_op_table6[256] = {
3018 [0x00] = SSSE3_OP(pshufb),
3019 [0x01] = SSSE3_OP(phaddw),
3020 [0x02] = SSSE3_OP(phaddd),
3021 [0x03] = SSSE3_OP(phaddsw),
3022 [0x04] = SSSE3_OP(pmaddubsw),
3023 [0x05] = SSSE3_OP(phsubw),
3024 [0x06] = SSSE3_OP(phsubd),
3025 [0x07] = SSSE3_OP(phsubsw),
3026 [0x08] = SSSE3_OP(psignb),
3027 [0x09] = SSSE3_OP(psignw),
3028 [0x0a] = SSSE3_OP(psignd),
3029 [0x0b] = SSSE3_OP(pmulhrsw),
3030 [0x10] = SSE41_OP(pblendvb),
3031 [0x14] = SSE41_OP(blendvps),
3032 [0x15] = SSE41_OP(blendvpd),
3033 [0x17] = SSE41_OP(ptest),
3034 [0x1c] = SSSE3_OP(pabsb),
3035 [0x1d] = SSSE3_OP(pabsw),
3036 [0x1e] = SSSE3_OP(pabsd),
3037 [0x20] = SSE41_OP(pmovsxbw),
3038 [0x21] = SSE41_OP(pmovsxbd),
3039 [0x22] = SSE41_OP(pmovsxbq),
3040 [0x23] = SSE41_OP(pmovsxwd),
3041 [0x24] = SSE41_OP(pmovsxwq),
3042 [0x25] = SSE41_OP(pmovsxdq),
3043 [0x28] = SSE41_OP(pmuldq),
3044 [0x29] = SSE41_OP(pcmpeqq),
3045 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3046 [0x2b] = SSE41_OP(packusdw),
3047 [0x30] = SSE41_OP(pmovzxbw),
3048 [0x31] = SSE41_OP(pmovzxbd),
3049 [0x32] = SSE41_OP(pmovzxbq),
3050 [0x33] = SSE41_OP(pmovzxwd),
3051 [0x34] = SSE41_OP(pmovzxwq),
3052 [0x35] = SSE41_OP(pmovzxdq),
3053 [0x37] = SSE42_OP(pcmpgtq),
3054 [0x38] = SSE41_OP(pminsb),
3055 [0x39] = SSE41_OP(pminsd),
3056 [0x3a] = SSE41_OP(pminuw),
3057 [0x3b] = SSE41_OP(pminud),
3058 [0x3c] = SSE41_OP(pmaxsb),
3059 [0x3d] = SSE41_OP(pmaxsd),
3060 [0x3e] = SSE41_OP(pmaxuw),
3061 [0x3f] = SSE41_OP(pmaxud),
3062 [0x40] = SSE41_OP(pmulld),
3063 [0x41] = SSE41_OP(phminposuw),
3066 static struct sse_op_helper_s sse_op_table7[256] = {
3067 [0x08] = SSE41_OP(roundps),
3068 [0x09] = SSE41_OP(roundpd),
3069 [0x0a] = SSE41_OP(roundss),
3070 [0x0b] = SSE41_OP(roundsd),
3071 [0x0c] = SSE41_OP(blendps),
3072 [0x0d] = SSE41_OP(blendpd),
3073 [0x0e] = SSE41_OP(pblendw),
3074 [0x0f] = SSSE3_OP(palignr),
3075 [0x14] = SSE41_SPECIAL, /* pextrb */
3076 [0x15] = SSE41_SPECIAL, /* pextrw */
3077 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3078 [0x17] = SSE41_SPECIAL, /* extractps */
3079 [0x20] = SSE41_SPECIAL, /* pinsrb */
3080 [0x21] = SSE41_SPECIAL, /* insertps */
3081 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3082 [0x40] = SSE41_OP(dpps),
3083 [0x41] = SSE41_OP(dppd),
3084 [0x42] = SSE41_OP(mpsadbw),
3085 [0x60] = SSE42_OP(pcmpestrm),
3086 [0x61] = SSE42_OP(pcmpestri),
3087 [0x62] = SSE42_OP(pcmpistrm),
3088 [0x63] = SSE42_OP(pcmpistri),
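/* sse_op_table6 and sse_op_table7 cover the three-byte opcode maps
   0x0f 0x38 and 0x0f 0x3a (SSSE3/SSE4).  Besides the helper pair
   (MMX form, XMM form) each entry records the CPUID ECX feature bit that
   must be present; it is checked against cpuid_ext_features before the
   operation is emitted */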
3091 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3093 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3094 int modrm, mod, rm, reg, reg_addr, offset_addr;
3095 void *sse_op2;
3097 b &= 0xff;
3098 if (s->prefix & PREFIX_DATA)
3099 b1 = 1;
3100 else if (s->prefix & PREFIX_REPZ)
3101 b1 = 2;
3102 else if (s->prefix & PREFIX_REPNZ)
3103 b1 = 3;
3104 else
3105 b1 = 0;
3106 sse_op2 = sse_op_table1[b][b1];
3107 if (!sse_op2)
3108 goto illegal_op;
3109 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3110 is_xmm = 1;
3111 } else {
3112 if (b1 == 0) {
3113 /* MMX case */
3114 is_xmm = 0;
3115 } else {
3116 is_xmm = 1;
3119 /* simple MMX/SSE operation */
3120 if (s->flags & HF_TS_MASK) {
3121 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3122 return;
3124 if (s->flags & HF_EM_MASK) {
3125 illegal_op:
3126 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3127 return;
3129 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3130 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3131 goto illegal_op;
3132 if (b == 0x0e) {
3133 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3134 goto illegal_op;
3135 /* femms */
3136 gen_helper_emms();
3137 return;
3139 if (b == 0x77) {
3140 /* emms */
3141 gen_helper_emms();
3142 return;
3144 /* prepare MMX state (XXX: optimize by storing fpstt and fptags in
3145 the static cpu state) */
3146 if (!is_xmm) {
3147 gen_helper_enter_mmx();
3150 modrm = ldub_code(s->pc++);
3151 reg = ((modrm >> 3) & 7);
3152 if (is_xmm)
3153 reg |= rex_r;
3154 mod = (modrm >> 6) & 3;
3155 if (sse_op2 == SSE_SPECIAL) {
3156 b |= (b1 << 8);
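/* for SSE_SPECIAL opcodes the prefix selector b1 is folded into bits 8..9
   of 'b', so the case labels below use the form 0x0xx/0x1xx/0x2xx/0x3xx for
   no prefix / 0x66 / 0xF3 / 0xF2 respectively */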
3157 switch(b) {
3158 case 0x0e7: /* movntq */
3159 if (mod == 3)
3160 goto illegal_op;
3161 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3162 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3163 break;
3164 case 0x1e7: /* movntdq */
3165 case 0x02b: /* movntps */
3166 case 0x12b: /* movntpd */
3167 if (mod == 3)
3168 goto illegal_op;
3169 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3170 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3171 break;
3172 case 0x3f0: /* lddqu */
3173 if (mod == 3)
3174 goto illegal_op;
3175 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3176 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3177 break;
3178 case 0x22b: /* movntss */
3179 case 0x32b: /* movntsd */
3180 if (mod == 3)
3181 goto illegal_op;
3182 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3183 if (b1 & 1) {
3184 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,
3185 xmm_regs[reg]));
3186 } else {
3187 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3188 xmm_regs[reg].XMM_L(0)));
3189 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3191 break;
3192 case 0x6e: /* movd mm, ea */
3193 #ifdef TARGET_X86_64
3194 if (s->dflag == 2) {
3195 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3196 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3197 } else
3198 #endif
3200 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3201 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3202 offsetof(CPUX86State,fpregs[reg].mmx));
3203 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3204 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3206 break;
3207 case 0x16e: /* movd xmm, ea */
3208 #ifdef TARGET_X86_64
3209 if (s->dflag == 2) {
3210 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3211 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3212 offsetof(CPUX86State,xmm_regs[reg]));
3213 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3214 } else
3215 #endif
3217 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3218 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3219 offsetof(CPUX86State,xmm_regs[reg]));
3220 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3221 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3223 break;
3224 case 0x6f: /* movq mm, ea */
3225 if (mod != 3) {
3226 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3227 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3228 } else {
3229 rm = (modrm & 7);
3230 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3231 offsetof(CPUX86State,fpregs[rm].mmx));
3232 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3233 offsetof(CPUX86State,fpregs[reg].mmx));
3235 break;
3236 case 0x010: /* movups */
3237 case 0x110: /* movupd */
3238 case 0x028: /* movaps */
3239 case 0x128: /* movapd */
3240 case 0x16f: /* movdqa xmm, ea */
3241 case 0x26f: /* movdqu xmm, ea */
3242 if (mod != 3) {
3243 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3244 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3245 } else {
3246 rm = (modrm & 7) | REX_B(s);
3247 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3248 offsetof(CPUX86State,xmm_regs[rm]));
3250 break;
3251 case 0x210: /* movss xmm, ea */
3252 if (mod != 3) {
3253 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3254 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3255 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3256 gen_op_movl_T0_0();
3257 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3258 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3259 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3260 } else {
3261 rm = (modrm & 7) | REX_B(s);
3262 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3263 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3265 break;
3266 case 0x310: /* movsd xmm, ea */
3267 if (mod != 3) {
3268 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3269 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3270 gen_op_movl_T0_0();
3271 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3272 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3273 } else {
3274 rm = (modrm & 7) | REX_B(s);
3275 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3276 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3278 break;
3279 case 0x012: /* movlps */
3280 case 0x112: /* movlpd */
3281 if (mod != 3) {
3282 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3283 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3284 } else {
3285 /* movhlps */
3286 rm = (modrm & 7) | REX_B(s);
3287 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3288 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3290 break;
3291 case 0x212: /* movsldup */
3292 if (mod != 3) {
3293 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3294 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3295 } else {
3296 rm = (modrm & 7) | REX_B(s);
3297 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3298 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3299 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3300 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3302 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3303 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3304 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3305 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3306 break;
3307 case 0x312: /* movddup */
3308 if (mod != 3) {
3309 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3310 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3311 } else {
3312 rm = (modrm & 7) | REX_B(s);
3313 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3314 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3316 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3317 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3318 break;
3319 case 0x016: /* movhps */
3320 case 0x116: /* movhpd */
3321 if (mod != 3) {
3322 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3323 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3324 } else {
3325 /* movlhps */
3326 rm = (modrm & 7) | REX_B(s);
3327 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3328 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3330 break;
3331 case 0x216: /* movshdup */
3332 if (mod != 3) {
3333 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3334 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3335 } else {
3336 rm = (modrm & 7) | REX_B(s);
3337 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3338 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3339 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3340 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3342 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3343 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3344 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3345 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3346 break;
3347 case 0x178:
3348 case 0x378:
3350 int bit_index, field_length;
3352 if (b1 == 1 && reg != 0)
3353 goto illegal_op;
3354 field_length = ldub_code(s->pc++) & 0x3F;
3355 bit_index = ldub_code(s->pc++) & 0x3F;
3356 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3357 offsetof(CPUX86State,xmm_regs[reg]));
3358 if (b1 == 1)
3359 gen_helper_extrq_i(cpu_ptr0, tcg_const_i32(bit_index),
3360 tcg_const_i32(field_length));
3361 else
3362 gen_helper_insertq_i(cpu_ptr0, tcg_const_i32(bit_index),
3363 tcg_const_i32(field_length));
3365 break;
3366 case 0x7e: /* movd ea, mm */
3367 #ifdef TARGET_X86_64
3368 if (s->dflag == 2) {
3369 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3370 offsetof(CPUX86State,fpregs[reg].mmx));
3371 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3372 } else
3373 #endif
3375 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3376 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3377 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3379 break;
3380 case 0x17e: /* movd ea, xmm */
3381 #ifdef TARGET_X86_64
3382 if (s->dflag == 2) {
3383 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3384 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3385 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3386 } else
3387 #endif
3389 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3390 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3391 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3393 break;
3394 case 0x27e: /* movq xmm, ea */
3395 if (mod != 3) {
3396 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3397 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3398 } else {
3399 rm = (modrm & 7) | REX_B(s);
3400 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3401 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3403 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3404 break;
3405 case 0x7f: /* movq ea, mm */
3406 if (mod != 3) {
3407 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3408 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3409 } else {
3410 rm = (modrm & 7);
3411 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3412 offsetof(CPUX86State,fpregs[reg].mmx));
3414 break;
3415 case 0x011: /* movups */
3416 case 0x111: /* movupd */
3417 case 0x029: /* movaps */
3418 case 0x129: /* movapd */
3419 case 0x17f: /* movdqa ea, xmm */
3420 case 0x27f: /* movdqu ea, xmm */
3421 if (mod != 3) {
3422 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3423 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3424 } else {
3425 rm = (modrm & 7) | REX_B(s);
3426 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3427 offsetof(CPUX86State,xmm_regs[reg]));
3429 break;
3430 case 0x211: /* movss ea, xmm */
3431 if (mod != 3) {
3432 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3433 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3434 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3435 } else {
3436 rm = (modrm & 7) | REX_B(s);
3437 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3438 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3440 break;
3441 case 0x311: /* movsd ea, xmm */
3442 if (mod != 3) {
3443 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3444 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3445 } else {
3446 rm = (modrm & 7) | REX_B(s);
3447 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3448 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3450 break;
3451 case 0x013: /* movlps */
3452 case 0x113: /* movlpd */
3453 if (mod != 3) {
3454 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3455 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3456 } else {
3457 goto illegal_op;
3459 break;
3460 case 0x017: /* movhps */
3461 case 0x117: /* movhpd */
3462 if (mod != 3) {
3463 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3464 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3465 } else {
3466 goto illegal_op;
3468 break;
3469 case 0x71: /* shift mm, im */
3470 case 0x72:
3471 case 0x73:
3472 case 0x171: /* shift xmm, im */
3473 case 0x172:
3474 case 0x173:
3475 val = ldub_code(s->pc++);
3476 if (is_xmm) {
3477 gen_op_movl_T0_im(val);
3478 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3479 gen_op_movl_T0_0();
3480 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3481 op1_offset = offsetof(CPUX86State,xmm_t0);
3482 } else {
3483 gen_op_movl_T0_im(val);
3484 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3485 gen_op_movl_T0_0();
3486 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3487 op1_offset = offsetof(CPUX86State,mmx_t0);
3489 sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
3490 if (!sse_op2)
3491 goto illegal_op;
3492 if (is_xmm) {
3493 rm = (modrm & 7) | REX_B(s);
3494 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3495 } else {
3496 rm = (modrm & 7);
3497 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3499 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3500 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3501 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3502 break;
3503 case 0x050: /* movmskps */
3504 rm = (modrm & 7) | REX_B(s);
3505 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3506 offsetof(CPUX86State,xmm_regs[rm]));
3507 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
3508 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3509 gen_op_mov_reg_T0(OT_LONG, reg);
3510 break;
3511 case 0x150: /* movmskpd */
3512 rm = (modrm & 7) | REX_B(s);
3513 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3514 offsetof(CPUX86State,xmm_regs[rm]));
3515 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
3516 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3517 gen_op_mov_reg_T0(OT_LONG, reg);
3518 break;
3519 case 0x02a: /* cvtpi2ps */
3520 case 0x12a: /* cvtpi2pd */
3521 gen_helper_enter_mmx();
3522 if (mod != 3) {
3523 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3524 op2_offset = offsetof(CPUX86State,mmx_t0);
3525 gen_ldq_env_A0(s->mem_index, op2_offset);
3526 } else {
3527 rm = (modrm & 7);
3528 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3530 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3531 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3532 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3533 switch(b >> 8) {
3534 case 0x0:
3535 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
3536 break;
3537 default:
3538 case 0x1:
3539 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
3540 break;
3542 break;
3543 case 0x22a: /* cvtsi2ss */
3544 case 0x32a: /* cvtsi2sd */
3545 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3546 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3547 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3548 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3549 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
3550 if (ot == OT_LONG) {
3551 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3552 ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
3553 } else {
3554 ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
3556 break;
3557 case 0x02c: /* cvttps2pi */
3558 case 0x12c: /* cvttpd2pi */
3559 case 0x02d: /* cvtps2pi */
3560 case 0x12d: /* cvtpd2pi */
3561 gen_helper_enter_mmx();
3562 if (mod != 3) {
3563 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3564 op2_offset = offsetof(CPUX86State,xmm_t0);
3565 gen_ldo_env_A0(s->mem_index, op2_offset);
3566 } else {
3567 rm = (modrm & 7) | REX_B(s);
3568 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3570 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3571 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3572 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3573 switch(b) {
3574 case 0x02c:
3575 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
3576 break;
3577 case 0x12c:
3578 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
3579 break;
3580 case 0x02d:
3581 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
3582 break;
3583 case 0x12d:
3584 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
3585 break;
3587 break;
3588 case 0x22c: /* cvttss2si */
3589 case 0x32c: /* cvttsd2si */
3590 case 0x22d: /* cvtss2si */
3591 case 0x32d: /* cvtsd2si */
3592 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3593 if (mod != 3) {
3594 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3595 if ((b >> 8) & 1) {
3596 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3597 } else {
3598 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3599 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3601 op2_offset = offsetof(CPUX86State,xmm_t0);
3602 } else {
3603 rm = (modrm & 7) | REX_B(s);
3604 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3606 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
3607 (b & 1) * 4];
3608 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3609 if (ot == OT_LONG) {
3610 ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
3611 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3612 } else {
3613 ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
3615 gen_op_mov_reg_T0(ot, reg);
3616 break;
3617 case 0xc4: /* pinsrw */
3618 case 0x1c4:
3619 s->rip_offset = 1;
3620 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3621 val = ldub_code(s->pc++);
3622 if (b1) {
3623 val &= 7;
3624 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3625 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3626 } else {
3627 val &= 3;
3628 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3629 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3631 break;
3632 case 0xc5: /* pextrw */
3633 case 0x1c5:
3634 if (mod != 3)
3635 goto illegal_op;
3636 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3637 val = ldub_code(s->pc++);
3638 if (b1) {
3639 val &= 7;
3640 rm = (modrm & 7) | REX_B(s);
3641 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3642 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3643 } else {
3644 val &= 3;
3645 rm = (modrm & 7);
3646 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3647 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3649 reg = ((modrm >> 3) & 7) | rex_r;
3650 gen_op_mov_reg_T0(ot, reg);
3651 break;
3652 case 0x1d6: /* movq ea, xmm */
3653 if (mod != 3) {
3654 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3655 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3656 } else {
3657 rm = (modrm & 7) | REX_B(s);
3658 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3659 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3660 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3662 break;
3663 case 0x2d6: /* movq2dq */
3664 gen_helper_enter_mmx();
3665 rm = (modrm & 7);
3666 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3667 offsetof(CPUX86State,fpregs[rm].mmx));
3668 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3669 break;
3670 case 0x3d6: /* movdq2q */
3671 gen_helper_enter_mmx();
3672 rm = (modrm & 7) | REX_B(s);
3673 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3674 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3675 break;
3676 case 0xd7: /* pmovmskb */
3677 case 0x1d7:
3678 if (mod != 3)
3679 goto illegal_op;
3680 if (b1) {
3681 rm = (modrm & 7) | REX_B(s);
3682 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3683 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
3684 } else {
3685 rm = (modrm & 7);
3686 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3687 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
3689 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3690 reg = ((modrm >> 3) & 7) | rex_r;
3691 gen_op_mov_reg_T0(OT_LONG, reg);
3692 break;
3693 case 0x138:
3694 if (s->prefix & PREFIX_REPNZ)
3695 goto crc32;
3696 case 0x038:
3697 b = modrm;
3698 modrm = ldub_code(s->pc++);
3699 rm = modrm & 7;
3700 reg = ((modrm >> 3) & 7) | rex_r;
3701 mod = (modrm >> 6) & 3;
3703 sse_op2 = sse_op_table6[b].op[b1];
3704 if (!sse_op2)
3705 goto illegal_op;
3706 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3707 goto illegal_op;
3709 if (b1) {
3710 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3711 if (mod == 3) {
3712 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3713 } else {
3714 op2_offset = offsetof(CPUX86State,xmm_t0);
3715 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3716 switch (b) {
3717 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3718 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3719 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3720 gen_ldq_env_A0(s->mem_index, op2_offset +
3721 offsetof(XMMReg, XMM_Q(0)));
3722 break;
3723 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3724 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3725 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3726 (s->mem_index >> 2) - 1);
3727 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3728 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3729 offsetof(XMMReg, XMM_L(0)));
3730 break;
3731 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3732 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3733 (s->mem_index >> 2) - 1);
3734 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3735 offsetof(XMMReg, XMM_W(0)));
3736 break;
3737 case 0x2a: /* movntdqa */
3738 gen_ldo_env_A0(s->mem_index, op1_offset);
3739 return;
3740 default:
3741 gen_ldo_env_A0(s->mem_index, op2_offset);
3744 } else {
3745 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3746 if (mod == 3) {
3747 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3748 } else {
3749 op2_offset = offsetof(CPUX86State,mmx_t0);
3750 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3751 gen_ldq_env_A0(s->mem_index, op2_offset);
3754 if (sse_op2 == SSE_SPECIAL)
3755 goto illegal_op;
3757 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3758 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3759 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3761 if (b == 0x17)
3762 s->cc_op = CC_OP_EFLAGS;
3763 break;
3764 case 0x338: /* crc32 */
3765 crc32:
3766 b = modrm;
3767 modrm = ldub_code(s->pc++);
3768 reg = ((modrm >> 3) & 7) | rex_r;
3770 if (b != 0xf0 && b != 0xf1)
3771 goto illegal_op;
3772 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3773 goto illegal_op;
3775 if (b == 0xf0)
3776 ot = OT_BYTE;
3777 else if (b == 0xf1 && s->dflag != 2)
3778 if (s->prefix & PREFIX_DATA)
3779 ot = OT_WORD;
3780 else
3781 ot = OT_LONG;
3782 else
3783 ot = OT_QUAD;
3785 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3786 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3787 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3788 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3789 cpu_T[0], tcg_const_i32(8 << ot));
3791 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3792 gen_op_mov_reg_T0(ot, reg);
3793 break;
3794 case 0x03a:
3795 case 0x13a:
3796 b = modrm;
3797 modrm = ldub_code(s->pc++);
3798 rm = modrm & 7;
3799 reg = ((modrm >> 3) & 7) | rex_r;
3800 mod = (modrm >> 6) & 3;
3802 sse_op2 = sse_op_table7[b].op[b1];
3803 if (!sse_op2)
3804 goto illegal_op;
3805 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3806 goto illegal_op;
3808 if (sse_op2 == SSE_SPECIAL) {
3809 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3810 rm = (modrm & 7) | REX_B(s);
3811 if (mod != 3)
3812 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3813 reg = ((modrm >> 3) & 7) | rex_r;
3814 val = ldub_code(s->pc++);
3815 switch (b) {
3816 case 0x14: /* pextrb */
3817 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3818 xmm_regs[reg].XMM_B(val & 15)));
3819 if (mod == 3)
3820 gen_op_mov_reg_T0(ot, rm);
3821 else
3822 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3823 (s->mem_index >> 2) - 1);
3824 break;
3825 case 0x15: /* pextrw */
3826 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3827 xmm_regs[reg].XMM_W(val & 7)));
3828 if (mod == 3)
3829 gen_op_mov_reg_T0(ot, rm);
3830 else
3831 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3832 (s->mem_index >> 2) - 1);
3833 break;
3834 case 0x16:
3835 if (ot == OT_LONG) { /* pextrd */
3836 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3837 offsetof(CPUX86State,
3838 xmm_regs[reg].XMM_L(val & 3)));
3839 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3840 if (mod == 3)
3841 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3842 else
3843 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3844 (s->mem_index >> 2) - 1);
3845 } else { /* pextrq */
3846 #ifdef TARGET_X86_64
3847 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3848 offsetof(CPUX86State,
3849 xmm_regs[reg].XMM_Q(val & 1)));
3850 if (mod == 3)
3851 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3852 else
3853 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3854 (s->mem_index >> 2) - 1);
3855 #else
3856 goto illegal_op;
3857 #endif
3859 break;
3860 case 0x17: /* extractps */
3861 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3862 xmm_regs[reg].XMM_L(val & 3)));
3863 if (mod == 3)
3864 gen_op_mov_reg_T0(ot, rm);
3865 else
3866 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3867 (s->mem_index >> 2) - 1);
3868 break;
3869 case 0x20: /* pinsrb */
3870 if (mod == 3)
3871 gen_op_mov_TN_reg(OT_LONG, 0, rm);
3872 else
3873 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
3874 (s->mem_index >> 2) - 1);
3875 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
3876 xmm_regs[reg].XMM_B(val & 15)));
3877 break;
3878 case 0x21: /* insertps */
3879 if (mod == 3) {
3880 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3881 offsetof(CPUX86State,xmm_regs[rm]
3882 .XMM_L((val >> 6) & 3)));
3883 } else {
3884 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3885 (s->mem_index >> 2) - 1);
3886 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3888 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3889 offsetof(CPUX86State,xmm_regs[reg]
3890 .XMM_L((val >> 4) & 3)));
3891 if ((val >> 0) & 1)
3892 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3893 cpu_env, offsetof(CPUX86State,
3894 xmm_regs[reg].XMM_L(0)));
3895 if ((val >> 1) & 1)
3896 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3897 cpu_env, offsetof(CPUX86State,
3898 xmm_regs[reg].XMM_L(1)));
3899 if ((val >> 2) & 1)
3900 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3901 cpu_env, offsetof(CPUX86State,
3902 xmm_regs[reg].XMM_L(2)));
3903 if ((val >> 3) & 1)
3904 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3905 cpu_env, offsetof(CPUX86State,
3906 xmm_regs[reg].XMM_L(3)));
3907 break;
3908 case 0x22:
3909 if (ot == OT_LONG) { /* pinsrd */
3910 if (mod == 3)
3911 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
3912 else
3913 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3914 (s->mem_index >> 2) - 1);
3915 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3916 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3917 offsetof(CPUX86State,
3918 xmm_regs[reg].XMM_L(val & 3)));
3919 } else { /* pinsrq */
3920 #ifdef TARGET_X86_64
3921 if (mod == 3)
3922 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3923 else
3924 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
3925 (s->mem_index >> 2) - 1);
3926 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3927 offsetof(CPUX86State,
3928 xmm_regs[reg].XMM_Q(val & 1)));
3929 #else
3930 goto illegal_op;
3931 #endif
3933 break;
3935 return;
3938 if (b1) {
3939 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3940 if (mod == 3) {
3941 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3942 } else {
3943 op2_offset = offsetof(CPUX86State,xmm_t0);
3944 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3945 gen_ldo_env_A0(s->mem_index, op2_offset);
3947 } else {
3948 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3949 if (mod == 3) {
3950 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3951 } else {
3952 op2_offset = offsetof(CPUX86State,mmx_t0);
3953 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3954 gen_ldq_env_A0(s->mem_index, op2_offset);
3957 val = ldub_code(s->pc++);
3959 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3960 s->cc_op = CC_OP_EFLAGS;
3962 if (s->dflag == 2)
3963 /* The helper must use entire 64-bit gp registers */
3964 val |= 1 << 8;
3967 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3968 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3969 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3970 break;
3971 default:
3972 goto illegal_op;
3974 } else {
3975 /* generic MMX or SSE operation */
3976 switch(b) {
3977 case 0x70: /* pshufx insn */
3978 case 0xc6: /* shufps/shufpd insn */
3979 case 0xc2: /* compare insns */
3980 s->rip_offset = 1;
3981 break;
3982 default:
3983 break;
3985 if (is_xmm) {
3986 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3987 if (mod != 3) {
3988 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3989 op2_offset = offsetof(CPUX86State,xmm_t0);
3990 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
3991 b == 0xc2)) {
3992 /* specific case for SSE single instructions */
3993 if (b1 == 2) {
3994 /* 32 bit access */
3995 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3996 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3997 } else {
3998 /* 64 bit access */
3999 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
4001 } else {
4002 gen_ldo_env_A0(s->mem_index, op2_offset);
4004 } else {
4005 rm = (modrm & 7) | REX_B(s);
4006 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4008 } else {
4009 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4010 if (mod != 3) {
4011 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4012 op2_offset = offsetof(CPUX86State,mmx_t0);
4013 gen_ldq_env_A0(s->mem_index, op2_offset);
4014 } else {
4015 rm = (modrm & 7);
4016 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4019 switch(b) {
4020 case 0x0f: /* 3DNow! data insns */
4021 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4022 goto illegal_op;
4023 val = ldub_code(s->pc++);
4024 sse_op2 = sse_op_table5[val];
4025 if (!sse_op2)
4026 goto illegal_op;
4027 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4028 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4029 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4030 break;
4031 case 0x70: /* pshufx insn */
4032 case 0xc6: /* shufps/shufpd insn */
4033 val = ldub_code(s->pc++);
4034 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4035 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4036 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4037 break;
4038 case 0xc2:
4039 /* compare insns */
4040 val = ldub_code(s->pc++);
4041 if (val >= 8)
4042 goto illegal_op;
4043 sse_op2 = sse_op_table4[val][b1];
4044 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4045 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4046 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4047 break;
4048 case 0xf7:
4049 /* maskmov: the implicit memory operand is at DS:[EDI/RDI], so prepare A0 */
4050 if (mod != 3)
4051 goto illegal_op;
4052 #ifdef TARGET_X86_64
4053 if (s->aflag == 2) {
4054 gen_op_movq_A0_reg(R_EDI);
4055 } else
4056 #endif
4058 gen_op_movl_A0_reg(R_EDI);
4059 if (s->aflag == 0)
4060 gen_op_andl_A0_ffff();
4062 gen_add_A0_ds_seg(s);
4064 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4065 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4066 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
4067 break;
4068 default:
4069 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4070 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4071 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4072 break;
4074 if (b == 0x2e || b == 0x2f) {
4075 s->cc_op = CC_OP_EFLAGS;
4080 /* convert one instruction. s->is_jmp is set if the translation must
4081 be stopped. Return the next pc value */
4082 static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
4084 int b, prefixes, aflag, dflag;
4085 int shift, ot;
4086 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4087 target_ulong next_eip, tval;
4088 int rex_w, rex_r;
4090 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
4091 tcg_gen_debug_insn_start(pc_start);
4092 s->pc = pc_start;
4093 prefixes = 0;
4094 aflag = s->code32;
4095 dflag = s->code32;
4096 s->override = -1;
4097 rex_w = -1;
4098 rex_r = 0;
4099 #ifdef TARGET_X86_64
4100 s->rex_x = 0;
4101 s->rex_b = 0;
4102 x86_64_hregs = 0;
4103 #endif
4104 s->rip_offset = 0; /* for relative ip address */
4105 next_byte:
4106 b = ldub_code(s->pc);
4107 s->pc++;
4108 /* check prefixes */
4109 #ifdef TARGET_X86_64
4110 if (CODE64(s)) {
4111 switch (b) {
4112 case 0xf3:
4113 prefixes |= PREFIX_REPZ;
4114 goto next_byte;
4115 case 0xf2:
4116 prefixes |= PREFIX_REPNZ;
4117 goto next_byte;
4118 case 0xf0:
4119 prefixes |= PREFIX_LOCK;
4120 goto next_byte;
4121 case 0x2e:
4122 s->override = R_CS;
4123 goto next_byte;
4124 case 0x36:
4125 s->override = R_SS;
4126 goto next_byte;
4127 case 0x3e:
4128 s->override = R_DS;
4129 goto next_byte;
4130 case 0x26:
4131 s->override = R_ES;
4132 goto next_byte;
4133 case 0x64:
4134 s->override = R_FS;
4135 goto next_byte;
4136 case 0x65:
4137 s->override = R_GS;
4138 goto next_byte;
4139 case 0x66:
4140 prefixes |= PREFIX_DATA;
4141 goto next_byte;
4142 case 0x67:
4143 prefixes |= PREFIX_ADR;
4144 goto next_byte;
4145 case 0x40 ... 0x4f:
4146 /* REX prefix */
4147 rex_w = (b >> 3) & 1;
4148 rex_r = (b & 0x4) << 1;
4149 s->rex_x = (b & 0x2) << 2;
4150 REX_B(s) = (b & 0x1) << 3;
4151 x86_64_hregs = 1; /* select uniform byte register addressing */
4152 goto next_byte;
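/* the REX bits decoded above are pre-shifted so that they can simply be
   OR'ed into the 3-bit modrm reg/index/base fields to form the extended
   register number */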
4154 if (rex_w == 1) {
4155 /* 0x66 is ignored if rex.w is set */
4156 dflag = 2;
4157 } else {
4158 if (prefixes & PREFIX_DATA)
4159 dflag ^= 1;
4161 if (!(prefixes & PREFIX_ADR))
4162 aflag = 2;
4163 } else
4164 #endif
4166 switch (b) {
4167 case 0xf3:
4168 prefixes |= PREFIX_REPZ;
4169 goto next_byte;
4170 case 0xf2:
4171 prefixes |= PREFIX_REPNZ;
4172 goto next_byte;
4173 case 0xf0:
4174 prefixes |= PREFIX_LOCK;
4175 goto next_byte;
4176 case 0x2e:
4177 s->override = R_CS;
4178 goto next_byte;
4179 case 0x36:
4180 s->override = R_SS;
4181 goto next_byte;
4182 case 0x3e:
4183 s->override = R_DS;
4184 goto next_byte;
4185 case 0x26:
4186 s->override = R_ES;
4187 goto next_byte;
4188 case 0x64:
4189 s->override = R_FS;
4190 goto next_byte;
4191 case 0x65:
4192 s->override = R_GS;
4193 goto next_byte;
4194 case 0x66:
4195 prefixes |= PREFIX_DATA;
4196 goto next_byte;
4197 case 0x67:
4198 prefixes |= PREFIX_ADR;
4199 goto next_byte;
4200 }
4201 if (prefixes & PREFIX_DATA)
4202 dflag ^= 1;
4203 if (prefixes & PREFIX_ADR)
4204 aflag ^= 1;
4205 }
4207 s->prefix = prefixes;
4208 s->aflag = aflag;
4209 s->dflag = dflag;
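/* Example of the precedence rules above, in 64-bit mode: for
   "66 48 01 c8" the 0x66 prefix sets PREFIX_DATA, but the following
   REX byte 0x48 has rex_w = 1, so dflag ends up 2 (64-bit operand
   size) and the 0x66 is ignored, as the architecture requires. */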
4211 /* lock generation */
4212 if (prefixes & PREFIX_LOCK)
4213 gen_helper_lock();
4215 /* now check op code */
4216 reswitch:
4217 switch(b) {
4218 case 0x0f:
4219 /**************************/
4220 /* extended op code */
4221 b = ldub_code(s->pc++) | 0x100;
4222 goto reswitch;
4224 /**************************/
4225 /* arith & logic */
4226 case 0x00 ... 0x05:
4227 case 0x08 ... 0x0d:
4228 case 0x10 ... 0x15:
4229 case 0x18 ... 0x1d:
4230 case 0x20 ... 0x25:
4231 case 0x28 ... 0x2d:
4232 case 0x30 ... 0x35:
4233 case 0x38 ... 0x3d:
4234 {
4235 int op, f, val;
4236 op = (b >> 3) & 7;
4237 f = (b >> 1) & 3;
4239 if ((b & 1) == 0)
4240 ot = OT_BYTE;
4241 else
4242 ot = dflag + OT_WORD;
4244 switch(f) {
4245 case 0: /* OP Ev, Gv */
4246 modrm = ldub_code(s->pc++);
4247 reg = ((modrm >> 3) & 7) | rex_r;
4248 mod = (modrm >> 6) & 3;
4249 rm = (modrm & 7) | REX_B(s);
4250 if (mod != 3) {
4251 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4252 opreg = OR_TMP0;
4253 } else if (op == OP_XORL && rm == reg) {
4254 xor_zero:
4255 /* xor reg, reg optimisation */
4256 gen_op_movl_T0_0();
4257 s->cc_op = CC_OP_LOGICB + ot;
4258 gen_op_mov_reg_T0(ot, reg);
4259 gen_op_update1_cc();
4260 break;
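/* "xor reg,reg" is the idiom compilers emit to zero a register, so it
   is special-cased above: the old register value is never read, T0 is
   simply forced to 0, written back, and the flags are computed from
   the zero result as for any logic op. */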
4261 } else {
4262 opreg = rm;
4263 }
4264 gen_op_mov_TN_reg(ot, 1, reg);
4265 gen_op(s, op, ot, opreg);
4266 break;
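/* ModRM decoding sketch: the byte is mod(7:6) reg(5:3) rm(2:0), with
   rex_r and REX_B(s) supplying bit 3 of reg/rm in 64-bit mode.  E.g.
   "01 c3" (add Ev,Gv) has modrm = 0xc3, giving mod = 3, reg = 0 (EAX)
   and rm = 3 (EBX): a register-direct add of EAX into EBX. */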
4267 case 1: /* OP Gv, Ev */
4268 modrm = ldub_code(s->pc++);
4269 mod = (modrm >> 6) & 3;
4270 reg = ((modrm >> 3) & 7) | rex_r;
4271 rm = (modrm & 7) | REX_B(s);
4272 if (mod != 3) {
4273 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4274 gen_op_ld_T1_A0(ot + s->mem_index);
4275 } else if (op == OP_XORL && rm == reg) {
4276 goto xor_zero;
4277 } else {
4278 gen_op_mov_TN_reg(ot, 1, rm);
4279 }
4280 gen_op(s, op, ot, reg);
4281 break;
4282 case 2: /* OP A, Iv */
4283 val = insn_get(s, ot);
4284 gen_op_movl_T1_im(val);
4285 gen_op(s, op, ot, OR_EAX);
4286 break;
4287 }
4288 }
4289 break;
4291 case 0x82:
4292 if (CODE64(s))
4293 goto illegal_op;
4294 case 0x80: /* GRP1 */
4295 case 0x81:
4296 case 0x83:
4297 {
4298 int val;
4300 if ((b & 1) == 0)
4301 ot = OT_BYTE;
4302 else
4303 ot = dflag + OT_WORD;
4305 modrm = ldub_code(s->pc++);
4306 mod = (modrm >> 6) & 3;
4307 rm = (modrm & 7) | REX_B(s);
4308 op = (modrm >> 3) & 7;
4310 if (mod != 3) {
4311 if (b == 0x83)
4312 s->rip_offset = 1;
4313 else
4314 s->rip_offset = insn_const_size(ot);
4315 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4316 opreg = OR_TMP0;
4317 } else {
4318 opreg = rm;
4319 }
4321 switch(b) {
4322 default:
4323 case 0x80:
4324 case 0x81:
4325 case 0x82:
4326 val = insn_get(s, ot);
4327 break;
4328 case 0x83:
4329 val = (int8_t)insn_get(s, OT_BYTE);
4330 break;
4331 }
4332 gen_op_movl_T1_im(val);
4333 gen_op(s, op, ot, opreg);
4334 }
4335 break;
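/* The 0x83 form carries a sign-extended 8-bit immediate, hence the
   int8_t cast above: "83 c0 ff" means "add $-1, %eax", not
   "add $0xff, %eax", and rip_offset is 1 rather than the full
   immediate size. */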
4337 /**************************/
4338 /* inc, dec, and other misc arith */
4339 case 0x40 ... 0x47: /* inc Gv */
4340 ot = dflag ? OT_LONG : OT_WORD;
4341 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4342 break;
4343 case 0x48 ... 0x4f: /* dec Gv */
4344 ot = dflag ? OT_LONG : OT_WORD;
4345 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4346 break;
4347 case 0xf6: /* GRP3 */
4348 case 0xf7:
4349 if ((b & 1) == 0)
4350 ot = OT_BYTE;
4351 else
4352 ot = dflag + OT_WORD;
4354 modrm = ldub_code(s->pc++);
4355 mod = (modrm >> 6) & 3;
4356 rm = (modrm & 7) | REX_B(s);
4357 op = (modrm >> 3) & 7;
4358 if (mod != 3) {
4359 if (op == 0)
4360 s->rip_offset = insn_const_size(ot);
4361 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4362 gen_op_ld_T0_A0(ot + s->mem_index);
4363 } else {
4364 gen_op_mov_TN_reg(ot, 0, rm);
4365 }
4367 switch(op) {
4368 case 0: /* test */
4369 val = insn_get(s, ot);
4370 gen_op_movl_T1_im(val);
4371 gen_op_testl_T0_T1_cc();
4372 s->cc_op = CC_OP_LOGICB + ot;
4373 break;
4374 case 2: /* not */
4375 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4376 if (mod != 3) {
4377 gen_op_st_T0_A0(ot + s->mem_index);
4378 } else {
4379 gen_op_mov_reg_T0(ot, rm);
4380 }
4381 break;
4382 case 3: /* neg */
4383 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4384 if (mod != 3) {
4385 gen_op_st_T0_A0(ot + s->mem_index);
4386 } else {
4387 gen_op_mov_reg_T0(ot, rm);
4388 }
4389 gen_op_update_neg_cc();
4390 s->cc_op = CC_OP_SUBB + ot;
4391 break;
4392 case 4: /* mul */
4393 switch(ot) {
4394 case OT_BYTE:
4395 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4396 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4397 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4398 /* XXX: use 32 bit mul which could be faster */
4399 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4400 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4401 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4402 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4403 s->cc_op = CC_OP_MULB;
4404 break;
4405 case OT_WORD:
4406 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4407 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4408 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4409 /* XXX: use 32 bit mul which could be faster */
4410 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4411 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4412 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4413 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4414 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4415 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4416 s->cc_op = CC_OP_MULW;
4417 break;
4418 default:
4419 case OT_LONG:
4420 #ifdef TARGET_X86_64
4421 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4422 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4423 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4424 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4425 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4426 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4427 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4428 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4429 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4430 #else
4431 {
4432 TCGv_i64 t0, t1;
4433 t0 = tcg_temp_new_i64();
4434 t1 = tcg_temp_new_i64();
4435 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4436 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4437 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4438 tcg_gen_mul_i64(t0, t0, t1);
4439 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4440 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4441 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4442 tcg_gen_shri_i64(t0, t0, 32);
4443 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4444 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4445 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4446 }
4447 #endif
4448 s->cc_op = CC_OP_MULL;
4449 break;
4450 #ifdef TARGET_X86_64
4451 case OT_QUAD:
4452 gen_helper_mulq_EAX_T0(cpu_T[0]);
4453 s->cc_op = CC_OP_MULQ;
4454 break;
4455 #endif
4456 }
4457 break;
4458 case 5: /* imul */
4459 switch(ot) {
4460 case OT_BYTE:
4461 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4462 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4463 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4464 /* XXX: use 32 bit mul which could be faster */
4465 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4466 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4467 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4468 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4469 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4470 s->cc_op = CC_OP_MULB;
4471 break;
4472 case OT_WORD:
4473 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4474 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4475 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4476 /* XXX: use 32 bit mul which could be faster */
4477 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4478 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4479 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4480 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4481 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4482 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4483 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4484 s->cc_op = CC_OP_MULW;
4485 break;
4486 default:
4487 case OT_LONG:
4488 #ifdef TARGET_X86_64
4489 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4490 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4491 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4492 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4493 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4494 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4495 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4496 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4497 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4498 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4499 #else
4500 {
4501 TCGv_i64 t0, t1;
4502 t0 = tcg_temp_new_i64();
4503 t1 = tcg_temp_new_i64();
4504 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4505 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4506 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4507 tcg_gen_mul_i64(t0, t0, t1);
4508 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4509 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4510 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4511 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4512 tcg_gen_shri_i64(t0, t0, 32);
4513 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4514 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4515 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4516 }
4517 #endif
4518 s->cc_op = CC_OP_MULL;
4519 break;
4520 #ifdef TARGET_X86_64
4521 case OT_QUAD:
4522 gen_helper_imulq_EAX_T0(cpu_T[0]);
4523 s->cc_op = CC_OP_MULQ;
4524 break;
4525 #endif
4526 }
4527 break;
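/* In the imul cases above, CF/OF detection uses a simple trick:
   cpu_cc_src is set to result - sign_extend(low_half(result)), which
   is non-zero exactly when the signed product does not fit in the
   destination width; the CC_OP_MUL* flag computation then reports
   overflow from that.  (The unsigned mul cases store the high half
   itself in cpu_cc_src for the same purpose.) */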
4528 case 6: /* div */
4529 switch(ot) {
4530 case OT_BYTE:
4531 gen_jmp_im(pc_start - s->cs_base);
4532 gen_helper_divb_AL(cpu_T[0]);
4533 break;
4534 case OT_WORD:
4535 gen_jmp_im(pc_start - s->cs_base);
4536 gen_helper_divw_AX(cpu_T[0]);
4537 break;
4538 default:
4539 case OT_LONG:
4540 gen_jmp_im(pc_start - s->cs_base);
4541 gen_helper_divl_EAX(cpu_T[0]);
4542 break;
4543 #ifdef TARGET_X86_64
4544 case OT_QUAD:
4545 gen_jmp_im(pc_start - s->cs_base);
4546 gen_helper_divq_EAX(cpu_T[0]);
4547 break;
4548 #endif
4549 }
4550 break;
4551 case 7: /* idiv */
4552 switch(ot) {
4553 case OT_BYTE:
4554 gen_jmp_im(pc_start - s->cs_base);
4555 gen_helper_idivb_AL(cpu_T[0]);
4556 break;
4557 case OT_WORD:
4558 gen_jmp_im(pc_start - s->cs_base);
4559 gen_helper_idivw_AX(cpu_T[0]);
4560 break;
4561 default:
4562 case OT_LONG:
4563 gen_jmp_im(pc_start - s->cs_base);
4564 gen_helper_idivl_EAX(cpu_T[0]);
4565 break;
4566 #ifdef TARGET_X86_64
4567 case OT_QUAD:
4568 gen_jmp_im(pc_start - s->cs_base);
4569 gen_helper_idivq_EAX(cpu_T[0]);
4570 break;
4571 #endif
4572 }
4573 break;
4574 default:
4575 goto illegal_op;
4576 }
4577 break;
4579 case 0xfe: /* GRP4 */
4580 case 0xff: /* GRP5 */
4581 if ((b & 1) == 0)
4582 ot = OT_BYTE;
4583 else
4584 ot = dflag + OT_WORD;
4586 modrm = ldub_code(s->pc++);
4587 mod = (modrm >> 6) & 3;
4588 rm = (modrm & 7) | REX_B(s);
4589 op = (modrm >> 3) & 7;
4590 if (op >= 2 && b == 0xfe) {
4591 goto illegal_op;
4592 }
4593 if (CODE64(s)) {
4594 if (op == 2 || op == 4) {
4595 /* operand size for jumps is 64 bit */
4596 ot = OT_QUAD;
4597 } else if (op == 3 || op == 5) {
4598 ot = dflag ? OT_LONG + (rex_w == 1) : OT_WORD;
4599 } else if (op == 6) {
4600 /* default push size is 64 bit */
4601 ot = dflag ? OT_QUAD : OT_WORD;
4602 }
4604 if (mod != 3) {
4605 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4606 if (op >= 2 && op != 3 && op != 5)
4607 gen_op_ld_T0_A0(ot + s->mem_index);
4608 } else {
4609 gen_op_mov_TN_reg(ot, 0, rm);
4610 }
4612 switch(op) {
4613 case 0: /* inc Ev */
4614 if (mod != 3)
4615 opreg = OR_TMP0;
4616 else
4617 opreg = rm;
4618 gen_inc(s, ot, opreg, 1);
4619 break;
4620 case 1: /* dec Ev */
4621 if (mod != 3)
4622 opreg = OR_TMP0;
4623 else
4624 opreg = rm;
4625 gen_inc(s, ot, opreg, -1);
4626 break;
4627 case 2: /* call Ev */
4628 /* XXX: optimize if memory (no 'and' is necessary) */
4629 if (s->dflag == 0)
4630 gen_op_andl_T0_ffff();
4631 next_eip = s->pc - s->cs_base;
4632 gen_movtl_T1_im(next_eip);
4633 gen_push_T1(s);
4634 gen_op_jmp_T0();
4635 gen_eob(s);
4636 break;
4637 case 3: /* lcall Ev */
4638 gen_op_ld_T1_A0(ot + s->mem_index);
4639 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4640 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4641 do_lcall:
4642 if (s->pe && !s->vm86) {
4643 if (s->cc_op != CC_OP_DYNAMIC)
4644 gen_op_set_cc_op(s->cc_op);
4645 gen_jmp_im(pc_start - s->cs_base);
4646 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4647 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
4648 tcg_const_i32(dflag),
4649 tcg_const_i32(s->pc - pc_start));
4650 } else {
4651 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4652 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
4653 tcg_const_i32(dflag),
4654 tcg_const_i32(s->pc - s->cs_base));
4655 }
4656 gen_eob(s);
4657 break;
4658 case 4: /* jmp Ev */
4659 if (s->dflag == 0)
4660 gen_op_andl_T0_ffff();
4661 gen_op_jmp_T0();
4662 gen_eob(s);
4663 break;
4664 case 5: /* ljmp Ev */
4665 gen_op_ld_T1_A0(ot + s->mem_index);
4666 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4667 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4668 do_ljmp:
4669 if (s->pe && !s->vm86) {
4670 if (s->cc_op != CC_OP_DYNAMIC)
4671 gen_op_set_cc_op(s->cc_op);
4672 gen_jmp_im(pc_start - s->cs_base);
4673 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4674 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
4675 tcg_const_i32(s->pc - pc_start));
4676 } else {
4677 gen_op_movl_seg_T0_vm(R_CS);
4678 gen_op_movl_T0_T1();
4679 gen_op_jmp_T0();
4680 }
4681 gen_eob(s);
4682 break;
4683 case 6: /* push Ev */
4684 gen_push_T0(s);
4685 break;
4686 default:
4687 goto illegal_op;
4688 }
4689 break;
4691 case 0x84: /* test Ev, Gv */
4692 case 0x85:
4693 if ((b & 1) == 0)
4694 ot = OT_BYTE;
4695 else
4696 ot = dflag + OT_WORD;
4698 modrm = ldub_code(s->pc++);
4699 reg = ((modrm >> 3) & 7) | rex_r;
4701 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4702 gen_op_mov_TN_reg(ot, 1, reg);
4703 gen_op_testl_T0_T1_cc();
4704 s->cc_op = CC_OP_LOGICB + ot;
4705 break;
4707 case 0xa8: /* test eAX, Iv */
4708 case 0xa9:
4709 if ((b & 1) == 0)
4710 ot = OT_BYTE;
4711 else
4712 ot = dflag + OT_WORD;
4713 val = insn_get(s, ot);
4715 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4716 gen_op_movl_T1_im(val);
4717 gen_op_testl_T0_T1_cc();
4718 s->cc_op = CC_OP_LOGICB + ot;
4719 break;
4721 case 0x98: /* CWDE/CBW */
4722 #ifdef TARGET_X86_64
4723 if (dflag == 2) {
4724 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4725 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4726 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4727 } else
4728 #endif
4729 if (dflag == 1) {
4730 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4731 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4732 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4733 } else {
4734 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4735 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4736 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4737 }
4738 break;
4739 case 0x99: /* CDQ/CWD */
4740 #ifdef TARGET_X86_64
4741 if (dflag == 2) {
4742 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4743 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4744 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4745 } else
4746 #endif
4747 if (dflag == 1) {
4748 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4749 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4750 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4751 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4752 } else {
4753 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4754 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4755 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4756 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4757 }
4758 break;
4759 case 0x1af: /* imul Gv, Ev */
4760 case 0x69: /* imul Gv, Ev, I */
4761 case 0x6b:
4762 ot = dflag + OT_WORD;
4763 modrm = ldub_code(s->pc++);
4764 reg = ((modrm >> 3) & 7) | rex_r;
4765 if (b == 0x69)
4766 s->rip_offset = insn_const_size(ot);
4767 else if (b == 0x6b)
4768 s->rip_offset = 1;
4769 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4770 if (b == 0x69) {
4771 val = insn_get(s, ot);
4772 gen_op_movl_T1_im(val);
4773 } else if (b == 0x6b) {
4774 val = (int8_t)insn_get(s, OT_BYTE);
4775 gen_op_movl_T1_im(val);
4776 } else {
4777 gen_op_mov_TN_reg(ot, 1, reg);
4778 }
4780 #ifdef TARGET_X86_64
4781 if (ot == OT_QUAD) {
4782 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
4783 } else
4784 #endif
4785 if (ot == OT_LONG) {
4786 #ifdef TARGET_X86_64
4787 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4788 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4789 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4790 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4791 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4792 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4793 #else
4794 {
4795 TCGv_i64 t0, t1;
4796 t0 = tcg_temp_new_i64();
4797 t1 = tcg_temp_new_i64();
4798 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4799 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4800 tcg_gen_mul_i64(t0, t0, t1);
4801 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4802 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4803 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4804 tcg_gen_shri_i64(t0, t0, 32);
4805 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4806 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4807 }
4808 #endif
4809 } else {
4810 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4811 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4812 /* XXX: use 32 bit mul which could be faster */
4813 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4814 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4815 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4816 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4817 }
4818 gen_op_mov_reg_T0(ot, reg);
4819 s->cc_op = CC_OP_MULB + ot;
4820 break;
4821 case 0x1c0:
4822 case 0x1c1: /* xadd Ev, Gv */
4823 if ((b & 1) == 0)
4824 ot = OT_BYTE;
4825 else
4826 ot = dflag + OT_WORD;
4827 modrm = ldub_code(s->pc++);
4828 reg = ((modrm >> 3) & 7) | rex_r;
4829 mod = (modrm >> 6) & 3;
4830 if (mod == 3) {
4831 rm = (modrm & 7) | REX_B(s);
4832 gen_op_mov_TN_reg(ot, 0, reg);
4833 gen_op_mov_TN_reg(ot, 1, rm);
4834 gen_op_addl_T0_T1();
4835 gen_op_mov_reg_T1(ot, reg);
4836 gen_op_mov_reg_T0(ot, rm);
4837 } else {
4838 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4839 gen_op_mov_TN_reg(ot, 0, reg);
4840 gen_op_ld_T1_A0(ot + s->mem_index);
4841 gen_op_addl_T0_T1();
4842 gen_op_st_T0_A0(ot + s->mem_index);
4843 gen_op_mov_reg_T1(ot, reg);
4844 }
4845 gen_op_update2_cc();
4846 s->cc_op = CC_OP_ADDB + ot;
4847 break;
4848 case 0x1b0:
4849 case 0x1b1: /* cmpxchg Ev, Gv */
4851 int label1, label2;
4852 TCGv t0, t1, t2, a0;
4854 if ((b & 1) == 0)
4855 ot = OT_BYTE;
4856 else
4857 ot = dflag + OT_WORD;
4858 modrm = ldub_code(s->pc++);
4859 reg = ((modrm >> 3) & 7) | rex_r;
4860 mod = (modrm >> 6) & 3;
4861 t0 = tcg_temp_local_new();
4862 t1 = tcg_temp_local_new();
4863 t2 = tcg_temp_local_new();
4864 a0 = tcg_temp_local_new();
4865 gen_op_mov_v_reg(ot, t1, reg);
4866 if (mod == 3) {
4867 rm = (modrm & 7) | REX_B(s);
4868 gen_op_mov_v_reg(ot, t0, rm);
4869 } else {
4870 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4871 tcg_gen_mov_tl(a0, cpu_A0);
4872 gen_op_ld_v(ot + s->mem_index, t0, a0);
4873 rm = 0; /* avoid warning */
4874 }
4875 label1 = gen_new_label();
4876 tcg_gen_sub_tl(t2, cpu_regs[R_EAX], t0);
4877 gen_extu(ot, t2);
4878 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4879 if (mod == 3) {
4880 label2 = gen_new_label();
4881 gen_op_mov_reg_v(ot, R_EAX, t0);
4882 tcg_gen_br(label2);
4883 gen_set_label(label1);
4884 gen_op_mov_reg_v(ot, rm, t1);
4885 gen_set_label(label2);
4886 } else {
4887 tcg_gen_mov_tl(t1, t0);
4888 gen_op_mov_reg_v(ot, R_EAX, t0);
4889 gen_set_label(label1);
4890 /* always store */
4891 gen_op_st_v(ot + s->mem_index, t1, a0);
4892 }
4893 tcg_gen_mov_tl(cpu_cc_src, t0);
4894 tcg_gen_mov_tl(cpu_cc_dst, t2);
4895 s->cc_op = CC_OP_SUBB + ot;
4896 tcg_temp_free(t0);
4897 tcg_temp_free(t1);
4898 tcg_temp_free(t2);
4899 tcg_temp_free(a0);
4900 }
4901 break;
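/* Note on the memory form of cmpxchg above: the store is performed
   even when the comparison fails (t1 is pre-loaded with the old
   value), matching the architectural behaviour of CMPXCHG, which
   always drives a write cycle on a memory destination. */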
4902 case 0x1c7: /* cmpxchg8b */
4903 modrm = ldub_code(s->pc++);
4904 mod = (modrm >> 6) & 3;
4905 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4906 goto illegal_op;
4907 #ifdef TARGET_X86_64
4908 if (dflag == 2) {
4909 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4910 goto illegal_op;
4911 gen_jmp_im(pc_start - s->cs_base);
4912 if (s->cc_op != CC_OP_DYNAMIC)
4913 gen_op_set_cc_op(s->cc_op);
4914 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4915 gen_helper_cmpxchg16b(cpu_A0);
4916 } else
4917 #endif
4918 {
4919 if (!(s->cpuid_features & CPUID_CX8))
4920 goto illegal_op;
4921 gen_jmp_im(pc_start - s->cs_base);
4922 if (s->cc_op != CC_OP_DYNAMIC)
4923 gen_op_set_cc_op(s->cc_op);
4924 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4925 gen_helper_cmpxchg8b(cpu_A0);
4926 }
4927 s->cc_op = CC_OP_EFLAGS;
4928 break;
4930 /**************************/
4931 /* push/pop */
4932 case 0x50 ... 0x57: /* push */
4933 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4934 gen_push_T0(s);
4935 break;
4936 case 0x58 ... 0x5f: /* pop */
4937 if (CODE64(s)) {
4938 ot = dflag ? OT_QUAD : OT_WORD;
4939 } else {
4940 ot = dflag + OT_WORD;
4941 }
4942 gen_pop_T0(s);
4943 /* NOTE: order is important for pop %sp */
4944 gen_pop_update(s);
4945 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
4946 break;
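/* The ordering above matters for "pop %sp"/"pop %esp": the value is
   read from the stack and ESP is incremented before the register
   write-back, so popping into the stack pointer itself yields the
   value from memory rather than the incremented ESP. */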
4947 case 0x60: /* pusha */
4948 if (CODE64(s))
4949 goto illegal_op;
4950 gen_pusha(s);
4951 break;
4952 case 0x61: /* popa */
4953 if (CODE64(s))
4954 goto illegal_op;
4955 gen_popa(s);
4956 break;
4957 case 0x68: /* push Iv */
4958 case 0x6a:
4959 if (CODE64(s)) {
4960 ot = dflag ? OT_QUAD : OT_WORD;
4961 } else {
4962 ot = dflag + OT_WORD;
4963 }
4964 if (b == 0x68)
4965 val = insn_get(s, ot);
4966 else
4967 val = (int8_t)insn_get(s, OT_BYTE);
4968 gen_op_movl_T0_im(val);
4969 gen_push_T0(s);
4970 break;
4971 case 0x8f: /* pop Ev */
4972 if (CODE64(s)) {
4973 ot = dflag ? OT_QUAD : OT_WORD;
4974 } else {
4975 ot = dflag + OT_WORD;
4976 }
4977 modrm = ldub_code(s->pc++);
4978 mod = (modrm >> 6) & 3;
4979 gen_pop_T0(s);
4980 if (mod == 3) {
4981 /* NOTE: order is important for pop %sp */
4982 gen_pop_update(s);
4983 rm = (modrm & 7) | REX_B(s);
4984 gen_op_mov_reg_T0(ot, rm);
4985 } else {
4986 /* NOTE: order is important too for MMU exceptions */
4987 s->popl_esp_hack = 1 << ot;
4988 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
4989 s->popl_esp_hack = 0;
4990 gen_pop_update(s);
4991 }
4992 break;
4993 case 0xc8: /* enter */
4994 {
4995 int level;
4996 val = lduw_code(s->pc);
4997 s->pc += 2;
4998 level = ldub_code(s->pc++);
4999 gen_enter(s, val, level);
5000 }
5001 break;
5002 case 0xc9: /* leave */
5003 /* XXX: exception not precise (ESP is updated before potential exception) */
5004 if (CODE64(s)) {
5005 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
5006 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
5007 } else if (s->ss32) {
5008 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
5009 gen_op_mov_reg_T0(OT_LONG, R_ESP);
5010 } else {
5011 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
5012 gen_op_mov_reg_T0(OT_WORD, R_ESP);
5013 }
5014 gen_pop_T0(s);
5015 if (CODE64(s)) {
5016 ot = dflag ? OT_QUAD : OT_WORD;
5017 } else {
5018 ot = dflag + OT_WORD;
5019 }
5020 gen_op_mov_reg_T0(ot, R_EBP);
5021 gen_pop_update(s);
5022 break;
5023 case 0x06: /* push es */
5024 case 0x0e: /* push cs */
5025 case 0x16: /* push ss */
5026 case 0x1e: /* push ds */
5027 if (CODE64(s))
5028 goto illegal_op;
5029 gen_op_movl_T0_seg(b >> 3);
5030 gen_push_T0(s);
5031 break;
5032 case 0x1a0: /* push fs */
5033 case 0x1a8: /* push gs */
5034 gen_op_movl_T0_seg((b >> 3) & 7);
5035 gen_push_T0(s);
5036 break;
5037 case 0x07: /* pop es */
5038 case 0x17: /* pop ss */
5039 case 0x1f: /* pop ds */
5040 if (CODE64(s))
5041 goto illegal_op;
5042 reg = b >> 3;
5043 gen_pop_T0(s);
5044 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5045 gen_pop_update(s);
5046 if (reg == R_SS) {
5047 /* if reg == SS, inhibit interrupts/trace. */
5048 /* If several instructions disable interrupts, only the
5049 _first_ does it */
5050 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5051 gen_helper_set_inhibit_irq();
5052 s->tf = 0;
5053 }
5054 if (s->is_jmp) {
5055 gen_jmp_im(s->pc - s->cs_base);
5056 gen_eob(s);
5057 }
5058 break;
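/* The SS case above mirrors the hardware "interrupt shadow": loading
   SS holds off interrupts and single-step traps until after the next
   instruction, so SS:(E)SP can be reloaded as a pair without an
   interrupt observing a half-switched stack. */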
5059 case 0x1a1: /* pop fs */
5060 case 0x1a9: /* pop gs */
5061 gen_pop_T0(s);
5062 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5063 gen_pop_update(s);
5064 if (s->is_jmp) {
5065 gen_jmp_im(s->pc - s->cs_base);
5066 gen_eob(s);
5067 }
5068 break;
5070 /**************************/
5071 /* mov */
5072 case 0x88:
5073 case 0x89: /* mov Gv, Ev */
5074 if ((b & 1) == 0)
5075 ot = OT_BYTE;
5076 else
5077 ot = dflag + OT_WORD;
5078 modrm = ldub_code(s->pc++);
5079 reg = ((modrm >> 3) & 7) | rex_r;
5081 /* generate a generic store */
5082 gen_ldst_modrm(s, modrm, ot, reg, 1);
5083 break;
5084 case 0xc6:
5085 case 0xc7: /* mov Ev, Iv */
5086 if ((b & 1) == 0)
5087 ot = OT_BYTE;
5088 else
5089 ot = dflag + OT_WORD;
5090 modrm = ldub_code(s->pc++);
5091 mod = (modrm >> 6) & 3;
5092 if (mod != 3) {
5093 s->rip_offset = insn_const_size(ot);
5094 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5095 }
5096 val = insn_get(s, ot);
5097 gen_op_movl_T0_im(val);
5098 if (mod != 3)
5099 gen_op_st_T0_A0(ot + s->mem_index);
5100 else
5101 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5102 break;
5103 case 0x8a:
5104 case 0x8b: /* mov Ev, Gv */
5105 if ((b & 1) == 0)
5106 ot = OT_BYTE;
5107 else
5108 ot = OT_WORD + dflag;
5109 modrm = ldub_code(s->pc++);
5110 reg = ((modrm >> 3) & 7) | rex_r;
5112 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5113 gen_op_mov_reg_T0(ot, reg);
5114 break;
5115 case 0x8e: /* mov seg, Gv */
5116 modrm = ldub_code(s->pc++);
5117 reg = (modrm >> 3) & 7;
5118 if (reg >= 6 || reg == R_CS)
5119 goto illegal_op;
5120 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5121 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5122 if (reg == R_SS) {
5123 /* if reg == SS, inhibit interrupts/trace */
5124 /* If several instructions disable interrupts, only the
5125 _first_ does it */
5126 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5127 gen_helper_set_inhibit_irq();
5128 s->tf = 0;
5129 }
5130 if (s->is_jmp) {
5131 gen_jmp_im(s->pc - s->cs_base);
5132 gen_eob(s);
5133 }
5134 break;
5135 case 0x8c: /* mov Gv, seg */
5136 modrm = ldub_code(s->pc++);
5137 reg = (modrm >> 3) & 7;
5138 mod = (modrm >> 6) & 3;
5139 if (reg >= 6)
5140 goto illegal_op;
5141 gen_op_movl_T0_seg(reg);
5142 if (mod == 3)
5143 ot = OT_WORD + dflag;
5144 else
5145 ot = OT_WORD;
5146 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5147 break;
5149 case 0x1b6: /* movzbS Gv, Eb */
5150 case 0x1b7: /* movzwS Gv, Eb */
5151 case 0x1be: /* movsbS Gv, Eb */
5152 case 0x1bf: /* movswS Gv, Eb */
5153 {
5154 int d_ot;
5155 /* d_ot is the size of destination */
5156 d_ot = dflag + OT_WORD;
5157 /* ot is the size of source */
5158 ot = (b & 1) + OT_BYTE;
5159 modrm = ldub_code(s->pc++);
5160 reg = ((modrm >> 3) & 7) | rex_r;
5161 mod = (modrm >> 6) & 3;
5162 rm = (modrm & 7) | REX_B(s);
5164 if (mod == 3) {
5165 gen_op_mov_TN_reg(ot, 0, rm);
5166 switch(ot | (b & 8)) {
5167 case OT_BYTE:
5168 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5169 break;
5170 case OT_BYTE | 8:
5171 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5172 break;
5173 case OT_WORD:
5174 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5175 break;
5176 default:
5177 case OT_WORD | 8:
5178 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5179 break;
5180 }
5181 gen_op_mov_reg_T0(d_ot, reg);
5182 } else {
5183 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5184 if (b & 8) {
5185 gen_op_lds_T0_A0(ot + s->mem_index);
5186 } else {
5187 gen_op_ldu_T0_A0(ot + s->mem_index);
5188 }
5189 gen_op_mov_reg_T0(d_ot, reg);
5190 }
5191 }
5192 break;
5194 case 0x8d: /* lea */
5195 ot = dflag + OT_WORD;
5196 modrm = ldub_code(s->pc++);
5197 mod = (modrm >> 6) & 3;
5198 if (mod == 3)
5199 goto illegal_op;
5200 reg = ((modrm >> 3) & 7) | rex_r;
5201 /* we must ensure that no segment is added */
5202 s->override = -1;
5203 val = s->addseg;
5204 s->addseg = 0;
5205 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5206 s->addseg = val;
5207 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5208 break;
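/* LEA only computes the effective address, so no segment base may be
   folded in: addseg is temporarily cleared and any segment override
   discarded before reusing the common gen_lea_modrm() address
   computation, then addseg is restored. */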
5210 case 0xa0: /* mov EAX, Ov */
5211 case 0xa1:
5212 case 0xa2: /* mov Ov, EAX */
5213 case 0xa3:
5214 {
5215 target_ulong offset_addr;
5217 if ((b & 1) == 0)
5218 ot = OT_BYTE;
5219 else
5220 ot = dflag + OT_WORD;
5221 #ifdef TARGET_X86_64
5222 if (s->aflag == 2) {
5223 offset_addr = ldq_code(s->pc);
5224 s->pc += 8;
5225 gen_op_movq_A0_im(offset_addr);
5226 } else
5227 #endif
5228 {
5229 if (s->aflag) {
5230 offset_addr = insn_get(s, OT_LONG);
5231 } else {
5232 offset_addr = insn_get(s, OT_WORD);
5233 }
5234 gen_op_movl_A0_im(offset_addr);
5235 }
5236 gen_add_A0_ds_seg(s);
5237 if ((b & 2) == 0) {
5238 gen_op_ld_T0_A0(ot + s->mem_index);
5239 gen_op_mov_reg_T0(ot, R_EAX);
5240 } else {
5241 gen_op_mov_TN_reg(ot, 0, R_EAX);
5242 gen_op_st_T0_A0(ot + s->mem_index);
5243 }
5244 }
5245 break;
5246 case 0xd7: /* xlat */
5247 #ifdef TARGET_X86_64
5248 if (s->aflag == 2) {
5249 gen_op_movq_A0_reg(R_EBX);
5250 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5251 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5252 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5253 } else
5254 #endif
5255 {
5256 gen_op_movl_A0_reg(R_EBX);
5257 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5258 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5259 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5260 if (s->aflag == 0)
5261 gen_op_andl_A0_ffff();
5262 else
5263 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5264 }
5265 gen_add_A0_ds_seg(s);
5266 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5267 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5268 break;
5269 case 0xb0 ... 0xb7: /* mov R, Ib */
5270 val = insn_get(s, OT_BYTE);
5271 gen_op_movl_T0_im(val);
5272 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5273 break;
5274 case 0xb8 ... 0xbf: /* mov R, Iv */
5275 #ifdef TARGET_X86_64
5276 if (dflag == 2) {
5277 uint64_t tmp;
5278 /* 64 bit case */
5279 tmp = ldq_code(s->pc);
5280 s->pc += 8;
5281 reg = (b & 7) | REX_B(s);
5282 gen_movtl_T0_im(tmp);
5283 gen_op_mov_reg_T0(OT_QUAD, reg);
5284 } else
5285 #endif
5286 {
5287 ot = dflag ? OT_LONG : OT_WORD;
5288 val = insn_get(s, ot);
5289 reg = (b & 7) | REX_B(s);
5290 gen_op_movl_T0_im(val);
5291 gen_op_mov_reg_T0(ot, reg);
5292 }
5293 break;
5295 case 0x91 ... 0x97: /* xchg R, EAX */
5296 ot = dflag + OT_WORD;
5297 reg = (b & 7) | REX_B(s);
5298 rm = R_EAX;
5299 goto do_xchg_reg;
5300 case 0x86:
5301 case 0x87: /* xchg Ev, Gv */
5302 if ((b & 1) == 0)
5303 ot = OT_BYTE;
5304 else
5305 ot = dflag + OT_WORD;
5306 modrm = ldub_code(s->pc++);
5307 reg = ((modrm >> 3) & 7) | rex_r;
5308 mod = (modrm >> 6) & 3;
5309 if (mod == 3) {
5310 rm = (modrm & 7) | REX_B(s);
5311 do_xchg_reg:
5312 gen_op_mov_TN_reg(ot, 0, reg);
5313 gen_op_mov_TN_reg(ot, 1, rm);
5314 gen_op_mov_reg_T0(ot, rm);
5315 gen_op_mov_reg_T1(ot, reg);
5316 } else {
5317 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5318 gen_op_mov_TN_reg(ot, 0, reg);
5319 /* for xchg, lock is implicit */
5320 if (!(prefixes & PREFIX_LOCK))
5321 gen_helper_lock();
5322 gen_op_ld_T1_A0(ot + s->mem_index);
5323 gen_op_st_T0_A0(ot + s->mem_index);
5324 if (!(prefixes & PREFIX_LOCK))
5325 gen_helper_unlock();
5326 gen_op_mov_reg_T1(ot, reg);
5327 }
5328 break;
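/* XCHG with a memory operand is architecturally locked even without a
   LOCK prefix; the lock/unlock helpers are emitted here only when the
   prefix is absent because the generic PREFIX_LOCK handling near the
   top of disas_insn() already covers the prefixed case. */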
5329 case 0xc4: /* les Gv */
5330 if (CODE64(s))
5331 goto illegal_op;
5332 op = R_ES;
5333 goto do_lxx;
5334 case 0xc5: /* lds Gv */
5335 if (CODE64(s))
5336 goto illegal_op;
5337 op = R_DS;
5338 goto do_lxx;
5339 case 0x1b2: /* lss Gv */
5340 op = R_SS;
5341 goto do_lxx;
5342 case 0x1b4: /* lfs Gv */
5343 op = R_FS;
5344 goto do_lxx;
5345 case 0x1b5: /* lgs Gv */
5346 op = R_GS;
5347 do_lxx:
5348 ot = dflag ? OT_LONG : OT_WORD;
5349 modrm = ldub_code(s->pc++);
5350 reg = ((modrm >> 3) & 7) | rex_r;
5351 mod = (modrm >> 6) & 3;
5352 if (mod == 3)
5353 goto illegal_op;
5354 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5355 gen_op_ld_T1_A0(ot + s->mem_index);
5356 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5357 /* load the segment first to handle exceptions properly */
5358 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5359 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5360 /* then put the data */
5361 gen_op_mov_reg_T1(ot, reg);
5362 if (s->is_jmp) {
5363 gen_jmp_im(s->pc - s->cs_base);
5364 gen_eob(s);
5365 }
5366 break;
5368 /************************/
5369 /* shifts */
5370 case 0xc0:
5371 case 0xc1:
5372 /* shift Ev,Ib */
5373 shift = 2;
5374 grp2:
5375 {
5376 if ((b & 1) == 0)
5377 ot = OT_BYTE;
5378 else
5379 ot = dflag + OT_WORD;
5381 modrm = ldub_code(s->pc++);
5382 mod = (modrm >> 6) & 3;
5383 op = (modrm >> 3) & 7;
5385 if (mod != 3) {
5386 if (shift == 2) {
5387 s->rip_offset = 1;
5388 }
5389 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5390 opreg = OR_TMP0;
5391 } else {
5392 opreg = (modrm & 7) | REX_B(s);
5393 }
5395 /* simpler op */
5396 if (shift == 0) {
5397 gen_shift(s, op, ot, opreg, OR_ECX);
5398 } else {
5399 if (shift == 2) {
5400 shift = ldub_code(s->pc++);
5401 }
5402 gen_shifti(s, op, ot, opreg, shift);
5403 }
5404 }
5405 break;
5406 case 0xd0:
5407 case 0xd1:
5408 /* shift Ev,1 */
5409 shift = 1;
5410 goto grp2;
5411 case 0xd2:
5412 case 0xd3:
5413 /* shift Ev,cl */
5414 shift = 0;
5415 goto grp2;
5417 case 0x1a4: /* shld imm */
5418 op = 0;
5419 shift = 1;
5420 goto do_shiftd;
5421 case 0x1a5: /* shld cl */
5422 op = 0;
5423 shift = 0;
5424 goto do_shiftd;
5425 case 0x1ac: /* shrd imm */
5426 op = 1;
5427 shift = 1;
5428 goto do_shiftd;
5429 case 0x1ad: /* shrd cl */
5430 op = 1;
5431 shift = 0;
5432 do_shiftd:
5433 ot = dflag + OT_WORD;
5434 modrm = ldub_code(s->pc++);
5435 mod = (modrm >> 6) & 3;
5436 rm = (modrm & 7) | REX_B(s);
5437 reg = ((modrm >> 3) & 7) | rex_r;
5438 if (mod != 3) {
5439 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5440 opreg = OR_TMP0;
5441 } else {
5442 opreg = rm;
5444 gen_op_mov_TN_reg(ot, 1, reg);
5446 if (shift) {
5447 val = ldub_code(s->pc++);
5448 tcg_gen_movi_tl(cpu_T3, val);
5449 } else {
5450 tcg_gen_mov_tl(cpu_T3, cpu_regs[R_ECX]);
5451 }
5452 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5453 break;
5455 /************************/
5456 /* floats */
5457 case 0xd8 ... 0xdf:
5458 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5459 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5460 /* XXX: what to do if illegal op? */
5461 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5462 break;
5463 }
5464 modrm = ldub_code(s->pc++);
5465 mod = (modrm >> 6) & 3;
5466 rm = modrm & 7;
5467 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5468 if (mod != 3) {
5469 /* memory op */
5470 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5471 switch(op) {
5472 case 0x00 ... 0x07: /* fxxxs */
5473 case 0x10 ... 0x17: /* fixxxl */
5474 case 0x20 ... 0x27: /* fxxxl */
5475 case 0x30 ... 0x37: /* fixxx */
5476 {
5477 int op1;
5478 op1 = op & 7;
5480 switch(op >> 4) {
5481 case 0:
5482 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5483 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5484 gen_helper_flds_FT0(cpu_tmp2_i32);
5485 break;
5486 case 1:
5487 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5488 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5489 gen_helper_fildl_FT0(cpu_tmp2_i32);
5490 break;
5491 case 2:
5492 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5493 (s->mem_index >> 2) - 1);
5494 gen_helper_fldl_FT0(cpu_tmp1_i64);
5495 break;
5496 case 3:
5497 default:
5498 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5499 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5500 gen_helper_fildl_FT0(cpu_tmp2_i32);
5501 break;
5502 }
5504 gen_helper_fp_arith_ST0_FT0(op1);
5505 if (op1 == 3) {
5506 /* fcomp needs pop */
5507 gen_helper_fpop();
5508 }
5509 }
5510 break;
5511 case 0x08: /* flds */
5512 case 0x0a: /* fsts */
5513 case 0x0b: /* fstps */
5514 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5515 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5516 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5517 switch(op & 7) {
5518 case 0:
5519 switch(op >> 4) {
5520 case 0:
5521 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5522 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5523 gen_helper_flds_ST0(cpu_tmp2_i32);
5524 break;
5525 case 1:
5526 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5527 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5528 gen_helper_fildl_ST0(cpu_tmp2_i32);
5529 break;
5530 case 2:
5531 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5532 (s->mem_index >> 2) - 1);
5533 gen_helper_fldl_ST0(cpu_tmp1_i64);
5534 break;
5535 case 3:
5536 default:
5537 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5538 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5539 gen_helper_fildl_ST0(cpu_tmp2_i32);
5540 break;
5541 }
5542 break;
5543 case 1:
5544 /* XXX: fisttp is an SSE3 instruction; the corresponding CPUID bit must be tested! */
5545 switch(op >> 4) {
5546 case 1:
5547 gen_helper_fisttl_ST0(cpu_tmp2_i32);
5548 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5549 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5550 break;
5551 case 2:
5552 gen_helper_fisttll_ST0(cpu_tmp1_i64);
5553 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5554 (s->mem_index >> 2) - 1);
5555 break;
5556 case 3:
5557 default:
5558 gen_helper_fistt_ST0(cpu_tmp2_i32);
5559 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5560 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5561 break;
5562 }
5563 gen_helper_fpop();
5564 break;
5565 default:
5566 switch(op >> 4) {
5567 case 0:
5568 gen_helper_fsts_ST0(cpu_tmp2_i32);
5569 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5570 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5571 break;
5572 case 1:
5573 gen_helper_fistl_ST0(cpu_tmp2_i32);
5574 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5575 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5576 break;
5577 case 2:
5578 gen_helper_fstl_ST0(cpu_tmp1_i64);
5579 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5580 (s->mem_index >> 2) - 1);
5581 break;
5582 case 3:
5583 default:
5584 gen_helper_fist_ST0(cpu_tmp2_i32);
5585 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5586 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5587 break;
5588 }
5589 if ((op & 7) == 3)
5590 gen_helper_fpop();
5591 break;
5592 }
5593 break;
5594 case 0x0c: /* fldenv mem */
5595 if (s->cc_op != CC_OP_DYNAMIC)
5596 gen_op_set_cc_op(s->cc_op);
5597 gen_jmp_im(pc_start - s->cs_base);
5598 gen_helper_fldenv(
5599 cpu_A0, tcg_const_i32(s->dflag));
5600 break;
5601 case 0x0d: /* fldcw mem */
5602 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5603 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5604 gen_helper_fldcw(cpu_tmp2_i32);
5605 break;
5606 case 0x0e: /* fnstenv mem */
5607 if (s->cc_op != CC_OP_DYNAMIC)
5608 gen_op_set_cc_op(s->cc_op);
5609 gen_jmp_im(pc_start - s->cs_base);
5610 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
5611 break;
5612 case 0x0f: /* fnstcw mem */
5613 gen_helper_fnstcw(cpu_tmp2_i32);
5614 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5615 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5616 break;
5617 case 0x1d: /* fldt mem */
5618 if (s->cc_op != CC_OP_DYNAMIC)
5619 gen_op_set_cc_op(s->cc_op);
5620 gen_jmp_im(pc_start - s->cs_base);
5621 gen_helper_fldt_ST0(cpu_A0);
5622 break;
5623 case 0x1f: /* fstpt mem */
5624 if (s->cc_op != CC_OP_DYNAMIC)
5625 gen_op_set_cc_op(s->cc_op);
5626 gen_jmp_im(pc_start - s->cs_base);
5627 gen_helper_fstt_ST0(cpu_A0);
5628 gen_helper_fpop();
5629 break;
5630 case 0x2c: /* frstor mem */
5631 if (s->cc_op != CC_OP_DYNAMIC)
5632 gen_op_set_cc_op(s->cc_op);
5633 gen_jmp_im(pc_start - s->cs_base);
5634 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
5635 break;
5636 case 0x2e: /* fnsave mem */
5637 if (s->cc_op != CC_OP_DYNAMIC)
5638 gen_op_set_cc_op(s->cc_op);
5639 gen_jmp_im(pc_start - s->cs_base);
5640 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
5641 break;
5642 case 0x2f: /* fnstsw mem */
5643 gen_helper_fnstsw(cpu_tmp2_i32);
5644 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5645 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5646 break;
5647 case 0x3c: /* fbld */
5648 if (s->cc_op != CC_OP_DYNAMIC)
5649 gen_op_set_cc_op(s->cc_op);
5650 gen_jmp_im(pc_start - s->cs_base);
5651 gen_helper_fbld_ST0(cpu_A0);
5652 break;
5653 case 0x3e: /* fbstp */
5654 if (s->cc_op != CC_OP_DYNAMIC)
5655 gen_op_set_cc_op(s->cc_op);
5656 gen_jmp_im(pc_start - s->cs_base);
5657 gen_helper_fbst_ST0(cpu_A0);
5658 gen_helper_fpop();
5659 break;
5660 case 0x3d: /* fildll */
5661 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5662 (s->mem_index >> 2) - 1);
5663 gen_helper_fildll_ST0(cpu_tmp1_i64);
5664 break;
5665 case 0x3f: /* fistpll */
5666 gen_helper_fistll_ST0(cpu_tmp1_i64);
5667 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5668 (s->mem_index >> 2) - 1);
5669 gen_helper_fpop();
5670 break;
5671 default:
5672 goto illegal_op;
5673 }
5674 } else {
5675 /* register float ops */
5676 opreg = rm;
5678 switch(op) {
5679 case 0x08: /* fld sti */
5680 gen_helper_fpush();
5681 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
5682 break;
5683 case 0x09: /* fxchg sti */
5684 case 0x29: /* fxchg4 sti, undocumented op */
5685 case 0x39: /* fxchg7 sti, undocumented op */
5686 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
5687 break;
5688 case 0x0a: /* grp d9/2 */
5689 switch(rm) {
5690 case 0: /* fnop */
5691 /* check exceptions (FreeBSD FPU probe) */
5692 if (s->cc_op != CC_OP_DYNAMIC)
5693 gen_op_set_cc_op(s->cc_op);
5694 gen_jmp_im(pc_start - s->cs_base);
5695 gen_helper_fwait();
5696 break;
5697 default:
5698 goto illegal_op;
5699 }
5700 break;
5701 case 0x0c: /* grp d9/4 */
5702 switch(rm) {
5703 case 0: /* fchs */
5704 gen_helper_fchs_ST0();
5705 break;
5706 case 1: /* fabs */
5707 gen_helper_fabs_ST0();
5708 break;
5709 case 4: /* ftst */
5710 gen_helper_fldz_FT0();
5711 gen_helper_fcom_ST0_FT0();
5712 break;
5713 case 5: /* fxam */
5714 gen_helper_fxam_ST0();
5715 break;
5716 default:
5717 goto illegal_op;
5718 }
5719 break;
5720 case 0x0d: /* grp d9/5 */
5721 {
5722 switch(rm) {
5723 case 0:
5724 gen_helper_fpush();
5725 gen_helper_fld1_ST0();
5726 break;
5727 case 1:
5728 gen_helper_fpush();
5729 gen_helper_fldl2t_ST0();
5730 break;
5731 case 2:
5732 gen_helper_fpush();
5733 gen_helper_fldl2e_ST0();
5734 break;
5735 case 3:
5736 gen_helper_fpush();
5737 gen_helper_fldpi_ST0();
5738 break;
5739 case 4:
5740 gen_helper_fpush();
5741 gen_helper_fldlg2_ST0();
5742 break;
5743 case 5:
5744 gen_helper_fpush();
5745 gen_helper_fldln2_ST0();
5746 break;
5747 case 6:
5748 gen_helper_fpush();
5749 gen_helper_fldz_ST0();
5750 break;
5751 default:
5752 goto illegal_op;
5753 }
5754 }
5755 break;
5756 case 0x0e: /* grp d9/6 */
5757 switch(rm) {
5758 case 0: /* f2xm1 */
5759 gen_helper_f2xm1();
5760 break;
5761 case 1: /* fyl2x */
5762 gen_helper_fyl2x();
5763 break;
5764 case 2: /* fptan */
5765 gen_helper_fptan();
5766 break;
5767 case 3: /* fpatan */
5768 gen_helper_fpatan();
5769 break;
5770 case 4: /* fxtract */
5771 gen_helper_fxtract();
5772 break;
5773 case 5: /* fprem1 */
5774 gen_helper_fprem1();
5775 break;
5776 case 6: /* fdecstp */
5777 gen_helper_fdecstp();
5778 break;
5779 default:
5780 case 7: /* fincstp */
5781 gen_helper_fincstp();
5782 break;
5783 }
5784 break;
5785 case 0x0f: /* grp d9/7 */
5786 switch(rm) {
5787 case 0: /* fprem */
5788 gen_helper_fprem();
5789 break;
5790 case 1: /* fyl2xp1 */
5791 gen_helper_fyl2xp1();
5792 break;
5793 case 2: /* fsqrt */
5794 gen_helper_fsqrt();
5795 break;
5796 case 3: /* fsincos */
5797 gen_helper_fsincos();
5798 break;
5799 case 5: /* fscale */
5800 gen_helper_fscale();
5801 break;
5802 case 4: /* frndint */
5803 gen_helper_frndint();
5804 break;
5805 case 6: /* fsin */
5806 gen_helper_fsin();
5807 break;
5808 default:
5809 case 7: /* fcos */
5810 gen_helper_fcos();
5811 break;
5812 }
5813 break;
5814 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5815 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5816 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5817 {
5818 int op1;
5820 op1 = op & 7;
5821 if (op >= 0x20) {
5822 gen_helper_fp_arith_STN_ST0(op1, opreg);
5823 if (op >= 0x30)
5824 gen_helper_fpop();
5825 } else {
5826 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5827 gen_helper_fp_arith_ST0_FT0(op1);
5828 }
5829 }
5830 break;
5831 case 0x02: /* fcom */
5832 case 0x22: /* fcom2, undocumented op */
5833 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5834 gen_helper_fcom_ST0_FT0();
5835 break;
5836 case 0x03: /* fcomp */
5837 case 0x23: /* fcomp3, undocumented op */
5838 case 0x32: /* fcomp5, undocumented op */
5839 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5840 gen_helper_fcom_ST0_FT0();
5841 gen_helper_fpop();
5842 break;
5843 case 0x15: /* da/5 */
5844 switch(rm) {
5845 case 1: /* fucompp */
5846 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5847 gen_helper_fucom_ST0_FT0();
5848 gen_helper_fpop();
5849 gen_helper_fpop();
5850 break;
5851 default:
5852 goto illegal_op;
5853 }
5854 break;
5855 case 0x1c:
5856 switch(rm) {
5857 case 0: /* feni (287 only, just do nop here) */
5858 break;
5859 case 1: /* fdisi (287 only, just do nop here) */
5860 break;
5861 case 2: /* fclex */
5862 gen_helper_fclex();
5863 break;
5864 case 3: /* fninit */
5865 gen_helper_fninit();
5866 break;
5867 case 4: /* fsetpm (287 only, just do nop here) */
5868 break;
5869 default:
5870 goto illegal_op;
5871 }
5872 break;
5873 case 0x1d: /* fucomi */
5874 if (s->cc_op != CC_OP_DYNAMIC)
5875 gen_op_set_cc_op(s->cc_op);
5876 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5877 gen_helper_fucomi_ST0_FT0();
5878 s->cc_op = CC_OP_EFLAGS;
5879 break;
5880 case 0x1e: /* fcomi */
5881 if (s->cc_op != CC_OP_DYNAMIC)
5882 gen_op_set_cc_op(s->cc_op);
5883 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5884 gen_helper_fcomi_ST0_FT0();
5885 s->cc_op = CC_OP_EFLAGS;
5886 break;
5887 case 0x28: /* ffree sti */
5888 gen_helper_ffree_STN(tcg_const_i32(opreg));
5889 break;
5890 case 0x2a: /* fst sti */
5891 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5892 break;
5893 case 0x2b: /* fstp sti */
5894 case 0x0b: /* fstp1 sti, undocumented op */
5895 case 0x3a: /* fstp8 sti, undocumented op */
5896 case 0x3b: /* fstp9 sti, undocumented op */
5897 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5898 gen_helper_fpop();
5899 break;
5900 case 0x2c: /* fucom st(i) */
5901 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5902 gen_helper_fucom_ST0_FT0();
5903 break;
5904 case 0x2d: /* fucomp st(i) */
5905 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5906 gen_helper_fucom_ST0_FT0();
5907 gen_helper_fpop();
5908 break;
5909 case 0x33: /* de/3 */
5910 switch(rm) {
5911 case 1: /* fcompp */
5912 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5913 gen_helper_fcom_ST0_FT0();
5914 gen_helper_fpop();
5915 gen_helper_fpop();
5916 break;
5917 default:
5918 goto illegal_op;
5919 }
5920 break;
5921 case 0x38: /* ffreep sti, undocumented op */
5922 gen_helper_ffree_STN(tcg_const_i32(opreg));
5923 gen_helper_fpop();
5924 break;
5925 case 0x3c: /* df/4 */
5926 switch(rm) {
5927 case 0:
5928 gen_helper_fnstsw(cpu_tmp2_i32);
5929 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5930 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5931 break;
5932 default:
5933 goto illegal_op;
5934 }
5935 break;
5936 case 0x3d: /* fucomip */
5937 if (s->cc_op != CC_OP_DYNAMIC)
5938 gen_op_set_cc_op(s->cc_op);
5939 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5940 gen_helper_fucomi_ST0_FT0();
5941 gen_helper_fpop();
5942 s->cc_op = CC_OP_EFLAGS;
5943 break;
5944 case 0x3e: /* fcomip */
5945 if (s->cc_op != CC_OP_DYNAMIC)
5946 gen_op_set_cc_op(s->cc_op);
5947 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5948 gen_helper_fcomi_ST0_FT0();
5949 gen_helper_fpop();
5950 s->cc_op = CC_OP_EFLAGS;
5951 break;
5952 case 0x10 ... 0x13: /* fcmovxx */
5953 case 0x18 ... 0x1b:
5954 {
5955 int op1, l1;
5956 static const uint8_t fcmov_cc[8] = {
5957 (JCC_B << 1),
5958 (JCC_Z << 1),
5959 (JCC_BE << 1),
5960 (JCC_P << 1),
5961 };
5962 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
5963 l1 = gen_new_label();
5964 gen_jcc1(s, s->cc_op, op1, l1);
5965 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
5966 gen_set_label(l1);
5967 }
5968 break;
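/* fcmov condition selection (sketch): bits 0-1 of op index fcmov_cc[]
   (B, Z, BE, P) and bit 3 distinguishes the DA group (move if the
   condition holds) from the DB group (move if it does not); the low
   bit of the jcc encoding is its negation flag, hence the "^ 1" so
   that gen_jcc1() skips the fmov when the move should not happen. */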
5969 default:
5970 goto illegal_op;
5971 }
5972 }
5973 break;
5974 /************************/
5975 /* string ops */
5977 case 0xa4: /* movsS */
5978 case 0xa5:
5979 if ((b & 1) == 0)
5980 ot = OT_BYTE;
5981 else
5982 ot = dflag + OT_WORD;
5984 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5985 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5986 } else {
5987 gen_movs(s, ot);
5988 }
5989 break;
5991 case 0xaa: /* stosS */
5992 case 0xab:
5993 if ((b & 1) == 0)
5994 ot = OT_BYTE;
5995 else
5996 ot = dflag + OT_WORD;
5998 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5999 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6000 } else {
6001 gen_stos(s, ot);
6002 }
6003 break;
6004 case 0xac: /* lodsS */
6005 case 0xad:
6006 if ((b & 1) == 0)
6007 ot = OT_BYTE;
6008 else
6009 ot = dflag + OT_WORD;
6010 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6011 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6012 } else {
6013 gen_lods(s, ot);
6014 }
6015 break;
6016 case 0xae: /* scasS */
6017 case 0xaf:
6018 if ((b & 1) == 0)
6019 ot = OT_BYTE;
6020 else
6021 ot = dflag + OT_WORD;
6022 if (prefixes & PREFIX_REPNZ) {
6023 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6024 } else if (prefixes & PREFIX_REPZ) {
6025 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6026 } else {
6027 gen_scas(s, ot);
6028 s->cc_op = CC_OP_SUBB + ot;
6029 }
6030 break;
6032 case 0xa6: /* cmpsS */
6033 case 0xa7:
6034 if ((b & 1) == 0)
6035 ot = OT_BYTE;
6036 else
6037 ot = dflag + OT_WORD;
6038 if (prefixes & PREFIX_REPNZ) {
6039 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6040 } else if (prefixes & PREFIX_REPZ) {
6041 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6042 } else {
6043 gen_cmps(s, ot);
6044 s->cc_op = CC_OP_SUBB + ot;
6045 }
6046 break;
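/* For SCAS/CMPS the repeat prefix also selects the termination test:
   REPNZ stops once ZF becomes set and REPZ once it becomes clear,
   which is the trailing 1/0 argument to gen_repz_scas()/
   gen_repz_cmps() above; MOVS/STOS/LODS ignore ZF and only count
   (E)CX down. */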
6047 case 0x6c: /* insS */
6048 case 0x6d:
6049 if ((b & 1) == 0)
6050 ot = OT_BYTE;
6051 else
6052 ot = dflag ? OT_LONG : OT_WORD;
6053 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6054 gen_op_andl_T0_ffff();
6055 gen_check_io(s, ot, pc_start - s->cs_base,
6056 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6057 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6058 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6059 } else {
6060 gen_ins(s, ot);
6061 if (use_icount) {
6062 gen_jmp(s, s->pc - s->cs_base);
6063 }
6064 }
6065 break;
6066 case 0x6e: /* outsS */
6067 case 0x6f:
6068 if ((b & 1) == 0)
6069 ot = OT_BYTE;
6070 else
6071 ot = dflag ? OT_LONG : OT_WORD;
6072 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6073 gen_op_andl_T0_ffff();
6074 gen_check_io(s, ot, pc_start - s->cs_base,
6075 svm_is_rep(prefixes) | 4);
6076 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6077 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6078 } else {
6079 gen_outs(s, ot);
6080 if (use_icount) {
6081 gen_jmp(s, s->pc - s->cs_base);
6082 }
6083 }
6084 break;
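/* When -icount is in use the instruction counter must stay exact
   across device accesses, so after a non-repeated ins/outs the TB is
   terminated with an explicit jump; the plain in/out cases below
   likewise bracket the access with gen_io_start()/gen_io_end(). */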
6086 /************************/
6087 /* port I/O */
6089 case 0xe4:
6090 case 0xe5:
6091 if ((b & 1) == 0)
6092 ot = OT_BYTE;
6093 else
6094 ot = dflag ? OT_LONG : OT_WORD;
6095 val = ldub_code(s->pc++);
6096 gen_op_movl_T0_im(val);
6097 gen_check_io(s, ot, pc_start - s->cs_base,
6098 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6099 if (use_icount)
6100 gen_io_start();
6101 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6102 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6103 gen_op_mov_reg_T1(ot, R_EAX);
6104 if (use_icount) {
6105 gen_io_end();
6106 gen_jmp(s, s->pc - s->cs_base);
6107 }
6108 break;
6109 case 0xe6:
6110 case 0xe7:
6111 if ((b & 1) == 0)
6112 ot = OT_BYTE;
6113 else
6114 ot = dflag ? OT_LONG : OT_WORD;
6115 val = ldub_code(s->pc++);
6116 gen_op_movl_T0_im(val);
6117 gen_check_io(s, ot, pc_start - s->cs_base,
6118 svm_is_rep(prefixes));
6119 gen_op_mov_TN_reg(ot, 1, R_EAX);
6121 if (use_icount)
6122 gen_io_start();
6123 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6124 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6125 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6126 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6127 if (use_icount) {
6128 gen_io_end();
6129 gen_jmp(s, s->pc - s->cs_base);
6130 }
6131 break;
6132 case 0xec:
6133 case 0xed:
6134 if ((b & 1) == 0)
6135 ot = OT_BYTE;
6136 else
6137 ot = dflag ? OT_LONG : OT_WORD;
6138 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6139 gen_op_andl_T0_ffff();
6140 gen_check_io(s, ot, pc_start - s->cs_base,
6141 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6142 if (use_icount)
6143 gen_io_start();
6144 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6145 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6146 gen_op_mov_reg_T1(ot, R_EAX);
6147 if (use_icount) {
6148 gen_io_end();
6149 gen_jmp(s, s->pc - s->cs_base);
6150 }
6151 break;
6152 case 0xee:
6153 case 0xef:
6154 if ((b & 1) == 0)
6155 ot = OT_BYTE;
6156 else
6157 ot = dflag ? OT_LONG : OT_WORD;
6158 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6159 gen_op_andl_T0_ffff();
6160 gen_check_io(s, ot, pc_start - s->cs_base,
6161 svm_is_rep(prefixes));
6162 gen_op_mov_TN_reg(ot, 1, R_EAX);
6164 if (use_icount)
6165 gen_io_start();
6166 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6167 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6168 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6169 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6170 if (use_icount) {
6171 gen_io_end();
6172 gen_jmp(s, s->pc - s->cs_base);
6173 }
6174 break;
6176 /************************/
6177 /* control */
6178 case 0xc2: /* ret im */
6179 val = ldsw_code(s->pc);
6180 s->pc += 2;
6181 gen_pop_T0(s);
6182 if (CODE64(s) && s->dflag)
6183 s->dflag = 2;
6184 gen_stack_update(s, val + (2 << s->dflag));
6185 if (s->dflag == 0)
6186 gen_op_andl_T0_ffff();
6187 gen_op_jmp_T0();
6188 gen_eob(s);
6189 break;
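/* "ret $n" (e.g. "c2 08 00" for ret $8) pops the return address and
   then releases n extra bytes of stack, the callee-cleans-up calling
   convention; hence gen_stack_update() with val + (2 << s->dflag),
   the popped EIP size plus the immediate. */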
6190 case 0xc3: /* ret */
6191 gen_pop_T0(s);
6192 gen_pop_update(s);
6193 if (s->dflag == 0)
6194 gen_op_andl_T0_ffff();
6195 gen_op_jmp_T0();
6196 gen_eob(s);
6197 break;
6198 case 0xca: /* lret im */
6199 val = ldsw_code(s->pc);
6200 s->pc += 2;
6201 do_lret:
6202 if (s->pe && !s->vm86) {
6203 if (s->cc_op != CC_OP_DYNAMIC)
6204 gen_op_set_cc_op(s->cc_op);
6205 gen_jmp_im(pc_start - s->cs_base);
6206 gen_helper_lret_protected(tcg_const_i32(s->dflag),
6207 tcg_const_i32(val));
6208 } else {
6209 gen_stack_A0(s);
6210 /* pop offset */
6211 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6212 if (s->dflag == 0)
6213 gen_op_andl_T0_ffff();
6214 /* NOTE: keeping EIP updated is not a problem in case of
6215 exception */
6216 gen_op_jmp_T0();
6217 /* pop selector */
6218 gen_op_addl_A0_im(2 << s->dflag);
6219 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6220 gen_op_movl_seg_T0_vm(R_CS);
6221 /* add stack offset */
6222 gen_stack_update(s, val + (4 << s->dflag));
6223 }
6224 gen_eob(s);
6225 break;
6226 case 0xcb: /* lret */
6227 val = 0;
6228 goto do_lret;
6229 case 0xcf: /* iret */
6230 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6231 if (!s->pe) {
6232 /* real mode */
6233 gen_helper_iret_real(tcg_const_i32(s->dflag));
6234 s->cc_op = CC_OP_EFLAGS;
6235 } else if (s->vm86) {
6236 if (s->iopl != 3) {
6237 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6238 } else {
6239 gen_helper_iret_real(tcg_const_i32(s->dflag));
6240 s->cc_op = CC_OP_EFLAGS;
6241 }
6242 } else {
6243 if (s->cc_op != CC_OP_DYNAMIC)
6244 gen_op_set_cc_op(s->cc_op);
6245 gen_jmp_im(pc_start - s->cs_base);
6246 gen_helper_iret_protected(tcg_const_i32(s->dflag),
6247 tcg_const_i32(s->pc - s->cs_base));
6248 s->cc_op = CC_OP_EFLAGS;
6249 }
6250 gen_eob(s);
6251 break;
6252 case 0xe8: /* call im */
6253 {
6254 if (dflag)
6255 tval = (int32_t)insn_get(s, OT_LONG);
6256 else
6257 tval = (int16_t)insn_get(s, OT_WORD);
6258 next_eip = s->pc - s->cs_base;
6259 tval += next_eip;
6260 if (s->dflag == 0)
6261 tval &= 0xffff;
6262 else if(!CODE64(s))
6263 tval &= 0xffffffff;
6264 gen_movtl_T0_im(next_eip);
6265 gen_push_T0(s);
6266 gen_jmp(s, tval);
6267 }
6268 break;
6269 case 0x9a: /* lcall im */
6270 {
6271 unsigned int selector, offset;
6273 if (CODE64(s))
6274 goto illegal_op;
6275 ot = dflag ? OT_LONG : OT_WORD;
6276 offset = insn_get(s, ot);
6277 selector = insn_get(s, OT_WORD);
6279 gen_op_movl_T0_im(selector);
6280 gen_op_movl_T1_imu(offset);
6281 }
6282 goto do_lcall;
6283 case 0xe9: /* jmp im */
6284 if (dflag)
6285 tval = (int32_t)insn_get(s, OT_LONG);
6286 else
6287 tval = (int16_t)insn_get(s, OT_WORD);
6288 tval += s->pc - s->cs_base;
6289 if (s->dflag == 0)
6290 tval &= 0xffff;
6291 else if(!CODE64(s))
6292 tval &= 0xffffffff;
6293 gen_jmp(s, tval);
6294 break;
6295 case 0xea: /* ljmp im */
6296 {
6297 unsigned int selector, offset;
6299 if (CODE64(s))
6300 goto illegal_op;
6301 ot = dflag ? OT_LONG : OT_WORD;
6302 offset = insn_get(s, ot);
6303 selector = insn_get(s, OT_WORD);
6305 gen_op_movl_T0_im(selector);
6306 gen_op_movl_T1_imu(offset);
6307 }
6308 goto do_ljmp;
6309 case 0xeb: /* jmp Jb */
6310 tval = (int8_t)insn_get(s, OT_BYTE);
6311 tval += s->pc - s->cs_base;
6312 if (s->dflag == 0)
6313 tval &= 0xffff;
6314 gen_jmp(s, tval);
6315 break;
6316 case 0x70 ... 0x7f: /* jcc Jb */
6317 tval = (int8_t)insn_get(s, OT_BYTE);
6318 goto do_jcc;
6319 case 0x180 ... 0x18f: /* jcc Jv */
6320 if (dflag) {
6321 tval = (int32_t)insn_get(s, OT_LONG);
6322 } else {
6323 tval = (int16_t)insn_get(s, OT_WORD);
6324 }
6325 do_jcc:
6326 next_eip = s->pc - s->cs_base;
6327 tval += next_eip;
6328 if (s->dflag == 0)
6329 tval &= 0xffff;
6330 gen_jcc(s, b, tval, next_eip);
6331 break;
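/* Conditional jumps are EIP-relative: the sign-extended displacement
   is added to the address of the next instruction and masked to 16
   bits for a 16-bit operand size.  E.g. "74 fe" branches to itself:
   disp8 = -2 exactly cancels the 2-byte instruction length. */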
6333 case 0x190 ... 0x19f: /* setcc Gv */
6334 modrm = ldub_code(s->pc++);
6335 gen_setcc(s, b);
6336 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6337 break;
6338 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6339 {
6340 int l1;
6341 TCGv t0;
6343 ot = dflag + OT_WORD;
6344 modrm = ldub_code(s->pc++);
6345 reg = ((modrm >> 3) & 7) | rex_r;
6346 mod = (modrm >> 6) & 3;
6347 t0 = tcg_temp_local_new();
6348 if (mod != 3) {
6349 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6350 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6351 } else {
6352 rm = (modrm & 7) | REX_B(s);
6353 gen_op_mov_v_reg(ot, t0, rm);
6354 }
6355 #ifdef TARGET_X86_64
6356 if (ot == OT_LONG) {
6357 /* XXX: specific Intel behaviour? */
6358 l1 = gen_new_label();
6359 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6360 tcg_gen_mov_tl(cpu_regs[reg], t0);
6361 gen_set_label(l1);
6362 tcg_gen_ext32u_tl(cpu_regs[reg], cpu_regs[reg]);
6363 } else
6364 #endif
6365 {
6366 l1 = gen_new_label();
6367 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6368 gen_op_mov_reg_v(ot, reg, t0);
6369 gen_set_label(l1);
6370 }
6371 tcg_temp_free(t0);
6372 }
6373 break;
6375 /************************/
6376 /* flags */
6377 case 0x9c: /* pushf */
6378 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6379 if (s->vm86 && s->iopl != 3) {
6380 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6381 } else {
6382 if (s->cc_op != CC_OP_DYNAMIC)
6383 gen_op_set_cc_op(s->cc_op);
6384 gen_helper_read_eflags(cpu_T[0]);
6385 gen_push_T0(s);
6386 }
6387 break;
6388 case 0x9d: /* popf */
6389 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6390 if (s->vm86 && s->iopl != 3) {
6391 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6392 } else {
6393 gen_pop_T0(s);
6394 if (s->cpl == 0) {
6395 if (s->dflag) {
6396 gen_helper_write_eflags(cpu_T[0],
6397 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
6398 } else {
6399 gen_helper_write_eflags(cpu_T[0],
6400 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
6401 }
6402 } else {
6403 if (s->cpl <= s->iopl) {
6404 if (s->dflag) {
6405 gen_helper_write_eflags(cpu_T[0],
6406 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
6407 } else {
6408 gen_helper_write_eflags(cpu_T[0],
6409 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
6410 }
6411 } else {
6412 if (s->dflag) {
6413 gen_helper_write_eflags(cpu_T[0],
6414 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
6415 } else {
6416 gen_helper_write_eflags(cpu_T[0],
6417 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
6418 }
6419 }
6420 }
6421 gen_pop_update(s);
6422 s->cc_op = CC_OP_EFLAGS;
6423 /* abort translation because TF flag may change */
6424 gen_jmp_im(s->pc - s->cs_base);
6425 gen_eob(s);
6426 }
6427 break;
6428 case 0x9e: /* sahf */
6429 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6430 goto illegal_op;
6431 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6432 if (s->cc_op != CC_OP_DYNAMIC)
6433 gen_op_set_cc_op(s->cc_op);
6434 gen_compute_eflags(cpu_cc_src);
6435 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6436 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6437 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6438 s->cc_op = CC_OP_EFLAGS;
6439 break;
6440 case 0x9f: /* lahf */
6441 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6442 goto illegal_op;
6443 if (s->cc_op != CC_OP_DYNAMIC)
6444 gen_op_set_cc_op(s->cc_op);
6445 gen_compute_eflags(cpu_T[0]);
6446 /* Note: gen_compute_eflags() only gives the condition codes */
6447 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6448 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6449 break;
6450 case 0xf5: /* cmc */
6451 if (s->cc_op != CC_OP_DYNAMIC)
6452 gen_op_set_cc_op(s->cc_op);
6453 gen_compute_eflags(cpu_cc_src);
6454 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6455 s->cc_op = CC_OP_EFLAGS;
6456 break;
6457 case 0xf8: /* clc */
6458 if (s->cc_op != CC_OP_DYNAMIC)
6459 gen_op_set_cc_op(s->cc_op);
6460 gen_compute_eflags(cpu_cc_src);
6461 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6462 s->cc_op = CC_OP_EFLAGS;
6463 break;
6464 case 0xf9: /* stc */
6465 if (s->cc_op != CC_OP_DYNAMIC)
6466 gen_op_set_cc_op(s->cc_op);
6467 gen_compute_eflags(cpu_cc_src);
6468 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6469 s->cc_op = CC_OP_EFLAGS;
6470 break;
6471 case 0xfc: /* cld */
6472 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6473 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6474 break;
6475 case 0xfd: /* std */
6476 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6477 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6478 break;
6480 /************************/
6481 /* bit operations */
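/* All four variants (bt/bts/btr/btc) funnel into bt_op below: the bit
   offset in T1 is masked to the operand width, the selected bit is
   shifted down into cpu_cc_src so that CF reflects it, and for the
   writing variants a shifted 1 is OR-ed (bts), AND-NOT-ed (btr) or
   XOR-ed (btc) into T0 before the result is written back. */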
6482 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6483 ot = dflag + OT_WORD;
6484 modrm = ldub_code(s->pc++);
6485 op = (modrm >> 3) & 7;
6486 mod = (modrm >> 6) & 3;
6487 rm = (modrm & 7) | REX_B(s);
6488 if (mod != 3) {
6489 s->rip_offset = 1;
6490 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6491 gen_op_ld_T0_A0(ot + s->mem_index);
6492 } else {
6493 gen_op_mov_TN_reg(ot, 0, rm);
6494 }
6495 /* load shift */
6496 val = ldub_code(s->pc++);
6497 gen_op_movl_T1_im(val);
6498 if (op < 4)
6499 goto illegal_op;
6500 op -= 4;
6501 goto bt_op;
6502 case 0x1a3: /* bt Gv, Ev */
6503 op = 0;
6504 goto do_btx;
6505 case 0x1ab: /* bts */
6506 op = 1;
6507 goto do_btx;
6508 case 0x1b3: /* btr */
6509 op = 2;
6510 goto do_btx;
6511 case 0x1bb: /* btc */
6512 op = 3;
6513 do_btx:
6514 ot = dflag + OT_WORD;
6515 modrm = ldub_code(s->pc++);
6516 reg = ((modrm >> 3) & 7) | rex_r;
6517 mod = (modrm >> 6) & 3;
6518 rm = (modrm & 7) | REX_B(s);
6519 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6520 if (mod != 3) {
6521 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6522 /* specific case: we need to add a displacement */
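/* For a memory operand the bit offset may lie outside the addressed
   word: sign-extend it, shift right arithmetically by 3 + ot to get
   the element index, scale back to a byte count (shift left by ot)
   and add it to A0 before the in-word offset is masked below. */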
6523 gen_exts(ot, cpu_T[1]);
6524 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6525 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6526 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6527 gen_op_ld_T0_A0(ot + s->mem_index);
6528 } else {
6529 gen_op_mov_TN_reg(ot, 0, rm);
6530 }
6531 bt_op:
6532 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6533 switch(op) {
6534 case 0:
6535 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6536 tcg_gen_movi_tl(cpu_cc_dst, 0);
6537 break;
6538 case 1:
6539 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6540 tcg_gen_movi_tl(cpu_tmp0, 1);
6541 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6542 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6543 break;
6544 case 2:
6545 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6546 tcg_gen_movi_tl(cpu_tmp0, 1);
6547 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6548 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6549 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6550 break;
6551 default:
6552 case 3:
6553 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6554 tcg_gen_movi_tl(cpu_tmp0, 1);
6555 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6556 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6557 break;
6558 }
6559 s->cc_op = CC_OP_SARB + ot;
6560 if (op != 0) {
6561 if (mod != 3)
6562 gen_op_st_T0_A0(ot + s->mem_index);
6563 else
6564 gen_op_mov_reg_T0(ot, rm);
6565 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6566 tcg_gen_movi_tl(cpu_cc_dst, 0);
6567 }
6568 break;
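/* With a REPZ prefix and the ABM CPUID bit set, the bsr encoding is
   reinterpreted as lzcnt. In the plain bsf/bsr path a zero source
   leaves the destination unmodified; cpu_cc_dst is set to 0 or 1 so
   that ZF (through CC_OP_LOGICB) reports whether the source was 0. */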
6569 case 0x1bc: /* bsf */
6570 case 0x1bd: /* bsr */
6571 {
6572 int label1;
6573 TCGv t0;
6575 ot = dflag + OT_WORD;
6576 modrm = ldub_code(s->pc++);
6577 reg = ((modrm >> 3) & 7) | rex_r;
6578 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6579 gen_extu(ot, cpu_T[0]);
6580 t0 = tcg_temp_local_new();
6581 tcg_gen_mov_tl(t0, cpu_T[0]);
6582 if ((b & 1) && (prefixes & PREFIX_REPZ) &&
6583 (s->cpuid_ext3_features & CPUID_EXT3_ABM)) {
6584 switch(ot) {
6585 case OT_WORD: gen_helper_lzcnt(cpu_T[0], t0,
6586 tcg_const_i32(16)); break;
6587 case OT_LONG: gen_helper_lzcnt(cpu_T[0], t0,
6588 tcg_const_i32(32)); break;
6589 case OT_QUAD: gen_helper_lzcnt(cpu_T[0], t0,
6590 tcg_const_i32(64)); break;
6591 }
6592 gen_op_mov_reg_T0(ot, reg);
6593 } else {
6594 label1 = gen_new_label();
6595 tcg_gen_movi_tl(cpu_cc_dst, 0);
6596 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6597 if (b & 1) {
6598 gen_helper_bsr(cpu_T[0], t0);
6599 } else {
6600 gen_helper_bsf(cpu_T[0], t0);
6601 }
6602 gen_op_mov_reg_T0(ot, reg);
6603 tcg_gen_movi_tl(cpu_cc_dst, 1);
6604 gen_set_label(label1);
6605 tcg_gen_discard_tl(cpu_cc_src);
6606 s->cc_op = CC_OP_LOGICB + ot;
6607 }
6608 tcg_temp_free(t0);
6609 }
6610 break;
6611 /************************/
6612 /* bcd */
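/* The BCD adjustment instructions are invalid in 64-bit mode and are
   implemented entirely in helpers that read and update EFLAGS; aam
   additionally raises #DE when its immediate divisor is zero. */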
6613 case 0x27: /* daa */
6614 if (CODE64(s))
6615 goto illegal_op;
6616 if (s->cc_op != CC_OP_DYNAMIC)
6617 gen_op_set_cc_op(s->cc_op);
6618 gen_helper_daa();
6619 s->cc_op = CC_OP_EFLAGS;
6620 break;
6621 case 0x2f: /* das */
6622 if (CODE64(s))
6623 goto illegal_op;
6624 if (s->cc_op != CC_OP_DYNAMIC)
6625 gen_op_set_cc_op(s->cc_op);
6626 gen_helper_das();
6627 s->cc_op = CC_OP_EFLAGS;
6628 break;
6629 case 0x37: /* aaa */
6630 if (CODE64(s))
6631 goto illegal_op;
6632 if (s->cc_op != CC_OP_DYNAMIC)
6633 gen_op_set_cc_op(s->cc_op);
6634 gen_helper_aaa();
6635 s->cc_op = CC_OP_EFLAGS;
6636 break;
6637 case 0x3f: /* aas */
6638 if (CODE64(s))
6639 goto illegal_op;
6640 if (s->cc_op != CC_OP_DYNAMIC)
6641 gen_op_set_cc_op(s->cc_op);
6642 gen_helper_aas();
6643 s->cc_op = CC_OP_EFLAGS;
6644 break;
6645 case 0xd4: /* aam */
6646 if (CODE64(s))
6647 goto illegal_op;
6648 val = ldub_code(s->pc++);
6649 if (val == 0) {
6650 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6651 } else {
6652 gen_helper_aam(tcg_const_i32(val));
6653 s->cc_op = CC_OP_LOGICB;
6654 }
6655 break;
6656 case 0xd5: /* aad */
6657 if (CODE64(s))
6658 goto illegal_op;
6659 val = ldub_code(s->pc++);
6660 gen_helper_aad(tcg_const_i32(val));
6661 s->cc_op = CC_OP_LOGICB;
6662 break;
6663 /************************/
6664 /* misc */
6665 case 0x90: /* nop */
6666 /* XXX: xchg + rex handling */
6667 /* XXX: correct lock test for all insn */
6668 if (prefixes & PREFIX_LOCK)
6669 goto illegal_op;
6670 if (prefixes & PREFIX_REPZ) {
6671 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6672 }
6673 break;
6674 case 0x9b: /* fwait */
6675 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6676 (HF_MP_MASK | HF_TS_MASK)) {
6677 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6678 } else {
6679 if (s->cc_op != CC_OP_DYNAMIC)
6680 gen_op_set_cc_op(s->cc_op);
6681 gen_jmp_im(pc_start - s->cs_base);
6682 gen_helper_fwait();
6683 }
6684 break;
6685 case 0xcc: /* int3 */
6686 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6687 break;
6688 case 0xcd: /* int N */
6689 val = ldub_code(s->pc++);
6690 if (s->vm86 && s->iopl != 3) {
6691 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6692 } else {
6693 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6694 }
6695 break;
6696 case 0xce: /* into */
6697 if (CODE64(s))
6698 goto illegal_op;
6699 if (s->cc_op != CC_OP_DYNAMIC)
6700 gen_op_set_cc_op(s->cc_op);
6701 gen_jmp_im(pc_start - s->cs_base);
6702 gen_helper_into(tcg_const_i32(s->pc - pc_start));
6703 break;
6704 #ifdef WANT_ICEBP
6705 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6706 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6707 #if 1
6708 gen_debug(s, pc_start - s->cs_base);
6709 #else
6710 /* start debug */
6711 tb_flush(cpu_single_env);
6712 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6713 #endif
6714 break;
6715 #endif
6716 case 0xfa: /* cli */
6717 if (!s->vm86) {
6718 if (s->cpl <= s->iopl) {
6719 gen_helper_cli();
6720 } else {
6721 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6722 }
6723 } else {
6724 if (s->iopl == 3) {
6725 gen_helper_cli();
6726 } else {
6727 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6728 }
6729 }
6730 break;
6731 case 0xfb: /* sti */
6732 if (!s->vm86) {
6733 if (s->cpl <= s->iopl) {
6734 gen_sti:
6735 gen_helper_sti();
6736 /* interrupts are re-enabled only after the first insn following sti */
6737 /* if several consecutive instructions inhibit interrupts, only the
6738 _first_ one takes effect */
6739 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6740 gen_helper_set_inhibit_irq();
6741 /* give a chance to handle pending irqs */
6742 gen_jmp_im(s->pc - s->cs_base);
6743 gen_eob(s);
6744 } else {
6745 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6746 }
6747 } else {
6748 if (s->iopl == 3) {
6749 goto gen_sti;
6750 } else {
6751 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6752 }
6753 }
6754 break;
6755 case 0x62: /* bound */
6756 if (CODE64(s))
6757 goto illegal_op;
6758 ot = dflag ? OT_LONG : OT_WORD;
6759 modrm = ldub_code(s->pc++);
6760 reg = (modrm >> 3) & 7;
6761 mod = (modrm >> 6) & 3;
6762 if (mod == 3)
6763 goto illegal_op;
6764 gen_op_mov_TN_reg(ot, 0, reg);
6765 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6766 gen_jmp_im(pc_start - s->cs_base);
6767 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6768 if (ot == OT_WORD)
6769 gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
6770 else
6771 gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
6772 break;
6773 case 0x1c8 ... 0x1cf: /* bswap reg */
6774 reg = (b & 7) | REX_B(s);
6775 #ifdef TARGET_X86_64
6776 if (dflag == 2) {
6777 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6778 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6779 gen_op_mov_reg_T0(OT_QUAD, reg);
6780 } else
6781 #endif
6782 {
6783 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6784 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6785 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6786 gen_op_mov_reg_T0(OT_LONG, reg);
6787 }
6788 break;
6789 case 0xd6: /* salc */
6790 if (CODE64(s))
6791 goto illegal_op;
6792 if (s->cc_op != CC_OP_DYNAMIC)
6793 gen_op_set_cc_op(s->cc_op);
6794 gen_compute_eflags_c(cpu_T[0]);
6795 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6796 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6797 break;
6798 case 0xe0: /* loopnz */
6799 case 0xe1: /* loopz */
6800 case 0xe2: /* loop */
6801 case 0xe3: /* jecxz */
6802 {
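/* Three labels drive the loop/jecxz family: l1 is taken when the
   branch condition holds, l3 is the fall-through to the next
   instruction, and l2 is the common exit reached after either eip
   update. */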
6803 int l1, l2, l3;
6805 tval = (int8_t)insn_get(s, OT_BYTE);
6806 next_eip = s->pc - s->cs_base;
6807 tval += next_eip;
6808 if (s->dflag == 0)
6809 tval &= 0xffff;
6811 l1 = gen_new_label();
6812 l2 = gen_new_label();
6813 l3 = gen_new_label();
6814 b &= 3;
6815 switch(b) {
6816 case 0: /* loopnz */
6817 case 1: /* loopz */
6818 if (s->cc_op != CC_OP_DYNAMIC)
6819 gen_op_set_cc_op(s->cc_op);
6820 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6821 gen_op_jz_ecx(s->aflag, l3);
6822 gen_compute_eflags(cpu_tmp0);
6823 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
6824 if (b == 0) {
6825 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
6826 } else {
6827 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
6828 }
6829 break;
6830 case 2: /* loop */
6831 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6832 gen_op_jnz_ecx(s->aflag, l1);
6833 break;
6834 default:
6835 case 3: /* jcxz */
6836 gen_op_jz_ecx(s->aflag, l1);
6837 break;
6838 }
6840 gen_set_label(l3);
6841 gen_jmp_im(next_eip);
6842 tcg_gen_br(l2);
6844 gen_set_label(l1);
6845 gen_jmp_im(tval);
6846 gen_set_label(l2);
6847 gen_eob(s);
6848 }
6849 break;
6850 case 0x130: /* wrmsr */
6851 case 0x132: /* rdmsr */
6852 if (s->cpl != 0) {
6853 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6854 } else {
6855 if (s->cc_op != CC_OP_DYNAMIC)
6856 gen_op_set_cc_op(s->cc_op);
6857 gen_jmp_im(pc_start - s->cs_base);
6858 if (b & 2) {
6859 gen_helper_rdmsr();
6860 } else {
6861 gen_helper_wrmsr();
6862 }
6863 }
6864 break;
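/* When icount is active the TSC read is bracketed with
   gen_io_start()/gen_io_end() and the block is ended, so the
   deterministic instruction counter stays in step with the value
   returned by the helper. */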
6865 case 0x131: /* rdtsc */
6866 if (s->cc_op != CC_OP_DYNAMIC)
6867 gen_op_set_cc_op(s->cc_op);
6868 gen_jmp_im(pc_start - s->cs_base);
6869 if (use_icount)
6870 gen_io_start();
6871 gen_helper_rdtsc();
6872 if (use_icount) {
6873 gen_io_end();
6874 gen_jmp(s, s->pc - s->cs_base);
6875 }
6876 break;
6877 case 0x133: /* rdpmc */
6878 if (s->cc_op != CC_OP_DYNAMIC)
6879 gen_op_set_cc_op(s->cc_op);
6880 gen_jmp_im(pc_start - s->cs_base);
6881 gen_helper_rdpmc();
6882 break;
6883 case 0x134: /* sysenter */
6884 /* on Intel CPUs, SYSENTER is also valid in 64-bit mode */
6885 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6886 goto illegal_op;
6887 if (!s->pe) {
6888 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6889 } else {
6890 if (s->cc_op != CC_OP_DYNAMIC) {
6891 gen_op_set_cc_op(s->cc_op);
6892 s->cc_op = CC_OP_DYNAMIC;
6893 }
6894 gen_jmp_im(pc_start - s->cs_base);
6895 gen_helper_sysenter();
6896 gen_eob(s);
6897 }
6898 break;
6899 case 0x135: /* sysexit */
6900 /* on Intel CPUs, SYSEXIT is also valid in 64-bit mode */
6901 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6902 goto illegal_op;
6903 if (!s->pe) {
6904 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6905 } else {
6906 if (s->cc_op != CC_OP_DYNAMIC) {
6907 gen_op_set_cc_op(s->cc_op);
6908 s->cc_op = CC_OP_DYNAMIC;
6909 }
6910 gen_jmp_im(pc_start - s->cs_base);
6911 gen_helper_sysexit(tcg_const_i32(dflag));
6912 gen_eob(s);
6913 }
6914 break;
6915 #ifdef TARGET_X86_64
6916 case 0x105: /* syscall */
6917 /* XXX: is it usable in real mode? */
6918 if (s->cc_op != CC_OP_DYNAMIC) {
6919 gen_op_set_cc_op(s->cc_op);
6920 s->cc_op = CC_OP_DYNAMIC;
6921 }
6922 gen_jmp_im(pc_start - s->cs_base);
6923 gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
6924 gen_eob(s);
6925 break;
6926 case 0x107: /* sysret */
6927 if (!s->pe) {
6928 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6929 } else {
6930 if (s->cc_op != CC_OP_DYNAMIC) {
6931 gen_op_set_cc_op(s->cc_op);
6932 s->cc_op = CC_OP_DYNAMIC;
6933 }
6934 gen_jmp_im(pc_start - s->cs_base);
6935 gen_helper_sysret(tcg_const_i32(s->dflag));
6936 /* condition codes are modified only in long mode */
6937 if (s->lma)
6938 s->cc_op = CC_OP_EFLAGS;
6939 gen_eob(s);
6940 }
6941 break;
6942 #endif
6943 case 0x1a2: /* cpuid */
6944 if (s->cc_op != CC_OP_DYNAMIC)
6945 gen_op_set_cc_op(s->cc_op);
6946 gen_jmp_im(pc_start - s->cs_base);
6947 gen_helper_cpuid();
6948 break;
6949 case 0xf4: /* hlt */
6950 if (s->cpl != 0) {
6951 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6952 } else {
6953 if (s->cc_op != CC_OP_DYNAMIC)
6954 gen_op_set_cc_op(s->cc_op);
6955 gen_jmp_im(pc_start - s->cs_base);
6956 gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
6957 s->is_jmp = 3;
6958 }
6959 break;
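/* 0f 00 group: the modrm reg field selects between sldt, str, lldt,
   ltr, verr and verw. */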
6960 case 0x100:
6961 modrm = ldub_code(s->pc++);
6962 mod = (modrm >> 6) & 3;
6963 op = (modrm >> 3) & 7;
6964 switch(op) {
6965 case 0: /* sldt */
6966 if (!s->pe || s->vm86)
6967 goto illegal_op;
6968 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
6969 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
6970 ot = OT_WORD;
6971 if (mod == 3)
6972 ot += s->dflag;
6973 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6974 break;
6975 case 2: /* lldt */
6976 if (!s->pe || s->vm86)
6977 goto illegal_op;
6978 if (s->cpl != 0) {
6979 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6980 } else {
6981 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
6982 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6983 gen_jmp_im(pc_start - s->cs_base);
6984 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6985 gen_helper_lldt(cpu_tmp2_i32);
6986 }
6987 break;
6988 case 1: /* str */
6989 if (!s->pe || s->vm86)
6990 goto illegal_op;
6991 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
6992 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
6993 ot = OT_WORD;
6994 if (mod == 3)
6995 ot += s->dflag;
6996 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6997 break;
6998 case 3: /* ltr */
6999 if (!s->pe || s->vm86)
7000 goto illegal_op;
7001 if (s->cpl != 0) {
7002 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7003 } else {
7004 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7005 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7006 gen_jmp_im(pc_start - s->cs_base);
7007 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7008 gen_helper_ltr(cpu_tmp2_i32);
7009 }
7010 break;
7011 case 4: /* verr */
7012 case 5: /* verw */
7013 if (!s->pe || s->vm86)
7014 goto illegal_op;
7015 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7016 if (s->cc_op != CC_OP_DYNAMIC)
7017 gen_op_set_cc_op(s->cc_op);
7018 if (op == 4)
7019 gen_helper_verr(cpu_T[0]);
7020 else
7021 gen_helper_verw(cpu_T[0]);
7022 s->cc_op = CC_OP_EFLAGS;
7023 break;
7024 default:
7025 goto illegal_op;
7026 }
7027 break;
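/* 0f 01 group: sgdt/sidt/lgdt/lidt/smsw/lmsw/invlpg, with the SVM
   instructions (vmrun, vmmcall, vmload, vmsave, stgi, clgi, skinit,
   invlpga) and monitor/mwait/swapgs/rdtscp encoded in the mod == 3
   slots. */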
7028 case 0x101:
7029 modrm = ldub_code(s->pc++);
7030 mod = (modrm >> 6) & 3;
7031 op = (modrm >> 3) & 7;
7032 rm = modrm & 7;
7033 switch(op) {
7034 case 0: /* sgdt */
7035 if (mod == 3)
7036 goto illegal_op;
7037 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7038 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7039 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7040 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7041 gen_add_A0_im(s, 2);
7042 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7043 if (!s->dflag)
7044 gen_op_andl_T0_im(0xffffff);
7045 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7046 break;
7047 case 1:
7048 if (mod == 3) {
7049 switch (rm) {
7050 case 0: /* monitor */
7051 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7052 s->cpl != 0)
7053 goto illegal_op;
7054 if (s->cc_op != CC_OP_DYNAMIC)
7055 gen_op_set_cc_op(s->cc_op);
7056 gen_jmp_im(pc_start - s->cs_base);
7057 #ifdef TARGET_X86_64
7058 if (s->aflag == 2) {
7059 gen_op_movq_A0_reg(R_EAX);
7060 } else
7061 #endif
7062 {
7063 gen_op_movl_A0_reg(R_EAX);
7064 if (s->aflag == 0)
7065 gen_op_andl_A0_ffff();
7066 }
7067 gen_add_A0_ds_seg(s);
7068 gen_helper_monitor(cpu_A0);
7069 break;
7070 case 1: /* mwait */
7071 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7072 s->cpl != 0)
7073 goto illegal_op;
7074 if (s->cc_op != CC_OP_DYNAMIC) {
7075 gen_op_set_cc_op(s->cc_op);
7076 s->cc_op = CC_OP_DYNAMIC;
7077 }
7078 gen_jmp_im(pc_start - s->cs_base);
7079 gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
7080 gen_eob(s);
7081 break;
7082 default:
7083 goto illegal_op;
7084 }
7085 } else { /* sidt */
7086 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7087 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7088 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7089 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7090 gen_add_A0_im(s, 2);
7091 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7092 if (!s->dflag)
7093 gen_op_andl_T0_im(0xffffff);
7094 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7095 }
7096 break;
7097 case 2: /* lgdt */
7098 case 3: /* lidt */
7099 if (mod == 3) {
7100 if (s->cc_op != CC_OP_DYNAMIC)
7101 gen_op_set_cc_op(s->cc_op);
7102 gen_jmp_im(pc_start - s->cs_base);
7103 switch(rm) {
7104 case 0: /* VMRUN */
7105 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7106 goto illegal_op;
7107 if (s->cpl != 0) {
7108 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7109 break;
7110 } else {
7111 gen_helper_vmrun(tcg_const_i32(s->aflag),
7112 tcg_const_i32(s->pc - pc_start));
7113 tcg_gen_exit_tb(0);
7114 s->is_jmp = 3;
7115 }
7116 break;
7117 case 1: /* VMMCALL */
7118 if (!(s->flags & HF_SVME_MASK))
7119 goto illegal_op;
7120 gen_helper_vmmcall();
7121 break;
7122 case 2: /* VMLOAD */
7123 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7124 goto illegal_op;
7125 if (s->cpl != 0) {
7126 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7127 break;
7128 } else {
7129 gen_helper_vmload(tcg_const_i32(s->aflag));
7130 }
7131 break;
7132 case 3: /* VMSAVE */
7133 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7134 goto illegal_op;
7135 if (s->cpl != 0) {
7136 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7137 break;
7138 } else {
7139 gen_helper_vmsave(tcg_const_i32(s->aflag));
7140 }
7141 break;
7142 case 4: /* STGI */
7143 if ((!(s->flags & HF_SVME_MASK) &&
7144 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7145 !s->pe)
7146 goto illegal_op;
7147 if (s->cpl != 0) {
7148 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7149 break;
7150 } else {
7151 gen_helper_stgi();
7152 }
7153 break;
7154 case 5: /* CLGI */
7155 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7156 goto illegal_op;
7157 if (s->cpl != 0) {
7158 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7159 break;
7160 } else {
7161 gen_helper_clgi();
7162 }
7163 break;
7164 case 6: /* SKINIT */
7165 if ((!(s->flags & HF_SVME_MASK) &&
7166 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7167 !s->pe)
7168 goto illegal_op;
7169 gen_helper_skinit();
7170 break;
7171 case 7: /* INVLPGA */
7172 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7173 goto illegal_op;
7174 if (s->cpl != 0) {
7175 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7176 break;
7177 } else {
7178 gen_helper_invlpga(tcg_const_i32(s->aflag));
7179 }
7180 break;
7181 default:
7182 goto illegal_op;
7183 }
7184 } else if (s->cpl != 0) {
7185 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7186 } else {
7187 gen_svm_check_intercept(s, pc_start,
7188 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7189 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7190 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7191 gen_add_A0_im(s, 2);
7192 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7193 if (!s->dflag)
7194 gen_op_andl_T0_im(0xffffff);
7195 if (op == 2) {
7196 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7197 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7198 } else {
7199 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7200 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7201 }
7202 }
7203 break;
7204 case 4: /* smsw */
7205 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7206 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7207 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7208 #else
7209 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7210 #endif
7211 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
7212 break;
7213 case 6: /* lmsw */
7214 if (s->cpl != 0) {
7215 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7216 } else {
7217 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7218 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7219 gen_helper_lmsw(cpu_T[0]);
7220 gen_jmp_im(s->pc - s->cs_base);
7221 gen_eob(s);
7222 }
7223 break;
7224 case 7:
7225 if (mod != 3) { /* invlpg */
7226 if (s->cpl != 0) {
7227 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7228 } else {
7229 if (s->cc_op != CC_OP_DYNAMIC)
7230 gen_op_set_cc_op(s->cc_op);
7231 gen_jmp_im(pc_start - s->cs_base);
7232 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7233 gen_helper_invlpg(cpu_A0);
7234 gen_jmp_im(s->pc - s->cs_base);
7235 gen_eob(s);
7236 }
7237 } else {
7238 switch (rm) {
7239 case 0: /* swapgs */
7240 #ifdef TARGET_X86_64
7241 if (CODE64(s)) {
7242 if (s->cpl != 0) {
7243 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7244 } else {
7245 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7246 offsetof(CPUX86State,segs[R_GS].base));
7247 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7248 offsetof(CPUX86State,kernelgsbase));
7249 tcg_gen_st_tl(cpu_T[1], cpu_env,
7250 offsetof(CPUX86State,segs[R_GS].base));
7251 tcg_gen_st_tl(cpu_T[0], cpu_env,
7252 offsetof(CPUX86State,kernelgsbase));
7253 }
7254 } else
7255 #endif
7256 {
7257 goto illegal_op;
7258 }
7259 break;
7260 case 1: /* rdtscp */
7261 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7262 goto illegal_op;
7263 if (s->cc_op != CC_OP_DYNAMIC)
7264 gen_op_set_cc_op(s->cc_op);
7265 gen_jmp_im(pc_start - s->cs_base);
7266 if (use_icount)
7267 gen_io_start();
7268 gen_helper_rdtscp();
7269 if (use_icount) {
7270 gen_io_end();
7271 gen_jmp(s, s->pc - s->cs_base);
7272 }
7273 break;
7274 default:
7275 goto illegal_op;
7276 }
7277 }
7278 break;
7279 default:
7280 goto illegal_op;
7281 }
7282 break;
7283 case 0x108: /* invd */
7284 case 0x109: /* wbinvd */
7285 if (s->cpl != 0) {
7286 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7287 } else {
7288 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7289 /* nothing to do */
7290 }
7291 break;
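/* Opcode 0x63 is arpl outside 64-bit mode but movslq (movsxd) inside
   it. The arpl path below raises the destination RPL to the source
   RPL when it is lower, recording the adjustment in t2 so that ZF can
   be rebuilt into cpu_cc_src afterwards. */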
7292 case 0x63: /* arpl or movslq (x86_64) */
7293 #ifdef TARGET_X86_64
7294 if (CODE64(s)) {
7295 int d_ot;
7296 /* d_ot is the size of the destination */
7297 d_ot = dflag + OT_WORD;
7299 modrm = ldub_code(s->pc++);
7300 reg = ((modrm >> 3) & 7) | rex_r;
7301 mod = (modrm >> 6) & 3;
7302 rm = (modrm & 7) | REX_B(s);
7304 if (mod == 3) {
7305 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7306 /* sign extend */
7307 if (d_ot == OT_QUAD)
7308 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7309 gen_op_mov_reg_T0(d_ot, reg);
7310 } else {
7311 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7312 if (d_ot == OT_QUAD) {
7313 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7314 } else {
7315 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7316 }
7317 gen_op_mov_reg_T0(d_ot, reg);
7318 }
7319 } else
7320 #endif
7321 {
7322 int label1;
7323 TCGv t0, t1, t2, a0;
7325 if (!s->pe || s->vm86)
7326 goto illegal_op;
7327 t0 = tcg_temp_local_new();
7328 t1 = tcg_temp_local_new();
7329 t2 = tcg_temp_local_new();
7330 ot = OT_WORD;
7331 modrm = ldub_code(s->pc++);
7332 reg = (modrm >> 3) & 7;
7333 mod = (modrm >> 6) & 3;
7334 rm = modrm & 7;
7335 if (mod != 3) {
7336 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7337 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7338 a0 = tcg_temp_local_new();
7339 tcg_gen_mov_tl(a0, cpu_A0);
7340 } else {
7341 gen_op_mov_v_reg(ot, t0, rm);
7342 TCGV_UNUSED(a0);
7343 }
7344 gen_op_mov_v_reg(ot, t1, reg);
7345 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7346 tcg_gen_andi_tl(t1, t1, 3);
7347 tcg_gen_movi_tl(t2, 0);
7348 label1 = gen_new_label();
7349 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7350 tcg_gen_andi_tl(t0, t0, ~3);
7351 tcg_gen_or_tl(t0, t0, t1);
7352 tcg_gen_movi_tl(t2, CC_Z);
7353 gen_set_label(label1);
7354 if (mod != 3) {
7355 gen_op_st_v(ot + s->mem_index, t0, a0);
7356 tcg_temp_free(a0);
7357 } else {
7358 gen_op_mov_reg_v(ot, rm, t0);
7359 }
7360 if (s->cc_op != CC_OP_DYNAMIC)
7361 gen_op_set_cc_op(s->cc_op);
7362 gen_compute_eflags(cpu_cc_src);
7363 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7364 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7365 s->cc_op = CC_OP_EFLAGS;
7366 tcg_temp_free(t0);
7367 tcg_temp_free(t1);
7368 tcg_temp_free(t2);
7369 }
7370 break;
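/* lar/lsl: the helpers set ZF in cpu_cc_src when the selector is
   valid; the conditional branch below skips the register writeback
   when ZF is clear, matching the architectural "destination unchanged
   on failure" behaviour. */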
7371 case 0x102: /* lar */
7372 case 0x103: /* lsl */
7373 {
7374 int label1;
7375 TCGv t0;
7376 if (!s->pe || s->vm86)
7377 goto illegal_op;
7378 ot = dflag ? OT_LONG : OT_WORD;
7379 modrm = ldub_code(s->pc++);
7380 reg = ((modrm >> 3) & 7) | rex_r;
7381 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7382 t0 = tcg_temp_local_new();
7383 if (s->cc_op != CC_OP_DYNAMIC)
7384 gen_op_set_cc_op(s->cc_op);
7385 if (b == 0x102)
7386 gen_helper_lar(t0, cpu_T[0]);
7387 else
7388 gen_helper_lsl(t0, cpu_T[0]);
7389 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7390 label1 = gen_new_label();
7391 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7392 gen_op_mov_reg_v(ot, reg, t0);
7393 gen_set_label(label1);
7394 s->cc_op = CC_OP_EFLAGS;
7395 tcg_temp_free(t0);
7396 }
7397 break;
7398 case 0x118:
7399 modrm = ldub_code(s->pc++);
7400 mod = (modrm >> 6) & 3;
7401 op = (modrm >> 3) & 7;
7402 switch(op) {
7403 case 0: /* prefetchnta */
7404 case 1: /* prefetcht0 */
7405 case 2: /* prefetcht1 */
7406 case 3: /* prefetcht2 */
7407 if (mod == 3)
7408 goto illegal_op;
7409 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7410 /* nothing more to do */
7411 break;
7412 default: /* nop (multi byte) */
7413 gen_nop_modrm(s, modrm);
7414 break;
7415 }
7416 break;
7417 case 0x119 ... 0x11f: /* nop (multi byte) */
7418 modrm = ldub_code(s->pc++);
7419 gen_nop_modrm(s, modrm);
7420 break;
7421 case 0x120: /* mov reg, crN */
7422 case 0x122: /* mov crN, reg */
7423 if (s->cpl != 0) {
7424 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7425 } else {
7426 modrm = ldub_code(s->pc++);
7427 if ((modrm & 0xc0) != 0xc0)
7428 goto illegal_op;
7429 rm = (modrm & 7) | REX_B(s);
7430 reg = ((modrm >> 3) & 7) | rex_r;
7431 if (CODE64(s))
7432 ot = OT_QUAD;
7433 else
7434 ot = OT_LONG;
7435 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7436 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7437 reg = 8;
7438 }
7439 switch(reg) {
7440 case 0:
7441 case 2:
7442 case 3:
7443 case 4:
7444 case 8:
7445 if (s->cc_op != CC_OP_DYNAMIC)
7446 gen_op_set_cc_op(s->cc_op);
7447 gen_jmp_im(pc_start - s->cs_base);
7448 if (b & 2) {
7449 gen_op_mov_TN_reg(ot, 0, rm);
7450 gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
7451 gen_jmp_im(s->pc - s->cs_base);
7452 gen_eob(s);
7453 } else {
7454 gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
7455 gen_op_mov_reg_T0(ot, rm);
7456 }
7457 break;
7458 default:
7459 goto illegal_op;
7460 }
7461 }
7462 break;
7463 case 0x121: /* mov reg, drN */
7464 case 0x123: /* mov drN, reg */
7465 if (s->cpl != 0) {
7466 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7467 } else {
7468 modrm = ldub_code(s->pc++);
7469 if ((modrm & 0xc0) != 0xc0)
7470 goto illegal_op;
7471 rm = (modrm & 7) | REX_B(s);
7472 reg = ((modrm >> 3) & 7) | rex_r;
7473 if (CODE64(s))
7474 ot = OT_QUAD;
7475 else
7476 ot = OT_LONG;
7477 /* XXX: do it dynamically with CR4.DE bit */
7478 if (reg == 4 || reg == 5 || reg >= 8)
7479 goto illegal_op;
7480 if (b & 2) {
7481 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7482 gen_op_mov_TN_reg(ot, 0, rm);
7483 gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
7484 gen_jmp_im(s->pc - s->cs_base);
7485 gen_eob(s);
7486 } else {
7487 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7488 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7489 gen_op_mov_reg_T0(ot, rm);
7490 }
7491 }
7492 break;
7493 case 0x106: /* clts */
7494 if (s->cpl != 0) {
7495 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7496 } else {
7497 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7498 gen_helper_clts();
7499 /* abort block because static cpu state changed */
7500 gen_jmp_im(s->pc - s->cs_base);
7501 gen_eob(s);
7502 }
7503 break;
7504 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7505 case 0x1c3: /* MOVNTI reg, mem */
7506 if (!(s->cpuid_features & CPUID_SSE2))
7507 goto illegal_op;
7508 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7509 modrm = ldub_code(s->pc++);
7510 mod = (modrm >> 6) & 3;
7511 if (mod == 3)
7512 goto illegal_op;
7513 reg = ((modrm >> 3) & 7) | rex_r;
7514 /* generate a generic store */
7515 gen_ldst_modrm(s, modrm, ot, reg, 1);
7516 break;
7517 case 0x1ae:
7518 modrm = ldub_code(s->pc++);
7519 mod = (modrm >> 6) & 3;
7520 op = (modrm >> 3) & 7;
7521 switch(op) {
7522 case 0: /* fxsave */
7523 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7524 (s->prefix & PREFIX_LOCK))
7525 goto illegal_op;
7526 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7527 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7528 break;
7529 }
7530 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7531 if (s->cc_op != CC_OP_DYNAMIC)
7532 gen_op_set_cc_op(s->cc_op);
7533 gen_jmp_im(pc_start - s->cs_base);
7534 gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
7535 break;
7536 case 1: /* fxrstor */
7537 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7538 (s->prefix & PREFIX_LOCK))
7539 goto illegal_op;
7540 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7541 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7542 break;
7543 }
7544 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7545 if (s->cc_op != CC_OP_DYNAMIC)
7546 gen_op_set_cc_op(s->cc_op);
7547 gen_jmp_im(pc_start - s->cs_base);
7548 gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
7549 break;
7550 case 2: /* ldmxcsr */
7551 case 3: /* stmxcsr */
7552 if (s->flags & HF_TS_MASK) {
7553 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7554 break;
7555 }
7556 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7557 mod == 3)
7558 goto illegal_op;
7559 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7560 if (op == 2) {
7561 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7562 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7563 } else {
7564 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7565 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7566 }
7567 break;
7568 case 5: /* lfence */
7569 case 6: /* mfence */
7570 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
7571 goto illegal_op;
7572 break;
7573 case 7: /* sfence / clflush */
7574 if ((modrm & 0xc7) == 0xc0) {
7575 /* sfence */
7576 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7577 if (!(s->cpuid_features & CPUID_SSE))
7578 goto illegal_op;
7579 } else {
7580 /* clflush */
7581 if (!(s->cpuid_features & CPUID_CLFLUSH))
7582 goto illegal_op;
7583 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7584 }
7585 break;
7586 default:
7587 goto illegal_op;
7588 }
7589 break;
7590 case 0x10d: /* 3DNow! prefetch(w) */
7591 modrm = ldub_code(s->pc++);
7592 mod = (modrm >> 6) & 3;
7593 if (mod == 3)
7594 goto illegal_op;
7595 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7596 /* ignore for now */
7597 break;
7598 case 0x1aa: /* rsm */
7599 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7600 if (!(s->flags & HF_SMM_MASK))
7601 goto illegal_op;
7602 if (s->cc_op != CC_OP_DYNAMIC) {
7603 gen_op_set_cc_op(s->cc_op);
7604 s->cc_op = CC_OP_DYNAMIC;
7605 }
7606 gen_jmp_im(s->pc - s->cs_base);
7607 gen_helper_rsm();
7608 gen_eob(s);
7609 break;
7610 case 0x1b8: /* SSE4.2 popcnt */
7611 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7612 PREFIX_REPZ)
7613 goto illegal_op;
7614 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7615 goto illegal_op;
7617 modrm = ldub_code(s->pc++);
7618 reg = ((modrm >> 3) & 7);
7620 if (s->prefix & PREFIX_DATA)
7621 ot = OT_WORD;
7622 else if (s->dflag != 2)
7623 ot = OT_LONG;
7624 else
7625 ot = OT_QUAD;
7627 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
7628 gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
7629 gen_op_mov_reg_T0(ot, reg);
7631 s->cc_op = CC_OP_EFLAGS;
7632 break;
7633 case 0x10e ... 0x10f:
7634 /* 3DNow! instructions, ignore prefixes */
7635 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7636 case 0x110 ... 0x117:
7637 case 0x128 ... 0x12f:
7638 case 0x138 ... 0x13a:
7639 case 0x150 ... 0x179:
7640 case 0x17c ... 0x17f:
7641 case 0x1c2:
7642 case 0x1c4 ... 0x1c6:
7643 case 0x1d0 ... 0x1fe:
7644 gen_sse(s, b, pc_start, rex_r);
7645 break;
7646 default:
7647 goto illegal_op;
7648 }
7649 /* lock generation */
7650 if (s->prefix & PREFIX_LOCK)
7651 gen_helper_unlock();
7652 return s->pc;
7653 illegal_op:
7654 if (s->prefix & PREFIX_LOCK)
7655 gen_helper_unlock();
7656 /* XXX: ensure that no lock was generated */
7657 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7658 return s->pc;
7659 }
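/* optimize_flags_init() registers the TCG globals that back the
   cc_op/cc_src/cc_dst/cc_tmp fields and the guest general purpose
   registers, so the code generator can track them instead of
   reloading them from CPUState around every access. */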
7661 void optimize_flags_init(void)
7662 {
7663 #if TCG_TARGET_REG_BITS == 32
7664 assert(sizeof(CCTable) == (1 << 3));
7665 #else
7666 assert(sizeof(CCTable) == (1 << 4));
7667 #endif
7668 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7669 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7670 offsetof(CPUState, cc_op), "cc_op");
7671 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
7672 "cc_src");
7673 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
7674 "cc_dst");
7675 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
7676 "cc_tmp");
7678 #ifdef TARGET_X86_64
7679 cpu_regs[R_EAX] = tcg_global_mem_new_i64(TCG_AREG0,
7680 offsetof(CPUState, regs[R_EAX]), "rax");
7681 cpu_regs[R_ECX] = tcg_global_mem_new_i64(TCG_AREG0,
7682 offsetof(CPUState, regs[R_ECX]), "rcx");
7683 cpu_regs[R_EDX] = tcg_global_mem_new_i64(TCG_AREG0,
7684 offsetof(CPUState, regs[R_EDX]), "rdx");
7685 cpu_regs[R_EBX] = tcg_global_mem_new_i64(TCG_AREG0,
7686 offsetof(CPUState, regs[R_EBX]), "rbx");
7687 cpu_regs[R_ESP] = tcg_global_mem_new_i64(TCG_AREG0,
7688 offsetof(CPUState, regs[R_ESP]), "rsp");
7689 cpu_regs[R_EBP] = tcg_global_mem_new_i64(TCG_AREG0,
7690 offsetof(CPUState, regs[R_EBP]), "rbp");
7691 cpu_regs[R_ESI] = tcg_global_mem_new_i64(TCG_AREG0,
7692 offsetof(CPUState, regs[R_ESI]), "rsi");
7693 cpu_regs[R_EDI] = tcg_global_mem_new_i64(TCG_AREG0,
7694 offsetof(CPUState, regs[R_EDI]), "rdi");
7695 cpu_regs[8] = tcg_global_mem_new_i64(TCG_AREG0,
7696 offsetof(CPUState, regs[8]), "r8");
7697 cpu_regs[9] = tcg_global_mem_new_i64(TCG_AREG0,
7698 offsetof(CPUState, regs[9]), "r9");
7699 cpu_regs[10] = tcg_global_mem_new_i64(TCG_AREG0,
7700 offsetof(CPUState, regs[10]), "r10");
7701 cpu_regs[11] = tcg_global_mem_new_i64(TCG_AREG0,
7702 offsetof(CPUState, regs[11]), "r11");
7703 cpu_regs[12] = tcg_global_mem_new_i64(TCG_AREG0,
7704 offsetof(CPUState, regs[12]), "r12");
7705 cpu_regs[13] = tcg_global_mem_new_i64(TCG_AREG0,
7706 offsetof(CPUState, regs[13]), "r13");
7707 cpu_regs[14] = tcg_global_mem_new_i64(TCG_AREG0,
7708 offsetof(CPUState, regs[14]), "r14");
7709 cpu_regs[15] = tcg_global_mem_new_i64(TCG_AREG0,
7710 offsetof(CPUState, regs[15]), "r15");
7711 #else
7712 cpu_regs[R_EAX] = tcg_global_mem_new_i32(TCG_AREG0,
7713 offsetof(CPUState, regs[R_EAX]), "eax");
7714 cpu_regs[R_ECX] = tcg_global_mem_new_i32(TCG_AREG0,
7715 offsetof(CPUState, regs[R_ECX]), "ecx");
7716 cpu_regs[R_EDX] = tcg_global_mem_new_i32(TCG_AREG0,
7717 offsetof(CPUState, regs[R_EDX]), "edx");
7718 cpu_regs[R_EBX] = tcg_global_mem_new_i32(TCG_AREG0,
7719 offsetof(CPUState, regs[R_EBX]), "ebx");
7720 cpu_regs[R_ESP] = tcg_global_mem_new_i32(TCG_AREG0,
7721 offsetof(CPUState, regs[R_ESP]), "esp");
7722 cpu_regs[R_EBP] = tcg_global_mem_new_i32(TCG_AREG0,
7723 offsetof(CPUState, regs[R_EBP]), "ebp");
7724 cpu_regs[R_ESI] = tcg_global_mem_new_i32(TCG_AREG0,
7725 offsetof(CPUState, regs[R_ESI]), "esi");
7726 cpu_regs[R_EDI] = tcg_global_mem_new_i32(TCG_AREG0,
7727 offsetof(CPUState, regs[R_EDI]), "edi");
7728 #endif
7730 /* register helpers */
7731 #define GEN_HELPER 2
7732 #include "helper.h"
7735 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7736 basic block 'tb'. If search_pc is TRUE, also generate PC
7737 information for each intermediate instruction. */
7738 static inline void gen_intermediate_code_internal(CPUState *env,
7739 TranslationBlock *tb,
7740 int search_pc)
7741 {
7742 DisasContext dc1, *dc = &dc1;
7743 target_ulong pc_ptr;
7744 uint16_t *gen_opc_end;
7745 CPUBreakpoint *bp;
7746 int j, lj;
7747 uint64_t flags;
7748 target_ulong pc_start;
7749 target_ulong cs_base;
7750 int num_insns;
7751 int max_insns;
7753 /* generate intermediate code */
7754 pc_start = tb->pc;
7755 cs_base = tb->cs_base;
7756 flags = tb->flags;
7758 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7759 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7760 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7761 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7762 dc->f_st = 0;
7763 dc->vm86 = (flags >> VM_SHIFT) & 1;
7764 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7765 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7766 dc->tf = (flags >> TF_SHIFT) & 1;
7767 dc->singlestep_enabled = env->singlestep_enabled;
7768 dc->cc_op = CC_OP_DYNAMIC;
7769 dc->cs_base = cs_base;
7770 dc->tb = tb;
7771 dc->popl_esp_hack = 0;
7772 /* select memory access functions */
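/* index 0 is the direct (user-mode emulation) path; with a soft MMU
   the non-zero values select the kernel or CPL3 flavours of the
   generated load/store ops. */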
7773 dc->mem_index = 0;
7774 if (flags & HF_SOFTMMU_MASK) {
7775 if (dc->cpl == 3)
7776 dc->mem_index = 2 * 4;
7777 else
7778 dc->mem_index = 1 * 4;
7779 }
7780 dc->cpuid_features = env->cpuid_features;
7781 dc->cpuid_ext_features = env->cpuid_ext_features;
7782 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7783 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7784 #ifdef TARGET_X86_64
7785 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7786 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7787 #endif
7788 dc->flags = flags;
7789 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7790 (flags & HF_INHIBIT_IRQ_MASK)
7791 #ifndef CONFIG_SOFTMMU
7792 || (flags & HF_SOFTMMU_MASK)
7793 #endif
7794 );
7795 #if 0
7796 /* check addseg logic */
7797 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7798 printf("ERROR addseg\n");
7799 #endif
7801 cpu_T[0] = tcg_temp_new();
7802 cpu_T[1] = tcg_temp_new();
7803 cpu_A0 = tcg_temp_new();
7804 cpu_T3 = tcg_temp_new();
7806 cpu_tmp0 = tcg_temp_new();
7807 cpu_tmp1_i64 = tcg_temp_new_i64();
7808 cpu_tmp2_i32 = tcg_temp_new_i32();
7809 cpu_tmp3_i32 = tcg_temp_new_i32();
7810 cpu_tmp4 = tcg_temp_new();
7811 cpu_tmp5 = tcg_temp_new();
7812 cpu_ptr0 = tcg_temp_new_ptr();
7813 cpu_ptr1 = tcg_temp_new_ptr();
7815 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7817 dc->is_jmp = DISAS_NEXT;
7818 pc_ptr = pc_start;
7819 lj = -1;
7820 num_insns = 0;
7821 max_insns = tb->cflags & CF_COUNT_MASK;
7822 if (max_insns == 0)
7823 max_insns = CF_COUNT_MASK;
7825 gen_icount_start();
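/* main translation loop: stop at breakpoints, record the opc -> eip
   and cc_op mapping when search_pc is set, translate one instruction
   at a time, and end the block on a jump, on single-stepping, on an
   inhibited irq window, or when the opcode buffer, page or icount
   limits are reached. */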
7826 for(;;) {
7827 if (unlikely(!QTAILQ_EMPTY(&env->breakpoints))) {
7828 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
7829 if (bp->pc == pc_ptr &&
7830 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7831 gen_debug(dc, pc_ptr - dc->cs_base);
7832 break;
7833 }
7834 }
7835 }
7836 if (search_pc) {
7837 j = gen_opc_ptr - gen_opc_buf;
7838 if (lj < j) {
7839 lj++;
7840 while (lj < j)
7841 gen_opc_instr_start[lj++] = 0;
7842 }
7843 gen_opc_pc[lj] = pc_ptr;
7844 gen_opc_cc_op[lj] = dc->cc_op;
7845 gen_opc_instr_start[lj] = 1;
7846 gen_opc_icount[lj] = num_insns;
7847 }
7848 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7849 gen_io_start();
7851 pc_ptr = disas_insn(dc, pc_ptr);
7852 num_insns++;
7853 /* stop translation if indicated */
7854 if (dc->is_jmp)
7855 break;
7856 /* in single-step mode, we generate only one instruction and
7857 generate an exception */
7858 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7859 the flag and abort the translation to give the irqs a
7860 chance to happen */
7861 if (dc->tf || dc->singlestep_enabled ||
7862 (flags & HF_INHIBIT_IRQ_MASK)) {
7863 gen_jmp_im(pc_ptr - dc->cs_base);
7864 gen_eob(dc);
7865 break;
7866 }
7867 /* if too long translation, stop generation too */
7868 if (gen_opc_ptr >= gen_opc_end ||
7869 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7870 num_insns >= max_insns) {
7871 gen_jmp_im(pc_ptr - dc->cs_base);
7872 gen_eob(dc);
7873 break;
7874 }
7875 if (singlestep) {
7876 gen_jmp_im(pc_ptr - dc->cs_base);
7877 gen_eob(dc);
7878 break;
7879 }
7880 }
7881 if (tb->cflags & CF_LAST_IO)
7882 gen_io_end();
7883 gen_icount_end(tb, num_insns);
7884 *gen_opc_ptr = INDEX_op_end;
7885 /* don't forget to fill in the last values */
7886 if (search_pc) {
7887 j = gen_opc_ptr - gen_opc_buf;
7888 lj++;
7889 while (lj <= j)
7890 gen_opc_instr_start[lj++] = 0;
7891 }
7893 #ifdef DEBUG_DISAS
7894 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7895 int disas_flags;
7896 qemu_log("----------------\n");
7897 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7898 #ifdef TARGET_X86_64
7899 if (dc->code64)
7900 disas_flags = 2;
7901 else
7902 #endif
7903 disas_flags = !dc->code32;
7904 log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
7905 qemu_log("\n");
7907 #endif
7909 if (!search_pc) {
7910 tb->size = pc_ptr - pc_start;
7911 tb->icount = num_insns;
7912 }
7913 }
7915 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
7916 {
7917 gen_intermediate_code_internal(env, tb, 0);
7918 }
7920 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
7921 {
7922 gen_intermediate_code_internal(env, tb, 1);
7923 }
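/* gen_pc_load() restores eip and cc_op for the instruction containing
   searched_pc, using the gen_opc_pc/gen_opc_cc_op tables that were
   filled in by the search_pc pass above. */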
7925 void gen_pc_load(CPUState *env, TranslationBlock *tb,
7926 unsigned long searched_pc, int pc_pos, void *puc)
7927 {
7928 int cc_op;
7929 #ifdef DEBUG_DISAS
7930 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
7931 int i;
7932 qemu_log("RESTORE:\n");
7933 for(i = 0;i <= pc_pos; i++) {
7934 if (gen_opc_instr_start[i]) {
7935 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
7938 qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
7939 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
7940 (uint32_t)tb->cs_base);
7941 }
7942 #endif
7943 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
7944 cc_op = gen_opc_cc_op[pc_pos];
7945 if (cc_op != CC_OP_DYNAMIC)
7946 env->cc_op = cc_op;
7947 }