/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA  02110-1301 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(x...) x
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#if 1
#define BUGGY_64(x) NULL
#endif
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(x...)
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif
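
/* X86_64_DEF() splices its arguments into an expression only when
   TARGET_X86_64 is defined.  For example, the byte-register test used
   below,
       if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs))
   expands to
       if (reg < 4 || reg >= 8 || x86_64_hregs)   on x86_64 targets, and
       if (reg < 4)                               on 32-bit-only targets. */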
//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
/* local temps */
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5, cpu_tmp6;

#include "gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
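
/* Partial-register accesses are done as narrow loads/stores at a byte
   offset into the full target_ulong regs[] slot, so the offset of the
   low byte/word/long depends on host endianness.  E.g. on a 64-bit
   little-endian host AL lives at offset 0 of regs[R_EAX] and AH at
   offset 1; on a big-endian host they sit at offsets 7 and 6. */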
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
        } else {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    case OT_WORD:
        tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case OT_QUAD:
        tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}

static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}
static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case 2:
        tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            goto std_case;
        } else {
            tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    default:
    std_case:
        tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}
static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}
static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif
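
/* On TARGET_X86_64 builds cpu_A0 is 64 bits wide even when translating
   32-bit code, so the "addl"-style address helpers above mask the sum
   with 0xffffffff to reproduce 32-bit address wrap-around.  The "addq"
   variants carry no mask because 64-bit mode uses the full width. */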
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}

static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}

/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}
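
/* Callers pass "ot + s->mem_index" as 'idx': the low two bits encode
   the operand size (OT_BYTE..OT_QUAD) and the upper bits carry the
   softmmu memory index, biased so that "(idx >> 2) - 1" recovers it.
   E.g. (assuming s->mem_index == 4) idx == 5 for an OT_WORD access
   selects a 16-bit load/store with memory index 0. */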
static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
    }
}
static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}

static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(cur_eip);
            state_saved = 1;
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}
static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}
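
/* Condition codes are evaluated lazily: rather than computing EFLAGS
   after every instruction, the translator records the last flag-setting
   operation in cc_op and keeps its operands in cc_dst/cc_src.  The bits
   are reconstructed only when a flag is actually consumed, either by
   the helpers above or by the specialized fast paths in gen_jcc1().
   For example, after "cmp eax, ebx" (CC_OP_SUBL) cc_dst holds
   eax - ebx and cc_src holds ebx, so ZF is just "cc_dst == 0" and the
   first operand can be recovered as cc_dst + cc_src when CF is needed. */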
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch(jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;
    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
{
    int inv, jcc_op, size, cond;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch(cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:

        size = cc_op - CC_OP_SUBB;
        switch(jcc_op) {
        case JCC_Z:
        fast_jcc_z:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                t0 = cpu_tmp0;
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                t0 = cpu_tmp0;
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
                t0 = cpu_tmp0;
                break;
#endif
            default:
                t0 = cpu_cc_dst;
                break;
            }
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
            break;
        case JCC_S:
        fast_jcc_s:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#endif
            default:
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                                   0, l1);
                break;
            }
            break;

        case JCC_B:
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
            goto fast_jcc_b;
        case JCC_BE:
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
        fast_jcc_b:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        case JCC_L:
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        default:
            goto slow_jcc;
        }
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_ADCB:
    case CC_OP_ADCW:
    case CC_OP_ADCL:
    case CC_OP_ADCQ:

    case CC_OP_SBBB:
    case CC_OP_SBBW:
    case CC_OP_SBBL:
    case CC_OP_SBBQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:

    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
        switch(jcc_op) {
        case JCC_Z:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_z;
        case JCC_S:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_s;
        default:
            goto slow_jcc;
        }
        break;
    default:
    slow_jcc:
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
                           cpu_T[0], 0, l1);
        break;
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_ins(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}
/* same method as Valgrind: we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                    \
static inline void gen_repz_ ## op(DisasContext *s, int ot,             \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                       \
    int l2;                                                             \
    gen_update_cc_op(s);                                                \
    l2 = gen_jz_ecx_string(s, next_eip);                                \
    gen_ ## op(s, ot);                                                  \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                             \
    /* a loop would cause two single step exceptions if ECX = 1         \
       before rep string_insn */                                        \
    if (!s->jmp_opt)                                                    \
        gen_op_jz_ecx(s->aflag, l2);                                    \
    gen_jmp(s, cur_eip);                                                \
}

#define GEN_REPZ2(op)                                                   \
static inline void gen_repz_ ## op(DisasContext *s, int ot,             \
                                   target_ulong cur_eip,                \
                                   target_ulong next_eip,               \
                                   int nz)                              \
{                                                                       \
    int l2;                                                             \
    gen_update_cc_op(s);                                                \
    l2 = gen_jz_ecx_string(s, next_eip);                                \
    gen_ ## op(s, ot);                                                  \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                             \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                  \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);          \
    if (!s->jmp_opt)                                                    \
        gen_op_jz_ecx(s->aflag, l2);                                    \
    gen_jmp(s, cur_eip);                                                \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
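
/* The code generated for "rep movs" and friends is therefore roughly:
       if (ECX == 0) goto next_insn;
       <one iteration of the string op>
       ECX--;
       (for scas/cmps: if (ZF != expected) goto next_insn;)
       goto current_insn;   // re-execute the rep instruction
   i.e. each translated iteration loops back through the instruction
   itself, which keeps interrupts and single-stepping precise. */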
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0: gen_helper_fadd_ST0_FT0(); break;
    case 1: gen_helper_fmul_ST0_FT0(); break;
    case 2: gen_helper_fcom_ST0_FT0(); break;
    case 3: gen_helper_fcom_ST0_FT0(); break;
    case 4: gen_helper_fsub_ST0_FT0(); break;
    case 5: gen_helper_fsubr_ST0_FT0(); break;
    case 6: gen_helper_fdiv_ST0_FT0(); break;
    case 7: gen_helper_fdivr_ST0_FT0(); break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0: gen_helper_fadd_STN_ST0(tmp); break;
    case 1: gen_helper_fmul_STN_ST0(tmp); break;
    case 4: gen_helper_fsubr_STN_ST0(tmp); break;
    case 5: gen_helper_fsub_STN_ST0(tmp); break;
    case 6: gen_helper_fdivr_STN_ST0(tmp); break;
    case 7: gen_helper_fdiv_STN_ST0(tmp); break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch(op) {
    case OP_ADCL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_SBBL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        break;
    case OP_SUBL:
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_CMPL:
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    }
}
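
/* Note the ADC/SBB trick above: the carry-in is shifted left by two and
   added to CC_OP_ADDB/CC_OP_SUBB + ot, so at run time cc_op selects the
   plain ADD/SUB variant when the carry was clear and the ADC/SBB
   variant when it was set (the enum places the two groups four entries
   apart).  Because the result is only known at run time, the translator
   itself must fall back to CC_OP_DYNAMIC. */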
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
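
/* INC and DEC leave CF untouched.  gen_inc() therefore materializes the
   carry of the previous flag-setting operation into cpu_cc_src before
   switching cc_op to the INC/DEC group, whose flag computation reads CF
   back out of cc_src instead of recomputing it. */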
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask;
    int shift_label;
    TCGv t0, t1;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* XXX: inefficient */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();

    tcg_gen_mov_tl(t0, cpu_T[0]);
    tcg_gen_mov_tl(t1, cpu_T3);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right)
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    else
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
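
/* A shift count of zero must leave the flags unchanged, and the count
   is only known at run time here, so the flags update is guarded by a
   branch over the cc_src/cc_dst/cc_op stores and the translator ends up
   in CC_OP_DYNAMIC.  The value shifted by count-1 (kept in cpu_T3) is
   what supplies the carry-out bit. */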
static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        if (is_right)
            s->cc_op = CC_OP_SARB + ot;
        else
            s->cc_op = CC_OP_SHLB + ot;
    }
}

static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
                          int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, cpu_T[1]);

    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);

    if (ot <= OT_WORD)
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
    else
        tcg_gen_mov_tl(cpu_tmp0, t1);

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
    if (is_right) {
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    } else {
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    }
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
    if (is_right) {
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    }
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
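
/* The variable-count rotate is composed from two shifts:
       rol(x, n) == (x << n) | (x >> (data_bits - n))
   (and symmetrically for ror), with the n == 0 case branched around
   because a shift by data_bits would be undefined behaviour in TCG. */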
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    int mask;
    int data_bits;
    TCGv t0, t1, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t1, t0);

    op2 &= mask;
    data_bits = 8 << ot;
    if (op2 != 0) {
        int shift = op2 & ((1 << (3 + ot)) - 1);
        if (is_right) {
            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
            tcg_gen_shli_tl(t0, t0, data_bits - shift);
        } else {
            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
            tcg_gen_shri_tl(t0, t0, data_bits - shift);
        }
        tcg_gen_or_tl(t0, t0, cpu_tmp4);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    if (op2 != 0) {
        /* update eflags */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);

        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
        tcg_gen_xor_tl(cpu_tmp0, t1, t0);
        tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
        if (is_right) {
            tcg_gen_shri_tl(t0, t0, data_bits - 1);
        }
        tcg_gen_andi_tl(t0, t0, CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

        tcg_gen_discard_tl(cpu_cc_dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
        s->cc_op = CC_OP_EFLAGS;
    }

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(a0);
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    } else {
        switch (ot) {
        case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    int label1, label2, data_bits;
    target_ulong mask;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate */
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp6);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
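
/* gen_lea_modrm() below decodes the standard x86 addressing bytes into
   an effective address in A0.  The bit layout is:
       ModRM: mod(7:6) reg(5:3) rm(2:0)
       SIB  : scale(7:6) index(5:3) base(2:0)   (present when rm == 4
              in 32/64-bit address mode)
   For example, 8D 44 8B 08 ("lea eax, [ebx + ecx*4 + 8]") has mod=01
   (8-bit displacement follows), rm=100 (SIB byte follows), and a SIB
   byte with scale=2, index=ECX, base=EBX. */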
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {

        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)ldl_code(s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = ldl_code(s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0) {
                    gen_op_addq_A0_im(disp);
                }
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* XXX: index == 4 is always invalid */
        if (havesib && (index != 4 || scale != 0)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(override);
            }
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = lduw_code(s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = lduw_code(s->pc);
            s->pc += 2;
            break;
        }
        switch(rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}
static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    if (s->aflag) {

        base = rm;

        if (base == 4) {
            code = ldub_code(s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
    }
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    } else {
        override = R_DS;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}
static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;

    switch(ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
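
/* Direct block chaining in brief: tcg_gen_goto_tb() emits a patchable
   jump slot, and tcg_gen_exit_tb((long)tb + tb_num) returns a value
   identifying both the source TB and which of its two exits was taken.
   The execution loop can then patch the slot so that subsequent runs
   jump straight to the target TB without returning to the dispatcher. */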
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = 3;
    } else {

        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
2341 static void gen_setcc(DisasContext *s, int b)
2343 int inv, jcc_op, l1;
2344 TCGv t0;
2346 if (is_fast_jcc_case(s, b)) {
2347 /* nominal case: we use a jump */
2348 /* XXX: make it faster by adding new instructions in TCG */
2349 t0 = tcg_temp_local_new();
2350 tcg_gen_movi_tl(t0, 0);
2351 l1 = gen_new_label();
2352 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2353 tcg_gen_movi_tl(t0, 1);
2354 gen_set_label(l1);
2355 tcg_gen_mov_tl(cpu_T[0], t0);
2356 tcg_temp_free(t0);
2357 } else {
2358 /* slow case: it is more efficient not to generate a jump,
2359 although it is questionable whether this optimization
2360 is worthwhile */
2361 inv = b & 1;
2362 jcc_op = (b >> 1) & 7;
2363 gen_setcc_slow_T0(s, jcc_op);
2364 if (inv) {
2365 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
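/* Note on the condition encoding used here: the low bit of 'b'
   selects the negated condition (e.g. JZ is 0x74 and JNZ is 0x75),
   which is why the slow path tests 'b & 1' and flips the result
   with an XOR, and the fast path jumps on 'b ^ 1' instead of
   generating a separate negated test. */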
2370 static inline void gen_op_movl_T0_seg(int seg_reg)
2372 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2373 offsetof(CPUX86State,segs[seg_reg].selector));
2376 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2378 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2379 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2380 offsetof(CPUX86State,segs[seg_reg].selector));
2381 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2382 tcg_gen_st_tl(cpu_T[0], cpu_env,
2383 offsetof(CPUX86State,segs[seg_reg].base));
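/* Real mode/vm86 semantics, as implemented above: the segment base
   is simply selector << 4, e.g. loading DS with 0x1234 sets DS.base
   to 0x12340; no descriptor table access or privilege check is
   involved, so no helper call is needed. */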
2386 /* move T0 to seg_reg and determine whether the CPU state may change. Never
2387 call this function with seg_reg == R_CS */
2388 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2390 if (s->pe && !s->vm86) {
2391 /* XXX: optimize by finding processor state dynamically */
2392 if (s->cc_op != CC_OP_DYNAMIC)
2393 gen_op_set_cc_op(s->cc_op);
2394 gen_jmp_im(cur_eip);
2395 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2396 gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
2397 /* abort translation because the addseg value may change or
2398 because ss32 may change. For R_SS, translation must always
2399 stop, as special handling must be done to disable hardware
2400 interrupts for the next instruction */
2401 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2402 s->is_jmp = 3;
2403 } else {
2404 gen_op_movl_seg_T0_vm(seg_reg);
2405 if (seg_reg == R_SS)
2406 s->is_jmp = 3;
2410 static inline int svm_is_rep(int prefixes)
2412 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2415 static inline void
2416 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2417 uint32_t type, uint64_t param)
2419 /* no SVM activated; fast case */
2420 if (likely(!(s->flags & HF_SVMI_MASK)))
2421 return;
2422 if (s->cc_op != CC_OP_DYNAMIC)
2423 gen_op_set_cc_op(s->cc_op);
2424 gen_jmp_im(pc_start - s->cs_base);
2425 gen_helper_svm_check_intercept_param(tcg_const_i32(type),
2426 tcg_const_i64(param));
2429 static inline void
2430 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2432 gen_svm_check_intercept_param(s, pc_start, type, 0);
2435 static inline void gen_stack_update(DisasContext *s, int addend)
2437 #ifdef TARGET_X86_64
2438 if (CODE64(s)) {
2439 gen_op_add_reg_im(2, R_ESP, addend);
2440 } else
2441 #endif
2442 if (s->ss32) {
2443 gen_op_add_reg_im(1, R_ESP, addend);
2444 } else {
2445 gen_op_add_reg_im(0, R_ESP, addend);
2449 /* generate a push. It depends on ss32, addseg and dflag */
2450 static void gen_push_T0(DisasContext *s)
2452 #ifdef TARGET_X86_64
2453 if (CODE64(s)) {
2454 gen_op_movq_A0_reg(R_ESP);
2455 if (s->dflag) {
2456 gen_op_addq_A0_im(-8);
2457 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2458 } else {
2459 gen_op_addq_A0_im(-2);
2460 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2462 gen_op_mov_reg_A0(2, R_ESP);
2463 } else
2464 #endif
2466 gen_op_movl_A0_reg(R_ESP);
2467 if (!s->dflag)
2468 gen_op_addl_A0_im(-2);
2469 else
2470 gen_op_addl_A0_im(-4);
2471 if (s->ss32) {
2472 if (s->addseg) {
2473 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2474 gen_op_addl_A0_seg(R_SS);
2476 } else {
2477 gen_op_andl_A0_ffff();
2478 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2479 gen_op_addl_A0_seg(R_SS);
2481 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2482 if (s->ss32 && !s->addseg)
2483 gen_op_mov_reg_A0(1, R_ESP);
2484 else
2485 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
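/* Worked example: in a 32-bit code/stack segment (dflag = 1,
   ss32 = 1) the sequence above computes A0 = ESP - 4, stores the
   4-byte value and writes the new ESP; with dflag = 0 only 2 bytes
   are stored and ESP drops by 2.  T1 keeps the pre-segment offset
   so that, when an SS base is added, ESP is updated with the
   offset rather than the linear address. */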
2489 /* generate a push. It depends on ss32, addseg and dflag */
2490 /* slower version for T1, only used for call Ev */
2491 static void gen_push_T1(DisasContext *s)
2493 #ifdef TARGET_X86_64
2494 if (CODE64(s)) {
2495 gen_op_movq_A0_reg(R_ESP);
2496 if (s->dflag) {
2497 gen_op_addq_A0_im(-8);
2498 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2499 } else {
2500 gen_op_addq_A0_im(-2);
2501 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2503 gen_op_mov_reg_A0(2, R_ESP);
2504 } else
2505 #endif
2507 gen_op_movl_A0_reg(R_ESP);
2508 if (!s->dflag)
2509 gen_op_addl_A0_im(-2);
2510 else
2511 gen_op_addl_A0_im(-4);
2512 if (s->ss32) {
2513 if (s->addseg) {
2514 gen_op_addl_A0_seg(R_SS);
2516 } else {
2517 gen_op_andl_A0_ffff();
2518 gen_op_addl_A0_seg(R_SS);
2520 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2522 if (s->ss32 && !s->addseg)
2523 gen_op_mov_reg_A0(1, R_ESP);
2524 else
2525 gen_stack_update(s, (-2) << s->dflag);
2529 /* two step pop is necessary for precise exceptions */
2530 static void gen_pop_T0(DisasContext *s)
2532 #ifdef TARGET_X86_64
2533 if (CODE64(s)) {
2534 gen_op_movq_A0_reg(R_ESP);
2535 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2536 } else
2537 #endif
2539 gen_op_movl_A0_reg(R_ESP);
2540 if (s->ss32) {
2541 if (s->addseg)
2542 gen_op_addl_A0_seg(R_SS);
2543 } else {
2544 gen_op_andl_A0_ffff();
2545 gen_op_addl_A0_seg(R_SS);
2547 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2551 static void gen_pop_update(DisasContext *s)
2553 #ifdef TARGET_X86_64
2554 if (CODE64(s) && s->dflag) {
2555 gen_stack_update(s, 8);
2556 } else
2557 #endif
2559 gen_stack_update(s, 2 << s->dflag);
2563 static void gen_stack_A0(DisasContext *s)
2565 gen_op_movl_A0_reg(R_ESP);
2566 if (!s->ss32)
2567 gen_op_andl_A0_ffff();
2568 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2569 if (s->addseg)
2570 gen_op_addl_A0_seg(R_SS);
2573 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2574 static void gen_pusha(DisasContext *s)
2576 int i;
2577 gen_op_movl_A0_reg(R_ESP);
2578 gen_op_addl_A0_im(-16 << s->dflag);
2579 if (!s->ss32)
2580 gen_op_andl_A0_ffff();
2581 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2582 if (s->addseg)
2583 gen_op_addl_A0_seg(R_SS);
2584 for(i = 0;i < 8; i++) {
2585 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2586 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2587 gen_op_addl_A0_im(2 << s->dflag);
2589 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2592 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2593 static void gen_popa(DisasContext *s)
2595 int i;
2596 gen_op_movl_A0_reg(R_ESP);
2597 if (!s->ss32)
2598 gen_op_andl_A0_ffff();
2599 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2600 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2601 if (s->addseg)
2602 gen_op_addl_A0_seg(R_SS);
2603 for(i = 0;i < 8; i++) {
2604 /* ESP is not reloaded */
2605 if (i != 3) {
2606 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2607 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2609 gen_op_addl_A0_im(2 << s->dflag);
2611 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2614 static void gen_enter(DisasContext *s, int esp_addend, int level)
2616 int ot, opsize;
2618 level &= 0x1f;
2619 #ifdef TARGET_X86_64
2620 if (CODE64(s)) {
2621 ot = s->dflag ? OT_QUAD : OT_WORD;
2622 opsize = 1 << ot;
2624 gen_op_movl_A0_reg(R_ESP);
2625 gen_op_addq_A0_im(-opsize);
2626 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2628 /* push bp */
2629 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2630 gen_op_st_T0_A0(ot + s->mem_index);
2631 if (level) {
2632 /* XXX: must save state */
2633 gen_helper_enter64_level(tcg_const_i32(level),
2634 tcg_const_i32((ot == OT_QUAD)),
2635 cpu_T[1]);
2637 gen_op_mov_reg_T1(ot, R_EBP);
2638 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2639 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2640 } else
2641 #endif
2643 ot = s->dflag + OT_WORD;
2644 opsize = 2 << s->dflag;
2646 gen_op_movl_A0_reg(R_ESP);
2647 gen_op_addl_A0_im(-opsize);
2648 if (!s->ss32)
2649 gen_op_andl_A0_ffff();
2650 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2651 if (s->addseg)
2652 gen_op_addl_A0_seg(R_SS);
2653 /* push bp */
2654 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2655 gen_op_st_T0_A0(ot + s->mem_index);
2656 if (level) {
2657 /* XXX: must save state */
2658 gen_helper_enter_level(tcg_const_i32(level),
2659 tcg_const_i32(s->dflag),
2660 cpu_T[1]);
2662 gen_op_mov_reg_T1(ot, R_EBP);
2663 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2664 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
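/* Worked example: "enter 16, 0" in 32-bit code (dflag = 1) pushes
   EBP, sets EBP to the address of the saved value (old ESP - 4)
   and lowers ESP by a further 16 bytes for the locals, which is
   the -esp_addend adjustment above.  A non-zero nesting level is
   handed off to the enter_level helper, which performs the display
   copies the architecture requires. */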
2668 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2670 if (s->cc_op != CC_OP_DYNAMIC)
2671 gen_op_set_cc_op(s->cc_op);
2672 gen_jmp_im(cur_eip);
2673 gen_helper_raise_exception(tcg_const_i32(trapno));
2674 s->is_jmp = 3;
2677 /* an interrupt is different from an exception because of the
2678 privilege checks */
2679 static void gen_interrupt(DisasContext *s, int intno,
2680 target_ulong cur_eip, target_ulong next_eip)
2682 if (s->cc_op != CC_OP_DYNAMIC)
2683 gen_op_set_cc_op(s->cc_op);
2684 gen_jmp_im(cur_eip);
2685 gen_helper_raise_interrupt(tcg_const_i32(intno),
2686 tcg_const_i32(next_eip - cur_eip));
2687 s->is_jmp = 3;
2690 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2692 if (s->cc_op != CC_OP_DYNAMIC)
2693 gen_op_set_cc_op(s->cc_op);
2694 gen_jmp_im(cur_eip);
2695 gen_helper_debug();
2696 s->is_jmp = 3;
2699 /* generate a generic end of block. A trace exception is also
2700 generated if needed */
2701 static void gen_eob(DisasContext *s)
2703 if (s->cc_op != CC_OP_DYNAMIC)
2704 gen_op_set_cc_op(s->cc_op);
2705 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2706 gen_helper_reset_inhibit_irq();
2708 if (s->singlestep_enabled) {
2709 gen_helper_debug();
2710 } else if (s->tf) {
2711 gen_helper_single_step();
2712 } else {
2713 tcg_gen_exit_tb(0);
2715 s->is_jmp = 3;
2718 /* generate a jump to eip. No segment change may happen before this,
2719 as a direct jump to the next block may occur */
2720 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2722 if (s->jmp_opt) {
2723 if (s->cc_op != CC_OP_DYNAMIC) {
2724 gen_op_set_cc_op(s->cc_op);
2725 s->cc_op = CC_OP_DYNAMIC;
2727 gen_goto_tb(s, tb_num, eip);
2728 s->is_jmp = 3;
2729 } else {
2730 gen_jmp_im(eip);
2731 gen_eob(s);
2735 static void gen_jmp(DisasContext *s, target_ulong eip)
2737 gen_jmp_tb(s, eip, 0);
2740 static inline void gen_ldq_env_A0(int idx, int offset)
2742 int mem_index = (idx >> 2) - 1;
2743 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2744 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2747 static inline void gen_stq_env_A0(int idx, int offset)
2749 int mem_index = (idx >> 2) - 1;
2750 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2751 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2754 static inline void gen_ldo_env_A0(int idx, int offset)
2756 int mem_index = (idx >> 2) - 1;
2757 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2758 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2759 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2760 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2761 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2764 static inline void gen_sto_env_A0(int idx, int offset)
2766 int mem_index = (idx >> 2) - 1;
2767 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2768 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2769 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2770 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2771 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2774 static inline void gen_op_movo(int d_offset, int s_offset)
2776 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2777 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2778 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2779 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2782 static inline void gen_op_movq(int d_offset, int s_offset)
2784 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2785 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2788 static inline void gen_op_movl(int d_offset, int s_offset)
2790 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2791 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2794 static inline void gen_op_movq_env_0(int d_offset)
2796 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2797 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
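/* The XMM and MMX registers live in CPUX86State rather than in TCG
   globals, so the move helpers above shuffle data between cpu_env
   offsets through the 64/32-bit temporaries instead of operating
   on TCG registers directly. */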
2800 #define SSE_SPECIAL ((void *)1)
2801 #define SSE_DUMMY ((void *)2)
2803 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2804 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2805 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2807 static void *sse_op_table1[256][4] = {
2808 /* 3DNow! extensions */
2809 [0x0e] = { SSE_DUMMY }, /* femms */
2810 [0x0f] = { SSE_DUMMY }, /* pf... */
2811 /* pure SSE operations */
2812 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2813 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2814 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2815 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2816 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2817 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2818 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2819 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2821 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2822 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2823 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2824 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */
2825 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2826 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2827 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2828 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2829 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2830 [0x51] = SSE_FOP(sqrt),
2831 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2832 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2833 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2834 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2835 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2836 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2837 [0x58] = SSE_FOP(add),
2838 [0x59] = SSE_FOP(mul),
2839 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2840 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2841 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2842 [0x5c] = SSE_FOP(sub),
2843 [0x5d] = SSE_FOP(min),
2844 [0x5e] = SSE_FOP(div),
2845 [0x5f] = SSE_FOP(max),
2847 [0xc2] = SSE_FOP(cmpeq),
2848 [0xc6] = { gen_helper_shufps, gen_helper_shufpd },
2850 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2851 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2853 /* MMX ops and their SSE extensions */
2854 [0x60] = MMX_OP2(punpcklbw),
2855 [0x61] = MMX_OP2(punpcklwd),
2856 [0x62] = MMX_OP2(punpckldq),
2857 [0x63] = MMX_OP2(packsswb),
2858 [0x64] = MMX_OP2(pcmpgtb),
2859 [0x65] = MMX_OP2(pcmpgtw),
2860 [0x66] = MMX_OP2(pcmpgtl),
2861 [0x67] = MMX_OP2(packuswb),
2862 [0x68] = MMX_OP2(punpckhbw),
2863 [0x69] = MMX_OP2(punpckhwd),
2864 [0x6a] = MMX_OP2(punpckhdq),
2865 [0x6b] = MMX_OP2(packssdw),
2866 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2867 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2868 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2869 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2870 [0x70] = { gen_helper_pshufw_mmx,
2871 gen_helper_pshufd_xmm,
2872 gen_helper_pshufhw_xmm,
2873 gen_helper_pshuflw_xmm },
2874 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2875 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2876 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2877 [0x74] = MMX_OP2(pcmpeqb),
2878 [0x75] = MMX_OP2(pcmpeqw),
2879 [0x76] = MMX_OP2(pcmpeql),
2880 [0x77] = { SSE_DUMMY }, /* emms */
2881 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2882 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2883 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2884 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2885 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2886 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2887 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2888 [0xd1] = MMX_OP2(psrlw),
2889 [0xd2] = MMX_OP2(psrld),
2890 [0xd3] = MMX_OP2(psrlq),
2891 [0xd4] = MMX_OP2(paddq),
2892 [0xd5] = MMX_OP2(pmullw),
2893 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movq2dq, movdq2q */
2894 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2895 [0xd8] = MMX_OP2(psubusb),
2896 [0xd9] = MMX_OP2(psubusw),
2897 [0xda] = MMX_OP2(pminub),
2898 [0xdb] = MMX_OP2(pand),
2899 [0xdc] = MMX_OP2(paddusb),
2900 [0xdd] = MMX_OP2(paddusw),
2901 [0xde] = MMX_OP2(pmaxub),
2902 [0xdf] = MMX_OP2(pandn),
2903 [0xe0] = MMX_OP2(pavgb),
2904 [0xe1] = MMX_OP2(psraw),
2905 [0xe2] = MMX_OP2(psrad),
2906 [0xe3] = MMX_OP2(pavgw),
2907 [0xe4] = MMX_OP2(pmulhuw),
2908 [0xe5] = MMX_OP2(pmulhw),
2909 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2910 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2911 [0xe8] = MMX_OP2(psubsb),
2912 [0xe9] = MMX_OP2(psubsw),
2913 [0xea] = MMX_OP2(pminsw),
2914 [0xeb] = MMX_OP2(por),
2915 [0xec] = MMX_OP2(paddsb),
2916 [0xed] = MMX_OP2(paddsw),
2917 [0xee] = MMX_OP2(pmaxsw),
2918 [0xef] = MMX_OP2(pxor),
2919 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2920 [0xf1] = MMX_OP2(psllw),
2921 [0xf2] = MMX_OP2(pslld),
2922 [0xf3] = MMX_OP2(psllq),
2923 [0xf4] = MMX_OP2(pmuludq),
2924 [0xf5] = MMX_OP2(pmaddwd),
2925 [0xf6] = MMX_OP2(psadbw),
2926 [0xf7] = MMX_OP2(maskmov),
2927 [0xf8] = MMX_OP2(psubb),
2928 [0xf9] = MMX_OP2(psubw),
2929 [0xfa] = MMX_OP2(psubl),
2930 [0xfb] = MMX_OP2(psubq),
2931 [0xfc] = MMX_OP2(paddb),
2932 [0xfd] = MMX_OP2(paddw),
2933 [0xfe] = MMX_OP2(paddl),
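/* sse_op_table1 is indexed by the second opcode byte and by the
   mandatory prefix: column 0 = no prefix, 1 = 0x66, 2 = 0xF3,
   3 = 0xF2 (the b1 value computed in gen_sse() below).  E.g. row
   0x58 resolves 0F 58 to addps, 66 0F 58 to addpd, F3 0F 58 to
   addss and F2 0F 58 to addsd. */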
2936 static void *sse_op_table2[3 * 8][2] = {
2937 [0 + 2] = MMX_OP2(psrlw),
2938 [0 + 4] = MMX_OP2(psraw),
2939 [0 + 6] = MMX_OP2(psllw),
2940 [8 + 2] = MMX_OP2(psrld),
2941 [8 + 4] = MMX_OP2(psrad),
2942 [8 + 6] = MMX_OP2(pslld),
2943 [16 + 2] = MMX_OP2(psrlq),
2944 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2945 [16 + 6] = MMX_OP2(psllq),
2946 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
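/* sse_op_table2 serves the immediate-form MMX/SSE shifts (opcodes
   0x71-0x73): the row is ((b - 1) & 3) * 8, i.e. 0, 8 or 16 for
   word, dword and qword shifts, plus the modrm reg field, so
   /2 = logical right, /4 = arithmetic right, /6 = left shift, and
   /3 and /7 are the xmm-only psrldq/pslldq byte shifts. */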
2949 static void *sse_op_table3[4 * 3] = {
2950 gen_helper_cvtsi2ss,
2951 gen_helper_cvtsi2sd,
2952 X86_64_ONLY(gen_helper_cvtsq2ss),
2953 X86_64_ONLY(gen_helper_cvtsq2sd),
2955 gen_helper_cvttss2si,
2956 gen_helper_cvttsd2si,
2957 X86_64_ONLY(gen_helper_cvttss2sq),
2958 X86_64_ONLY(gen_helper_cvttsd2sq),
2960 gen_helper_cvtss2si,
2961 gen_helper_cvtsd2si,
2962 X86_64_ONLY(gen_helper_cvtss2sq),
2963 X86_64_ONLY(gen_helper_cvtsd2sq),
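/* sse_op_table3 holds the scalar int<->float converts in rows of
   four: entries 0-3 are cvtsi2ss/sd (plus the 64-bit cvtsq2ss/sd
   variants), 4-7 the truncating cvtt*2si/sq and 8-11 the rounding
   cvt*2si/sq.  The callers index it as
   (s->dflag == 2) * 2 + ((b >> 8) - 2), adding 4 or 8 to select
   the truncating or rounding row. */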
2966 static void *sse_op_table4[8][4] = {
2967 SSE_FOP(cmpeq),
2968 SSE_FOP(cmplt),
2969 SSE_FOP(cmple),
2970 SSE_FOP(cmpunord),
2971 SSE_FOP(cmpneq),
2972 SSE_FOP(cmpnlt),
2973 SSE_FOP(cmpnle),
2974 SSE_FOP(cmpord),
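/* sse_op_table4 is indexed by the cmpps/cmppd/cmpss/cmpsd imm8
   predicate (0 = eq, 1 = lt, 2 = le, 3 = unord, 4 = neq, 5 = nlt,
   6 = nle, 7 = ord); values >= 8 are rejected as illegal below. */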
2977 static void *sse_op_table5[256] = {
2978 [0x0c] = gen_helper_pi2fw,
2979 [0x0d] = gen_helper_pi2fd,
2980 [0x1c] = gen_helper_pf2iw,
2981 [0x1d] = gen_helper_pf2id,
2982 [0x8a] = gen_helper_pfnacc,
2983 [0x8e] = gen_helper_pfpnacc,
2984 [0x90] = gen_helper_pfcmpge,
2985 [0x94] = gen_helper_pfmin,
2986 [0x96] = gen_helper_pfrcp,
2987 [0x97] = gen_helper_pfrsqrt,
2988 [0x9a] = gen_helper_pfsub,
2989 [0x9e] = gen_helper_pfadd,
2990 [0xa0] = gen_helper_pfcmpgt,
2991 [0xa4] = gen_helper_pfmax,
2992 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2993 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2994 [0xaa] = gen_helper_pfsubr,
2995 [0xae] = gen_helper_pfacc,
2996 [0xb0] = gen_helper_pfcmpeq,
2997 [0xb4] = gen_helper_pfmul,
2998 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2999 [0xb7] = gen_helper_pmulhrw_mmx,
3000 [0xbb] = gen_helper_pswapd,
3001 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
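/* 3DNow! instructions are encoded as 0F 0F /r ib, with the actual
   operation selected by the trailing imm8; sse_op_table5 is indexed
   directly by that suffix byte (see the case 0x0f handler below). */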
3004 struct sse_op_helper_s {
3005 void *op[2]; uint32_t ext_mask;
3007 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3008 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3009 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3010 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
3011 static struct sse_op_helper_s sse_op_table6[256] = {
3012 [0x00] = SSSE3_OP(pshufb),
3013 [0x01] = SSSE3_OP(phaddw),
3014 [0x02] = SSSE3_OP(phaddd),
3015 [0x03] = SSSE3_OP(phaddsw),
3016 [0x04] = SSSE3_OP(pmaddubsw),
3017 [0x05] = SSSE3_OP(phsubw),
3018 [0x06] = SSSE3_OP(phsubd),
3019 [0x07] = SSSE3_OP(phsubsw),
3020 [0x08] = SSSE3_OP(psignb),
3021 [0x09] = SSSE3_OP(psignw),
3022 [0x0a] = SSSE3_OP(psignd),
3023 [0x0b] = SSSE3_OP(pmulhrsw),
3024 [0x10] = SSE41_OP(pblendvb),
3025 [0x14] = SSE41_OP(blendvps),
3026 [0x15] = SSE41_OP(blendvpd),
3027 [0x17] = SSE41_OP(ptest),
3028 [0x1c] = SSSE3_OP(pabsb),
3029 [0x1d] = SSSE3_OP(pabsw),
3030 [0x1e] = SSSE3_OP(pabsd),
3031 [0x20] = SSE41_OP(pmovsxbw),
3032 [0x21] = SSE41_OP(pmovsxbd),
3033 [0x22] = SSE41_OP(pmovsxbq),
3034 [0x23] = SSE41_OP(pmovsxwd),
3035 [0x24] = SSE41_OP(pmovsxwq),
3036 [0x25] = SSE41_OP(pmovsxdq),
3037 [0x28] = SSE41_OP(pmuldq),
3038 [0x29] = SSE41_OP(pcmpeqq),
3039 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3040 [0x2b] = SSE41_OP(packusdw),
3041 [0x30] = SSE41_OP(pmovzxbw),
3042 [0x31] = SSE41_OP(pmovzxbd),
3043 [0x32] = SSE41_OP(pmovzxbq),
3044 [0x33] = SSE41_OP(pmovzxwd),
3045 [0x34] = SSE41_OP(pmovzxwq),
3046 [0x35] = SSE41_OP(pmovzxdq),
3047 [0x37] = SSE42_OP(pcmpgtq),
3048 [0x38] = SSE41_OP(pminsb),
3049 [0x39] = SSE41_OP(pminsd),
3050 [0x3a] = SSE41_OP(pminuw),
3051 [0x3b] = SSE41_OP(pminud),
3052 [0x3c] = SSE41_OP(pmaxsb),
3053 [0x3d] = SSE41_OP(pmaxsd),
3054 [0x3e] = SSE41_OP(pmaxuw),
3055 [0x3f] = SSE41_OP(pmaxud),
3056 [0x40] = SSE41_OP(pmulld),
3057 [0x41] = SSE41_OP(phminposuw),
3060 static struct sse_op_helper_s sse_op_table7[256] = {
3061 [0x08] = SSE41_OP(roundps),
3062 [0x09] = SSE41_OP(roundpd),
3063 [0x0a] = SSE41_OP(roundss),
3064 [0x0b] = SSE41_OP(roundsd),
3065 [0x0c] = SSE41_OP(blendps),
3066 [0x0d] = SSE41_OP(blendpd),
3067 [0x0e] = SSE41_OP(pblendw),
3068 [0x0f] = SSSE3_OP(palignr),
3069 [0x14] = SSE41_SPECIAL, /* pextrb */
3070 [0x15] = SSE41_SPECIAL, /* pextrw */
3071 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3072 [0x17] = SSE41_SPECIAL, /* extractps */
3073 [0x20] = SSE41_SPECIAL, /* pinsrb */
3074 [0x21] = SSE41_SPECIAL, /* insertps */
3075 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3076 [0x40] = SSE41_OP(dpps),
3077 [0x41] = SSE41_OP(dppd),
3078 [0x42] = SSE41_OP(mpsadbw),
3079 [0x60] = SSE42_OP(pcmpestrm),
3080 [0x61] = SSE42_OP(pcmpestri),
3081 [0x62] = SSE42_OP(pcmpistrm),
3082 [0x63] = SSE42_OP(pcmpistri),
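/* sse_op_table6 covers the three-byte 0F 38 opcode map (no
   immediate) and sse_op_table7 the 0F 3A map (one trailing imm8);
   in both, column 0 is the MMX form and column 1 the 66-prefixed
   XMM form, and ext_mask gates each entry on the matching CPUID
   feature bit. */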
3085 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3087 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3088 int modrm, mod, rm, reg, reg_addr, offset_addr;
3089 void *sse_op2;
3091 b &= 0xff;
3092 if (s->prefix & PREFIX_DATA)
3093 b1 = 1;
3094 else if (s->prefix & PREFIX_REPZ)
3095 b1 = 2;
3096 else if (s->prefix & PREFIX_REPNZ)
3097 b1 = 3;
3098 else
3099 b1 = 0;
3100 sse_op2 = sse_op_table1[b][b1];
3101 if (!sse_op2)
3102 goto illegal_op;
3103 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3104 is_xmm = 1;
3105 } else {
3106 if (b1 == 0) {
3107 /* MMX case */
3108 is_xmm = 0;
3109 } else {
3110 is_xmm = 1;
3113 /* simple MMX/SSE operation */
3114 if (s->flags & HF_TS_MASK) {
3115 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3116 return;
3118 if (s->flags & HF_EM_MASK) {
3119 illegal_op:
3120 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3121 return;
3123 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3124 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3125 goto illegal_op;
3126 if (b == 0x0e) {
3127 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3128 goto illegal_op;
3129 /* femms */
3130 gen_helper_emms();
3131 return;
3133 if (b == 0x77) {
3134 /* emms */
3135 gen_helper_emms();
3136 return;
3138 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3139 the static cpu state) */
3140 if (!is_xmm) {
3141 gen_helper_enter_mmx();
3144 modrm = ldub_code(s->pc++);
3145 reg = ((modrm >> 3) & 7);
3146 if (is_xmm)
3147 reg |= rex_r;
3148 mod = (modrm >> 6) & 3;
3149 if (sse_op2 == SSE_SPECIAL) {
3150 b |= (b1 << 8);
3151 switch(b) {
3152 case 0x0e7: /* movntq */
3153 if (mod == 3)
3154 goto illegal_op;
3155 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3156 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3157 break;
3158 case 0x1e7: /* movntdq */
3159 case 0x02b: /* movntps */
3160 case 0x12b: /* movntpd */
3161 case 0x3f0: /* lddqu */
3162 if (mod == 3)
3163 goto illegal_op;
3164 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3165 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3166 break;
3167 case 0x6e: /* movd mm, ea */
3168 #ifdef TARGET_X86_64
3169 if (s->dflag == 2) {
3170 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3171 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3172 } else
3173 #endif
3175 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3176 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3177 offsetof(CPUX86State,fpregs[reg].mmx));
3178 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3179 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3181 break;
3182 case 0x16e: /* movd xmm, ea */
3183 #ifdef TARGET_X86_64
3184 if (s->dflag == 2) {
3185 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3186 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3187 offsetof(CPUX86State,xmm_regs[reg]));
3188 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3189 } else
3190 #endif
3192 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3193 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3194 offsetof(CPUX86State,xmm_regs[reg]));
3195 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3196 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3198 break;
3199 case 0x6f: /* movq mm, ea */
3200 if (mod != 3) {
3201 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3202 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3203 } else {
3204 rm = (modrm & 7);
3205 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3206 offsetof(CPUX86State,fpregs[rm].mmx));
3207 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3208 offsetof(CPUX86State,fpregs[reg].mmx));
3210 break;
3211 case 0x010: /* movups */
3212 case 0x110: /* movupd */
3213 case 0x028: /* movaps */
3214 case 0x128: /* movapd */
3215 case 0x16f: /* movdqa xmm, ea */
3216 case 0x26f: /* movdqu xmm, ea */
3217 if (mod != 3) {
3218 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3219 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3220 } else {
3221 rm = (modrm & 7) | REX_B(s);
3222 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3223 offsetof(CPUX86State,xmm_regs[rm]));
3225 break;
3226 case 0x210: /* movss xmm, ea */
3227 if (mod != 3) {
3228 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3229 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3230 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3231 gen_op_movl_T0_0();
3232 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3233 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3234 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3235 } else {
3236 rm = (modrm & 7) | REX_B(s);
3237 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3238 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3240 break;
3241 case 0x310: /* movsd xmm, ea */
3242 if (mod != 3) {
3243 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3244 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3245 gen_op_movl_T0_0();
3246 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3247 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3248 } else {
3249 rm = (modrm & 7) | REX_B(s);
3250 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3251 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3253 break;
3254 case 0x012: /* movlps */
3255 case 0x112: /* movlpd */
3256 if (mod != 3) {
3257 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3258 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3259 } else {
3260 /* movhlps */
3261 rm = (modrm & 7) | REX_B(s);
3262 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3263 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3265 break;
3266 case 0x212: /* movsldup */
3267 if (mod != 3) {
3268 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3269 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3270 } else {
3271 rm = (modrm & 7) | REX_B(s);
3272 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3273 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3274 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3275 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3277 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3278 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3279 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3280 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3281 break;
3282 case 0x312: /* movddup */
3283 if (mod != 3) {
3284 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3285 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3286 } else {
3287 rm = (modrm & 7) | REX_B(s);
3288 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3289 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3291 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3292 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3293 break;
3294 case 0x016: /* movhps */
3295 case 0x116: /* movhpd */
3296 if (mod != 3) {
3297 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3298 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3299 } else {
3300 /* movlhps */
3301 rm = (modrm & 7) | REX_B(s);
3302 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3303 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3305 break;
3306 case 0x216: /* movshdup */
3307 if (mod != 3) {
3308 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3309 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3310 } else {
3311 rm = (modrm & 7) | REX_B(s);
3312 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3313 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3314 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3315 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3317 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3318 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3319 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3320 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3321 break;
3322 case 0x7e: /* movd ea, mm */
3323 #ifdef TARGET_X86_64
3324 if (s->dflag == 2) {
3325 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3326 offsetof(CPUX86State,fpregs[reg].mmx));
3327 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3328 } else
3329 #endif
3331 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3332 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3333 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3335 break;
3336 case 0x17e: /* movd ea, xmm */
3337 #ifdef TARGET_X86_64
3338 if (s->dflag == 2) {
3339 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3340 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3341 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3342 } else
3343 #endif
3345 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3346 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3347 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3349 break;
3350 case 0x27e: /* movq xmm, ea */
3351 if (mod != 3) {
3352 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3353 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3354 } else {
3355 rm = (modrm & 7) | REX_B(s);
3356 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3357 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3359 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3360 break;
3361 case 0x7f: /* movq ea, mm */
3362 if (mod != 3) {
3363 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3364 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3365 } else {
3366 rm = (modrm & 7);
3367 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3368 offsetof(CPUX86State,fpregs[reg].mmx));
3370 break;
3371 case 0x011: /* movups */
3372 case 0x111: /* movupd */
3373 case 0x029: /* movaps */
3374 case 0x129: /* movapd */
3375 case 0x17f: /* movdqa ea, xmm */
3376 case 0x27f: /* movdqu ea, xmm */
3377 if (mod != 3) {
3378 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3379 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3380 } else {
3381 rm = (modrm & 7) | REX_B(s);
3382 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3383 offsetof(CPUX86State,xmm_regs[reg]));
3385 break;
3386 case 0x211: /* movss ea, xmm */
3387 if (mod != 3) {
3388 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3389 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3390 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3391 } else {
3392 rm = (modrm & 7) | REX_B(s);
3393 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3394 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3396 break;
3397 case 0x311: /* movsd ea, xmm */
3398 if (mod != 3) {
3399 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3400 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3401 } else {
3402 rm = (modrm & 7) | REX_B(s);
3403 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3404 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3406 break;
3407 case 0x013: /* movlps */
3408 case 0x113: /* movlpd */
3409 if (mod != 3) {
3410 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3411 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3412 } else {
3413 goto illegal_op;
3415 break;
3416 case 0x017: /* movhps */
3417 case 0x117: /* movhpd */
3418 if (mod != 3) {
3419 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3420 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3421 } else {
3422 goto illegal_op;
3424 break;
3425 case 0x71: /* shift mm, im */
3426 case 0x72:
3427 case 0x73:
3428 case 0x171: /* shift xmm, im */
3429 case 0x172:
3430 case 0x173:
3431 val = ldub_code(s->pc++);
3432 if (is_xmm) {
3433 gen_op_movl_T0_im(val);
3434 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3435 gen_op_movl_T0_0();
3436 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3437 op1_offset = offsetof(CPUX86State,xmm_t0);
3438 } else {
3439 gen_op_movl_T0_im(val);
3440 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3441 gen_op_movl_T0_0();
3442 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3443 op1_offset = offsetof(CPUX86State,mmx_t0);
3445 sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
3446 if (!sse_op2)
3447 goto illegal_op;
3448 if (is_xmm) {
3449 rm = (modrm & 7) | REX_B(s);
3450 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3451 } else {
3452 rm = (modrm & 7);
3453 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3455 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3456 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3457 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3458 break;
3459 case 0x050: /* movmskps */
3460 rm = (modrm & 7) | REX_B(s);
3461 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3462 offsetof(CPUX86State,xmm_regs[rm]));
3463 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
3464 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3465 gen_op_mov_reg_T0(OT_LONG, reg);
3466 break;
3467 case 0x150: /* movmskpd */
3468 rm = (modrm & 7) | REX_B(s);
3469 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3470 offsetof(CPUX86State,xmm_regs[rm]));
3471 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
3472 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3473 gen_op_mov_reg_T0(OT_LONG, reg);
3474 break;
3475 case 0x02a: /* cvtpi2ps */
3476 case 0x12a: /* cvtpi2pd */
3477 gen_helper_enter_mmx();
3478 if (mod != 3) {
3479 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3480 op2_offset = offsetof(CPUX86State,mmx_t0);
3481 gen_ldq_env_A0(s->mem_index, op2_offset);
3482 } else {
3483 rm = (modrm & 7);
3484 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3486 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3487 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3488 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3489 switch(b >> 8) {
3490 case 0x0:
3491 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
3492 break;
3493 default:
3494 case 0x1:
3495 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
3496 break;
3498 break;
3499 case 0x22a: /* cvtsi2ss */
3500 case 0x32a: /* cvtsi2sd */
3501 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3502 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3503 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3504 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3505 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
3506 if (ot == OT_LONG) {
3507 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3508 ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
3509 } else {
3510 ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
3512 break;
3513 case 0x02c: /* cvttps2pi */
3514 case 0x12c: /* cvttpd2pi */
3515 case 0x02d: /* cvtps2pi */
3516 case 0x12d: /* cvtpd2pi */
3517 gen_helper_enter_mmx();
3518 if (mod != 3) {
3519 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3520 op2_offset = offsetof(CPUX86State,xmm_t0);
3521 gen_ldo_env_A0(s->mem_index, op2_offset);
3522 } else {
3523 rm = (modrm & 7) | REX_B(s);
3524 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3526 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3527 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3528 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3529 switch(b) {
3530 case 0x02c:
3531 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
3532 break;
3533 case 0x12c:
3534 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
3535 break;
3536 case 0x02d:
3537 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
3538 break;
3539 case 0x12d:
3540 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
3541 break;
3543 break;
3544 case 0x22c: /* cvttss2si */
3545 case 0x32c: /* cvttsd2si */
3546 case 0x22d: /* cvtss2si */
3547 case 0x32d: /* cvtsd2si */
3548 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3549 if (mod != 3) {
3550 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3551 if ((b >> 8) & 1) {
3552 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3553 } else {
3554 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3555 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3557 op2_offset = offsetof(CPUX86State,xmm_t0);
3558 } else {
3559 rm = (modrm & 7) | REX_B(s);
3560 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3562 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
3563 (b & 1) * 4];
3564 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3565 if (ot == OT_LONG) {
3566 ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
3567 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3568 } else {
3569 ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
3571 gen_op_mov_reg_T0(ot, reg);
3572 break;
3573 case 0xc4: /* pinsrw */
3574 case 0x1c4:
3575 s->rip_offset = 1;
3576 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3577 val = ldub_code(s->pc++);
3578 if (b1) {
3579 val &= 7;
3580 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3581 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3582 } else {
3583 val &= 3;
3584 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3585 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3587 break;
3588 case 0xc5: /* pextrw */
3589 case 0x1c5:
3590 if (mod != 3)
3591 goto illegal_op;
3592 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3593 val = ldub_code(s->pc++);
3594 if (b1) {
3595 val &= 7;
3596 rm = (modrm & 7) | REX_B(s);
3597 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3598 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3599 } else {
3600 val &= 3;
3601 rm = (modrm & 7);
3602 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3603 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3605 reg = ((modrm >> 3) & 7) | rex_r;
3606 gen_op_mov_reg_T0(ot, reg);
3607 break;
3608 case 0x1d6: /* movq ea, xmm */
3609 if (mod != 3) {
3610 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3611 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3612 } else {
3613 rm = (modrm & 7) | REX_B(s);
3614 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3615 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3616 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3618 break;
3619 case 0x2d6: /* movq2dq */
3620 gen_helper_enter_mmx();
3621 rm = (modrm & 7);
3622 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3623 offsetof(CPUX86State,fpregs[rm].mmx));
3624 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3625 break;
3626 case 0x3d6: /* movdq2q */
3627 gen_helper_enter_mmx();
3628 rm = (modrm & 7) | REX_B(s);
3629 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3630 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3631 break;
3632 case 0xd7: /* pmovmskb */
3633 case 0x1d7:
3634 if (mod != 3)
3635 goto illegal_op;
3636 if (b1) {
3637 rm = (modrm & 7) | REX_B(s);
3638 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3639 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
3640 } else {
3641 rm = (modrm & 7);
3642 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3643 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
3645 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3646 reg = ((modrm >> 3) & 7) | rex_r;
3647 gen_op_mov_reg_T0(OT_LONG, reg);
3648 break;
3649 case 0x138:
3650 if (s->prefix & PREFIX_REPNZ)
3651 goto crc32;
3652 case 0x038:
3653 b = modrm;
3654 modrm = ldub_code(s->pc++);
3655 rm = modrm & 7;
3656 reg = ((modrm >> 3) & 7) | rex_r;
3657 mod = (modrm >> 6) & 3;
3659 sse_op2 = sse_op_table6[b].op[b1];
3660 if (!sse_op2)
3661 goto illegal_op;
3662 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3663 goto illegal_op;
3665 if (b1) {
3666 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3667 if (mod == 3) {
3668 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3669 } else {
3670 op2_offset = offsetof(CPUX86State,xmm_t0);
3671 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3672 switch (b) {
3673 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3674 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3675 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3676 gen_ldq_env_A0(s->mem_index, op2_offset +
3677 offsetof(XMMReg, XMM_Q(0)));
3678 break;
3679 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3680 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3681 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3682 (s->mem_index >> 2) - 1);
3683 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3684 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3685 offsetof(XMMReg, XMM_L(0)));
3686 break;
3687 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3688 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3689 (s->mem_index >> 2) - 1);
3690 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3691 offsetof(XMMReg, XMM_W(0)));
3692 break;
3693 case 0x2a: /* movntdqa */
3694 gen_ldo_env_A0(s->mem_index, op1_offset);
3695 return;
3696 default:
3697 gen_ldo_env_A0(s->mem_index, op2_offset);
3700 } else {
3701 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3702 if (mod == 3) {
3703 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3704 } else {
3705 op2_offset = offsetof(CPUX86State,mmx_t0);
3706 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3707 gen_ldq_env_A0(s->mem_index, op2_offset);
3710 if (sse_op2 == SSE_SPECIAL)
3711 goto illegal_op;
3713 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3714 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3715 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3717 if (b == 0x17)
3718 s->cc_op = CC_OP_EFLAGS;
3719 break;
3720 case 0x338: /* crc32 */
3721 crc32:
3722 b = modrm;
3723 modrm = ldub_code(s->pc++);
3724 reg = ((modrm >> 3) & 7) | rex_r;
3726 if (b != 0xf0 && b != 0xf1)
3727 goto illegal_op;
3728 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3729 goto illegal_op;
3731 if (b == 0xf0)
3732 ot = OT_BYTE;
3733 else if (b == 0xf1 && s->dflag != 2)
3734 if (s->prefix & PREFIX_DATA)
3735 ot = OT_WORD;
3736 else
3737 ot = OT_LONG;
3738 else
3739 ot = OT_QUAD;
3741 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3742 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3743 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3744 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3745 cpu_T[0], tcg_const_i32(8 << ot));
3747 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3748 gen_op_mov_reg_T0(ot, reg);
3749 break;
3750 case 0x03a:
3751 case 0x13a:
3752 b = modrm;
3753 modrm = ldub_code(s->pc++);
3754 rm = modrm & 7;
3755 reg = ((modrm >> 3) & 7) | rex_r;
3756 mod = (modrm >> 6) & 3;
3758 sse_op2 = sse_op_table7[b].op[b1];
3759 if (!sse_op2)
3760 goto illegal_op;
3761 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3762 goto illegal_op;
3764 if (sse_op2 == SSE_SPECIAL) {
3765 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3766 rm = (modrm & 7) | REX_B(s);
3767 if (mod != 3)
3768 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3769 reg = ((modrm >> 3) & 7) | rex_r;
3770 val = ldub_code(s->pc++);
3771 switch (b) {
3772 case 0x14: /* pextrb */
3773 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3774 xmm_regs[reg].XMM_B(val & 15)));
3775 if (mod == 3)
3776 gen_op_mov_reg_T0(ot, rm);
3777 else
3778 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3779 (s->mem_index >> 2) - 1);
3780 break;
3781 case 0x15: /* pextrw */
3782 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3783 xmm_regs[reg].XMM_W(val & 7)));
3784 if (mod == 3)
3785 gen_op_mov_reg_T0(ot, rm);
3786 else
3787 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3788 (s->mem_index >> 2) - 1);
3789 break;
3790 case 0x16:
3791 if (ot == OT_LONG) { /* pextrd */
3792 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3793 offsetof(CPUX86State,
3794 xmm_regs[reg].XMM_L(val & 3)));
3795 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3796 if (mod == 3)
3797 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3798 else
3799 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3800 (s->mem_index >> 2) - 1);
3801 } else { /* pextrq */
3802 #ifdef TARGET_X86_64
3803 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3804 offsetof(CPUX86State,
3805 xmm_regs[reg].XMM_Q(val & 1)));
3806 if (mod == 3)
3807 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3808 else
3809 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3810 (s->mem_index >> 2) - 1);
3811 #else
3812 goto illegal_op;
3813 #endif
3815 break;
3816 case 0x17: /* extractps */
3817 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3818 xmm_regs[reg].XMM_L(val & 3)));
3819 if (mod == 3)
3820 gen_op_mov_reg_T0(ot, rm);
3821 else
3822 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3823 (s->mem_index >> 2) - 1);
3824 break;
3825 case 0x20: /* pinsrb */
3826 if (mod == 3)
3827 gen_op_mov_TN_reg(OT_LONG, 0, rm);
3828 else
3829 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
3830 (s->mem_index >> 2) - 1);
3831 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
3832 xmm_regs[reg].XMM_B(val & 15)));
3833 break;
3834 case 0x21: /* insertps */
3835 if (mod == 3) {
3836 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3837 offsetof(CPUX86State,xmm_regs[rm]
3838 .XMM_L((val >> 6) & 3)));
3839 } else {
3840 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3841 (s->mem_index >> 2) - 1);
3842 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3844 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3845 offsetof(CPUX86State,xmm_regs[reg]
3846 .XMM_L((val >> 4) & 3)));
3847 if ((val >> 0) & 1)
3848 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3849 cpu_env, offsetof(CPUX86State,
3850 xmm_regs[reg].XMM_L(0)));
3851 if ((val >> 1) & 1)
3852 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3853 cpu_env, offsetof(CPUX86State,
3854 xmm_regs[reg].XMM_L(1)));
3855 if ((val >> 2) & 1)
3856 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3857 cpu_env, offsetof(CPUX86State,
3858 xmm_regs[reg].XMM_L(2)));
3859 if ((val >> 3) & 1)
3860 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3861 cpu_env, offsetof(CPUX86State,
3862 xmm_regs[reg].XMM_L(3)));
3863 break;
3864 case 0x22:
3865 if (ot == OT_LONG) { /* pinsrd */
3866 if (mod == 3)
3867 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
3868 else
3869 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3870 (s->mem_index >> 2) - 1);
3871 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3872 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3873 offsetof(CPUX86State,
3874 xmm_regs[reg].XMM_L(val & 3)));
3875 } else { /* pinsrq */
3876 #ifdef TARGET_X86_64
3877 if (mod == 3)
3878 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3879 else
3880 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
3881 (s->mem_index >> 2) - 1);
3882 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3883 offsetof(CPUX86State,
3884 xmm_regs[reg].XMM_Q(val & 1)));
3885 #else
3886 goto illegal_op;
3887 #endif
3889 break;
3891 return;
3894 if (b1) {
3895 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3896 if (mod == 3) {
3897 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3898 } else {
3899 op2_offset = offsetof(CPUX86State,xmm_t0);
3900 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3901 gen_ldo_env_A0(s->mem_index, op2_offset);
3903 } else {
3904 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3905 if (mod == 3) {
3906 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3907 } else {
3908 op2_offset = offsetof(CPUX86State,mmx_t0);
3909 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3910 gen_ldq_env_A0(s->mem_index, op2_offset);
3913 val = ldub_code(s->pc++);
3915 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3916 s->cc_op = CC_OP_EFLAGS;
3918 if (s->dflag == 2)
3919 /* The helper must use entire 64-bit gp registers */
3920 val |= 1 << 8;
3923 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3924 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3925 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3926 break;
3927 default:
3928 goto illegal_op;
3930 } else {
3931 /* generic MMX or SSE operation */
3932 switch(b) {
3933 case 0x70: /* pshufx insn */
3934 case 0xc6: /* pshufx insn */
3935 case 0xc2: /* compare insns */
3936 s->rip_offset = 1;
3937 break;
3938 default:
3939 break;
3941 if (is_xmm) {
3942 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3943 if (mod != 3) {
3944 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3945 op2_offset = offsetof(CPUX86State,xmm_t0);
3946 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
3947 b == 0xc2)) {
3948 /* specific case for SSE single instructions */
3949 if (b1 == 2) {
3950 /* 32 bit access */
3951 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3952 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3953 } else {
3954 /* 64 bit access */
3955 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
3957 } else {
3958 gen_ldo_env_A0(s->mem_index, op2_offset);
3960 } else {
3961 rm = (modrm & 7) | REX_B(s);
3962 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3964 } else {
3965 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3966 if (mod != 3) {
3967 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3968 op2_offset = offsetof(CPUX86State,mmx_t0);
3969 gen_ldq_env_A0(s->mem_index, op2_offset);
3970 } else {
3971 rm = (modrm & 7);
3972 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3975 switch(b) {
3976 case 0x0f: /* 3DNow! data insns */
3977 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3978 goto illegal_op;
3979 val = ldub_code(s->pc++);
3980 sse_op2 = sse_op_table5[val];
3981 if (!sse_op2)
3982 goto illegal_op;
3983 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3984 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3985 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3986 break;
3987 case 0x70: /* pshufx insn */
3988 case 0xc6: /* pshufx insn */
3989 val = ldub_code(s->pc++);
3990 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3991 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3992 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3993 break;
3994 case 0xc2:
3995 /* compare insns */
3996 val = ldub_code(s->pc++);
3997 if (val >= 8)
3998 goto illegal_op;
3999 sse_op2 = sse_op_table4[val][b1];
4000 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4001 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4002 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4003 break;
4004 case 0xf7:
4005 /* maskmov: we must prepare A0 */
4006 if (mod != 3)
4007 goto illegal_op;
4008 #ifdef TARGET_X86_64
4009 if (s->aflag == 2) {
4010 gen_op_movq_A0_reg(R_EDI);
4011 } else
4012 #endif
4014 gen_op_movl_A0_reg(R_EDI);
4015 if (s->aflag == 0)
4016 gen_op_andl_A0_ffff();
4018 gen_add_A0_ds_seg(s);
4020 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4021 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4022 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
4023 break;
4024 default:
4025 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4026 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4027 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4028 break;
4030 if (b == 0x2e || b == 0x2f) {
4031 s->cc_op = CC_OP_EFLAGS;
4036 /* convert one instruction. s->is_jmp is set if the translation must
4037 be stopped. Return the next pc value */
4038 static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
4040 int b, prefixes, aflag, dflag;
4041 int shift, ot;
4042 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4043 target_ulong next_eip, tval;
4044 int rex_w, rex_r;
4046 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
4047 tcg_gen_debug_insn_start(pc_start);
4048 s->pc = pc_start;
4049 prefixes = 0;
4050 aflag = s->code32;
4051 dflag = s->code32;
4052 s->override = -1;
4053 rex_w = -1;
4054 rex_r = 0;
4055 #ifdef TARGET_X86_64
4056 s->rex_x = 0;
4057 s->rex_b = 0;
4058 x86_64_hregs = 0;
4059 #endif
4060 s->rip_offset = 0; /* for relative ip address */
4061 next_byte:
4062 b = ldub_code(s->pc);
4063 s->pc++;
4064 /* check prefixes */
4065 #ifdef TARGET_X86_64
4066 if (CODE64(s)) {
4067 switch (b) {
4068 case 0xf3:
4069 prefixes |= PREFIX_REPZ;
4070 goto next_byte;
4071 case 0xf2:
4072 prefixes |= PREFIX_REPNZ;
4073 goto next_byte;
4074 case 0xf0:
4075 prefixes |= PREFIX_LOCK;
4076 goto next_byte;
4077 case 0x2e:
4078 s->override = R_CS;
4079 goto next_byte;
4080 case 0x36:
4081 s->override = R_SS;
4082 goto next_byte;
4083 case 0x3e:
4084 s->override = R_DS;
4085 goto next_byte;
4086 case 0x26:
4087 s->override = R_ES;
4088 goto next_byte;
4089 case 0x64:
4090 s->override = R_FS;
4091 goto next_byte;
4092 case 0x65:
4093 s->override = R_GS;
4094 goto next_byte;
4095 case 0x66:
4096 prefixes |= PREFIX_DATA;
4097 goto next_byte;
4098 case 0x67:
4099 prefixes |= PREFIX_ADR;
4100 goto next_byte;
4101 case 0x40 ... 0x4f:
4102 /* REX prefix */
4103 rex_w = (b >> 3) & 1;
4104 rex_r = (b & 0x4) << 1;
4105 s->rex_x = (b & 0x2) << 2;
4106 REX_B(s) = (b & 0x1) << 3;
4107 x86_64_hregs = 1; /* select uniform byte register addressing */
4108 goto next_byte;
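/* REX is 0100WRXB: W selects 64-bit operand size and R, X and B
   each extend a register field by 8.  E.g. 0x48 sets only W
   (rex_w = 1), while 0x44 sets R, making rex_r = 8 so that the
   modrm reg field addresses r8-r15; the shifts above turn each bit
   directly into that +8 register offset. */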
4110 if (rex_w == 1) {
4111 /* 0x66 is ignored if rex.w is set */
4112 dflag = 2;
4113 } else {
4114 if (prefixes & PREFIX_DATA)
4115 dflag ^= 1;
4117 if (!(prefixes & PREFIX_ADR))
4118 aflag = 2;
4119 } else
4120 #endif
4122 switch (b) {
4123 case 0xf3:
4124 prefixes |= PREFIX_REPZ;
4125 goto next_byte;
4126 case 0xf2:
4127 prefixes |= PREFIX_REPNZ;
4128 goto next_byte;
4129 case 0xf0:
4130 prefixes |= PREFIX_LOCK;
4131 goto next_byte;
4132 case 0x2e:
4133 s->override = R_CS;
4134 goto next_byte;
4135 case 0x36:
4136 s->override = R_SS;
4137 goto next_byte;
4138 case 0x3e:
4139 s->override = R_DS;
4140 goto next_byte;
4141 case 0x26:
4142 s->override = R_ES;
4143 goto next_byte;
4144 case 0x64:
4145 s->override = R_FS;
4146 goto next_byte;
4147 case 0x65:
4148 s->override = R_GS;
4149 goto next_byte;
4150 case 0x66:
4151 prefixes |= PREFIX_DATA;
4152 goto next_byte;
4153 case 0x67:
4154 prefixes |= PREFIX_ADR;
4155 goto next_byte;
4157 if (prefixes & PREFIX_DATA)
4158 dflag ^= 1;
4159 if (prefixes & PREFIX_ADR)
4160 aflag ^= 1;
4163 s->prefix = prefixes;
4164 s->aflag = aflag;
4165 s->dflag = dflag;
4167 /* lock generation */
4168 if (prefixes & PREFIX_LOCK)
4169 gen_helper_lock();
4171 /* now check op code */
4172 reswitch:
4173 switch(b) {
4174 case 0x0f:
4175 /**************************/
4176 /* extended op code */
4177 b = ldub_code(s->pc++) | 0x100;
4178 goto reswitch;
4180 /**************************/
4181 /* arith & logic */
4182 case 0x00 ... 0x05:
4183 case 0x08 ... 0x0d:
4184 case 0x10 ... 0x15:
4185 case 0x18 ... 0x1d:
4186 case 0x20 ... 0x25:
4187 case 0x28 ... 0x2d:
4188 case 0x30 ... 0x35:
4189 case 0x38 ... 0x3d:
4190 {
4191 int op, f, val;
4192 op = (b >> 3) & 7;
4193 f = (b >> 1) & 3;
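/* In the classic ALU group the opcode byte encodes everything:
   bits 5..3 select the operation (add, or, adc, sbb, and, sub,
   xor, cmp), bits 2..1 the operand form dispatched by the switch
   below, and bit 0 whether the access is byte-sized. */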
4195 if ((b & 1) == 0)
4196 ot = OT_BYTE;
4197 else
4198 ot = dflag + OT_WORD;
4200 switch(f) {
4201 case 0: /* OP Ev, Gv */
4202 modrm = ldub_code(s->pc++);
4203 reg = ((modrm >> 3) & 7) | rex_r;
4204 mod = (modrm >> 6) & 3;
4205 rm = (modrm & 7) | REX_B(s);
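/* Standard ModRM decode: mod in bits 7..6 (3 means a register
   operand, anything else a memory form handled by gen_lea_modrm),
   reg in bits 5..3 and rm in bits 2..0, both widened to four bits
   by the REX flags extracted above. */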
4206 if (mod != 3) {
4207 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4208 opreg = OR_TMP0;
4209 } else if (op == OP_XORL && rm == reg) {
4210 xor_zero:
4211 /* xor reg, reg optimisation */
4212 gen_op_movl_T0_0();
4213 s->cc_op = CC_OP_LOGICB + ot;
4214 gen_op_mov_reg_T0(ot, reg);
4215 gen_op_update1_cc();
4216 break;
4217 } else {
4218 opreg = rm;
4219 }
4220 gen_op_mov_TN_reg(ot, 1, reg);
4221 gen_op(s, op, ot, opreg);
4222 break;
4223 case 1: /* OP Gv, Ev */
4224 modrm = ldub_code(s->pc++);
4225 mod = (modrm >> 6) & 3;
4226 reg = ((modrm >> 3) & 7) | rex_r;
4227 rm = (modrm & 7) | REX_B(s);
4228 if (mod != 3) {
4229 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4230 gen_op_ld_T1_A0(ot + s->mem_index);
4231 } else if (op == OP_XORL && rm == reg) {
4232 goto xor_zero;
4233 } else {
4234 gen_op_mov_TN_reg(ot, 1, rm);
4235 }
4236 gen_op(s, op, ot, reg);
4237 break;
4238 case 2: /* OP A, Iv */
4239 val = insn_get(s, ot);
4240 gen_op_movl_T1_im(val);
4241 gen_op(s, op, ot, OR_EAX);
4242 break;
4243 }
4244 }
4245 break;
4247 case 0x82:
4248 if (CODE64(s))
4249 goto illegal_op;
4250 case 0x80: /* GRP1 */
4251 case 0x81:
4252 case 0x83:
4253 {
4254 int val;
4256 if ((b & 1) == 0)
4257 ot = OT_BYTE;
4258 else
4259 ot = dflag + OT_WORD;
4261 modrm = ldub_code(s->pc++);
4262 mod = (modrm >> 6) & 3;
4263 rm = (modrm & 7) | REX_B(s);
4264 op = (modrm >> 3) & 7;
4266 if (mod != 3) {
4267 if (b == 0x83)
4268 s->rip_offset = 1;
4269 else
4270 s->rip_offset = insn_const_size(ot);
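/* rip_offset tells gen_lea_modrm how many immediate bytes still
   follow the addressing bytes: RIP-relative operands are relative
   to the end of the whole instruction, so the displacement must be
   biased by the size of the not-yet-fetched immediate. */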
4271 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4272 opreg = OR_TMP0;
4273 } else {
4274 opreg = rm;
4275 }
4277 switch(b) {
4278 default:
4279 case 0x80:
4280 case 0x81:
4281 case 0x82:
4282 val = insn_get(s, ot);
4283 break;
4284 case 0x83:
4285 val = (int8_t)insn_get(s, OT_BYTE);
4286 break;
4287 }
4288 gen_op_movl_T1_im(val);
4289 gen_op(s, op, ot, opreg);
4290 }
4291 break;
4293 /**************************/
4294 /* inc, dec, and other misc arith */
4295 case 0x40 ... 0x47: /* inc Gv */
4296 ot = dflag ? OT_LONG : OT_WORD;
4297 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4298 break;
4299 case 0x48 ... 0x4f: /* dec Gv */
4300 ot = dflag ? OT_LONG : OT_WORD;
4301 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4302 break;
4303 case 0xf6: /* GRP3 */
4304 case 0xf7:
4305 if ((b & 1) == 0)
4306 ot = OT_BYTE;
4307 else
4308 ot = dflag + OT_WORD;
4310 modrm = ldub_code(s->pc++);
4311 mod = (modrm >> 6) & 3;
4312 rm = (modrm & 7) | REX_B(s);
4313 op = (modrm >> 3) & 7;
4314 if (mod != 3) {
4315 if (op == 0)
4316 s->rip_offset = insn_const_size(ot);
4317 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4318 gen_op_ld_T0_A0(ot + s->mem_index);
4319 } else {
4320 gen_op_mov_TN_reg(ot, 0, rm);
4323 switch(op) {
4324 case 0: /* test */
4325 val = insn_get(s, ot);
4326 gen_op_movl_T1_im(val);
4327 gen_op_testl_T0_T1_cc();
4328 s->cc_op = CC_OP_LOGICB + ot;
4329 break;
4330 case 2: /* not */
4331 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4332 if (mod != 3) {
4333 gen_op_st_T0_A0(ot + s->mem_index);
4334 } else {
4335 gen_op_mov_reg_T0(ot, rm);
4336 }
4337 break;
4338 case 3: /* neg */
4339 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4340 if (mod != 3) {
4341 gen_op_st_T0_A0(ot + s->mem_index);
4342 } else {
4343 gen_op_mov_reg_T0(ot, rm);
4344 }
4345 gen_op_update_neg_cc();
4346 s->cc_op = CC_OP_SUBB + ot;
4347 break;
4348 case 4: /* mul */
4349 switch(ot) {
4350 case OT_BYTE:
4351 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4352 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4353 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4354 /* XXX: use 32 bit mul which could be faster */
4355 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4356 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4357 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4358 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4359 s->cc_op = CC_OP_MULB;
4360 break;
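/* For mul/imul the flags helpers only look at cpu_cc_src: unsigned
   mul leaves the high half of the product there, signed imul the
   difference between the product and its sign-extended low half,
   so CF/OF are set exactly when the result overflowed. */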
4361 case OT_WORD:
4362 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4363 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4364 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4365 /* XXX: use 32 bit mul which could be faster */
4366 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4367 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4368 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4369 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4370 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4371 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4372 s->cc_op = CC_OP_MULW;
4373 break;
4374 default:
4375 case OT_LONG:
4376 #ifdef TARGET_X86_64
4377 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4378 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4379 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4380 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4381 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4382 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4383 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4384 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4385 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4386 #else
4387 {
4388 TCGv_i64 t0, t1;
4389 t0 = tcg_temp_new_i64();
4390 t1 = tcg_temp_new_i64();
4391 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4392 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4393 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4394 tcg_gen_mul_i64(t0, t0, t1);
4395 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4396 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4397 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4398 tcg_gen_shri_i64(t0, t0, 32);
4399 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4400 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4401 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4402 }
4403 #endif
4404 s->cc_op = CC_OP_MULL;
4405 break;
4406 #ifdef TARGET_X86_64
4407 case OT_QUAD:
4408 gen_helper_mulq_EAX_T0(cpu_T[0]);
4409 s->cc_op = CC_OP_MULQ;
4410 break;
4411 #endif
4412 }
4413 break;
4414 case 5: /* imul */
4415 switch(ot) {
4416 case OT_BYTE:
4417 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4418 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4419 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4420 /* XXX: use 32 bit mul which could be faster */
4421 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4422 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4423 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4424 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4425 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4426 s->cc_op = CC_OP_MULB;
4427 break;
4428 case OT_WORD:
4429 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4430 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4431 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4432 /* XXX: use 32 bit mul which could be faster */
4433 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4434 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4435 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4436 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4437 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4438 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4439 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4440 s->cc_op = CC_OP_MULW;
4441 break;
4442 default:
4443 case OT_LONG:
4444 #ifdef TARGET_X86_64
4445 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4446 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4447 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4448 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4449 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4450 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4451 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4452 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4453 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4454 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4455 #else
4456 {
4457 TCGv_i64 t0, t1;
4458 t0 = tcg_temp_new_i64();
4459 t1 = tcg_temp_new_i64();
4460 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4461 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4462 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4463 tcg_gen_mul_i64(t0, t0, t1);
4464 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4465 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4466 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4467 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4468 tcg_gen_shri_i64(t0, t0, 32);
4469 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4470 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4471 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4472 }
4473 #endif
4474 s->cc_op = CC_OP_MULL;
4475 break;
4476 #ifdef TARGET_X86_64
4477 case OT_QUAD:
4478 gen_helper_imulq_EAX_T0(cpu_T[0]);
4479 s->cc_op = CC_OP_MULQ;
4480 break;
4481 #endif
4482 }
4483 break;
4484 case 6: /* div */
4485 switch(ot) {
4486 case OT_BYTE:
4487 gen_jmp_im(pc_start - s->cs_base);
4488 gen_helper_divb_AL(cpu_T[0]);
4489 break;
4490 case OT_WORD:
4491 gen_jmp_im(pc_start - s->cs_base);
4492 gen_helper_divw_AX(cpu_T[0]);
4493 break;
4494 default:
4495 case OT_LONG:
4496 gen_jmp_im(pc_start - s->cs_base);
4497 gen_helper_divl_EAX(cpu_T[0]);
4498 break;
4499 #ifdef TARGET_X86_64
4500 case OT_QUAD:
4501 gen_jmp_im(pc_start - s->cs_base);
4502 gen_helper_divq_EAX(cpu_T[0]);
4503 break;
4504 #endif
4505 }
4506 break;
4507 case 7: /* idiv */
4508 switch(ot) {
4509 case OT_BYTE:
4510 gen_jmp_im(pc_start - s->cs_base);
4511 gen_helper_idivb_AL(cpu_T[0]);
4512 break;
4513 case OT_WORD:
4514 gen_jmp_im(pc_start - s->cs_base);
4515 gen_helper_idivw_AX(cpu_T[0]);
4516 break;
4517 default:
4518 case OT_LONG:
4519 gen_jmp_im(pc_start - s->cs_base);
4520 gen_helper_idivl_EAX(cpu_T[0]);
4521 break;
4522 #ifdef TARGET_X86_64
4523 case OT_QUAD:
4524 gen_jmp_im(pc_start - s->cs_base);
4525 gen_helper_idivq_EAX(cpu_T[0]);
4526 break;
4527 #endif
4528 }
4529 break;
4530 default:
4531 goto illegal_op;
4532 }
4533 break;
4535 case 0xfe: /* GRP4 */
4536 case 0xff: /* GRP5 */
4537 if ((b & 1) == 0)
4538 ot = OT_BYTE;
4539 else
4540 ot = dflag + OT_WORD;
4542 modrm = ldub_code(s->pc++);
4543 mod = (modrm >> 6) & 3;
4544 rm = (modrm & 7) | REX_B(s);
4545 op = (modrm >> 3) & 7;
4546 if (op >= 2 && b == 0xfe) {
4547 goto illegal_op;
4549 if (CODE64(s)) {
4550 if (op == 2 || op == 4) {
4551 /* operand size for jumps is 64 bit */
4552 ot = OT_QUAD;
4553 } else if (op == 3 || op == 5) {
4554 /* for far calls (lcall) and far jumps (ljmp), the operand
4555 is 16 or 32 bit, even in long mode */
4556 ot = dflag ? OT_LONG : OT_WORD;
4557 } else if (op == 6) {
4558 /* default push size is 64 bit */
4559 ot = dflag ? OT_QUAD : OT_WORD;
4562 if (mod != 3) {
4563 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4564 if (op >= 2 && op != 3 && op != 5)
4565 gen_op_ld_T0_A0(ot + s->mem_index);
4566 } else {
4567 gen_op_mov_TN_reg(ot, 0, rm);
4570 switch(op) {
4571 case 0: /* inc Ev */
4572 if (mod != 3)
4573 opreg = OR_TMP0;
4574 else
4575 opreg = rm;
4576 gen_inc(s, ot, opreg, 1);
4577 break;
4578 case 1: /* dec Ev */
4579 if (mod != 3)
4580 opreg = OR_TMP0;
4581 else
4582 opreg = rm;
4583 gen_inc(s, ot, opreg, -1);
4584 break;
4585 case 2: /* call Ev */
4586 /* XXX: optimize if memory (no 'and' is necessary) */
4587 if (s->dflag == 0)
4588 gen_op_andl_T0_ffff();
4589 next_eip = s->pc - s->cs_base;
4590 gen_movtl_T1_im(next_eip);
4591 gen_push_T1(s);
4592 gen_op_jmp_T0();
4593 gen_eob(s);
4594 break;
4595 case 3: /* lcall Ev */
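/* A far pointer in memory is the offset followed by a 16-bit
   selector, so the offset is loaded into T1 first and the selector
   into T0 from the address one operand size further on. */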
4596 gen_op_ld_T1_A0(ot + s->mem_index);
4597 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4598 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4599 do_lcall:
4600 if (s->pe && !s->vm86) {
4601 if (s->cc_op != CC_OP_DYNAMIC)
4602 gen_op_set_cc_op(s->cc_op);
4603 gen_jmp_im(pc_start - s->cs_base);
4604 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4605 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
4606 tcg_const_i32(dflag),
4607 tcg_const_i32(s->pc - pc_start));
4608 } else {
4609 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4610 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
4611 tcg_const_i32(dflag),
4612 tcg_const_i32(s->pc - s->cs_base));
4614 gen_eob(s);
4615 break;
4616 case 4: /* jmp Ev */
4617 if (s->dflag == 0)
4618 gen_op_andl_T0_ffff();
4619 gen_op_jmp_T0();
4620 gen_eob(s);
4621 break;
4622 case 5: /* ljmp Ev */
4623 gen_op_ld_T1_A0(ot + s->mem_index);
4624 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4625 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4626 do_ljmp:
4627 if (s->pe && !s->vm86) {
4628 if (s->cc_op != CC_OP_DYNAMIC)
4629 gen_op_set_cc_op(s->cc_op);
4630 gen_jmp_im(pc_start - s->cs_base);
4631 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4632 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
4633 tcg_const_i32(s->pc - pc_start));
4634 } else {
4635 gen_op_movl_seg_T0_vm(R_CS);
4636 gen_op_movl_T0_T1();
4637 gen_op_jmp_T0();
4639 gen_eob(s);
4640 break;
4641 case 6: /* push Ev */
4642 gen_push_T0(s);
4643 break;
4644 default:
4645 goto illegal_op;
4646 }
4647 break;
4649 case 0x84: /* test Ev, Gv */
4650 case 0x85:
4651 if ((b & 1) == 0)
4652 ot = OT_BYTE;
4653 else
4654 ot = dflag + OT_WORD;
4656 modrm = ldub_code(s->pc++);
4657 mod = (modrm >> 6) & 3;
4658 rm = (modrm & 7) | REX_B(s);
4659 reg = ((modrm >> 3) & 7) | rex_r;
4661 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4662 gen_op_mov_TN_reg(ot, 1, reg);
4663 gen_op_testl_T0_T1_cc();
4664 s->cc_op = CC_OP_LOGICB + ot;
4665 break;
4667 case 0xa8: /* test eAX, Iv */
4668 case 0xa9:
4669 if ((b & 1) == 0)
4670 ot = OT_BYTE;
4671 else
4672 ot = dflag + OT_WORD;
4673 val = insn_get(s, ot);
4675 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4676 gen_op_movl_T1_im(val);
4677 gen_op_testl_T0_T1_cc();
4678 s->cc_op = CC_OP_LOGICB + ot;
4679 break;
4681 case 0x98: /* CWDE/CBW */
4682 #ifdef TARGET_X86_64
4683 if (dflag == 2) {
4684 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4685 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4686 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4687 } else
4688 #endif
4689 if (dflag == 1) {
4690 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4691 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4692 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4693 } else {
4694 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4695 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4696 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4698 break;
4699 case 0x99: /* CDQ/CWD */
4700 #ifdef TARGET_X86_64
4701 if (dflag == 2) {
4702 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4703 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4704 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4705 } else
4706 #endif
4707 if (dflag == 1) {
4708 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4709 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4710 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4711 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4712 } else {
4713 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4714 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4715 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4716 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4718 break;
4719 case 0x1af: /* imul Gv, Ev */
4720 case 0x69: /* imul Gv, Ev, I */
4721 case 0x6b:
4722 ot = dflag + OT_WORD;
4723 modrm = ldub_code(s->pc++);
4724 reg = ((modrm >> 3) & 7) | rex_r;
4725 if (b == 0x69)
4726 s->rip_offset = insn_const_size(ot);
4727 else if (b == 0x6b)
4728 s->rip_offset = 1;
4729 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4730 if (b == 0x69) {
4731 val = insn_get(s, ot);
4732 gen_op_movl_T1_im(val);
4733 } else if (b == 0x6b) {
4734 val = (int8_t)insn_get(s, OT_BYTE);
4735 gen_op_movl_T1_im(val);
4736 } else {
4737 gen_op_mov_TN_reg(ot, 1, reg);
4740 #ifdef TARGET_X86_64
4741 if (ot == OT_QUAD) {
4742 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
4743 } else
4744 #endif
4745 if (ot == OT_LONG) {
4746 #ifdef TARGET_X86_64
4747 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4748 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4749 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4750 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4751 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4752 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4753 #else
4754 {
4755 TCGv_i64 t0, t1;
4756 t0 = tcg_temp_new_i64();
4757 t1 = tcg_temp_new_i64();
4758 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4759 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4760 tcg_gen_mul_i64(t0, t0, t1);
4761 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4762 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4763 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4764 tcg_gen_shri_i64(t0, t0, 32);
4765 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4766 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4767 }
4768 #endif
4769 } else {
4770 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4771 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4772 /* XXX: use 32 bit mul which could be faster */
4773 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4774 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4775 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4776 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4778 gen_op_mov_reg_T0(ot, reg);
4779 s->cc_op = CC_OP_MULB + ot;
4780 break;
4781 case 0x1c0:
4782 case 0x1c1: /* xadd Ev, Gv */
4783 if ((b & 1) == 0)
4784 ot = OT_BYTE;
4785 else
4786 ot = dflag + OT_WORD;
4787 modrm = ldub_code(s->pc++);
4788 reg = ((modrm >> 3) & 7) | rex_r;
4789 mod = (modrm >> 6) & 3;
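/* xadd exchanges and adds: the old destination value ends up in
   the source register and the sum in the destination, hence T0/T1
   are written back crosswise below. */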
4790 if (mod == 3) {
4791 rm = (modrm & 7) | REX_B(s);
4792 gen_op_mov_TN_reg(ot, 0, reg);
4793 gen_op_mov_TN_reg(ot, 1, rm);
4794 gen_op_addl_T0_T1();
4795 gen_op_mov_reg_T1(ot, reg);
4796 gen_op_mov_reg_T0(ot, rm);
4797 } else {
4798 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4799 gen_op_mov_TN_reg(ot, 0, reg);
4800 gen_op_ld_T1_A0(ot + s->mem_index);
4801 gen_op_addl_T0_T1();
4802 gen_op_st_T0_A0(ot + s->mem_index);
4803 gen_op_mov_reg_T1(ot, reg);
4805 gen_op_update2_cc();
4806 s->cc_op = CC_OP_ADDB + ot;
4807 break;
4808 case 0x1b0:
4809 case 0x1b1: /* cmpxchg Ev, Gv */
4810 {
4811 int label1, label2;
4812 TCGv t0, t1, t2, a0;
4814 if ((b & 1) == 0)
4815 ot = OT_BYTE;
4816 else
4817 ot = dflag + OT_WORD;
4818 modrm = ldub_code(s->pc++);
4819 reg = ((modrm >> 3) & 7) | rex_r;
4820 mod = (modrm >> 6) & 3;
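/* cmpxchg compares EAX with the destination and either stores the
   source (equal) or loads the destination into EAX (not equal).
   Local temps are used because plain TCG temporaries do not
   survive the conditional branch generated below. */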
4821 t0 = tcg_temp_local_new();
4822 t1 = tcg_temp_local_new();
4823 t2 = tcg_temp_local_new();
4824 a0 = tcg_temp_local_new();
4825 gen_op_mov_v_reg(ot, t1, reg);
4826 if (mod == 3) {
4827 rm = (modrm & 7) | REX_B(s);
4828 gen_op_mov_v_reg(ot, t0, rm);
4829 } else {
4830 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4831 tcg_gen_mov_tl(a0, cpu_A0);
4832 gen_op_ld_v(ot + s->mem_index, t0, a0);
4833 rm = 0; /* avoid warning */
4835 label1 = gen_new_label();
4836 tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX]));
4837 tcg_gen_sub_tl(t2, t2, t0);
4838 gen_extu(ot, t2);
4839 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4840 if (mod == 3) {
4841 label2 = gen_new_label();
4842 gen_op_mov_reg_v(ot, R_EAX, t0);
4843 tcg_gen_br(label2);
4844 gen_set_label(label1);
4845 gen_op_mov_reg_v(ot, rm, t1);
4846 gen_set_label(label2);
4847 } else {
4848 tcg_gen_mov_tl(t1, t0);
4849 gen_op_mov_reg_v(ot, R_EAX, t0);
4850 gen_set_label(label1);
4851 /* always store */
4852 gen_op_st_v(ot + s->mem_index, t1, a0);
4854 tcg_gen_mov_tl(cpu_cc_src, t0);
4855 tcg_gen_mov_tl(cpu_cc_dst, t2);
4856 s->cc_op = CC_OP_SUBB + ot;
4857 tcg_temp_free(t0);
4858 tcg_temp_free(t1);
4859 tcg_temp_free(t2);
4860 tcg_temp_free(a0);
4861 }
4862 break;
4863 case 0x1c7: /* cmpxchg8b */
4864 modrm = ldub_code(s->pc++);
4865 mod = (modrm >> 6) & 3;
4866 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4867 goto illegal_op;
4868 #ifdef TARGET_X86_64
4869 if (dflag == 2) {
4870 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4871 goto illegal_op;
4872 gen_jmp_im(pc_start - s->cs_base);
4873 if (s->cc_op != CC_OP_DYNAMIC)
4874 gen_op_set_cc_op(s->cc_op);
4875 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4876 gen_helper_cmpxchg16b(cpu_A0);
4877 } else
4878 #endif
4879 {
4880 if (!(s->cpuid_features & CPUID_CX8))
4881 goto illegal_op;
4882 gen_jmp_im(pc_start - s->cs_base);
4883 if (s->cc_op != CC_OP_DYNAMIC)
4884 gen_op_set_cc_op(s->cc_op);
4885 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4886 gen_helper_cmpxchg8b(cpu_A0);
4887 }
4888 s->cc_op = CC_OP_EFLAGS;
4889 break;
4891 /**************************/
4892 /* push/pop */
4893 case 0x50 ... 0x57: /* push */
4894 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4895 gen_push_T0(s);
4896 break;
4897 case 0x58 ... 0x5f: /* pop */
4898 if (CODE64(s)) {
4899 ot = dflag ? OT_QUAD : OT_WORD;
4900 } else {
4901 ot = dflag + OT_WORD;
4903 gen_pop_T0(s);
4904 /* NOTE: order is important for pop %sp */
4905 gen_pop_update(s);
4906 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
4907 break;
4908 case 0x60: /* pusha */
4909 if (CODE64(s))
4910 goto illegal_op;
4911 gen_pusha(s);
4912 break;
4913 case 0x61: /* popa */
4914 if (CODE64(s))
4915 goto illegal_op;
4916 gen_popa(s);
4917 break;
4918 case 0x68: /* push Iv */
4919 case 0x6a:
4920 if (CODE64(s)) {
4921 ot = dflag ? OT_QUAD : OT_WORD;
4922 } else {
4923 ot = dflag + OT_WORD;
4925 if (b == 0x68)
4926 val = insn_get(s, ot);
4927 else
4928 val = (int8_t)insn_get(s, OT_BYTE);
4929 gen_op_movl_T0_im(val);
4930 gen_push_T0(s);
4931 break;
4932 case 0x8f: /* pop Ev */
4933 if (CODE64(s)) {
4934 ot = dflag ? OT_QUAD : OT_WORD;
4935 } else {
4936 ot = dflag + OT_WORD;
4938 modrm = ldub_code(s->pc++);
4939 mod = (modrm >> 6) & 3;
4940 gen_pop_T0(s);
4941 if (mod == 3) {
4942 /* NOTE: order is important for pop %sp */
4943 gen_pop_update(s);
4944 rm = (modrm & 7) | REX_B(s);
4945 gen_op_mov_reg_T0(ot, rm);
4946 } else {
4947 /* NOTE: order is important too for MMU exceptions */
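/* popl_esp_hack makes gen_lea_modrm bias ESP-based addresses by
   the operand size, since a pop into an ESP-relative memory
   operand must compute its address as if ESP had already been
   incremented. */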
4948 s->popl_esp_hack = 1 << ot;
4949 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
4950 s->popl_esp_hack = 0;
4951 gen_pop_update(s);
4953 break;
4954 case 0xc8: /* enter */
4956 int level;
4957 val = lduw_code(s->pc);
4958 s->pc += 2;
4959 level = ldub_code(s->pc++);
4960 gen_enter(s, val, level);
4962 break;
4963 case 0xc9: /* leave */
4964 /* XXX: exception not precise (ESP is updated before potential exception) */
4965 if (CODE64(s)) {
4966 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
4967 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
4968 } else if (s->ss32) {
4969 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
4970 gen_op_mov_reg_T0(OT_LONG, R_ESP);
4971 } else {
4972 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
4973 gen_op_mov_reg_T0(OT_WORD, R_ESP);
4975 gen_pop_T0(s);
4976 if (CODE64(s)) {
4977 ot = dflag ? OT_QUAD : OT_WORD;
4978 } else {
4979 ot = dflag + OT_WORD;
4981 gen_op_mov_reg_T0(ot, R_EBP);
4982 gen_pop_update(s);
4983 break;
4984 case 0x06: /* push es */
4985 case 0x0e: /* push cs */
4986 case 0x16: /* push ss */
4987 case 0x1e: /* push ds */
4988 if (CODE64(s))
4989 goto illegal_op;
4990 gen_op_movl_T0_seg(b >> 3);
4991 gen_push_T0(s);
4992 break;
4993 case 0x1a0: /* push fs */
4994 case 0x1a8: /* push gs */
4995 gen_op_movl_T0_seg((b >> 3) & 7);
4996 gen_push_T0(s);
4997 break;
4998 case 0x07: /* pop es */
4999 case 0x17: /* pop ss */
5000 case 0x1f: /* pop ds */
5001 if (CODE64(s))
5002 goto illegal_op;
5003 reg = b >> 3;
5004 gen_pop_T0(s);
5005 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5006 gen_pop_update(s);
5007 if (reg == R_SS) {
5008 /* if reg == SS, inhibit interrupts/trace. */
5009 /* If several instructions disable interrupts, only the
5010 _first_ does it */
5011 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5012 gen_helper_set_inhibit_irq();
5013 s->tf = 0;
5015 if (s->is_jmp) {
5016 gen_jmp_im(s->pc - s->cs_base);
5017 gen_eob(s);
5019 break;
5020 case 0x1a1: /* pop fs */
5021 case 0x1a9: /* pop gs */
5022 gen_pop_T0(s);
5023 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5024 gen_pop_update(s);
5025 if (s->is_jmp) {
5026 gen_jmp_im(s->pc - s->cs_base);
5027 gen_eob(s);
5029 break;
5031 /**************************/
5032 /* mov */
5033 case 0x88:
5034 case 0x89: /* mov Gv, Ev */
5035 if ((b & 1) == 0)
5036 ot = OT_BYTE;
5037 else
5038 ot = dflag + OT_WORD;
5039 modrm = ldub_code(s->pc++);
5040 reg = ((modrm >> 3) & 7) | rex_r;
5042 /* generate a generic store */
5043 gen_ldst_modrm(s, modrm, ot, reg, 1);
5044 break;
5045 case 0xc6:
5046 case 0xc7: /* mov Ev, Iv */
5047 if ((b & 1) == 0)
5048 ot = OT_BYTE;
5049 else
5050 ot = dflag + OT_WORD;
5051 modrm = ldub_code(s->pc++);
5052 mod = (modrm >> 6) & 3;
5053 if (mod != 3) {
5054 s->rip_offset = insn_const_size(ot);
5055 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5057 val = insn_get(s, ot);
5058 gen_op_movl_T0_im(val);
5059 if (mod != 3)
5060 gen_op_st_T0_A0(ot + s->mem_index);
5061 else
5062 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5063 break;
5064 case 0x8a:
5065 case 0x8b: /* mov Ev, Gv */
5066 if ((b & 1) == 0)
5067 ot = OT_BYTE;
5068 else
5069 ot = OT_WORD + dflag;
5070 modrm = ldub_code(s->pc++);
5071 reg = ((modrm >> 3) & 7) | rex_r;
5073 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5074 gen_op_mov_reg_T0(ot, reg);
5075 break;
5076 case 0x8e: /* mov seg, Gv */
5077 modrm = ldub_code(s->pc++);
5078 reg = (modrm >> 3) & 7;
5079 if (reg >= 6 || reg == R_CS)
5080 goto illegal_op;
5081 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5082 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5083 if (reg == R_SS) {
5084 /* if reg == SS, inhibit interrupts/trace */
5085 /* If several instructions disable interrupts, only the
5086 _first_ does it */
5087 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5088 gen_helper_set_inhibit_irq();
5089 s->tf = 0;
5091 if (s->is_jmp) {
5092 gen_jmp_im(s->pc - s->cs_base);
5093 gen_eob(s);
5095 break;
5096 case 0x8c: /* mov Gv, seg */
5097 modrm = ldub_code(s->pc++);
5098 reg = (modrm >> 3) & 7;
5099 mod = (modrm >> 6) & 3;
5100 if (reg >= 6)
5101 goto illegal_op;
5102 gen_op_movl_T0_seg(reg);
5103 if (mod == 3)
5104 ot = OT_WORD + dflag;
5105 else
5106 ot = OT_WORD;
5107 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5108 break;
5110 case 0x1b6: /* movzbS Gv, Eb */
5111 case 0x1b7: /* movzwS Gv, Eb */
5112 case 0x1be: /* movsbS Gv, Eb */
5113 case 0x1bf: /* movswS Gv, Eb */
5115 int d_ot;
5116 /* d_ot is the size of destination */
5117 d_ot = dflag + OT_WORD;
5118 /* ot is the size of source */
5119 ot = (b & 1) + OT_BYTE;
5120 modrm = ldub_code(s->pc++);
5121 reg = ((modrm >> 3) & 7) | rex_r;
5122 mod = (modrm >> 6) & 3;
5123 rm = (modrm & 7) | REX_B(s);
5125 if (mod == 3) {
5126 gen_op_mov_TN_reg(ot, 0, rm);
5127 switch(ot | (b & 8)) {
5128 case OT_BYTE:
5129 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5130 break;
5131 case OT_BYTE | 8:
5132 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5133 break;
5134 case OT_WORD:
5135 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5136 break;
5137 default:
5138 case OT_WORD | 8:
5139 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5140 break;
5142 gen_op_mov_reg_T0(d_ot, reg);
5143 } else {
5144 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5145 if (b & 8) {
5146 gen_op_lds_T0_A0(ot + s->mem_index);
5147 } else {
5148 gen_op_ldu_T0_A0(ot + s->mem_index);
5150 gen_op_mov_reg_T0(d_ot, reg);
5153 break;
5155 case 0x8d: /* lea */
5156 ot = dflag + OT_WORD;
5157 modrm = ldub_code(s->pc++);
5158 mod = (modrm >> 6) & 3;
5159 if (mod == 3)
5160 goto illegal_op;
5161 reg = ((modrm >> 3) & 7) | rex_r;
5162 /* we must ensure that no segment is added */
5163 s->override = -1;
5164 val = s->addseg;
5165 s->addseg = 0;
5166 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5167 s->addseg = val;
5168 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5169 break;
5171 case 0xa0: /* mov EAX, Ov */
5172 case 0xa1:
5173 case 0xa2: /* mov Ov, EAX */
5174 case 0xa3:
5176 target_ulong offset_addr;
5178 if ((b & 1) == 0)
5179 ot = OT_BYTE;
5180 else
5181 ot = dflag + OT_WORD;
5182 #ifdef TARGET_X86_64
5183 if (s->aflag == 2) {
5184 offset_addr = ldq_code(s->pc);
5185 s->pc += 8;
5186 gen_op_movq_A0_im(offset_addr);
5187 } else
5188 #endif
5190 if (s->aflag) {
5191 offset_addr = insn_get(s, OT_LONG);
5192 } else {
5193 offset_addr = insn_get(s, OT_WORD);
5195 gen_op_movl_A0_im(offset_addr);
5197 gen_add_A0_ds_seg(s);
5198 if ((b & 2) == 0) {
5199 gen_op_ld_T0_A0(ot + s->mem_index);
5200 gen_op_mov_reg_T0(ot, R_EAX);
5201 } else {
5202 gen_op_mov_TN_reg(ot, 0, R_EAX);
5203 gen_op_st_T0_A0(ot + s->mem_index);
5206 break;
5207 case 0xd7: /* xlat */
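/* xlat: AL = [seg:(E/R)BX + unsigned AL].  Only the low 8 bits of
   EAX take part in the address, which is then truncated to the
   current address size. */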
5208 #ifdef TARGET_X86_64
5209 if (s->aflag == 2) {
5210 gen_op_movq_A0_reg(R_EBX);
5211 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5212 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5213 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5214 } else
5215 #endif
5217 gen_op_movl_A0_reg(R_EBX);
5218 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5219 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5220 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5221 if (s->aflag == 0)
5222 gen_op_andl_A0_ffff();
5223 else
5224 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5226 gen_add_A0_ds_seg(s);
5227 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5228 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5229 break;
5230 case 0xb0 ... 0xb7: /* mov R, Ib */
5231 val = insn_get(s, OT_BYTE);
5232 gen_op_movl_T0_im(val);
5233 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5234 break;
5235 case 0xb8 ... 0xbf: /* mov R, Iv */
5236 #ifdef TARGET_X86_64
5237 if (dflag == 2) {
5238 uint64_t tmp;
5239 /* 64 bit case */
5240 tmp = ldq_code(s->pc);
5241 s->pc += 8;
5242 reg = (b & 7) | REX_B(s);
5243 gen_movtl_T0_im(tmp);
5244 gen_op_mov_reg_T0(OT_QUAD, reg);
5245 } else
5246 #endif
5248 ot = dflag ? OT_LONG : OT_WORD;
5249 val = insn_get(s, ot);
5250 reg = (b & 7) | REX_B(s);
5251 gen_op_movl_T0_im(val);
5252 gen_op_mov_reg_T0(ot, reg);
5254 break;
5256 case 0x91 ... 0x97: /* xchg R, EAX */
5257 ot = dflag + OT_WORD;
5258 reg = (b & 7) | REX_B(s);
5259 rm = R_EAX;
5260 goto do_xchg_reg;
5261 case 0x86:
5262 case 0x87: /* xchg Ev, Gv */
5263 if ((b & 1) == 0)
5264 ot = OT_BYTE;
5265 else
5266 ot = dflag + OT_WORD;
5267 modrm = ldub_code(s->pc++);
5268 reg = ((modrm >> 3) & 7) | rex_r;
5269 mod = (modrm >> 6) & 3;
5270 if (mod == 3) {
5271 rm = (modrm & 7) | REX_B(s);
5272 do_xchg_reg:
5273 gen_op_mov_TN_reg(ot, 0, reg);
5274 gen_op_mov_TN_reg(ot, 1, rm);
5275 gen_op_mov_reg_T0(ot, rm);
5276 gen_op_mov_reg_T1(ot, reg);
5277 } else {
5278 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5279 gen_op_mov_TN_reg(ot, 0, reg);
5280 /* for xchg, lock is implicit */
5281 if (!(prefixes & PREFIX_LOCK))
5282 gen_helper_lock();
5283 gen_op_ld_T1_A0(ot + s->mem_index);
5284 gen_op_st_T0_A0(ot + s->mem_index);
5285 if (!(prefixes & PREFIX_LOCK))
5286 gen_helper_unlock();
5287 gen_op_mov_reg_T1(ot, reg);
5289 break;
5290 case 0xc4: /* les Gv */
5291 if (CODE64(s))
5292 goto illegal_op;
5293 op = R_ES;
5294 goto do_lxx;
5295 case 0xc5: /* lds Gv */
5296 if (CODE64(s))
5297 goto illegal_op;
5298 op = R_DS;
5299 goto do_lxx;
5300 case 0x1b2: /* lss Gv */
5301 op = R_SS;
5302 goto do_lxx;
5303 case 0x1b4: /* lfs Gv */
5304 op = R_FS;
5305 goto do_lxx;
5306 case 0x1b5: /* lgs Gv */
5307 op = R_GS;
5308 do_lxx:
5309 ot = dflag ? OT_LONG : OT_WORD;
5310 modrm = ldub_code(s->pc++);
5311 reg = ((modrm >> 3) & 7) | rex_r;
5312 mod = (modrm >> 6) & 3;
5313 if (mod == 3)
5314 goto illegal_op;
5315 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5316 gen_op_ld_T1_A0(ot + s->mem_index);
5317 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5318 /* load the segment first to handle exceptions properly */
5319 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5320 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5321 /* then put the data */
5322 gen_op_mov_reg_T1(ot, reg);
5323 if (s->is_jmp) {
5324 gen_jmp_im(s->pc - s->cs_base);
5325 gen_eob(s);
5327 break;
5329 /************************/
5330 /* shifts */
5331 case 0xc0:
5332 case 0xc1:
5333 /* shift Ev,Ib */
5334 shift = 2;
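/* Within the shift group, "shift" records where the count comes
   from: 0 = CL, 1 = the constant 1, 2 = an immediate byte that
   follows the ModRM bytes (hence rip_offset = 1 below). */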
5335 grp2:
5337 if ((b & 1) == 0)
5338 ot = OT_BYTE;
5339 else
5340 ot = dflag + OT_WORD;
5342 modrm = ldub_code(s->pc++);
5343 mod = (modrm >> 6) & 3;
5344 op = (modrm >> 3) & 7;
5346 if (mod != 3) {
5347 if (shift == 2) {
5348 s->rip_offset = 1;
5350 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5351 opreg = OR_TMP0;
5352 } else {
5353 opreg = (modrm & 7) | REX_B(s);
5356 /* simpler op */
5357 if (shift == 0) {
5358 gen_shift(s, op, ot, opreg, OR_ECX);
5359 } else {
5360 if (shift == 2) {
5361 shift = ldub_code(s->pc++);
5363 gen_shifti(s, op, ot, opreg, shift);
5366 break;
5367 case 0xd0:
5368 case 0xd1:
5369 /* shift Ev,1 */
5370 shift = 1;
5371 goto grp2;
5372 case 0xd2:
5373 case 0xd3:
5374 /* shift Ev,cl */
5375 shift = 0;
5376 goto grp2;
5378 case 0x1a4: /* shld imm */
5379 op = 0;
5380 shift = 1;
5381 goto do_shiftd;
5382 case 0x1a5: /* shld cl */
5383 op = 0;
5384 shift = 0;
5385 goto do_shiftd;
5386 case 0x1ac: /* shrd imm */
5387 op = 1;
5388 shift = 1;
5389 goto do_shiftd;
5390 case 0x1ad: /* shrd cl */
5391 op = 1;
5392 shift = 0;
5393 do_shiftd:
5394 ot = dflag + OT_WORD;
5395 modrm = ldub_code(s->pc++);
5396 mod = (modrm >> 6) & 3;
5397 rm = (modrm & 7) | REX_B(s);
5398 reg = ((modrm >> 3) & 7) | rex_r;
5399 if (mod != 3) {
5400 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5401 opreg = OR_TMP0;
5402 } else {
5403 opreg = rm;
5405 gen_op_mov_TN_reg(ot, 1, reg);
5407 if (shift) {
5408 val = ldub_code(s->pc++);
5409 tcg_gen_movi_tl(cpu_T3, val);
5410 } else {
5411 tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));
5413 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5414 break;
5416 /************************/
5417 /* floats */
5418 case 0xd8 ... 0xdf:
5419 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5420 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5421 /* XXX: what to do if illegal op ? */
5422 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5423 break;
5425 modrm = ldub_code(s->pc++);
5426 mod = (modrm >> 6) & 3;
5427 rm = modrm & 7;
5428 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
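/* The three low bits of the escape opcode (0xd8..0xdf) and the
   ModRM reg field are packed into one 6-bit index so every x87
   operation can be dispatched from a single switch. */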
5429 if (mod != 3) {
5430 /* memory op */
5431 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5432 switch(op) {
5433 case 0x00 ... 0x07: /* fxxxs */
5434 case 0x10 ... 0x17: /* fixxxl */
5435 case 0x20 ... 0x27: /* fxxxl */
5436 case 0x30 ... 0x37: /* fixxx */
5438 int op1;
5439 op1 = op & 7;
5441 switch(op >> 4) {
5442 case 0:
5443 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5444 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5445 gen_helper_flds_FT0(cpu_tmp2_i32);
5446 break;
5447 case 1:
5448 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5449 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5450 gen_helper_fildl_FT0(cpu_tmp2_i32);
5451 break;
5452 case 2:
5453 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5454 (s->mem_index >> 2) - 1);
5455 gen_helper_fldl_FT0(cpu_tmp1_i64);
5456 break;
5457 case 3:
5458 default:
5459 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5460 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5461 gen_helper_fildl_FT0(cpu_tmp2_i32);
5462 break;
5465 gen_helper_fp_arith_ST0_FT0(op1);
5466 if (op1 == 3) {
5467 /* fcomp needs pop */
5468 gen_helper_fpop();
5471 break;
5472 case 0x08: /* flds */
5473 case 0x0a: /* fsts */
5474 case 0x0b: /* fstps */
5475 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5476 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5477 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5478 switch(op & 7) {
5479 case 0:
5480 switch(op >> 4) {
5481 case 0:
5482 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5483 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5484 gen_helper_flds_ST0(cpu_tmp2_i32);
5485 break;
5486 case 1:
5487 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5488 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5489 gen_helper_fildl_ST0(cpu_tmp2_i32);
5490 break;
5491 case 2:
5492 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5493 (s->mem_index >> 2) - 1);
5494 gen_helper_fldl_ST0(cpu_tmp1_i64);
5495 break;
5496 case 3:
5497 default:
5498 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5499 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5500 gen_helper_fildl_ST0(cpu_tmp2_i32);
5501 break;
5503 break;
5504 case 1:
5505 /* XXX: the corresponding CPUID bit must be tested ! */
5506 switch(op >> 4) {
5507 case 1:
5508 gen_helper_fisttl_ST0(cpu_tmp2_i32);
5509 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5510 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5511 break;
5512 case 2:
5513 gen_helper_fisttll_ST0(cpu_tmp1_i64);
5514 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5515 (s->mem_index >> 2) - 1);
5516 break;
5517 case 3:
5518 default:
5519 gen_helper_fistt_ST0(cpu_tmp2_i32);
5520 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5521 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5522 break;
5524 gen_helper_fpop();
5525 break;
5526 default:
5527 switch(op >> 4) {
5528 case 0:
5529 gen_helper_fsts_ST0(cpu_tmp2_i32);
5530 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5531 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5532 break;
5533 case 1:
5534 gen_helper_fistl_ST0(cpu_tmp2_i32);
5535 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5536 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5537 break;
5538 case 2:
5539 gen_helper_fstl_ST0(cpu_tmp1_i64);
5540 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5541 (s->mem_index >> 2) - 1);
5542 break;
5543 case 3:
5544 default:
5545 gen_helper_fist_ST0(cpu_tmp2_i32);
5546 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5547 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5548 break;
5550 if ((op & 7) == 3)
5551 gen_helper_fpop();
5552 break;
5554 break;
5555 case 0x0c: /* fldenv mem */
5556 if (s->cc_op != CC_OP_DYNAMIC)
5557 gen_op_set_cc_op(s->cc_op);
5558 gen_jmp_im(pc_start - s->cs_base);
5559 gen_helper_fldenv(
5560 cpu_A0, tcg_const_i32(s->dflag));
5561 break;
5562 case 0x0d: /* fldcw mem */
5563 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5564 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5565 gen_helper_fldcw(cpu_tmp2_i32);
5566 break;
5567 case 0x0e: /* fnstenv mem */
5568 if (s->cc_op != CC_OP_DYNAMIC)
5569 gen_op_set_cc_op(s->cc_op);
5570 gen_jmp_im(pc_start - s->cs_base);
5571 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
5572 break;
5573 case 0x0f: /* fnstcw mem */
5574 gen_helper_fnstcw(cpu_tmp2_i32);
5575 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5576 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5577 break;
5578 case 0x1d: /* fldt mem */
5579 if (s->cc_op != CC_OP_DYNAMIC)
5580 gen_op_set_cc_op(s->cc_op);
5581 gen_jmp_im(pc_start - s->cs_base);
5582 gen_helper_fldt_ST0(cpu_A0);
5583 break;
5584 case 0x1f: /* fstpt mem */
5585 if (s->cc_op != CC_OP_DYNAMIC)
5586 gen_op_set_cc_op(s->cc_op);
5587 gen_jmp_im(pc_start - s->cs_base);
5588 gen_helper_fstt_ST0(cpu_A0);
5589 gen_helper_fpop();
5590 break;
5591 case 0x2c: /* frstor mem */
5592 if (s->cc_op != CC_OP_DYNAMIC)
5593 gen_op_set_cc_op(s->cc_op);
5594 gen_jmp_im(pc_start - s->cs_base);
5595 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
5596 break;
5597 case 0x2e: /* fnsave mem */
5598 if (s->cc_op != CC_OP_DYNAMIC)
5599 gen_op_set_cc_op(s->cc_op);
5600 gen_jmp_im(pc_start - s->cs_base);
5601 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
5602 break;
5603 case 0x2f: /* fnstsw mem */
5604 gen_helper_fnstsw(cpu_tmp2_i32);
5605 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5606 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5607 break;
5608 case 0x3c: /* fbld */
5609 if (s->cc_op != CC_OP_DYNAMIC)
5610 gen_op_set_cc_op(s->cc_op);
5611 gen_jmp_im(pc_start - s->cs_base);
5612 gen_helper_fbld_ST0(cpu_A0);
5613 break;
5614 case 0x3e: /* fbstp */
5615 if (s->cc_op != CC_OP_DYNAMIC)
5616 gen_op_set_cc_op(s->cc_op);
5617 gen_jmp_im(pc_start - s->cs_base);
5618 gen_helper_fbst_ST0(cpu_A0);
5619 gen_helper_fpop();
5620 break;
5621 case 0x3d: /* fildll */
5622 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5623 (s->mem_index >> 2) - 1);
5624 gen_helper_fildll_ST0(cpu_tmp1_i64);
5625 break;
5626 case 0x3f: /* fistpll */
5627 gen_helper_fistll_ST0(cpu_tmp1_i64);
5628 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5629 (s->mem_index >> 2) - 1);
5630 gen_helper_fpop();
5631 break;
5632 default:
5633 goto illegal_op;
5635 } else {
5636 /* register float ops */
5637 opreg = rm;
5639 switch(op) {
5640 case 0x08: /* fld sti */
5641 gen_helper_fpush();
5642 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
5643 break;
5644 case 0x09: /* fxchg sti */
5645 case 0x29: /* fxchg4 sti, undocumented op */
5646 case 0x39: /* fxchg7 sti, undocumented op */
5647 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
5648 break;
5649 case 0x0a: /* grp d9/2 */
5650 switch(rm) {
5651 case 0: /* fnop */
5652 /* check exceptions (FreeBSD FPU probe) */
5653 if (s->cc_op != CC_OP_DYNAMIC)
5654 gen_op_set_cc_op(s->cc_op);
5655 gen_jmp_im(pc_start - s->cs_base);
5656 gen_helper_fwait();
5657 break;
5658 default:
5659 goto illegal_op;
5661 break;
5662 case 0x0c: /* grp d9/4 */
5663 switch(rm) {
5664 case 0: /* fchs */
5665 gen_helper_fchs_ST0();
5666 break;
5667 case 1: /* fabs */
5668 gen_helper_fabs_ST0();
5669 break;
5670 case 4: /* ftst */
5671 gen_helper_fldz_FT0();
5672 gen_helper_fcom_ST0_FT0();
5673 break;
5674 case 5: /* fxam */
5675 gen_helper_fxam_ST0();
5676 break;
5677 default:
5678 goto illegal_op;
5680 break;
5681 case 0x0d: /* grp d9/5 */
5683 switch(rm) {
5684 case 0:
5685 gen_helper_fpush();
5686 gen_helper_fld1_ST0();
5687 break;
5688 case 1:
5689 gen_helper_fpush();
5690 gen_helper_fldl2t_ST0();
5691 break;
5692 case 2:
5693 gen_helper_fpush();
5694 gen_helper_fldl2e_ST0();
5695 break;
5696 case 3:
5697 gen_helper_fpush();
5698 gen_helper_fldpi_ST0();
5699 break;
5700 case 4:
5701 gen_helper_fpush();
5702 gen_helper_fldlg2_ST0();
5703 break;
5704 case 5:
5705 gen_helper_fpush();
5706 gen_helper_fldln2_ST0();
5707 break;
5708 case 6:
5709 gen_helper_fpush();
5710 gen_helper_fldz_ST0();
5711 break;
5712 default:
5713 goto illegal_op;
5716 break;
5717 case 0x0e: /* grp d9/6 */
5718 switch(rm) {
5719 case 0: /* f2xm1 */
5720 gen_helper_f2xm1();
5721 break;
5722 case 1: /* fyl2x */
5723 gen_helper_fyl2x();
5724 break;
5725 case 2: /* fptan */
5726 gen_helper_fptan();
5727 break;
5728 case 3: /* fpatan */
5729 gen_helper_fpatan();
5730 break;
5731 case 4: /* fxtract */
5732 gen_helper_fxtract();
5733 break;
5734 case 5: /* fprem1 */
5735 gen_helper_fprem1();
5736 break;
5737 case 6: /* fdecstp */
5738 gen_helper_fdecstp();
5739 break;
5740 default:
5741 case 7: /* fincstp */
5742 gen_helper_fincstp();
5743 break;
5745 break;
5746 case 0x0f: /* grp d9/7 */
5747 switch(rm) {
5748 case 0: /* fprem */
5749 gen_helper_fprem();
5750 break;
5751 case 1: /* fyl2xp1 */
5752 gen_helper_fyl2xp1();
5753 break;
5754 case 2: /* fsqrt */
5755 gen_helper_fsqrt();
5756 break;
5757 case 3: /* fsincos */
5758 gen_helper_fsincos();
5759 break;
5760 case 5: /* fscale */
5761 gen_helper_fscale();
5762 break;
5763 case 4: /* frndint */
5764 gen_helper_frndint();
5765 break;
5766 case 6: /* fsin */
5767 gen_helper_fsin();
5768 break;
5769 default:
5770 case 7: /* fcos */
5771 gen_helper_fcos();
5772 break;
5774 break;
5775 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5776 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5777 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5779 int op1;
5781 op1 = op & 7;
5782 if (op >= 0x20) {
5783 gen_helper_fp_arith_STN_ST0(op1, opreg);
5784 if (op >= 0x30)
5785 gen_helper_fpop();
5786 } else {
5787 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5788 gen_helper_fp_arith_ST0_FT0(op1);
5791 break;
5792 case 0x02: /* fcom */
5793 case 0x22: /* fcom2, undocumented op */
5794 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5795 gen_helper_fcom_ST0_FT0();
5796 break;
5797 case 0x03: /* fcomp */
5798 case 0x23: /* fcomp3, undocumented op */
5799 case 0x32: /* fcomp5, undocumented op */
5800 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5801 gen_helper_fcom_ST0_FT0();
5802 gen_helper_fpop();
5803 break;
5804 case 0x15: /* da/5 */
5805 switch(rm) {
5806 case 1: /* fucompp */
5807 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5808 gen_helper_fucom_ST0_FT0();
5809 gen_helper_fpop();
5810 gen_helper_fpop();
5811 break;
5812 default:
5813 goto illegal_op;
5815 break;
5816 case 0x1c:
5817 switch(rm) {
5818 case 0: /* feni (287 only, just do nop here) */
5819 break;
5820 case 1: /* fdisi (287 only, just do nop here) */
5821 break;
5822 case 2: /* fclex */
5823 gen_helper_fclex();
5824 break;
5825 case 3: /* fninit */
5826 gen_helper_fninit();
5827 break;
5828 case 4: /* fsetpm (287 only, just do nop here) */
5829 break;
5830 default:
5831 goto illegal_op;
5833 break;
5834 case 0x1d: /* fucomi */
5835 if (s->cc_op != CC_OP_DYNAMIC)
5836 gen_op_set_cc_op(s->cc_op);
5837 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5838 gen_helper_fucomi_ST0_FT0();
5839 s->cc_op = CC_OP_EFLAGS;
5840 break;
5841 case 0x1e: /* fcomi */
5842 if (s->cc_op != CC_OP_DYNAMIC)
5843 gen_op_set_cc_op(s->cc_op);
5844 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5845 gen_helper_fcomi_ST0_FT0();
5846 s->cc_op = CC_OP_EFLAGS;
5847 break;
5848 case 0x28: /* ffree sti */
5849 gen_helper_ffree_STN(tcg_const_i32(opreg));
5850 break;
5851 case 0x2a: /* fst sti */
5852 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5853 break;
5854 case 0x2b: /* fstp sti */
5855 case 0x0b: /* fstp1 sti, undocumented op */
5856 case 0x3a: /* fstp8 sti, undocumented op */
5857 case 0x3b: /* fstp9 sti, undocumented op */
5858 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5859 gen_helper_fpop();
5860 break;
5861 case 0x2c: /* fucom st(i) */
5862 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5863 gen_helper_fucom_ST0_FT0();
5864 break;
5865 case 0x2d: /* fucomp st(i) */
5866 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5867 gen_helper_fucom_ST0_FT0();
5868 gen_helper_fpop();
5869 break;
5870 case 0x33: /* de/3 */
5871 switch(rm) {
5872 case 1: /* fcompp */
5873 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5874 gen_helper_fcom_ST0_FT0();
5875 gen_helper_fpop();
5876 gen_helper_fpop();
5877 break;
5878 default:
5879 goto illegal_op;
5881 break;
5882 case 0x38: /* ffreep sti, undocumented op */
5883 gen_helper_ffree_STN(tcg_const_i32(opreg));
5884 gen_helper_fpop();
5885 break;
5886 case 0x3c: /* df/4 */
5887 switch(rm) {
5888 case 0:
5889 gen_helper_fnstsw(cpu_tmp2_i32);
5890 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5891 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5892 break;
5893 default:
5894 goto illegal_op;
5896 break;
5897 case 0x3d: /* fucomip */
5898 if (s->cc_op != CC_OP_DYNAMIC)
5899 gen_op_set_cc_op(s->cc_op);
5900 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5901 gen_helper_fucomi_ST0_FT0();
5902 gen_helper_fpop();
5903 s->cc_op = CC_OP_EFLAGS;
5904 break;
5905 case 0x3e: /* fcomip */
5906 if (s->cc_op != CC_OP_DYNAMIC)
5907 gen_op_set_cc_op(s->cc_op);
5908 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5909 gen_helper_fcomi_ST0_FT0();
5910 gen_helper_fpop();
5911 s->cc_op = CC_OP_EFLAGS;
5912 break;
5913 case 0x10 ... 0x13: /* fcmovxx */
5914 case 0x18 ... 0x1b:
5915 {
5916 int op1, l1;
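/* fcmovcc: bits 1..0 of the packed op select B/Z/BE/P (mapped to
   the generic jcc conditions through fcmov_cc) and bit 3 the
   negated variants; the branch below skips the register move
   whenever the condition does not hold. */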
5917 static const uint8_t fcmov_cc[8] = {
5918 (JCC_B << 1),
5919 (JCC_Z << 1),
5920 (JCC_BE << 1),
5921 (JCC_P << 1),
5922 };
5923 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
5924 l1 = gen_new_label();
5925 gen_jcc1(s, s->cc_op, op1, l1);
5926 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
5927 gen_set_label(l1);
5928 }
5929 break;
5930 default:
5931 goto illegal_op;
5934 break;
5935 /************************/
5936 /* string ops */
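/* Each string instruction is emitted inline when unprefixed; with
   a rep prefix a gen_repz_* helper wraps the body in the
   ECX-counted loop, which needs both the start and end EIP of the
   instruction to be able to stop and restart it. */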
5938 case 0xa4: /* movsS */
5939 case 0xa5:
5940 if ((b & 1) == 0)
5941 ot = OT_BYTE;
5942 else
5943 ot = dflag + OT_WORD;
5945 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5946 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5947 } else {
5948 gen_movs(s, ot);
5950 break;
5952 case 0xaa: /* stosS */
5953 case 0xab:
5954 if ((b & 1) == 0)
5955 ot = OT_BYTE;
5956 else
5957 ot = dflag + OT_WORD;
5959 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5960 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5961 } else {
5962 gen_stos(s, ot);
5964 break;
5965 case 0xac: /* lodsS */
5966 case 0xad:
5967 if ((b & 1) == 0)
5968 ot = OT_BYTE;
5969 else
5970 ot = dflag + OT_WORD;
5971 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5972 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5973 } else {
5974 gen_lods(s, ot);
5976 break;
5977 case 0xae: /* scasS */
5978 case 0xaf:
5979 if ((b & 1) == 0)
5980 ot = OT_BYTE;
5981 else
5982 ot = dflag + OT_WORD;
5983 if (prefixes & PREFIX_REPNZ) {
5984 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
5985 } else if (prefixes & PREFIX_REPZ) {
5986 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
5987 } else {
5988 gen_scas(s, ot);
5989 s->cc_op = CC_OP_SUBB + ot;
5991 break;
5993 case 0xa6: /* cmpsS */
5994 case 0xa7:
5995 if ((b & 1) == 0)
5996 ot = OT_BYTE;
5997 else
5998 ot = dflag + OT_WORD;
5999 if (prefixes & PREFIX_REPNZ) {
6000 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6001 } else if (prefixes & PREFIX_REPZ) {
6002 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6003 } else {
6004 gen_cmps(s, ot);
6005 s->cc_op = CC_OP_SUBB + ot;
6007 break;
6008 case 0x6c: /* insS */
6009 case 0x6d:
6010 if ((b & 1) == 0)
6011 ot = OT_BYTE;
6012 else
6013 ot = dflag ? OT_LONG : OT_WORD;
6014 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6015 gen_op_andl_T0_ffff();
6016 gen_check_io(s, ot, pc_start - s->cs_base,
6017 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6018 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6019 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6020 } else {
6021 gen_ins(s, ot);
6022 if (use_icount) {
6023 gen_jmp(s, s->pc - s->cs_base);
6026 break;
6027 case 0x6e: /* outsS */
6028 case 0x6f:
6029 if ((b & 1) == 0)
6030 ot = OT_BYTE;
6031 else
6032 ot = dflag ? OT_LONG : OT_WORD;
6033 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6034 gen_op_andl_T0_ffff();
6035 gen_check_io(s, ot, pc_start - s->cs_base,
6036 svm_is_rep(prefixes) | 4);
6037 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6038 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6039 } else {
6040 gen_outs(s, ot);
6041 if (use_icount) {
6042 gen_jmp(s, s->pc - s->cs_base);
6045 break;
6047 /************************/
6048 /* port I/O */
6050 case 0xe4:
6051 case 0xe5:
6052 if ((b & 1) == 0)
6053 ot = OT_BYTE;
6054 else
6055 ot = dflag ? OT_LONG : OT_WORD;
6056 val = ldub_code(s->pc++);
6057 gen_op_movl_T0_im(val);
6058 gen_check_io(s, ot, pc_start - s->cs_base,
6059 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6060 if (use_icount)
6061 gen_io_start();
6062 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6063 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6064 gen_op_mov_reg_T1(ot, R_EAX);
6065 if (use_icount) {
6066 gen_io_end();
6067 gen_jmp(s, s->pc - s->cs_base);
6069 break;
6070 case 0xe6:
6071 case 0xe7:
6072 if ((b & 1) == 0)
6073 ot = OT_BYTE;
6074 else
6075 ot = dflag ? OT_LONG : OT_WORD;
6076 val = ldub_code(s->pc++);
6077 gen_op_movl_T0_im(val);
6078 gen_check_io(s, ot, pc_start - s->cs_base,
6079 svm_is_rep(prefixes));
6080 gen_op_mov_TN_reg(ot, 1, R_EAX);
6082 if (use_icount)
6083 gen_io_start();
6084 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6085 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6086 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6087 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6088 if (use_icount) {
6089 gen_io_end();
6090 gen_jmp(s, s->pc - s->cs_base);
6092 break;
6093 case 0xec:
6094 case 0xed:
6095 if ((b & 1) == 0)
6096 ot = OT_BYTE;
6097 else
6098 ot = dflag ? OT_LONG : OT_WORD;
6099 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6100 gen_op_andl_T0_ffff();
6101 gen_check_io(s, ot, pc_start - s->cs_base,
6102 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6103 if (use_icount)
6104 gen_io_start();
6105 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6106 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6107 gen_op_mov_reg_T1(ot, R_EAX);
6108 if (use_icount) {
6109 gen_io_end();
6110 gen_jmp(s, s->pc - s->cs_base);
6112 break;
6113 case 0xee:
6114 case 0xef:
6115 if ((b & 1) == 0)
6116 ot = OT_BYTE;
6117 else
6118 ot = dflag ? OT_LONG : OT_WORD;
6119 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6120 gen_op_andl_T0_ffff();
6121 gen_check_io(s, ot, pc_start - s->cs_base,
6122 svm_is_rep(prefixes));
6123 gen_op_mov_TN_reg(ot, 1, R_EAX);
6125 if (use_icount)
6126 gen_io_start();
6127 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6128 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6129 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6130 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6131 if (use_icount) {
6132 gen_io_end();
6133 gen_jmp(s, s->pc - s->cs_base);
6135 break;
6137 /************************/
6138 /* control */
6139 case 0xc2: /* ret im */
6140 val = ldsw_code(s->pc);
6141 s->pc += 2;
6142 gen_pop_T0(s);
6143 if (CODE64(s) && s->dflag)
6144 s->dflag = 2;
6145 gen_stack_update(s, val + (2 << s->dflag));
6146 if (s->dflag == 0)
6147 gen_op_andl_T0_ffff();
6148 gen_op_jmp_T0();
6149 gen_eob(s);
6150 break;
6151 case 0xc3: /* ret */
6152 gen_pop_T0(s);
6153 gen_pop_update(s);
6154 if (s->dflag == 0)
6155 gen_op_andl_T0_ffff();
6156 gen_op_jmp_T0();
6157 gen_eob(s);
6158 break;
6159 case 0xca: /* lret im */
6160 val = ldsw_code(s->pc);
6161 s->pc += 2;
6162 do_lret:
6163 if (s->pe && !s->vm86) {
6164 if (s->cc_op != CC_OP_DYNAMIC)
6165 gen_op_set_cc_op(s->cc_op);
6166 gen_jmp_im(pc_start - s->cs_base);
6167 gen_helper_lret_protected(tcg_const_i32(s->dflag),
6168 tcg_const_i32(val));
6169 } else {
6170 gen_stack_A0(s);
6171 /* pop offset */
6172 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6173 if (s->dflag == 0)
6174 gen_op_andl_T0_ffff();
6175 /* NOTE: keeping EIP updated is not a problem in case of
6176 exception */
6177 gen_op_jmp_T0();
6178 /* pop selector */
6179 gen_op_addl_A0_im(2 << s->dflag);
6180 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6181 gen_op_movl_seg_T0_vm(R_CS);
6182 /* add stack offset */
6183 gen_stack_update(s, val + (4 << s->dflag));
6185 gen_eob(s);
6186 break;
6187 case 0xcb: /* lret */
6188 val = 0;
6189 goto do_lret;
6190 case 0xcf: /* iret */
6191 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6192 if (!s->pe) {
6193 /* real mode */
6194 gen_helper_iret_real(tcg_const_i32(s->dflag));
6195 s->cc_op = CC_OP_EFLAGS;
6196 } else if (s->vm86) {
6197 if (s->iopl != 3) {
6198 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6199 } else {
6200 gen_helper_iret_real(tcg_const_i32(s->dflag));
6201 s->cc_op = CC_OP_EFLAGS;
6203 } else {
6204 if (s->cc_op != CC_OP_DYNAMIC)
6205 gen_op_set_cc_op(s->cc_op);
6206 gen_jmp_im(pc_start - s->cs_base);
6207 gen_helper_iret_protected(tcg_const_i32(s->dflag),
6208 tcg_const_i32(s->pc - s->cs_base));
6209 s->cc_op = CC_OP_EFLAGS;
6211 gen_eob(s);
6212 break;
6213 case 0xe8: /* call im */
6215 if (dflag)
6216 tval = (int32_t)insn_get(s, OT_LONG);
6217 else
6218 tval = (int16_t)insn_get(s, OT_WORD);
6219 next_eip = s->pc - s->cs_base;
6220 tval += next_eip;
6221 if (s->dflag == 0)
6222 tval &= 0xffff;
6223 gen_movtl_T0_im(next_eip);
6224 gen_push_T0(s);
6225 gen_jmp(s, tval);
6227 break;
6228 case 0x9a: /* lcall im */
6230 unsigned int selector, offset;
6232 if (CODE64(s))
6233 goto illegal_op;
6234 ot = dflag ? OT_LONG : OT_WORD;
6235 offset = insn_get(s, ot);
6236 selector = insn_get(s, OT_WORD);
6238 gen_op_movl_T0_im(selector);
6239 gen_op_movl_T1_imu(offset);
6241 goto do_lcall;
6242 case 0xe9: /* jmp im */
6243 if (dflag)
6244 tval = (int32_t)insn_get(s, OT_LONG);
6245 else
6246 tval = (int16_t)insn_get(s, OT_WORD);
6247 tval += s->pc - s->cs_base;
6248 if (s->dflag == 0)
6249 tval &= 0xffff;
6250 else if(!CODE64(s))
6251 tval &= 0xffffffff;
6252 gen_jmp(s, tval);
6253 break;
6254 case 0xea: /* ljmp im */
6256 unsigned int selector, offset;
6258 if (CODE64(s))
6259 goto illegal_op;
6260 ot = dflag ? OT_LONG : OT_WORD;
6261 offset = insn_get(s, ot);
6262 selector = insn_get(s, OT_WORD);
6264 gen_op_movl_T0_im(selector);
6265 gen_op_movl_T1_imu(offset);
6267 goto do_ljmp;
6268 case 0xeb: /* jmp Jb */
6269 tval = (int8_t)insn_get(s, OT_BYTE);
6270 tval += s->pc - s->cs_base;
6271 if (s->dflag == 0)
6272 tval &= 0xffff;
6273 gen_jmp(s, tval);
6274 break;
6275 case 0x70 ... 0x7f: /* jcc Jb */
6276 tval = (int8_t)insn_get(s, OT_BYTE);
6277 goto do_jcc;
6278 case 0x180 ... 0x18f: /* jcc Jv */
6279 if (dflag) {
6280 tval = (int32_t)insn_get(s, OT_LONG);
6281 } else {
6282 tval = (int16_t)insn_get(s, OT_WORD);
6284 do_jcc:
6285 next_eip = s->pc - s->cs_base;
6286 tval += next_eip;
6287 if (s->dflag == 0)
6288 tval &= 0xffff;
6289 gen_jcc(s, b, tval, next_eip);
6290 break;
6292 case 0x190 ... 0x19f: /* setcc Gv */
6293 modrm = ldub_code(s->pc++);
6294 gen_setcc(s, b);
6295 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6296 break;
6297 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6298 {
6299 int l1;
6300 TCGv t0;
6302 ot = dflag + OT_WORD;
6303 modrm = ldub_code(s->pc++);
6304 reg = ((modrm >> 3) & 7) | rex_r;
6305 mod = (modrm >> 6) & 3;
6306 t0 = tcg_temp_local_new();
6307 if (mod != 3) {
6308 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6309 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6310 } else {
6311 rm = (modrm & 7) | REX_B(s);
6312 gen_op_mov_v_reg(ot, t0, rm);
6314 #ifdef TARGET_X86_64
6315 if (ot == OT_LONG) {
6316 /* XXX: specific Intel behaviour ? */
6317 l1 = gen_new_label();
6318 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6319 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
6320 gen_set_label(l1);
6321 tcg_gen_movi_tl(cpu_tmp0, 0);
6322 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
6323 } else
6324 #endif
6326 l1 = gen_new_label();
6327 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6328 gen_op_mov_reg_v(ot, reg, t0);
6329 gen_set_label(l1);
6331 tcg_temp_free(t0);
6332 }
6333 break;
6335 /************************/
6336 /* flags */
6337 case 0x9c: /* pushf */
6338 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6339 if (s->vm86 && s->iopl != 3) {
6340 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6341 } else {
6342 if (s->cc_op != CC_OP_DYNAMIC)
6343 gen_op_set_cc_op(s->cc_op);
6344 gen_helper_read_eflags(cpu_T[0]);
6345 gen_push_T0(s);
6347 break;
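/* popf may only change the eflags bits the current privilege
   allows: ring 0 can update IOPL and IF, code with CPL <= IOPL can
   update IF only, and everything else neither, giving the three
   mask variants in the case below. */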
    case 0x9d: /* popf */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_pop_T0(s);
            if (s->cpl == 0) {
                if (s->dflag) {
                    gen_helper_write_eflags(cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
                } else {
                    gen_helper_write_eflags(cpu_T[0],
                                            tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
                }
            } else {
                if (s->cpl <= s->iopl) {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
                    }
                } else {
                    if (s->dflag) {
                        gen_helper_write_eflags(cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
                    } else {
                        gen_helper_write_eflags(cpu_T[0],
                                                tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
                    }
                }
            }
            gen_pop_update(s);
            s->cc_op = CC_OP_EFLAGS;
            /* abort translation because TF flag may change */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    case 0x9e: /* sahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x9f: /* lahf */
        if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_T[0]);
        /* Note: gen_compute_eflags() only gives the condition codes */
        tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
        gen_op_mov_reg_T0(OT_BYTE, R_AH);
        break;
    case 0xf5: /* cmc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xf8: /* clc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xf9: /* stc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags(cpu_cc_src);
        tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xfc: /* cld */
        tcg_gen_movi_i32(cpu_tmp2_i32, 1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
        break;
    case 0xfd: /* std */
        tcg_gen_movi_i32(cpu_tmp2_i32, -1);
        tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
        break;

    /************************/
    /* bit operations */
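    /* For bt/bts/btr/btc the bit offset is first reduced modulo the
       operand width; setting cc_op to CC_OP_SARB + ot then lets CF be
       recovered from bit 0 of cc_src, where the tested bit was shifted. */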
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
        /* load shift */
        val = ldub_code(s->pc++);
        gen_op_movl_T1_im(val);
        if (op < 4)
            goto illegal_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag + OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7) | rex_r;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_TN_reg(OT_LONG, 1, reg);
        if (mod != 3) {
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            /* specific case: we need to add a displacement */
            gen_exts(ot, cpu_T[1]);
            tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
            tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
            gen_op_ld_T0_A0(ot + s->mem_index);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
        switch(op) {
        case 0:
            tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
            break;
        case 1:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        case 2:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
            tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        default:
        case 3:
            tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
            tcg_gen_movi_tl(cpu_tmp0, 1);
            tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
            tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
        s->cc_op = CC_OP_SARB + ot;
        if (op != 0) {
            if (mod != 3)
                gen_op_st_T0_A0(ot + s->mem_index);
            else
                gen_op_mov_reg_T0(ot, rm);
            tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
            tcg_gen_movi_tl(cpu_cc_dst, 0);
        }
        break;
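    /* bsf/bsr only commit the helper result when the source is non-zero;
       cc_dst (0 or 1 here) feeds CC_OP_LOGICB so ZF reports whether a
       set bit was found, and the destination is untouched otherwise. */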
    case 0x1bc: /* bsf */
    case 0x1bd: /* bsr */
        {
            int label1;
            TCGv t0;

            ot = dflag + OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
            gen_extu(ot, cpu_T[0]);
            label1 = gen_new_label();
            tcg_gen_movi_tl(cpu_cc_dst, 0);
            t0 = tcg_temp_local_new();
            tcg_gen_mov_tl(t0, cpu_T[0]);
            tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
            if (b & 1) {
                gen_helper_bsr(cpu_T[0], t0);
            } else {
                gen_helper_bsf(cpu_T[0], t0);
            }
            gen_op_mov_reg_T0(ot, reg);
            tcg_gen_movi_tl(cpu_cc_dst, 1);
            gen_set_label(label1);
            tcg_gen_discard_tl(cpu_cc_src);
            s->cc_op = CC_OP_LOGICB + ot;
            tcg_temp_free(t0);
        }
        break;
    /************************/
    /* bcd */
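    /* The BCD adjustments touch AL/AH and several flags at once, so they
       are implemented entirely in helpers. */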
    case 0x27: /* daa */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_daa();
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x2f: /* das */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_das();
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x37: /* aaa */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_aaa();
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x3f: /* aas */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_helper_aas();
        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0xd4: /* aam */
        if (CODE64(s))
            goto illegal_op;
        val = ldub_code(s->pc++);
        if (val == 0) {
            gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
        } else {
            gen_helper_aam(tcg_const_i32(val));
            s->cc_op = CC_OP_LOGICB;
        }
        break;
    case 0xd5: /* aad */
        if (CODE64(s))
            goto illegal_op;
        val = ldub_code(s->pc++);
        gen_helper_aad(tcg_const_i32(val));
        s->cc_op = CC_OP_LOGICB;
        break;
    /************************/
    /* misc */
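    /* 0x90 is nop, but with a repz prefix it is pause, which is visible
       to SVM through the PAUSE intercept. */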
    case 0x90: /* nop */
        /* XXX: xchg + rex handling */
        /* XXX: correct lock test for all insn */
        if (prefixes & PREFIX_LOCK)
            goto illegal_op;
        if (prefixes & PREFIX_REPZ) {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
        }
        break;
    case 0x9b: /* fwait */
        if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
            (HF_MP_MASK | HF_TS_MASK)) {
            gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fwait();
        }
        break;
    case 0xcc: /* int3 */
        gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
        break;
    case 0xcd: /* int N */
        val = ldub_code(s->pc++);
        if (s->vm86 && s->iopl != 3) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
        }
        break;
    case 0xce: /* into */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_into(tcg_const_i32(s->pc - pc_start));
        break;
#ifdef WANT_ICEBP
    case 0xf1: /* icebp (undocumented, exits to external debugger) */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
#if 1
        gen_debug(s, pc_start - s->cs_base);
#else
        /* start debug */
        tb_flush(cpu_single_env);
        cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
#endif
        break;
#endif
    case 0xfa: /* cli */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
                gen_helper_cli();
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                gen_helper_cli();
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0xfb: /* sti */
        if (!s->vm86) {
            if (s->cpl <= s->iopl) {
            gen_sti:
                gen_helper_sti();
                /* interrupts are enabled only on the first insn after sti */
                /* If several instructions disable interrupts, only the
                   _first_ does it */
                if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
                    gen_helper_set_inhibit_irq();
                /* give a chance to handle pending irqs */
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        } else {
            if (s->iopl == 3) {
                goto gen_sti;
            } else {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            }
        }
        break;
    case 0x62: /* bound */
        if (CODE64(s))
            goto illegal_op;
        ot = dflag ? OT_LONG : OT_WORD;
        modrm = ldub_code(s->pc++);
        reg = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_op_mov_TN_reg(ot, 0, reg);
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        gen_jmp_im(pc_start - s->cs_base);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        if (ot == OT_WORD)
            gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
        else
            gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
        break;
    case 0x1c8 ... 0x1cf: /* bswap reg */
        reg = (b & 7) | REX_B(s);
#ifdef TARGET_X86_64
        if (dflag == 2) {
            gen_op_mov_TN_reg(OT_QUAD, 0, reg);
            tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_QUAD, reg);
        } else
#endif
        {
            gen_op_mov_TN_reg(OT_LONG, 0, reg);
            tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
            tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
            gen_op_mov_reg_T0(OT_LONG, reg);
        }
        break;
    case 0xd6: /* salc */
        if (CODE64(s))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_compute_eflags_c(cpu_T[0]);
        tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
        gen_op_mov_reg_T0(OT_BYTE, R_EAX);
        break;
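    /* The loop/jecxz family is built from three labels: l1 is the taken
       branch, l3 the not-taken path used by loopz/loopnz when ECX hits
       zero, and l2 the common exit where the block ends. */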
    case 0xe0: /* loopnz */
    case 0xe1: /* loopz */
    case 0xe2: /* loop */
    case 0xe3: /* jecxz */
        {
            int l1, l2, l3;

            tval = (int8_t)insn_get(s, OT_BYTE);
            next_eip = s->pc - s->cs_base;
            tval += next_eip;
            if (s->dflag == 0)
                tval &= 0xffff;

            l1 = gen_new_label();
            l2 = gen_new_label();
            l3 = gen_new_label();
            b &= 3;
            switch(b) {
            case 0: /* loopnz */
            case 1: /* loopz */
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jz_ecx(s->aflag, l3);
                gen_compute_eflags(cpu_tmp0);
                tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
                if (b == 0) {
                    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
                } else {
                    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
                }
                break;
            case 2: /* loop */
                gen_op_add_reg_im(s->aflag, R_ECX, -1);
                gen_op_jnz_ecx(s->aflag, l1);
                break;
            default:
            case 3: /* jcxz */
                gen_op_jz_ecx(s->aflag, l1);
                break;
            }

            gen_set_label(l3);
            gen_jmp_im(next_eip);
            tcg_gen_br(l2);

            gen_set_label(l1);
            gen_jmp_im(tval);
            gen_set_label(l2);
            gen_eob(s);
        }
        break;
    case 0x130: /* wrmsr */
    case 0x132: /* rdmsr */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            if (b & 2) {
                gen_helper_rdmsr();
            } else {
                gen_helper_wrmsr();
            }
        }
        break;
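    /* When icount is in use, rdtsc is bracketed with gen_io_start()/
       gen_io_end() and the block is ended, so the timestamp is read at a
       precise instruction boundary. */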
    case 0x131: /* rdtsc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        if (use_icount)
            gen_io_start();
        gen_helper_rdtsc();
        if (use_icount) {
            gen_io_end();
            gen_jmp(s, s->pc - s->cs_base);
        }
        break;
    case 0x133: /* rdpmc */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_rdpmc();
        break;
    case 0x134: /* sysenter */
        /* For Intel SYSENTER is valid on 64-bit */
        if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC) {
                gen_op_set_cc_op(s->cc_op);
                s->cc_op = CC_OP_DYNAMIC;
            }
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysenter();
            gen_eob(s);
        }
        break;
    case 0x135: /* sysexit */
        /* For Intel SYSEXIT is valid on 64-bit */
        if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
            goto illegal_op;
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC) {
                gen_op_set_cc_op(s->cc_op);
                s->cc_op = CC_OP_DYNAMIC;
            }
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysexit(tcg_const_i32(dflag));
            gen_eob(s);
        }
        break;
#ifdef TARGET_X86_64
    case 0x105: /* syscall */
        /* XXX: is it usable in real mode ? */
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_jmp_im(pc_start - s->cs_base);
        gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
        gen_eob(s);
        break;
    case 0x107: /* sysret */
        if (!s->pe) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            if (s->cc_op != CC_OP_DYNAMIC) {
                gen_op_set_cc_op(s->cc_op);
                s->cc_op = CC_OP_DYNAMIC;
            }
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_sysret(tcg_const_i32(s->dflag));
            /* condition codes are modified only in long mode */
            if (s->lma)
                s->cc_op = CC_OP_EFLAGS;
            gen_eob(s);
        }
        break;
#endif
6889 #endif
6890 case 0x1a2: /* cpuid */
6891 if (s->cc_op != CC_OP_DYNAMIC)
6892 gen_op_set_cc_op(s->cc_op);
6893 gen_jmp_im(pc_start - s->cs_base);
6894 gen_helper_cpuid();
6895 break;
6896 case 0xf4: /* hlt */
6897 if (s->cpl != 0) {
6898 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6899 } else {
6900 if (s->cc_op != CC_OP_DYNAMIC)
6901 gen_op_set_cc_op(s->cc_op);
6902 gen_jmp_im(pc_start - s->cs_base);
6903 gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
6904 s->is_jmp = 3;
6906 break;
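    /* Groups 0x100 and 0x101 cover the system-table instructions; the
       sub-opcode is taken from the reg field of the modrm byte. */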
    case 0x100:
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* sldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
            break;
        case 2: /* lldt */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_lldt(cpu_tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
            ot = OT_WORD;
            if (mod == 3)
                ot += s->dflag;
            gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
            break;
        case 3: /* ltr */
            if (!s->pe || s->vm86)
                goto illegal_op;
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                gen_jmp_im(pc_start - s->cs_base);
                tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
                gen_helper_ltr(cpu_tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!s->pe || s->vm86)
                goto illegal_op;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            if (op == 4)
                gen_helper_verr(cpu_T[0]);
            else
                gen_helper_verw(cpu_T[0]);
            s->cc_op = CC_OP_EFLAGS;
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x101:
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        rm = modrm & 7;
        switch(op) {
        case 0: /* sgdt */
            if (mod == 3)
                goto illegal_op;
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_T0_A0(OT_WORD + s->mem_index);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
            if (!s->dflag)
                gen_op_andl_T0_im(0xffffff);
            gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            break;
        case 1:
            if (mod == 3) {
                switch (rm) {
                case 0: /* monitor */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
#ifdef TARGET_X86_64
                    if (s->aflag == 2) {
                        gen_op_movq_A0_reg(R_EAX);
                    } else
#endif
                    {
                        gen_op_movl_A0_reg(R_EAX);
                        if (s->aflag == 0)
                            gen_op_andl_A0_ffff();
                    }
                    gen_add_A0_ds_seg(s);
                    gen_helper_monitor(cpu_A0);
                    break;
                case 1: /* mwait */
                    if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
                        s->cpl != 0)
                        goto illegal_op;
                    if (s->cc_op != CC_OP_DYNAMIC) {
                        gen_op_set_cc_op(s->cc_op);
                        s->cc_op = CC_OP_DYNAMIC;
                    }
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
                    gen_eob(s);
                    break;
                default:
                    goto illegal_op;
                }
            } else { /* sidt */
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
                gen_op_st_T0_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
            }
            break;
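        /* With mod == 3, the lgdt/lidt encodings are reused for the SVM
           instructions (vmrun, vmmcall, ...), selected by the rm field. */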
        case 2: /* lgdt */
        case 3: /* lidt */
            if (mod == 3) {
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                switch(rm) {
                case 0: /* VMRUN */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmrun(tcg_const_i32(s->aflag),
                                         tcg_const_i32(s->pc - pc_start));
                        tcg_gen_exit_tb(0);
                        s->is_jmp = 3;
                    }
                    break;
                case 1: /* VMMCALL */
                    if (!(s->flags & HF_SVME_MASK))
                        goto illegal_op;
                    gen_helper_vmmcall();
                    break;
                case 2: /* VMLOAD */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmload(tcg_const_i32(s->aflag));
                    }
                    break;
                case 3: /* VMSAVE */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_vmsave(tcg_const_i32(s->aflag));
                    }
                    break;
                case 4: /* STGI */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_stgi();
                    }
                    break;
                case 5: /* CLGI */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_clgi();
                    }
                    break;
                case 6: /* SKINIT */
                    if ((!(s->flags & HF_SVME_MASK) &&
                         !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
                        !s->pe)
                        goto illegal_op;
                    gen_helper_skinit();
                    break;
                case 7: /* INVLPGA */
                    if (!(s->flags & HF_SVME_MASK) || !s->pe)
                        goto illegal_op;
                    if (s->cpl != 0) {
                        gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
                        break;
                    } else {
                        gen_helper_invlpga(tcg_const_i32(s->aflag));
                    }
                    break;
                default:
                    goto illegal_op;
                }
            } else if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start,
                                        op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_T1_A0(OT_WORD + s->mem_index);
                gen_add_A0_im(s, 2);
                gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
                if (!s->dflag)
                    gen_op_andl_T0_im(0xffffff);
                if (op == 2) {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
                } else {
                    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
                    tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
                }
            }
            break;
        case 4: /* smsw */
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined WORDS_BIGENDIAN
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
#else
            tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
#endif
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
            break;
        case 6: /* lmsw */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
                gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
                gen_helper_lmsw(cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            }
            break;
        case 7: /* invlpg */
            if (s->cpl != 0) {
                gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
            } else {
                if (mod == 3) {
#ifdef TARGET_X86_64
                    if (CODE64(s) && rm == 0) {
                        /* swapgs */
                        tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
                        tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase));
                        tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
                        tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase));
                    } else
#endif
                    {
                        goto illegal_op;
                    }
                } else {
                    if (s->cc_op != CC_OP_DYNAMIC)
                        gen_op_set_cc_op(s->cc_op);
                    gen_jmp_im(pc_start - s->cs_base);
                    gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                    gen_helper_invlpg(cpu_A0);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                }
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x108: /* invd */
    case 0x109: /* wbinvd */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
            /* nothing to do */
        }
        break;
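    /* In 64-bit mode, opcode 0x63 is the sign-extending 32->64 bit move;
       elsewhere it is arpl, which raises the destination selector's RPL
       to the source's and reports the adjustment in ZF (via t2 below). */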
    case 0x63: /* arpl or movslS (x86_64) */
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            int d_ot;
            /* d_ot is the size of destination */
            d_ot = dflag + OT_WORD;

            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            mod = (modrm >> 6) & 3;
            rm = (modrm & 7) | REX_B(s);

            if (mod == 3) {
                gen_op_mov_TN_reg(OT_LONG, 0, rm);
                /* sign extend */
                if (d_ot == OT_QUAD)
                    tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
                gen_op_mov_reg_T0(d_ot, reg);
            } else {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                if (d_ot == OT_QUAD) {
                    gen_op_lds_T0_A0(OT_LONG + s->mem_index);
                } else {
                    gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                }
                gen_op_mov_reg_T0(d_ot, reg);
            }
        } else
#endif
        {
            int label1;
            TCGv t0, t1, t2;

            if (!s->pe || s->vm86)
                goto illegal_op;
            t0 = tcg_temp_local_new();
            t1 = tcg_temp_local_new();
            t2 = tcg_temp_local_new();
            ot = OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = (modrm >> 3) & 7;
            mod = (modrm >> 6) & 3;
            rm = modrm & 7;
            if (mod != 3) {
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
                gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
            } else {
                gen_op_mov_v_reg(ot, t0, rm);
            }
            gen_op_mov_v_reg(ot, t1, reg);
            tcg_gen_andi_tl(cpu_tmp0, t0, 3);
            tcg_gen_andi_tl(t1, t1, 3);
            tcg_gen_movi_tl(t2, 0);
            label1 = gen_new_label();
            tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
            tcg_gen_andi_tl(t0, t0, ~3);
            tcg_gen_or_tl(t0, t0, t1);
            tcg_gen_movi_tl(t2, CC_Z);
            gen_set_label(label1);
            if (mod != 3) {
                gen_op_st_v(ot + s->mem_index, t0, cpu_A0);
            } else {
                gen_op_mov_reg_v(ot, rm, t0);
            }
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_compute_eflags(cpu_cc_src);
            tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
            tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
            s->cc_op = CC_OP_EFLAGS;
            tcg_temp_free(t0);
            tcg_temp_free(t1);
            tcg_temp_free(t2);
        }
        break;
    case 0x102: /* lar */
    case 0x103: /* lsl */
        {
            int label1;
            TCGv t0;
            if (!s->pe || s->vm86)
                goto illegal_op;
            ot = dflag ? OT_LONG : OT_WORD;
            modrm = ldub_code(s->pc++);
            reg = ((modrm >> 3) & 7) | rex_r;
            gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
            t0 = tcg_temp_local_new();
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            if (b == 0x102)
                gen_helper_lar(t0, cpu_T[0]);
            else
                gen_helper_lsl(t0, cpu_T[0]);
            tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
            label1 = gen_new_label();
            tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
            gen_op_mov_reg_v(ot, reg, t0);
            gen_set_label(label1);
            s->cc_op = CC_OP_EFLAGS;
            tcg_temp_free(t0);
        }
        break;
    case 0x118:
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* prefetchnta */
        case 1: /* prefetcht0 */
        case 2: /* prefetcht1 */
        case 3: /* prefetcht2 */
            if (mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            /* nothing more to do */
            break;
        default: /* nop (multi byte) */
            gen_nop_modrm(s, modrm);
            break;
        }
        break;
    case 0x119 ... 0x11f: /* nop (multi byte) */
        modrm = ldub_code(s->pc++);
        gen_nop_modrm(s, modrm);
        break;
    case 0x120: /* mov reg, crN */
    case 0x122: /* mov crN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = ldub_code(s->pc++);
            if ((modrm & 0xc0) != 0xc0)
                goto illegal_op;
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            switch(reg) {
            case 0:
            case 2:
            case 3:
            case 4:
            case 8:
                if (s->cc_op != CC_OP_DYNAMIC)
                    gen_op_set_cc_op(s->cc_op);
                gen_jmp_im(pc_start - s->cs_base);
                if (b & 2) {
                    gen_op_mov_TN_reg(ot, 0, rm);
                    gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
                    gen_jmp_im(s->pc - s->cs_base);
                    gen_eob(s);
                } else {
                    gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
                    gen_op_mov_reg_T0(ot, rm);
                }
                break;
            default:
                goto illegal_op;
            }
        }
        break;
    case 0x121: /* mov reg, drN */
    case 0x123: /* mov drN, reg */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            modrm = ldub_code(s->pc++);
            if ((modrm & 0xc0) != 0xc0)
                goto illegal_op;
            rm = (modrm & 7) | REX_B(s);
            reg = ((modrm >> 3) & 7) | rex_r;
            if (CODE64(s))
                ot = OT_QUAD;
            else
                ot = OT_LONG;
            /* XXX: do it dynamically with CR4.DE bit */
            if (reg == 4 || reg == 5 || reg >= 8)
                goto illegal_op;
            if (b & 2) {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
                gen_op_mov_TN_reg(ot, 0, rm);
                gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
                gen_jmp_im(s->pc - s->cs_base);
                gen_eob(s);
            } else {
                gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
                tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
                gen_op_mov_reg_T0(ot, rm);
            }
        }
        break;
    case 0x106: /* clts */
        if (s->cpl != 0) {
            gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
        } else {
            gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
            gen_helper_clts();
            /* abort block because static cpu state changed */
            gen_jmp_im(s->pc - s->cs_base);
            gen_eob(s);
        }
        break;
    /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
    case 0x1c3: /* MOVNTI reg, mem */
        if (!(s->cpuid_features & CPUID_SSE2))
            goto illegal_op;
        ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        reg = ((modrm >> 3) & 7) | rex_r;
        /* generate a generic store */
        gen_ldst_modrm(s, modrm, ot, reg, 1);
        break;
    case 0x1ae:
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch(op) {
        case 0: /* fxsave */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->flags & HF_EM_MASK))
                goto illegal_op;
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
            break;
        case 1: /* fxrstor */
            if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
                (s->flags & HF_EM_MASK))
                goto illegal_op;
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(pc_start - s->cs_base);
            gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
            break;
        case 2: /* ldmxcsr */
        case 3: /* stmxcsr */
            if (s->flags & HF_TS_MASK) {
                gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
                break;
            }
            if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
                mod == 3)
                goto illegal_op;
            gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            if (op == 2) {
                gen_op_ld_T0_A0(OT_LONG + s->mem_index);
                tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
            } else {
                tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
                gen_op_st_T0_A0(OT_LONG + s->mem_index);
            }
            break;
        case 5: /* lfence */
        case 6: /* mfence */
            if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
                goto illegal_op;
            break;
        case 7: /* sfence / clflush */
            if ((modrm & 0xc7) == 0xc0) {
                /* sfence */
                /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
                if (!(s->cpuid_features & CPUID_SSE))
                    goto illegal_op;
            } else {
                /* clflush */
                if (!(s->cpuid_features & CPUID_CLFLUSH))
                    goto illegal_op;
                gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
            }
            break;
        default:
            goto illegal_op;
        }
        break;
    case 0x10d: /* 3DNow! prefetch(w) */
        modrm = ldub_code(s->pc++);
        mod = (modrm >> 6) & 3;
        if (mod == 3)
            goto illegal_op;
        gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
        /* ignore for now */
        break;
    case 0x1aa: /* rsm */
        gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
        if (!(s->flags & HF_SMM_MASK))
            goto illegal_op;
        if (s->cc_op != CC_OP_DYNAMIC) {
            gen_op_set_cc_op(s->cc_op);
            s->cc_op = CC_OP_DYNAMIC;
        }
        gen_jmp_im(s->pc - s->cs_base);
        gen_helper_rsm();
        gen_eob(s);
        break;
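    /* popcnt is encoded as F3 0F B8: the repz prefix is mandatory here
       because it doubles as the opcode selector, hence the explicit
       prefix check. */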
    case 0x1b8: /* SSE4.2 popcnt */
        if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
             PREFIX_REPZ)
            goto illegal_op;
        if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
            goto illegal_op;

        modrm = ldub_code(s->pc++);
        reg = ((modrm >> 3) & 7);

        if (s->prefix & PREFIX_DATA)
            ot = OT_WORD;
        else if (s->dflag != 2)
            ot = OT_LONG;
        else
            ot = OT_QUAD;

        gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
        gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
        gen_op_mov_reg_T0(ot, reg);

        s->cc_op = CC_OP_EFLAGS;
        break;
    case 0x10e ... 0x10f:
        /* 3DNow! instructions, ignore prefixes */
        s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
        /* fall through */
    case 0x110 ... 0x117:
    case 0x128 ... 0x12f:
    case 0x138 ... 0x13a:
    case 0x150 ... 0x177:
    case 0x17c ... 0x17f:
    case 0x1c2:
    case 0x1c4 ... 0x1c6:
    case 0x1d0 ... 0x1fe:
        gen_sse(s, b, pc_start, rex_r);
        break;
    default:
        goto illegal_op;
    }
    /* lock generation */
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    return s->pc;
 illegal_op:
    if (s->prefix & PREFIX_LOCK)
        gen_helper_unlock();
    /* XXX: ensure that no lock was generated */
    gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
    return s->pc;
}

void optimize_flags_init(void)
{
#if TCG_TARGET_REG_BITS == 32
    assert(sizeof(CCTable) == (1 << 3));
#else
    assert(sizeof(CCTable) == (1 << 4));
#endif
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
    cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
                                       offsetof(CPUState, cc_op), "cc_op");
    cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
                                    "cc_src");
    cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
                                    "cc_dst");
    cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
                                    "cc_tmp");

    /* register helpers */
#define GEN_HELPER 2
#include "helper.h"
}

/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline void gen_intermediate_code_internal(CPUState *env,
                                                  TranslationBlock *tb,
                                                  int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_ptr;
    uint16_t *gen_opc_end;
    CPUBreakpoint *bp;
    int j, lj, cflags;
    uint64_t flags;
    target_ulong pc_start;
    target_ulong cs_base;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    pc_start = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    cflags = tb->cflags;

    dc->pe = (flags >> HF_PE_SHIFT) & 1;
    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
    dc->f_st = 0;
    dc->vm86 = (flags >> VM_SHIFT) & 1;
    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
    dc->iopl = (flags >> IOPL_SHIFT) & 3;
    dc->tf = (flags >> TF_SHIFT) & 1;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->cc_op = CC_OP_DYNAMIC;
    dc->cs_base = cs_base;
    dc->tb = tb;
    dc->popl_esp_hack = 0;
    /* select memory access functions */
    dc->mem_index = 0;
    if (flags & HF_SOFTMMU_MASK) {
        if (dc->cpl == 3)
            dc->mem_index = 2 * 4;
        else
            dc->mem_index = 1 * 4;
    }
    dc->cpuid_features = env->cpuid_features;
    dc->cpuid_ext_features = env->cpuid_ext_features;
    dc->cpuid_ext2_features = env->cpuid_ext2_features;
    dc->cpuid_ext3_features = env->cpuid_ext3_features;
#ifdef TARGET_X86_64
    dc->lma = (flags >> HF_LMA_SHIFT) & 1;
    dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
#endif
    dc->flags = flags;
    dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
                    (flags & HF_INHIBIT_IRQ_MASK)
#ifndef CONFIG_SOFTMMU
                    || (flags & HF_SOFTMMU_MASK)
#endif
                    );
#if 0
    /* check addseg logic */
    if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
        printf("ERROR addseg\n");
#endif
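
    /* The per-block TCG temporaries are allocated afresh for every
       translation; values held in tcg_temp_new() temporaries do not
       survive across translation blocks. */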
    cpu_T[0] = tcg_temp_new();
    cpu_T[1] = tcg_temp_new();
    cpu_A0 = tcg_temp_new();
    cpu_T3 = tcg_temp_new();

    cpu_tmp0 = tcg_temp_new();
    cpu_tmp1_i64 = tcg_temp_new_i64();
    cpu_tmp2_i32 = tcg_temp_new_i32();
    cpu_tmp3_i32 = tcg_temp_new_i32();
    cpu_tmp4 = tcg_temp_new();
    cpu_tmp5 = tcg_temp_new();
    cpu_tmp6 = tcg_temp_new();
    cpu_ptr0 = tcg_temp_new_ptr();
    cpu_ptr1 = tcg_temp_new_ptr();

    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;

    dc->is_jmp = DISAS_NEXT;
    pc_ptr = pc_start;
    lj = -1;
    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0)
        max_insns = CF_COUNT_MASK;

    gen_icount_start();
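    /* Translate until something forces an exit: an instruction that ends
       the block (is_jmp set), single-stepping, a pending IRQ inhibit, a
       full op buffer, the insn budget, or the page-crossing limit below. */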
    for(;;) {
        if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
            TAILQ_FOREACH(bp, &env->breakpoints, entry) {
                if (bp->pc == pc_ptr) {
                    gen_debug(dc, pc_ptr - dc->cs_base);
                    break;
                }
            }
        }
        if (search_pc) {
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = pc_ptr;
            gen_opc_cc_op[lj] = dc->cc_op;
            gen_opc_instr_start[lj] = 1;
            gen_opc_icount[lj] = num_insns;
        }
        if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
            gen_io_start();

        pc_ptr = disas_insn(dc, pc_ptr);
        num_insns++;
        /* stop translation if indicated */
        if (dc->is_jmp)
            break;
        /* if single step mode, we generate only one instruction and
           generate an exception */
        /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
           the flag and abort the translation to give the irqs a
           chance to happen */
        if (dc->tf || dc->singlestep_enabled ||
            (flags & HF_INHIBIT_IRQ_MASK)) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        /* if the translation grows too long, stop generation too */
        if (gen_opc_ptr >= gen_opc_end ||
            (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
            num_insns >= max_insns) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
        if (singlestep) {
            gen_jmp_im(pc_ptr - dc->cs_base);
            gen_eob(dc);
            break;
        }
    }
    if (tb->cflags & CF_LAST_IO)
        gen_io_end();
    gen_icount_end(tb, num_insns);
    *gen_opc_ptr = INDEX_op_end;
    /* make sure the last values are filled in */
    if (search_pc) {
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    }

#ifdef DEBUG_DISAS
    log_cpu_state_mask(CPU_LOG_TB_CPU, env, X86_DUMP_CCOP);
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        int disas_flags;
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
#ifdef TARGET_X86_64
        if (dc->code64)
            disas_flags = 2;
        else
#endif
            disas_flags = !dc->code32;
        log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
        qemu_log("\n");
    }
#endif

    if (!search_pc) {
        tb->size = pc_ptr - pc_start;
        tb->icount = num_insns;
    }
}

void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 0);
}

void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
{
    gen_intermediate_code_internal(env, tb, 1);
}

void gen_pc_load(CPUState *env, TranslationBlock *tb,
                 unsigned long searched_pc, int pc_pos, void *puc)
{
    int cc_op;
#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
        int i;
        qemu_log("RESTORE:\n");
        for(i = 0; i <= pc_pos; i++) {
            if (gen_opc_instr_start[i]) {
                qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
            }
        }
        qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
                 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
                 (uint32_t)tb->cs_base);
    }
#endif
    env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
    cc_op = gen_opc_cc_op[pc_pos];
    if (cc_op != CC_OP_DYNAMIC)
        env->cc_op = cc_op;
}