/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif
//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    TCGMemOp aflag;
    TCGMemOp dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non-zero if either DS/ES/SS has a non-zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};
/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};
enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};
enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};
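
/* Worked example of the lazy-flags scheme: after "sub %ebx, %eax" the
   translator records only CC_OP_SUBL plus the operands in cc_dst,
   cc_src and cc_srcT.  A later "jc" can then derive CF as
   (uint32_t)cc_srcT < (uint32_t)cc_src (see gen_prepare_eflags_c
   below) without ever materializing the full EFLAGS value.  */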
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
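
/* E.g. register number 4 in a byte operation means AH in legacy
   encodings, but SPL once any REX prefix is present (x86_64_hregs);
   numbers 8..15 always mean the low byte of R8..R15.  */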
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}
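
/* E.g. opcodes 0x88/0x89 (MOV r/m,r) differ only in bit 0: mo_b_d(0x88,
   ot) yields MO_8 while mo_b_d(0x89, ot) yields the full operand size.
   Port I/O (IN/OUT) has no 64-bit form even with a REX.W prefix, hence
   the 32-bit cap in mo_b_d32.  */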
static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}

static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}
static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env,
                     offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T[0]);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
        break;
    case MO_16:
        /* 16 bit address, always apply a segment base */
        if (override < 0)
            override = R_DS;
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
        gen_op_addl_A0_seg(s, override);
        break;
    default:
        tcg_abort();
    }
}
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        gen_op_movq_A0_reg(R_EDI);
        break;
#endif
    case MO_32:
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
        break;
    case MO_16:
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
        gen_op_addl_A0_seg(s, R_ES);
        break;
    default:
        tcg_abort();
    }
}
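
/* Note the asymmetry mandated by the ISA: the ESI side of a string
   instruction defaults to DS but honours segment override prefixes,
   while the EDI side always uses ES and cannot be overridden.  */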
static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
static inline void gen_op_jnz_ecx(TCGMemOp size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, int label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, n);
        break;
    case MO_16:
        gen_helper_inw(v, n);
        break;
    case MO_32:
        gen_helper_inl(v, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(v, n);
        break;
    case MO_16:
        gen_helper_outw(v, n);
        break;
    case MO_32:
        gen_helper_outl(v, n);
        break;
    default:
        tcg_abort();
    }
}
static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            gen_update_cc_op(s);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
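
/* The slow path above is only taken when the guest could legitimately
   be denied the access: in protected mode with CPL > IOPL, or in vm86
   mode, the check_io helpers consult the TSS I/O permission bitmap and
   raise #GP if the port is not accessible.  */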
static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    TCGMemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
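
/* The JCC_L/JCC_LE slow paths rely on SF != OF meaning "less":
   shifting CC_SRC right by 4 moves the O bit (0x800) onto the S bit
   position (0x80), so the xor leaves CC_S set exactly when SF != OF.  */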
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, int l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, int l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
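
/* All of the string helpers above share the same shape: compute the
   effective address, do the access, then advance ESI/EDI by the value
   produced by gen_op_movl_T0_Dshift(), i.e. +/-(1 << ot) since env->df
   holds the direction flag as +1 or -1.  */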
static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T[0], 0);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
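
/* A REP prefix is thus translated as a one-iteration body that jumps
   back to the same instruction (cur_eip) while ECX != 0; REPZ/REPNZ
   (GEN_REPZ2) additionally exits to next_eip when ZF disagrees with
   the requested 'nz' sense after each scas/cmps iteration.  */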
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
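
/* Note how ADC/SBB stash the incoming carry in cpu_tmp4 and record it
   via gen_op_update3_cc() into cc_src2, so the flag helpers can later
   reconstruct CF/OF for a three-operand add; plain ADD/SUB only need
   the two operands (cc_src/cc_dst, plus cc_srcT for SUB).  */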
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
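
/* The movcond dance above implements the x86 rule that a shift by a
   count of zero leaves the flags (and therefore CC_OP) untouched:
   each CC variable and the CC_OP value are only replaced when
   count != 0.  */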
static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
}

static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T[0], op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
        tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
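
/* The byte/word replication above (multiply by 0x01010101, or the
   16/16 deposit) widens the input so that a single 32-bit rotate
   produces the correct result for any count; e.g. rotating the
   replicated byte 0xAB left by 3 leaves 0x5D in every byte lane.  */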
static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
            tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T[0], op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        default:
            tcg_abort();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        default:
            tcg_abort();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
            tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
            tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case MO_32:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
                           cpu_tmp4, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}
static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_v_reg(ot, cpu_T[1], s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(cpu_T[1], c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int mod, rm, code, override, must_add_seg;
    TCGv sum;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        base = rm;
        index = -1;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }

        /* Compute the address, with a minimum number of TCG ops.  */
        TCGV_UNUSED(sum);
        if (index >= 0) {
            if (scale == 0) {
                sum = cpu_regs[index];
            } else {
                tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
                sum = cpu_A0;
            }
            if (base >= 0) {
                tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
                sum = cpu_A0;
            }
        } else if (base >= 0) {
            sum = cpu_regs[base];
        }
        if (TCGV_IS_UNUSED(sum)) {
            tcg_gen_movi_tl(cpu_A0, disp);
        } else {
            tcg_gen_addi_tl(cpu_A0, sum, disp);
        }

        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP) {
                    override = R_SS;
                } else {
                    override = R_DS;
                }
            }

            tcg_gen_ld_tl(cpu_tmp0, cpu_env,
                          offsetof(CPUX86State, segs[override].base));
            if (CODE64(s)) {
                if (s->aflag == MO_32) {
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                }
                tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
                return;
            }

            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        }

        if (s->aflag == MO_32) {
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
        break;

    case MO_16:
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_movi_tl(cpu_A0, disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int16_t)cpu_lduw_code(env, s->pc);
            s->pc += 2;
            break;
        }

        sum = cpu_A0;
        switch (rm) {
        case 0:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
            break;
        case 1:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
            break;
        case 2:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
            break;
        case 3:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
            break;
        case 4:
            sum = cpu_regs[R_ESI];
            break;
        case 5:
            sum = cpu_regs[R_EDI];
            break;
        case 6:
            sum = cpu_regs[R_EBP];
            break;
        default:
        case 7:
            sum = cpu_regs[R_EBX];
            break;
        }
        tcg_gen_addi_tl(cpu_A0, sum, disp);
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6) {
                    override = R_SS;
                } else {
                    override = R_DS;
                }
            }
            gen_op_addl_A0_seg(s, override);
        }
        break;

    default:
        tcg_abort();
    }
}
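
/* Decoding example: for "mov 0x10(%eax,%ebx,2),%ecx" (8B 4C 58 10) the
   modrm byte 0x4C gives mod=1, rm=4, so a SIB byte follows; SIB 0x58
   gives scale=1 (x2), index=EBX, base=EAX, and the trailing 0x10 is a
   sign-extended 8-bit displacement.  */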
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        base = rm;

        if (base == 4) {
            code = cpu_ldub_code(env, s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
        break;

    case MO_16:
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
        break;

    default:
        tcg_abort();
    }
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(s, override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           TCGMemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[0], rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
    }
}
static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = cpu_ldub_code(env, s->pc);
        s->pc++;
        break;
    case MO_16:
        ret = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = cpu_ldl_code(env, s->pc);
        s->pc += 4;
        break;
    default:
        tcg_abort();
    }
    return ret;
}

static inline int insn_const_size(TCGMemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK)) {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
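
/* Direct block chaining (tcg_gen_goto_tb/tcg_gen_exit_tb with a tb
   pointer) is only safe when the jump target lies in one of the pages
   the TB was translated from, since invalidating those pages also
   invalidates this TB; anything else must go through gen_eob() and a
   fresh TB lookup.  */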
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2;

    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
                        int modrm, int reg)
{
    CCPrepare cc;

    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

    cc = gen_prepare_cc(s, b, cpu_T[1]);
    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_const_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
                       cpu_T[0], cpu_regs[reg]);
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_temp_free(cc.reg);
    }
    if (!cc.use_reg2) {
        tcg_temp_free(cc.reg2);
    }
}
2277 static inline void gen_op_movl_T0_seg(int seg_reg)
2279 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2280 offsetof(CPUX86State,segs[seg_reg].selector));
2283 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2285 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2286 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2287 offsetof(CPUX86State,segs[seg_reg].selector));
2288 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2289 tcg_gen_st_tl(cpu_T[0], cpu_env,
2290 offsetof(CPUX86State,segs[seg_reg].base));
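/* Editor's sketch (standalone): in real and vm86 mode a segment's base is
   simply selector * 16, which is what the shift above stores: */
#include <stdint.h>
#include <assert.h>

static uint32_t real_mode_seg_base(uint16_t selector)
{
    return (uint32_t)selector << 4;
}

static void real_mode_seg_base_demo(void)
{
    assert(real_mode_seg_base(0x1234) == 0x12340);
}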
2293 /* move T0 to seg_reg and determine whether the CPU state may change. Never
2294 call this function with seg_reg == R_CS */
2295 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2297 if (s->pe && !s->vm86) {
2298 /* XXX: optimize by finding processor state dynamically */
2299 gen_update_cc_op(s);
2300 gen_jmp_im(cur_eip);
2301 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2302 gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
2303 /* abort translation because the addseg value may change or
2304 because ss32 may change. For R_SS, translation must always
2305 stop, since special handling is needed to disable hardware
2306 interrupts for the next instruction */
2307 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2308 s->is_jmp = DISAS_TB_JUMP;
2309 } else {
2310 gen_op_movl_seg_T0_vm(seg_reg);
2311 if (seg_reg == R_SS)
2312 s->is_jmp = DISAS_TB_JUMP;
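/* Editor's note: the 8 returned below is bit 3 (the REP flag) of the
   EXITINFO1 word that SVM's IOIO intercept reports, per the AMD APM. */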
2316 static inline int svm_is_rep(int prefixes)
2318 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2321 static inline void
2322 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2323 uint32_t type, uint64_t param)
2325 /* no SVM activated; fast case */
2326 if (likely(!(s->flags & HF_SVMI_MASK)))
2327 return;
2328 gen_update_cc_op(s);
2329 gen_jmp_im(pc_start - s->cs_base);
2330 gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
2331 tcg_const_i64(param));
2334 static inline void
2335 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2337 gen_svm_check_intercept_param(s, pc_start, type, 0);
2340 static inline void gen_stack_update(DisasContext *s, int addend)
2342 #ifdef TARGET_X86_64
2343 if (CODE64(s)) {
2344 gen_op_add_reg_im(MO_64, R_ESP, addend);
2345 } else
2346 #endif
2347 if (s->ss32) {
2348 gen_op_add_reg_im(MO_32, R_ESP, addend);
2349 } else {
2350 gen_op_add_reg_im(MO_16, R_ESP, addend);
2354 /* Generate a push. It depends on ss32, addseg and dflag. */
2355 static void gen_push_v(DisasContext *s, TCGv val)
2357 TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
2358 int size = 1 << d_ot;
2359 TCGv new_esp = cpu_A0;
2361 tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);
2363 if (CODE64(s)) {
2364 a_ot = MO_64;
2365 } else if (s->ss32) {
2366 a_ot = MO_32;
2367 if (s->addseg) {
2368 new_esp = cpu_tmp4;
2369 tcg_gen_mov_tl(new_esp, cpu_A0);
2370 gen_op_addl_A0_seg(s, R_SS);
2371 } else {
2372 tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
2374 } else {
2375 a_ot = MO_16;
2376 new_esp = cpu_tmp4;
2377 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2378 tcg_gen_mov_tl(new_esp, cpu_A0);
2379 gen_op_addl_A0_seg(s, R_SS);
2382 gen_op_st_v(s, d_ot, val, cpu_A0);
2383 gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
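/* Editor's sketch (standalone): gen_push_v() computes the decremented stack
   pointer, performs the store, and only then commits R_ESP, so a faulting
   store leaves the architectural ESP unmodified: */
#include <stdint.h>
#include <string.h>

static void push32(uint8_t *stack, uint32_t *esp, uint32_t val)
{
    uint32_t new_esp = *esp - 4;       /* tentative stack pointer */
    memcpy(stack + new_esp, &val, 4);  /* the store may fault first... */
    *esp = new_esp;                    /* ...ESP is committed only after */
}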
2386 /* a two-step pop is necessary for precise exceptions */
2387 static TCGMemOp gen_pop_T0(DisasContext *s)
2389 TCGMemOp d_ot = mo_pushpop(s, s->dflag);
2390 TCGv addr = cpu_A0;
2392 if (CODE64(s)) {
2393 addr = cpu_regs[R_ESP];
2394 } else if (!s->ss32) {
2395 tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
2396 gen_op_addl_A0_seg(s, R_SS);
2397 } else if (s->addseg) {
2398 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
2399 gen_op_addl_A0_seg(s, R_SS);
2400 } else {
2401 tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
2404 gen_op_ld_v(s, d_ot, cpu_T[0], addr);
2405 return d_ot;
2408 static void gen_pop_update(DisasContext *s, TCGMemOp ot)
2410 gen_stack_update(s, 1 << ot);
2413 static void gen_stack_A0(DisasContext *s)
2415 gen_op_movl_A0_reg(R_ESP);
2416 if (!s->ss32)
2417 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2418 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2419 if (s->addseg)
2420 gen_op_addl_A0_seg(s, R_SS);
2423 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2424 static void gen_pusha(DisasContext *s)
2426 int i;
2427 gen_op_movl_A0_reg(R_ESP);
2428 gen_op_addl_A0_im(-8 << s->dflag);
2429 if (!s->ss32)
2430 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2431 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2432 if (s->addseg)
2433 gen_op_addl_A0_seg(s, R_SS);
2434 for(i = 0;i < 8; i++) {
2435 gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
2436 gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
2437 gen_op_addl_A0_im(1 << s->dflag);
2439 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
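/* Editor's note: the "7 - i" loop above stores EDI at the lowest address and
   EAX at the highest, the architectural PUSHA layout; the ESP value pushed
   is the pre-instruction one, since R_ESP itself is only rewritten in the
   final gen_op_mov_reg_v(). */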
2442 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2443 static void gen_popa(DisasContext *s)
2445 int i;
2446 gen_op_movl_A0_reg(R_ESP);
2447 if (!s->ss32)
2448 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2449 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2450 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
2451 if (s->addseg)
2452 gen_op_addl_A0_seg(s, R_SS);
2453 for(i = 0;i < 8; i++) {
2454 /* ESP is not reloaded */
2455 if (i != 3) {
2456 gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
2457 gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
2459 gen_op_addl_A0_im(1 << s->dflag);
2461 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
2464 static void gen_enter(DisasContext *s, int esp_addend, int level)
2466 TCGMemOp ot = mo_pushpop(s, s->dflag);
2467 int opsize = 1 << ot;
2469 level &= 0x1f;
2470 #ifdef TARGET_X86_64
2471 if (CODE64(s)) {
2472 gen_op_movl_A0_reg(R_ESP);
2473 gen_op_addq_A0_im(-opsize);
2474 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2476 /* push bp */
2477 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2478 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2479 if (level) {
2480 /* XXX: must save state */
2481 gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
2482 tcg_const_i32((ot == MO_64)),
2483 cpu_T[1]);
2485 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2486 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2487 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
2488 } else
2489 #endif
2491 gen_op_movl_A0_reg(R_ESP);
2492 gen_op_addl_A0_im(-opsize);
2493 if (!s->ss32)
2494 tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
2495 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2496 if (s->addseg)
2497 gen_op_addl_A0_seg(s, R_SS);
2498 /* push bp */
2499 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
2500 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
2501 if (level) {
2502 /* XXX: must save state */
2503 gen_helper_enter_level(cpu_env, tcg_const_i32(level),
2504 tcg_const_i32(s->dflag - 1),
2505 cpu_T[1]);
2507 gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
2508 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2509 gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
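/* Editor's sketch (standalone, the level == 0 case): the frame ENTER builds,
   mirroring the push-EBP / EBP = ESP / ESP -= locals sequence above: */
#include <stdint.h>
#include <string.h>

static void enter_level0(uint8_t *mem, uint32_t *esp, uint32_t *ebp,
                         uint32_t locals)
{
    *esp -= 4;
    memcpy(mem + *esp, ebp, 4);  /* push the old frame pointer */
    *ebp = *esp;                 /* new frame base */
    *esp -= locals;              /* reserve local storage (esp_addend) */
}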
2513 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2515 gen_update_cc_op(s);
2516 gen_jmp_im(cur_eip);
2517 gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
2518 s->is_jmp = DISAS_TB_JUMP;
2521 /* an interrupt is different from an exception because of the
2522 privilege checks */
2523 static void gen_interrupt(DisasContext *s, int intno,
2524 target_ulong cur_eip, target_ulong next_eip)
2526 gen_update_cc_op(s);
2527 gen_jmp_im(cur_eip);
2528 gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
2529 tcg_const_i32(next_eip - cur_eip));
2530 s->is_jmp = DISAS_TB_JUMP;
2533 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2535 gen_update_cc_op(s);
2536 gen_jmp_im(cur_eip);
2537 gen_helper_debug(cpu_env);
2538 s->is_jmp = DISAS_TB_JUMP;
2541 /* generate a generic end of block. A trace exception is also
2542 generated if needed */
2543 static void gen_eob(DisasContext *s)
2545 gen_update_cc_op(s);
2546 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2547 gen_helper_reset_inhibit_irq(cpu_env);
2549 if (s->tb->flags & HF_RF_MASK) {
2550 gen_helper_reset_rf(cpu_env);
2552 if (s->singlestep_enabled) {
2553 gen_helper_debug(cpu_env);
2554 } else if (s->tf) {
2555 gen_helper_single_step(cpu_env);
2556 } else {
2557 tcg_gen_exit_tb(0);
2559 s->is_jmp = DISAS_TB_JUMP;
2562 /* generate a jump to eip. No segment change must happen before, as a
2563 direct call to the next block may occur */
2564 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2566 gen_update_cc_op(s);
2567 set_cc_op(s, CC_OP_DYNAMIC);
2568 if (s->jmp_opt) {
2569 gen_goto_tb(s, tb_num, eip);
2570 s->is_jmp = DISAS_TB_JUMP;
2571 } else {
2572 gen_jmp_im(eip);
2573 gen_eob(s);
2577 static void gen_jmp(DisasContext *s, target_ulong eip)
2579 gen_jmp_tb(s, eip, 0);
2582 static inline void gen_ldq_env_A0(DisasContext *s, int offset)
2584 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2585 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2588 static inline void gen_stq_env_A0(DisasContext *s, int offset)
2590 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2591 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
2594 static inline void gen_ldo_env_A0(DisasContext *s, int offset)
2596 int mem_index = s->mem_index;
2597 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2598 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2599 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2600 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2601 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2604 static inline void gen_sto_env_A0(DisasContext *s, int offset)
2606 int mem_index = s->mem_index;
2607 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2608 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
2609 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2610 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2611 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
2614 static inline void gen_op_movo(int d_offset, int s_offset)
2616 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2617 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2618 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2619 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2622 static inline void gen_op_movq(int d_offset, int s_offset)
2624 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2625 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2628 static inline void gen_op_movl(int d_offset, int s_offset)
2630 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2631 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2634 static inline void gen_op_movq_env_0(int d_offset)
2636 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2637 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2640 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2641 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2642 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2643 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2644 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2645 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2646 TCGv_i32 val);
2647 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2648 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2649 TCGv val);
2651 #define SSE_SPECIAL ((void *)1)
2652 #define SSE_DUMMY ((void *)2)
2654 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2655 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2656 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
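/* Editor's note: each sse_op_table1 entry below holds up to four variants,
   indexed by the mandatory prefix as computed into b1 by gen_sse():
   [0] none (ps), [1] 0x66 (pd), [2] 0xF3 (ss), [3] 0xF2 (sd). */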
2658 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2659 /* 3DNow! extensions */
2660 [0x0e] = { SSE_DUMMY }, /* femms */
2661 [0x0f] = { SSE_DUMMY }, /* pf... */
2662 /* pure SSE operations */
2663 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2664 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2665 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2666 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2667 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2668 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2669 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2670 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2672 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2673 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2674 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2675 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2676 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2677 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2678 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2679 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2680 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2681 [0x51] = SSE_FOP(sqrt),
2682 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2683 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2684 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2685 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2686 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2687 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2688 [0x58] = SSE_FOP(add),
2689 [0x59] = SSE_FOP(mul),
2690 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2691 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2692 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2693 [0x5c] = SSE_FOP(sub),
2694 [0x5d] = SSE_FOP(min),
2695 [0x5e] = SSE_FOP(div),
2696 [0x5f] = SSE_FOP(max),
2698 [0xc2] = SSE_FOP(cmpeq),
2699 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2700 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2702 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2703 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2704 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2706 /* MMX ops and their SSE extensions */
2707 [0x60] = MMX_OP2(punpcklbw),
2708 [0x61] = MMX_OP2(punpcklwd),
2709 [0x62] = MMX_OP2(punpckldq),
2710 [0x63] = MMX_OP2(packsswb),
2711 [0x64] = MMX_OP2(pcmpgtb),
2712 [0x65] = MMX_OP2(pcmpgtw),
2713 [0x66] = MMX_OP2(pcmpgtl),
2714 [0x67] = MMX_OP2(packuswb),
2715 [0x68] = MMX_OP2(punpckhbw),
2716 [0x69] = MMX_OP2(punpckhwd),
2717 [0x6a] = MMX_OP2(punpckhdq),
2718 [0x6b] = MMX_OP2(packssdw),
2719 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2720 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2721 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2722 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2723 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2724 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2725 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2726 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2727 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2728 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2729 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2730 [0x74] = MMX_OP2(pcmpeqb),
2731 [0x75] = MMX_OP2(pcmpeqw),
2732 [0x76] = MMX_OP2(pcmpeql),
2733 [0x77] = { SSE_DUMMY }, /* emms */
2734 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2735 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2736 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2737 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2738 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2739 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2740 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2741 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2742 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2743 [0xd1] = MMX_OP2(psrlw),
2744 [0xd2] = MMX_OP2(psrld),
2745 [0xd3] = MMX_OP2(psrlq),
2746 [0xd4] = MMX_OP2(paddq),
2747 [0xd5] = MMX_OP2(pmullw),
2748 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2749 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2750 [0xd8] = MMX_OP2(psubusb),
2751 [0xd9] = MMX_OP2(psubusw),
2752 [0xda] = MMX_OP2(pminub),
2753 [0xdb] = MMX_OP2(pand),
2754 [0xdc] = MMX_OP2(paddusb),
2755 [0xdd] = MMX_OP2(paddusw),
2756 [0xde] = MMX_OP2(pmaxub),
2757 [0xdf] = MMX_OP2(pandn),
2758 [0xe0] = MMX_OP2(pavgb),
2759 [0xe1] = MMX_OP2(psraw),
2760 [0xe2] = MMX_OP2(psrad),
2761 [0xe3] = MMX_OP2(pavgw),
2762 [0xe4] = MMX_OP2(pmulhuw),
2763 [0xe5] = MMX_OP2(pmulhw),
2764 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2765 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2766 [0xe8] = MMX_OP2(psubsb),
2767 [0xe9] = MMX_OP2(psubsw),
2768 [0xea] = MMX_OP2(pminsw),
2769 [0xeb] = MMX_OP2(por),
2770 [0xec] = MMX_OP2(paddsb),
2771 [0xed] = MMX_OP2(paddsw),
2772 [0xee] = MMX_OP2(pmaxsw),
2773 [0xef] = MMX_OP2(pxor),
2774 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2775 [0xf1] = MMX_OP2(psllw),
2776 [0xf2] = MMX_OP2(pslld),
2777 [0xf3] = MMX_OP2(psllq),
2778 [0xf4] = MMX_OP2(pmuludq),
2779 [0xf5] = MMX_OP2(pmaddwd),
2780 [0xf6] = MMX_OP2(psadbw),
2781 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2782 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2783 [0xf8] = MMX_OP2(psubb),
2784 [0xf9] = MMX_OP2(psubw),
2785 [0xfa] = MMX_OP2(psubl),
2786 [0xfb] = MMX_OP2(psubq),
2787 [0xfc] = MMX_OP2(paddb),
2788 [0xfd] = MMX_OP2(paddw),
2789 [0xfe] = MMX_OP2(paddl),
2792 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2793 [0 + 2] = MMX_OP2(psrlw),
2794 [0 + 4] = MMX_OP2(psraw),
2795 [0 + 6] = MMX_OP2(psllw),
2796 [8 + 2] = MMX_OP2(psrld),
2797 [8 + 4] = MMX_OP2(psrad),
2798 [8 + 6] = MMX_OP2(pslld),
2799 [16 + 2] = MMX_OP2(psrlq),
2800 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2801 [16 + 6] = MMX_OP2(psllq),
2802 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
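/* Editor's note: sse_op_table2 above is indexed as group * 8 + /r, where the
   group comes from the opcode (0x71 -> word, 0x72 -> dword, 0x73 -> qword)
   and the modrm reg field selects the shift (2 = logical right,
   4 = arithmetic right, 6 = left; 3 and 7 are the XMM-only byte shifts): */
static int shift_table_index(int b, int modrm)  /* b: 0x71, 0x72 or 0x73 */
{
    return ((b - 1) & 3) * 8 + ((modrm >> 3) & 7);
}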
2805 static const SSEFunc_0_epi sse_op_table3ai[] = {
2806 gen_helper_cvtsi2ss,
2807 gen_helper_cvtsi2sd
2810 #ifdef TARGET_X86_64
2811 static const SSEFunc_0_epl sse_op_table3aq[] = {
2812 gen_helper_cvtsq2ss,
2813 gen_helper_cvtsq2sd
2815 #endif
2817 static const SSEFunc_i_ep sse_op_table3bi[] = {
2818 gen_helper_cvttss2si,
2819 gen_helper_cvtss2si,
2820 gen_helper_cvttsd2si,
2821 gen_helper_cvtsd2si
2824 #ifdef TARGET_X86_64
2825 static const SSEFunc_l_ep sse_op_table3bq[] = {
2826 gen_helper_cvttss2sq,
2827 gen_helper_cvtss2sq,
2828 gen_helper_cvttsd2sq,
2829 gen_helper_cvtsd2sq
2831 #endif
2833 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2834 SSE_FOP(cmpeq),
2835 SSE_FOP(cmplt),
2836 SSE_FOP(cmple),
2837 SSE_FOP(cmpunord),
2838 SSE_FOP(cmpneq),
2839 SSE_FOP(cmpnlt),
2840 SSE_FOP(cmpnle),
2841 SSE_FOP(cmpord),
2844 static const SSEFunc_0_epp sse_op_table5[256] = {
2845 [0x0c] = gen_helper_pi2fw,
2846 [0x0d] = gen_helper_pi2fd,
2847 [0x1c] = gen_helper_pf2iw,
2848 [0x1d] = gen_helper_pf2id,
2849 [0x8a] = gen_helper_pfnacc,
2850 [0x8e] = gen_helper_pfpnacc,
2851 [0x90] = gen_helper_pfcmpge,
2852 [0x94] = gen_helper_pfmin,
2853 [0x96] = gen_helper_pfrcp,
2854 [0x97] = gen_helper_pfrsqrt,
2855 [0x9a] = gen_helper_pfsub,
2856 [0x9e] = gen_helper_pfadd,
2857 [0xa0] = gen_helper_pfcmpgt,
2858 [0xa4] = gen_helper_pfmax,
2859 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2860 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2861 [0xaa] = gen_helper_pfsubr,
2862 [0xae] = gen_helper_pfacc,
2863 [0xb0] = gen_helper_pfcmpeq,
2864 [0xb4] = gen_helper_pfmul,
2865 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2866 [0xb7] = gen_helper_pmulhrw_mmx,
2867 [0xbb] = gen_helper_pswapd,
2868 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
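/* Editor's note: sse_op_table5 above is indexed by the 3DNow! suffix byte,
   which follows the modrm byte and displacement of a 0F 0F instruction. */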
2871 struct SSEOpHelper_epp {
2872 SSEFunc_0_epp op[2];
2873 uint32_t ext_mask;
2876 struct SSEOpHelper_eppi {
2877 SSEFunc_0_eppi op[2];
2878 uint32_t ext_mask;
2881 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2882 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2883 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2884 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2885 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2886 CPUID_EXT_PCLMULQDQ }
2887 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
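/* Editor's note: sse_op_table6 below covers the three-byte 0F 38 opcode map
   and sse_op_table7 the 0F 3A map; op[0] is the MMX form, op[1] the
   66-prefixed XMM form, and ext_mask is matched against the guest's CPUID
   feature bits before the operation is accepted. */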
2889 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2890 [0x00] = SSSE3_OP(pshufb),
2891 [0x01] = SSSE3_OP(phaddw),
2892 [0x02] = SSSE3_OP(phaddd),
2893 [0x03] = SSSE3_OP(phaddsw),
2894 [0x04] = SSSE3_OP(pmaddubsw),
2895 [0x05] = SSSE3_OP(phsubw),
2896 [0x06] = SSSE3_OP(phsubd),
2897 [0x07] = SSSE3_OP(phsubsw),
2898 [0x08] = SSSE3_OP(psignb),
2899 [0x09] = SSSE3_OP(psignw),
2900 [0x0a] = SSSE3_OP(psignd),
2901 [0x0b] = SSSE3_OP(pmulhrsw),
2902 [0x10] = SSE41_OP(pblendvb),
2903 [0x14] = SSE41_OP(blendvps),
2904 [0x15] = SSE41_OP(blendvpd),
2905 [0x17] = SSE41_OP(ptest),
2906 [0x1c] = SSSE3_OP(pabsb),
2907 [0x1d] = SSSE3_OP(pabsw),
2908 [0x1e] = SSSE3_OP(pabsd),
2909 [0x20] = SSE41_OP(pmovsxbw),
2910 [0x21] = SSE41_OP(pmovsxbd),
2911 [0x22] = SSE41_OP(pmovsxbq),
2912 [0x23] = SSE41_OP(pmovsxwd),
2913 [0x24] = SSE41_OP(pmovsxwq),
2914 [0x25] = SSE41_OP(pmovsxdq),
2915 [0x28] = SSE41_OP(pmuldq),
2916 [0x29] = SSE41_OP(pcmpeqq),
2917 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2918 [0x2b] = SSE41_OP(packusdw),
2919 [0x30] = SSE41_OP(pmovzxbw),
2920 [0x31] = SSE41_OP(pmovzxbd),
2921 [0x32] = SSE41_OP(pmovzxbq),
2922 [0x33] = SSE41_OP(pmovzxwd),
2923 [0x34] = SSE41_OP(pmovzxwq),
2924 [0x35] = SSE41_OP(pmovzxdq),
2925 [0x37] = SSE42_OP(pcmpgtq),
2926 [0x38] = SSE41_OP(pminsb),
2927 [0x39] = SSE41_OP(pminsd),
2928 [0x3a] = SSE41_OP(pminuw),
2929 [0x3b] = SSE41_OP(pminud),
2930 [0x3c] = SSE41_OP(pmaxsb),
2931 [0x3d] = SSE41_OP(pmaxsd),
2932 [0x3e] = SSE41_OP(pmaxuw),
2933 [0x3f] = SSE41_OP(pmaxud),
2934 [0x40] = SSE41_OP(pmulld),
2935 [0x41] = SSE41_OP(phminposuw),
2936 [0xdb] = AESNI_OP(aesimc),
2937 [0xdc] = AESNI_OP(aesenc),
2938 [0xdd] = AESNI_OP(aesenclast),
2939 [0xde] = AESNI_OP(aesdec),
2940 [0xdf] = AESNI_OP(aesdeclast),
2943 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2944 [0x08] = SSE41_OP(roundps),
2945 [0x09] = SSE41_OP(roundpd),
2946 [0x0a] = SSE41_OP(roundss),
2947 [0x0b] = SSE41_OP(roundsd),
2948 [0x0c] = SSE41_OP(blendps),
2949 [0x0d] = SSE41_OP(blendpd),
2950 [0x0e] = SSE41_OP(pblendw),
2951 [0x0f] = SSSE3_OP(palignr),
2952 [0x14] = SSE41_SPECIAL, /* pextrb */
2953 [0x15] = SSE41_SPECIAL, /* pextrw */
2954 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2955 [0x17] = SSE41_SPECIAL, /* extractps */
2956 [0x20] = SSE41_SPECIAL, /* pinsrb */
2957 [0x21] = SSE41_SPECIAL, /* insertps */
2958 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2959 [0x40] = SSE41_OP(dpps),
2960 [0x41] = SSE41_OP(dppd),
2961 [0x42] = SSE41_OP(mpsadbw),
2962 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2963 [0x60] = SSE42_OP(pcmpestrm),
2964 [0x61] = SSE42_OP(pcmpestri),
2965 [0x62] = SSE42_OP(pcmpistrm),
2966 [0x63] = SSE42_OP(pcmpistri),
2967 [0xdf] = AESNI_OP(aeskeygenassist),
2970 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2971 target_ulong pc_start, int rex_r)
2973 int b1, op1_offset, op2_offset, is_xmm, val;
2974 int modrm, mod, rm, reg;
2975 SSEFunc_0_epp sse_fn_epp;
2976 SSEFunc_0_eppi sse_fn_eppi;
2977 SSEFunc_0_ppi sse_fn_ppi;
2978 SSEFunc_0_eppt sse_fn_eppt;
2979 TCGMemOp ot;
2981 b &= 0xff;
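/* b1 selects the column of the SSE op tables by mandatory prefix:
   0 = none, 1 = 0x66, 2 = 0xF3 (repz), 3 = 0xF2 (repnz). */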
2982 if (s->prefix & PREFIX_DATA)
2983 b1 = 1;
2984 else if (s->prefix & PREFIX_REPZ)
2985 b1 = 2;
2986 else if (s->prefix & PREFIX_REPNZ)
2987 b1 = 3;
2988 else
2989 b1 = 0;
2990 sse_fn_epp = sse_op_table1[b][b1];
2991 if (!sse_fn_epp) {
2992 goto illegal_op;
2994 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
2995 is_xmm = 1;
2996 } else {
2997 if (b1 == 0) {
2998 /* MMX case */
2999 is_xmm = 0;
3000 } else {
3001 is_xmm = 1;
3004 /* simple MMX/SSE operation */
3005 if (s->flags & HF_TS_MASK) {
3006 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3007 return;
3009 if (s->flags & HF_EM_MASK) {
3010 illegal_op:
3011 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3012 return;
3014 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3015 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3016 goto illegal_op;
3017 if (b == 0x0e) {
3018 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3019 goto illegal_op;
3020 /* femms */
3021 gen_helper_emms(cpu_env);
3022 return;
3024 if (b == 0x77) {
3025 /* emms */
3026 gen_helper_emms(cpu_env);
3027 return;
3029 /* prepare MMX state (XXX: optimize by storing fpstt and fptags in
3030 the static cpu state) */
3031 if (!is_xmm) {
3032 gen_helper_enter_mmx(cpu_env);
3035 modrm = cpu_ldub_code(env, s->pc++);
3036 reg = ((modrm >> 3) & 7);
3037 if (is_xmm)
3038 reg |= rex_r;
3039 mod = (modrm >> 6) & 3;
3040 if (sse_fn_epp == SSE_SPECIAL) {
3041 b |= (b1 << 8);
3042 switch(b) {
3043 case 0x0e7: /* movntq */
3044 if (mod == 3)
3045 goto illegal_op;
3046 gen_lea_modrm(env, s, modrm);
3047 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3048 break;
3049 case 0x1e7: /* movntdq */
3050 case 0x02b: /* movntps */
3051 case 0x12b: /* movntpd */
3052 if (mod == 3)
3053 goto illegal_op;
3054 gen_lea_modrm(env, s, modrm);
3055 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3056 break;
3057 case 0x3f0: /* lddqu */
3058 if (mod == 3)
3059 goto illegal_op;
3060 gen_lea_modrm(env, s, modrm);
3061 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3062 break;
3063 case 0x22b: /* movntss */
3064 case 0x32b: /* movntsd */
3065 if (mod == 3)
3066 goto illegal_op;
3067 gen_lea_modrm(env, s, modrm);
3068 if (b1 & 1) {
3069 gen_stq_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3070 } else {
3071 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3072 xmm_regs[reg].XMM_L(0)));
3073 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3075 break;
3076 case 0x6e: /* movd mm, ea */
3077 #ifdef TARGET_X86_64
3078 if (s->dflag == MO_64) {
3079 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3080 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3081 } else
3082 #endif
3084 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3085 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3086 offsetof(CPUX86State,fpregs[reg].mmx));
3087 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3088 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3090 break;
3091 case 0x16e: /* movd xmm, ea */
3092 #ifdef TARGET_X86_64
3093 if (s->dflag == MO_64) {
3094 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3095 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3096 offsetof(CPUX86State,xmm_regs[reg]));
3097 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3098 } else
3099 #endif
3101 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3102 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3103 offsetof(CPUX86State,xmm_regs[reg]));
3104 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3105 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3107 break;
3108 case 0x6f: /* movq mm, ea */
3109 if (mod != 3) {
3110 gen_lea_modrm(env, s, modrm);
3111 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3112 } else {
3113 rm = (modrm & 7);
3114 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3115 offsetof(CPUX86State,fpregs[rm].mmx));
3116 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3117 offsetof(CPUX86State,fpregs[reg].mmx));
3119 break;
3120 case 0x010: /* movups */
3121 case 0x110: /* movupd */
3122 case 0x028: /* movaps */
3123 case 0x128: /* movapd */
3124 case 0x16f: /* movdqa xmm, ea */
3125 case 0x26f: /* movdqu xmm, ea */
3126 if (mod != 3) {
3127 gen_lea_modrm(env, s, modrm);
3128 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3129 } else {
3130 rm = (modrm & 7) | REX_B(s);
3131 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3132 offsetof(CPUX86State,xmm_regs[rm]));
3134 break;
3135 case 0x210: /* movss xmm, ea */
3136 if (mod != 3) {
3137 gen_lea_modrm(env, s, modrm);
3138 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3139 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3140 tcg_gen_movi_tl(cpu_T[0], 0);
3141 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3142 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3143 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3144 } else {
3145 rm = (modrm & 7) | REX_B(s);
3146 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3147 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3149 break;
3150 case 0x310: /* movsd xmm, ea */
3151 if (mod != 3) {
3152 gen_lea_modrm(env, s, modrm);
3153 gen_ldq_env_A0(s, offsetof(CPUX86State,
3154 xmm_regs[reg].XMM_Q(0)));
3155 tcg_gen_movi_tl(cpu_T[0], 0);
3156 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3157 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3158 } else {
3159 rm = (modrm & 7) | REX_B(s);
3160 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3161 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3163 break;
3164 case 0x012: /* movlps */
3165 case 0x112: /* movlpd */
3166 if (mod != 3) {
3167 gen_lea_modrm(env, s, modrm);
3168 gen_ldq_env_A0(s, offsetof(CPUX86State,
3169 xmm_regs[reg].XMM_Q(0)));
3170 } else {
3171 /* movhlps */
3172 rm = (modrm & 7) | REX_B(s);
3173 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3174 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3176 break;
3177 case 0x212: /* movsldup */
3178 if (mod != 3) {
3179 gen_lea_modrm(env, s, modrm);
3180 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3181 } else {
3182 rm = (modrm & 7) | REX_B(s);
3183 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3184 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3185 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3186 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3188 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3189 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3190 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3191 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3192 break;
3193 case 0x312: /* movddup */
3194 if (mod != 3) {
3195 gen_lea_modrm(env, s, modrm);
3196 gen_ldq_env_A0(s, offsetof(CPUX86State,
3197 xmm_regs[reg].XMM_Q(0)));
3198 } else {
3199 rm = (modrm & 7) | REX_B(s);
3200 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3201 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3203 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3204 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3205 break;
3206 case 0x016: /* movhps */
3207 case 0x116: /* movhpd */
3208 if (mod != 3) {
3209 gen_lea_modrm(env, s, modrm);
3210 gen_ldq_env_A0(s, offsetof(CPUX86State,
3211 xmm_regs[reg].XMM_Q(1)));
3212 } else {
3213 /* movlhps */
3214 rm = (modrm & 7) | REX_B(s);
3215 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3216 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3218 break;
3219 case 0x216: /* movshdup */
3220 if (mod != 3) {
3221 gen_lea_modrm(env, s, modrm);
3222 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3223 } else {
3224 rm = (modrm & 7) | REX_B(s);
3225 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3226 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3227 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3228 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3230 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3231 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3232 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3233 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3234 break;
3235 case 0x178:
3236 case 0x378:
3238 int bit_index, field_length;
3240 if (b1 == 1 && reg != 0)
3241 goto illegal_op;
3242 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3243 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3244 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3245 offsetof(CPUX86State,xmm_regs[reg]));
3246 if (b1 == 1)
3247 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3248 tcg_const_i32(bit_index),
3249 tcg_const_i32(field_length));
3250 else
3251 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3252 tcg_const_i32(bit_index),
3253 tcg_const_i32(field_length));
3255 break;
3256 case 0x7e: /* movd ea, mm */
3257 #ifdef TARGET_X86_64
3258 if (s->dflag == MO_64) {
3259 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3260 offsetof(CPUX86State,fpregs[reg].mmx));
3261 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3262 } else
3263 #endif
3265 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3266 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3267 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3269 break;
3270 case 0x17e: /* movd ea, xmm */
3271 #ifdef TARGET_X86_64
3272 if (s->dflag == MO_64) {
3273 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3274 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3275 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3276 } else
3277 #endif
3279 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3280 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3281 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3283 break;
3284 case 0x27e: /* movq xmm, ea */
3285 if (mod != 3) {
3286 gen_lea_modrm(env, s, modrm);
3287 gen_ldq_env_A0(s, offsetof(CPUX86State,
3288 xmm_regs[reg].XMM_Q(0)));
3289 } else {
3290 rm = (modrm & 7) | REX_B(s);
3291 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3292 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3294 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3295 break;
3296 case 0x7f: /* movq ea, mm */
3297 if (mod != 3) {
3298 gen_lea_modrm(env, s, modrm);
3299 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3300 } else {
3301 rm = (modrm & 7);
3302 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3303 offsetof(CPUX86State,fpregs[reg].mmx));
3305 break;
3306 case 0x011: /* movups */
3307 case 0x111: /* movupd */
3308 case 0x029: /* movaps */
3309 case 0x129: /* movapd */
3310 case 0x17f: /* movdqa ea, xmm */
3311 case 0x27f: /* movdqu ea, xmm */
3312 if (mod != 3) {
3313 gen_lea_modrm(env, s, modrm);
3314 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3315 } else {
3316 rm = (modrm & 7) | REX_B(s);
3317 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3318 offsetof(CPUX86State,xmm_regs[reg]));
3320 break;
3321 case 0x211: /* movss ea, xmm */
3322 if (mod != 3) {
3323 gen_lea_modrm(env, s, modrm);
3324 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3325 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3326 } else {
3327 rm = (modrm & 7) | REX_B(s);
3328 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3329 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3331 break;
3332 case 0x311: /* movsd ea, xmm */
3333 if (mod != 3) {
3334 gen_lea_modrm(env, s, modrm);
3335 gen_stq_env_A0(s, offsetof(CPUX86State,
3336 xmm_regs[reg].XMM_Q(0)));
3337 } else {
3338 rm = (modrm & 7) | REX_B(s);
3339 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3340 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3342 break;
3343 case 0x013: /* movlps */
3344 case 0x113: /* movlpd */
3345 if (mod != 3) {
3346 gen_lea_modrm(env, s, modrm);
3347 gen_stq_env_A0(s, offsetof(CPUX86State,
3348 xmm_regs[reg].XMM_Q(0)));
3349 } else {
3350 goto illegal_op;
3352 break;
3353 case 0x017: /* movhps */
3354 case 0x117: /* movhpd */
3355 if (mod != 3) {
3356 gen_lea_modrm(env, s, modrm);
3357 gen_stq_env_A0(s, offsetof(CPUX86State,
3358 xmm_regs[reg].XMM_Q(1)));
3359 } else {
3360 goto illegal_op;
3362 break;
3363 case 0x71: /* shift mm, im */
3364 case 0x72:
3365 case 0x73:
3366 case 0x171: /* shift xmm, im */
3367 case 0x172:
3368 case 0x173:
3369 if (b1 >= 2) {
3370 goto illegal_op;
3372 val = cpu_ldub_code(env, s->pc++);
3373 if (is_xmm) {
3374 tcg_gen_movi_tl(cpu_T[0], val);
3375 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3376 tcg_gen_movi_tl(cpu_T[0], 0);
3377 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3378 op1_offset = offsetof(CPUX86State,xmm_t0);
3379 } else {
3380 tcg_gen_movi_tl(cpu_T[0], val);
3381 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3382 tcg_gen_movi_tl(cpu_T[0], 0);
3383 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3384 op1_offset = offsetof(CPUX86State,mmx_t0);
3386 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3387 (((modrm >> 3)) & 7)][b1];
3388 if (!sse_fn_epp) {
3389 goto illegal_op;
3391 if (is_xmm) {
3392 rm = (modrm & 7) | REX_B(s);
3393 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3394 } else {
3395 rm = (modrm & 7);
3396 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3398 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3399 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3400 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3401 break;
3402 case 0x050: /* movmskps */
3403 rm = (modrm & 7) | REX_B(s);
3404 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3405 offsetof(CPUX86State,xmm_regs[rm]));
3406 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3407 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3408 break;
3409 case 0x150: /* movmskpd */
3410 rm = (modrm & 7) | REX_B(s);
3411 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3412 offsetof(CPUX86State,xmm_regs[rm]));
3413 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3414 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3415 break;
3416 case 0x02a: /* cvtpi2ps */
3417 case 0x12a: /* cvtpi2pd */
3418 gen_helper_enter_mmx(cpu_env);
3419 if (mod != 3) {
3420 gen_lea_modrm(env, s, modrm);
3421 op2_offset = offsetof(CPUX86State,mmx_t0);
3422 gen_ldq_env_A0(s, op2_offset);
3423 } else {
3424 rm = (modrm & 7);
3425 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3427 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3428 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3429 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3430 switch(b >> 8) {
3431 case 0x0:
3432 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3433 break;
3434 default:
3435 case 0x1:
3436 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3437 break;
3439 break;
3440 case 0x22a: /* cvtsi2ss */
3441 case 0x32a: /* cvtsi2sd */
3442 ot = mo_64_32(s->dflag);
3443 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3444 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3445 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3446 if (ot == MO_32) {
3447 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3448 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3449 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3450 } else {
3451 #ifdef TARGET_X86_64
3452 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3453 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3454 #else
3455 goto illegal_op;
3456 #endif
3458 break;
3459 case 0x02c: /* cvttps2pi */
3460 case 0x12c: /* cvttpd2pi */
3461 case 0x02d: /* cvtps2pi */
3462 case 0x12d: /* cvtpd2pi */
3463 gen_helper_enter_mmx(cpu_env);
3464 if (mod != 3) {
3465 gen_lea_modrm(env, s, modrm);
3466 op2_offset = offsetof(CPUX86State,xmm_t0);
3467 gen_ldo_env_A0(s, op2_offset);
3468 } else {
3469 rm = (modrm & 7) | REX_B(s);
3470 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3472 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3473 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3474 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3475 switch(b) {
3476 case 0x02c:
3477 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3478 break;
3479 case 0x12c:
3480 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3481 break;
3482 case 0x02d:
3483 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3484 break;
3485 case 0x12d:
3486 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3487 break;
3489 break;
3490 case 0x22c: /* cvttss2si */
3491 case 0x32c: /* cvttsd2si */
3492 case 0x22d: /* cvtss2si */
3493 case 0x32d: /* cvtsd2si */
3494 ot = mo_64_32(s->dflag);
3495 if (mod != 3) {
3496 gen_lea_modrm(env, s, modrm);
3497 if ((b >> 8) & 1) {
3498 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
3499 } else {
3500 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3501 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3503 op2_offset = offsetof(CPUX86State,xmm_t0);
3504 } else {
3505 rm = (modrm & 7) | REX_B(s);
3506 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3508 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3509 if (ot == MO_32) {
3510 SSEFunc_i_ep sse_fn_i_ep =
3511 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3512 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3513 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3514 } else {
3515 #ifdef TARGET_X86_64
3516 SSEFunc_l_ep sse_fn_l_ep =
3517 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3518 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3519 #else
3520 goto illegal_op;
3521 #endif
3523 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3524 break;
3525 case 0xc4: /* pinsrw */
3526 case 0x1c4:
3527 s->rip_offset = 1;
3528 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3529 val = cpu_ldub_code(env, s->pc++);
3530 if (b1) {
3531 val &= 7;
3532 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3533 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3534 } else {
3535 val &= 3;
3536 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3537 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3539 break;
3540 case 0xc5: /* pextrw */
3541 case 0x1c5:
3542 if (mod != 3)
3543 goto illegal_op;
3544 ot = mo_64_32(s->dflag);
3545 val = cpu_ldub_code(env, s->pc++);
3546 if (b1) {
3547 val &= 7;
3548 rm = (modrm & 7) | REX_B(s);
3549 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3550 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3551 } else {
3552 val &= 3;
3553 rm = (modrm & 7);
3554 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3555 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3557 reg = ((modrm >> 3) & 7) | rex_r;
3558 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3559 break;
3560 case 0x1d6: /* movq ea, xmm */
3561 if (mod != 3) {
3562 gen_lea_modrm(env, s, modrm);
3563 gen_stq_env_A0(s, offsetof(CPUX86State,
3564 xmm_regs[reg].XMM_Q(0)));
3565 } else {
3566 rm = (modrm & 7) | REX_B(s);
3567 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3568 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3569 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3571 break;
3572 case 0x2d6: /* movq2dq */
3573 gen_helper_enter_mmx(cpu_env);
3574 rm = (modrm & 7);
3575 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3576 offsetof(CPUX86State,fpregs[rm].mmx));
3577 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3578 break;
3579 case 0x3d6: /* movdq2q */
3580 gen_helper_enter_mmx(cpu_env);
3581 rm = (modrm & 7) | REX_B(s);
3582 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3583 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3584 break;
3585 case 0xd7: /* pmovmskb */
3586 case 0x1d7:
3587 if (mod != 3)
3588 goto illegal_op;
3589 if (b1) {
3590 rm = (modrm & 7) | REX_B(s);
3591 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3592 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3593 } else {
3594 rm = (modrm & 7);
3595 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3596 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3598 reg = ((modrm >> 3) & 7) | rex_r;
3599 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3600 break;
3602 case 0x138:
3603 case 0x038:
3604 b = modrm;
3605 if ((b & 0xf0) == 0xf0) {
3606 goto do_0f_38_fx;
3608 modrm = cpu_ldub_code(env, s->pc++);
3609 rm = modrm & 7;
3610 reg = ((modrm >> 3) & 7) | rex_r;
3611 mod = (modrm >> 6) & 3;
3612 if (b1 >= 2) {
3613 goto illegal_op;
3616 sse_fn_epp = sse_op_table6[b].op[b1];
3617 if (!sse_fn_epp) {
3618 goto illegal_op;
3620 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3621 goto illegal_op;
3623 if (b1) {
3624 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3625 if (mod == 3) {
3626 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3627 } else {
3628 op2_offset = offsetof(CPUX86State,xmm_t0);
3629 gen_lea_modrm(env, s, modrm);
3630 switch (b) {
3631 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3632 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3633 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3634 gen_ldq_env_A0(s, op2_offset +
3635 offsetof(XMMReg, XMM_Q(0)));
3636 break;
3637 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3638 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3639 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3640 s->mem_index, MO_LEUL);
3641 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3642 offsetof(XMMReg, XMM_L(0)));
3643 break;
3644 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3645 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3646 s->mem_index, MO_LEUW);
3647 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3648 offsetof(XMMReg, XMM_W(0)));
3649 break;
3650 case 0x2a: /* movntdqa */
3651 gen_ldo_env_A0(s, op1_offset);
3652 return;
3653 default:
3654 gen_ldo_env_A0(s, op2_offset);
3657 } else {
3658 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3659 if (mod == 3) {
3660 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3661 } else {
3662 op2_offset = offsetof(CPUX86State,mmx_t0);
3663 gen_lea_modrm(env, s, modrm);
3664 gen_ldq_env_A0(s, op2_offset);
3667 if (sse_fn_epp == SSE_SPECIAL) {
3668 goto illegal_op;
3671 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3672 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3673 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3675 if (b == 0x17) {
3676 set_cc_op(s, CC_OP_EFLAGS);
3678 break;
3680 case 0x238:
3681 case 0x338:
3682 do_0f_38_fx:
3683 /* Various integer extensions at 0f 38 f[0-f]. */
3684 b = modrm | (b1 << 8);
3685 modrm = cpu_ldub_code(env, s->pc++);
3686 reg = ((modrm >> 3) & 7) | rex_r;
3688 switch (b) {
3689 case 0x3f0: /* crc32 Gd,Eb */
3690 case 0x3f1: /* crc32 Gd,Ey */
3691 do_crc32:
3692 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3693 goto illegal_op;
3695 if ((b & 0xff) == 0xf0) {
3696 ot = MO_8;
3697 } else if (s->dflag != MO_64) {
3698 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3699 } else {
3700 ot = MO_64;
3703 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3704 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3705 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3706 cpu_T[0], tcg_const_i32(8 << ot));
3708 ot = mo_64_32(s->dflag);
3709 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3710 break;
3712 case 0x1f0: /* crc32 or movbe */
3713 case 0x1f1:
3714 /* For these insns, the f3 prefix is supposed to have priority
3715 over the 66 prefix, but that's not what we implemented above
3716 when setting b1. */
3717 if (s->prefix & PREFIX_REPNZ) {
3718 goto do_crc32;
3720 /* FALLTHRU */
3721 case 0x0f0: /* movbe Gy,My */
3722 case 0x0f1: /* movbe My,Gy */
3723 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3724 goto illegal_op;
3726 if (s->dflag != MO_64) {
3727 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3728 } else {
3729 ot = MO_64;
3732 gen_lea_modrm(env, s, modrm);
3733 if ((b & 1) == 0) {
3734 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3735 s->mem_index, ot | MO_BE);
3736 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3737 } else {
3738 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3739 s->mem_index, ot | MO_BE);
3741 break;
3743 case 0x0f2: /* andn Gy, By, Ey */
3744 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3745 || !(s->prefix & PREFIX_VEX)
3746 || s->vex_l != 0) {
3747 goto illegal_op;
3749 ot = mo_64_32(s->dflag);
3750 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3751 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3752 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3753 gen_op_update1_cc();
3754 set_cc_op(s, CC_OP_LOGICB + ot);
3755 break;
3757 case 0x0f7: /* bextr Gy, Ey, By */
3758 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3759 || !(s->prefix & PREFIX_VEX)
3760 || s->vex_l != 0) {
3761 goto illegal_op;
3763 ot = mo_64_32(s->dflag);
3765 TCGv bound, zero;
3767 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3768 /* Extract START, and shift the operand.
3769 Shifts larger than operand size get zeros. */
3770 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3771 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3773 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3774 zero = tcg_const_tl(0);
3775 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3776 cpu_T[0], zero);
3777 tcg_temp_free(zero);
3779 /* Extract the LEN into a mask. Lengths larger than
3780 operand size get all ones. */
3781 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3782 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3783 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3784 cpu_A0, bound);
3785 tcg_temp_free(bound);
3786 tcg_gen_movi_tl(cpu_T[1], 1);
3787 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3788 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3789 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3791 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3792 gen_op_update1_cc();
3793 set_cc_op(s, CC_OP_LOGICB + ot);
3795 break;
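/* Editor's worked example: for BEXTR with By = 0x0508 (START = 8, LEN = 5),
   the code above computes (Ey >> 8) & ((1 << 5) - 1), extracting a 5-bit
   field that starts at bit 8. */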
3797 case 0x0f5: /* bzhi Gy, Ey, By */
3798 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3799 || !(s->prefix & PREFIX_VEX)
3800 || s->vex_l != 0) {
3801 goto illegal_op;
3803 ot = mo_64_32(s->dflag);
3804 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3805 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3807 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3808 /* Note that since we're using BMILG (in order to get O
3809 cleared) we need to store the inverse into C. */
3810 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3811 cpu_T[1], bound);
3812 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3813 bound, bound, cpu_T[1]);
3814 tcg_temp_free(bound);
3816 tcg_gen_movi_tl(cpu_A0, -1);
3817 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3818 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3819 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3820 gen_op_update1_cc();
3821 set_cc_op(s, CC_OP_BMILGB + ot);
3822 break;
3824 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3825 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3826 || !(s->prefix & PREFIX_VEX)
3827 || s->vex_l != 0) {
3828 goto illegal_op;
3830 ot = mo_64_32(s->dflag);
3831 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3832 switch (ot) {
3833 default:
3834 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3835 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3836 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3837 cpu_tmp2_i32, cpu_tmp3_i32);
3838 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3839 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3840 break;
3841 #ifdef TARGET_X86_64
3842 case MO_64:
3843 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
3844 cpu_T[0], cpu_regs[R_EDX]);
3845 break;
3846 #endif
3848 break;
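/* Editor's sketch (standalone): MULX multiplies EDX/RDX by the source
   without touching the flags; as emitted above, the low half goes to the
   vvvv register and the high half to the modrm reg field: */
#include <stdint.h>

static void mulx32(uint32_t edx, uint32_t src, uint32_t *hi, uint32_t *lo)
{
    uint64_t p = (uint64_t)edx * src;
    *lo = (uint32_t)p;           /* -> By (the vvvv register) */
    *hi = (uint32_t)(p >> 32);   /* -> Gy (the modrm reg field) */
}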
3850 case 0x3f5: /* pdep Gy, By, Ey */
3851 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3852 || !(s->prefix & PREFIX_VEX)
3853 || s->vex_l != 0) {
3854 goto illegal_op;
3856 ot = mo_64_32(s->dflag);
3857 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3858 /* Note that by zero-extending the mask operand, we
3859 automatically handle zero-extending the result. */
3860 if (ot == MO_64) {
3861 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3862 } else {
3863 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3865 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3866 break;
3868 case 0x2f5: /* pext Gy, By, Ey */
3869 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3870 || !(s->prefix & PREFIX_VEX)
3871 || s->vex_l != 0) {
3872 goto illegal_op;
3874 ot = mo_64_32(s->dflag);
3875 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3876 /* Note that by zero-extending the mask operand, we
3877 automatically handle zero-extending the result. */
3878 if (ot == MO_64) {
3879 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3880 } else {
3881 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3883 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3884 break;
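/* Editor's sketch (standalone): the bit-scatter/gather semantics behind
   gen_helper_pdep()/gen_helper_pext() above.  PDEP deposits successive low
   bits of src at the set-bit positions of mask; PEXT is the inverse. */
#include <stdint.h>

static uint32_t pdep32(uint32_t src, uint32_t mask)
{
    uint32_t out = 0, bb = 1;
    while (mask) {
        uint32_t lsb = mask & -mask;  /* lowest set bit of the mask */
        if (src & bb) {
            out |= lsb;               /* deposit the next source bit here */
        }
        mask ^= lsb;
        bb <<= 1;
    }
    return out;
}

static uint32_t pext32(uint32_t src, uint32_t mask)
{
    uint32_t out = 0, bb = 1;
    while (mask) {
        uint32_t lsb = mask & -mask;
        if (src & lsb) {
            out |= bb;                /* gather this bit into the next slot */
        }
        mask ^= lsb;
        bb <<= 1;
    }
    return out;
}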
3886 case 0x1f6: /* adcx Gy, Ey */
3887 case 0x2f6: /* adox Gy, Ey */
3888 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3889 goto illegal_op;
3890 } else {
3891 TCGv carry_in, carry_out, zero;
3892 int end_op;
3894 ot = mo_64_32(s->dflag);
3895 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3897 /* Re-use the carry-out from a previous round. */
3898 TCGV_UNUSED(carry_in);
3899 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3900 switch (s->cc_op) {
3901 case CC_OP_ADCX:
3902 if (b == 0x1f6) {
3903 carry_in = cpu_cc_dst;
3904 end_op = CC_OP_ADCX;
3905 } else {
3906 end_op = CC_OP_ADCOX;
3908 break;
3909 case CC_OP_ADOX:
3910 if (b == 0x1f6) {
3911 end_op = CC_OP_ADCOX;
3912 } else {
3913 carry_in = cpu_cc_src2;
3914 end_op = CC_OP_ADOX;
3916 break;
3917 case CC_OP_ADCOX:
3918 end_op = CC_OP_ADCOX;
3919 carry_in = carry_out;
3920 break;
3921 default:
3922 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3923 break;
3925 /* If we can't reuse carry-out, get it out of EFLAGS. */
3926 if (TCGV_IS_UNUSED(carry_in)) {
3927 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3928 gen_compute_eflags(s);
3930 carry_in = cpu_tmp0;
3931 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3932 ctz32(b == 0x1f6 ? CC_C : CC_O));
3933 tcg_gen_andi_tl(carry_in, carry_in, 1);
3936 switch (ot) {
3937 #ifdef TARGET_X86_64
3938 case MO_32:
3939 /* If we know TL is 64-bit, and we want a 32-bit
3940 result, just do everything in 64-bit arithmetic. */
3941 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3942 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
3943 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
3944 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
3945 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
3946 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
3947 break;
3948 #endif
3949 default:
3950 /* Otherwise compute the carry-out in two steps. */
3951 zero = tcg_const_tl(0);
3952 tcg_gen_add2_tl(cpu_T[0], carry_out,
3953 cpu_T[0], zero,
3954 carry_in, zero);
3955 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3956 cpu_regs[reg], carry_out,
3957 cpu_T[0], zero);
3958 tcg_temp_free(zero);
3959 break;
3961 set_cc_op(s, end_op);
3963 break;
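/* Editor's note: ADCX and ADOX above implement two independent carry chains,
   through CF and OF respectively.  The CC_OP_ADCX/ADOX/ADCOX bookkeeping
   lets code that interleaves the two instructions reuse the live carry-out
   directly instead of materializing EFLAGS between every step. */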
3965 case 0x1f7: /* shlx Gy, Ey, By */
3966 case 0x2f7: /* sarx Gy, Ey, By */
3967 case 0x3f7: /* shrx Gy, Ey, By */
3968 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3969 || !(s->prefix & PREFIX_VEX)
3970 || s->vex_l != 0) {
3971 goto illegal_op;
3973 ot = mo_64_32(s->dflag);
3974 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3975 if (ot == MO_64) {
3976 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
3977 } else {
3978 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
3980 if (b == 0x1f7) {
3981 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3982 } else if (b == 0x2f7) {
3983 if (ot != MO_64) {
3984 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3986 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3987 } else {
3988 if (ot != MO_64) {
3989 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3991 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3993 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3994 break;
3996 case 0x0f3:
3997 case 0x1f3:
3998 case 0x2f3:
3999 case 0x3f3: /* Group 17 */
4000 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4001 || !(s->prefix & PREFIX_VEX)
4002 || s->vex_l != 0) {
4003 goto illegal_op;
4005 ot = mo_64_32(s->dflag);
4006 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4008 switch (reg & 7) {
4009 case 1: /* blsr By,Ey: clear the lowest set bit */
4010 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4011 tcg_gen_subi_tl(cpu_T[1], cpu_T[0], 1);
tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4012 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4013 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4014 set_cc_op(s, CC_OP_BMILGB + ot);
4015 break;
4017 case 2: /* blsmsk By,Ey: mask up to and including the lowest set bit */
4018 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4019 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4020 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4021 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4022 set_cc_op(s, CC_OP_BMILGB + ot);
4023 break;
4025 case 3: /* blsi By, Ey: isolate the lowest set bit */
4026 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4027 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4028 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4029 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4030 set_cc_op(s, CC_OP_BMILGB + ot);
4031 break;
4033 default:
4034 goto illegal_op;
4036 break;
4038 default:
4039 goto illegal_op;
4041 break;
4043 case 0x03a:
4044 case 0x13a:
4045 b = modrm;
4046 modrm = cpu_ldub_code(env, s->pc++);
4047 rm = modrm & 7;
4048 reg = ((modrm >> 3) & 7) | rex_r;
4049 mod = (modrm >> 6) & 3;
4050 if (b1 >= 2) {
4051 goto illegal_op;
4054 sse_fn_eppi = sse_op_table7[b].op[b1];
4055 if (!sse_fn_eppi) {
4056 goto illegal_op;
4057 }
4058 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4059 goto illegal_op;
4061 if (sse_fn_eppi == SSE_SPECIAL) {
4062 ot = mo_64_32(s->dflag);
4063 rm = (modrm & 7) | REX_B(s);
4064 if (mod != 3)
4065 gen_lea_modrm(env, s, modrm);
4066 reg = ((modrm >> 3) & 7) | rex_r;
4067 val = cpu_ldub_code(env, s->pc++);
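/* The immediate byte selects the vector lane; the masks below (15, 7,
   3, 1) wrap it to the number of byte/word/dword/qword lanes in an
   XMM register. */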
4068 switch (b) {
4069 case 0x14: /* pextrb */
4070 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4071 xmm_regs[reg].XMM_B(val & 15)));
4072 if (mod == 3) {
4073 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4074 } else {
4075 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4076 s->mem_index, MO_UB);
4077 }
4078 break;
4079 case 0x15: /* pextrw */
4080 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4081 xmm_regs[reg].XMM_W(val & 7)));
4082 if (mod == 3) {
4083 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4084 } else {
4085 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4086 s->mem_index, MO_LEUW);
4087 }
4088 break;
4089 case 0x16:
4090 if (ot == MO_32) { /* pextrd */
4091 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4092 offsetof(CPUX86State,
4093 xmm_regs[reg].XMM_L(val & 3)));
4094 if (mod == 3) {
4095 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4096 } else {
4097 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4098 s->mem_index, MO_LEUL);
4099 }
4100 } else { /* pextrq */
4101 #ifdef TARGET_X86_64
4102 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4103 offsetof(CPUX86State,
4104 xmm_regs[reg].XMM_Q(val & 1)));
4105 if (mod == 3) {
4106 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4107 } else {
4108 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4109 s->mem_index, MO_LEQ);
4110 }
4111 #else
4112 goto illegal_op;
4113 #endif
4114 }
4115 break;
4116 case 0x17: /* extractps */
4117 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4118 xmm_regs[reg].XMM_L(val & 3)));
4119 if (mod == 3) {
4120 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4121 } else {
4122 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4123 s->mem_index, MO_LEUL);
4124 }
4125 break;
4126 case 0x20: /* pinsrb */
4127 if (mod == 3) {
4128 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
4129 } else {
4130 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4131 s->mem_index, MO_UB);
4132 }
4133 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4134 xmm_regs[reg].XMM_B(val & 15)));
4135 break;
4136 case 0x21: /* insertps */
4137 if (mod == 3) {
4138 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4139 offsetof(CPUX86State,xmm_regs[rm]
4140 .XMM_L((val >> 6) & 3)));
4141 } else {
4142 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4143 s->mem_index, MO_LEUL);
4144 }
4145 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4146 offsetof(CPUX86State,xmm_regs[reg]
4147 .XMM_L((val >> 4) & 3)));
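/* Bits 3:0 of the immediate are INSERTPS's zmask: each set bit zeroes
   the corresponding dword of the destination. */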
4148 if ((val >> 0) & 1)
4149 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4150 cpu_env, offsetof(CPUX86State,
4151 xmm_regs[reg].XMM_L(0)));
4152 if ((val >> 1) & 1)
4153 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4154 cpu_env, offsetof(CPUX86State,
4155 xmm_regs[reg].XMM_L(1)));
4156 if ((val >> 2) & 1)
4157 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4158 cpu_env, offsetof(CPUX86State,
4159 xmm_regs[reg].XMM_L(2)));
4160 if ((val >> 3) & 1)
4161 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4162 cpu_env, offsetof(CPUX86State,
4163 xmm_regs[reg].XMM_L(3)));
4164 break;
4165 case 0x22:
4166 if (ot == MO_32) { /* pinsrd */
4167 if (mod == 3) {
4168 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4169 } else {
4170 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4171 s->mem_index, MO_LEUL);
4172 }
4173 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4174 offsetof(CPUX86State,
4175 xmm_regs[reg].XMM_L(val & 3)));
4176 } else { /* pinsrq */
4177 #ifdef TARGET_X86_64
4178 if (mod == 3) {
4179 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4180 } else {
4181 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4182 s->mem_index, MO_LEQ);
4183 }
4184 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4185 offsetof(CPUX86State,
4186 xmm_regs[reg].XMM_Q(val & 1)));
4187 #else
4188 goto illegal_op;
4189 #endif
4190 }
4191 break;
4192 }
4193 return;
4194 }
4196 if (b1) {
4197 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4198 if (mod == 3) {
4199 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4200 } else {
4201 op2_offset = offsetof(CPUX86State,xmm_t0);
4202 gen_lea_modrm(env, s, modrm);
4203 gen_ldo_env_A0(s, op2_offset);
4204 }
4205 } else {
4206 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4207 if (mod == 3) {
4208 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4209 } else {
4210 op2_offset = offsetof(CPUX86State,mmx_t0);
4211 gen_lea_modrm(env, s, modrm);
4212 gen_ldq_env_A0(s, op2_offset);
4213 }
4214 }
4215 val = cpu_ldub_code(env, s->pc++);
4217 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4218 set_cc_op(s, CC_OP_EFLAGS);
4220 if (s->dflag == MO_64) {
4221 /* The helper must use entire 64-bit gp registers */
4222 val |= 1 << 8;
4223 }
4224 }
4226 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4227 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4228 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4229 break;
4231 case 0x33a:
4232 /* Various integer extensions at 0f 3a f[0-f]. */
4233 b = modrm | (b1 << 8);
4234 modrm = cpu_ldub_code(env, s->pc++);
4235 reg = ((modrm >> 3) & 7) | rex_r;
4237 switch (b) {
4238 case 0x3f0: /* rorx Gy,Ey, Ib */
4239 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4240 || !(s->prefix & PREFIX_VEX)
4241 || s->vex_l != 0) {
4242 goto illegal_op;
4243 }
4244 ot = mo_64_32(s->dflag);
4245 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4246 b = cpu_ldub_code(env, s->pc++);
4247 if (ot == MO_64) {
4248 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4249 } else {
4250 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4251 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4252 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4253 }
4254 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4255 break;
4257 default:
4258 goto illegal_op;
4259 }
4260 break;
4262 default:
4263 goto illegal_op;
4264 }
4265 } else {
4266 /* generic MMX or SSE operation */
4267 switch(b) {
4268 case 0x70: /* pshufx insn */
4269 case 0xc6: /* pshufx insn */
4270 case 0xc2: /* compare insns */
4271 s->rip_offset = 1;
4272 break;
4273 default:
4274 break;
4275 }
4276 if (is_xmm) {
4277 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4278 if (mod != 3) {
4279 int sz = 4;
4281 gen_lea_modrm(env, s, modrm);
4282 op2_offset = offsetof(CPUX86State,xmm_t0);
4284 switch (b) {
4285 case 0x50 ... 0x5a:
4286 case 0x5c ... 0x5f:
4287 case 0xc2:
4288 /* Most sse scalar operations. */
4289 if (b1 == 2) {
4290 sz = 2;
4291 } else if (b1 == 3) {
4292 sz = 3;
4293 }
4294 break;
4296 case 0x2e: /* ucomis[sd] */
4297 case 0x2f: /* comis[sd] */
4298 if (b1 == 0) {
4299 sz = 2;
4300 } else {
4301 sz = 3;
4302 }
4303 break;
4304 }
4306 switch (sz) {
4307 case 2:
4308 /* 32 bit access */
4309 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4310 tcg_gen_st32_tl(cpu_T[0], cpu_env,
4311 offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4312 break;
4313 case 3:
4314 /* 64 bit access */
4315 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0)));
4316 break;
4317 default:
4318 /* 128 bit access */
4319 gen_ldo_env_A0(s, op2_offset);
4320 break;
4321 }
4322 } else {
4323 rm = (modrm & 7) | REX_B(s);
4324 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4325 }
4326 } else {
4327 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4328 if (mod != 3) {
4329 gen_lea_modrm(env, s, modrm);
4330 op2_offset = offsetof(CPUX86State,mmx_t0);
4331 gen_ldq_env_A0(s, op2_offset);
4332 } else {
4333 rm = (modrm & 7);
4334 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4335 }
4336 }
4337 switch(b) {
4338 case 0x0f: /* 3DNow! data insns */
4339 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4340 goto illegal_op;
4341 val = cpu_ldub_code(env, s->pc++);
4342 sse_fn_epp = sse_op_table5[val];
4343 if (!sse_fn_epp) {
4344 goto illegal_op;
4345 }
4346 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4347 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4348 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4349 break;
4350 case 0x70: /* pshufx insn */
4351 case 0xc6: /* pshufx insn */
4352 val = cpu_ldub_code(env, s->pc++);
4353 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4354 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4355 /* XXX: introduce a new table? */
4356 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4357 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4358 break;
4359 case 0xc2:
4360 /* compare insns */
4361 val = cpu_ldub_code(env, s->pc++);
4362 if (val >= 8)
4363 goto illegal_op;
4364 sse_fn_epp = sse_op_table4[val][b1];
4366 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4367 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4368 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4369 break;
4370 case 0xf7:
4371 /* maskmov: we must prepare A0 first */
4372 if (mod != 3)
4373 goto illegal_op;
4374 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4375 gen_extu(s->aflag, cpu_A0);
4376 gen_add_A0_ds_seg(s);
4378 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4379 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4380 /* XXX: introduce a new table? */
4381 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4382 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4383 break;
4384 default:
4385 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4386 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4387 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4388 break;
4389 }
4390 if (b == 0x2e || b == 0x2f) {
4391 set_cc_op(s, CC_OP_EFLAGS);
4392 }
4393 }
4394 }
4396 /* convert one instruction. s->is_jmp is set if the translation must
4397 be stopped. Return the next pc value */
4398 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4399 target_ulong pc_start)
4400 {
4401 int b, prefixes;
4402 int shift;
4403 TCGMemOp ot, aflag, dflag;
4404 int modrm, reg, rm, mod, op, opreg, val;
4405 target_ulong next_eip, tval;
4406 int rex_w, rex_r;
4408 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4409 tcg_gen_debug_insn_start(pc_start);
4410 }
4411 s->pc = pc_start;
4412 prefixes = 0;
4413 s->override = -1;
4414 rex_w = -1;
4415 rex_r = 0;
4416 #ifdef TARGET_X86_64
4417 s->rex_x = 0;
4418 s->rex_b = 0;
4419 x86_64_hregs = 0;
4420 #endif
4421 s->rip_offset = 0; /* for relative ip address */
4422 s->vex_l = 0;
4423 s->vex_v = 0;
4424 next_byte:
4425 b = cpu_ldub_code(env, s->pc);
4426 s->pc++;
4427 /* Collect prefixes. */
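/* Each prefix byte loops back to next_byte; decoding proper starts at
   the first non-prefix byte. */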
4428 switch (b) {
4429 case 0xf3:
4430 prefixes |= PREFIX_REPZ;
4431 goto next_byte;
4432 case 0xf2:
4433 prefixes |= PREFIX_REPNZ;
4434 goto next_byte;
4435 case 0xf0:
4436 prefixes |= PREFIX_LOCK;
4437 goto next_byte;
4438 case 0x2e:
4439 s->override = R_CS;
4440 goto next_byte;
4441 case 0x36:
4442 s->override = R_SS;
4443 goto next_byte;
4444 case 0x3e:
4445 s->override = R_DS;
4446 goto next_byte;
4447 case 0x26:
4448 s->override = R_ES;
4449 goto next_byte;
4450 case 0x64:
4451 s->override = R_FS;
4452 goto next_byte;
4453 case 0x65:
4454 s->override = R_GS;
4455 goto next_byte;
4456 case 0x66:
4457 prefixes |= PREFIX_DATA;
4458 goto next_byte;
4459 case 0x67:
4460 prefixes |= PREFIX_ADR;
4461 goto next_byte;
4462 #ifdef TARGET_X86_64
4463 case 0x40 ... 0x4f:
4464 if (CODE64(s)) {
4465 /* REX prefix */
4466 rex_w = (b >> 3) & 1;
4467 rex_r = (b & 0x4) << 1;
4468 s->rex_x = (b & 0x2) << 2;
4469 REX_B(s) = (b & 0x1) << 3;
4470 x86_64_hregs = 1; /* select uniform byte register addressing */
4471 goto next_byte;
4472 }
4473 break;
4474 #endif
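/* Note that REX.R/X/B above are pre-shifted into bit position 3 so
   they can be ORed directly onto a 3-bit ModRM register number. */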
4475 case 0xc5: /* 2-byte VEX */
4476 case 0xc4: /* 3-byte VEX */
4477 /* VEX prefixes are only valid in 32/64-bit protected code;
4478 otherwise 0xc4/0xc5 decode as LES or LDS. */
4479 if (s->code32 && !s->vm86) {
4480 static const int pp_prefix[4] = {
4481 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4482 };
4483 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4485 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4486 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4487 otherwise the instruction is LES or LDS. */
4488 break;
4489 }
4490 s->pc++;
4492 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4493 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4494 | PREFIX_LOCK | PREFIX_DATA)) {
4495 goto illegal_op;
4496 }
4497 #ifdef TARGET_X86_64
4498 if (x86_64_hregs) {
4499 goto illegal_op;
4500 }
4501 #endif
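/* The VEX payload stores R/X/B and vvvv inverted; the ~ below undoes
   the complement before the bits are shifted into place. */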
4502 rex_r = (~vex2 >> 4) & 8;
4503 if (b == 0xc5) {
4504 vex3 = vex2;
4505 b = cpu_ldub_code(env, s->pc++);
4506 } else {
4507 #ifdef TARGET_X86_64
4508 s->rex_x = (~vex2 >> 3) & 8;
4509 s->rex_b = (~vex2 >> 2) & 8;
4510 #endif
4511 vex3 = cpu_ldub_code(env, s->pc++);
4512 rex_w = (vex3 >> 7) & 1;
4513 switch (vex2 & 0x1f) {
4514 case 0x01: /* Implied 0f leading opcode bytes. */
4515 b = cpu_ldub_code(env, s->pc++) | 0x100;
4516 break;
4517 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4518 b = 0x138;
4519 break;
4520 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4521 b = 0x13a;
4522 break;
4523 default: /* Reserved for future use. */
4524 goto illegal_op;
4525 }
4526 }
4527 s->vex_v = (~vex3 >> 3) & 0xf;
4528 s->vex_l = (vex3 >> 2) & 1;
4529 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4530 }
4531 break;
4532 }
4534 /* Post-process prefixes. */
4535 if (CODE64(s)) {
4536 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4537 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4538 over 0x66 if both are present. */
4539 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4540 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4541 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4542 } else {
4543 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4544 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4545 dflag = MO_32;
4546 } else {
4547 dflag = MO_16;
4548 }
4549 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4550 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4551 aflag = MO_32;
4552 } else {
4553 aflag = MO_16;
4554 }
4555 }
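/* Worked example: in a 32-bit code segment, 66 67 8b 07 flips both
   flags to MO_16 and decodes as mov ax, [bx], i.e. 16-bit data with
   16-bit addressing. */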
4557 s->prefix = prefixes;
4558 s->aflag = aflag;
4559 s->dflag = dflag;
4561 /* lock generation */
4562 if (prefixes & PREFIX_LOCK)
4563 gen_helper_lock();
4565 /* now check op code */
4566 reswitch:
4567 switch(b) {
4568 case 0x0f:
4569 /**************************/
4570 /* extended op code */
4571 b = cpu_ldub_code(env, s->pc++) | 0x100;
4572 goto reswitch;
4574 /**************************/
4575 /* arith & logic */
4576 case 0x00 ... 0x05:
4577 case 0x08 ... 0x0d:
4578 case 0x10 ... 0x15:
4579 case 0x18 ... 0x1d:
4580 case 0x20 ... 0x25:
4581 case 0x28 ... 0x2d:
4582 case 0x30 ... 0x35:
4583 case 0x38 ... 0x3d:
4584 {
4585 int op, f, val;
4586 op = (b >> 3) & 7;
4587 f = (b >> 1) & 3;
4589 ot = mo_b_d(b, dflag);
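/* For opcodes 0x00-0x3d the opcode byte encodes everything: bits 5:3
   select the ALU op (add, or, adc, sbb, and, sub, xor, cmp), bit 0
   the byte/full size, and bits 2:1 the operand form handled below. */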
4591 switch(f) {
4592 case 0: /* OP Ev, Gv */
4593 modrm = cpu_ldub_code(env, s->pc++);
4594 reg = ((modrm >> 3) & 7) | rex_r;
4595 mod = (modrm >> 6) & 3;
4596 rm = (modrm & 7) | REX_B(s);
4597 if (mod != 3) {
4598 gen_lea_modrm(env, s, modrm);
4599 opreg = OR_TMP0;
4600 } else if (op == OP_XORL && rm == reg) {
4601 xor_zero:
4602 /* xor reg, reg optimisation */
4603 set_cc_op(s, CC_OP_CLR);
4604 tcg_gen_movi_tl(cpu_T[0], 0);
4605 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4606 break;
4607 } else {
4608 opreg = rm;
4609 }
4610 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4611 gen_op(s, op, ot, opreg);
4612 break;
4613 case 1: /* OP Gv, Ev */
4614 modrm = cpu_ldub_code(env, s->pc++);
4615 mod = (modrm >> 6) & 3;
4616 reg = ((modrm >> 3) & 7) | rex_r;
4617 rm = (modrm & 7) | REX_B(s);
4618 if (mod != 3) {
4619 gen_lea_modrm(env, s, modrm);
4620 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4621 } else if (op == OP_XORL && rm == reg) {
4622 goto xor_zero;
4623 } else {
4624 gen_op_mov_v_reg(ot, cpu_T[1], rm);
4626 gen_op(s, op, ot, reg);
4627 break;
4628 case 2: /* OP A, Iv */
4629 val = insn_get(env, s, ot);
4630 tcg_gen_movi_tl(cpu_T[1], val);
4631 gen_op(s, op, ot, OR_EAX);
4632 break;
4633 }
4634 }
4635 break;
4637 case 0x82:
4638 if (CODE64(s))
4639 goto illegal_op;
4640 case 0x80: /* GRP1 */
4641 case 0x81:
4642 case 0x83:
4643 {
4644 int val;
4646 ot = mo_b_d(b, dflag);
4648 modrm = cpu_ldub_code(env, s->pc++);
4649 mod = (modrm >> 6) & 3;
4650 rm = (modrm & 7) | REX_B(s);
4651 op = (modrm >> 3) & 7;
4653 if (mod != 3) {
4654 if (b == 0x83)
4655 s->rip_offset = 1;
4656 else
4657 s->rip_offset = insn_const_size(ot);
4658 gen_lea_modrm(env, s, modrm);
4659 opreg = OR_TMP0;
4660 } else {
4661 opreg = rm;
4662 }
4664 switch(b) {
4665 default:
4666 case 0x80:
4667 case 0x81:
4668 case 0x82:
4669 val = insn_get(env, s, ot);
4670 break;
4671 case 0x83:
4672 val = (int8_t)insn_get(env, s, MO_8);
4673 break;
4674 }
4675 tcg_gen_movi_tl(cpu_T[1], val);
4676 gen_op(s, op, ot, opreg);
4677 }
4678 break;
4680 /**************************/
4681 /* inc, dec, and other misc arith */
4682 case 0x40 ... 0x47: /* inc Gv */
4683 ot = dflag;
4684 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4685 break;
4686 case 0x48 ... 0x4f: /* dec Gv */
4687 ot = dflag;
4688 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4689 break;
4690 case 0xf6: /* GRP3 */
4691 case 0xf7:
4692 ot = mo_b_d(b, dflag);
4694 modrm = cpu_ldub_code(env, s->pc++);
4695 mod = (modrm >> 6) & 3;
4696 rm = (modrm & 7) | REX_B(s);
4697 op = (modrm >> 3) & 7;
4698 if (mod != 3) {
4699 if (op == 0)
4700 s->rip_offset = insn_const_size(ot);
4701 gen_lea_modrm(env, s, modrm);
4702 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4703 } else {
4704 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4705 }
4707 switch(op) {
4708 case 0: /* test */
4709 val = insn_get(env, s, ot);
4710 tcg_gen_movi_tl(cpu_T[1], val);
4711 gen_op_testl_T0_T1_cc();
4712 set_cc_op(s, CC_OP_LOGICB + ot);
4713 break;
4714 case 2: /* not */
4715 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4716 if (mod != 3) {
4717 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4718 } else {
4719 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4720 }
4721 break;
4722 case 3: /* neg */
4723 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4724 if (mod != 3) {
4725 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4726 } else {
4727 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4728 }
4729 gen_op_update_neg_cc();
4730 set_cc_op(s, CC_OP_SUBB + ot);
4731 break;
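/* In the mul/imul cases below, cc_dst receives the low result while
   cc_src receives what decides CF/OF: the high half of the product
   for mul, or the difference between the full product and the
   sign-extended low half for imul (nonzero exactly on overflow). */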
4732 case 4: /* mul */
4733 switch(ot) {
4734 case MO_8:
4735 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4736 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4737 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4738 /* XXX: use 32 bit mul which could be faster */
4739 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4740 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4741 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4742 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4743 set_cc_op(s, CC_OP_MULB);
4744 break;
4745 case MO_16:
4746 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4747 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4748 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4749 /* XXX: use 32 bit mul which could be faster */
4750 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4751 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4752 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4753 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4754 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4755 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4756 set_cc_op(s, CC_OP_MULW);
4757 break;
4758 default:
4759 case MO_32:
4760 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4761 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4762 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4763 cpu_tmp2_i32, cpu_tmp3_i32);
4764 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4765 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4766 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4767 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4768 set_cc_op(s, CC_OP_MULL);
4769 break;
4770 #ifdef TARGET_X86_64
4771 case MO_64:
4772 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4773 cpu_T[0], cpu_regs[R_EAX]);
4774 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4775 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4776 set_cc_op(s, CC_OP_MULQ);
4777 break;
4778 #endif
4779 }
4780 break;
4781 case 5: /* imul */
4782 switch(ot) {
4783 case MO_8:
4784 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4785 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4786 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4787 /* XXX: use 32 bit mul which could be faster */
4788 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4789 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4790 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4791 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4792 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4793 set_cc_op(s, CC_OP_MULB);
4794 break;
4795 case MO_16:
4796 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4797 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4798 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4799 /* XXX: use 32 bit mul which could be faster */
4800 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4801 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4802 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4803 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4804 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4805 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4806 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4807 set_cc_op(s, CC_OP_MULW);
4808 break;
4809 default:
4810 case MO_32:
4811 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4812 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4813 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4814 cpu_tmp2_i32, cpu_tmp3_i32);
4815 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4816 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4817 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4818 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4819 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4820 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4821 set_cc_op(s, CC_OP_MULL);
4822 break;
4823 #ifdef TARGET_X86_64
4824 case MO_64:
4825 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4826 cpu_T[0], cpu_regs[R_EAX]);
4827 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4828 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4829 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4830 set_cc_op(s, CC_OP_MULQ);
4831 break;
4832 #endif
4833 }
4834 break;
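/* div/idiv live entirely in helpers because they must raise #DE on
   divide-by-zero or quotient overflow; gen_jmp_im records the
   faulting EIP first so the exception is precise. */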
4835 case 6: /* div */
4836 switch(ot) {
4837 case MO_8:
4838 gen_jmp_im(pc_start - s->cs_base);
4839 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4840 break;
4841 case MO_16:
4842 gen_jmp_im(pc_start - s->cs_base);
4843 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4844 break;
4845 default:
4846 case MO_32:
4847 gen_jmp_im(pc_start - s->cs_base);
4848 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4849 break;
4850 #ifdef TARGET_X86_64
4851 case MO_64:
4852 gen_jmp_im(pc_start - s->cs_base);
4853 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4854 break;
4855 #endif
4856 }
4857 break;
4858 case 7: /* idiv */
4859 switch(ot) {
4860 case MO_8:
4861 gen_jmp_im(pc_start - s->cs_base);
4862 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4863 break;
4864 case MO_16:
4865 gen_jmp_im(pc_start - s->cs_base);
4866 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4867 break;
4868 default:
4869 case MO_32:
4870 gen_jmp_im(pc_start - s->cs_base);
4871 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4872 break;
4873 #ifdef TARGET_X86_64
4874 case MO_64:
4875 gen_jmp_im(pc_start - s->cs_base);
4876 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4877 break;
4878 #endif
4879 }
4880 break;
4881 default:
4882 goto illegal_op;
4883 }
4884 break;
4886 case 0xfe: /* GRP4 */
4887 case 0xff: /* GRP5 */
4888 ot = mo_b_d(b, dflag);
4890 modrm = cpu_ldub_code(env, s->pc++);
4891 mod = (modrm >> 6) & 3;
4892 rm = (modrm & 7) | REX_B(s);
4893 op = (modrm >> 3) & 7;
4894 if (op >= 2 && b == 0xfe) {
4895 goto illegal_op;
4896 }
4897 if (CODE64(s)) {
4898 if (op == 2 || op == 4) {
4899 /* operand size for jumps is 64 bit */
4900 ot = MO_64;
4901 } else if (op == 3 || op == 5) {
4902 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4903 } else if (op == 6) {
4904 /* default push size is 64 bit */
4905 ot = mo_pushpop(s, dflag);
4906 }
4907 }
4908 if (mod != 3) {
4909 gen_lea_modrm(env, s, modrm);
4910 if (op >= 2 && op != 3 && op != 5)
4911 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4912 } else {
4913 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4914 }
4916 switch(op) {
4917 case 0: /* inc Ev */
4918 if (mod != 3)
4919 opreg = OR_TMP0;
4920 else
4921 opreg = rm;
4922 gen_inc(s, ot, opreg, 1);
4923 break;
4924 case 1: /* dec Ev */
4925 if (mod != 3)
4926 opreg = OR_TMP0;
4927 else
4928 opreg = rm;
4929 gen_inc(s, ot, opreg, -1);
4930 break;
4931 case 2: /* call Ev */
4932 /* XXX: optimize if memory (no 'and' is necessary) */
4933 if (dflag == MO_16) {
4934 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4935 }
4936 next_eip = s->pc - s->cs_base;
4937 tcg_gen_movi_tl(cpu_T[1], next_eip);
4938 gen_push_v(s, cpu_T[1]);
4939 gen_op_jmp_v(cpu_T[0]);
4940 gen_eob(s);
4941 break;
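/* lcall/ljmp fetch the new offset and CS selector from memory; the
   protected-mode paths go through helpers so the usual privilege and
   gate checks apply. */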
4942 case 3: /* lcall Ev */
4943 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4944 gen_add_A0_im(s, 1 << ot);
4945 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4946 do_lcall:
4947 if (s->pe && !s->vm86) {
4948 gen_update_cc_op(s);
4949 gen_jmp_im(pc_start - s->cs_base);
4950 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4951 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4952 tcg_const_i32(dflag - 1),
4953 tcg_const_i32(s->pc - pc_start));
4954 } else {
4955 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4956 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4957 tcg_const_i32(dflag - 1),
4958 tcg_const_i32(s->pc - s->cs_base));
4959 }
4960 gen_eob(s);
4961 break;
4962 case 4: /* jmp Ev */
4963 if (dflag == MO_16) {
4964 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4965 }
4966 gen_op_jmp_v(cpu_T[0]);
4967 gen_eob(s);
4968 break;
4969 case 5: /* ljmp Ev */
4970 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4971 gen_add_A0_im(s, 1 << ot);
4972 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4973 do_ljmp:
4974 if (s->pe && !s->vm86) {
4975 gen_update_cc_op(s);
4976 gen_jmp_im(pc_start - s->cs_base);
4977 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4978 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4979 tcg_const_i32(s->pc - pc_start));
4980 } else {
4981 gen_op_movl_seg_T0_vm(R_CS);
4982 gen_op_jmp_v(cpu_T[1]);
4983 }
4984 gen_eob(s);
4985 break;
4986 case 6: /* push Ev */
4987 gen_push_v(s, cpu_T[0]);
4988 break;
4989 default:
4990 goto illegal_op;
4991 }
4992 break;
4994 case 0x84: /* test Ev, Gv */
4995 case 0x85:
4996 ot = mo_b_d(b, dflag);
4998 modrm = cpu_ldub_code(env, s->pc++);
4999 reg = ((modrm >> 3) & 7) | rex_r;
5001 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5002 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5003 gen_op_testl_T0_T1_cc();
5004 set_cc_op(s, CC_OP_LOGICB + ot);
5005 break;
5007 case 0xa8: /* test eAX, Iv */
5008 case 0xa9:
5009 ot = mo_b_d(b, dflag);
5010 val = insn_get(env, s, ot);
5012 gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
5013 tcg_gen_movi_tl(cpu_T[1], val);
5014 gen_op_testl_T0_T1_cc();
5015 set_cc_op(s, CC_OP_LOGICB + ot);
5016 break;
5018 case 0x98: /* CWDE/CBW */
5019 switch (dflag) {
5020 #ifdef TARGET_X86_64
5021 case MO_64:
5022 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5023 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5024 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
5025 break;
5026 #endif
5027 case MO_32:
5028 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5029 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5030 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
5031 break;
5032 case MO_16:
5033 gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
5034 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5035 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
5036 break;
5037 default:
5038 tcg_abort();
5039 }
5040 break;
5041 case 0x99: /* CDQ/CWD */
5042 switch (dflag) {
5043 #ifdef TARGET_X86_64
5044 case MO_64:
5045 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
5046 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5047 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
5048 break;
5049 #endif
5050 case MO_32:
5051 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5052 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5053 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5054 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
5055 break;
5056 case MO_16:
5057 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5058 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5059 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5060 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
5061 break;
5062 default:
5063 tcg_abort();
5064 }
5065 break;
5066 case 0x1af: /* imul Gv, Ev */
5067 case 0x69: /* imul Gv, Ev, I */
5068 case 0x6b:
5069 ot = dflag;
5070 modrm = cpu_ldub_code(env, s->pc++);
5071 reg = ((modrm >> 3) & 7) | rex_r;
5072 if (b == 0x69)
5073 s->rip_offset = insn_const_size(ot);
5074 else if (b == 0x6b)
5075 s->rip_offset = 1;
5076 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5077 if (b == 0x69) {
5078 val = insn_get(env, s, ot);
5079 tcg_gen_movi_tl(cpu_T[1], val);
5080 } else if (b == 0x6b) {
5081 val = (int8_t)insn_get(env, s, MO_8);
5082 tcg_gen_movi_tl(cpu_T[1], val);
5083 } else {
5084 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5085 }
5086 switch (ot) {
5087 #ifdef TARGET_X86_64
5088 case MO_64:
5089 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5090 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5091 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5092 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5093 break;
5094 #endif
5095 case MO_32:
5096 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5097 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5098 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5099 cpu_tmp2_i32, cpu_tmp3_i32);
5100 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5101 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5102 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5103 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5104 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5105 break;
5106 default:
5107 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5108 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5109 /* XXX: use 32 bit mul which could be faster */
5110 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5111 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5112 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5113 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5114 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5115 break;
5116 }
5117 set_cc_op(s, CC_OP_MULB + ot);
5118 break;
5119 case 0x1c0:
5120 case 0x1c1: /* xadd Ev, Gv */
5121 ot = mo_b_d(b, dflag);
5122 modrm = cpu_ldub_code(env, s->pc++);
5123 reg = ((modrm >> 3) & 7) | rex_r;
5124 mod = (modrm >> 6) & 3;
5125 if (mod == 3) {
5126 rm = (modrm & 7) | REX_B(s);
5127 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5128 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5129 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5130 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5131 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5132 } else {
5133 gen_lea_modrm(env, s, modrm);
5134 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5135 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5136 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5137 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5138 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5139 }
5140 gen_op_update2_cc();
5141 set_cc_op(s, CC_OP_ADDB + ot);
5142 break;
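/* cmpxchg is emitted inline with an explicit compare-and-branch; as
   the comment below notes, the memory path always issues a store
   (either the new value or a write-back of the old one) to match the
   unconditional store cycle of real hardware. */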
5143 case 0x1b0:
5144 case 0x1b1: /* cmpxchg Ev, Gv */
5145 {
5146 int label1, label2;
5147 TCGv t0, t1, t2, a0;
5149 ot = mo_b_d(b, dflag);
5150 modrm = cpu_ldub_code(env, s->pc++);
5151 reg = ((modrm >> 3) & 7) | rex_r;
5152 mod = (modrm >> 6) & 3;
5153 t0 = tcg_temp_local_new();
5154 t1 = tcg_temp_local_new();
5155 t2 = tcg_temp_local_new();
5156 a0 = tcg_temp_local_new();
5157 gen_op_mov_v_reg(ot, t1, reg);
5158 if (mod == 3) {
5159 rm = (modrm & 7) | REX_B(s);
5160 gen_op_mov_v_reg(ot, t0, rm);
5161 } else {
5162 gen_lea_modrm(env, s, modrm);
5163 tcg_gen_mov_tl(a0, cpu_A0);
5164 gen_op_ld_v(s, ot, t0, a0);
5165 rm = 0; /* avoid warning */
5166 }
5167 label1 = gen_new_label();
5168 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5169 gen_extu(ot, t0);
5170 gen_extu(ot, t2);
5171 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5172 label2 = gen_new_label();
5173 if (mod == 3) {
5174 gen_op_mov_reg_v(ot, R_EAX, t0);
5175 tcg_gen_br(label2);
5176 gen_set_label(label1);
5177 gen_op_mov_reg_v(ot, rm, t1);
5178 } else {
5179 /* perform no-op store cycle like physical cpu; must be
5180 before changing accumulator to ensure idempotency if
5181 the store faults and the instruction is restarted */
5182 gen_op_st_v(s, ot, t0, a0);
5183 gen_op_mov_reg_v(ot, R_EAX, t0);
5184 tcg_gen_br(label2);
5185 gen_set_label(label1);
5186 gen_op_st_v(s, ot, t1, a0);
5187 }
5188 gen_set_label(label2);
5189 tcg_gen_mov_tl(cpu_cc_src, t0);
5190 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5191 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5192 set_cc_op(s, CC_OP_SUBB + ot);
5193 tcg_temp_free(t0);
5194 tcg_temp_free(t1);
5195 tcg_temp_free(t2);
5196 tcg_temp_free(a0);
5197 }
5198 break;
5199 case 0x1c7: /* cmpxchg8b */
5200 modrm = cpu_ldub_code(env, s->pc++);
5201 mod = (modrm >> 6) & 3;
5202 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5203 goto illegal_op;
5204 #ifdef TARGET_X86_64
5205 if (dflag == MO_64) {
5206 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5207 goto illegal_op;
5208 gen_jmp_im(pc_start - s->cs_base);
5209 gen_update_cc_op(s);
5210 gen_lea_modrm(env, s, modrm);
5211 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5212 } else
5213 #endif
5214 {
5215 if (!(s->cpuid_features & CPUID_CX8))
5216 goto illegal_op;
5217 gen_jmp_im(pc_start - s->cs_base);
5218 gen_update_cc_op(s);
5219 gen_lea_modrm(env, s, modrm);
5220 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5221 }
5222 set_cc_op(s, CC_OP_EFLAGS);
5223 break;
5225 /**************************/
5226 /* push/pop */
5227 case 0x50 ... 0x57: /* push */
5228 gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
5229 gen_push_v(s, cpu_T[0]);
5230 break;
5231 case 0x58 ... 0x5f: /* pop */
5232 ot = gen_pop_T0(s);
5233 /* NOTE: order is important for pop %sp */
5234 gen_pop_update(s, ot);
5235 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
5236 break;
5237 case 0x60: /* pusha */
5238 if (CODE64(s))
5239 goto illegal_op;
5240 gen_pusha(s);
5241 break;
5242 case 0x61: /* popa */
5243 if (CODE64(s))
5244 goto illegal_op;
5245 gen_popa(s);
5246 break;
5247 case 0x68: /* push Iv */
5248 case 0x6a:
5249 ot = mo_pushpop(s, dflag);
5250 if (b == 0x68)
5251 val = insn_get(env, s, ot);
5252 else
5253 val = (int8_t)insn_get(env, s, MO_8);
5254 tcg_gen_movi_tl(cpu_T[0], val);
5255 gen_push_v(s, cpu_T[0]);
5256 break;
5257 case 0x8f: /* pop Ev */
5258 modrm = cpu_ldub_code(env, s->pc++);
5259 mod = (modrm >> 6) & 3;
5260 ot = gen_pop_T0(s);
5261 if (mod == 3) {
5262 /* NOTE: order is important for pop %sp */
5263 gen_pop_update(s, ot);
5264 rm = (modrm & 7) | REX_B(s);
5265 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5266 } else {
5267 /* NOTE: order is important too for MMU exceptions */
5268 s->popl_esp_hack = 1 << ot;
5269 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5270 s->popl_esp_hack = 0;
5271 gen_pop_update(s, ot);
5272 }
5273 break;
5274 case 0xc8: /* enter */
5275 {
5276 int level;
5277 val = cpu_lduw_code(env, s->pc);
5278 s->pc += 2;
5279 level = cpu_ldub_code(env, s->pc++);
5280 gen_enter(s, val, level);
5281 }
5282 break;
5283 case 0xc9: /* leave */
5284 /* XXX: exception not precise (ESP is updated before potential exception) */
5285 if (CODE64(s)) {
5286 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
5287 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
5288 } else if (s->ss32) {
5289 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
5290 gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
5291 } else {
5292 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
5293 gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
5294 }
5295 ot = gen_pop_T0(s);
5296 gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
5297 gen_pop_update(s, ot);
5298 break;
5299 case 0x06: /* push es */
5300 case 0x0e: /* push cs */
5301 case 0x16: /* push ss */
5302 case 0x1e: /* push ds */
5303 if (CODE64(s))
5304 goto illegal_op;
5305 gen_op_movl_T0_seg(b >> 3);
5306 gen_push_v(s, cpu_T[0]);
5307 break;
5308 case 0x1a0: /* push fs */
5309 case 0x1a8: /* push gs */
5310 gen_op_movl_T0_seg((b >> 3) & 7);
5311 gen_push_v(s, cpu_T[0]);
5312 break;
5313 case 0x07: /* pop es */
5314 case 0x17: /* pop ss */
5315 case 0x1f: /* pop ds */
5316 if (CODE64(s))
5317 goto illegal_op;
5318 reg = b >> 3;
5319 ot = gen_pop_T0(s);
5320 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5321 gen_pop_update(s, ot);
5322 if (reg == R_SS) {
5323 /* if reg == SS, inhibit interrupts/trace. */
5324 /* If several instructions disable interrupts, only the
5325 _first_ does it */
5326 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5327 gen_helper_set_inhibit_irq(cpu_env);
5328 s->tf = 0;
5329 }
5330 if (s->is_jmp) {
5331 gen_jmp_im(s->pc - s->cs_base);
5332 gen_eob(s);
5333 }
5334 break;
5335 case 0x1a1: /* pop fs */
5336 case 0x1a9: /* pop gs */
5337 ot = gen_pop_T0(s);
5338 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5339 gen_pop_update(s, ot);
5340 if (s->is_jmp) {
5341 gen_jmp_im(s->pc - s->cs_base);
5342 gen_eob(s);
5343 }
5344 break;
5346 /**************************/
5347 /* mov */
5348 case 0x88:
5349 case 0x89: /* mov Gv, Ev */
5350 ot = mo_b_d(b, dflag);
5351 modrm = cpu_ldub_code(env, s->pc++);
5352 reg = ((modrm >> 3) & 7) | rex_r;
5354 /* generate a generic store */
5355 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5356 break;
5357 case 0xc6:
5358 case 0xc7: /* mov Ev, Iv */
5359 ot = mo_b_d(b, dflag);
5360 modrm = cpu_ldub_code(env, s->pc++);
5361 mod = (modrm >> 6) & 3;
5362 if (mod != 3) {
5363 s->rip_offset = insn_const_size(ot);
5364 gen_lea_modrm(env, s, modrm);
5365 }
5366 val = insn_get(env, s, ot);
5367 tcg_gen_movi_tl(cpu_T[0], val);
5368 if (mod != 3) {
5369 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5370 } else {
5371 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
5372 }
5373 break;
5374 case 0x8a:
5375 case 0x8b: /* mov Ev, Gv */
5376 ot = mo_b_d(b, dflag);
5377 modrm = cpu_ldub_code(env, s->pc++);
5378 reg = ((modrm >> 3) & 7) | rex_r;
5380 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5381 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5382 break;
5383 case 0x8e: /* mov seg, Gv */
5384 modrm = cpu_ldub_code(env, s->pc++);
5385 reg = (modrm >> 3) & 7;
5386 if (reg >= 6 || reg == R_CS)
5387 goto illegal_op;
5388 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5389 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5390 if (reg == R_SS) {
5391 /* if reg == SS, inhibit interrupts/trace */
5392 /* If several instructions disable interrupts, only the
5393 _first_ does it */
5394 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5395 gen_helper_set_inhibit_irq(cpu_env);
5396 s->tf = 0;
5397 }
5398 if (s->is_jmp) {
5399 gen_jmp_im(s->pc - s->cs_base);
5400 gen_eob(s);
5401 }
5402 break;
5403 case 0x8c: /* mov Gv, seg */
5404 modrm = cpu_ldub_code(env, s->pc++);
5405 reg = (modrm >> 3) & 7;
5406 mod = (modrm >> 6) & 3;
5407 if (reg >= 6)
5408 goto illegal_op;
5409 gen_op_movl_T0_seg(reg);
5410 ot = mod == 3 ? dflag : MO_16;
5411 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5412 break;
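/* In the movzx/movsx family, bit 0 of the opcode selects a byte or
   word source and bit 3 selects zero versus sign extension, which is
   exactly how ot and s_ot are derived below. */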
5414 case 0x1b6: /* movzbS Gv, Eb */
5415 case 0x1b7: /* movzwS Gv, Eb */
5416 case 0x1be: /* movsbS Gv, Eb */
5417 case 0x1bf: /* movswS Gv, Eb */
5418 {
5419 TCGMemOp d_ot;
5420 TCGMemOp s_ot;
5422 /* d_ot is the size of destination */
5423 d_ot = dflag;
5424 /* ot is the size of source */
5425 ot = (b & 1) + MO_8;
5426 /* s_ot is the sign+size of source */
5427 s_ot = b & 8 ? MO_SIGN | ot : ot;
5429 modrm = cpu_ldub_code(env, s->pc++);
5430 reg = ((modrm >> 3) & 7) | rex_r;
5431 mod = (modrm >> 6) & 3;
5432 rm = (modrm & 7) | REX_B(s);
5434 if (mod == 3) {
5435 gen_op_mov_v_reg(ot, cpu_T[0], rm);
5436 switch (s_ot) {
5437 case MO_UB:
5438 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5439 break;
5440 case MO_SB:
5441 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5442 break;
5443 case MO_UW:
5444 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5445 break;
5446 default:
5447 case MO_SW:
5448 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5449 break;
5450 }
5451 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5452 } else {
5453 gen_lea_modrm(env, s, modrm);
5454 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
5455 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5456 }
5457 }
5458 break;
5460 case 0x8d: /* lea */
5461 ot = dflag;
5462 modrm = cpu_ldub_code(env, s->pc++);
5463 mod = (modrm >> 6) & 3;
5464 if (mod == 3)
5465 goto illegal_op;
5466 reg = ((modrm >> 3) & 7) | rex_r;
5467 /* we must ensure that no segment is added */
5468 s->override = -1;
5469 val = s->addseg;
5470 s->addseg = 0;
5471 gen_lea_modrm(env, s, modrm);
5472 s->addseg = val;
5473 gen_op_mov_reg_v(ot, reg, cpu_A0);
5474 break;
5476 case 0xa0: /* mov EAX, Ov */
5477 case 0xa1:
5478 case 0xa2: /* mov Ov, EAX */
5479 case 0xa3:
5480 {
5481 target_ulong offset_addr;
5483 ot = mo_b_d(b, dflag);
5484 switch (s->aflag) {
5485 #ifdef TARGET_X86_64
5486 case MO_64:
5487 offset_addr = cpu_ldq_code(env, s->pc);
5488 s->pc += 8;
5489 break;
5490 #endif
5491 default:
5492 offset_addr = insn_get(env, s, s->aflag);
5493 break;
5494 }
5495 tcg_gen_movi_tl(cpu_A0, offset_addr);
5496 gen_add_A0_ds_seg(s);
5497 if ((b & 2) == 0) {
5498 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
5499 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
5500 } else {
5501 gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
5502 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5503 }
5504 }
5505 break;
5506 case 0xd7: /* xlat */
5507 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5508 tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
5509 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5510 gen_extu(s->aflag, cpu_A0);
5511 gen_add_A0_ds_seg(s);
5512 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
5513 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
5514 break;
5515 case 0xb0 ... 0xb7: /* mov R, Ib */
5516 val = insn_get(env, s, MO_8);
5517 tcg_gen_movi_tl(cpu_T[0], val);
5518 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
5519 break;
5520 case 0xb8 ... 0xbf: /* mov R, Iv */
5521 #ifdef TARGET_X86_64
5522 if (dflag == MO_64) {
5523 uint64_t tmp;
5524 /* 64 bit case */
5525 tmp = cpu_ldq_code(env, s->pc);
5526 s->pc += 8;
5527 reg = (b & 7) | REX_B(s);
5528 tcg_gen_movi_tl(cpu_T[0], tmp);
5529 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
5530 } else
5531 #endif
5532 {
5533 ot = dflag;
5534 val = insn_get(env, s, ot);
5535 reg = (b & 7) | REX_B(s);
5536 tcg_gen_movi_tl(cpu_T[0], val);
5537 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5538 }
5539 break;
5541 case 0x91 ... 0x97: /* xchg R, EAX */
5542 do_xchg_reg_eax:
5543 ot = dflag;
5544 reg = (b & 7) | REX_B(s);
5545 rm = R_EAX;
5546 goto do_xchg_reg;
5547 case 0x86:
5548 case 0x87: /* xchg Ev, Gv */
5549 ot = mo_b_d(b, dflag);
5550 modrm = cpu_ldub_code(env, s->pc++);
5551 reg = ((modrm >> 3) & 7) | rex_r;
5552 mod = (modrm >> 6) & 3;
5553 if (mod == 3) {
5554 rm = (modrm & 7) | REX_B(s);
5555 do_xchg_reg:
5556 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5557 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5558 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5559 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5560 } else {
5561 gen_lea_modrm(env, s, modrm);
5562 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5563 /* for xchg, lock is implicit */
5564 if (!(prefixes & PREFIX_LOCK))
5565 gen_helper_lock();
5566 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5567 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5568 if (!(prefixes & PREFIX_LOCK))
5569 gen_helper_unlock();
5570 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5571 }
5572 break;
5573 case 0xc4: /* les Gv */
5574 /* In CODE64 this is VEX3; see above. */
5575 op = R_ES;
5576 goto do_lxx;
5577 case 0xc5: /* lds Gv */
5578 /* In CODE64 this is VEX2; see above. */
5579 op = R_DS;
5580 goto do_lxx;
5581 case 0x1b2: /* lss Gv */
5582 op = R_SS;
5583 goto do_lxx;
5584 case 0x1b4: /* lfs Gv */
5585 op = R_FS;
5586 goto do_lxx;
5587 case 0x1b5: /* lgs Gv */
5588 op = R_GS;
5589 do_lxx:
5590 ot = dflag != MO_16 ? MO_32 : MO_16;
5591 modrm = cpu_ldub_code(env, s->pc++);
5592 reg = ((modrm >> 3) & 7) | rex_r;
5593 mod = (modrm >> 6) & 3;
5594 if (mod == 3)
5595 goto illegal_op;
5596 gen_lea_modrm(env, s, modrm);
5597 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5598 gen_add_A0_im(s, 1 << ot);
5599 /* load the segment first to handle exceptions properly */
5600 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5601 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5602 /* then put the data */
5603 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5604 if (s->is_jmp) {
5605 gen_jmp_im(s->pc - s->cs_base);
5606 gen_eob(s);
5607 }
5608 break;
5610 /************************/
5611 /* shifts */
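/* Group 2 shifts come in three forms, tracked by 'shift' below:
   0 = count in CL, 1 = count of 1, 2 = count in an immediate byte. */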
5612 case 0xc0:
5613 case 0xc1:
5614 /* shift Ev,Ib */
5615 shift = 2;
5616 grp2:
5617 {
5618 ot = mo_b_d(b, dflag);
5619 modrm = cpu_ldub_code(env, s->pc++);
5620 mod = (modrm >> 6) & 3;
5621 op = (modrm >> 3) & 7;
5623 if (mod != 3) {
5624 if (shift == 2) {
5625 s->rip_offset = 1;
5626 }
5627 gen_lea_modrm(env, s, modrm);
5628 opreg = OR_TMP0;
5629 } else {
5630 opreg = (modrm & 7) | REX_B(s);
5631 }
5633 /* simpler op */
5634 if (shift == 0) {
5635 gen_shift(s, op, ot, opreg, OR_ECX);
5636 } else {
5637 if (shift == 2) {
5638 shift = cpu_ldub_code(env, s->pc++);
5639 }
5640 gen_shifti(s, op, ot, opreg, shift);
5641 }
5642 }
5643 break;
5644 case 0xd0:
5645 case 0xd1:
5646 /* shift Ev,1 */
5647 shift = 1;
5648 goto grp2;
5649 case 0xd2:
5650 case 0xd3:
5651 /* shift Ev,cl */
5652 shift = 0;
5653 goto grp2;
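/* shld/shrd double shifts: 'op' picks the direction and 'shift' again
   distinguishes an immediate count from CL. */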
5655 case 0x1a4: /* shld imm */
5656 op = 0;
5657 shift = 1;
5658 goto do_shiftd;
5659 case 0x1a5: /* shld cl */
5660 op = 0;
5661 shift = 0;
5662 goto do_shiftd;
5663 case 0x1ac: /* shrd imm */
5664 op = 1;
5665 shift = 1;
5666 goto do_shiftd;
5667 case 0x1ad: /* shrd cl */
5668 op = 1;
5669 shift = 0;
5670 do_shiftd:
5671 ot = dflag;
5672 modrm = cpu_ldub_code(env, s->pc++);
5673 mod = (modrm >> 6) & 3;
5674 rm = (modrm & 7) | REX_B(s);
5675 reg = ((modrm >> 3) & 7) | rex_r;
5676 if (mod != 3) {
5677 gen_lea_modrm(env, s, modrm);
5678 opreg = OR_TMP0;
5679 } else {
5680 opreg = rm;
5681 }
5682 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5684 if (shift) {
5685 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5686 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5687 tcg_temp_free(imm);
5688 } else {
5689 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5690 }
5691 break;
5693 /************************/
5694 /* floats */
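/* The x87 block decodes d8-df as op = (low three opcode bits << 3) |
   ModRM reg field, one 6-bit index covering both the memory and the
   register forms. */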
5695 case 0xd8 ... 0xdf:
5696 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5697 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5698 /* XXX: what to do if illegal op ? */
5699 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5700 break;
5701 }
5702 modrm = cpu_ldub_code(env, s->pc++);
5703 mod = (modrm >> 6) & 3;
5704 rm = modrm & 7;
5705 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5706 if (mod != 3) {
5707 /* memory op */
5708 gen_lea_modrm(env, s, modrm);
5709 switch(op) {
5710 case 0x00 ... 0x07: /* fxxxs */
5711 case 0x10 ... 0x17: /* fixxxl */
5712 case 0x20 ... 0x27: /* fxxxl */
5713 case 0x30 ... 0x37: /* fixxx */
5714 {
5715 int op1;
5716 op1 = op & 7;
5718 switch(op >> 4) {
5719 case 0:
5720 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5721 s->mem_index, MO_LEUL);
5722 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5723 break;
5724 case 1:
5725 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5726 s->mem_index, MO_LEUL);
5727 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5728 break;
5729 case 2:
5730 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5731 s->mem_index, MO_LEQ);
5732 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5733 break;
5734 case 3:
5735 default:
5736 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5737 s->mem_index, MO_LESW);
5738 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5739 break;
5740 }
5742 gen_helper_fp_arith_ST0_FT0(op1);
5743 if (op1 == 3) {
5744 /* fcomp needs pop */
5745 gen_helper_fpop(cpu_env);
5746 }
5747 }
5748 break;
5749 case 0x08: /* flds */
5750 case 0x0a: /* fsts */
5751 case 0x0b: /* fstps */
5752 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5753 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5754 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5755 switch(op & 7) {
5756 case 0:
5757 switch(op >> 4) {
5758 case 0:
5759 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5760 s->mem_index, MO_LEUL);
5761 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5762 break;
5763 case 1:
5764 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5765 s->mem_index, MO_LEUL);
5766 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5767 break;
5768 case 2:
5769 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5770 s->mem_index, MO_LEQ);
5771 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5772 break;
5773 case 3:
5774 default:
5775 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5776 s->mem_index, MO_LESW);
5777 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5778 break;
5779 }
5780 break;
5781 case 1:
5782 /* XXX: the corresponding CPUID bit must be tested ! */
5783 switch(op >> 4) {
5784 case 1:
5785 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5786 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5787 s->mem_index, MO_LEUL);
5788 break;
5789 case 2:
5790 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5791 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5792 s->mem_index, MO_LEQ);
5793 break;
5794 case 3:
5795 default:
5796 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5797 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5798 s->mem_index, MO_LEUW);
5799 break;
5800 }
5801 gen_helper_fpop(cpu_env);
5802 break;
5803 default:
5804 switch(op >> 4) {
5805 case 0:
5806 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5807 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5808 s->mem_index, MO_LEUL);
5809 break;
5810 case 1:
5811 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5812 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5813 s->mem_index, MO_LEUL);
5814 break;
5815 case 2:
5816 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5817 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5818 s->mem_index, MO_LEQ);
5819 break;
5820 case 3:
5821 default:
5822 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5823 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5824 s->mem_index, MO_LEUW);
5825 break;
5826 }
5827 if ((op & 7) == 3)
5828 gen_helper_fpop(cpu_env);
5829 break;
5830 }
5831 break;
5832 case 0x0c: /* fldenv mem */
5833 gen_update_cc_op(s);
5834 gen_jmp_im(pc_start - s->cs_base);
5835 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5836 break;
5837 case 0x0d: /* fldcw mem */
5838 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5839 s->mem_index, MO_LEUW);
5840 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5841 break;
5842 case 0x0e: /* fnstenv mem */
5843 gen_update_cc_op(s);
5844 gen_jmp_im(pc_start - s->cs_base);
5845 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5846 break;
5847 case 0x0f: /* fnstcw mem */
5848 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5849 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5850 s->mem_index, MO_LEUW);
5851 break;
5852 case 0x1d: /* fldt mem */
5853 gen_update_cc_op(s);
5854 gen_jmp_im(pc_start - s->cs_base);
5855 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5856 break;
5857 case 0x1f: /* fstpt mem */
5858 gen_update_cc_op(s);
5859 gen_jmp_im(pc_start - s->cs_base);
5860 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5861 gen_helper_fpop(cpu_env);
5862 break;
5863 case 0x2c: /* frstor mem */
5864 gen_update_cc_op(s);
5865 gen_jmp_im(pc_start - s->cs_base);
5866 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5867 break;
5868 case 0x2e: /* fnsave mem */
5869 gen_update_cc_op(s);
5870 gen_jmp_im(pc_start - s->cs_base);
5871 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5872 break;
5873 case 0x2f: /* fnstsw mem */
5874 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5875 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5876 s->mem_index, MO_LEUW);
5877 break;
5878 case 0x3c: /* fbld */
5879 gen_update_cc_op(s);
5880 gen_jmp_im(pc_start - s->cs_base);
5881 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5882 break;
5883 case 0x3e: /* fbstp */
5884 gen_update_cc_op(s);
5885 gen_jmp_im(pc_start - s->cs_base);
5886 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5887 gen_helper_fpop(cpu_env);
5888 break;
5889 case 0x3d: /* fildll */
5890 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5891 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5892 break;
5893 case 0x3f: /* fistpll */
5894 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5895 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5896 gen_helper_fpop(cpu_env);
5897 break;
5898 default:
5899 goto illegal_op;
5900 }
5901 } else {
5902 /* register float ops */
5903 opreg = rm;
5905 switch(op) {
5906 case 0x08: /* fld sti */
5907 gen_helper_fpush(cpu_env);
5908 gen_helper_fmov_ST0_STN(cpu_env,
5909 tcg_const_i32((opreg + 1) & 7));
5910 break;
5911 case 0x09: /* fxchg sti */
5912 case 0x29: /* fxchg4 sti, undocumented op */
5913 case 0x39: /* fxchg7 sti, undocumented op */
5914 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5915 break;
5916 case 0x0a: /* grp d9/2 */
5917 switch(rm) {
5918 case 0: /* fnop */
5919 /* check exceptions (FreeBSD FPU probe) */
5920 gen_update_cc_op(s);
5921 gen_jmp_im(pc_start - s->cs_base);
5922 gen_helper_fwait(cpu_env);
5923 break;
5924 default:
5925 goto illegal_op;
5926 }
5927 break;
5928 case 0x0c: /* grp d9/4 */
5929 switch(rm) {
5930 case 0: /* fchs */
5931 gen_helper_fchs_ST0(cpu_env);
5932 break;
5933 case 1: /* fabs */
5934 gen_helper_fabs_ST0(cpu_env);
5935 break;
5936 case 4: /* ftst */
5937 gen_helper_fldz_FT0(cpu_env);
5938 gen_helper_fcom_ST0_FT0(cpu_env);
5939 break;
5940 case 5: /* fxam */
5941 gen_helper_fxam_ST0(cpu_env);
5942 break;
5943 default:
5944 goto illegal_op;
5945 }
5946 break;
5947 case 0x0d: /* grp d9/5 */
5948 {
5949 switch(rm) {
5950 case 0:
5951 gen_helper_fpush(cpu_env);
5952 gen_helper_fld1_ST0(cpu_env);
5953 break;
5954 case 1:
5955 gen_helper_fpush(cpu_env);
5956 gen_helper_fldl2t_ST0(cpu_env);
5957 break;
5958 case 2:
5959 gen_helper_fpush(cpu_env);
5960 gen_helper_fldl2e_ST0(cpu_env);
5961 break;
5962 case 3:
5963 gen_helper_fpush(cpu_env);
5964 gen_helper_fldpi_ST0(cpu_env);
5965 break;
5966 case 4:
5967 gen_helper_fpush(cpu_env);
5968 gen_helper_fldlg2_ST0(cpu_env);
5969 break;
5970 case 5:
5971 gen_helper_fpush(cpu_env);
5972 gen_helper_fldln2_ST0(cpu_env);
5973 break;
5974 case 6:
5975 gen_helper_fpush(cpu_env);
5976 gen_helper_fldz_ST0(cpu_env);
5977 break;
5978 default:
5979 goto illegal_op;
5980 }
5981 }
5982 break;
5983 case 0x0e: /* grp d9/6 */
5984 switch(rm) {
5985 case 0: /* f2xm1 */
5986 gen_helper_f2xm1(cpu_env);
5987 break;
5988 case 1: /* fyl2x */
5989 gen_helper_fyl2x(cpu_env);
5990 break;
5991 case 2: /* fptan */
5992 gen_helper_fptan(cpu_env);
5993 break;
5994 case 3: /* fpatan */
5995 gen_helper_fpatan(cpu_env);
5996 break;
5997 case 4: /* fxtract */
5998 gen_helper_fxtract(cpu_env);
5999 break;
6000 case 5: /* fprem1 */
6001 gen_helper_fprem1(cpu_env);
6002 break;
6003 case 6: /* fdecstp */
6004 gen_helper_fdecstp(cpu_env);
6005 break;
6006 default:
6007 case 7: /* fincstp */
6008 gen_helper_fincstp(cpu_env);
6009 break;
6010 }
6011 break;
6012 case 0x0f: /* grp d9/7 */
6013 switch(rm) {
6014 case 0: /* fprem */
6015 gen_helper_fprem(cpu_env);
6016 break;
6017 case 1: /* fyl2xp1 */
6018 gen_helper_fyl2xp1(cpu_env);
6019 break;
6020 case 2: /* fsqrt */
6021 gen_helper_fsqrt(cpu_env);
6022 break;
6023 case 3: /* fsincos */
6024 gen_helper_fsincos(cpu_env);
6025 break;
6026 case 5: /* fscale */
6027 gen_helper_fscale(cpu_env);
6028 break;
6029 case 4: /* frndint */
6030 gen_helper_frndint(cpu_env);
6031 break;
6032 case 6: /* fsin */
6033 gen_helper_fsin(cpu_env);
6034 break;
6035 default:
6036 case 7: /* fcos */
6037 gen_helper_fcos(cpu_env);
6038 break;
6039 }
6040 break;
6041 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6042 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6043 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6044 {
6045 int op1;
6047 op1 = op & 7;
6048 if (op >= 0x20) {
6049 gen_helper_fp_arith_STN_ST0(op1, opreg);
6050 if (op >= 0x30)
6051 gen_helper_fpop(cpu_env);
6052 } else {
6053 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6054 gen_helper_fp_arith_ST0_FT0(op1);
6055 }
6056 }
6057 break;
6058 case 0x02: /* fcom */
6059 case 0x22: /* fcom2, undocumented op */
6060 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6061 gen_helper_fcom_ST0_FT0(cpu_env);
6062 break;
6063 case 0x03: /* fcomp */
6064 case 0x23: /* fcomp3, undocumented op */
6065 case 0x32: /* fcomp5, undocumented op */
6066 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6067 gen_helper_fcom_ST0_FT0(cpu_env);
6068 gen_helper_fpop(cpu_env);
6069 break;
6070 case 0x15: /* da/5 */
6071 switch(rm) {
6072 case 1: /* fucompp */
6073 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6074 gen_helper_fucom_ST0_FT0(cpu_env);
6075 gen_helper_fpop(cpu_env);
6076 gen_helper_fpop(cpu_env);
6077 break;
6078 default:
6079 goto illegal_op;
6080 }
6081 break;
6082 case 0x1c:
6083 switch(rm) {
6084 case 0: /* feni (287 only, just do nop here) */
6085 break;
6086 case 1: /* fdisi (287 only, just do nop here) */
6087 break;
6088 case 2: /* fclex */
6089 gen_helper_fclex(cpu_env);
6090 break;
6091 case 3: /* fninit */
6092 gen_helper_fninit(cpu_env);
6093 break;
6094 case 4: /* fsetpm (287 only, just do nop here) */
6095 break;
6096 default:
6097 goto illegal_op;
6098 }
6099 break;
6100 case 0x1d: /* fucomi */
6101 if (!(s->cpuid_features & CPUID_CMOV)) {
6102 goto illegal_op;
6103 }
6104 gen_update_cc_op(s);
6105 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6106 gen_helper_fucomi_ST0_FT0(cpu_env);
6107 set_cc_op(s, CC_OP_EFLAGS);
6108 break;
6109 case 0x1e: /* fcomi */
6110 if (!(s->cpuid_features & CPUID_CMOV)) {
6111 goto illegal_op;
6112 }
6113 gen_update_cc_op(s);
6114 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6115 gen_helper_fcomi_ST0_FT0(cpu_env);
6116 set_cc_op(s, CC_OP_EFLAGS);
6117 break;
6118 case 0x28: /* ffree sti */
6119 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6120 break;
6121 case 0x2a: /* fst sti */
6122 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6123 break;
6124 case 0x2b: /* fstp sti */
6125 case 0x0b: /* fstp1 sti, undocumented op */
6126 case 0x3a: /* fstp8 sti, undocumented op */
6127 case 0x3b: /* fstp9 sti, undocumented op */
6128 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6129 gen_helper_fpop(cpu_env);
6130 break;
6131 case 0x2c: /* fucom st(i) */
6132 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6133 gen_helper_fucom_ST0_FT0(cpu_env);
6134 break;
6135 case 0x2d: /* fucomp st(i) */
6136 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6137 gen_helper_fucom_ST0_FT0(cpu_env);
6138 gen_helper_fpop(cpu_env);
6139 break;
6140 case 0x33: /* de/3 */
6141 switch(rm) {
6142 case 1: /* fcompp */
6143 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6144 gen_helper_fcom_ST0_FT0(cpu_env);
6145 gen_helper_fpop(cpu_env);
6146 gen_helper_fpop(cpu_env);
6147 break;
6148 default:
6149 goto illegal_op;
6150 }
6151 break;
6152 case 0x38: /* ffreep sti, undocumented op */
6153 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6154 gen_helper_fpop(cpu_env);
6155 break;
6156 case 0x3c: /* df/4 */
6157 switch(rm) {
6158 case 0:
6159 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6160 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6161 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
6162 break;
6163 default:
6164 goto illegal_op;
6165 }
6166 break;
6167 case 0x3d: /* fucomip */
6168 if (!(s->cpuid_features & CPUID_CMOV)) {
6169 goto illegal_op;
6170 }
6171 gen_update_cc_op(s);
6172 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6173 gen_helper_fucomi_ST0_FT0(cpu_env);
6174 gen_helper_fpop(cpu_env);
6175 set_cc_op(s, CC_OP_EFLAGS);
6176 break;
6177 case 0x3e: /* fcomip */
6178 if (!(s->cpuid_features & CPUID_CMOV)) {
6179 goto illegal_op;
6180 }
6181 gen_update_cc_op(s);
6182 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6183 gen_helper_fcomi_ST0_FT0(cpu_env);
6184 gen_helper_fpop(cpu_env);
6185 set_cc_op(s, CC_OP_EFLAGS);
6186 break;
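/* fcmovcc: fcmovb/fcmove/fcmovbe/fcmovu test CF, ZF, CF|ZF and PF,
   which is what fcmov_cc encodes (jcc codes are shifted left because
   bit 0 of a jcc code selects the negated condition).  Bit 3 of "op"
   distinguishes da (fcmovcc) from db (fcmovncc); it is xored with 1
   because the branch below *skips* the fmov, i.e. it tests the
   inverse of the move condition. */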
6187 case 0x10 ... 0x13: /* fcmovxx */
6188 case 0x18 ... 0x1b:
6189 {
6190 int op1, l1;
6191 static const uint8_t fcmov_cc[8] = {
6192 (JCC_B << 1),
6193 (JCC_Z << 1),
6194 (JCC_BE << 1),
6195 (JCC_P << 1),
6196 };
6198 if (!(s->cpuid_features & CPUID_CMOV)) {
6199 goto illegal_op;
6200 }
6201 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
6202 l1 = gen_new_label();
6203 gen_jcc1_noeob(s, op1, l1);
6204 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6205 gen_set_label(l1);
6206 }
6207 break;
6208 default:
6209 goto illegal_op;
6210 }
6211 }
6212 break;
6213 /************************/
6214 /* string ops */
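/* With a repz/repnz prefix the gen_repz_* generators are used
   instead of the single-step generators.  Roughly, the emitted code
   per iteration is: if ECX == 0 jump to the next insn; do one
   DF-directed element step; decrement ECX; for scas/cmps also test
   ZF against the prefix; then jump back to the current insn.  The
   two eip arguments are the targets of those two jumps. */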
6216 case 0xa4: /* movsS */
6217 case 0xa5:
6218 ot = mo_b_d(b, dflag);
6219 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6220 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6221 } else {
6222 gen_movs(s, ot);
6223 }
6224 break;
6226 case 0xaa: /* stosS */
6227 case 0xab:
6228 ot = mo_b_d(b, dflag);
6229 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6230 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6231 } else {
6232 gen_stos(s, ot);
6233 }
6234 break;
6235 case 0xac: /* lodsS */
6236 case 0xad:
6237 ot = mo_b_d(b, dflag);
6238 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6239 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6240 } else {
6241 gen_lods(s, ot);
6242 }
6243 break;
6244 case 0xae: /* scasS */
6245 case 0xaf:
6246 ot = mo_b_d(b, dflag);
6247 if (prefixes & PREFIX_REPNZ) {
6248 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6249 } else if (prefixes & PREFIX_REPZ) {
6250 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6251 } else {
6252 gen_scas(s, ot);
6253 }
6254 break;
6256 case 0xa6: /* cmpsS */
6257 case 0xa7:
6258 ot = mo_b_d(b, dflag);
6259 if (prefixes & PREFIX_REPNZ) {
6260 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6261 } else if (prefixes & PREFIX_REPZ) {
6262 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6263 } else {
6264 gen_cmps(s, ot);
6265 }
6266 break;
6267 case 0x6c: /* insS */
6268 case 0x6d:
6269 ot = mo_b_d32(b, dflag);
6270 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6271 gen_check_io(s, ot, pc_start - s->cs_base,
6272 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6273 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6274 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6275 } else {
6276 gen_ins(s, ot);
6277 if (use_icount) {
6278 gen_jmp(s, s->pc - s->cs_base);
6279 }
6280 }
6281 break;
6282 case 0x6e: /* outsS */
6283 case 0x6f:
6284 ot = mo_b_d32(b, dflag);
6285 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6286 gen_check_io(s, ot, pc_start - s->cs_base,
6287 svm_is_rep(prefixes) | 4);
6288 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6289 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6290 } else {
6291 gen_outs(s, ot);
6292 if (use_icount) {
6293 gen_jmp(s, s->pc - s->cs_base);
6294 }
6295 }
6296 break;
6298 /************************/
6299 /* port I/O */
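/* For every in/out form, gen_check_io emits the privilege check
   (CPL against IOPL, or the TSS I/O permission bitmap) plus the SVM
   I/O intercept; SVM_IOIO_TYPE_MASK marks the access as an input.
   0xe4..0xe7 take an immediate port number, 0xec..0xef take it from
   DX.  Under icount, the access is bracketed with gen_io_start()/
   gen_io_end() and the TB ends right after it, since an I/O access
   must be the last thing executed in a translation block. */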
6301 case 0xe4:
6302 case 0xe5:
6303 ot = mo_b_d32(b, dflag);
6304 val = cpu_ldub_code(env, s->pc++);
6305 tcg_gen_movi_tl(cpu_T[0], val);
6306 gen_check_io(s, ot, pc_start - s->cs_base,
6307 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6308 if (use_icount)
6309 gen_io_start();
6310 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6311 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6312 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6313 if (use_icount) {
6314 gen_io_end();
6315 gen_jmp(s, s->pc - s->cs_base);
6316 }
6317 break;
6318 case 0xe6:
6319 case 0xe7:
6320 ot = mo_b_d32(b, dflag);
6321 val = cpu_ldub_code(env, s->pc++);
6322 tcg_gen_movi_tl(cpu_T[0], val);
6323 gen_check_io(s, ot, pc_start - s->cs_base,
6324 svm_is_rep(prefixes));
6325 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6327 if (use_icount)
6328 gen_io_start();
6329 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6330 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6331 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6332 if (use_icount) {
6333 gen_io_end();
6334 gen_jmp(s, s->pc - s->cs_base);
6335 }
6336 break;
6337 case 0xec:
6338 case 0xed:
6339 ot = mo_b_d32(b, dflag);
6340 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6341 gen_check_io(s, ot, pc_start - s->cs_base,
6342 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6343 if (use_icount)
6344 gen_io_start();
6345 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6346 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6347 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6348 if (use_icount) {
6349 gen_io_end();
6350 gen_jmp(s, s->pc - s->cs_base);
6351 }
6352 break;
6353 case 0xee:
6354 case 0xef:
6355 ot = mo_b_d32(b, dflag);
6356 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6357 gen_check_io(s, ot, pc_start - s->cs_base,
6358 svm_is_rep(prefixes));
6359 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6361 if (use_icount)
6362 gen_io_start();
6363 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6364 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6365 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6366 if (use_icount) {
6367 gen_io_end();
6368 gen_jmp(s, s->pc - s->cs_base);
6369 }
6370 break;
6372 /************************/
6373 /* control */
6374 case 0xc2: /* ret im */
6375 val = cpu_ldsw_code(env, s->pc);
6376 s->pc += 2;
6377 ot = gen_pop_T0(s);
6378 gen_stack_update(s, val + (1 << ot));
6379 /* Note that gen_pop_T0 uses a zero-extending load. */
6380 gen_op_jmp_v(cpu_T[0]);
6381 gen_eob(s);
6382 break;
6383 case 0xc3: /* ret */
6384 ot = gen_pop_T0(s);
6385 gen_pop_update(s, ot);
6386 /* Note that gen_pop_T0 uses a zero-extending load. */
6387 gen_op_jmp_v(cpu_T[0]);
6388 gen_eob(s);
6389 break;
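/* Far returns pop the offset first and then the CS selector, each
   of operand-size width, and finally release "val" extra bytes of
   stack.  In protected mode this is delegated to a helper that also
   performs the privilege checks and a possible stack switch. */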
6390 case 0xca: /* lret im */
6391 val = cpu_ldsw_code(env, s->pc);
6392 s->pc += 2;
6393 do_lret:
6394 if (s->pe && !s->vm86) {
6395 gen_update_cc_op(s);
6396 gen_jmp_im(pc_start - s->cs_base);
6397 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6398 tcg_const_i32(val));
6399 } else {
6400 gen_stack_A0(s);
6401 /* pop offset */
6402 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6403 /* NOTE: keeping EIP updated is not a problem in case of
6404 exception */
6405 gen_op_jmp_v(cpu_T[0]);
6406 /* pop selector */
6407 gen_op_addl_A0_im(1 << dflag);
6408 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6409 gen_op_movl_seg_T0_vm(R_CS);
6410 /* add stack offset */
6411 gen_stack_update(s, val + (2 << dflag));
6412 }
6413 gen_eob(s);
6414 break;
6415 case 0xcb: /* lret */
6416 val = 0;
6417 goto do_lret;
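/* iret is split three ways: real mode, and vm86 mode with IOPL 3,
   use the iret_real helper (vm86 with IOPL != 3 raises #GP), while
   protected mode uses iret_protected with the full privilege and
   task-switch checks.  Every path can reload EFLAGS, hence
   CC_OP_EFLAGS and the gen_eob() below. */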
6418 case 0xcf: /* iret */
6419 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6420 if (!s->pe) {
6421 /* real mode */
6422 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6423 set_cc_op(s, CC_OP_EFLAGS);
6424 } else if (s->vm86) {
6425 if (s->iopl != 3) {
6426 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6427 } else {
6428 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6429 set_cc_op(s, CC_OP_EFLAGS);
6430 }
6431 } else {
6432 gen_update_cc_op(s);
6433 gen_jmp_im(pc_start - s->cs_base);
6434 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6435 tcg_const_i32(s->pc - s->cs_base));
6436 set_cc_op(s, CC_OP_EFLAGS);
6437 }
6438 gen_eob(s);
6439 break;
6440 case 0xe8: /* call im */
6441 {
6442 if (dflag != MO_16) {
6443 tval = (int32_t)insn_get(env, s, MO_32);
6444 } else {
6445 tval = (int16_t)insn_get(env, s, MO_16);
6446 }
6447 next_eip = s->pc - s->cs_base;
6448 tval += next_eip;
6449 if (dflag == MO_16) {
6450 tval &= 0xffff;
6451 } else if (!CODE64(s)) {
6452 tval &= 0xffffffff;
6453 }
6454 tcg_gen_movi_tl(cpu_T[0], next_eip);
6455 gen_push_v(s, cpu_T[0]);
6456 gen_jmp(s, tval);
6457 }
6458 break;
6459 case 0x9a: /* lcall im */
6460 {
6461 unsigned int selector, offset;
6463 if (CODE64(s))
6464 goto illegal_op;
6465 ot = dflag;
6466 offset = insn_get(env, s, ot);
6467 selector = insn_get(env, s, MO_16);
6469 tcg_gen_movi_tl(cpu_T[0], selector);
6470 tcg_gen_movi_tl(cpu_T[1], offset);
6471 }
6472 goto do_lcall;
6473 case 0xe9: /* jmp im */
6474 if (dflag != MO_16) {
6475 tval = (int32_t)insn_get(env, s, MO_32);
6476 } else {
6477 tval = (int16_t)insn_get(env, s, MO_16);
6478 }
6479 tval += s->pc - s->cs_base;
6480 if (dflag == MO_16) {
6481 tval &= 0xffff;
6482 } else if (!CODE64(s)) {
6483 tval &= 0xffffffff;
6484 }
6485 gen_jmp(s, tval);
6486 break;
6487 case 0xea: /* ljmp im */
6488 {
6489 unsigned int selector, offset;
6491 if (CODE64(s))
6492 goto illegal_op;
6493 ot = dflag;
6494 offset = insn_get(env, s, ot);
6495 selector = insn_get(env, s, MO_16);
6497 tcg_gen_movi_tl(cpu_T[0], selector);
6498 tcg_gen_movi_tl(cpu_T[1], offset);
6499 }
6500 goto do_ljmp;
6501 case 0xeb: /* jmp Jb */
6502 tval = (int8_t)insn_get(env, s, MO_8);
6503 tval += s->pc - s->cs_base;
6504 if (dflag == MO_16) {
6505 tval &= 0xffff;
6506 }
6507 gen_jmp(s, tval);
6508 break;
6509 case 0x70 ... 0x7f: /* jcc Jb */
6510 tval = (int8_t)insn_get(env, s, MO_8);
6511 goto do_jcc;
6512 case 0x180 ... 0x18f: /* jcc Jv */
6513 if (dflag != MO_16) {
6514 tval = (int32_t)insn_get(env, s, MO_32);
6515 } else {
6516 tval = (int16_t)insn_get(env, s, MO_16);
6517 }
6518 do_jcc:
6519 next_eip = s->pc - s->cs_base;
6520 tval += next_eip;
6521 if (dflag == MO_16) {
6522 tval &= 0xffff;
6523 }
6524 gen_jcc(s, b, tval, next_eip);
6525 break;
6527 case 0x190 ... 0x19f: /* setcc Eb */
6528 modrm = cpu_ldub_code(env, s->pc++);
6529 gen_setcc1(s, b, cpu_T[0]);
6530 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6531 break;
6532 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6533 if (!(s->cpuid_features & CPUID_CMOV)) {
6534 goto illegal_op;
6535 }
6536 ot = dflag;
6537 modrm = cpu_ldub_code(env, s->pc++);
6538 reg = ((modrm >> 3) & 7) | rex_r;
6539 gen_cmovcc1(env, s, ot, b, modrm, reg);
6540 break;
6542 /************************/
6543 /* flags */
6544 case 0x9c: /* pushf */
6545 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6546 if (s->vm86 && s->iopl != 3) {
6547 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6548 } else {
6549 gen_update_cc_op(s);
6550 gen_helper_read_eflags(cpu_T[0], cpu_env);
6551 gen_push_v(s, cpu_T[0]);
6552 }
6553 break;
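/* popf may only update a subset of EFLAGS, depending on privilege:
   at CPL 0 both IF and IOPL are writable; at CPL <= IOPL, IF is
   writable but IOPL is not; otherwise neither is.  TF, AC, ID and
   NT are always writable here, and a 16-bit operand size replaces
   only the low 16 bits.  The mask passed to write_eflags in each
   branch below encodes exactly that. */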
6554 case 0x9d: /* popf */
6555 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6556 if (s->vm86 && s->iopl != 3) {
6557 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6558 } else {
6559 ot = gen_pop_T0(s);
6560 if (s->cpl == 0) {
6561 if (dflag != MO_16) {
6562 gen_helper_write_eflags(cpu_env, cpu_T[0],
6563 tcg_const_i32((TF_MASK | AC_MASK |
6564 ID_MASK | NT_MASK |
6565 IF_MASK |
6566 IOPL_MASK)));
6567 } else {
6568 gen_helper_write_eflags(cpu_env, cpu_T[0],
6569 tcg_const_i32((TF_MASK | AC_MASK |
6570 ID_MASK | NT_MASK |
6571 IF_MASK | IOPL_MASK)
6572 & 0xffff));
6573 }
6574 } else {
6575 if (s->cpl <= s->iopl) {
6576 if (dflag != MO_16) {
6577 gen_helper_write_eflags(cpu_env, cpu_T[0],
6578 tcg_const_i32((TF_MASK |
6579 AC_MASK |
6580 ID_MASK |
6581 NT_MASK |
6582 IF_MASK)));
6583 } else {
6584 gen_helper_write_eflags(cpu_env, cpu_T[0],
6585 tcg_const_i32((TF_MASK |
6586 AC_MASK |
6587 ID_MASK |
6588 NT_MASK |
6589 IF_MASK)
6590 & 0xffff));
6591 }
6592 } else {
6593 if (dflag != MO_16) {
6594 gen_helper_write_eflags(cpu_env, cpu_T[0],
6595 tcg_const_i32((TF_MASK | AC_MASK |
6596 ID_MASK | NT_MASK)));
6597 } else {
6598 gen_helper_write_eflags(cpu_env, cpu_T[0],
6599 tcg_const_i32((TF_MASK | AC_MASK |
6600 ID_MASK | NT_MASK)
6601 & 0xffff));
6602 }
6603 }
6604 }
6605 gen_pop_update(s, ot);
6606 set_cc_op(s, CC_OP_EFLAGS);
6607 /* abort translation because TF/AC flag may change */
6608 gen_jmp_im(s->pc - s->cs_base);
6609 gen_eob(s);
6610 }
6611 break;
6612 case 0x9e: /* sahf */
6613 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6614 goto illegal_op;
6615 gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
6616 gen_compute_eflags(s);
6617 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6618 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6619 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6620 break;
6621 case 0x9f: /* lahf */
6622 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6623 goto illegal_op;
6624 gen_compute_eflags(s);
6625 /* Note: gen_compute_eflags() only gives the condition codes */
6626 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6627 gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
6628 break;
6629 case 0xf5: /* cmc */
6630 gen_compute_eflags(s);
6631 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6632 break;
6633 case 0xf8: /* clc */
6634 gen_compute_eflags(s);
6635 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6636 break;
6637 case 0xf9: /* stc */
6638 gen_compute_eflags(s);
6639 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6640 break;
6641 case 0xfc: /* cld */
6642 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6643 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6644 break;
6645 case 0xfd: /* std */
6646 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6647 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6648 break;
6650 /************************/
6651 /* bit operations */
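/* bt/bts/btr/btc: the bit offset comes from an immediate (masked to
   the operand width at bt_op) or from a register, in which case it
   may address memory beyond the operand: the effective address is
   advanced by (bitoffset >> (3 + ot)) << ot bytes, i.e. by whole
   operand-sized words.  For example, with a 32-bit operand and a
   register offset of 100, the address moves by (100 >> 5) << 2 = 12
   bytes and bit 100 & 31 = 4 of that dword is tested. */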
6652 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6653 ot = dflag;
6654 modrm = cpu_ldub_code(env, s->pc++);
6655 op = (modrm >> 3) & 7;
6656 mod = (modrm >> 6) & 3;
6657 rm = (modrm & 7) | REX_B(s);
6658 if (mod != 3) {
6659 s->rip_offset = 1;
6660 gen_lea_modrm(env, s, modrm);
6661 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6662 } else {
6663 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6665 /* load shift */
6666 val = cpu_ldub_code(env, s->pc++);
6667 tcg_gen_movi_tl(cpu_T[1], val);
6668 if (op < 4)
6669 goto illegal_op;
6670 op -= 4;
6671 goto bt_op;
6672 case 0x1a3: /* bt Gv, Ev */
6673 op = 0;
6674 goto do_btx;
6675 case 0x1ab: /* bts */
6676 op = 1;
6677 goto do_btx;
6678 case 0x1b3: /* btr */
6679 op = 2;
6680 goto do_btx;
6681 case 0x1bb: /* btc */
6682 op = 3;
6683 do_btx:
6684 ot = dflag;
6685 modrm = cpu_ldub_code(env, s->pc++);
6686 reg = ((modrm >> 3) & 7) | rex_r;
6687 mod = (modrm >> 6) & 3;
6688 rm = (modrm & 7) | REX_B(s);
6689 gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
6690 if (mod != 3) {
6691 gen_lea_modrm(env, s, modrm);
6692 /* specific case: the register bit offset may exceed the operand size, so add a word displacement */
6693 gen_exts(ot, cpu_T[1]);
6694 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6695 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6696 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6697 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6698 } else {
6699 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6700 }
6701 bt_op:
6702 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6703 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6704 switch(op) {
6705 case 0:
6706 break;
6707 case 1:
6708 tcg_gen_movi_tl(cpu_tmp0, 1);
6709 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6710 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6711 break;
6712 case 2:
6713 tcg_gen_movi_tl(cpu_tmp0, 1);
6714 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6715 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6716 break;
6717 default:
6718 case 3:
6719 tcg_gen_movi_tl(cpu_tmp0, 1);
6720 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6721 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6722 break;
6723 }
6724 if (op != 0) {
6725 if (mod != 3) {
6726 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6727 } else {
6728 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
6729 }
6730 }
6732 /* Delay all CC updates until after the store above. Note that
6733 C is the result of the test, Z is unchanged, and the others
6734 are all undefined. */
6735 switch (s->cc_op) {
6736 case CC_OP_MULB ... CC_OP_MULQ:
6737 case CC_OP_ADDB ... CC_OP_ADDQ:
6738 case CC_OP_ADCB ... CC_OP_ADCQ:
6739 case CC_OP_SUBB ... CC_OP_SUBQ:
6740 case CC_OP_SBBB ... CC_OP_SBBQ:
6741 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6742 case CC_OP_INCB ... CC_OP_INCQ:
6743 case CC_OP_DECB ... CC_OP_DECQ:
6744 case CC_OP_SHLB ... CC_OP_SHLQ:
6745 case CC_OP_SARB ... CC_OP_SARQ:
6746 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6747 /* Z was going to be computed from the non-zero status of CC_DST.
6748 We can get that same Z value (and the new C value) by leaving
6749 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6750 same width. */
6751 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6752 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6753 break;
6754 default:
6755 /* Otherwise, generate EFLAGS and replace the C bit. */
6756 gen_compute_eflags(s);
6757 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6758 ctz32(CC_C), 1);
6759 break;
6760 }
6761 break;
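/* The clz/ctz helpers below work on a full target_ulong, so the
   result needs adjusting for narrower operands.  A sketch of the
   tzcnt fixup, with size = 8 << ot:
       mask = (target_ulong)-2 << (size - 1);  // "size" low bits clear
       ctz(x | mask) == min(ctz(x), size)
   so a zero input correctly returns the operand size; lzcnt instead
   subtracts TARGET_LONG_BITS - size from the full-width clz. */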
6762 case 0x1bc: /* bsf / tzcnt */
6763 case 0x1bd: /* bsr / lzcnt */
6764 ot = dflag;
6765 modrm = cpu_ldub_code(env, s->pc++);
6766 reg = ((modrm >> 3) & 7) | rex_r;
6767 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6768 gen_extu(ot, cpu_T[0]);
6770 /* Note that lzcnt and tzcnt are in different extensions. */
6771 if ((prefixes & PREFIX_REPZ)
6772 && (b & 1
6773 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6774 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6775 int size = 8 << ot;
6776 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6777 if (b & 1) {
6778 /* For lzcnt, reduce the target_ulong result by the
6779 number of zeros that we expect to find at the top. */
6780 gen_helper_clz(cpu_T[0], cpu_T[0]);
6781 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6782 } else {
6783 /* For tzcnt, a zero input must return the operand size:
6784 force all bits outside the operand size to 1. */
6785 target_ulong mask = (target_ulong)-2 << (size - 1);
6786 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6787 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6788 }
6789 /* For lzcnt/tzcnt, C and Z bits are defined and are
6790 related to the result. */
6791 gen_op_update1_cc();
6792 set_cc_op(s, CC_OP_BMILGB + ot);
6793 } else {
6794 /* For bsr/bsf, only the Z bit is defined and it is related
6795 to the input and not the result. */
6796 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6797 set_cc_op(s, CC_OP_LOGICB + ot);
6798 if (b & 1) {
6799 /* For bsr, return the bit index of the first 1 bit,
6800 not the count of leading zeros. */
6801 gen_helper_clz(cpu_T[0], cpu_T[0]);
6802 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6803 } else {
6804 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6805 }
6806 /* ??? The manual says that the output is undefined when the
6807 input is zero, but real hardware leaves it unchanged, and
6808 real programs appear to depend on that. */
6809 tcg_gen_movi_tl(cpu_tmp0, 0);
6810 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6811 cpu_regs[reg], cpu_T[0]);
6812 }
6813 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
6814 break;
6815 /************************/
6816 /* bcd */
6817 case 0x27: /* daa */
6818 if (CODE64(s))
6819 goto illegal_op;
6820 gen_update_cc_op(s);
6821 gen_helper_daa(cpu_env);
6822 set_cc_op(s, CC_OP_EFLAGS);
6823 break;
6824 case 0x2f: /* das */
6825 if (CODE64(s))
6826 goto illegal_op;
6827 gen_update_cc_op(s);
6828 gen_helper_das(cpu_env);
6829 set_cc_op(s, CC_OP_EFLAGS);
6830 break;
6831 case 0x37: /* aaa */
6832 if (CODE64(s))
6833 goto illegal_op;
6834 gen_update_cc_op(s);
6835 gen_helper_aaa(cpu_env);
6836 set_cc_op(s, CC_OP_EFLAGS);
6837 break;
6838 case 0x3f: /* aas */
6839 if (CODE64(s))
6840 goto illegal_op;
6841 gen_update_cc_op(s);
6842 gen_helper_aas(cpu_env);
6843 set_cc_op(s, CC_OP_EFLAGS);
6844 break;
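/* aam divides AL by its immediate operand (10 in the usual
   encoding, but any byte is accepted), leaving the quotient in AH
   and the remainder in AL; an immediate of zero therefore raises a
   divide-error exception, hence the explicit check below. */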
6845 case 0xd4: /* aam */
6846 if (CODE64(s))
6847 goto illegal_op;
6848 val = cpu_ldub_code(env, s->pc++);
6849 if (val == 0) {
6850 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6851 } else {
6852 gen_helper_aam(cpu_env, tcg_const_i32(val));
6853 set_cc_op(s, CC_OP_LOGICB);
6854 }
6855 break;
6856 case 0xd5: /* aad */
6857 if (CODE64(s))
6858 goto illegal_op;
6859 val = cpu_ldub_code(env, s->pc++);
6860 gen_helper_aad(cpu_env, tcg_const_i32(val));
6861 set_cc_op(s, CC_OP_LOGICB);
6862 break;
6863 /************************/
6864 /* misc */
6865 case 0x90: /* nop */
6866 /* XXX: correct lock test for all insn */
6867 if (prefixes & PREFIX_LOCK) {
6868 goto illegal_op;
6869 }
6870 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6871 if (REX_B(s)) {
6872 goto do_xchg_reg_eax;
6873 }
6874 if (prefixes & PREFIX_REPZ) {
6875 gen_update_cc_op(s);
6876 gen_jmp_im(pc_start - s->cs_base);
6877 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6878 s->is_jmp = DISAS_TB_JUMP;
6879 }
6880 break;
6881 case 0x9b: /* fwait */
6882 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6883 (HF_MP_MASK | HF_TS_MASK)) {
6884 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6885 } else {
6886 gen_update_cc_op(s);
6887 gen_jmp_im(pc_start - s->cs_base);
6888 gen_helper_fwait(cpu_env);
6889 }
6890 break;
6891 case 0xcc: /* int3 */
6892 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6893 break;
6894 case 0xcd: /* int N */
6895 val = cpu_ldub_code(env, s->pc++);
6896 if (s->vm86 && s->iopl != 3) {
6897 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6898 } else {
6899 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6900 }
6901 break;
6902 case 0xce: /* into */
6903 if (CODE64(s))
6904 goto illegal_op;
6905 gen_update_cc_op(s);
6906 gen_jmp_im(pc_start - s->cs_base);
6907 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6908 break;
6909 #ifdef WANT_ICEBP
6910 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6911 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6912 #if 1
6913 gen_debug(s, pc_start - s->cs_base);
6914 #else
6915 /* start debug */
6916 tb_flush(env);
6917 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6918 #endif
6919 break;
6920 #endif
6921 case 0xfa: /* cli */
6922 if (!s->vm86) {
6923 if (s->cpl <= s->iopl) {
6924 gen_helper_cli(cpu_env);
6925 } else {
6926 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6927 }
6928 } else {
6929 if (s->iopl == 3) {
6930 gen_helper_cli(cpu_env);
6931 } else {
6932 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6933 }
6934 }
6935 break;
6936 case 0xfb: /* sti */
6937 if (!s->vm86) {
6938 if (s->cpl <= s->iopl) {
6939 gen_sti:
6940 gen_helper_sti(cpu_env);
6941 /* interrupts are recognized only after the insn following sti */
6942 /* if several consecutive insns inhibit irqs, only the
6943 _first_ one needs to set the inhibit flag */
6944 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6945 gen_helper_set_inhibit_irq(cpu_env);
6946 /* give a chance to handle pending irqs */
6947 gen_jmp_im(s->pc - s->cs_base);
6948 gen_eob(s);
6949 } else {
6950 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6951 }
6952 } else {
6953 if (s->iopl == 3) {
6954 goto gen_sti;
6955 } else {
6956 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6957 }
6958 }
6959 break;
6960 case 0x62: /* bound */
6961 if (CODE64(s))
6962 goto illegal_op;
6963 ot = dflag;
6964 modrm = cpu_ldub_code(env, s->pc++);
6965 reg = (modrm >> 3) & 7;
6966 mod = (modrm >> 6) & 3;
6967 if (mod == 3)
6968 goto illegal_op;
6969 gen_op_mov_v_reg(ot, cpu_T[0], reg);
6970 gen_lea_modrm(env, s, modrm);
6971 gen_jmp_im(pc_start - s->cs_base);
6972 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6973 if (ot == MO_16) {
6974 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6975 } else {
6976 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6977 }
6978 break;
6979 case 0x1c8 ... 0x1cf: /* bswap reg */
6980 reg = (b & 7) | REX_B(s);
6981 #ifdef TARGET_X86_64
6982 if (dflag == MO_64) {
6983 gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
6984 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6985 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
6986 } else
6987 #endif
6988 {
6989 gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
6990 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6991 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6992 gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
6993 }
6994 break;
6995 case 0xd6: /* salc */
6996 if (CODE64(s))
6997 goto illegal_op;
6998 gen_compute_eflags_c(s, cpu_T[0]);
6999 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
7000 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
7001 break;
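/* loopcc/jecxz use three labels: l1 is the taken branch (jump to
   the displacement target), l2 is the common exit and l3 is the
   not-taken path of loopz/loopnz, which falls through to the next
   insn.  The ECX test honours the address-size flag, so with a
   16-bit address size CX is tested instead of ECX. */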
7002 case 0xe0: /* loopnz */
7003 case 0xe1: /* loopz */
7004 case 0xe2: /* loop */
7005 case 0xe3: /* jecxz */
7006 {
7007 int l1, l2, l3;
7009 tval = (int8_t)insn_get(env, s, MO_8);
7010 next_eip = s->pc - s->cs_base;
7011 tval += next_eip;
7012 if (dflag == MO_16) {
7013 tval &= 0xffff;
7014 }
7016 l1 = gen_new_label();
7017 l2 = gen_new_label();
7018 l3 = gen_new_label();
7019 b &= 3;
7020 switch(b) {
7021 case 0: /* loopnz */
7022 case 1: /* loopz */
7023 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7024 gen_op_jz_ecx(s->aflag, l3);
7025 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7026 break;
7027 case 2: /* loop */
7028 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7029 gen_op_jnz_ecx(s->aflag, l1);
7030 break;
7031 default:
7032 case 3: /* jcxz */
7033 gen_op_jz_ecx(s->aflag, l1);
7034 break;
7035 }
7037 gen_set_label(l3);
7038 gen_jmp_im(next_eip);
7039 tcg_gen_br(l2);
7041 gen_set_label(l1);
7042 gen_jmp_im(tval);
7043 gen_set_label(l2);
7044 gen_eob(s);
7045 }
7046 break;
7047 case 0x130: /* wrmsr */
7048 case 0x132: /* rdmsr */
7049 if (s->cpl != 0) {
7050 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7051 } else {
7052 gen_update_cc_op(s);
7053 gen_jmp_im(pc_start - s->cs_base);
7054 if (b & 2) {
7055 gen_helper_rdmsr(cpu_env);
7056 } else {
7057 gen_helper_wrmsr(cpu_env);
7058 }
7059 }
7060 break;
7061 case 0x131: /* rdtsc */
7062 gen_update_cc_op(s);
7063 gen_jmp_im(pc_start - s->cs_base);
7064 if (use_icount)
7065 gen_io_start();
7066 gen_helper_rdtsc(cpu_env);
7067 if (use_icount) {
7068 gen_io_end();
7069 gen_jmp(s, s->pc - s->cs_base);
7070 }
7071 break;
7072 case 0x133: /* rdpmc */
7073 gen_update_cc_op(s);
7074 gen_jmp_im(pc_start - s->cs_base);
7075 gen_helper_rdpmc(cpu_env);
7076 break;
7077 case 0x134: /* sysenter */
7078 /* SYSENTER remains valid in 64-bit mode on Intel CPUs only */
7079 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7080 goto illegal_op;
7081 if (!s->pe) {
7082 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7083 } else {
7084 gen_update_cc_op(s);
7085 gen_jmp_im(pc_start - s->cs_base);
7086 gen_helper_sysenter(cpu_env);
7087 gen_eob(s);
7088 }
7089 break;
7090 case 0x135: /* sysexit */
7091 /* SYSEXIT remains valid in 64-bit mode on Intel CPUs only */
7092 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7093 goto illegal_op;
7094 if (!s->pe) {
7095 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7096 } else {
7097 gen_update_cc_op(s);
7098 gen_jmp_im(pc_start - s->cs_base);
7099 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7100 gen_eob(s);
7101 }
7102 break;
7103 #ifdef TARGET_X86_64
7104 case 0x105: /* syscall */
7105 /* XXX: is it usable in real mode ? */
7106 gen_update_cc_op(s);
7107 gen_jmp_im(pc_start - s->cs_base);
7108 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7109 gen_eob(s);
7110 break;
7111 case 0x107: /* sysret */
7112 if (!s->pe) {
7113 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7114 } else {
7115 gen_update_cc_op(s);
7116 gen_jmp_im(pc_start - s->cs_base);
7117 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7118 /* condition codes are modified only in long mode */
7119 if (s->lma) {
7120 set_cc_op(s, CC_OP_EFLAGS);
7121 }
7122 gen_eob(s);
7123 }
7124 break;
7125 #endif
7126 case 0x1a2: /* cpuid */
7127 gen_update_cc_op(s);
7128 gen_jmp_im(pc_start - s->cs_base);
7129 gen_helper_cpuid(cpu_env);
7130 break;
7131 case 0xf4: /* hlt */
7132 if (s->cpl != 0) {
7133 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7134 } else {
7135 gen_update_cc_op(s);
7136 gen_jmp_im(pc_start - s->cs_base);
7137 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7138 s->is_jmp = DISAS_TB_JUMP;
7139 }
7140 break;
7141 case 0x100:
7142 modrm = cpu_ldub_code(env, s->pc++);
7143 mod = (modrm >> 6) & 3;
7144 op = (modrm >> 3) & 7;
7145 switch(op) {
7146 case 0: /* sldt */
7147 if (!s->pe || s->vm86)
7148 goto illegal_op;
7149 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7150 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7151 ot = mod == 3 ? dflag : MO_16;
7152 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7153 break;
7154 case 2: /* lldt */
7155 if (!s->pe || s->vm86)
7156 goto illegal_op;
7157 if (s->cpl != 0) {
7158 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7159 } else {
7160 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7161 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7162 gen_jmp_im(pc_start - s->cs_base);
7163 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7164 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7165 }
7166 break;
7167 case 1: /* str */
7168 if (!s->pe || s->vm86)
7169 goto illegal_op;
7170 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7171 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7172 ot = mod == 3 ? dflag : MO_16;
7173 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7174 break;
7175 case 3: /* ltr */
7176 if (!s->pe || s->vm86)
7177 goto illegal_op;
7178 if (s->cpl != 0) {
7179 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7180 } else {
7181 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7182 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7183 gen_jmp_im(pc_start - s->cs_base);
7184 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7185 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7186 }
7187 break;
7188 case 4: /* verr */
7189 case 5: /* verw */
7190 if (!s->pe || s->vm86)
7191 goto illegal_op;
7192 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7193 gen_update_cc_op(s);
7194 if (op == 4) {
7195 gen_helper_verr(cpu_env, cpu_T[0]);
7196 } else {
7197 gen_helper_verw(cpu_env, cpu_T[0]);
7198 }
7199 set_cc_op(s, CC_OP_EFLAGS);
7200 break;
7201 default:
7202 goto illegal_op;
7203 }
7204 break;
7205 case 0x101:
7206 modrm = cpu_ldub_code(env, s->pc++);
7207 mod = (modrm >> 6) & 3;
7208 op = (modrm >> 3) & 7;
7209 rm = modrm & 7;
7210 switch(op) {
7211 case 0: /* sgdt */
7212 if (mod == 3)
7213 goto illegal_op;
7214 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7215 gen_lea_modrm(env, s, modrm);
7216 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7217 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7218 gen_add_A0_im(s, 2);
7219 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7220 if (dflag == MO_16) {
7221 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7222 }
7223 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7224 break;
7225 case 1:
7226 if (mod == 3) {
7227 switch (rm) {
7228 case 0: /* monitor */
7229 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7230 s->cpl != 0)
7231 goto illegal_op;
7232 gen_update_cc_op(s);
7233 gen_jmp_im(pc_start - s->cs_base);
7234 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7235 gen_extu(s->aflag, cpu_A0);
7236 gen_add_A0_ds_seg(s);
7237 gen_helper_monitor(cpu_env, cpu_A0);
7238 break;
7239 case 1: /* mwait */
7240 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7241 s->cpl != 0)
7242 goto illegal_op;
7243 gen_update_cc_op(s);
7244 gen_jmp_im(pc_start - s->cs_base);
7245 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7246 gen_eob(s);
7247 break;
7248 case 2: /* clac */
7249 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7250 s->cpl != 0) {
7251 goto illegal_op;
7252 }
7253 gen_helper_clac(cpu_env);
7254 gen_jmp_im(s->pc - s->cs_base);
7255 gen_eob(s);
7256 break;
7257 case 3: /* stac */
7258 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7259 s->cpl != 0) {
7260 goto illegal_op;
7261 }
7262 gen_helper_stac(cpu_env);
7263 gen_jmp_im(s->pc - s->cs_base);
7264 gen_eob(s);
7265 break;
7266 default:
7267 goto illegal_op;
7268 }
7269 } else { /* sidt */
7270 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7271 gen_lea_modrm(env, s, modrm);
7272 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7273 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7274 gen_add_A0_im(s, 2);
7275 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7276 if (dflag == MO_16) {
7277 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7278 }
7279 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7280 }
7281 break;
7282 case 2: /* lgdt */
7283 case 3: /* lidt */
7284 if (mod == 3) {
7285 gen_update_cc_op(s);
7286 gen_jmp_im(pc_start - s->cs_base);
7287 switch(rm) {
7288 case 0: /* VMRUN */
7289 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7290 goto illegal_op;
7291 if (s->cpl != 0) {
7292 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7293 break;
7294 } else {
7295 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7296 tcg_const_i32(s->pc - pc_start));
7297 tcg_gen_exit_tb(0);
7298 s->is_jmp = DISAS_TB_JUMP;
7299 }
7300 break;
7301 case 1: /* VMMCALL */
7302 if (!(s->flags & HF_SVME_MASK))
7303 goto illegal_op;
7304 gen_helper_vmmcall(cpu_env);
7305 break;
7306 case 2: /* VMLOAD */
7307 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7308 goto illegal_op;
7309 if (s->cpl != 0) {
7310 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7311 break;
7312 } else {
7313 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7314 }
7315 break;
7316 case 3: /* VMSAVE */
7317 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7318 goto illegal_op;
7319 if (s->cpl != 0) {
7320 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7321 break;
7322 } else {
7323 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7324 }
7325 break;
7326 case 4: /* STGI */
7327 if ((!(s->flags & HF_SVME_MASK) &&
7328 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7329 !s->pe)
7330 goto illegal_op;
7331 if (s->cpl != 0) {
7332 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7333 break;
7334 } else {
7335 gen_helper_stgi(cpu_env);
7336 }
7337 break;
7338 case 5: /* CLGI */
7339 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7340 goto illegal_op;
7341 if (s->cpl != 0) {
7342 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7343 break;
7344 } else {
7345 gen_helper_clgi(cpu_env);
7346 }
7347 break;
7348 case 6: /* SKINIT */
7349 if ((!(s->flags & HF_SVME_MASK) &&
7350 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7351 !s->pe)
7352 goto illegal_op;
7353 gen_helper_skinit(cpu_env);
7354 break;
7355 case 7: /* INVLPGA */
7356 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7357 goto illegal_op;
7358 if (s->cpl != 0) {
7359 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7360 break;
7361 } else {
7362 gen_helper_invlpga(cpu_env,
7363 tcg_const_i32(s->aflag - 1));
7364 }
7365 break;
7366 default:
7367 goto illegal_op;
7368 }
7369 } else if (s->cpl != 0) {
7370 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7371 } else {
7372 gen_svm_check_intercept(s, pc_start,
7373 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7374 gen_lea_modrm(env, s, modrm);
7375 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
7376 gen_add_A0_im(s, 2);
7377 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7378 if (dflag == MO_16) {
7379 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7380 }
7381 if (op == 2) {
7382 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7383 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7384 } else {
7385 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7386 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7387 }
7388 }
7389 break;
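/* smsw stores the low 16 bits of CR0.  The ld32u below reads the
   low 32 bits of the cr[0] field; on a big-endian host with a
   64-bit guest those live at offset +4 within the 64-bit slot,
   hence the #if adjustment that follows. */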
7390 case 4: /* smsw */
7391 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7392 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7393 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7394 #else
7395 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7396 #endif
7397 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
7398 break;
7399 case 6: /* lmsw */
7400 if (s->cpl != 0) {
7401 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7402 } else {
7403 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7404 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7405 gen_helper_lmsw(cpu_env, cpu_T[0]);
7406 gen_jmp_im(s->pc - s->cs_base);
7407 gen_eob(s);
7408 }
7409 break;
7410 case 7:
7411 if (mod != 3) { /* invlpg */
7412 if (s->cpl != 0) {
7413 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7414 } else {
7415 gen_update_cc_op(s);
7416 gen_jmp_im(pc_start - s->cs_base);
7417 gen_lea_modrm(env, s, modrm);
7418 gen_helper_invlpg(cpu_env, cpu_A0);
7419 gen_jmp_im(s->pc - s->cs_base);
7420 gen_eob(s);
7421 }
7422 } else {
7423 switch (rm) {
7424 case 0: /* swapgs */
7425 #ifdef TARGET_X86_64
7426 if (CODE64(s)) {
7427 if (s->cpl != 0) {
7428 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7429 } else {
7430 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7431 offsetof(CPUX86State,segs[R_GS].base));
7432 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7433 offsetof(CPUX86State,kernelgsbase));
7434 tcg_gen_st_tl(cpu_T[1], cpu_env,
7435 offsetof(CPUX86State,segs[R_GS].base));
7436 tcg_gen_st_tl(cpu_T[0], cpu_env,
7437 offsetof(CPUX86State,kernelgsbase));
7438 }
7439 } else
7440 #endif
7441 {
7442 goto illegal_op;
7443 }
7444 break;
7445 case 1: /* rdtscp */
7446 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7447 goto illegal_op;
7448 gen_update_cc_op(s);
7449 gen_jmp_im(pc_start - s->cs_base);
7450 if (use_icount)
7451 gen_io_start();
7452 gen_helper_rdtscp(cpu_env);
7453 if (use_icount) {
7454 gen_io_end();
7455 gen_jmp(s, s->pc - s->cs_base);
7456 }
7457 break;
7458 default:
7459 goto illegal_op;
7460 }
7461 }
7462 break;
7463 default:
7464 goto illegal_op;
7465 }
7466 break;
7467 case 0x108: /* invd */
7468 case 0x109: /* wbinvd */
7469 if (s->cpl != 0) {
7470 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7471 } else {
7472 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7473 /* nothing to do */
7474 }
7475 break;
7476 case 0x63: /* arpl or movslS (x86_64) */
7477 #ifdef TARGET_X86_64
7478 if (CODE64(s)) {
7479 int d_ot;
7480 /* d_ot is the size of destination */
7481 d_ot = dflag;
7483 modrm = cpu_ldub_code(env, s->pc++);
7484 reg = ((modrm >> 3) & 7) | rex_r;
7485 mod = (modrm >> 6) & 3;
7486 rm = (modrm & 7) | REX_B(s);
7488 if (mod == 3) {
7489 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
7490 /* sign extend */
7491 if (d_ot == MO_64) {
7492 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7494 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7495 } else {
7496 gen_lea_modrm(env, s, modrm);
7497 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
7498 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7499 }
7500 } else
7501 #endif
7502 {
7503 int label1;
7504 TCGv t0, t1, t2, a0;
7506 if (!s->pe || s->vm86)
7507 goto illegal_op;
7508 t0 = tcg_temp_local_new();
7509 t1 = tcg_temp_local_new();
7510 t2 = tcg_temp_local_new();
7511 ot = MO_16;
7512 modrm = cpu_ldub_code(env, s->pc++);
7513 reg = (modrm >> 3) & 7;
7514 mod = (modrm >> 6) & 3;
7515 rm = modrm & 7;
7516 if (mod != 3) {
7517 gen_lea_modrm(env, s, modrm);
7518 gen_op_ld_v(s, ot, t0, cpu_A0);
7519 a0 = tcg_temp_local_new();
7520 tcg_gen_mov_tl(a0, cpu_A0);
7521 } else {
7522 gen_op_mov_v_reg(ot, t0, rm);
7523 TCGV_UNUSED(a0);
7525 gen_op_mov_v_reg(ot, t1, reg);
7526 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7527 tcg_gen_andi_tl(t1, t1, 3);
7528 tcg_gen_movi_tl(t2, 0);
7529 label1 = gen_new_label();
7530 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7531 tcg_gen_andi_tl(t0, t0, ~3);
7532 tcg_gen_or_tl(t0, t0, t1);
7533 tcg_gen_movi_tl(t2, CC_Z);
7534 gen_set_label(label1);
7535 if (mod != 3) {
7536 gen_op_st_v(s, ot, t0, a0);
7537 tcg_temp_free(a0);
7538 } else {
7539 gen_op_mov_reg_v(ot, rm, t0);
7541 gen_compute_eflags(s);
7542 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7543 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7544 tcg_temp_free(t0);
7545 tcg_temp_free(t1);
7546 tcg_temp_free(t2);
7547 }
7548 break;
7549 case 0x102: /* lar */
7550 case 0x103: /* lsl */
7551 {
7552 int label1;
7553 TCGv t0;
7554 if (!s->pe || s->vm86)
7555 goto illegal_op;
7556 ot = dflag != MO_16 ? MO_32 : MO_16;
7557 modrm = cpu_ldub_code(env, s->pc++);
7558 reg = ((modrm >> 3) & 7) | rex_r;
7559 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7560 t0 = tcg_temp_local_new();
7561 gen_update_cc_op(s);
7562 if (b == 0x102) {
7563 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7564 } else {
7565 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7566 }
7567 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7568 label1 = gen_new_label();
7569 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7570 gen_op_mov_reg_v(ot, reg, t0);
7571 gen_set_label(label1);
7572 set_cc_op(s, CC_OP_EFLAGS);
7573 tcg_temp_free(t0);
7574 }
7575 break;
7576 case 0x118:
7577 modrm = cpu_ldub_code(env, s->pc++);
7578 mod = (modrm >> 6) & 3;
7579 op = (modrm >> 3) & 7;
7580 switch(op) {
7581 case 0: /* prefetchnta */
7582 case 1: /* prefetcht0 */
7583 case 2: /* prefetcht1 */
7584 case 3: /* prefetcht2 */
7585 if (mod == 3)
7586 goto illegal_op;
7587 gen_lea_modrm(env, s, modrm);
7588 /* nothing more to do */
7589 break;
7590 default: /* nop (multi byte) */
7591 gen_nop_modrm(env, s, modrm);
7592 break;
7593 }
7594 break;
7595 case 0x119 ... 0x11f: /* nop (multi byte) */
7596 modrm = cpu_ldub_code(env, s->pc++);
7597 gen_nop_modrm(env, s, modrm);
7598 break;
7599 case 0x120: /* mov reg, crN */
7600 case 0x122: /* mov crN, reg */
7601 if (s->cpl != 0) {
7602 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7603 } else {
7604 modrm = cpu_ldub_code(env, s->pc++);
7605 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7606 * AMD documentation (24594.pdf) and testing of
7607 * intel 386 and 486 processors all show that the mod bits
7608 * are assumed to be 1's, regardless of actual values.
7609 */
7610 rm = (modrm & 7) | REX_B(s);
7611 reg = ((modrm >> 3) & 7) | rex_r;
7612 if (CODE64(s))
7613 ot = MO_64;
7614 else
7615 ot = MO_32;
7616 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7617 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7618 reg = 8;
7619 }
7620 switch(reg) {
7621 case 0:
7622 case 2:
7623 case 3:
7624 case 4:
7625 case 8:
7626 gen_update_cc_op(s);
7627 gen_jmp_im(pc_start - s->cs_base);
7628 if (b & 2) {
7629 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7630 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7631 cpu_T[0]);
7632 gen_jmp_im(s->pc - s->cs_base);
7633 gen_eob(s);
7634 } else {
7635 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7636 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7637 }
7638 break;
7639 default:
7640 goto illegal_op;
7641 }
7642 }
7643 break;
7644 case 0x121: /* mov reg, drN */
7645 case 0x123: /* mov drN, reg */
7646 if (s->cpl != 0) {
7647 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7648 } else {
7649 modrm = cpu_ldub_code(env, s->pc++);
7650 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7651 * AMD documentation (24594.pdf) and testing of
7652 * intel 386 and 486 processors all show that the mod bits
7653 * are assumed to be 1's, regardless of actual values.
7654 */
7655 rm = (modrm & 7) | REX_B(s);
7656 reg = ((modrm >> 3) & 7) | rex_r;
7657 if (CODE64(s))
7658 ot = MO_64;
7659 else
7660 ot = MO_32;
7661 /* XXX: do it dynamically with CR4.DE bit */
7662 if (reg == 4 || reg == 5 || reg >= 8)
7663 goto illegal_op;
7664 if (b & 2) {
7665 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7666 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7667 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7668 gen_jmp_im(s->pc - s->cs_base);
7669 gen_eob(s);
7670 } else {
7671 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7672 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7673 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7674 }
7675 }
7676 break;
7677 case 0x106: /* clts */
7678 if (s->cpl != 0) {
7679 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7680 } else {
7681 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7682 gen_helper_clts(cpu_env);
7683 /* abort block because static cpu state changed */
7684 gen_jmp_im(s->pc - s->cs_base);
7685 gen_eob(s);
7686 }
7687 break;
7688 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7689 case 0x1c3: /* MOVNTI reg, mem */
7690 if (!(s->cpuid_features & CPUID_SSE2))
7691 goto illegal_op;
7692 ot = mo_64_32(dflag);
7693 modrm = cpu_ldub_code(env, s->pc++);
7694 mod = (modrm >> 6) & 3;
7695 if (mod == 3)
7696 goto illegal_op;
7697 reg = ((modrm >> 3) & 7) | rex_r;
7698 /* generate a generic store */
7699 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7700 break;
7701 case 0x1ae:
7702 modrm = cpu_ldub_code(env, s->pc++);
7703 mod = (modrm >> 6) & 3;
7704 op = (modrm >> 3) & 7;
7705 switch(op) {
7706 case 0: /* fxsave */
7707 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7708 (s->prefix & PREFIX_LOCK))
7709 goto illegal_op;
7710 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7711 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7712 break;
7713 }
7714 gen_lea_modrm(env, s, modrm);
7715 gen_update_cc_op(s);
7716 gen_jmp_im(pc_start - s->cs_base);
7717 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7718 break;
7719 case 1: /* fxrstor */
7720 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7721 (s->prefix & PREFIX_LOCK))
7722 goto illegal_op;
7723 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7724 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7725 break;
7726 }
7727 gen_lea_modrm(env, s, modrm);
7728 gen_update_cc_op(s);
7729 gen_jmp_im(pc_start - s->cs_base);
7730 gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7731 break;
7732 case 2: /* ldmxcsr */
7733 case 3: /* stmxcsr */
7734 if (s->flags & HF_TS_MASK) {
7735 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7736 break;
7737 }
7738 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7739 mod == 3)
7740 goto illegal_op;
7741 gen_lea_modrm(env, s, modrm);
7742 if (op == 2) {
7743 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7744 s->mem_index, MO_LEUL);
7745 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7746 } else {
7747 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7748 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
7749 }
7750 break;
7751 case 5: /* lfence */
7752 case 6: /* mfence */
7753 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7754 goto illegal_op;
7755 break;
7756 case 7: /* sfence / clflush */
7757 if ((modrm & 0xc7) == 0xc0) {
7758 /* sfence */
7759 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7760 if (!(s->cpuid_features & CPUID_SSE))
7761 goto illegal_op;
7762 } else {
7763 /* clflush */
7764 if (!(s->cpuid_features & CPUID_CLFLUSH))
7765 goto illegal_op;
7766 gen_lea_modrm(env, s, modrm);
7767 }
7768 break;
7769 default:
7770 goto illegal_op;
7771 }
7772 break;
7773 case 0x10d: /* 3DNow! prefetch(w) */
7774 modrm = cpu_ldub_code(env, s->pc++);
7775 mod = (modrm >> 6) & 3;
7776 if (mod == 3)
7777 goto illegal_op;
7778 gen_lea_modrm(env, s, modrm);
7779 /* ignore for now */
7780 break;
7781 case 0x1aa: /* rsm */
7782 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7783 if (!(s->flags & HF_SMM_MASK))
7784 goto illegal_op;
7785 gen_update_cc_op(s);
7786 gen_jmp_im(s->pc - s->cs_base);
7787 gen_helper_rsm(cpu_env);
7788 gen_eob(s);
7789 break;
7790 case 0x1b8: /* SSE4.2 popcnt */
7791 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7792 PREFIX_REPZ)
7793 goto illegal_op;
7794 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7795 goto illegal_op;
7797 modrm = cpu_ldub_code(env, s->pc++);
7798 reg = ((modrm >> 3) & 7) | rex_r;
7800 if (s->prefix & PREFIX_DATA) {
7801 ot = MO_16;
7802 } else {
7803 ot = mo_64_32(dflag);
7804 }
7806 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7807 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7808 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
7810 set_cc_op(s, CC_OP_EFLAGS);
7811 break;
7812 case 0x10e ... 0x10f:
7813 /* 3DNow! instructions, ignore prefixes */
7814 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7815 case 0x110 ... 0x117:
7816 case 0x128 ... 0x12f:
7817 case 0x138 ... 0x13a:
7818 case 0x150 ... 0x179:
7819 case 0x17c ... 0x17f:
7820 case 0x1c2:
7821 case 0x1c4 ... 0x1c6:
7822 case 0x1d0 ... 0x1fe:
7823 gen_sse(env, s, b, pc_start, rex_r);
7824 break;
7825 default:
7826 goto illegal_op;
7827 }
7828 /* lock generation */
7829 if (s->prefix & PREFIX_LOCK)
7830 gen_helper_unlock();
7831 return s->pc;
7832 illegal_op:
7833 if (s->prefix & PREFIX_LOCK)
7834 gen_helper_unlock();
7835 /* XXX: ensure that no lock was generated */
7836 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7837 return s->pc;
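/* Register the fixed TCG globals that map CPUX86State fields
   (cc_op, cc_dst/cc_src/cc_src2 and the general-purpose registers)
   to named TCG values, so the TCG optimizer can track them across
   micro-ops. */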
7840 void optimize_flags_init(void)
7841 {
7842 static const char reg_names[CPU_NB_REGS][4] = {
7843 #ifdef TARGET_X86_64
7844 [R_EAX] = "rax",
7845 [R_EBX] = "rbx",
7846 [R_ECX] = "rcx",
7847 [R_EDX] = "rdx",
7848 [R_ESI] = "rsi",
7849 [R_EDI] = "rdi",
7850 [R_EBP] = "rbp",
7851 [R_ESP] = "rsp",
7852 [8] = "r8",
7853 [9] = "r9",
7854 [10] = "r10",
7855 [11] = "r11",
7856 [12] = "r12",
7857 [13] = "r13",
7858 [14] = "r14",
7859 [15] = "r15",
7860 #else
7861 [R_EAX] = "eax",
7862 [R_EBX] = "ebx",
7863 [R_ECX] = "ecx",
7864 [R_EDX] = "edx",
7865 [R_ESI] = "esi",
7866 [R_EDI] = "edi",
7867 [R_EBP] = "ebp",
7868 [R_ESP] = "esp",
7869 #endif
7870 };
7871 int i;
7873 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7874 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7875 offsetof(CPUX86State, cc_op), "cc_op");
7876 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7877 "cc_dst");
7878 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7879 "cc_src");
7880 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7881 "cc_src2");
7883 for (i = 0; i < CPU_NB_REGS; ++i) {
7884 cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
7885 offsetof(CPUX86State, regs[i]),
7886 reg_names[i]);
7887 }
7888 }
7890 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7891 basic block 'tb'. If search_pc is TRUE, also generate PC
7892 information for each intermediate instruction. */
7893 static inline void gen_intermediate_code_internal(X86CPU *cpu,
7894 TranslationBlock *tb,
7895 bool search_pc)
7896 {
7897 CPUState *cs = CPU(cpu);
7898 CPUX86State *env = &cpu->env;
7899 DisasContext dc1, *dc = &dc1;
7900 target_ulong pc_ptr;
7901 uint16_t *gen_opc_end;
7902 CPUBreakpoint *bp;
7903 int j, lj;
7904 uint64_t flags;
7905 target_ulong pc_start;
7906 target_ulong cs_base;
7907 int num_insns;
7908 int max_insns;
7910 /* generate intermediate code */
7911 pc_start = tb->pc;
7912 cs_base = tb->cs_base;
7913 flags = tb->flags;
7915 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7916 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7917 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7918 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7919 dc->f_st = 0;
7920 dc->vm86 = (flags >> VM_SHIFT) & 1;
7921 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7922 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7923 dc->tf = (flags >> TF_SHIFT) & 1;
7924 dc->singlestep_enabled = cs->singlestep_enabled;
7925 dc->cc_op = CC_OP_DYNAMIC;
7926 dc->cc_op_dirty = false;
7927 dc->cs_base = cs_base;
7928 dc->tb = tb;
7929 dc->popl_esp_hack = 0;
7930 /* select memory access functions */
7931 dc->mem_index = 0;
7932 if (flags & HF_SOFTMMU_MASK) {
7933 dc->mem_index = cpu_mmu_index(env);
7934 }
7935 dc->cpuid_features = env->features[FEAT_1_EDX];
7936 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
7937 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
7938 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
7939 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
7940 #ifdef TARGET_X86_64
7941 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7942 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7943 #endif
7944 dc->flags = flags;
7945 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
7946 (flags & HF_INHIBIT_IRQ_MASK)
7947 #ifndef CONFIG_SOFTMMU
7948 || (flags & HF_SOFTMMU_MASK)
7949 #endif
7950 );
7951 #if 0
7952 /* check addseg logic */
7953 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7954 printf("ERROR addseg\n");
7955 #endif
7957 cpu_T[0] = tcg_temp_new();
7958 cpu_T[1] = tcg_temp_new();
7959 cpu_A0 = tcg_temp_new();
7961 cpu_tmp0 = tcg_temp_new();
7962 cpu_tmp1_i64 = tcg_temp_new_i64();
7963 cpu_tmp2_i32 = tcg_temp_new_i32();
7964 cpu_tmp3_i32 = tcg_temp_new_i32();
7965 cpu_tmp4 = tcg_temp_new();
7966 cpu_ptr0 = tcg_temp_new_ptr();
7967 cpu_ptr1 = tcg_temp_new_ptr();
7968 cpu_cc_srcT = tcg_temp_local_new();
7970 gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
7972 dc->is_jmp = DISAS_NEXT;
7973 pc_ptr = pc_start;
7974 lj = -1;
7975 num_insns = 0;
7976 max_insns = tb->cflags & CF_COUNT_MASK;
7977 if (max_insns == 0)
7978 max_insns = CF_COUNT_MASK;
7980 gen_tb_start();
7981 for(;;) {
7982 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
7983 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
7984 if (bp->pc == pc_ptr &&
7985 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7986 gen_debug(dc, pc_ptr - dc->cs_base);
7987 break;
7988 }
7989 }
7990 }
7991 if (search_pc) {
7992 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
7993 if (lj < j) {
7994 lj++;
7995 while (lj < j)
7996 tcg_ctx.gen_opc_instr_start[lj++] = 0;
7997 }
7998 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
7999 gen_opc_cc_op[lj] = dc->cc_op;
8000 tcg_ctx.gen_opc_instr_start[lj] = 1;
8001 tcg_ctx.gen_opc_icount[lj] = num_insns;
8002 }
8003 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8004 gen_io_start();
8006 pc_ptr = disas_insn(env, dc, pc_ptr);
8007 num_insns++;
8008 /* stop translation if indicated */
8009 if (dc->is_jmp)
8010 break;
8011 /* in single-step mode, we generate only one instruction and
8012 generate an exception */
8013 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8014 the flag and abort the translation to give the irqs a
8015 chance to happen */
8016 if (dc->tf || dc->singlestep_enabled ||
8017 (flags & HF_INHIBIT_IRQ_MASK)) {
8018 gen_jmp_im(pc_ptr - dc->cs_base);
8019 gen_eob(dc);
8020 break;
8021 }
8022 /* if too long translation, stop generation too */
8023 if (tcg_ctx.gen_opc_ptr >= gen_opc_end ||
8024 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8025 num_insns >= max_insns) {
8026 gen_jmp_im(pc_ptr - dc->cs_base);
8027 gen_eob(dc);
8028 break;
8029 }
8030 if (singlestep) {
8031 gen_jmp_im(pc_ptr - dc->cs_base);
8032 gen_eob(dc);
8033 break;
8034 }
8035 }
8036 if (tb->cflags & CF_LAST_IO)
8037 gen_io_end();
8038 gen_tb_end(tb, num_insns);
8039 *tcg_ctx.gen_opc_ptr = INDEX_op_end;
8040 /* make sure we fill in the last values */
8041 if (search_pc) {
8042 j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
8043 lj++;
8044 while (lj <= j)
8045 tcg_ctx.gen_opc_instr_start[lj++] = 0;
8046 }
8048 #ifdef DEBUG_DISAS
8049 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8050 int disas_flags;
8051 qemu_log("----------------\n");
8052 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8053 #ifdef TARGET_X86_64
8054 if (dc->code64)
8055 disas_flags = 2;
8056 else
8057 #endif
8058 disas_flags = !dc->code32;
8059 log_target_disas(env, pc_start, pc_ptr - pc_start, disas_flags);
8060 qemu_log("\n");
8061 }
8062 #endif
8064 if (!search_pc) {
8065 tb->size = pc_ptr - pc_start;
8066 tb->icount = num_insns;
8067 }
8068 }
8070 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8071 {
8072 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
8073 }
8075 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
8076 {
8077 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
8078 }
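/* When an exception is raised in the middle of a TB, the block is
   retranslated in search_pc mode to rebuild the gen_opc_pc[] and
   gen_opc_cc_op[] side tables; restore_state_to_opc() then uses the
   index of the faulting micro-op to restore the guest EIP and, when
   it was static at that point, cc_op. */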
8080 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
8081 {
8082 int cc_op;
8083 #ifdef DEBUG_DISAS
8084 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8085 int i;
8086 qemu_log("RESTORE:\n");
8087 for(i = 0;i <= pc_pos; i++) {
8088 if (tcg_ctx.gen_opc_instr_start[i]) {
8089 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8090 tcg_ctx.gen_opc_pc[i]);
8091 }
8092 }
8093 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8094 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
8095 (uint32_t)tb->cs_base);
8096 }
8097 #endif
8098 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
8099 cc_op = gen_opc_cc_op[pc_pos];
8100 if (cc_op != CC_OP_DYNAMIC)
8101 env->cc_op = cc_op;
8102 }