/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif
//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

static uint8_t gen_opc_cc_op[OPC_BUF_SIZE];

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    TCGMemOp aflag;
    TCGMemOp dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without 1's complement. */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};
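
/* Condition codes are evaluated lazily: a flag-setting instruction only
 * records its operands/result in cpu_cc_dst/cpu_cc_src/cpu_cc_src2 (plus
 * cpu_cc_srcT for the left operand of subtractions) and remembers which
 * operation produced them in cc_op.  The table above drives set_cc_op()
 * below: e.g. after "add %ebx, %eax" the translator sets CC_OP_ADDL with
 * CC_DST = result and CC_SRC = source operand, and any global that the new
 * CC_OP no longer uses can be discarded. */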
static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
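
/* Example of what these offsets encode: with an 8-byte target_ulong on a
   big-endian host, the low byte (e.g. AL) lives at byte offset 7 of the
   register slot (REG_B_OFFSET) and the high byte (e.g. AH) at offset 6
   (REG_H_OFFSET); on a little-endian host they are offsets 0 and 1. */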
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
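
/* In 64-bit mode any REX prefix (tracked via x86_64_hregs) redefines
   encodings 4..7 to mean SPL/BPL/SIL/DIL rather than AH/CH/DH/BH, and
   encodings 8..15 name R8B..R15B; both are plain "low byte" accesses,
   hence the false result above in those cases. */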
/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}

static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T[0]);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
        break;
    case MO_16:
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
        gen_op_addl_A0_seg(s, override);
        break;
    default:
        tcg_abort();
    }
}
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        gen_op_movq_A0_reg(R_EDI);
        break;
#endif
    case MO_32:
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
        break;
    case MO_16:
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
        gen_op_addl_A0_seg(s, R_ES);
        break;
    default:
        tcg_abort();
    }
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            gen_update_cc_op(s);
            gen_jmp_im(cur_eip);
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}
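
/* A CCPrepare describes how to test one condition without materializing the
   whole of EFLAGS: compare .reg (optionally masked with .mask) against
   .reg2 or .imm using .cond.  gen_setcc1, gen_jcc1 and gen_cmovcc1 below
   all consume this description, so SETcc/Jcc/CMOVcc can often test the raw
   operands saved by the last flag-setting instruction directly. */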
typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}
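
/* Worked example: after "cmp %ebx, %eax" the translator leaves CC_OP_SUBL
   with CC_SRCT = EAX and CC_SRC = EBX, so a following "jb" reduces to one
   unsigned comparison EAX < EBX (TCG_COND_LTU) on the saved operands;
   EFLAGS itself is never computed. */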
/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}
/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    TCGMemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
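
/* The slow JCC_L/JCC_LE cases above exploit the EFLAGS layout: OF is bit 11
   and SF is bit 7, so "(CC_SRC >> 4) ^ CC_SRC" lines OF up with SF, and the
   CC_S bit of the result is exactly SF ^ OF, i.e. the signed "less than"
   predicate. */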
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T[0], 0);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}
/* same method as Valgrind: we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
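
/* Each expansion defines one helper, e.g. GEN_REPZ(movs) produces
   gen_repz_movs(s, ot, cur_eip, next_eip).  The GEN_REPZ2 variants add the
   "nz" argument so that REPZ and REPNZ can share the gen_jcc1 test on ZF. */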
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}
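
/* The "op" argument is the 3-bit reg field of the 0xD8..0xDF FP opcodes
   (add, mul, com, comp, sub, subr, div, divr).  Cases 2 and 3 both emit
   fcom; the extra pop that distinguishes FCOMP is handled separately by
   the caller. */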
/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
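
/* The movcond dance above implements the x86 rule that a shift with a
   masked count of zero leaves EFLAGS completely untouched: CC_DST, CC_SRC
   and CC_OP are each replaced only when count != 0, so the resulting CC_OP
   cannot be known at translation time and must be treated as dynamic. */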
static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
}

static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T[0], op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
        tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
            tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T[0], op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        default:
            tcg_abort();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        default:
            tcg_abort();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
            tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
            tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case MO_32:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
                           cpu_tmp4, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}
static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_v_reg(ot, cpu_T[1], s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(cpu_T[1], c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int mod, rm, code, override, must_add_seg;
    TCGv sum;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        base = rm;
        index = -1;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }

        /* Compute the address, with a minimum number of TCG ops.  */
        TCGV_UNUSED(sum);
        if (index >= 0) {
            if (scale == 0) {
                sum = cpu_regs[index];
            } else {
                tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
                sum = cpu_A0;
            }
            if (base >= 0) {
                tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
                sum = cpu_A0;
            }
        } else if (base >= 0) {
            sum = cpu_regs[base];
        }
        if (TCGV_IS_UNUSED(sum)) {
            tcg_gen_movi_tl(cpu_A0, disp);
        } else {
            tcg_gen_addi_tl(cpu_A0, sum, disp);
        }

        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP) {
                    override = R_SS;
                } else {
                    override = R_DS;
                }
            }

            tcg_gen_ld_tl(cpu_tmp0, cpu_env,
                          offsetof(CPUX86State, segs[override].base));
            if (CODE64(s)) {
                if (s->aflag == MO_32) {
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                }
                tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
                return;
            }

            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        }

        if (s->aflag == MO_32) {
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
        break;

    case MO_16:
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_movi_tl(cpu_A0, disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int16_t)cpu_lduw_code(env, s->pc);
            s->pc += 2;
            break;
        }

        sum = cpu_A0;
        switch (rm) {
        case 0:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
            break;
        case 1:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
            break;
        case 2:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
            break;
        case 3:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
            break;
        case 4:
            sum = cpu_regs[R_ESI];
            break;
        case 5:
            sum = cpu_regs[R_EDI];
            break;
        case 6:
            sum = cpu_regs[R_EBP];
            break;
        default:
        case 7:
            sum = cpu_regs[R_EBX];
            break;
        }
        tcg_gen_addi_tl(cpu_A0, sum, disp);
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6) {
                    override = R_SS;
                } else {
                    override = R_DS;
                }
            }
            gen_op_addl_A0_seg(s, override);
        }
        break;

    default:
        tcg_abort();
    }
}
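
/* ModRM recap for the decoder above: mod = bits 7..6, reg/opcode = bits
   5..3, rm = bits 2..0.  E.g. modrm 0x44 has mod=1 and rm=4: in 32-bit
   addressing rm=4 means a SIB byte follows, and mod=1 adds a disp8, so the
   effective address is base + (index << scale) + disp8. */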
static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        base = rm;

        if (base == 4) {
            code = cpu_ldub_code(env, s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
        break;

    case MO_16:
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
        break;

    default:
        tcg_abort();
    }
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(s, override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           TCGMemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[0], rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
    }
}

static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = cpu_ldub_code(env, s->pc);
        s->pc++;
        break;
    case MO_16:
        ret = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = cpu_ldl_code(env, s->pc);
        s->pc += 4;
        break;
    default:
        tcg_abort();
    }
    return ret;
}

static inline int insn_const_size(TCGMemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}

static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
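
/* tcg_gen_goto_tb emits a patchable direct jump, and the value returned
   through tcg_gen_exit_tb -- the TB pointer with the jump slot index
   (tb_num) in its low bits -- lets the execution loop patch that jump so
   the two TBs are chained without going back through the dispatcher. */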
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    TCGLabel *l1, *l2;

    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
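
/* CMOVcc: as on hardware, the source operand is read even when the
   condition is false, so the load below is unconditional and a
   movcond then selects between the loaded value and the old register
   contents.  */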
static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
                        int modrm, int reg)
{
    CCPrepare cc;

    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

    cc = gen_prepare_cc(s, b, cpu_T[1]);
    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_const_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
                       cpu_T[0], cpu_regs[reg]);
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_temp_free(cc.reg);
    }
    if (!cc.use_reg2) {
        tcg_temp_free(cc.reg2);
    }
}
static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State, segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State, segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State, segs[seg_reg].base));
}
/* Move T0 to seg_reg and compute if the CPU state may change.  Never
   call this function with seg_reg == R_CS.  */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* Abort translation because the addseg value may change or
           because ss32 may change.  For R_SS, translation must always
           stop, because special handling is needed to disable hardware
           interrupts for the next instruction.  */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = DISAS_TB_JUMP;
    }
}
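
/* Bit 3 of the SVM IOIO intercept information field is the REP-prefix
   flag, hence the value 8 below.  */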
static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}

static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    gen_update_cc_op(s);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}
static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_add_reg_im(MO_64, R_ESP, addend);
    } else
#endif
    if (s->ss32) {
        gen_op_add_reg_im(MO_32, R_ESP, addend);
    } else {
        gen_op_add_reg_im(MO_16, R_ESP, addend);
    }
}
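
/* Note: the store width (d_ot) and the ESP arithmetic width (a_ot)
   below are independent; the operand size decides how much is pushed,
   while CODE64/ss32 decide how the stack pointer wraps.  */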
/* Generate a push. It depends on ss32, addseg and dflag.  */
static void gen_push_v(DisasContext *s, TCGv val)
{
    TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
    int size = 1 << d_ot;
    TCGv new_esp = cpu_A0;

    tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);

    if (CODE64(s)) {
        a_ot = MO_64;
    } else if (s->ss32) {
        a_ot = MO_32;
        if (s->addseg) {
            new_esp = cpu_tmp4;
            tcg_gen_mov_tl(new_esp, cpu_A0);
            gen_op_addl_A0_seg(s, R_SS);
        } else {
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
    } else {
        a_ot = MO_16;
        new_esp = cpu_tmp4;
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
        tcg_gen_mov_tl(new_esp, cpu_A0);
        gen_op_addl_A0_seg(s, R_SS);
    }

    gen_op_st_v(s, d_ot, val, cpu_A0);
    gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
}
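
/* The pop is split in two steps: gen_pop_T0() only loads the value,
   and the caller adjusts ESP with gen_pop_update() afterwards, so a
   faulting load leaves ESP unchanged and the exception stays
   precise.  */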
/* two step pop is necessary for precise exceptions */
static TCGMemOp gen_pop_T0(DisasContext *s)
{
    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
    TCGv addr = cpu_A0;

    if (CODE64(s)) {
        addr = cpu_regs[R_ESP];
    } else if (!s->ss32) {
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
        gen_op_addl_A0_seg(s, R_SS);
    } else if (s->addseg) {
        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
        gen_op_addl_A0_seg(s, R_SS);
    } else {
        tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
    }

    gen_op_ld_v(s, d_ot, cpu_T[0], addr);
    return d_ot;
}

static void gen_pop_update(DisasContext *s, TCGMemOp ot)
{
    gen_stack_update(s, 1 << ot);
}
static void gen_stack_A0(DisasContext *s)
{
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
}
/* NOTE: wrap-around in 16-bit mode is not fully handled */
static void gen_pusha(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-8 << s->dflag);
    if (!s->ss32)
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for (i = 0; i < 8; i++) {
        gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
        gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
        gen_op_addl_A0_im(1 << s->dflag);
    }
    gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
}
/* NOTE: wrap-around in 16-bit mode is not fully handled */
static void gen_popa(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (i != 3) {
            gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
            gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
        }
        gen_op_addl_A0_im(1 << s->dflag);
    }
    gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
}
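
/* ENTER: push EBP, optionally copy 'level' outer frame pointers via
   the enter_level helpers, then point EBP at the new frame and lower
   ESP by esp_addend plus the copied area.  */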
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    TCGMemOp ot = mo_pushpop(s, s->dflag);
    int opsize = 1 << ot;

    level &= 0x1f;
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movl_A0_reg(R_ESP);
        gen_op_addq_A0_im(-opsize);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);

        /* push bp */
        gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
        gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
                                     tcg_const_i32((ot == MO_64)),
                                     cpu_T[1]);
        }
        gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        gen_op_addl_A0_im(-opsize);
        if (!s->ss32)
            tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);
        if (s->addseg)
            gen_op_addl_A0_seg(s, R_SS);
        /* push bp */
        gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
        gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter_level(cpu_env, tcg_const_i32(level),
                                   tcg_const_i32(s->dflag - 1),
                                   cpu_T[1]);
        }
        gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
    }
}
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->is_jmp = DISAS_TB_JUMP;
}

/* an interrupt is different from an exception because of the
   privilege checks */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = DISAS_TB_JUMP;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_debug(cpu_env);
    s->is_jmp = DISAS_TB_JUMP;
}
/* Generate a generic end of block.  A trace exception is also
   generated if needed.  */
static void gen_eob(DisasContext *s)
{
    gen_update_cc_op(s);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq(cpu_env);
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf(cpu_env);
    }
    if (s->singlestep_enabled) {
        gen_helper_debug(cpu_env);
    } else if (s->tf) {
        gen_helper_single_step(cpu_env);
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
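
/* Before chaining to another TB, the lazy condition-code state must
   be spilled and marked dynamic: the destination TB was translated
   without assumptions about which CC_OP is live.  */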
/* Generate a jump to eip.  No segment change must happen before, as a
   direct jump to the next block may occur.  */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);
    if (s->jmp_opt) {
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
}

static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
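
/* SSEFunc typedef naming: the leading letter is the return type
   ('0' = void, 'i' = i32, 'l' = i64) and the trailing letters encode
   the arguments ('e' = env pointer, 'p' = vector register pointer,
   'i' = i32, 'l' = i64, 't' = target-long value).  */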
typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv_i32 val);
typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
                               TCGv val);

#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)

#define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
                     gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
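
/* The four columns of sse_op_table1 correspond to the mandatory
   prefix: none, 0x66, 0xf3 and 0xf2, in that order, matching the b1
   index computed in gen_sse().  */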
2663 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2664 /* 3DNow! extensions */
2665 [0x0e] = { SSE_DUMMY }, /* femms */
2666 [0x0f] = { SSE_DUMMY }, /* pf... */
2667 /* pure SSE operations */
2668 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2669 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2670 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2671 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2672 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2673 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2674 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2675 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2677 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2678 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2679 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2680 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2681 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2682 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2683 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2684 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2685 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2686 [0x51] = SSE_FOP(sqrt),
2687 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2688 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2689 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2690 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2691 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2692 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2693 [0x58] = SSE_FOP(add),
2694 [0x59] = SSE_FOP(mul),
2695 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2696 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2697 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2698 [0x5c] = SSE_FOP(sub),
2699 [0x5d] = SSE_FOP(min),
2700 [0x5e] = SSE_FOP(div),
2701 [0x5f] = SSE_FOP(max),
2703 [0xc2] = SSE_FOP(cmpeq),
2704 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2705 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2707 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2708 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2709 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2711 /* MMX ops and their SSE extensions */
2712 [0x60] = MMX_OP2(punpcklbw),
2713 [0x61] = MMX_OP2(punpcklwd),
2714 [0x62] = MMX_OP2(punpckldq),
2715 [0x63] = MMX_OP2(packsswb),
2716 [0x64] = MMX_OP2(pcmpgtb),
2717 [0x65] = MMX_OP2(pcmpgtw),
2718 [0x66] = MMX_OP2(pcmpgtl),
2719 [0x67] = MMX_OP2(packuswb),
2720 [0x68] = MMX_OP2(punpckhbw),
2721 [0x69] = MMX_OP2(punpckhwd),
2722 [0x6a] = MMX_OP2(punpckhdq),
2723 [0x6b] = MMX_OP2(packssdw),
2724 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2725 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2726 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2727 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2728 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2729 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2730 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2731 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2732 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2733 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2734 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2735 [0x74] = MMX_OP2(pcmpeqb),
2736 [0x75] = MMX_OP2(pcmpeqw),
2737 [0x76] = MMX_OP2(pcmpeql),
2738 [0x77] = { SSE_DUMMY }, /* emms */
2739 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2740 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2741 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2742 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2743 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2744 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2745 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2746 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2747 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2748 [0xd1] = MMX_OP2(psrlw),
2749 [0xd2] = MMX_OP2(psrld),
2750 [0xd3] = MMX_OP2(psrlq),
2751 [0xd4] = MMX_OP2(paddq),
2752 [0xd5] = MMX_OP2(pmullw),
2753 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2754 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2755 [0xd8] = MMX_OP2(psubusb),
2756 [0xd9] = MMX_OP2(psubusw),
2757 [0xda] = MMX_OP2(pminub),
2758 [0xdb] = MMX_OP2(pand),
2759 [0xdc] = MMX_OP2(paddusb),
2760 [0xdd] = MMX_OP2(paddusw),
2761 [0xde] = MMX_OP2(pmaxub),
2762 [0xdf] = MMX_OP2(pandn),
2763 [0xe0] = MMX_OP2(pavgb),
2764 [0xe1] = MMX_OP2(psraw),
2765 [0xe2] = MMX_OP2(psrad),
2766 [0xe3] = MMX_OP2(pavgw),
2767 [0xe4] = MMX_OP2(pmulhuw),
2768 [0xe5] = MMX_OP2(pmulhw),
2769 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2770 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2771 [0xe8] = MMX_OP2(psubsb),
2772 [0xe9] = MMX_OP2(psubsw),
2773 [0xea] = MMX_OP2(pminsw),
2774 [0xeb] = MMX_OP2(por),
2775 [0xec] = MMX_OP2(paddsb),
2776 [0xed] = MMX_OP2(paddsw),
2777 [0xee] = MMX_OP2(pmaxsw),
2778 [0xef] = MMX_OP2(pxor),
2779 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2780 [0xf1] = MMX_OP2(psllw),
2781 [0xf2] = MMX_OP2(pslld),
2782 [0xf3] = MMX_OP2(psllq),
2783 [0xf4] = MMX_OP2(pmuludq),
2784 [0xf5] = MMX_OP2(pmaddwd),
2785 [0xf6] = MMX_OP2(psadbw),
2786 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2787 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2788 [0xf8] = MMX_OP2(psubb),
2789 [0xf9] = MMX_OP2(psubw),
2790 [0xfa] = MMX_OP2(psubl),
2791 [0xfb] = MMX_OP2(psubq),
2792 [0xfc] = MMX_OP2(paddb),
2793 [0xfd] = MMX_OP2(paddw),
2794 [0xfe] = MMX_OP2(paddl),
2795 };
2797 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2798 [0 + 2] = MMX_OP2(psrlw),
2799 [0 + 4] = MMX_OP2(psraw),
2800 [0 + 6] = MMX_OP2(psllw),
2801 [8 + 2] = MMX_OP2(psrld),
2802 [8 + 4] = MMX_OP2(psrad),
2803 [8 + 6] = MMX_OP2(pslld),
2804 [16 + 2] = MMX_OP2(psrlq),
2805 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2806 [16 + 6] = MMX_OP2(psllq),
2807 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
2808 };
2810 static const SSEFunc_0_epi sse_op_table3ai[] = {
2811 gen_helper_cvtsi2ss,
2812 gen_helper_cvtsi2sd
2815 #ifdef TARGET_X86_64
2816 static const SSEFunc_0_epl sse_op_table3aq[] = {
2817 gen_helper_cvtsq2ss,
2818 gen_helper_cvtsq2sd
2819 };
2820 #endif
2822 static const SSEFunc_i_ep sse_op_table3bi[] = {
2823 gen_helper_cvttss2si,
2824 gen_helper_cvtss2si,
2825 gen_helper_cvttsd2si,
2826 gen_helper_cvtsd2si
2827 };
2829 #ifdef TARGET_X86_64
2830 static const SSEFunc_l_ep sse_op_table3bq[] = {
2831 gen_helper_cvttss2sq,
2832 gen_helper_cvtss2sq,
2833 gen_helper_cvttsd2sq,
2834 gen_helper_cvtsd2sq
2835 };
2836 #endif
2838 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2839 SSE_FOP(cmpeq),
2840 SSE_FOP(cmplt),
2841 SSE_FOP(cmple),
2842 SSE_FOP(cmpunord),
2843 SSE_FOP(cmpneq),
2844 SSE_FOP(cmpnlt),
2845 SSE_FOP(cmpnle),
2846 SSE_FOP(cmpord),
2847 };
2849 static const SSEFunc_0_epp sse_op_table5[256] = {
2850 [0x0c] = gen_helper_pi2fw,
2851 [0x0d] = gen_helper_pi2fd,
2852 [0x1c] = gen_helper_pf2iw,
2853 [0x1d] = gen_helper_pf2id,
2854 [0x8a] = gen_helper_pfnacc,
2855 [0x8e] = gen_helper_pfpnacc,
2856 [0x90] = gen_helper_pfcmpge,
2857 [0x94] = gen_helper_pfmin,
2858 [0x96] = gen_helper_pfrcp,
2859 [0x97] = gen_helper_pfrsqrt,
2860 [0x9a] = gen_helper_pfsub,
2861 [0x9e] = gen_helper_pfadd,
2862 [0xa0] = gen_helper_pfcmpgt,
2863 [0xa4] = gen_helper_pfmax,
2864 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2865 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2866 [0xaa] = gen_helper_pfsubr,
2867 [0xae] = gen_helper_pfacc,
2868 [0xb0] = gen_helper_pfcmpeq,
2869 [0xb4] = gen_helper_pfmul,
2870 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2871 [0xb7] = gen_helper_pmulhrw_mmx,
2872 [0xbb] = gen_helper_pswapd,
2873 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2874 };
2876 struct SSEOpHelper_epp {
2877 SSEFunc_0_epp op[2];
2878 uint32_t ext_mask;
2879 };

2881 struct SSEOpHelper_eppi {
2882 SSEFunc_0_eppi op[2];
2883 uint32_t ext_mask;
2884 };
2886 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2887 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2888 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2889 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2890 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2891 CPUID_EXT_PCLMULQDQ }
2892 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
2894 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2895 [0x00] = SSSE3_OP(pshufb),
2896 [0x01] = SSSE3_OP(phaddw),
2897 [0x02] = SSSE3_OP(phaddd),
2898 [0x03] = SSSE3_OP(phaddsw),
2899 [0x04] = SSSE3_OP(pmaddubsw),
2900 [0x05] = SSSE3_OP(phsubw),
2901 [0x06] = SSSE3_OP(phsubd),
2902 [0x07] = SSSE3_OP(phsubsw),
2903 [0x08] = SSSE3_OP(psignb),
2904 [0x09] = SSSE3_OP(psignw),
2905 [0x0a] = SSSE3_OP(psignd),
2906 [0x0b] = SSSE3_OP(pmulhrsw),
2907 [0x10] = SSE41_OP(pblendvb),
2908 [0x14] = SSE41_OP(blendvps),
2909 [0x15] = SSE41_OP(blendvpd),
2910 [0x17] = SSE41_OP(ptest),
2911 [0x1c] = SSSE3_OP(pabsb),
2912 [0x1d] = SSSE3_OP(pabsw),
2913 [0x1e] = SSSE3_OP(pabsd),
2914 [0x20] = SSE41_OP(pmovsxbw),
2915 [0x21] = SSE41_OP(pmovsxbd),
2916 [0x22] = SSE41_OP(pmovsxbq),
2917 [0x23] = SSE41_OP(pmovsxwd),
2918 [0x24] = SSE41_OP(pmovsxwq),
2919 [0x25] = SSE41_OP(pmovsxdq),
2920 [0x28] = SSE41_OP(pmuldq),
2921 [0x29] = SSE41_OP(pcmpeqq),
2922 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2923 [0x2b] = SSE41_OP(packusdw),
2924 [0x30] = SSE41_OP(pmovzxbw),
2925 [0x31] = SSE41_OP(pmovzxbd),
2926 [0x32] = SSE41_OP(pmovzxbq),
2927 [0x33] = SSE41_OP(pmovzxwd),
2928 [0x34] = SSE41_OP(pmovzxwq),
2929 [0x35] = SSE41_OP(pmovzxdq),
2930 [0x37] = SSE42_OP(pcmpgtq),
2931 [0x38] = SSE41_OP(pminsb),
2932 [0x39] = SSE41_OP(pminsd),
2933 [0x3a] = SSE41_OP(pminuw),
2934 [0x3b] = SSE41_OP(pminud),
2935 [0x3c] = SSE41_OP(pmaxsb),
2936 [0x3d] = SSE41_OP(pmaxsd),
2937 [0x3e] = SSE41_OP(pmaxuw),
2938 [0x3f] = SSE41_OP(pmaxud),
2939 [0x40] = SSE41_OP(pmulld),
2940 [0x41] = SSE41_OP(phminposuw),
2941 [0xdb] = AESNI_OP(aesimc),
2942 [0xdc] = AESNI_OP(aesenc),
2943 [0xdd] = AESNI_OP(aesenclast),
2944 [0xde] = AESNI_OP(aesdec),
2945 [0xdf] = AESNI_OP(aesdeclast),
2946 };
2948 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2949 [0x08] = SSE41_OP(roundps),
2950 [0x09] = SSE41_OP(roundpd),
2951 [0x0a] = SSE41_OP(roundss),
2952 [0x0b] = SSE41_OP(roundsd),
2953 [0x0c] = SSE41_OP(blendps),
2954 [0x0d] = SSE41_OP(blendpd),
2955 [0x0e] = SSE41_OP(pblendw),
2956 [0x0f] = SSSE3_OP(palignr),
2957 [0x14] = SSE41_SPECIAL, /* pextrb */
2958 [0x15] = SSE41_SPECIAL, /* pextrw */
2959 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2960 [0x17] = SSE41_SPECIAL, /* extractps */
2961 [0x20] = SSE41_SPECIAL, /* pinsrb */
2962 [0x21] = SSE41_SPECIAL, /* insertps */
2963 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2964 [0x40] = SSE41_OP(dpps),
2965 [0x41] = SSE41_OP(dppd),
2966 [0x42] = SSE41_OP(mpsadbw),
2967 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2968 [0x60] = SSE42_OP(pcmpestrm),
2969 [0x61] = SSE42_OP(pcmpestri),
2970 [0x62] = SSE42_OP(pcmpistrm),
2971 [0x63] = SSE42_OP(pcmpistri),
2972 [0xdf] = AESNI_OP(aeskeygenassist),
2973 };
2975 static void gen_sse(CPUX86State *env, DisasContext *s, int b,
2976 target_ulong pc_start, int rex_r)
2978 int b1, op1_offset, op2_offset, is_xmm, val;
2979 int modrm, mod, rm, reg;
2980 SSEFunc_0_epp sse_fn_epp;
2981 SSEFunc_0_eppi sse_fn_eppi;
2982 SSEFunc_0_ppi sse_fn_ppi;
2983 SSEFunc_0_eppt sse_fn_eppt;
2984 TCGMemOp ot;
2986 b &= 0xff;
2987 if (s->prefix & PREFIX_DATA)
2988 b1 = 1;
2989 else if (s->prefix & PREFIX_REPZ)
2990 b1 = 2;
2991 else if (s->prefix & PREFIX_REPNZ)
2992 b1 = 3;
2993 else
2994 b1 = 0;
2995 sse_fn_epp = sse_op_table1[b][b1];
2996 if (!sse_fn_epp) {
2997 goto illegal_op;
2999 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3000 is_xmm = 1;
3001 } else {
3002 if (b1 == 0) {
3003 /* MMX case */
3004 is_xmm = 0;
3005 } else {
3006 is_xmm = 1;
3007 }
3008 }
3009 /* simple MMX/SSE operation */
3010 if (s->flags & HF_TS_MASK) {
3011 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3012 return;
3014 if (s->flags & HF_EM_MASK) {
3015 illegal_op:
3016 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3017 return;
3019 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3020 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3021 goto illegal_op;
3022 if (b == 0x0e) {
3023 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3024 goto illegal_op;
3025 /* femms */
3026 gen_helper_emms(cpu_env);
3027 return;
3029 if (b == 0x77) {
3030 /* emms */
3031 gen_helper_emms(cpu_env);
3032 return;
3034 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3035 the static cpu state) */
3036 if (!is_xmm) {
3037 gen_helper_enter_mmx(cpu_env);
3038 }
3040 modrm = cpu_ldub_code(env, s->pc++);
3041 reg = ((modrm >> 3) & 7);
3042 if (is_xmm)
3043 reg |= rex_r;
3044 mod = (modrm >> 6) & 3;
3045 if (sse_fn_epp == SSE_SPECIAL) {
3046 b |= (b1 << 8);
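/* With the prefix column folded into bits 8-9 of b, the case labels
   below read as 0xPBB: P = prefix (0: none, 1: 66, 2: F3, 3: F2),
   BB = opcode byte.  */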
3047 switch(b) {
3048 case 0x0e7: /* movntq */
3049 if (mod == 3)
3050 goto illegal_op;
3051 gen_lea_modrm(env, s, modrm);
3052 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3053 break;
3054 case 0x1e7: /* movntdq */
3055 case 0x02b: /* movntps */
3056 case 0x12b: /* movntpd */
3057 if (mod == 3)
3058 goto illegal_op;
3059 gen_lea_modrm(env, s, modrm);
3060 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3061 break;
3062 case 0x3f0: /* lddqu */
3063 if (mod == 3)
3064 goto illegal_op;
3065 gen_lea_modrm(env, s, modrm);
3066 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3067 break;
3068 case 0x22b: /* movntss */
3069 case 0x32b: /* movntsd */
3070 if (mod == 3)
3071 goto illegal_op;
3072 gen_lea_modrm(env, s, modrm);
3073 if (b1 & 1) {
3074 gen_stq_env_A0(s, offsetof(CPUX86State,
3075 xmm_regs[reg].XMM_Q(0)));
3076 } else {
3077 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3078 xmm_regs[reg].XMM_L(0)));
3079 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3081 break;
3082 case 0x6e: /* movd mm, ea */
3083 #ifdef TARGET_X86_64
3084 if (s->dflag == MO_64) {
3085 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3086 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3087 } else
3088 #endif
3090 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3091 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3092 offsetof(CPUX86State,fpregs[reg].mmx));
3093 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3094 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3096 break;
3097 case 0x16e: /* movd xmm, ea */
3098 #ifdef TARGET_X86_64
3099 if (s->dflag == MO_64) {
3100 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3101 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3102 offsetof(CPUX86State,xmm_regs[reg]));
3103 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3104 } else
3105 #endif
3107 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3108 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3109 offsetof(CPUX86State,xmm_regs[reg]));
3110 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3111 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3113 break;
3114 case 0x6f: /* movq mm, ea */
3115 if (mod != 3) {
3116 gen_lea_modrm(env, s, modrm);
3117 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3118 } else {
3119 rm = (modrm & 7);
3120 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3121 offsetof(CPUX86State,fpregs[rm].mmx));
3122 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3123 offsetof(CPUX86State,fpregs[reg].mmx));
3125 break;
3126 case 0x010: /* movups */
3127 case 0x110: /* movupd */
3128 case 0x028: /* movaps */
3129 case 0x128: /* movapd */
3130 case 0x16f: /* movdqa xmm, ea */
3131 case 0x26f: /* movdqu xmm, ea */
3132 if (mod != 3) {
3133 gen_lea_modrm(env, s, modrm);
3134 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3135 } else {
3136 rm = (modrm & 7) | REX_B(s);
3137 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3138 offsetof(CPUX86State,xmm_regs[rm]));
3140 break;
3141 case 0x210: /* movss xmm, ea */
3142 if (mod != 3) {
3143 gen_lea_modrm(env, s, modrm);
3144 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3145 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3146 tcg_gen_movi_tl(cpu_T[0], 0);
3147 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3148 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3149 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3150 } else {
3151 rm = (modrm & 7) | REX_B(s);
3152 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3153 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3155 break;
3156 case 0x310: /* movsd xmm, ea */
3157 if (mod != 3) {
3158 gen_lea_modrm(env, s, modrm);
3159 gen_ldq_env_A0(s, offsetof(CPUX86State,
3160 xmm_regs[reg].XMM_Q(0)));
3161 tcg_gen_movi_tl(cpu_T[0], 0);
3162 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3163 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3164 } else {
3165 rm = (modrm & 7) | REX_B(s);
3166 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3167 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3169 break;
3170 case 0x012: /* movlps */
3171 case 0x112: /* movlpd */
3172 if (mod != 3) {
3173 gen_lea_modrm(env, s, modrm);
3174 gen_ldq_env_A0(s, offsetof(CPUX86State,
3175 xmm_regs[reg].XMM_Q(0)));
3176 } else {
3177 /* movhlps */
3178 rm = (modrm & 7) | REX_B(s);
3179 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3180 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3182 break;
3183 case 0x212: /* movsldup */
3184 if (mod != 3) {
3185 gen_lea_modrm(env, s, modrm);
3186 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3187 } else {
3188 rm = (modrm & 7) | REX_B(s);
3189 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3190 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3191 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3192 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3194 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3195 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3196 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3197 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3198 break;
3199 case 0x312: /* movddup */
3200 if (mod != 3) {
3201 gen_lea_modrm(env, s, modrm);
3202 gen_ldq_env_A0(s, offsetof(CPUX86State,
3203 xmm_regs[reg].XMM_Q(0)));
3204 } else {
3205 rm = (modrm & 7) | REX_B(s);
3206 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3207 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3209 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3210 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3211 break;
3212 case 0x016: /* movhps */
3213 case 0x116: /* movhpd */
3214 if (mod != 3) {
3215 gen_lea_modrm(env, s, modrm);
3216 gen_ldq_env_A0(s, offsetof(CPUX86State,
3217 xmm_regs[reg].XMM_Q(1)));
3218 } else {
3219 /* movlhps */
3220 rm = (modrm & 7) | REX_B(s);
3221 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3222 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3224 break;
3225 case 0x216: /* movshdup */
3226 if (mod != 3) {
3227 gen_lea_modrm(env, s, modrm);
3228 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3229 } else {
3230 rm = (modrm & 7) | REX_B(s);
3231 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3232 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3233 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3234 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3236 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3237 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3238 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3239 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3240 break;
3241 case 0x178:
3242 case 0x378:
3244 int bit_index, field_length;
3246 if (b1 == 1 && reg != 0)
3247 goto illegal_op;
3248 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3249 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3250 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3251 offsetof(CPUX86State,xmm_regs[reg]));
3252 if (b1 == 1)
3253 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3254 tcg_const_i32(bit_index),
3255 tcg_const_i32(field_length));
3256 else
3257 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3258 tcg_const_i32(bit_index),
3259 tcg_const_i32(field_length));
3261 break;
3262 case 0x7e: /* movd ea, mm */
3263 #ifdef TARGET_X86_64
3264 if (s->dflag == MO_64) {
3265 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3266 offsetof(CPUX86State,fpregs[reg].mmx));
3267 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3268 } else
3269 #endif
3271 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3272 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3273 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3275 break;
3276 case 0x17e: /* movd ea, xmm */
3277 #ifdef TARGET_X86_64
3278 if (s->dflag == MO_64) {
3279 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3280 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3281 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3282 } else
3283 #endif
3285 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3286 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3287 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3289 break;
3290 case 0x27e: /* movq xmm, ea */
3291 if (mod != 3) {
3292 gen_lea_modrm(env, s, modrm);
3293 gen_ldq_env_A0(s, offsetof(CPUX86State,
3294 xmm_regs[reg].XMM_Q(0)));
3295 } else {
3296 rm = (modrm & 7) | REX_B(s);
3297 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3298 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3300 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3301 break;
3302 case 0x7f: /* movq ea, mm */
3303 if (mod != 3) {
3304 gen_lea_modrm(env, s, modrm);
3305 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3306 } else {
3307 rm = (modrm & 7);
3308 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3309 offsetof(CPUX86State,fpregs[reg].mmx));
3311 break;
3312 case 0x011: /* movups */
3313 case 0x111: /* movupd */
3314 case 0x029: /* movaps */
3315 case 0x129: /* movapd */
3316 case 0x17f: /* movdqa ea, xmm */
3317 case 0x27f: /* movdqu ea, xmm */
3318 if (mod != 3) {
3319 gen_lea_modrm(env, s, modrm);
3320 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3321 } else {
3322 rm = (modrm & 7) | REX_B(s);
3323 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3324 offsetof(CPUX86State,xmm_regs[reg]));
3326 break;
3327 case 0x211: /* movss ea, xmm */
3328 if (mod != 3) {
3329 gen_lea_modrm(env, s, modrm);
3330 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3331 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3332 } else {
3333 rm = (modrm & 7) | REX_B(s);
3334 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3335 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3337 break;
3338 case 0x311: /* movsd ea, xmm */
3339 if (mod != 3) {
3340 gen_lea_modrm(env, s, modrm);
3341 gen_stq_env_A0(s, offsetof(CPUX86State,
3342 xmm_regs[reg].XMM_Q(0)));
3343 } else {
3344 rm = (modrm & 7) | REX_B(s);
3345 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3346 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3348 break;
3349 case 0x013: /* movlps */
3350 case 0x113: /* movlpd */
3351 if (mod != 3) {
3352 gen_lea_modrm(env, s, modrm);
3353 gen_stq_env_A0(s, offsetof(CPUX86State,
3354 xmm_regs[reg].XMM_Q(0)));
3355 } else {
3356 goto illegal_op;
3358 break;
3359 case 0x017: /* movhps */
3360 case 0x117: /* movhpd */
3361 if (mod != 3) {
3362 gen_lea_modrm(env, s, modrm);
3363 gen_stq_env_A0(s, offsetof(CPUX86State,
3364 xmm_regs[reg].XMM_Q(1)));
3365 } else {
3366 goto illegal_op;
3368 break;
3369 case 0x71: /* shift mm, im */
3370 case 0x72:
3371 case 0x73:
3372 case 0x171: /* shift xmm, im */
3373 case 0x172:
3374 case 0x173:
3375 if (b1 >= 2) {
3376 goto illegal_op;
3378 val = cpu_ldub_code(env, s->pc++);
3379 if (is_xmm) {
3380 tcg_gen_movi_tl(cpu_T[0], val);
3381 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3382 tcg_gen_movi_tl(cpu_T[0], 0);
3383 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3384 op1_offset = offsetof(CPUX86State,xmm_t0);
3385 } else {
3386 tcg_gen_movi_tl(cpu_T[0], val);
3387 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3388 tcg_gen_movi_tl(cpu_T[0], 0);
3389 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3390 op1_offset = offsetof(CPUX86State,mmx_t0);
3392 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3393 (((modrm >> 3)) & 7)][b1];
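/* The table lookup above selects the shift helper by opcode group
   (0x71/0x72/0x73) and the ModRM reg field (the /digit); b1 picks the
   MMX or XMM form.  */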
3394 if (!sse_fn_epp) {
3395 goto illegal_op;
3397 if (is_xmm) {
3398 rm = (modrm & 7) | REX_B(s);
3399 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3400 } else {
3401 rm = (modrm & 7);
3402 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3404 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3405 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3406 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3407 break;
3408 case 0x050: /* movmskps */
3409 rm = (modrm & 7) | REX_B(s);
3410 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3411 offsetof(CPUX86State,xmm_regs[rm]));
3412 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3413 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3414 break;
3415 case 0x150: /* movmskpd */
3416 rm = (modrm & 7) | REX_B(s);
3417 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3418 offsetof(CPUX86State,xmm_regs[rm]));
3419 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3420 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3421 break;
3422 case 0x02a: /* cvtpi2ps */
3423 case 0x12a: /* cvtpi2pd */
3424 gen_helper_enter_mmx(cpu_env);
3425 if (mod != 3) {
3426 gen_lea_modrm(env, s, modrm);
3427 op2_offset = offsetof(CPUX86State,mmx_t0);
3428 gen_ldq_env_A0(s, op2_offset);
3429 } else {
3430 rm = (modrm & 7);
3431 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3433 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3434 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3435 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3436 switch(b >> 8) {
3437 case 0x0:
3438 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3439 break;
3440 default:
3441 case 0x1:
3442 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3443 break;
3445 break;
3446 case 0x22a: /* cvtsi2ss */
3447 case 0x32a: /* cvtsi2sd */
3448 ot = mo_64_32(s->dflag);
3449 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3450 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3451 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3452 if (ot == MO_32) {
3453 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3454 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3455 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3456 } else {
3457 #ifdef TARGET_X86_64
3458 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3459 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3460 #else
3461 goto illegal_op;
3462 #endif
3464 break;
3465 case 0x02c: /* cvttps2pi */
3466 case 0x12c: /* cvttpd2pi */
3467 case 0x02d: /* cvtps2pi */
3468 case 0x12d: /* cvtpd2pi */
3469 gen_helper_enter_mmx(cpu_env);
3470 if (mod != 3) {
3471 gen_lea_modrm(env, s, modrm);
3472 op2_offset = offsetof(CPUX86State,xmm_t0);
3473 gen_ldo_env_A0(s, op2_offset);
3474 } else {
3475 rm = (modrm & 7) | REX_B(s);
3476 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3478 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3479 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3480 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3481 switch(b) {
3482 case 0x02c:
3483 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3484 break;
3485 case 0x12c:
3486 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3487 break;
3488 case 0x02d:
3489 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3490 break;
3491 case 0x12d:
3492 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3493 break;
3495 break;
3496 case 0x22c: /* cvttss2si */
3497 case 0x32c: /* cvttsd2si */
3498 case 0x22d: /* cvtss2si */
3499 case 0x32d: /* cvtsd2si */
3500 ot = mo_64_32(s->dflag);
3501 if (mod != 3) {
3502 gen_lea_modrm(env, s, modrm);
3503 if ((b >> 8) & 1) {
3504 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
3505 } else {
3506 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3507 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3509 op2_offset = offsetof(CPUX86State,xmm_t0);
3510 } else {
3511 rm = (modrm & 7) | REX_B(s);
3512 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3514 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3515 if (ot == MO_32) {
3516 SSEFunc_i_ep sse_fn_i_ep =
3517 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3518 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3519 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3520 } else {
3521 #ifdef TARGET_X86_64
3522 SSEFunc_l_ep sse_fn_l_ep =
3523 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3524 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3525 #else
3526 goto illegal_op;
3527 #endif
3529 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3530 break;
3531 case 0xc4: /* pinsrw */
3532 case 0x1c4:
3533 s->rip_offset = 1;
3534 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3535 val = cpu_ldub_code(env, s->pc++);
3536 if (b1) {
3537 val &= 7;
3538 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3539 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3540 } else {
3541 val &= 3;
3542 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3543 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3545 break;
3546 case 0xc5: /* pextrw */
3547 case 0x1c5:
3548 if (mod != 3)
3549 goto illegal_op;
3550 ot = mo_64_32(s->dflag);
3551 val = cpu_ldub_code(env, s->pc++);
3552 if (b1) {
3553 val &= 7;
3554 rm = (modrm & 7) | REX_B(s);
3555 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3556 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3557 } else {
3558 val &= 3;
3559 rm = (modrm & 7);
3560 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3561 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3563 reg = ((modrm >> 3) & 7) | rex_r;
3564 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3565 break;
3566 case 0x1d6: /* movq ea, xmm */
3567 if (mod != 3) {
3568 gen_lea_modrm(env, s, modrm);
3569 gen_stq_env_A0(s, offsetof(CPUX86State,
3570 xmm_regs[reg].XMM_Q(0)));
3571 } else {
3572 rm = (modrm & 7) | REX_B(s);
3573 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3574 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3575 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3577 break;
3578 case 0x2d6: /* movq2dq */
3579 gen_helper_enter_mmx(cpu_env);
3580 rm = (modrm & 7);
3581 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3582 offsetof(CPUX86State,fpregs[rm].mmx));
3583 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3584 break;
3585 case 0x3d6: /* movdq2q */
3586 gen_helper_enter_mmx(cpu_env);
3587 rm = (modrm & 7) | REX_B(s);
3588 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3589 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3590 break;
3591 case 0xd7: /* pmovmskb */
3592 case 0x1d7:
3593 if (mod != 3)
3594 goto illegal_op;
3595 if (b1) {
3596 rm = (modrm & 7) | REX_B(s);
3597 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3598 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3599 } else {
3600 rm = (modrm & 7);
3601 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3602 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3604 reg = ((modrm >> 3) & 7) | rex_r;
3605 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3606 break;
3608 case 0x138:
3609 case 0x038:
3610 b = modrm;
3611 if ((b & 0xf0) == 0xf0) {
3612 goto do_0f_38_fx;
3614 modrm = cpu_ldub_code(env, s->pc++);
3615 rm = modrm & 7;
3616 reg = ((modrm >> 3) & 7) | rex_r;
3617 mod = (modrm >> 6) & 3;
3618 if (b1 >= 2) {
3619 goto illegal_op;
3622 sse_fn_epp = sse_op_table6[b].op[b1];
3623 if (!sse_fn_epp) {
3624 goto illegal_op;
3626 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3627 goto illegal_op;
3629 if (b1) {
3630 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3631 if (mod == 3) {
3632 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3633 } else {
3634 op2_offset = offsetof(CPUX86State,xmm_t0);
3635 gen_lea_modrm(env, s, modrm);
3636 switch (b) {
3637 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3638 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3639 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3640 gen_ldq_env_A0(s, op2_offset +
3641 offsetof(XMMReg, XMM_Q(0)));
3642 break;
3643 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3644 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3645 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3646 s->mem_index, MO_LEUL);
3647 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3648 offsetof(XMMReg, XMM_L(0)));
3649 break;
3650 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3651 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3652 s->mem_index, MO_LEUW);
3653 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3654 offsetof(XMMReg, XMM_W(0)));
3655 break;
3656 case 0x2a: /* movntdqa */
3657 gen_ldo_env_A0(s, op1_offset);
3658 return;
3659 default:
3660 gen_ldo_env_A0(s, op2_offset);
3663 } else {
3664 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3665 if (mod == 3) {
3666 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3667 } else {
3668 op2_offset = offsetof(CPUX86State,mmx_t0);
3669 gen_lea_modrm(env, s, modrm);
3670 gen_ldq_env_A0(s, op2_offset);
3673 if (sse_fn_epp == SSE_SPECIAL) {
3674 goto illegal_op;
3677 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3678 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3679 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3681 if (b == 0x17) {
3682 set_cc_op(s, CC_OP_EFLAGS);
3684 break;
3686 case 0x238:
3687 case 0x338:
3688 do_0f_38_fx:
3689 /* Various integer extensions at 0f 38 f[0-f]. */
3690 b = modrm | (b1 << 8);
3691 modrm = cpu_ldub_code(env, s->pc++);
3692 reg = ((modrm >> 3) & 7) | rex_r;
3694 switch (b) {
3695 case 0x3f0: /* crc32 Gd,Eb */
3696 case 0x3f1: /* crc32 Gd,Ey */
3697 do_crc32:
3698 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3699 goto illegal_op;
3701 if ((b & 0xff) == 0xf0) {
3702 ot = MO_8;
3703 } else if (s->dflag != MO_64) {
3704 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3705 } else {
3706 ot = MO_64;
3709 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3710 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3711 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3712 cpu_T[0], tcg_const_i32(8 << ot));
3714 ot = mo_64_32(s->dflag);
3715 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3716 break;
3718 case 0x1f0: /* crc32 or movbe */
3719 case 0x1f1:
3720 /* For these insns, the f3 prefix is supposed to have priority
3721 over the 66 prefix, but that's not what we implemented above
3722 when setting b1. */
3723 if (s->prefix & PREFIX_REPNZ) {
3724 goto do_crc32;
3726 /* FALLTHRU */
3727 case 0x0f0: /* movbe Gy,My */
3728 case 0x0f1: /* movbe My,Gy */
3729 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3730 goto illegal_op;
3732 if (s->dflag != MO_64) {
3733 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3734 } else {
3735 ot = MO_64;
3738 gen_lea_modrm(env, s, modrm);
3739 if ((b & 1) == 0) {
3740 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3741 s->mem_index, ot | MO_BE);
3742 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3743 } else {
3744 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3745 s->mem_index, ot | MO_BE);
3747 break;
3749 case 0x0f2: /* andn Gy, By, Ey */
3750 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3751 || !(s->prefix & PREFIX_VEX)
3752 || s->vex_l != 0) {
3753 goto illegal_op;
3755 ot = mo_64_32(s->dflag);
3756 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3757 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3758 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3759 gen_op_update1_cc();
3760 set_cc_op(s, CC_OP_LOGICB + ot);
3761 break;
3763 case 0x0f7: /* bextr Gy, Ey, By */
3764 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3765 || !(s->prefix & PREFIX_VEX)
3766 || s->vex_l != 0) {
3767 goto illegal_op;
3769 ot = mo_64_32(s->dflag);
3771 TCGv bound, zero;
3773 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3774 /* Extract START, and shift the operand.
3775 Shifts larger than operand size get zeros. */
3776 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3777 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3779 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3780 zero = tcg_const_tl(0);
3781 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3782 cpu_T[0], zero);
3783 tcg_temp_free(zero);
3785 /* Extract the LEN into a mask. Lengths larger than
3786 operand size get all ones. */
3787 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3788 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3789 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3790 cpu_A0, bound);
3791 tcg_temp_free(bound);
3792 tcg_gen_movi_tl(cpu_T[1], 1);
3793 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3794 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3795 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3797 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3798 gen_op_update1_cc();
3799 set_cc_op(s, CC_OP_LOGICB + ot);
3801 break;
3803 case 0x0f5: /* bzhi Gy, Ey, By */
3804 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3805 || !(s->prefix & PREFIX_VEX)
3806 || s->vex_l != 0) {
3807 goto illegal_op;
3809 ot = mo_64_32(s->dflag);
3810 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3811 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3813 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3814 /* Note that since we're using BMILG (in order to get O
3815 cleared) we need to store the inverse into C. */
3816 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3817 cpu_T[1], bound);
3818 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3819 bound, bound, cpu_T[1]);
3820 tcg_temp_free(bound);
3822 tcg_gen_movi_tl(cpu_A0, -1);
3823 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3824 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3825 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3826 gen_op_update1_cc();
3827 set_cc_op(s, CC_OP_BMILGB + ot);
3828 break;
3830 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3831 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3832 || !(s->prefix & PREFIX_VEX)
3833 || s->vex_l != 0) {
3834 goto illegal_op;
3836 ot = mo_64_32(s->dflag);
3837 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3838 switch (ot) {
3839 default:
3840 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3841 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3842 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3843 cpu_tmp2_i32, cpu_tmp3_i32);
3844 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3845 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3846 break;
3847 #ifdef TARGET_X86_64
3848 case MO_64:
3849 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
3850 cpu_T[0], cpu_regs[R_EDX]);
3851 break;
3852 #endif
3854 break;
3856 case 0x3f5: /* pdep Gy, By, Ey */
3857 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3858 || !(s->prefix & PREFIX_VEX)
3859 || s->vex_l != 0) {
3860 goto illegal_op;
3862 ot = mo_64_32(s->dflag);
3863 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3864 /* Note that by zero-extending the mask operand, we
3865 automatically handle zero-extending the result. */
3866 if (ot == MO_64) {
3867 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3868 } else {
3869 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3871 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3872 break;
3874 case 0x2f5: /* pext Gy, By, Ey */
3875 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3876 || !(s->prefix & PREFIX_VEX)
3877 || s->vex_l != 0) {
3878 goto illegal_op;
3880 ot = mo_64_32(s->dflag);
3881 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3882 /* Note that by zero-extending the mask operand, we
3883 automatically handle zero-extending the result. */
3884 if (ot == MO_64) {
3885 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3886 } else {
3887 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3889 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3890 break;
3892 case 0x1f6: /* adcx Gy, Ey */
3893 case 0x2f6: /* adox Gy, Ey */
3894 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3895 goto illegal_op;
3896 } else {
3897 TCGv carry_in, carry_out, zero;
3898 int end_op;
3900 ot = mo_64_32(s->dflag);
3901 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3903 /* Re-use the carry-out from a previous round. */
3904 TCGV_UNUSED(carry_in);
3905 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3906 switch (s->cc_op) {
3907 case CC_OP_ADCX:
3908 if (b == 0x1f6) {
3909 carry_in = cpu_cc_dst;
3910 end_op = CC_OP_ADCX;
3911 } else {
3912 end_op = CC_OP_ADCOX;
3914 break;
3915 case CC_OP_ADOX:
3916 if (b == 0x1f6) {
3917 end_op = CC_OP_ADCOX;
3918 } else {
3919 carry_in = cpu_cc_src2;
3920 end_op = CC_OP_ADOX;
3922 break;
3923 case CC_OP_ADCOX:
3924 end_op = CC_OP_ADCOX;
3925 carry_in = carry_out;
3926 break;
3927 default:
3928 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3929 break;
3931 /* If we can't reuse carry-out, get it out of EFLAGS. */
3932 if (TCGV_IS_UNUSED(carry_in)) {
3933 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3934 gen_compute_eflags(s);
3936 carry_in = cpu_tmp0;
3937 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3938 ctz32(b == 0x1f6 ? CC_C : CC_O));
3939 tcg_gen_andi_tl(carry_in, carry_in, 1);
3942 switch (ot) {
3943 #ifdef TARGET_X86_64
3944 case MO_32:
3945 /* If we know TL is 64-bit, and we want a 32-bit
3946 result, just do everything in 64-bit arithmetic. */
3947 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3948 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
3949 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
3950 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
3951 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
3952 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
3953 break;
3954 #endif
3955 default:
3956 /* Otherwise compute the carry-out in two steps. */
3957 zero = tcg_const_tl(0);
3958 tcg_gen_add2_tl(cpu_T[0], carry_out,
3959 cpu_T[0], zero,
3960 carry_in, zero);
3961 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3962 cpu_regs[reg], carry_out,
3963 cpu_T[0], zero);
3964 tcg_temp_free(zero);
3965 break;
3967 set_cc_op(s, end_op);
3969 break;
3971 case 0x1f7: /* shlx Gy, Ey, By */
3972 case 0x2f7: /* sarx Gy, Ey, By */
3973 case 0x3f7: /* shrx Gy, Ey, By */
3974 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3975 || !(s->prefix & PREFIX_VEX)
3976 || s->vex_l != 0) {
3977 goto illegal_op;
3979 ot = mo_64_32(s->dflag);
3980 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3981 if (ot == MO_64) {
3982 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
3983 } else {
3984 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
3986 if (b == 0x1f7) {
3987 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3988 } else if (b == 0x2f7) {
3989 if (ot != MO_64) {
3990 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3992 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3993 } else {
3994 if (ot != MO_64) {
3995 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3997 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3999 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4000 break;
4002 case 0x0f3:
4003 case 0x1f3:
4004 case 0x2f3:
4005 case 0x3f3: /* Group 17 */
4006 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
4007 || !(s->prefix & PREFIX_VEX)
4008 || s->vex_l != 0) {
4009 goto illegal_op;
4011 ot = mo_64_32(s->dflag);
4012 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4014 switch (reg & 7) {
4015 case 1: /* blsr By,Ey */
4016 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4017 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4018 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4019 gen_op_update2_cc();
4020 set_cc_op(s, CC_OP_BMILGB + ot);
4021 break;
4023 case 2: /* blsmsk By,Ey */
4024 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4025 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4026 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4027 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4028 set_cc_op(s, CC_OP_BMILGB + ot);
4029 break;
4031 case 3: /* blsi By, Ey */
4032 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4033 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4034 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4035 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4036 set_cc_op(s, CC_OP_BMILGB + ot);
4037 break;
4039 default:
4040 goto illegal_op;
4042 break;
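/* For reference, the Group 17 bit tricks are: blsi isolates the
   lowest set bit (src & -src), blsmsk builds a mask up to and
   including the lowest set bit (src ^ (src - 1)), and blsr clears
   the lowest set bit (src & (src - 1)).  All three set ZF/SF from
   the result, with CF derived from the source via CC_OP_BMILGB. */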
4044 default:
4045 goto illegal_op;
4047 break;
4049 case 0x03a:
4050 case 0x13a:
4051 b = modrm;
4052 modrm = cpu_ldub_code(env, s->pc++);
4053 rm = modrm & 7;
4054 reg = ((modrm >> 3) & 7) | rex_r;
4055 mod = (modrm >> 6) & 3;
4056 if (b1 >= 2) {
4057 goto illegal_op;
4060 sse_fn_eppi = sse_op_table7[b].op[b1];
4061 if (!sse_fn_eppi) {
4062 goto illegal_op;
4064 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4065 goto illegal_op;
4067 if (sse_fn_eppi == SSE_SPECIAL) {
4068 ot = mo_64_32(s->dflag);
4069 rm = (modrm & 7) | REX_B(s);
4070 if (mod != 3)
4071 gen_lea_modrm(env, s, modrm);
4072 reg = ((modrm >> 3) & 7) | rex_r;
4073 val = cpu_ldub_code(env, s->pc++);
4074 switch (b) {
4075 case 0x14: /* pextrb */
4076 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4077 xmm_regs[reg].XMM_B(val & 15)));
4078 if (mod == 3) {
4079 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4080 } else {
4081 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4082 s->mem_index, MO_UB);
4084 break;
4085 case 0x15: /* pextrw */
4086 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4087 xmm_regs[reg].XMM_W(val & 7)));
4088 if (mod == 3) {
4089 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4090 } else {
4091 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4092 s->mem_index, MO_LEUW);
4094 break;
4095 case 0x16:
4096 if (ot == MO_32) { /* pextrd */
4097 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4098 offsetof(CPUX86State,
4099 xmm_regs[reg].XMM_L(val & 3)));
4100 if (mod == 3) {
4101 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4102 } else {
4103 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4104 s->mem_index, MO_LEUL);
4106 } else { /* pextrq */
4107 #ifdef TARGET_X86_64
4108 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4109 offsetof(CPUX86State,
4110 xmm_regs[reg].XMM_Q(val & 1)));
4111 if (mod == 3) {
4112 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4113 } else {
4114 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4115 s->mem_index, MO_LEQ);
4117 #else
4118 goto illegal_op;
4119 #endif
4121 break;
4122 case 0x17: /* extractps */
4123 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4124 xmm_regs[reg].XMM_L(val & 3)));
4125 if (mod == 3) {
4126 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4127 } else {
4128 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4129 s->mem_index, MO_LEUL);
4131 break;
4132 case 0x20: /* pinsrb */
4133 if (mod == 3) {
4134 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
4135 } else {
4136 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4137 s->mem_index, MO_UB);
4139 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4140 xmm_regs[reg].XMM_B(val & 15)));
4141 break;
4142 case 0x21: /* insertps */
4143 if (mod == 3) {
4144 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4145 offsetof(CPUX86State,xmm_regs[rm]
4146 .XMM_L((val >> 6) & 3)));
4147 } else {
4148 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4149 s->mem_index, MO_LEUL);
4151 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4152 offsetof(CPUX86State,xmm_regs[reg]
4153 .XMM_L((val >> 4) & 3)));
4154 if ((val >> 0) & 1)
4155 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4156 cpu_env, offsetof(CPUX86State,
4157 xmm_regs[reg].XMM_L(0)));
4158 if ((val >> 1) & 1)
4159 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4160 cpu_env, offsetof(CPUX86State,
4161 xmm_regs[reg].XMM_L(1)));
4162 if ((val >> 2) & 1)
4163 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4164 cpu_env, offsetof(CPUX86State,
4165 xmm_regs[reg].XMM_L(2)));
4166 if ((val >> 3) & 1)
4167 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4168 cpu_env, offsetof(CPUX86State,
4169 xmm_regs[reg].XMM_L(3)));
4170 break;
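/* insertps immediate layout: bits [7:6] select the source dword (for
   a register source), bits [5:4] select the destination dword, and
   bits [3:0] are a zero mask applied to the destination, exactly the
   (val >> 6), (val >> 4) and low-bit tests implemented above. */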
4171 case 0x22:
4172 if (ot == MO_32) { /* pinsrd */
4173 if (mod == 3) {
4174 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4175 } else {
4176 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4177 s->mem_index, MO_LEUL);
4179 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4180 offsetof(CPUX86State,
4181 xmm_regs[reg].XMM_L(val & 3)));
4182 } else { /* pinsrq */
4183 #ifdef TARGET_X86_64
4184 if (mod == 3) {
4185 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4186 } else {
4187 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4188 s->mem_index, MO_LEQ);
4190 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4191 offsetof(CPUX86State,
4192 xmm_regs[reg].XMM_Q(val & 1)));
4193 #else
4194 goto illegal_op;
4195 #endif
4197 break;
4199 return;
4202 if (b1) {
4203 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4204 if (mod == 3) {
4205 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4206 } else {
4207 op2_offset = offsetof(CPUX86State,xmm_t0);
4208 gen_lea_modrm(env, s, modrm);
4209 gen_ldo_env_A0(s, op2_offset);
4211 } else {
4212 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4213 if (mod == 3) {
4214 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4215 } else {
4216 op2_offset = offsetof(CPUX86State,mmx_t0);
4217 gen_lea_modrm(env, s, modrm);
4218 gen_ldq_env_A0(s, op2_offset);
4221 val = cpu_ldub_code(env, s->pc++);
4223 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4224 set_cc_op(s, CC_OP_EFLAGS);
4226 if (s->dflag == MO_64) {
4227 /* The helper must use entire 64-bit gp registers */
4228 val |= 1 << 8;
4232 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4233 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4234 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4235 break;
4237 case 0x33a:
4238 /* Various integer extensions at 0f 3a f[0-f]. */
4239 b = modrm | (b1 << 8);
4240 modrm = cpu_ldub_code(env, s->pc++);
4241 reg = ((modrm >> 3) & 7) | rex_r;
4243 switch (b) {
4244 case 0x3f0: /* rorx Gy,Ey, Ib */
4245 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4246 || !(s->prefix & PREFIX_VEX)
4247 || s->vex_l != 0) {
4248 goto illegal_op;
4250 ot = mo_64_32(s->dflag);
4251 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4252 b = cpu_ldub_code(env, s->pc++);
4253 if (ot == MO_64) {
4254 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4255 } else {
4256 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4257 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4258 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4260 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4261 break;
4263 default:
4264 goto illegal_op;
4266 break;
4268 default:
4269 goto illegal_op;
4271 } else {
4272 /* generic MMX or SSE operation */
4273 switch(b) {
4274 case 0x70: /* pshufx insn */
4275 case 0xc6: /* pshufx insn */
4276 case 0xc2: /* compare insns */
4277 s->rip_offset = 1;
4278 break;
4279 default:
4280 break;
4282 if (is_xmm) {
4283 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4284 if (mod != 3) {
4285 int sz = 4;
4287 gen_lea_modrm(env, s, modrm);
4288 op2_offset = offsetof(CPUX86State,xmm_t0);
4290 switch (b) {
4291 case 0x50 ... 0x5a:
4292 case 0x5c ... 0x5f:
4293 case 0xc2:
4294 /* Most sse scalar operations. */
4295 if (b1 == 2) {
4296 sz = 2;
4297 } else if (b1 == 3) {
4298 sz = 3;
4300 break;
4302 case 0x2e: /* ucomis[sd] */
4303 case 0x2f: /* comis[sd] */
4304 if (b1 == 0) {
4305 sz = 2;
4306 } else {
4307 sz = 3;
4309 break;
4312 switch (sz) {
4313 case 2:
4314 /* 32 bit access */
4315 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4316 tcg_gen_st32_tl(cpu_T[0], cpu_env,
4317 offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4318 break;
4319 case 3:
4320 /* 64 bit access */
4321 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0)));
4322 break;
4323 default:
4324 /* 128 bit access */
4325 gen_ldo_env_A0(s, op2_offset);
4326 break;
4328 } else {
4329 rm = (modrm & 7) | REX_B(s);
4330 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4332 } else {
4333 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4334 if (mod != 3) {
4335 gen_lea_modrm(env, s, modrm);
4336 op2_offset = offsetof(CPUX86State,mmx_t0);
4337 gen_ldq_env_A0(s, op2_offset);
4338 } else {
4339 rm = (modrm & 7);
4340 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4343 switch(b) {
4344 case 0x0f: /* 3DNow! data insns */
4345 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4346 goto illegal_op;
4347 val = cpu_ldub_code(env, s->pc++);
4348 sse_fn_epp = sse_op_table5[val];
4349 if (!sse_fn_epp) {
4350 goto illegal_op;
4352 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4353 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4354 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4355 break;
4356 case 0x70: /* pshufx insn */
4357 case 0xc6: /* pshufx insn */
4358 val = cpu_ldub_code(env, s->pc++);
4359 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4360 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4361 /* XXX: introduce a new table? */
4362 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4363 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4364 break;
4365 case 0xc2:
4366 /* compare insns */
4367 val = cpu_ldub_code(env, s->pc++);
4368 if (val >= 8)
4369 goto illegal_op;
4370 sse_fn_epp = sse_op_table4[val][b1];
4372 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4373 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4374 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4375 break;
4376 case 0xf7:
4377 /* maskmov: we must prepare A0 */
4378 if (mod != 3)
4379 goto illegal_op;
4380 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4381 gen_extu(s->aflag, cpu_A0);
4382 gen_add_A0_ds_seg(s);
4384 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4385 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4386 /* XXX: introduce a new table? */
4387 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4388 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4389 break;
4390 default:
4391 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4392 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4393 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4394 break;
4396 if (b == 0x2e || b == 0x2f) {
4397 set_cc_op(s, CC_OP_EFLAGS);
4402 /* convert one instruction. s->is_jmp is set if the translation must
4403 be stopped. Return the next pc value */
4404 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4405 target_ulong pc_start)
4407 int b, prefixes;
4408 int shift;
4409 TCGMemOp ot, aflag, dflag;
4410 int modrm, reg, rm, mod, op, opreg, val;
4411 target_ulong next_eip, tval;
4412 int rex_w, rex_r;
4414 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
4415 tcg_gen_debug_insn_start(pc_start);
4417 s->pc = pc_start;
4418 prefixes = 0;
4419 s->override = -1;
4420 rex_w = -1;
4421 rex_r = 0;
4422 #ifdef TARGET_X86_64
4423 s->rex_x = 0;
4424 s->rex_b = 0;
4425 x86_64_hregs = 0;
4426 #endif
4427 s->rip_offset = 0; /* for relative ip address */
4428 s->vex_l = 0;
4429 s->vex_v = 0;
4430 next_byte:
4431 b = cpu_ldub_code(env, s->pc);
4432 s->pc++;
4433 /* Collect prefixes. */
4434 switch (b) {
4435 case 0xf3:
4436 prefixes |= PREFIX_REPZ;
4437 goto next_byte;
4438 case 0xf2:
4439 prefixes |= PREFIX_REPNZ;
4440 goto next_byte;
4441 case 0xf0:
4442 prefixes |= PREFIX_LOCK;
4443 goto next_byte;
4444 case 0x2e:
4445 s->override = R_CS;
4446 goto next_byte;
4447 case 0x36:
4448 s->override = R_SS;
4449 goto next_byte;
4450 case 0x3e:
4451 s->override = R_DS;
4452 goto next_byte;
4453 case 0x26:
4454 s->override = R_ES;
4455 goto next_byte;
4456 case 0x64:
4457 s->override = R_FS;
4458 goto next_byte;
4459 case 0x65:
4460 s->override = R_GS;
4461 goto next_byte;
4462 case 0x66:
4463 prefixes |= PREFIX_DATA;
4464 goto next_byte;
4465 case 0x67:
4466 prefixes |= PREFIX_ADR;
4467 goto next_byte;
4468 #ifdef TARGET_X86_64
4469 case 0x40 ... 0x4f:
4470 if (CODE64(s)) {
4471 /* REX prefix */
4472 rex_w = (b >> 3) & 1;
4473 rex_r = (b & 0x4) << 1;
4474 s->rex_x = (b & 0x2) << 2;
4475 REX_B(s) = (b & 0x1) << 3;
4476 x86_64_hregs = 1; /* select uniform byte register addressing */
4477 goto next_byte;
4479 break;
4480 #endif
4481 case 0xc5: /* 2-byte VEX */
4482 case 0xc4: /* 3-byte VEX */
4483 /* VEX prefixes are valid only in 32-bit and 64-bit protected mode;
4484 in 16-bit and vm86 modes these bytes decode as LES or LDS. */
4485 if (s->code32 && !s->vm86) {
4486 static const int pp_prefix[4] = {
4487 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4488 };
4489 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4491 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4492 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4493 otherwise the instruction is LES or LDS. */
4494 break;
4496 s->pc++;
4498 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4499 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4500 | PREFIX_LOCK | PREFIX_DATA)) {
4501 goto illegal_op;
4503 #ifdef TARGET_X86_64
4504 if (x86_64_hregs) {
4505 goto illegal_op;
4507 #endif
4508 rex_r = (~vex2 >> 4) & 8;
4509 if (b == 0xc5) {
4510 vex3 = vex2;
4511 b = cpu_ldub_code(env, s->pc++);
4512 } else {
4513 #ifdef TARGET_X86_64
4514 s->rex_x = (~vex2 >> 3) & 8;
4515 s->rex_b = (~vex2 >> 2) & 8;
4516 #endif
4517 vex3 = cpu_ldub_code(env, s->pc++);
4518 rex_w = (vex3 >> 7) & 1;
4519 switch (vex2 & 0x1f) {
4520 case 0x01: /* Implied 0f leading opcode bytes. */
4521 b = cpu_ldub_code(env, s->pc++) | 0x100;
4522 break;
4523 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4524 b = 0x138;
4525 break;
4526 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4527 b = 0x13a;
4528 break;
4529 default: /* Reserved for future use. */
4530 goto illegal_op;
4533 s->vex_v = (~vex3 >> 3) & 0xf;
4534 s->vex_l = (vex3 >> 2) & 1;
4535 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4537 break;
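/* VEX byte layout for reference: the 2-byte form (c5) packs
   [R vvvv L pp] into its single payload byte; the 3-byte form (c4)
   carries [R X B mmmmm] in the first payload byte and [W vvvv L pp]
   in the second.  R/X/B/vvvv are stored inverted in the instruction,
   hence the complemented extractions above, and mmmmm selects the
   implied leading opcode bytes (0f, 0f 38 or 0f 3a). */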
4540 /* Post-process prefixes. */
4541 if (CODE64(s)) {
4542 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4543 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4544 over 0x66 if both are present. */
4545 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4546 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4547 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4548 } else {
4549 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4550 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4551 dflag = MO_32;
4552 } else {
4553 dflag = MO_16;
4555 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4556 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4557 aflag = MO_32;
4558 } else {
4559 aflag = MO_16;
4563 s->prefix = prefixes;
4564 s->aflag = aflag;
4565 s->dflag = dflag;
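/* Worked example: in a 32-bit code segment a bare instruction gets
   dflag = aflag = MO_32; a 66 prefix flips dflag to MO_16 and a 67
   prefix flips aflag to MO_16.  In 64-bit mode, 66 still selects
   MO_16 data unless REX.W overrides it to MO_64, and 67 drops the
   address size to MO_32. */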
4567 /* lock generation */
4568 if (prefixes & PREFIX_LOCK)
4569 gen_helper_lock();
4571 /* now check op code */
4572 reswitch:
4573 switch(b) {
4574 case 0x0f:
4575 /**************************/
4576 /* extended op code */
4577 b = cpu_ldub_code(env, s->pc++) | 0x100;
4578 goto reswitch;
4580 /**************************/
4581 /* arith & logic */
4582 case 0x00 ... 0x05:
4583 case 0x08 ... 0x0d:
4584 case 0x10 ... 0x15:
4585 case 0x18 ... 0x1d:
4586 case 0x20 ... 0x25:
4587 case 0x28 ... 0x2d:
4588 case 0x30 ... 0x35:
4589 case 0x38 ... 0x3d:
4591 int op, f, val;
4592 op = (b >> 3) & 7;
4593 f = (b >> 1) & 3;
4595 ot = mo_b_d(b, dflag);
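/* Opcode layout for the 00-3f arithmetic block: bits [5:3] select
   the operation (add, or, adc, sbb, and, sub, xor, cmp), bit 0
   selects byte vs. full-size operands via mo_b_d(), and f, taken
   from bits [2:1], selects the operand form handled below:
   0 = Ev,Gv; 1 = Gv,Ev; 2 = accumulator,immediate. */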
4597 switch(f) {
4598 case 0: /* OP Ev, Gv */
4599 modrm = cpu_ldub_code(env, s->pc++);
4600 reg = ((modrm >> 3) & 7) | rex_r;
4601 mod = (modrm >> 6) & 3;
4602 rm = (modrm & 7) | REX_B(s);
4603 if (mod != 3) {
4604 gen_lea_modrm(env, s, modrm);
4605 opreg = OR_TMP0;
4606 } else if (op == OP_XORL && rm == reg) {
4607 xor_zero:
4608 /* xor reg, reg optimisation */
4609 set_cc_op(s, CC_OP_CLR);
4610 tcg_gen_movi_tl(cpu_T[0], 0);
4611 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4612 break;
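/* Recognizing xor reg,reg as a clear idiom means the old register
   value is never read, and CC_OP_CLR records the known flag state
   (ZF and PF set, CF/OF/SF clear) so the flags are only materialized
   if a later instruction actually consumes them. */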
4613 } else {
4614 opreg = rm;
4616 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4617 gen_op(s, op, ot, opreg);
4618 break;
4619 case 1: /* OP Gv, Ev */
4620 modrm = cpu_ldub_code(env, s->pc++);
4621 mod = (modrm >> 6) & 3;
4622 reg = ((modrm >> 3) & 7) | rex_r;
4623 rm = (modrm & 7) | REX_B(s);
4624 if (mod != 3) {
4625 gen_lea_modrm(env, s, modrm);
4626 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4627 } else if (op == OP_XORL && rm == reg) {
4628 goto xor_zero;
4629 } else {
4630 gen_op_mov_v_reg(ot, cpu_T[1], rm);
4632 gen_op(s, op, ot, reg);
4633 break;
4634 case 2: /* OP A, Iv */
4635 val = insn_get(env, s, ot);
4636 tcg_gen_movi_tl(cpu_T[1], val);
4637 gen_op(s, op, ot, OR_EAX);
4638 break;
4641 break;
4643 case 0x82:
4644 if (CODE64(s))
4645 goto illegal_op;
4646 case 0x80: /* GRP1 */
4647 case 0x81:
4648 case 0x83:
4650 int val;
4652 ot = mo_b_d(b, dflag);
4654 modrm = cpu_ldub_code(env, s->pc++);
4655 mod = (modrm >> 6) & 3;
4656 rm = (modrm & 7) | REX_B(s);
4657 op = (modrm >> 3) & 7;
4659 if (mod != 3) {
4660 if (b == 0x83)
4661 s->rip_offset = 1;
4662 else
4663 s->rip_offset = insn_const_size(ot);
4664 gen_lea_modrm(env, s, modrm);
4665 opreg = OR_TMP0;
4666 } else {
4667 opreg = rm;
4670 switch(b) {
4671 default:
4672 case 0x80:
4673 case 0x81:
4674 case 0x82:
4675 val = insn_get(env, s, ot);
4676 break;
4677 case 0x83:
4678 val = (int8_t)insn_get(env, s, MO_8);
4679 break;
4681 tcg_gen_movi_tl(cpu_T[1], val);
4682 gen_op(s, op, ot, opreg);
4684 break;
4686 /**************************/
4687 /* inc, dec, and other misc arith */
4688 case 0x40 ... 0x47: /* inc Gv */
4689 ot = dflag;
4690 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4691 break;
4692 case 0x48 ... 0x4f: /* dec Gv */
4693 ot = dflag;
4694 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4695 break;
4696 case 0xf6: /* GRP3 */
4697 case 0xf7:
4698 ot = mo_b_d(b, dflag);
4700 modrm = cpu_ldub_code(env, s->pc++);
4701 mod = (modrm >> 6) & 3;
4702 rm = (modrm & 7) | REX_B(s);
4703 op = (modrm >> 3) & 7;
4704 if (mod != 3) {
4705 if (op == 0)
4706 s->rip_offset = insn_const_size(ot);
4707 gen_lea_modrm(env, s, modrm);
4708 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4709 } else {
4710 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4713 switch(op) {
4714 case 0: /* test */
4715 val = insn_get(env, s, ot);
4716 tcg_gen_movi_tl(cpu_T[1], val);
4717 gen_op_testl_T0_T1_cc();
4718 set_cc_op(s, CC_OP_LOGICB + ot);
4719 break;
4720 case 2: /* not */
4721 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4722 if (mod != 3) {
4723 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4724 } else {
4725 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4727 break;
4728 case 3: /* neg */
4729 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4730 if (mod != 3) {
4731 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4732 } else {
4733 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4735 gen_op_update_neg_cc();
4736 set_cc_op(s, CC_OP_SUBB + ot);
4737 break;
4738 case 4: /* mul */
4739 switch(ot) {
4740 case MO_8:
4741 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4742 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4743 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4744 /* XXX: use 32 bit mul which could be faster */
4745 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4746 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4747 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4748 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4749 set_cc_op(s, CC_OP_MULB);
4750 break;
4751 case MO_16:
4752 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4753 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4754 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4755 /* XXX: use 32 bit mul which could be faster */
4756 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4757 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4758 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4759 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4760 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4761 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4762 set_cc_op(s, CC_OP_MULW);
4763 break;
4764 default:
4765 case MO_32:
4766 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4767 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4768 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4769 cpu_tmp2_i32, cpu_tmp3_i32);
4770 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4771 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4772 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4773 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4774 set_cc_op(s, CC_OP_MULL);
4775 break;
4776 #ifdef TARGET_X86_64
4777 case MO_64:
4778 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4779 cpu_T[0], cpu_regs[R_EAX]);
4780 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4781 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4782 set_cc_op(s, CC_OP_MULQ);
4783 break;
4784 #endif
4786 break;
4787 case 5: /* imul */
4788 switch(ot) {
4789 case MO_8:
4790 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4791 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4792 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4793 /* XXX: use 32 bit mul which could be faster */
4794 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4795 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4796 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4797 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4798 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4799 set_cc_op(s, CC_OP_MULB);
4800 break;
4801 case MO_16:
4802 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4803 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4804 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4805 /* XXX: use 32 bit mul which could be faster */
4806 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4807 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4808 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4809 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4810 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4811 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4812 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4813 set_cc_op(s, CC_OP_MULW);
4814 break;
4815 default:
4816 case MO_32:
4817 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4818 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4819 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4820 cpu_tmp2_i32, cpu_tmp3_i32);
4821 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4822 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4823 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4824 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4825 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4826 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4827 set_cc_op(s, CC_OP_MULL);
4828 break;
4829 #ifdef TARGET_X86_64
4830 case MO_64:
4831 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4832 cpu_T[0], cpu_regs[R_EAX]);
4833 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4834 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4835 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4836 set_cc_op(s, CC_OP_MULQ);
4837 break;
4838 #endif
4840 break;
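/* For imul, CF and OF are set exactly when the full signed product
   no longer fits in the destination.  Each case above checks this by
   computing cc_src = result - sext(low half): the difference is zero
   iff the high part is a pure sign extension of the low half, i.e.
   iff there was no overflow. */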
4841 case 6: /* div */
4842 switch(ot) {
4843 case MO_8:
4844 gen_jmp_im(pc_start - s->cs_base);
4845 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4846 break;
4847 case MO_16:
4848 gen_jmp_im(pc_start - s->cs_base);
4849 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4850 break;
4851 default:
4852 case MO_32:
4853 gen_jmp_im(pc_start - s->cs_base);
4854 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4855 break;
4856 #ifdef TARGET_X86_64
4857 case MO_64:
4858 gen_jmp_im(pc_start - s->cs_base);
4859 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4860 break;
4861 #endif
4863 break;
4864 case 7: /* idiv */
4865 switch(ot) {
4866 case MO_8:
4867 gen_jmp_im(pc_start - s->cs_base);
4868 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4869 break;
4870 case MO_16:
4871 gen_jmp_im(pc_start - s->cs_base);
4872 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4873 break;
4874 default:
4875 case MO_32:
4876 gen_jmp_im(pc_start - s->cs_base);
4877 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4878 break;
4879 #ifdef TARGET_X86_64
4880 case MO_64:
4881 gen_jmp_im(pc_start - s->cs_base);
4882 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4883 break;
4884 #endif
4886 break;
4887 default:
4888 goto illegal_op;
4890 break;
4892 case 0xfe: /* GRP4 */
4893 case 0xff: /* GRP5 */
4894 ot = mo_b_d(b, dflag);
4896 modrm = cpu_ldub_code(env, s->pc++);
4897 mod = (modrm >> 6) & 3;
4898 rm = (modrm & 7) | REX_B(s);
4899 op = (modrm >> 3) & 7;
4900 if (op >= 2 && b == 0xfe) {
4901 goto illegal_op;
4903 if (CODE64(s)) {
4904 if (op == 2 || op == 4) {
4905 /* operand size for jumps is 64 bit */
4906 ot = MO_64;
4907 } else if (op == 3 || op == 5) {
4908 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4909 } else if (op == 6) {
4910 /* default push size is 64 bit */
4911 ot = mo_pushpop(s, dflag);
4914 if (mod != 3) {
4915 gen_lea_modrm(env, s, modrm);
4916 if (op >= 2 && op != 3 && op != 5)
4917 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4918 } else {
4919 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4922 switch(op) {
4923 case 0: /* inc Ev */
4924 if (mod != 3)
4925 opreg = OR_TMP0;
4926 else
4927 opreg = rm;
4928 gen_inc(s, ot, opreg, 1);
4929 break;
4930 case 1: /* dec Ev */
4931 if (mod != 3)
4932 opreg = OR_TMP0;
4933 else
4934 opreg = rm;
4935 gen_inc(s, ot, opreg, -1);
4936 break;
4937 case 2: /* call Ev */
4938 /* XXX: optimize if memory (no 'and' is necessary) */
4939 if (dflag == MO_16) {
4940 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4942 next_eip = s->pc - s->cs_base;
4943 tcg_gen_movi_tl(cpu_T[1], next_eip);
4944 gen_push_v(s, cpu_T[1]);
4945 gen_op_jmp_v(cpu_T[0]);
4946 gen_eob(s);
4947 break;
4948 case 3: /* lcall Ev */
4949 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4950 gen_add_A0_im(s, 1 << ot);
4951 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4952 do_lcall:
4953 if (s->pe && !s->vm86) {
4954 gen_update_cc_op(s);
4955 gen_jmp_im(pc_start - s->cs_base);
4956 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4957 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4958 tcg_const_i32(dflag - 1),
4959 tcg_const_i32(s->pc - pc_start));
4960 } else {
4961 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4962 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4963 tcg_const_i32(dflag - 1),
4964 tcg_const_i32(s->pc - s->cs_base));
4966 gen_eob(s);
4967 break;
4968 case 4: /* jmp Ev */
4969 if (dflag == MO_16) {
4970 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4972 gen_op_jmp_v(cpu_T[0]);
4973 gen_eob(s);
4974 break;
4975 case 5: /* ljmp Ev */
4976 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4977 gen_add_A0_im(s, 1 << ot);
4978 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4979 do_ljmp:
4980 if (s->pe && !s->vm86) {
4981 gen_update_cc_op(s);
4982 gen_jmp_im(pc_start - s->cs_base);
4983 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4984 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4985 tcg_const_i32(s->pc - pc_start));
4986 } else {
4987 gen_op_movl_seg_T0_vm(R_CS);
4988 gen_op_jmp_v(cpu_T[1]);
4990 gen_eob(s);
4991 break;
4992 case 6: /* push Ev */
4993 gen_push_v(s, cpu_T[0]);
4994 break;
4995 default:
4996 goto illegal_op;
4998 break;
5000 case 0x84: /* test Ev, Gv */
5001 case 0x85:
5002 ot = mo_b_d(b, dflag);
5004 modrm = cpu_ldub_code(env, s->pc++);
5005 reg = ((modrm >> 3) & 7) | rex_r;
5007 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5008 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5009 gen_op_testl_T0_T1_cc();
5010 set_cc_op(s, CC_OP_LOGICB + ot);
5011 break;
5013 case 0xa8: /* test eAX, Iv */
5014 case 0xa9:
5015 ot = mo_b_d(b, dflag);
5016 val = insn_get(env, s, ot);
5018 gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
5019 tcg_gen_movi_tl(cpu_T[1], val);
5020 gen_op_testl_T0_T1_cc();
5021 set_cc_op(s, CC_OP_LOGICB + ot);
5022 break;
5024 case 0x98: /* CWDE/CBW */
5025 switch (dflag) {
5026 #ifdef TARGET_X86_64
5027 case MO_64:
5028 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5029 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5030 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
5031 break;
5032 #endif
5033 case MO_32:
5034 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5035 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5036 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
5037 break;
5038 case MO_16:
5039 gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
5040 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5041 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
5042 break;
5043 default:
5044 tcg_abort();
5046 break;
5047 case 0x99: /* CDQ/CWD */
5048 switch (dflag) {
5049 #ifdef TARGET_X86_64
5050 case MO_64:
5051 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
5052 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5053 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
5054 break;
5055 #endif
5056 case MO_32:
5057 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5058 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5059 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5060 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
5061 break;
5062 case MO_16:
5063 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5064 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5065 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5066 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
5067 break;
5068 default:
5069 tcg_abort();
5071 break;
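/* 0x98 widens within rAX (cbw/cwde/cdqe depending on operand size),
   while 0x99 broadcasts the sign bit of rAX into rDX (cwd/cdq/cqo),
   which is why the 0x99 cases shift the sign across the full
   register width before storing to EDX. */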
5072 case 0x1af: /* imul Gv, Ev */
5073 case 0x69: /* imul Gv, Ev, I */
5074 case 0x6b:
5075 ot = dflag;
5076 modrm = cpu_ldub_code(env, s->pc++);
5077 reg = ((modrm >> 3) & 7) | rex_r;
5078 if (b == 0x69)
5079 s->rip_offset = insn_const_size(ot);
5080 else if (b == 0x6b)
5081 s->rip_offset = 1;
5082 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5083 if (b == 0x69) {
5084 val = insn_get(env, s, ot);
5085 tcg_gen_movi_tl(cpu_T[1], val);
5086 } else if (b == 0x6b) {
5087 val = (int8_t)insn_get(env, s, MO_8);
5088 tcg_gen_movi_tl(cpu_T[1], val);
5089 } else {
5090 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5092 switch (ot) {
5093 #ifdef TARGET_X86_64
5094 case MO_64:
5095 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5096 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5097 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5098 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5099 break;
5100 #endif
5101 case MO_32:
5102 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5103 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5104 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5105 cpu_tmp2_i32, cpu_tmp3_i32);
5106 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5107 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5108 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5109 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5110 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5111 break;
5112 default:
5113 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5114 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5115 /* XXX: use 32 bit mul which could be faster */
5116 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5117 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5118 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5119 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5120 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5121 break;
5123 set_cc_op(s, CC_OP_MULB + ot);
5124 break;
5125 case 0x1c0:
5126 case 0x1c1: /* xadd Ev, Gv */
5127 ot = mo_b_d(b, dflag);
5128 modrm = cpu_ldub_code(env, s->pc++);
5129 reg = ((modrm >> 3) & 7) | rex_r;
5130 mod = (modrm >> 6) & 3;
5131 if (mod == 3) {
5132 rm = (modrm & 7) | REX_B(s);
5133 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5134 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5135 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5136 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5137 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5138 } else {
5139 gen_lea_modrm(env, s, modrm);
5140 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5141 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5142 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5143 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5144 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5146 gen_op_update2_cc();
5147 set_cc_op(s, CC_OP_ADDB + ot);
5148 break;
5149 case 0x1b0:
5150 case 0x1b1: /* cmpxchg Ev, Gv */
5152 TCGLabel *label1, *label2;
5153 TCGv t0, t1, t2, a0;
5155 ot = mo_b_d(b, dflag);
5156 modrm = cpu_ldub_code(env, s->pc++);
5157 reg = ((modrm >> 3) & 7) | rex_r;
5158 mod = (modrm >> 6) & 3;
5159 t0 = tcg_temp_local_new();
5160 t1 = tcg_temp_local_new();
5161 t2 = tcg_temp_local_new();
5162 a0 = tcg_temp_local_new();
5163 gen_op_mov_v_reg(ot, t1, reg);
5164 if (mod == 3) {
5165 rm = (modrm & 7) | REX_B(s);
5166 gen_op_mov_v_reg(ot, t0, rm);
5167 } else {
5168 gen_lea_modrm(env, s, modrm);
5169 tcg_gen_mov_tl(a0, cpu_A0);
5170 gen_op_ld_v(s, ot, t0, a0);
5171 rm = 0; /* avoid warning */
5173 label1 = gen_new_label();
5174 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5175 gen_extu(ot, t0);
5176 gen_extu(ot, t2);
5177 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5178 label2 = gen_new_label();
5179 if (mod == 3) {
5180 gen_op_mov_reg_v(ot, R_EAX, t0);
5181 tcg_gen_br(label2);
5182 gen_set_label(label1);
5183 gen_op_mov_reg_v(ot, rm, t1);
5184 } else {
5185 /* perform no-op store cycle like physical cpu; must be
5186 before changing accumulator to ensure idempotency if
5187 the store faults and the instruction is restarted */
5188 gen_op_st_v(s, ot, t0, a0);
5189 gen_op_mov_reg_v(ot, R_EAX, t0);
5190 tcg_gen_br(label2);
5191 gen_set_label(label1);
5192 gen_op_st_v(s, ot, t1, a0);
5194 gen_set_label(label2);
5195 tcg_gen_mov_tl(cpu_cc_src, t0);
5196 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5197 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5198 set_cc_op(s, CC_OP_SUBB + ot);
5199 tcg_temp_free(t0);
5200 tcg_temp_free(t1);
5201 tcg_temp_free(t2);
5202 tcg_temp_free(a0);
5204 break;
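/* cmpxchg needs tcg_temp_local_new() here because t0/t1/t2 and a0
   must remain live across the brcond and labels above; ordinary TCG
   temporaries are not guaranteed to survive a branch. */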
5205 case 0x1c7: /* cmpxchg8b */
5206 modrm = cpu_ldub_code(env, s->pc++);
5207 mod = (modrm >> 6) & 3;
5208 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5209 goto illegal_op;
5210 #ifdef TARGET_X86_64
5211 if (dflag == MO_64) {
5212 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5213 goto illegal_op;
5214 gen_jmp_im(pc_start - s->cs_base);
5215 gen_update_cc_op(s);
5216 gen_lea_modrm(env, s, modrm);
5217 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5218 } else
5219 #endif
5221 if (!(s->cpuid_features & CPUID_CX8))
5222 goto illegal_op;
5223 gen_jmp_im(pc_start - s->cs_base);
5224 gen_update_cc_op(s);
5225 gen_lea_modrm(env, s, modrm);
5226 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5228 set_cc_op(s, CC_OP_EFLAGS);
5229 break;
5231 /**************************/
5232 /* push/pop */
5233 case 0x50 ... 0x57: /* push */
5234 gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
5235 gen_push_v(s, cpu_T[0]);
5236 break;
5237 case 0x58 ... 0x5f: /* pop */
5238 ot = gen_pop_T0(s);
5239 /* NOTE: order is important for pop %sp */
5240 gen_pop_update(s, ot);
5241 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
5242 break;
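/* The ordering note matters because hardware executes "pop %sp" by
   incrementing SP before writing the popped value into it, so the
   popped value wins.  Calling gen_pop_update() before the register
   write reproduces exactly that behaviour. */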
5243 case 0x60: /* pusha */
5244 if (CODE64(s))
5245 goto illegal_op;
5246 gen_pusha(s);
5247 break;
5248 case 0x61: /* popa */
5249 if (CODE64(s))
5250 goto illegal_op;
5251 gen_popa(s);
5252 break;
5253 case 0x68: /* push Iv */
5254 case 0x6a:
5255 ot = mo_pushpop(s, dflag);
5256 if (b == 0x68)
5257 val = insn_get(env, s, ot);
5258 else
5259 val = (int8_t)insn_get(env, s, MO_8);
5260 tcg_gen_movi_tl(cpu_T[0], val);
5261 gen_push_v(s, cpu_T[0]);
5262 break;
5263 case 0x8f: /* pop Ev */
5264 modrm = cpu_ldub_code(env, s->pc++);
5265 mod = (modrm >> 6) & 3;
5266 ot = gen_pop_T0(s);
5267 if (mod == 3) {
5268 /* NOTE: order is important for pop %sp */
5269 gen_pop_update(s, ot);
5270 rm = (modrm & 7) | REX_B(s);
5271 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5272 } else {
5273 /* NOTE: order is important too for MMU exceptions */
5274 s->popl_esp_hack = 1 << ot;
5275 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5276 s->popl_esp_hack = 0;
5277 gen_pop_update(s, ot);
5279 break;
5280 case 0xc8: /* enter */
5282 int level;
5283 val = cpu_lduw_code(env, s->pc);
5284 s->pc += 2;
5285 level = cpu_ldub_code(env, s->pc++);
5286 gen_enter(s, val, level);
5288 break;
5289 case 0xc9: /* leave */
5290 /* XXX: exception not precise (ESP is updated before potential exception) */
5291 if (CODE64(s)) {
5292 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
5293 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
5294 } else if (s->ss32) {
5295 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
5296 gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
5297 } else {
5298 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
5299 gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
5301 ot = gen_pop_T0(s);
5302 gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
5303 gen_pop_update(s, ot);
5304 break;
5305 case 0x06: /* push es */
5306 case 0x0e: /* push cs */
5307 case 0x16: /* push ss */
5308 case 0x1e: /* push ds */
5309 if (CODE64(s))
5310 goto illegal_op;
5311 gen_op_movl_T0_seg(b >> 3);
5312 gen_push_v(s, cpu_T[0]);
5313 break;
5314 case 0x1a0: /* push fs */
5315 case 0x1a8: /* push gs */
5316 gen_op_movl_T0_seg((b >> 3) & 7);
5317 gen_push_v(s, cpu_T[0]);
5318 break;
5319 case 0x07: /* pop es */
5320 case 0x17: /* pop ss */
5321 case 0x1f: /* pop ds */
5322 if (CODE64(s))
5323 goto illegal_op;
5324 reg = b >> 3;
5325 ot = gen_pop_T0(s);
5326 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5327 gen_pop_update(s, ot);
5328 if (reg == R_SS) {
5329 /* if reg == SS, inhibit interrupts/trace. */
5330 /* If several instructions disable interrupts, only the
5331 _first_ does it */
5332 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5333 gen_helper_set_inhibit_irq(cpu_env);
5334 s->tf = 0;
5336 if (s->is_jmp) {
5337 gen_jmp_im(s->pc - s->cs_base);
5338 gen_eob(s);
5340 break;
5341 case 0x1a1: /* pop fs */
5342 case 0x1a9: /* pop gs */
5343 ot = gen_pop_T0(s);
5344 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5345 gen_pop_update(s, ot);
5346 if (s->is_jmp) {
5347 gen_jmp_im(s->pc - s->cs_base);
5348 gen_eob(s);
5350 break;
5352 /**************************/
5353 /* mov */
5354 case 0x88:
5355 case 0x89: /* mov Gv, Ev */
5356 ot = mo_b_d(b, dflag);
5357 modrm = cpu_ldub_code(env, s->pc++);
5358 reg = ((modrm >> 3) & 7) | rex_r;
5360 /* generate a generic store */
5361 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5362 break;
5363 case 0xc6:
5364 case 0xc7: /* mov Ev, Iv */
5365 ot = mo_b_d(b, dflag);
5366 modrm = cpu_ldub_code(env, s->pc++);
5367 mod = (modrm >> 6) & 3;
5368 if (mod != 3) {
5369 s->rip_offset = insn_const_size(ot);
5370 gen_lea_modrm(env, s, modrm);
5372 val = insn_get(env, s, ot);
5373 tcg_gen_movi_tl(cpu_T[0], val);
5374 if (mod != 3) {
5375 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5376 } else {
5377 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
5379 break;
5380 case 0x8a:
5381 case 0x8b: /* mov Ev, Gv */
5382 ot = mo_b_d(b, dflag);
5383 modrm = cpu_ldub_code(env, s->pc++);
5384 reg = ((modrm >> 3) & 7) | rex_r;
5386 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5387 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5388 break;
5389 case 0x8e: /* mov seg, Gv */
5390 modrm = cpu_ldub_code(env, s->pc++);
5391 reg = (modrm >> 3) & 7;
5392 if (reg >= 6 || reg == R_CS)
5393 goto illegal_op;
5394 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5395 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5396 if (reg == R_SS) {
5397 /* if reg == SS, inhibit interrupts/trace */
5398 /* If several instructions disable interrupts, only the
5399 _first_ does it */
5400 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5401 gen_helper_set_inhibit_irq(cpu_env);
5402 s->tf = 0;
5404 if (s->is_jmp) {
5405 gen_jmp_im(s->pc - s->cs_base);
5406 gen_eob(s);
5408 break;
5409 case 0x8c: /* mov Gv, seg */
5410 modrm = cpu_ldub_code(env, s->pc++);
5411 reg = (modrm >> 3) & 7;
5412 mod = (modrm >> 6) & 3;
5413 if (reg >= 6)
5414 goto illegal_op;
5415 gen_op_movl_T0_seg(reg);
5416 ot = mod == 3 ? dflag : MO_16;
5417 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5418 break;
5420 case 0x1b6: /* movzbS Gv, Eb */
5421 case 0x1b7: /* movzwS Gv, Eb */
5422 case 0x1be: /* movsbS Gv, Eb */
5423 case 0x1bf: /* movswS Gv, Eb */
5425 TCGMemOp d_ot;
5426 TCGMemOp s_ot;
5428 /* d_ot is the size of destination */
5429 d_ot = dflag;
5430 /* ot is the size of source */
5431 ot = (b & 1) + MO_8;
5432 /* s_ot is the sign+size of source */
5433 s_ot = b & 8 ? MO_SIGN | ot : ot;
5435 modrm = cpu_ldub_code(env, s->pc++);
5436 reg = ((modrm >> 3) & 7) | rex_r;
5437 mod = (modrm >> 6) & 3;
5438 rm = (modrm & 7) | REX_B(s);
5440 if (mod == 3) {
5441 gen_op_mov_v_reg(ot, cpu_T[0], rm);
5442 switch (s_ot) {
5443 case MO_UB:
5444 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5445 break;
5446 case MO_SB:
5447 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5448 break;
5449 case MO_UW:
5450 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5451 break;
5452 default:
5453 case MO_SW:
5454 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5455 break;
5457 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5458 } else {
5459 gen_lea_modrm(env, s, modrm);
5460 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
5461 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5464 break;
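/* Bit 3 of the opcode separates movz (0x1b6/0x1b7) from movs
   (0x1be/0x1bf).  Folding the sign into s_ot as MO_SIGN | ot lets
   the memory path use a single sign- or zero-extending load, while
   the register path picks the matching ext op in the switch above. */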
5466 case 0x8d: /* lea */
5467 ot = dflag;
5468 modrm = cpu_ldub_code(env, s->pc++);
5469 mod = (modrm >> 6) & 3;
5470 if (mod == 3)
5471 goto illegal_op;
5472 reg = ((modrm >> 3) & 7) | rex_r;
5473 /* we must ensure that no segment is added */
5474 s->override = -1;
5475 val = s->addseg;
5476 s->addseg = 0;
5477 gen_lea_modrm(env, s, modrm);
5478 s->addseg = val;
5479 gen_op_mov_reg_v(ot, reg, cpu_A0);
5480 break;
5482 case 0xa0: /* mov EAX, Ov */
5483 case 0xa1:
5484 case 0xa2: /* mov Ov, EAX */
5485 case 0xa3:
5487 target_ulong offset_addr;
5489 ot = mo_b_d(b, dflag);
5490 switch (s->aflag) {
5491 #ifdef TARGET_X86_64
5492 case MO_64:
5493 offset_addr = cpu_ldq_code(env, s->pc);
5494 s->pc += 8;
5495 break;
5496 #endif
5497 default:
5498 offset_addr = insn_get(env, s, s->aflag);
5499 break;
5501 tcg_gen_movi_tl(cpu_A0, offset_addr);
5502 gen_add_A0_ds_seg(s);
5503 if ((b & 2) == 0) {
5504 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
5505 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
5506 } else {
5507 gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
5508 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5511 break;
5512 case 0xd7: /* xlat */
5513 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5514 tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
5515 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5516 gen_extu(s->aflag, cpu_A0);
5517 gen_add_A0_ds_seg(s);
5518 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
5519 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
5520 break;
5521 case 0xb0 ... 0xb7: /* mov R, Ib */
5522 val = insn_get(env, s, MO_8);
5523 tcg_gen_movi_tl(cpu_T[0], val);
5524 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
5525 break;
5526 case 0xb8 ... 0xbf: /* mov R, Iv */
5527 #ifdef TARGET_X86_64
5528 if (dflag == MO_64) {
5529 uint64_t tmp;
5530 /* 64 bit case */
5531 tmp = cpu_ldq_code(env, s->pc);
5532 s->pc += 8;
5533 reg = (b & 7) | REX_B(s);
5534 tcg_gen_movi_tl(cpu_T[0], tmp);
5535 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
5536 } else
5537 #endif
5539 ot = dflag;
5540 val = insn_get(env, s, ot);
5541 reg = (b & 7) | REX_B(s);
5542 tcg_gen_movi_tl(cpu_T[0], val);
5543 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5545 break;
5547 case 0x91 ... 0x97: /* xchg R, EAX */
5548 do_xchg_reg_eax:
5549 ot = dflag;
5550 reg = (b & 7) | REX_B(s);
5551 rm = R_EAX;
5552 goto do_xchg_reg;
5553 case 0x86:
5554 case 0x87: /* xchg Ev, Gv */
5555 ot = mo_b_d(b, dflag);
5556 modrm = cpu_ldub_code(env, s->pc++);
5557 reg = ((modrm >> 3) & 7) | rex_r;
5558 mod = (modrm >> 6) & 3;
5559 if (mod == 3) {
5560 rm = (modrm & 7) | REX_B(s);
5561 do_xchg_reg:
5562 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5563 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5564 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5565 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5566 } else {
5567 gen_lea_modrm(env, s, modrm);
5568 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5569 /* for xchg, lock is implicit */
5570 if (!(prefixes & PREFIX_LOCK))
5571 gen_helper_lock();
5572 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5573 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5574 if (!(prefixes & PREFIX_LOCK))
5575 gen_helper_unlock();
5576 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5578 break;
5579 case 0xc4: /* les Gv */
5580 /* In CODE64 this is VEX3; see above. */
5581 op = R_ES;
5582 goto do_lxx;
5583 case 0xc5: /* lds Gv */
5584 /* In CODE64 this is VEX2; see above. */
5585 op = R_DS;
5586 goto do_lxx;
5587 case 0x1b2: /* lss Gv */
5588 op = R_SS;
5589 goto do_lxx;
5590 case 0x1b4: /* lfs Gv */
5591 op = R_FS;
5592 goto do_lxx;
5593 case 0x1b5: /* lgs Gv */
5594 op = R_GS;
5595 do_lxx:
5596 ot = dflag != MO_16 ? MO_32 : MO_16;
5597 modrm = cpu_ldub_code(env, s->pc++);
5598 reg = ((modrm >> 3) & 7) | rex_r;
5599 mod = (modrm >> 6) & 3;
5600 if (mod == 3)
5601 goto illegal_op;
5602 gen_lea_modrm(env, s, modrm);
5603 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5604 gen_add_A0_im(s, 1 << ot);
5605 /* load the segment first to handle exceptions properly */
5606 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5607 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5608 /* then put the data */
5609 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5610 if (s->is_jmp) {
5611 gen_jmp_im(s->pc - s->cs_base);
5612 gen_eob(s);
5614 break;
5616 /************************/
5617 /* shifts */
5618 case 0xc0:
5619 case 0xc1:
5620 /* shift Ev,Ib */
5621 shift = 2;
5622 grp2:
5624 ot = mo_b_d(b, dflag);
5625 modrm = cpu_ldub_code(env, s->pc++);
5626 mod = (modrm >> 6) & 3;
5627 op = (modrm >> 3) & 7;
5629 if (mod != 3) {
5630 if (shift == 2) {
5631 s->rip_offset = 1;
5633 gen_lea_modrm(env, s, modrm);
5634 opreg = OR_TMP0;
5635 } else {
5636 opreg = (modrm & 7) | REX_B(s);
5639 /* simpler op */
5640 if (shift == 0) {
5641 gen_shift(s, op, ot, opreg, OR_ECX);
5642 } else {
5643 if (shift == 2) {
5644 shift = cpu_ldub_code(env, s->pc++);
5646 gen_shifti(s, op, ot, opreg, shift);
5649 break;
5650 case 0xd0:
5651 case 0xd1:
5652 /* shift Ev,1 */
5653 shift = 1;
5654 goto grp2;
5655 case 0xd2:
5656 case 0xd3:
5657 /* shift Ev,cl */
5658 shift = 0;
5659 goto grp2;
5661 case 0x1a4: /* shld imm */
5662 op = 0;
5663 shift = 1;
5664 goto do_shiftd;
5665 case 0x1a5: /* shld cl */
5666 op = 0;
5667 shift = 0;
5668 goto do_shiftd;
5669 case 0x1ac: /* shrd imm */
5670 op = 1;
5671 shift = 1;
5672 goto do_shiftd;
5673 case 0x1ad: /* shrd cl */
5674 op = 1;
5675 shift = 0;
5676 do_shiftd:
5677 ot = dflag;
5678 modrm = cpu_ldub_code(env, s->pc++);
5679 mod = (modrm >> 6) & 3;
5680 rm = (modrm & 7) | REX_B(s);
5681 reg = ((modrm >> 3) & 7) | rex_r;
5682 if (mod != 3) {
5683 gen_lea_modrm(env, s, modrm);
5684 opreg = OR_TMP0;
5685 } else {
5686 opreg = rm;
5688 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5690 if (shift) {
5691 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5692 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5693 tcg_temp_free(imm);
5694 } else {
5695 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5697 break;
5699 /************************/
5700 /* floats */
5701 case 0xd8 ... 0xdf:
5702 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5703 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5704 /* XXX: what to do if illegal op? */
5705 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5706 break;
5708 modrm = cpu_ldub_code(env, s->pc++);
5709 mod = (modrm >> 6) & 3;
5710 rm = modrm & 7;
5711 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5712 if (mod != 3) {
5713 /* memory op */
5714 gen_lea_modrm(env, s, modrm);
5715 switch(op) {
5716 case 0x00 ... 0x07: /* fxxxs */
5717 case 0x10 ... 0x17: /* fixxxl */
5718 case 0x20 ... 0x27: /* fxxxl */
5719 case 0x30 ... 0x37: /* fixxx */
5721 int op1;
5722 op1 = op & 7;
5724 switch(op >> 4) {
5725 case 0:
5726 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5727 s->mem_index, MO_LEUL);
5728 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5729 break;
5730 case 1:
5731 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5732 s->mem_index, MO_LEUL);
5733 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5734 break;
5735 case 2:
5736 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5737 s->mem_index, MO_LEQ);
5738 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5739 break;
5740 case 3:
5741 default:
5742 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5743 s->mem_index, MO_LESW);
5744 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5745 break;
5748 gen_helper_fp_arith_ST0_FT0(op1);
5749 if (op1 == 3) {
5750 /* fcomp needs pop */
5751 gen_helper_fpop(cpu_env);
5754 break;
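/* For these four groups, op >> 4 encodes the memory operand type:
   0 = 32-bit float, 1 = 32-bit integer, 2 = 64-bit float,
   3 = 16-bit integer, matching the d8/da/dc/de opcode bytes and the
   four load shapes in the switch above. */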
5755 case 0x08: /* flds */
5756 case 0x0a: /* fsts */
5757 case 0x0b: /* fstps */
5758 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5759 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5760 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5761 switch(op & 7) {
5762 case 0:
5763 switch(op >> 4) {
5764 case 0:
5765 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5766 s->mem_index, MO_LEUL);
5767 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5768 break;
5769 case 1:
5770 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5771 s->mem_index, MO_LEUL);
5772 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5773 break;
5774 case 2:
5775 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5776 s->mem_index, MO_LEQ);
5777 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5778 break;
5779 case 3:
5780 default:
5781 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5782 s->mem_index, MO_LESW);
5783 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5784 break;
5786 break;
5787 case 1:
5788 /* XXX: the corresponding CPUID bit must be tested! */
5789 switch(op >> 4) {
5790 case 1:
5791 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5792 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5793 s->mem_index, MO_LEUL);
5794 break;
5795 case 2:
5796 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5797 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5798 s->mem_index, MO_LEQ);
5799 break;
5800 case 3:
5801 default:
5802 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5803 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5804 s->mem_index, MO_LEUW);
5805 break;
5807 gen_helper_fpop(cpu_env);
5808 break;
5809 default:
5810 switch(op >> 4) {
5811 case 0:
5812 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5813 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5814 s->mem_index, MO_LEUL);
5815 break;
5816 case 1:
5817 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5818 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5819 s->mem_index, MO_LEUL);
5820 break;
5821 case 2:
5822 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5823 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5824 s->mem_index, MO_LEQ);
5825 break;
5826 case 3:
5827 default:
5828 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5829 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5830 s->mem_index, MO_LEUW);
5831 break;
5833 if ((op & 7) == 3)
5834 gen_helper_fpop(cpu_env);
5835 break;
5837 break;
5838 case 0x0c: /* fldenv mem */
5839 gen_update_cc_op(s);
5840 gen_jmp_im(pc_start - s->cs_base);
5841 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5842 break;
5843 case 0x0d: /* fldcw mem */
5844 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5845 s->mem_index, MO_LEUW);
5846 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5847 break;
5848 case 0x0e: /* fnstenv mem */
5849 gen_update_cc_op(s);
5850 gen_jmp_im(pc_start - s->cs_base);
5851 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5852 break;
5853 case 0x0f: /* fnstcw mem */
5854 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5855 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5856 s->mem_index, MO_LEUW);
5857 break;
5858 case 0x1d: /* fldt mem */
5859 gen_update_cc_op(s);
5860 gen_jmp_im(pc_start - s->cs_base);
5861 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5862 break;
5863 case 0x1f: /* fstpt mem */
5864 gen_update_cc_op(s);
5865 gen_jmp_im(pc_start - s->cs_base);
5866 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5867 gen_helper_fpop(cpu_env);
5868 break;
5869 case 0x2c: /* frstor mem */
5870 gen_update_cc_op(s);
5871 gen_jmp_im(pc_start - s->cs_base);
5872 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5873 break;
5874 case 0x2e: /* fnsave mem */
5875 gen_update_cc_op(s);
5876 gen_jmp_im(pc_start - s->cs_base);
5877 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5878 break;
5879 case 0x2f: /* fnstsw mem */
5880 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5881 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5882 s->mem_index, MO_LEUW);
5883 break;
5884 case 0x3c: /* fbld */
5885 gen_update_cc_op(s);
5886 gen_jmp_im(pc_start - s->cs_base);
5887 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5888 break;
5889 case 0x3e: /* fbstp */
5890 gen_update_cc_op(s);
5891 gen_jmp_im(pc_start - s->cs_base);
5892 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5893 gen_helper_fpop(cpu_env);
5894 break;
5895 case 0x3d: /* fildll */
5896 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5897 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5898 break;
5899 case 0x3f: /* fistpll */
5900 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5901 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5902 gen_helper_fpop(cpu_env);
5903 break;
5904 default:
5905 goto illegal_op;
5907 } else {
5908 /* register float ops */
5909 opreg = rm;
5911 switch(op) {
5912 case 0x08: /* fld sti */
5913 gen_helper_fpush(cpu_env);
5914 gen_helper_fmov_ST0_STN(cpu_env,
5915 tcg_const_i32((opreg + 1) & 7));
5916 break;
5917 case 0x09: /* fxchg sti */
5918 case 0x29: /* fxchg4 sti, undocumented op */
5919 case 0x39: /* fxchg7 sti, undocumented op */
5920 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5921 break;
5922 case 0x0a: /* grp d9/2 */
5923 switch(rm) {
5924 case 0: /* fnop */
5925 /* check exceptions (FreeBSD FPU probe) */
5926 gen_update_cc_op(s);
5927 gen_jmp_im(pc_start - s->cs_base);
5928 gen_helper_fwait(cpu_env);
5929 break;
5930 default:
5931 goto illegal_op;
5933 break;
5934 case 0x0c: /* grp d9/4 */
5935 switch(rm) {
5936 case 0: /* fchs */
5937 gen_helper_fchs_ST0(cpu_env);
5938 break;
5939 case 1: /* fabs */
5940 gen_helper_fabs_ST0(cpu_env);
5941 break;
5942 case 4: /* ftst */
5943 gen_helper_fldz_FT0(cpu_env);
5944 gen_helper_fcom_ST0_FT0(cpu_env);
5945 break;
5946 case 5: /* fxam */
5947 gen_helper_fxam_ST0(cpu_env);
5948 break;
5949 default:
5950 goto illegal_op;
5952 break;
5953 case 0x0d: /* grp d9/5 */
5955 switch(rm) {
5956 case 0:
5957 gen_helper_fpush(cpu_env);
5958 gen_helper_fld1_ST0(cpu_env);
5959 break;
5960 case 1:
5961 gen_helper_fpush(cpu_env);
5962 gen_helper_fldl2t_ST0(cpu_env);
5963 break;
5964 case 2:
5965 gen_helper_fpush(cpu_env);
5966 gen_helper_fldl2e_ST0(cpu_env);
5967 break;
5968 case 3:
5969 gen_helper_fpush(cpu_env);
5970 gen_helper_fldpi_ST0(cpu_env);
5971 break;
5972 case 4:
5973 gen_helper_fpush(cpu_env);
5974 gen_helper_fldlg2_ST0(cpu_env);
5975 break;
5976 case 5:
5977 gen_helper_fpush(cpu_env);
5978 gen_helper_fldln2_ST0(cpu_env);
5979 break;
5980 case 6:
5981 gen_helper_fpush(cpu_env);
5982 gen_helper_fldz_ST0(cpu_env);
5983 break;
5984 default:
5985 goto illegal_op;
5988 break;
5989 case 0x0e: /* grp d9/6 */
5990 switch(rm) {
5991 case 0: /* f2xm1 */
5992 gen_helper_f2xm1(cpu_env);
5993 break;
5994 case 1: /* fyl2x */
5995 gen_helper_fyl2x(cpu_env);
5996 break;
5997 case 2: /* fptan */
5998 gen_helper_fptan(cpu_env);
5999 break;
6000 case 3: /* fpatan */
6001 gen_helper_fpatan(cpu_env);
6002 break;
6003 case 4: /* fxtract */
6004 gen_helper_fxtract(cpu_env);
6005 break;
6006 case 5: /* fprem1 */
6007 gen_helper_fprem1(cpu_env);
6008 break;
6009 case 6: /* fdecstp */
6010 gen_helper_fdecstp(cpu_env);
6011 break;
6012 default:
6013 case 7: /* fincstp */
6014 gen_helper_fincstp(cpu_env);
6015 break;
6017 break;
6018 case 0x0f: /* grp d9/7 */
6019 switch(rm) {
6020 case 0: /* fprem */
6021 gen_helper_fprem(cpu_env);
6022 break;
6023 case 1: /* fyl2xp1 */
6024 gen_helper_fyl2xp1(cpu_env);
6025 break;
6026 case 2: /* fsqrt */
6027 gen_helper_fsqrt(cpu_env);
6028 break;
6029 case 3: /* fsincos */
6030 gen_helper_fsincos(cpu_env);
6031 break;
6032 case 5: /* fscale */
6033 gen_helper_fscale(cpu_env);
6034 break;
6035 case 4: /* frndint */
6036 gen_helper_frndint(cpu_env);
6037 break;
6038 case 6: /* fsin */
6039 gen_helper_fsin(cpu_env);
6040 break;
6041 default:
6042 case 7: /* fcos */
6043 gen_helper_fcos(cpu_env);
6044 break;
6046 break;
6047 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
6048 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6049 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6051 int op1;
6053 op1 = op & 7;
6054 if (op >= 0x20) {
6055 gen_helper_fp_arith_STN_ST0(op1, opreg);
6056 if (op >= 0x30)
6057 gen_helper_fpop(cpu_env);
6058 } else {
6059 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6060 gen_helper_fp_arith_ST0_FT0(op1);
6063 break;
6064 case 0x02: /* fcom */
6065 case 0x22: /* fcom2, undocumented op */
6066 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6067 gen_helper_fcom_ST0_FT0(cpu_env);
6068 break;
6069 case 0x03: /* fcomp */
6070 case 0x23: /* fcomp3, undocumented op */
6071 case 0x32: /* fcomp5, undocumented op */
6072 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6073 gen_helper_fcom_ST0_FT0(cpu_env);
6074 gen_helper_fpop(cpu_env);
6075 break;
6076 case 0x15: /* da/5 */
6077 switch(rm) {
6078 case 1: /* fucompp */
6079 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6080 gen_helper_fucom_ST0_FT0(cpu_env);
6081 gen_helper_fpop(cpu_env);
6082 gen_helper_fpop(cpu_env);
6083 break;
6084 default:
6085 goto illegal_op;
6087 break;
6088 case 0x1c:
6089 switch(rm) {
6090 case 0: /* feni (287 only, just do nop here) */
6091 break;
6092 case 1: /* fdisi (287 only, just do nop here) */
6093 break;
6094 case 2: /* fclex */
6095 gen_helper_fclex(cpu_env);
6096 break;
6097 case 3: /* fninit */
6098 gen_helper_fninit(cpu_env);
6099 break;
6100 case 4: /* fsetpm (287 only, just do nop here) */
6101 break;
6102 default:
6103 goto illegal_op;
6105 break;
6106 case 0x1d: /* fucomi */
6107 if (!(s->cpuid_features & CPUID_CMOV)) {
6108 goto illegal_op;
6110 gen_update_cc_op(s);
6111 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6112 gen_helper_fucomi_ST0_FT0(cpu_env);
6113 set_cc_op(s, CC_OP_EFLAGS);
6114 break;
6115 case 0x1e: /* fcomi */
6116 if (!(s->cpuid_features & CPUID_CMOV)) {
6117 goto illegal_op;
6119 gen_update_cc_op(s);
6120 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6121 gen_helper_fcomi_ST0_FT0(cpu_env);
6122 set_cc_op(s, CC_OP_EFLAGS);
6123 break;
6124 case 0x28: /* ffree sti */
6125 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6126 break;
6127 case 0x2a: /* fst sti */
6128 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6129 break;
6130 case 0x2b: /* fstp sti */
6131 case 0x0b: /* fstp1 sti, undocumented op */
6132 case 0x3a: /* fstp8 sti, undocumented op */
6133 case 0x3b: /* fstp9 sti, undocumented op */
6134 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6135 gen_helper_fpop(cpu_env);
6136 break;
6137 case 0x2c: /* fucom st(i) */
6138 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6139 gen_helper_fucom_ST0_FT0(cpu_env);
6140 break;
6141 case 0x2d: /* fucomp st(i) */
6142 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6143 gen_helper_fucom_ST0_FT0(cpu_env);
6144 gen_helper_fpop(cpu_env);
6145 break;
6146 case 0x33: /* de/3 */
6147 switch(rm) {
6148 case 1: /* fcompp */
6149 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6150 gen_helper_fcom_ST0_FT0(cpu_env);
6151 gen_helper_fpop(cpu_env);
6152 gen_helper_fpop(cpu_env);
6153 break;
6154 default:
6155 goto illegal_op;
6157 break;
6158 case 0x38: /* ffreep sti, undocumented op */
6159 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6160 gen_helper_fpop(cpu_env);
6161 break;
6162 case 0x3c: /* df/4 */
6163 switch(rm) {
6164 case 0:
6165 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6166 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6167 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
6168 break;
6169 default:
6170 goto illegal_op;
6172 break;
6173 case 0x3d: /* fucomip */
6174 if (!(s->cpuid_features & CPUID_CMOV)) {
6175 goto illegal_op;
6177 gen_update_cc_op(s);
6178 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6179 gen_helper_fucomi_ST0_FT0(cpu_env);
6180 gen_helper_fpop(cpu_env);
6181 set_cc_op(s, CC_OP_EFLAGS);
6182 break;
6183 case 0x3e: /* fcomip */
6184 if (!(s->cpuid_features & CPUID_CMOV)) {
6185 goto illegal_op;
6187 gen_update_cc_op(s);
6188 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6189 gen_helper_fcomi_ST0_FT0(cpu_env);
6190 gen_helper_fpop(cpu_env);
6191 set_cc_op(s, CC_OP_EFLAGS);
6192 break;
6193 case 0x10 ... 0x13: /* fcmovxx */
6194 case 0x18 ... 0x1b:
6196 int op1;
6197 TCGLabel *l1;
6198 static const uint8_t fcmov_cc[8] = {
6199 (JCC_B << 1),
6200 (JCC_Z << 1),
6201 (JCC_BE << 1),
6202 (JCC_P << 1),
6205 if (!(s->cpuid_features & CPUID_CMOV)) {
6206 goto illegal_op;
6208 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
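/* Example: for fcmovb (op == 0x10) op1 becomes (JCC_B << 1) | 1,
   i.e. "not below", so the branch below skips the fmov exactly when
   the move must not happen; for fcmovnb (op == 0x18) the inverted
   bit is clear and the branch skips the fmov on "below". */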
6209 l1 = gen_new_label();
6210 gen_jcc1_noeob(s, op1, l1);
6211 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6212 gen_set_label(l1);
6214 break;
6215 default:
6216 goto illegal_op;
6219 break;
6220 /************************/
6221 /* string ops */
6223 case 0xa4: /* movsS */
6224 case 0xa5:
6225 ot = mo_b_d(b, dflag);
6226 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6227 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6228 } else {
6229 gen_movs(s, ot);
6231 break;
6233 case 0xaa: /* stosS */
6234 case 0xab:
6235 ot = mo_b_d(b, dflag);
6236 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6237 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6238 } else {
6239 gen_stos(s, ot);
6241 break;
6242 case 0xac: /* lodsS */
6243 case 0xad:
6244 ot = mo_b_d(b, dflag);
6245 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6246 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6247 } else {
6248 gen_lods(s, ot);
6250 break;
6251 case 0xae: /* scasS */
6252 case 0xaf:
6253 ot = mo_b_d(b, dflag);
6254 if (prefixes & PREFIX_REPNZ) {
6255 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6256 } else if (prefixes & PREFIX_REPZ) {
6257 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6258 } else {
6259 gen_scas(s, ot);
6261 break;
6263 case 0xa6: /* cmpsS */
6264 case 0xa7:
6265 ot = mo_b_d(b, dflag);
6266 if (prefixes & PREFIX_REPNZ) {
6267 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6268 } else if (prefixes & PREFIX_REPZ) {
6269 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6270 } else {
6271 gen_cmps(s, ot);
6273 break;
6274 case 0x6c: /* insS */
6275 case 0x6d:
6276 ot = mo_b_d32(b, dflag);
6277 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6278 gen_check_io(s, ot, pc_start - s->cs_base,
6279 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6280 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6281 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6282 } else {
6283 gen_ins(s, ot);
6284 if (s->tb->cflags & CF_USE_ICOUNT) {
6285 gen_jmp(s, s->pc - s->cs_base);
6288 break;
6289 case 0x6e: /* outsS */
6290 case 0x6f:
6291 ot = mo_b_d32(b, dflag);
6292 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6293 gen_check_io(s, ot, pc_start - s->cs_base,
6294 svm_is_rep(prefixes) | 4);
6295 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6296 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6297 } else {
6298 gen_outs(s, ot);
6299 if (s->tb->cflags & CF_USE_ICOUNT) {
6300 gen_jmp(s, s->pc - s->cs_base);
6303 break;
6305 /************************/
6306 /* port I/O */
6308 case 0xe4:
6309 case 0xe5:
6310 ot = mo_b_d32(b, dflag);
6311 val = cpu_ldub_code(env, s->pc++);
6312 tcg_gen_movi_tl(cpu_T[0], val);
6313 gen_check_io(s, ot, pc_start - s->cs_base,
6314 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
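/* With icount enabled, an I/O access must be the last operation in
   the TB: bracket the access with gen_io_start()/gen_io_end() and
   jump to the next insn so the block ends right after the access. */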
6315 if (s->tb->cflags & CF_USE_ICOUNT) {
6316 gen_io_start();
6318 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6319 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6320 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6321 if (s->tb->cflags & CF_USE_ICOUNT) {
6322 gen_io_end();
6323 gen_jmp(s, s->pc - s->cs_base);
6325 break;
6326 case 0xe6:
6327 case 0xe7:
6328 ot = mo_b_d32(b, dflag);
6329 val = cpu_ldub_code(env, s->pc++);
6330 tcg_gen_movi_tl(cpu_T[0], val);
6331 gen_check_io(s, ot, pc_start - s->cs_base,
6332 svm_is_rep(prefixes));
6333 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6335 if (s->tb->cflags & CF_USE_ICOUNT) {
6336 gen_io_start();
6338 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6339 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6340 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6341 if (s->tb->cflags & CF_USE_ICOUNT) {
6342 gen_io_end();
6343 gen_jmp(s, s->pc - s->cs_base);
6345 break;
6346 case 0xec:
6347 case 0xed:
6348 ot = mo_b_d32(b, dflag);
6349 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6350 gen_check_io(s, ot, pc_start - s->cs_base,
6351 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6352 if (s->tb->cflags & CF_USE_ICOUNT) {
6353 gen_io_start();
6355 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6356 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6357 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6358 if (s->tb->cflags & CF_USE_ICOUNT) {
6359 gen_io_end();
6360 gen_jmp(s, s->pc - s->cs_base);
6362 break;
6363 case 0xee:
6364 case 0xef:
6365 ot = mo_b_d32(b, dflag);
6366 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6367 gen_check_io(s, ot, pc_start - s->cs_base,
6368 svm_is_rep(prefixes));
6369 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6371 if (s->tb->cflags & CF_USE_ICOUNT) {
6372 gen_io_start();
6374 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6375 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6376 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6377 if (s->tb->cflags & CF_USE_ICOUNT) {
6378 gen_io_end();
6379 gen_jmp(s, s->pc - s->cs_base);
6381 break;
6383 /************************/
6384 /* control */
6385 case 0xc2: /* ret im */
6386 val = cpu_ldsw_code(env, s->pc);
6387 s->pc += 2;
6388 ot = gen_pop_T0(s);
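/* The popped return address occupies 1 << ot bytes and the
   immediate releases that many extra bytes on top of it, e.g.
   "ret $8" with MO_32 adjusts ESP by 4 + 8 = 12. */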
6389 gen_stack_update(s, val + (1 << ot));
6390 /* Note that gen_pop_T0 uses a zero-extending load. */
6391 gen_op_jmp_v(cpu_T[0]);
6392 gen_eob(s);
6393 break;
6394 case 0xc3: /* ret */
6395 ot = gen_pop_T0(s);
6396 gen_pop_update(s, ot);
6397 /* Note that gen_pop_T0 uses a zero-extending load. */
6398 gen_op_jmp_v(cpu_T[0]);
6399 gen_eob(s);
6400 break;
6401 case 0xca: /* lret im */
6402 val = cpu_ldsw_code(env, s->pc);
6403 s->pc += 2;
6404 do_lret:
6405 if (s->pe && !s->vm86) {
6406 gen_update_cc_op(s);
6407 gen_jmp_im(pc_start - s->cs_base);
6408 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6409 tcg_const_i32(val));
6410 } else {
6411 gen_stack_A0(s);
6412 /* pop offset */
6413 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6414 /* NOTE: keeping EIP updated is not a problem in case of
6415 exception */
6416 gen_op_jmp_v(cpu_T[0]);
6417 /* pop selector */
6418 gen_op_addl_A0_im(1 << dflag);
6419 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6420 gen_op_movl_seg_T0_vm(R_CS);
6421 /* add stack offset */
6422 gen_stack_update(s, val + (2 << dflag));
6424 gen_eob(s);
6425 break;
6426 case 0xcb: /* lret */
6427 val = 0;
6428 goto do_lret;
6429 case 0xcf: /* iret */
6430 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6431 if (!s->pe) {
6432 /* real mode */
6433 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6434 set_cc_op(s, CC_OP_EFLAGS);
6435 } else if (s->vm86) {
6436 if (s->iopl != 3) {
6437 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6438 } else {
6439 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6440 set_cc_op(s, CC_OP_EFLAGS);
6442 } else {
6443 gen_update_cc_op(s);
6444 gen_jmp_im(pc_start - s->cs_base);
6445 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6446 tcg_const_i32(s->pc - s->cs_base));
6447 set_cc_op(s, CC_OP_EFLAGS);
6449 gen_eob(s);
6450 break;
6451 case 0xe8: /* call im */
6453 if (dflag != MO_16) {
6454 tval = (int32_t)insn_get(env, s, MO_32);
6455 } else {
6456 tval = (int16_t)insn_get(env, s, MO_16);
6458 next_eip = s->pc - s->cs_base;
6459 tval += next_eip;
6460 if (dflag == MO_16) {
6461 tval &= 0xffff;
6462 } else if (!CODE64(s)) {
6463 tval &= 0xffffffff;
6465 tcg_gen_movi_tl(cpu_T[0], next_eip);
6466 gen_push_v(s, cpu_T[0]);
6467 gen_jmp(s, tval);
6469 break;
6470 case 0x9a: /* lcall im */
6472 unsigned int selector, offset;
6474 if (CODE64(s))
6475 goto illegal_op;
6476 ot = dflag;
6477 offset = insn_get(env, s, ot);
6478 selector = insn_get(env, s, MO_16);
6480 tcg_gen_movi_tl(cpu_T[0], selector);
6481 tcg_gen_movi_tl(cpu_T[1], offset);
6483 goto do_lcall;
6484 case 0xe9: /* jmp im */
6485 if (dflag != MO_16) {
6486 tval = (int32_t)insn_get(env, s, MO_32);
6487 } else {
6488 tval = (int16_t)insn_get(env, s, MO_16);
6490 tval += s->pc - s->cs_base;
6491 if (dflag == MO_16) {
6492 tval &= 0xffff;
6493 } else if (!CODE64(s)) {
6494 tval &= 0xffffffff;
6496 gen_jmp(s, tval);
6497 break;
6498 case 0xea: /* ljmp im */
6500 unsigned int selector, offset;
6502 if (CODE64(s))
6503 goto illegal_op;
6504 ot = dflag;
6505 offset = insn_get(env, s, ot);
6506 selector = insn_get(env, s, MO_16);
6508 tcg_gen_movi_tl(cpu_T[0], selector);
6509 tcg_gen_movi_tl(cpu_T[1], offset);
6511 goto do_ljmp;
6512 case 0xeb: /* jmp Jb */
6513 tval = (int8_t)insn_get(env, s, MO_8);
6514 tval += s->pc - s->cs_base;
6515 if (dflag == MO_16) {
6516 tval &= 0xffff;
6518 gen_jmp(s, tval);
6519 break;
6520 case 0x70 ... 0x7f: /* jcc Jb */
6521 tval = (int8_t)insn_get(env, s, MO_8);
6522 goto do_jcc;
6523 case 0x180 ... 0x18f: /* jcc Jv */
6524 if (dflag != MO_16) {
6525 tval = (int32_t)insn_get(env, s, MO_32);
6526 } else {
6527 tval = (int16_t)insn_get(env, s, MO_16);
6529 do_jcc:
6530 next_eip = s->pc - s->cs_base;
6531 tval += next_eip;
6532 if (dflag == MO_16) {
6533 tval &= 0xffff;
6535 gen_jcc(s, b, tval, next_eip);
6536 break;
6538 case 0x190 ... 0x19f: /* setcc Gv */
6539 modrm = cpu_ldub_code(env, s->pc++);
6540 gen_setcc1(s, b, cpu_T[0]);
6541 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6542 break;
6543 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6544 if (!(s->cpuid_features & CPUID_CMOV)) {
6545 goto illegal_op;
6547 ot = dflag;
6548 modrm = cpu_ldub_code(env, s->pc++);
6549 reg = ((modrm >> 3) & 7) | rex_r;
6550 gen_cmovcc1(env, s, ot, b, modrm, reg);
6551 break;
6553 /************************/
6554 /* flags */
6555 case 0x9c: /* pushf */
6556 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6557 if (s->vm86 && s->iopl != 3) {
6558 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6559 } else {
6560 gen_update_cc_op(s);
6561 gen_helper_read_eflags(cpu_T[0], cpu_env);
6562 gen_push_v(s, cpu_T[0]);
6564 break;
6565 case 0x9d: /* popf */
6566 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6567 if (s->vm86 && s->iopl != 3) {
6568 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6569 } else {
6570 ot = gen_pop_T0(s);
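/* Select which EFLAGS bits the pop may modify: CPL 0 may update
   IF and IOPL, CPL <= IOPL may update IF but not IOPL, and
   otherwise neither; a 16-bit pop also leaves the high half alone. */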
6571 if (s->cpl == 0) {
6572 if (dflag != MO_16) {
6573 gen_helper_write_eflags(cpu_env, cpu_T[0],
6574 tcg_const_i32((TF_MASK | AC_MASK |
6575 ID_MASK | NT_MASK |
6576 IF_MASK |
6577 IOPL_MASK)));
6578 } else {
6579 gen_helper_write_eflags(cpu_env, cpu_T[0],
6580 tcg_const_i32((TF_MASK | AC_MASK |
6581 ID_MASK | NT_MASK |
6582 IF_MASK | IOPL_MASK)
6583 & 0xffff));
6585 } else {
6586 if (s->cpl <= s->iopl) {
6587 if (dflag != MO_16) {
6588 gen_helper_write_eflags(cpu_env, cpu_T[0],
6589 tcg_const_i32((TF_MASK |
6590 AC_MASK |
6591 ID_MASK |
6592 NT_MASK |
6593 IF_MASK)));
6594 } else {
6595 gen_helper_write_eflags(cpu_env, cpu_T[0],
6596 tcg_const_i32((TF_MASK |
6597 AC_MASK |
6598 ID_MASK |
6599 NT_MASK |
6600 IF_MASK)
6601 & 0xffff));
6603 } else {
6604 if (dflag != MO_16) {
6605 gen_helper_write_eflags(cpu_env, cpu_T[0],
6606 tcg_const_i32((TF_MASK | AC_MASK |
6607 ID_MASK | NT_MASK)));
6608 } else {
6609 gen_helper_write_eflags(cpu_env, cpu_T[0],
6610 tcg_const_i32((TF_MASK | AC_MASK |
6611 ID_MASK | NT_MASK)
6612 & 0xffff));
6616 gen_pop_update(s, ot);
6617 set_cc_op(s, CC_OP_EFLAGS);
6618 /* abort translation because TF/AC flag may change */
6619 gen_jmp_im(s->pc - s->cs_base);
6620 gen_eob(s);
6622 break;
6623 case 0x9e: /* sahf */
6624 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6625 goto illegal_op;
6626 gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
6627 gen_compute_eflags(s);
6628 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6629 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6630 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6631 break;
6632 case 0x9f: /* lahf */
6633 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6634 goto illegal_op;
6635 gen_compute_eflags(s);
6636 /* Note: gen_compute_eflags() only gives the condition codes */
6637 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6638 gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
6639 break;
6640 case 0xf5: /* cmc */
6641 gen_compute_eflags(s);
6642 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6643 break;
6644 case 0xf8: /* clc */
6645 gen_compute_eflags(s);
6646 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6647 break;
6648 case 0xf9: /* stc */
6649 gen_compute_eflags(s);
6650 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6651 break;
6652 case 0xfc: /* cld */
6653 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6654 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6655 break;
6656 case 0xfd: /* std */
6657 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6658 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6659 break;
6661 /************************/
6662 /* bit operations */
6663 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6664 ot = dflag;
6665 modrm = cpu_ldub_code(env, s->pc++);
6666 op = (modrm >> 3) & 7;
6667 mod = (modrm >> 6) & 3;
6668 rm = (modrm & 7) | REX_B(s);
6669 if (mod != 3) {
6670 s->rip_offset = 1;
6671 gen_lea_modrm(env, s, modrm);
6672 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6673 } else {
6674 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6676 /* load shift */
6677 val = cpu_ldub_code(env, s->pc++);
6678 tcg_gen_movi_tl(cpu_T[1], val);
6679 if (op < 4)
6680 goto illegal_op;
6681 op -= 4;
6682 goto bt_op;
6683 case 0x1a3: /* bt Gv, Ev */
6684 op = 0;
6685 goto do_btx;
6686 case 0x1ab: /* bts */
6687 op = 1;
6688 goto do_btx;
6689 case 0x1b3: /* btr */
6690 op = 2;
6691 goto do_btx;
6692 case 0x1bb: /* btc */
6693 op = 3;
6694 do_btx:
6695 ot = dflag;
6696 modrm = cpu_ldub_code(env, s->pc++);
6697 reg = ((modrm >> 3) & 7) | rex_r;
6698 mod = (modrm >> 6) & 3;
6699 rm = (modrm & 7) | REX_B(s);
6700 gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
6701 if (mod != 3) {
6702 gen_lea_modrm(env, s, modrm);
6703 /* specific case: we need to add a displacement */
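/* The bit index may reach beyond the addressed word, so fold the
   excess into the address: sign-extend the index, divide by the
   operand width (arithmetic shift by 3 + ot) and scale back to
   bytes. E.g. "bt %eax, mem" with eax = 100 adds (100 >> 5) << 2
   = 12 to the address, and the masking below tests bit 100 & 31 = 4. */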
6704 gen_exts(ot, cpu_T[1]);
6705 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6706 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6707 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6708 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6709 } else {
6710 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6712 bt_op:
6713 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6714 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6715 switch(op) {
6716 case 0:
6717 break;
6718 case 1:
6719 tcg_gen_movi_tl(cpu_tmp0, 1);
6720 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6721 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6722 break;
6723 case 2:
6724 tcg_gen_movi_tl(cpu_tmp0, 1);
6725 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6726 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6727 break;
6728 default:
6729 case 3:
6730 tcg_gen_movi_tl(cpu_tmp0, 1);
6731 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6732 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6733 break;
6735 if (op != 0) {
6736 if (mod != 3) {
6737 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6738 } else {
6739 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
6743 /* Delay all CC updates until after the store above. Note that
6744 C is the result of the test, Z is unchanged, and the others
6745 are all undefined. */
6746 switch (s->cc_op) {
6747 case CC_OP_MULB ... CC_OP_MULQ:
6748 case CC_OP_ADDB ... CC_OP_ADDQ:
6749 case CC_OP_ADCB ... CC_OP_ADCQ:
6750 case CC_OP_SUBB ... CC_OP_SUBQ:
6751 case CC_OP_SBBB ... CC_OP_SBBQ:
6752 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6753 case CC_OP_INCB ... CC_OP_INCQ:
6754 case CC_OP_DECB ... CC_OP_DECQ:
6755 case CC_OP_SHLB ... CC_OP_SHLQ:
6756 case CC_OP_SARB ... CC_OP_SARQ:
6757 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6758 /* Z was going to be computed from the non-zero status of CC_DST.
6759 We can get that same Z value (and the new C value) by leaving
6760 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6761 same width. */
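/* Each CC_OP group lists its four operand sizes consecutively, so
   (cc_op - CC_OP_MULB) & 3 recovers the size index, and adding it
   to CC_OP_SARB picks the SAR op of the same width. */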
6762 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6763 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6764 break;
6765 default:
6766 /* Otherwise, generate EFLAGS and replace the C bit. */
6767 gen_compute_eflags(s);
6768 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6769 ctz32(CC_C), 1);
6770 break;
6772 break;
6773 case 0x1bc: /* bsf / tzcnt */
6774 case 0x1bd: /* bsr / lzcnt */
6775 ot = dflag;
6776 modrm = cpu_ldub_code(env, s->pc++);
6777 reg = ((modrm >> 3) & 7) | rex_r;
6778 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6779 gen_extu(ot, cpu_T[0]);
6781 /* Note that lzcnt and tzcnt are in different extensions. */
6782 if ((prefixes & PREFIX_REPZ)
6783 && (b & 1
6784 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6785 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6786 int size = 8 << ot;
6787 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6788 if (b & 1) {
6789 /* For lzcnt, reduce the target_ulong result by the
6790 number of zeros that we expect to find at the top. */
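/* E.g. a 32-bit lzcnt on a 64-bit target: clz of the zero-extended
   value counts 32 extra leading zeros, hence the subtraction of
   TARGET_LONG_BITS - size = 32 below. */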
6791 gen_helper_clz(cpu_T[0], cpu_T[0]);
6792 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6793 } else {
6794 /* For tzcnt, a zero input must return the operand size:
6795 force all bits outside the operand size to 1. */
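/* E.g. size == 16 gives mask = 0x...ffff0000, so a zero input
   yields ctz = 16, the operand size. */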
6796 target_ulong mask = (target_ulong)-2 << (size - 1);
6797 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6798 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6800 /* For lzcnt/tzcnt, C and Z bits are defined and are
6801 related to the result. */
6802 gen_op_update1_cc();
6803 set_cc_op(s, CC_OP_BMILGB + ot);
6804 } else {
6805 /* For bsr/bsf, only the Z bit is defined and it is related
6806 to the input and not the result. */
6807 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6808 set_cc_op(s, CC_OP_LOGICB + ot);
6809 if (b & 1) {
6810 /* For bsr, return the bit index of the first 1 bit,
6811 not the count of leading zeros. */
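/* XOR with TARGET_LONG_BITS - 1 (an all-ones bit pattern) computes
   63 - clz (or 31 - clz), converting the leading-zero count into
   the index of the most significant set bit. */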
6812 gen_helper_clz(cpu_T[0], cpu_T[0]);
6813 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6814 } else {
6815 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6817 /* ??? The manual says that the output is undefined when the
6818 input is zero, but real hardware leaves it unchanged, and
6819 real programs appear to depend on that. */
6820 tcg_gen_movi_tl(cpu_tmp0, 0);
6821 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6822 cpu_regs[reg], cpu_T[0]);
6824 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
6825 break;
6826 /************************/
6827 /* bcd */
6828 case 0x27: /* daa */
6829 if (CODE64(s))
6830 goto illegal_op;
6831 gen_update_cc_op(s);
6832 gen_helper_daa(cpu_env);
6833 set_cc_op(s, CC_OP_EFLAGS);
6834 break;
6835 case 0x2f: /* das */
6836 if (CODE64(s))
6837 goto illegal_op;
6838 gen_update_cc_op(s);
6839 gen_helper_das(cpu_env);
6840 set_cc_op(s, CC_OP_EFLAGS);
6841 break;
6842 case 0x37: /* aaa */
6843 if (CODE64(s))
6844 goto illegal_op;
6845 gen_update_cc_op(s);
6846 gen_helper_aaa(cpu_env);
6847 set_cc_op(s, CC_OP_EFLAGS);
6848 break;
6849 case 0x3f: /* aas */
6850 if (CODE64(s))
6851 goto illegal_op;
6852 gen_update_cc_op(s);
6853 gen_helper_aas(cpu_env);
6854 set_cc_op(s, CC_OP_EFLAGS);
6855 break;
6856 case 0xd4: /* aam */
6857 if (CODE64(s))
6858 goto illegal_op;
6859 val = cpu_ldub_code(env, s->pc++);
6860 if (val == 0) {
6861 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6862 } else {
6863 gen_helper_aam(cpu_env, tcg_const_i32(val));
6864 set_cc_op(s, CC_OP_LOGICB);
6866 break;
6867 case 0xd5: /* aad */
6868 if (CODE64(s))
6869 goto illegal_op;
6870 val = cpu_ldub_code(env, s->pc++);
6871 gen_helper_aad(cpu_env, tcg_const_i32(val));
6872 set_cc_op(s, CC_OP_LOGICB);
6873 break;
6874 /************************/
6875 /* misc */
6876 case 0x90: /* nop */
6877 /* XXX: correct lock test for all insn */
6878 if (prefixes & PREFIX_LOCK) {
6879 goto illegal_op;
6881 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6882 if (REX_B(s)) {
6883 goto do_xchg_reg_eax;
6885 if (prefixes & PREFIX_REPZ) {
6886 gen_update_cc_op(s);
6887 gen_jmp_im(pc_start - s->cs_base);
6888 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6889 s->is_jmp = DISAS_TB_JUMP;
6891 break;
6892 case 0x9b: /* fwait */
6893 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6894 (HF_MP_MASK | HF_TS_MASK)) {
6895 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6896 } else {
6897 gen_update_cc_op(s);
6898 gen_jmp_im(pc_start - s->cs_base);
6899 gen_helper_fwait(cpu_env);
6901 break;
6902 case 0xcc: /* int3 */
6903 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6904 break;
6905 case 0xcd: /* int N */
6906 val = cpu_ldub_code(env, s->pc++);
6907 if (s->vm86 && s->iopl != 3) {
6908 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6909 } else {
6910 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6912 break;
6913 case 0xce: /* into */
6914 if (CODE64(s))
6915 goto illegal_op;
6916 gen_update_cc_op(s);
6917 gen_jmp_im(pc_start - s->cs_base);
6918 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6919 break;
6920 #ifdef WANT_ICEBP
6921 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6922 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6923 #if 1
6924 gen_debug(s, pc_start - s->cs_base);
6925 #else
6926 /* start debug */
6927 tb_flush(CPU(x86_env_get_cpu(env)));
6928 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6929 #endif
6930 break;
6931 #endif
6932 case 0xfa: /* cli */
6933 if (!s->vm86) {
6934 if (s->cpl <= s->iopl) {
6935 gen_helper_cli(cpu_env);
6936 } else {
6937 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6939 } else {
6940 if (s->iopl == 3) {
6941 gen_helper_cli(cpu_env);
6942 } else {
6943 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6946 break;
6947 case 0xfb: /* sti */
6948 if (!s->vm86) {
6949 if (s->cpl <= s->iopl) {
6950 gen_sti:
6951 gen_helper_sti(cpu_env);
6952 /* interrupts are enabled again only after the insn following sti */
6953 /* If several consecutive insns inhibit interrupts, only the
6954 _first_ one sets the inhibit flag */
6955 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6956 gen_helper_set_inhibit_irq(cpu_env);
6957 /* give a chance to handle pending irqs */
6958 gen_jmp_im(s->pc - s->cs_base);
6959 gen_eob(s);
6960 } else {
6961 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6963 } else {
6964 if (s->iopl == 3) {
6965 goto gen_sti;
6966 } else {
6967 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6970 break;
6971 case 0x62: /* bound */
6972 if (CODE64(s))
6973 goto illegal_op;
6974 ot = dflag;
6975 modrm = cpu_ldub_code(env, s->pc++);
6976 reg = (modrm >> 3) & 7;
6977 mod = (modrm >> 6) & 3;
6978 if (mod == 3)
6979 goto illegal_op;
6980 gen_op_mov_v_reg(ot, cpu_T[0], reg);
6981 gen_lea_modrm(env, s, modrm);
6982 gen_jmp_im(pc_start - s->cs_base);
6983 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6984 if (ot == MO_16) {
6985 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6986 } else {
6987 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6989 break;
6990 case 0x1c8 ... 0x1cf: /* bswap reg */
6991 reg = (b & 7) | REX_B(s);
6992 #ifdef TARGET_X86_64
6993 if (dflag == MO_64) {
6994 gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
6995 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6996 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
6997 } else
6998 #endif
7000 gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
7001 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
7002 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
7003 gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
7005 break;
7006 case 0xd6: /* salc */
7007 if (CODE64(s))
7008 goto illegal_op;
7009 gen_compute_eflags_c(s, cpu_T[0]);
7010 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
7011 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
7012 break;
7013 case 0xe0: /* loopnz */
7014 case 0xe1: /* loopz */
7015 case 0xe2: /* loop */
7016 case 0xe3: /* jecxz */
7018 TCGLabel *l1, *l2, *l3;
7020 tval = (int8_t)insn_get(env, s, MO_8);
7021 next_eip = s->pc - s->cs_base;
7022 tval += next_eip;
7023 if (dflag == MO_16) {
7024 tval &= 0xffff;
7027 l1 = gen_new_label();
7028 l2 = gen_new_label();
7029 l3 = gen_new_label();
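/* l1: branch taken (jump to tval), l3: branch not taken
   (fall through to next_eip), l2: common exit */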
7030 b &= 3;
7031 switch(b) {
7032 case 0: /* loopnz */
7033 case 1: /* loopz */
7034 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7035 gen_op_jz_ecx(s->aflag, l3);
7036 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
7037 break;
7038 case 2: /* loop */
7039 gen_op_add_reg_im(s->aflag, R_ECX, -1);
7040 gen_op_jnz_ecx(s->aflag, l1);
7041 break;
7042 default:
7043 case 3: /* jcxz */
7044 gen_op_jz_ecx(s->aflag, l1);
7045 break;
7048 gen_set_label(l3);
7049 gen_jmp_im(next_eip);
7050 tcg_gen_br(l2);
7052 gen_set_label(l1);
7053 gen_jmp_im(tval);
7054 gen_set_label(l2);
7055 gen_eob(s);
7057 break;
7058 case 0x130: /* wrmsr */
7059 case 0x132: /* rdmsr */
7060 if (s->cpl != 0) {
7061 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7062 } else {
7063 gen_update_cc_op(s);
7064 gen_jmp_im(pc_start - s->cs_base);
7065 if (b & 2) {
7066 gen_helper_rdmsr(cpu_env);
7067 } else {
7068 gen_helper_wrmsr(cpu_env);
7071 break;
7072 case 0x131: /* rdtsc */
7073 gen_update_cc_op(s);
7074 gen_jmp_im(pc_start - s->cs_base);
7075 if (s->tb->cflags & CF_USE_ICOUNT) {
7076 gen_io_start();
7078 gen_helper_rdtsc(cpu_env);
7079 if (s->tb->cflags & CF_USE_ICOUNT) {
7080 gen_io_end();
7081 gen_jmp(s, s->pc - s->cs_base);
7083 break;
7084 case 0x133: /* rdpmc */
7085 gen_update_cc_op(s);
7086 gen_jmp_im(pc_start - s->cs_base);
7087 gen_helper_rdpmc(cpu_env);
7088 break;
7089 case 0x134: /* sysenter */
7090 /* For Intel, SYSENTER is also valid in 64-bit mode */
7091 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7092 goto illegal_op;
7093 if (!s->pe) {
7094 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7095 } else {
7096 gen_update_cc_op(s);
7097 gen_jmp_im(pc_start - s->cs_base);
7098 gen_helper_sysenter(cpu_env);
7099 gen_eob(s);
7101 break;
7102 case 0x135: /* sysexit */
7103 /* For Intel, SYSEXIT is also valid in 64-bit mode */
7104 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7105 goto illegal_op;
7106 if (!s->pe) {
7107 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7108 } else {
7109 gen_update_cc_op(s);
7110 gen_jmp_im(pc_start - s->cs_base);
7111 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7112 gen_eob(s);
7114 break;
7115 #ifdef TARGET_X86_64
7116 case 0x105: /* syscall */
7117 /* XXX: is it usable in real mode? */
7118 gen_update_cc_op(s);
7119 gen_jmp_im(pc_start - s->cs_base);
7120 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7121 gen_eob(s);
7122 break;
7123 case 0x107: /* sysret */
7124 if (!s->pe) {
7125 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7126 } else {
7127 gen_update_cc_op(s);
7128 gen_jmp_im(pc_start - s->cs_base);
7129 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7130 /* condition codes are modified only in long mode */
7131 if (s->lma) {
7132 set_cc_op(s, CC_OP_EFLAGS);
7134 gen_eob(s);
7136 break;
7137 #endif
7138 case 0x1a2: /* cpuid */
7139 gen_update_cc_op(s);
7140 gen_jmp_im(pc_start - s->cs_base);
7141 gen_helper_cpuid(cpu_env);
7142 break;
7143 case 0xf4: /* hlt */
7144 if (s->cpl != 0) {
7145 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7146 } else {
7147 gen_update_cc_op(s);
7148 gen_jmp_im(pc_start - s->cs_base);
7149 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7150 s->is_jmp = DISAS_TB_JUMP;
7152 break;
7153 case 0x100:
7154 modrm = cpu_ldub_code(env, s->pc++);
7155 mod = (modrm >> 6) & 3;
7156 op = (modrm >> 3) & 7;
7157 switch(op) {
7158 case 0: /* sldt */
7159 if (!s->pe || s->vm86)
7160 goto illegal_op;
7161 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7162 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
7163 ot = mod == 3 ? dflag : MO_16;
7164 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7165 break;
7166 case 2: /* lldt */
7167 if (!s->pe || s->vm86)
7168 goto illegal_op;
7169 if (s->cpl != 0) {
7170 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7171 } else {
7172 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7173 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7174 gen_jmp_im(pc_start - s->cs_base);
7175 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7176 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7178 break;
7179 case 1: /* str */
7180 if (!s->pe || s->vm86)
7181 goto illegal_op;
7182 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7183 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7184 ot = mod == 3 ? dflag : MO_16;
7185 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7186 break;
7187 case 3: /* ltr */
7188 if (!s->pe || s->vm86)
7189 goto illegal_op;
7190 if (s->cpl != 0) {
7191 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7192 } else {
7193 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7194 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7195 gen_jmp_im(pc_start - s->cs_base);
7196 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7197 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7199 break;
7200 case 4: /* verr */
7201 case 5: /* verw */
7202 if (!s->pe || s->vm86)
7203 goto illegal_op;
7204 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7205 gen_update_cc_op(s);
7206 if (op == 4) {
7207 gen_helper_verr(cpu_env, cpu_T[0]);
7208 } else {
7209 gen_helper_verw(cpu_env, cpu_T[0]);
7211 set_cc_op(s, CC_OP_EFLAGS);
7212 break;
7213 default:
7214 goto illegal_op;
7216 break;
7217 case 0x101:
7218 modrm = cpu_ldub_code(env, s->pc++);
7219 mod = (modrm >> 6) & 3;
7220 op = (modrm >> 3) & 7;
7221 rm = modrm & 7;
7222 switch(op) {
7223 case 0: /* sgdt */
7224 if (mod == 3)
7225 goto illegal_op;
7226 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7227 gen_lea_modrm(env, s, modrm);
7228 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7229 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7230 gen_add_A0_im(s, 2);
7231 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7232 if (dflag == MO_16) {
7233 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7235 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7236 break;
7237 case 1:
7238 if (mod == 3) {
7239 switch (rm) {
7240 case 0: /* monitor */
7241 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7242 s->cpl != 0)
7243 goto illegal_op;
7244 gen_update_cc_op(s);
7245 gen_jmp_im(pc_start - s->cs_base);
7246 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7247 gen_extu(s->aflag, cpu_A0);
7248 gen_add_A0_ds_seg(s);
7249 gen_helper_monitor(cpu_env, cpu_A0);
7250 break;
7251 case 1: /* mwait */
7252 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7253 s->cpl != 0)
7254 goto illegal_op;
7255 gen_update_cc_op(s);
7256 gen_jmp_im(pc_start - s->cs_base);
7257 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7258 gen_eob(s);
7259 break;
7260 case 2: /* clac */
7261 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7262 s->cpl != 0) {
7263 goto illegal_op;
7265 gen_helper_clac(cpu_env);
7266 gen_jmp_im(s->pc - s->cs_base);
7267 gen_eob(s);
7268 break;
7269 case 3: /* stac */
7270 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7271 s->cpl != 0) {
7272 goto illegal_op;
7274 gen_helper_stac(cpu_env);
7275 gen_jmp_im(s->pc - s->cs_base);
7276 gen_eob(s);
7277 break;
7278 default:
7279 goto illegal_op;
7281 } else { /* sidt */
7282 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7283 gen_lea_modrm(env, s, modrm);
7284 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7285 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7286 gen_add_A0_im(s, 2);
7287 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7288 if (dflag == MO_16) {
7289 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7291 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7293 break;
7294 case 2: /* lgdt */
7295 case 3: /* lidt */
7296 if (mod == 3) {
7297 gen_update_cc_op(s);
7298 gen_jmp_im(pc_start - s->cs_base);
7299 switch(rm) {
7300 case 0: /* VMRUN */
7301 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7302 goto illegal_op;
7303 if (s->cpl != 0) {
7304 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7305 break;
7306 } else {
7307 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7308 tcg_const_i32(s->pc - pc_start));
7309 tcg_gen_exit_tb(0);
7310 s->is_jmp = DISAS_TB_JUMP;
7312 break;
7313 case 1: /* VMMCALL */
7314 if (!(s->flags & HF_SVME_MASK))
7315 goto illegal_op;
7316 gen_helper_vmmcall(cpu_env);
7317 break;
7318 case 2: /* VMLOAD */
7319 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7320 goto illegal_op;
7321 if (s->cpl != 0) {
7322 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7323 break;
7324 } else {
7325 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7327 break;
7328 case 3: /* VMSAVE */
7329 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7330 goto illegal_op;
7331 if (s->cpl != 0) {
7332 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7333 break;
7334 } else {
7335 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7337 break;
7338 case 4: /* STGI */
7339 if ((!(s->flags & HF_SVME_MASK) &&
7340 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7341 !s->pe)
7342 goto illegal_op;
7343 if (s->cpl != 0) {
7344 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7345 break;
7346 } else {
7347 gen_helper_stgi(cpu_env);
7349 break;
7350 case 5: /* CLGI */
7351 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7352 goto illegal_op;
7353 if (s->cpl != 0) {
7354 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7355 break;
7356 } else {
7357 gen_helper_clgi(cpu_env);
7359 break;
7360 case 6: /* SKINIT */
7361 if ((!(s->flags & HF_SVME_MASK) &&
7362 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7363 !s->pe)
7364 goto illegal_op;
7365 gen_helper_skinit(cpu_env);
7366 break;
7367 case 7: /* INVLPGA */
7368 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7369 goto illegal_op;
7370 if (s->cpl != 0) {
7371 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7372 break;
7373 } else {
7374 gen_helper_invlpga(cpu_env,
7375 tcg_const_i32(s->aflag - 1));
7377 break;
7378 default:
7379 goto illegal_op;
7381 } else if (s->cpl != 0) {
7382 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7383 } else {
7384 gen_svm_check_intercept(s, pc_start,
7385 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7386 gen_lea_modrm(env, s, modrm);
7387 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
7388 gen_add_A0_im(s, 2);
7389 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7390 if (dflag == MO_16) {
7391 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7393 if (op == 2) {
7394 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7395 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7396 } else {
7397 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7398 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7401 break;
7402 case 4: /* smsw */
7403 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
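/* smsw reads the low 32 bits of CR0; on a big-endian host the low
   word of the 64-bit cr[0] field lives at offset +4, hence the
   adjusted load below. */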
7404 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7405 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7406 #else
7407 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7408 #endif
7409 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
7410 break;
7411 case 6: /* lmsw */
7412 if (s->cpl != 0) {
7413 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7414 } else {
7415 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7416 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7417 gen_helper_lmsw(cpu_env, cpu_T[0]);
7418 gen_jmp_im(s->pc - s->cs_base);
7419 gen_eob(s);
7421 break;
7422 case 7:
7423 if (mod != 3) { /* invlpg */
7424 if (s->cpl != 0) {
7425 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7426 } else {
7427 gen_update_cc_op(s);
7428 gen_jmp_im(pc_start - s->cs_base);
7429 gen_lea_modrm(env, s, modrm);
7430 gen_helper_invlpg(cpu_env, cpu_A0);
7431 gen_jmp_im(s->pc - s->cs_base);
7432 gen_eob(s);
7434 } else {
7435 switch (rm) {
7436 case 0: /* swapgs */
7437 #ifdef TARGET_X86_64
7438 if (CODE64(s)) {
7439 if (s->cpl != 0) {
7440 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7441 } else {
7442 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7443 offsetof(CPUX86State,segs[R_GS].base));
7444 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7445 offsetof(CPUX86State,kernelgsbase));
7446 tcg_gen_st_tl(cpu_T[1], cpu_env,
7447 offsetof(CPUX86State,segs[R_GS].base));
7448 tcg_gen_st_tl(cpu_T[0], cpu_env,
7449 offsetof(CPUX86State,kernelgsbase));
7451 } else
7452 #endif
7454 goto illegal_op;
7456 break;
7457 case 1: /* rdtscp */
7458 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7459 goto illegal_op;
7460 gen_update_cc_op(s);
7461 gen_jmp_im(pc_start - s->cs_base);
7462 if (s->tb->cflags & CF_USE_ICOUNT) {
7463 gen_io_start();
7465 gen_helper_rdtscp(cpu_env);
7466 if (s->tb->cflags & CF_USE_ICOUNT) {
7467 gen_io_end();
7468 gen_jmp(s, s->pc - s->cs_base);
7470 break;
7471 default:
7472 goto illegal_op;
7475 break;
7476 default:
7477 goto illegal_op;
7479 break;
7480 case 0x108: /* invd */
7481 case 0x109: /* wbinvd */
7482 if (s->cpl != 0) {
7483 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7484 } else {
7485 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7486 /* nothing to do */
7488 break;
7489 case 0x63: /* arpl or movslS (x86_64) */
7490 #ifdef TARGET_X86_64
7491 if (CODE64(s)) {
7492 int d_ot;
7493 /* d_ot is the size of destination */
7494 d_ot = dflag;
7496 modrm = cpu_ldub_code(env, s->pc++);
7497 reg = ((modrm >> 3) & 7) | rex_r;
7498 mod = (modrm >> 6) & 3;
7499 rm = (modrm & 7) | REX_B(s);
7501 if (mod == 3) {
7502 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
7503 /* sign extend */
7504 if (d_ot == MO_64) {
7505 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7507 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7508 } else {
7509 gen_lea_modrm(env, s, modrm);
7510 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
7511 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7513 } else
7514 #endif
7516 TCGLabel *label1;
7517 TCGv t0, t1, t2, a0;
7519 if (!s->pe || s->vm86)
7520 goto illegal_op;
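/* arpl: if the destination selector's RPL (its low 2 bits, t0 & 3)
   is below the source's RPL (t1 & 3), raise it to the source RPL
   and set ZF; otherwise leave it alone and clear ZF. t2 holds the
   CC_Z bit to merge into the flags afterwards. */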
7521 t0 = tcg_temp_local_new();
7522 t1 = tcg_temp_local_new();
7523 t2 = tcg_temp_local_new();
7524 ot = MO_16;
7525 modrm = cpu_ldub_code(env, s->pc++);
7526 reg = (modrm >> 3) & 7;
7527 mod = (modrm >> 6) & 3;
7528 rm = modrm & 7;
7529 if (mod != 3) {
7530 gen_lea_modrm(env, s, modrm);
7531 gen_op_ld_v(s, ot, t0, cpu_A0);
7532 a0 = tcg_temp_local_new();
7533 tcg_gen_mov_tl(a0, cpu_A0);
7534 } else {
7535 gen_op_mov_v_reg(ot, t0, rm);
7536 TCGV_UNUSED(a0);
7538 gen_op_mov_v_reg(ot, t1, reg);
7539 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7540 tcg_gen_andi_tl(t1, t1, 3);
7541 tcg_gen_movi_tl(t2, 0);
7542 label1 = gen_new_label();
7543 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7544 tcg_gen_andi_tl(t0, t0, ~3);
7545 tcg_gen_or_tl(t0, t0, t1);
7546 tcg_gen_movi_tl(t2, CC_Z);
7547 gen_set_label(label1);
7548 if (mod != 3) {
7549 gen_op_st_v(s, ot, t0, a0);
7550 tcg_temp_free(a0);
7551 } else {
7552 gen_op_mov_reg_v(ot, rm, t0);
7554 gen_compute_eflags(s);
7555 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7556 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7557 tcg_temp_free(t0);
7558 tcg_temp_free(t1);
7559 tcg_temp_free(t2);
7561 break;
7562 case 0x102: /* lar */
7563 case 0x103: /* lsl */
7565 TCGLabel *label1;
7566 TCGv t0;
7567 if (!s->pe || s->vm86)
7568 goto illegal_op;
7569 ot = dflag != MO_16 ? MO_32 : MO_16;
7570 modrm = cpu_ldub_code(env, s->pc++);
7571 reg = ((modrm >> 3) & 7) | rex_r;
7572 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7573 t0 = tcg_temp_local_new();
7574 gen_update_cc_op(s);
7575 if (b == 0x102) {
7576 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7577 } else {
7578 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
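/* The helpers set CC_Z in cc_src only when the descriptor was
   loadable; the destination register is written back only in that
   case, as the architecture specifies. */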
7580 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7581 label1 = gen_new_label();
7582 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7583 gen_op_mov_reg_v(ot, reg, t0);
7584 gen_set_label(label1);
7585 set_cc_op(s, CC_OP_EFLAGS);
7586 tcg_temp_free(t0);
7588 break;
7589 case 0x118:
7590 modrm = cpu_ldub_code(env, s->pc++);
7591 mod = (modrm >> 6) & 3;
7592 op = (modrm >> 3) & 7;
7593 switch(op) {
7594 case 0: /* prefetchnta */
7595 case 1: /* prefetcht0 */
7596 case 2: /* prefetcht1 */
7597 case 3: /* prefetcht2 */
7598 if (mod == 3)
7599 goto illegal_op;
7600 gen_lea_modrm(env, s, modrm);
7601 /* nothing more to do */
7602 break;
7603 default: /* nop (multi byte) */
7604 gen_nop_modrm(env, s, modrm);
7605 break;
7607 break;
7608 case 0x119 ... 0x11f: /* nop (multi byte) */
7609 modrm = cpu_ldub_code(env, s->pc++);
7610 gen_nop_modrm(env, s, modrm);
7611 break;
7612 case 0x120: /* mov reg, crN */
7613 case 0x122: /* mov crN, reg */
7614 if (s->cpl != 0) {
7615 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7616 } else {
7617 modrm = cpu_ldub_code(env, s->pc++);
7618 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7619 * AMD documentation (24594.pdf) and testing of
7620 * intel 386 and 486 processors all show that the mod bits
7621 * are assumed to be 1's, regardless of actual values. */
7623 rm = (modrm & 7) | REX_B(s);
7624 reg = ((modrm >> 3) & 7) | rex_r;
7625 if (CODE64(s))
7626 ot = MO_64;
7627 else
7628 ot = MO_32;
7629 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7630 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7631 reg = 8;
7633 switch(reg) {
7634 case 0:
7635 case 2:
7636 case 3:
7637 case 4:
7638 case 8:
7639 gen_update_cc_op(s);
7640 gen_jmp_im(pc_start - s->cs_base);
7641 if (b & 2) {
7642 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7643 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7644 cpu_T[0]);
7645 gen_jmp_im(s->pc - s->cs_base);
7646 gen_eob(s);
7647 } else {
7648 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7649 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7651 break;
7652 default:
7653 goto illegal_op;
7656 break;
7657 case 0x121: /* mov reg, drN */
7658 case 0x123: /* mov drN, reg */
7659 if (s->cpl != 0) {
7660 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7661 } else {
7662 modrm = cpu_ldub_code(env, s->pc++);
7663 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7664 * AMD documentation (24594.pdf) and testing of
7665 * intel 386 and 486 processors all show that the mod bits
7666 * are assumed to be 1's, regardless of actual values. */
7668 rm = (modrm & 7) | REX_B(s);
7669 reg = ((modrm >> 3) & 7) | rex_r;
7670 if (CODE64(s))
7671 ot = MO_64;
7672 else
7673 ot = MO_32;
7674 /* XXX: do it dynamically with CR4.DE bit */
7675 if (reg == 4 || reg == 5 || reg >= 8)
7676 goto illegal_op;
7677 if (b & 2) {
7678 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7679 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7680 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7681 gen_jmp_im(s->pc - s->cs_base);
7682 gen_eob(s);
7683 } else {
7684 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7685 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7686 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7689 break;
7690 case 0x106: /* clts */
7691 if (s->cpl != 0) {
7692 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7693 } else {
7694 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7695 gen_helper_clts(cpu_env);
7696 /* abort block because static cpu state changed */
7697 gen_jmp_im(s->pc - s->cs_base);
7698 gen_eob(s);
7700 break;
7701 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7702 case 0x1c3: /* MOVNTI reg, mem */
7703 if (!(s->cpuid_features & CPUID_SSE2))
7704 goto illegal_op;
7705 ot = mo_64_32(dflag);
7706 modrm = cpu_ldub_code(env, s->pc++);
7707 mod = (modrm >> 6) & 3;
7708 if (mod == 3)
7709 goto illegal_op;
7710 reg = ((modrm >> 3) & 7) | rex_r;
7711 /* generate a generic store */
7712 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7713 break;
7714 case 0x1ae:
7715 modrm = cpu_ldub_code(env, s->pc++);
7716 mod = (modrm >> 6) & 3;
7717 op = (modrm >> 3) & 7;
7718 switch(op) {
7719 case 0: /* fxsave */
7720 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7721 (s->prefix & PREFIX_LOCK))
7722 goto illegal_op;
7723 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7724 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7725 break;
7727 gen_lea_modrm(env, s, modrm);
7728 gen_update_cc_op(s);
7729 gen_jmp_im(pc_start - s->cs_base);
7730 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7731 break;
7732 case 1: /* fxrstor */
7733 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7734 (s->prefix & PREFIX_LOCK))
7735 goto illegal_op;
7736 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7737 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7738 break;
7740 gen_lea_modrm(env, s, modrm);
7741 gen_update_cc_op(s);
7742 gen_jmp_im(pc_start - s->cs_base);
7743 gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7744 break;
7745 case 2: /* ldmxcsr */
7746 case 3: /* stmxcsr */
7747 if (s->flags & HF_TS_MASK) {
7748 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7749 break;
7751 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7752 mod == 3)
7753 goto illegal_op;
7754 gen_lea_modrm(env, s, modrm);
7755 if (op == 2) {
7756 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7757 s->mem_index, MO_LEUL);
7758 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7759 } else {
7760 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7761 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
7763 break;
7764 case 5: /* lfence */
7765 case 6: /* mfence */
7766 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7767 goto illegal_op;
7768 break;
7769 case 7: /* sfence / clflush */
7770 if ((modrm & 0xc7) == 0xc0) {
7771 /* sfence */
7772 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7773 if (!(s->cpuid_features & CPUID_SSE))
7774 goto illegal_op;
7775 } else {
7776 /* clflush */
7777 if (!(s->cpuid_features & CPUID_CLFLUSH))
7778 goto illegal_op;
7779 gen_lea_modrm(env, s, modrm);
7781 break;
7782 default:
7783 goto illegal_op;
7785 break;
7786 case 0x10d: /* 3DNow! prefetch(w) */
7787 modrm = cpu_ldub_code(env, s->pc++);
7788 mod = (modrm >> 6) & 3;
7789 if (mod == 3)
7790 goto illegal_op;
7791 gen_lea_modrm(env, s, modrm);
7792 /* ignore for now */
7793 break;
7794 case 0x1aa: /* rsm */
7795 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7796 if (!(s->flags & HF_SMM_MASK))
7797 goto illegal_op;
7798 gen_update_cc_op(s);
7799 gen_jmp_im(s->pc - s->cs_base);
7800 gen_helper_rsm(cpu_env);
7801 gen_eob(s);
7802 break;
7803 case 0x1b8: /* SSE4.2 popcnt */
7804 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7805 PREFIX_REPZ)
7806 goto illegal_op;
7807 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7808 goto illegal_op;
7810 modrm = cpu_ldub_code(env, s->pc++);
7811 reg = ((modrm >> 3) & 7) | rex_r;
7813 if (s->prefix & PREFIX_DATA) {
7814 ot = MO_16;
7815 } else {
7816 ot = mo_64_32(dflag);
7819 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7820 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7821 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
7823 set_cc_op(s, CC_OP_EFLAGS);
7824 break;
7825 case 0x10e ... 0x10f:
7826 /* 3DNow! instructions, ignore prefixes */
7827 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7828 case 0x110 ... 0x117:
7829 case 0x128 ... 0x12f:
7830 case 0x138 ... 0x13a:
7831 case 0x150 ... 0x179:
7832 case 0x17c ... 0x17f:
7833 case 0x1c2:
7834 case 0x1c4 ... 0x1c6:
7835 case 0x1d0 ... 0x1fe:
7836 gen_sse(env, s, b, pc_start, rex_r);
7837 break;
7838 default:
7839 goto illegal_op;
7841 /* lock generation */
7842 if (s->prefix & PREFIX_LOCK)
7843 gen_helper_unlock();
7844 return s->pc;
7845 illegal_op:
7846 if (s->prefix & PREFIX_LOCK)
7847 gen_helper_unlock();
7848 /* XXX: ensure that no lock was generated */
7849 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7850 return s->pc;
7853 void optimize_flags_init(void)
7855 static const char reg_names[CPU_NB_REGS][4] = {
7856 #ifdef TARGET_X86_64
7857 [R_EAX] = "rax",
7858 [R_EBX] = "rbx",
7859 [R_ECX] = "rcx",
7860 [R_EDX] = "rdx",
7861 [R_ESI] = "rsi",
7862 [R_EDI] = "rdi",
7863 [R_EBP] = "rbp",
7864 [R_ESP] = "rsp",
7865 [8] = "r8",
7866 [9] = "r9",
7867 [10] = "r10",
7868 [11] = "r11",
7869 [12] = "r12",
7870 [13] = "r13",
7871 [14] = "r14",
7872 [15] = "r15",
7873 #else
7874 [R_EAX] = "eax",
7875 [R_EBX] = "ebx",
7876 [R_ECX] = "ecx",
7877 [R_EDX] = "edx",
7878 [R_ESI] = "esi",
7879 [R_EDI] = "edi",
7880 [R_EBP] = "ebp",
7881 [R_ESP] = "esp",
7882 #endif
7884 int i;
7886 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7887 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7888 offsetof(CPUX86State, cc_op), "cc_op");
7889 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7890 "cc_dst");
7891 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7892 "cc_src");
7893 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7894 "cc_src2");
7896 for (i = 0; i < CPU_NB_REGS; ++i) {
7897 cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
7898 offsetof(CPUX86State, regs[i]),
7899 reg_names[i]);
7903 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7904 basic block 'tb'. If search_pc is TRUE, also generate PC
7905 information for each intermediate instruction. */
7906 static inline void gen_intermediate_code_internal(X86CPU *cpu,
7907 TranslationBlock *tb,
7908 bool search_pc)
7910 CPUState *cs = CPU(cpu);
7911 CPUX86State *env = &cpu->env;
7912 DisasContext dc1, *dc = &dc1;
7913 target_ulong pc_ptr;
7914 CPUBreakpoint *bp;
7915 int j, lj;
7916 uint64_t flags;
7917 target_ulong pc_start;
7918 target_ulong cs_base;
7919 int num_insns;
7920 int max_insns;
7922 /* generate intermediate code */
7923 pc_start = tb->pc;
7924 cs_base = tb->cs_base;
7925 flags = tb->flags;
7927 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7928 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7929 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7930 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7931 dc->f_st = 0;
7932 dc->vm86 = (flags >> VM_SHIFT) & 1;
7933 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7934 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7935 dc->tf = (flags >> TF_SHIFT) & 1;
7936 dc->singlestep_enabled = cs->singlestep_enabled;
7937 dc->cc_op = CC_OP_DYNAMIC;
7938 dc->cc_op_dirty = false;
7939 dc->cs_base = cs_base;
7940 dc->tb = tb;
7941 dc->popl_esp_hack = 0;
7942 /* select memory access functions */
7943 dc->mem_index = 0;
7944 if (flags & HF_SOFTMMU_MASK) {
7945 dc->mem_index = cpu_mmu_index(env, false);
7947 dc->cpuid_features = env->features[FEAT_1_EDX];
7948 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
7949 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
7950 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
7951 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
7952 #ifdef TARGET_X86_64
7953 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7954 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7955 #endif
7956 dc->flags = flags;
7957 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
7958 (flags & HF_INHIBIT_IRQ_MASK)
7959 #ifndef CONFIG_SOFTMMU
7960 || (flags & HF_SOFTMMU_MASK)
7961 #endif
7963 /* Do not optimize repz jumps at all in icount mode, because
7964 rep movsS instructions are executed with different paths
7965 in the !repz_opt and repz_opt modes. The first one was always
7966 used except in single step mode. Disabling the jump
7967 optimization here makes the control paths equivalent in
7968 normal and single step modes.
7969 As a result there is no jump optimization for repz in
7970 record/replay modes, and there is always an additional
7971 step for ecx=0 when icount is enabled. */
7973 dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
7974 #if 0
7975 /* check addseg logic */
7976 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7977 printf("ERROR addseg\n");
7978 #endif
7980 cpu_T[0] = tcg_temp_new();
7981 cpu_T[1] = tcg_temp_new();
7982 cpu_A0 = tcg_temp_new();
7984 cpu_tmp0 = tcg_temp_new();
7985 cpu_tmp1_i64 = tcg_temp_new_i64();
7986 cpu_tmp2_i32 = tcg_temp_new_i32();
7987 cpu_tmp3_i32 = tcg_temp_new_i32();
7988 cpu_tmp4 = tcg_temp_new();
7989 cpu_ptr0 = tcg_temp_new_ptr();
7990 cpu_ptr1 = tcg_temp_new_ptr();
7991 cpu_cc_srcT = tcg_temp_local_new();
7993 dc->is_jmp = DISAS_NEXT;
7994 pc_ptr = pc_start;
7995 lj = -1;
7996 num_insns = 0;
7997 max_insns = tb->cflags & CF_COUNT_MASK;
7998 if (max_insns == 0)
7999 max_insns = CF_COUNT_MASK;
8001 gen_tb_start(tb);
8002 for(;;) {
8003 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
8004 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
8005 if (bp->pc == pc_ptr &&
8006 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
8007 gen_debug(dc, pc_ptr - dc->cs_base);
8008 goto done_generating;
8012 if (search_pc) {
8013 j = tcg_op_buf_count();
8014 if (lj < j) {
8015 lj++;
8016 while (lj < j)
8017 tcg_ctx.gen_opc_instr_start[lj++] = 0;
8019 tcg_ctx.gen_opc_pc[lj] = pc_ptr;
8020 gen_opc_cc_op[lj] = dc->cc_op;
8021 tcg_ctx.gen_opc_instr_start[lj] = 1;
8022 tcg_ctx.gen_opc_icount[lj] = num_insns;
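/* When search_pc is set, record for every TCG op slot the guest PC,
   the cc_op state and the instruction count, so the CPU state can be
   reconstructed in restore_state_to_opc() after a fault. */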
8024 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8025 gen_io_start();
8027 pc_ptr = disas_insn(env, dc, pc_ptr);
8028 num_insns++;
8029 /* stop translation if indicated */
8030 if (dc->is_jmp)
8031 break;
8032 /* in single step mode, we generate only one instruction and
8033 then generate an exception */
8034 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
8035 the flag and abort the translation to give the irqs a
8036 chance to happen */
8037 if (dc->tf || dc->singlestep_enabled ||
8038 (flags & HF_INHIBIT_IRQ_MASK)) {
8039 gen_jmp_im(pc_ptr - dc->cs_base);
8040 gen_eob(dc);
8041 break;
8043 /* Do not cross a page boundary in icount mode, since that
8044 can cause an exception. Stop only when the boundary is
8045 crossed by the first instruction in the block.
8046 If the current instruction has already crossed the boundary,
8047 that is fine, because an exception has not stopped this code. */
8049 if ((tb->cflags & CF_USE_ICOUNT)
8050 && ((pc_ptr & TARGET_PAGE_MASK)
8051 != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
8052 || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
8053 gen_jmp_im(pc_ptr - dc->cs_base);
8054 gen_eob(dc);
8055 break;
8057 /* if too long translation, stop generation too */
8058 if (tcg_op_buf_full() ||
8059 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
8060 num_insns >= max_insns) {
8061 gen_jmp_im(pc_ptr - dc->cs_base);
8062 gen_eob(dc);
8063 break;
8065 if (singlestep) {
8066 gen_jmp_im(pc_ptr - dc->cs_base);
8067 gen_eob(dc);
8068 break;
8071 if (tb->cflags & CF_LAST_IO)
8072 gen_io_end();
8073 done_generating:
8074 gen_tb_end(tb, num_insns);
8076 /* make sure the last values are filled in */
8077 if (search_pc) {
8078 j = tcg_op_buf_count();
8079 lj++;
8080 while (lj <= j)
8081 tcg_ctx.gen_opc_instr_start[lj++] = 0;
8084 #ifdef DEBUG_DISAS
8085 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8086 int disas_flags;
8087 qemu_log("----------------\n");
8088 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8089 #ifdef TARGET_X86_64
8090 if (dc->code64)
8091 disas_flags = 2;
8092 else
8093 #endif
8094 disas_flags = !dc->code32;
8095 log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
8096 qemu_log("\n");
8098 #endif
8100 if (!search_pc) {
8101 tb->size = pc_ptr - pc_start;
8102 tb->icount = num_insns;
8106 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
8108 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, false);
8111 void gen_intermediate_code_pc(CPUX86State *env, TranslationBlock *tb)
8113 gen_intermediate_code_internal(x86_env_get_cpu(env), tb, true);
8116 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb, int pc_pos)
8118 int cc_op;
8119 #ifdef DEBUG_DISAS
8120 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
8121 int i;
8122 qemu_log("RESTORE:\n");
8123 for(i = 0;i <= pc_pos; i++) {
8124 if (tcg_ctx.gen_opc_instr_start[i]) {
8125 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i,
8126 tcg_ctx.gen_opc_pc[i]);
8129 qemu_log("pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
8130 pc_pos, tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base,
8131 (uint32_t)tb->cs_base);
8133 #endif
8134 env->eip = tcg_ctx.gen_opc_pc[pc_pos] - tb->cs_base;
8135 cc_op = gen_opc_cc_op[pc_pos];
8136 if (cc_op != CC_OP_DYNAMIC)
8137 env->cc_op = cc_op;