/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>

#include "qemu/host-utils.h"
#include "cpu.h"
#include "disas/disas.h"
#include "tcg-op.h"
#include "exec/cpu_ldst.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"

#include "trace-tcg.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20

#ifdef TARGET_X86_64
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
#else
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif

#ifdef TARGET_X86_64
# define ctztl  ctz64
# define clztl  clz64
#else
# define ctztl  ctz32
# define clztl  clz32
#endif

//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0;
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
/* local temps */
static TCGv cpu_T[2];
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;

#include "exec/gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif

typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    TCGMemOp aflag;
    TCGMemOp dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int vex_l;  /* vex vector length */
    int vex_v;  /* vex vvvv register, without one's complement.  */
    int ss32;   /* 32 bit stack segment */
    CCOp cc_op;  /* current CC operation */
    bool cc_op_dirty;
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int repz_opt; /* optimize jumps within repz instructions */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
} DisasContext;

static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

enum {
    /* I386 int registers */
    OR_EAX, /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16, /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};

enum {
    USES_CC_DST  = 1,
    USES_CC_SRC  = 2,
    USES_CC_SRC2 = 4,
    USES_CC_SRCT = 8,
};

/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_CLR] = 0,
};
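
/* set_cc_op() consults this table when switching CC_OP: any of
   cpu_cc_{dst,src,src2,srcT} that the new value no longer reads is
   explicitly discarded, letting the TCG optimizer delete the dead
   computations.  For example, going from CC_OP_SUBL to CC_OP_LOGICL
   discards cc_src and cc_srcT, since logic ops derive every flag from
   cc_dst alone.  */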

static void set_cc_op(DisasContext *s, CCOp op)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used.  */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(cpu_cc_srcT);
    }

    if (op == CC_OP_DYNAMIC) {
        /* The DYNAMIC setting is translator only, and should never be
           stored.  Thus we always consider it clean.  */
        s->cc_op_dirty = false;
    } else {
        /* Discard any computed CC_OP value (see shifts).  */
        if (s->cc_op == CC_OP_DYNAMIC) {
            tcg_gen_discard_i32(cpu_cc_op);
        }
        s->cc_op_dirty = true;
    }
    s->cc_op = op;
}

static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}

#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(HOST_WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif

/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4". Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(int reg)
{
    if (reg < 4) {
        return false;
    }
#ifdef TARGET_X86_64
    if (reg >= 8 || x86_64_hregs) {
        return false;
    }
#endif
    return true;
}
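
/* Example: with no REX prefix, "mov %al, %ah" encodes destination
   register 4 and byte_reg_is_xH(4) is true, so bits 15..8 of EAX are
   written.  Under any REX prefix the decoder sets x86_64_hregs and the
   same register number 4 selects %spl instead.  */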

/* Select the size of a push/pop operation.  */
static inline TCGMemOp mo_pushpop(DisasContext *s, TCGMemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}

/* Select only size 64 else 32.  Used for SSE operand sizes.  */
static inline TCGMemOp mo_64_32(TCGMemOp ot)
{
#ifdef TARGET_X86_64
    return ot == MO_64 ? MO_64 : MO_32;
#else
    return MO_32;
#endif
}

/* Select size 8 if lsb of B is clear, else OT.  Used for decoding
   byte vs word opcodes.  */
static inline TCGMemOp mo_b_d(int b, TCGMemOp ot)
{
    return b & 1 ? ot : MO_8;
}

/* Select size 8 if lsb of B is clear, else OT capped at 32.
   Used for decoding operand size of port opcodes.  */
static inline TCGMemOp mo_b_d32(int b, TCGMemOp ot)
{
    return b & 1 ? (ot == MO_16 ? MO_16 : MO_32) : MO_8;
}

static void gen_op_mov_reg_v(TCGMemOp ot, int reg, TCGv t0)
{
    switch(ot) {
    case MO_8:
        if (!byte_reg_is_xH(reg)) {
            tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
        } else {
            tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
        }
        break;
    case MO_16:
        tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        tcg_gen_ext32u_tl(cpu_regs[reg], t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mov_tl(cpu_regs[reg], t0);
        break;
#endif
    default:
        tcg_abort();
    }
}

static inline void gen_op_mov_v_reg(TCGMemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(reg)) {
        tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
        tcg_gen_ext8u_tl(t0, t0);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_jmp_v(TCGv dest)
{
    tcg_gen_st_tl(dest, cpu_env, offsetof(CPUX86State, eip));
}

static inline void gen_op_add_reg_im(TCGMemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(cpu_tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_add_reg_T0(TCGMemOp size, int reg)
{
    tcg_gen_add_tl(cpu_tmp0, cpu_regs[reg], cpu_T[0]);
    gen_op_mov_reg_v(size, reg, cpu_tmp0);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    /* For x86_64, this sets the higher half of register to zero.
       For i386, this is equivalent to a nop. */
    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
    } else {
        tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
    }
#else
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_mov_tl(cpu_A0, cpu_regs[reg]);
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[reg]);
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif

static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}

static inline void gen_op_st_rm_T0_A0(DisasContext *s, int idx, int d)
{
    if (d == OR_TMP0) {
        gen_op_st_v(s, idx, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_reg_v(idx, d, cpu_T[0]);
    }
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    gen_op_jmp_v(cpu_tmp0);
}

static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
        break;
#endif
    case MO_32:
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
        break;
    case MO_16:
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESI]);
        gen_op_addl_A0_seg(s, override);
        break;
    default:
        tcg_abort();
    }
}

static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    switch (s->aflag) {
#ifdef TARGET_X86_64
    case MO_64:
        gen_op_movq_A0_reg(R_EDI);
        break;
#endif
    case MO_32:
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
        break;
    case MO_16:
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_EDI]);
        gen_op_addl_A0_seg(s, R_ES);
        break;
    default:
        tcg_abort();
    }
}

static inline void gen_op_movl_T0_Dshift(TCGMemOp ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}

static TCGv gen_ext_tl(TCGv dst, TCGv src, TCGMemOp size, bool sign)
{
    switch (size) {
    case MO_8:
        if (sign) {
            tcg_gen_ext8s_tl(dst, src);
        } else {
            tcg_gen_ext8u_tl(dst, src);
        }
        return dst;
    case MO_16:
        if (sign) {
            tcg_gen_ext16s_tl(dst, src);
        } else {
            tcg_gen_ext16u_tl(dst, src);
        }
        return dst;
#ifdef TARGET_X86_64
    case MO_32:
        if (sign) {
            tcg_gen_ext32s_tl(dst, src);
        } else {
            tcg_gen_ext32u_tl(dst, src);
        }
        return dst;
#endif
    default:
        return src;
    }
}

static void gen_extu(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, false);
}

static void gen_exts(TCGMemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}

static inline void gen_op_jnz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(TCGMemOp size, TCGLabel *label1)
{
    tcg_gen_mov_tl(cpu_tmp0, cpu_regs[R_ECX]);
    gen_extu(size, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(TCGMemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, cpu_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, cpu_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, cpu_env, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_helper_out_func(TCGMemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(cpu_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(cpu_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(cpu_env, v, n);
        break;
    default:
        tcg_abort();
    }
}

static void gen_check_io(DisasContext *s, TCGMemOp ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    target_ulong next_eip;

    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case MO_8:
            gen_helper_check_iob(cpu_env, cpu_tmp2_i32);
            break;
        case MO_16:
            gen_helper_check_iow(cpu_env, cpu_tmp2_i32);
            break;
        case MO_32:
            gen_helper_check_iol(cpu_env, cpu_tmp2_i32);
            break;
        default:
            tcg_abort();
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        gen_update_cc_op(s);
        gen_jmp_im(cur_eip);
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_env, cpu_tmp2_i32,
                                tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}

static inline void gen_movs(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static void gen_op_update1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update3_cc(TCGv reg)
{
    tcg_gen_mov_tl(cpu_cc_src2, reg);
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_movi_tl(cpu_cc_srcT, 0);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    TCGv zero, dst, src1, src2;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(cpu_cc_src, CC_Z | CC_P);
        set_cc_op(s, CC_OP_EFLAGS);
        return;
    }

    TCGV_UNUSED(zero);
    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live.  */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        zero = tcg_const_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    gen_update_cc_op(s);
    gen_helper_cc_compute_all(cpu_cc_src, dst, src1, src2, cpu_cc_op);
    set_cc_op(s, CC_OP_EFLAGS);

    if (dead) {
        tcg_temp_free(zero);
    }
}

typedef struct CCPrepare {
    TCGCond cond;
    TCGv reg;
    TCGv reg2;
    target_ulong imm;
    target_ulong mask;
    bool use_reg2;
    bool no_setcond;
} CCPrepare;
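
/* A CCPrepare describes a comparison without emitting it: the
   predicate is "(reg & mask) cond reg2" if use_reg2 is set, else
   "(reg & mask) cond imm", where mask == -1 means no masking is
   needed.  no_setcond marks the case where reg already holds the 0/1
   result.  Consumers below (gen_setcc1, gen_jcc1, gen_cmovcc1) then
   pick the cheapest matching TCG op: setcond, brcond or movcond.  */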

/* compute eflags.C to reg */
static CCPrepare gen_prepare_eflags_c(DisasContext *s, TCGv reg)
{
    TCGv t0, t1;
    int size, shift;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_SUBB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        /* If no temporary was used, be careful not to alias t1 and t0.  */
        t0 = TCGV_EQUAL(t1, cpu_cc_src) ? cpu_tmp0 : reg;
        tcg_gen_mov_tl(t0, cpu_cc_srcT);
        gen_extu(size, t0);
        goto add_sub;

    case CC_OP_ADDB ... CC_OP_ADDQ:
        /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
        size = s->cc_op - CC_OP_ADDB;
        t1 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
        t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
    add_sub:
        return (CCPrepare) { .cond = TCG_COND_LTU, .reg = t0,
                             .reg2 = t1, .mask = -1, .use_reg2 = true };

    case CC_OP_LOGICB ... CC_OP_LOGICQ:
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };

    case CC_OP_INCB ... CC_OP_INCQ:
    case CC_OP_DECB ... CC_OP_DECQ:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = -1, .no_setcond = true };

    case CC_OP_SHLB ... CC_OP_SHLQ:
        /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
        size = s->cc_op - CC_OP_SHLB;
        shift = (8 << size) - 1;
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = (target_ulong)1 << shift };

    case CC_OP_MULB ... CC_OP_MULQ:
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = -1 };

    case CC_OP_BMILGB ... CC_OP_BMILGQ:
        size = s->cc_op - CC_OP_BMILGB;
        t0 = gen_ext_tl(reg, cpu_cc_src, size, false);
        return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };

    case CC_OP_ADCX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_dst,
                             .mask = -1, .no_setcond = true };

    case CC_OP_EFLAGS:
    case CC_OP_SARB ... CC_OP_SARQ:
        /* CC_SRC & 1 */
        return (CCPrepare) { .cond = TCG_COND_NE,
                             .reg = cpu_cc_src, .mask = CC_C };

    default:
        /* The need to compute only C from CC_OP_DYNAMIC is important
           in efficiently implementing e.g. INC at the start of a TB.  */
        gen_update_cc_op(s);
        gen_helper_cc_compute_c(reg, cpu_cc_dst, cpu_cc_src,
                                cpu_cc_src2, cpu_cc_op);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                             .mask = -1, .no_setcond = true };
    }
}

/* compute eflags.P to reg */
static CCPrepare gen_prepare_eflags_p(DisasContext *s, TCGv reg)
{
    gen_compute_eflags(s);
    return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                         .mask = CC_P };
}

/* compute eflags.S to reg */
static CCPrepare gen_prepare_eflags_s(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_S };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, true);
            return (CCPrepare) { .cond = TCG_COND_LT, .reg = t0, .mask = -1 };
        }
    }
}

/* compute eflags.O to reg */
static CCPrepare gen_prepare_eflags_o(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src2,
                             .mask = -1, .no_setcond = true };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_NEVER, .mask = -1 };
    default:
        gen_compute_eflags(s);
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_O };
    }
}

/* compute eflags.Z to reg */
static CCPrepare gen_prepare_eflags_z(DisasContext *s, TCGv reg)
{
    switch (s->cc_op) {
    case CC_OP_DYNAMIC:
        gen_compute_eflags(s);
        /* FALLTHRU */
    case CC_OP_EFLAGS:
    case CC_OP_ADCX:
    case CC_OP_ADOX:
    case CC_OP_ADCOX:
        return (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                             .mask = CC_Z };
    case CC_OP_CLR:
        return (CCPrepare) { .cond = TCG_COND_ALWAYS, .mask = -1 };
    default:
        {
            TCGMemOp size = (s->cc_op - CC_OP_ADDB) & 3;
            TCGv t0 = gen_ext_tl(reg, cpu_cc_dst, size, false);
            return (CCPrepare) { .cond = TCG_COND_EQ, .reg = t0, .mask = -1 };
        }
    }
}

/* perform a conditional store into register 'reg' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static CCPrepare gen_prepare_cc(DisasContext *s, int b, TCGv reg)
{
    int inv, jcc_op, cond;
    TCGMemOp size;
    CCPrepare cc;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch (s->cc_op) {
    case CC_OP_SUBB ... CC_OP_SUBQ:
        /* We optimize relational operators for the cmp/jcc case.  */
        size = s->cc_op - CC_OP_SUBB;
        switch (jcc_op) {
        case JCC_BE:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_extu(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, false);
            cc = (CCPrepare) { .cond = TCG_COND_LEU, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        case JCC_L:
            cond = TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_mov_tl(cpu_tmp4, cpu_cc_srcT);
            gen_exts(size, cpu_tmp4);
            t0 = gen_ext_tl(cpu_tmp0, cpu_cc_src, size, true);
            cc = (CCPrepare) { .cond = cond, .reg = cpu_tmp4,
                               .reg2 = t0, .mask = -1, .use_reg2 = true };
            break;

        default:
            goto slow_jcc;
        }
        break;

    default:
    slow_jcc:
        /* This actually generates good code for JC, JZ and JS.  */
        switch (jcc_op) {
        case JCC_O:
            cc = gen_prepare_eflags_o(s, reg);
            break;
        case JCC_B:
            cc = gen_prepare_eflags_c(s, reg);
            break;
        case JCC_Z:
            cc = gen_prepare_eflags_z(s, reg);
            break;
        case JCC_BE:
            gen_compute_eflags(s);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = cpu_cc_src,
                               .mask = CC_Z | CC_C };
            break;
        case JCC_S:
            cc = gen_prepare_eflags_s(s, reg);
            break;
        case JCC_P:
            cc = gen_prepare_eflags_p(s, reg);
            break;
        case JCC_L:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S };
            break;
        default:
        case JCC_LE:
            gen_compute_eflags(s);
            if (TCGV_EQUAL(reg, cpu_cc_src)) {
                reg = cpu_tmp0;
            }
            tcg_gen_shri_tl(reg, cpu_cc_src, 4); /* CC_O -> CC_S */
            tcg_gen_xor_tl(reg, reg, cpu_cc_src);
            cc = (CCPrepare) { .cond = TCG_COND_NE, .reg = reg,
                               .mask = CC_S | CC_Z };
            break;
        }
        break;
    }

    if (inv) {
        cc.cond = tcg_invert_cond(cc.cond);
    }
    return cc;
}
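
/* Example: opcode 0x7c (JL) gives inv = 0 and jcc_op = JCC_L, while
   0x7d (JGE) differs only in inv = 1, so the prepared condition is
   simply inverted by tcg_invert_cond() rather than being rebuilt.  */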

static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.cond == TCG_COND_NE && !cc.use_reg2 && cc.imm == 0 &&
        cc.mask != 0 && (cc.mask & (cc.mask - 1)) == 0) {
        tcg_gen_shri_tl(reg, cc.reg, ctztl(cc.mask));
        tcg_gen_andi_tl(reg, reg, 1);
        return;
    }
    if (cc.mask != -1) {
        tcg_gen_andi_tl(reg, cc.reg, cc.mask);
        cc.reg = reg;
    }
    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}

static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
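
/* The single-bit fast path in gen_setcc1() turns a test such as
   "(cc_src & CC_Z) != 0" into a shift right by ctztl(CC_Z) plus an
   AND with 1, avoiding a full setcond operation.  */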

/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   A translation block must end soon.  */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, cpu_T[0]);

    gen_update_cc_op(s);
    if (cc.mask != -1) {
        tcg_gen_andi_tl(cpu_T[0], cc.reg, cc.mask);
        cc.reg = cpu_T[0];
    }
    set_cc_op(s, CC_OP_DYNAMIC);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}

/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static TCGLabel *gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}

static inline void gen_stos(DisasContext *s, TCGMemOp ot)
{
    gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_op(s, OP_CMPL, ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, TCGMemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
    gen_string_movl_A0_ESI(s);
    gen_op(s, OP_CMPL, ot, OR_TMP0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_ins(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(cpu_T[0], 0);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

static inline void gen_outs(DisasContext *s, TCGMemOp ot)
{
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);

    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (s->tb->cflags & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, TCGMemOp ot,              \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    TCGLabel *l2;                                                             \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_update_cc_op(s);                                                      \
    gen_jcc1(s, (JCC_Z << 1) | (nz ^ 1), l2);                                 \
    if (s->repz_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
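
/* A "rep movs" thus expands, per execution of the block, to:
       if (ecx == 0) goto next_insn;
       <one movs step>;
       ecx--;
       if (ecx == 0) goto next_insn;   // only if repz_opt
       goto current_insn;
   The loop is closed through the chained jump back to the current
   instruction rather than by a TCG-level loop, which is the Valgrind
   method referred to above.  */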

static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0:
        gen_helper_fadd_ST0_FT0(cpu_env);
        break;
    case 1:
        gen_helper_fmul_ST0_FT0(cpu_env);
        break;
    case 2:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 3:
        gen_helper_fcom_ST0_FT0(cpu_env);
        break;
    case 4:
        gen_helper_fsub_ST0_FT0(cpu_env);
        break;
    case 5:
        gen_helper_fsubr_ST0_FT0(cpu_env);
        break;
    case 6:
        gen_helper_fdiv_ST0_FT0(cpu_env);
        break;
    case 7:
        gen_helper_fdivr_ST0_FT0(cpu_env);
        break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0:
        gen_helper_fadd_STN_ST0(cpu_env, tmp);
        break;
    case 1:
        gen_helper_fmul_STN_ST0(cpu_env, tmp);
        break;
    case 4:
        gen_helper_fsubr_STN_ST0(cpu_env, tmp);
        break;
    case 5:
        gen_helper_fsub_STN_ST0(cpu_env, tmp);
        break;
    case 6:
        gen_helper_fdivr_STN_ST0(cpu_env, tmp);
        break;
    case 7:
        gen_helper_fdiv_STN_ST0(cpu_env, tmp);
        break;
    }
}

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, TCGMemOp ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    switch(op) {
    case OP_ADCL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_ADCB + ot);
        break;
    case OP_SBBL:
        gen_compute_eflags_c(s1, cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update3_cc(cpu_tmp4);
        set_cc_op(s1, CC_OP_SBBB + ot);
        break;
    case OP_ADDL:
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_ADDB + ot);
        break;
    case OP_SUBL:
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update2_cc();
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        gen_op_st_rm_T0_A0(s1, ot, d);
        gen_op_update1_cc();
        set_cc_op(s1, CC_OP_LOGICB + ot);
        break;
    case OP_CMPL:
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_srcT, cpu_T[0]);
        tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
        set_cc_op(s1, CC_OP_SUBB + ot);
        break;
    }
}
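
/* Example of the lazy flag protocol: for "sub %ebx, %eax", OP_SUBL
   saves the original destination in cc_srcT, the subtrahend in cc_src
   and the result in cc_dst, then records CC_OP_SUBL.  A following
   "jl" is resolved by gen_prepare_cc() as one signed comparison of
   cc_srcT against cc_src; no EFLAGS bits are ever materialized.  */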

/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, TCGMemOp ot, int d, int c)
{
    if (d != OR_TMP0) {
        gen_op_mov_v_reg(ot, cpu_T[0], d);
    } else {
        gen_op_ld_v(s1, ot, cpu_T[0], cpu_A0);
    }
    gen_compute_eflags_c(s1, cpu_cc_src);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        set_cc_op(s1, CC_OP_INCB + ot);
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        set_cc_op(s1, CC_OP_DECB + ot);
    }
    gen_op_st_rm_T0_A0(s1, ot, d);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_shift_flags(DisasContext *s, TCGMemOp ot, TCGv result,
                            TCGv shm1, TCGv count, bool is_right)
{
    TCGv_i32 z32, s32, oldop;
    TCGv z_tl;

    /* Store the results into the CC variables.  If we know that the
       variable must be dead, store unconditionally.  Otherwise we'll
       need to not disrupt the current contents.  */
    z_tl = tcg_const_tl(0);
    if (cc_op_live[s->cc_op] & USES_CC_DST) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_dst, count, z_tl,
                           result, cpu_cc_dst);
    } else {
        tcg_gen_mov_tl(cpu_cc_dst, result);
    }
    if (cc_op_live[s->cc_op] & USES_CC_SRC) {
        tcg_gen_movcond_tl(TCG_COND_NE, cpu_cc_src, count, z_tl,
                           shm1, cpu_cc_src);
    } else {
        tcg_gen_mov_tl(cpu_cc_src, shm1);
    }
    tcg_temp_free(z_tl);

    /* Get the two potential CC_OP values into temporaries.  */
    tcg_gen_movi_i32(cpu_tmp2_i32, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    if (s->cc_op == CC_OP_DYNAMIC) {
        oldop = cpu_cc_op;
    } else {
        tcg_gen_movi_i32(cpu_tmp3_i32, s->cc_op);
        oldop = cpu_tmp3_i32;
    }

    /* Conditionally store the CC_OP value.  */
    z32 = tcg_const_i32(0);
    s32 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(s32, count);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, s32, z32, cpu_tmp2_i32, oldop);
    tcg_temp_free_i32(z32);
    tcg_temp_free_i32(s32);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}
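
/* The movcond pairs above implement the x86 rule that a shift with a
   count of zero leaves EFLAGS untouched: the CC operands and the
   CC_OP selector are only replaced when count != 0.  Since the chosen
   CC_OP is now data-dependent, the translator must track it as
   CC_OP_DYNAMIC from here on.  */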

static void gen_shift_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);
    tcg_gen_subi_tl(cpu_tmp0, cpu_T[1], 1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, cpu_T[1], is_right);
}

static void gen_shift_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T[0], op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        set_cc_op(s, (is_right ? CC_OP_SARB : CC_OP_SHLB) + ot);
    }
}
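
/* Shifting by op2 - 1 first (into cpu_tmp4 above) preserves the last
   bit shifted out; gen_prepare_eflags_c() later recovers CF from it,
   as bit 0 for right shifts or as the operand's top bit for left
   shifts.  */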

static void gen_rot_rm_T1(DisasContext *s, TCGMemOp ot, int op1, int is_right)
{
    target_ulong mask = (ot == MO_64 ? 0x3f : 0x1f);
    TCGv_i32 t0, t1;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    switch (ot) {
    case MO_8:
        /* Replicate the 8-bit input so that a 32-bit rotate works.  */
        tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
        tcg_gen_muli_tl(cpu_T[0], cpu_T[0], 0x01010101);
        goto do_long;
    case MO_16:
        /* Replicate the 16-bit input so that a 32-bit rotate works.  */
        tcg_gen_deposit_tl(cpu_T[0], cpu_T[0], cpu_T[0], 16, 16);
        goto do_long;
    do_long:
#ifdef TARGET_X86_64
    case MO_32:
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
        if (is_right) {
            tcg_gen_rotr_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        } else {
            tcg_gen_rotl_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
        }
        tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
        break;
#endif
    default:
        if (is_right) {
            tcg_gen_rotr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            tcg_gen_rotl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    /* We'll need the flags computed into CC_SRC.  */
    gen_compute_eflags(s);

    /* The value that was "rotated out" is now present at the other end
       of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
       since we've computed the flags into CC_SRC, these variables are
       currently dead.  */
    if (is_right) {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
        tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
    } else {
        tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
        tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
    }
    tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
    tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);

    /* Now conditionally store the new CC_OP value.  If the shift count
       is 0 we keep the CC_OP_EFLAGS setting so that only CC_SRC is live.
       Otherwise reuse CC_OP_ADCOX, which has the C and O flags split out
       exactly as we computed above.  */
    t0 = tcg_const_i32(0);
    t1 = tcg_temp_new_i32();
    tcg_gen_trunc_tl_i32(t1, cpu_T[1]);
    tcg_gen_movi_i32(cpu_tmp2_i32, CC_OP_ADCOX);
    tcg_gen_movi_i32(cpu_tmp3_i32, CC_OP_EFLAGS);
    tcg_gen_movcond_i32(TCG_COND_NE, cpu_cc_op, t1, t0,
                        cpu_tmp2_i32, cpu_tmp3_i32);
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);

    /* The CC_OP value is no longer predictable.  */
    set_cc_op(s, CC_OP_DYNAMIC);
}

static void gen_rot_rm_im(DisasContext *s, TCGMemOp ot, int op1, int op2,
                          int is_right)
{
    int mask = (ot == MO_64 ? 0x3f : 0x1f);
    int shift;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    op2 &= mask;
    if (op2 != 0) {
        switch (ot) {
#ifdef TARGET_X86_64
        case MO_32:
            tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
            if (is_right) {
                tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            } else {
                tcg_gen_rotli_i32(cpu_tmp2_i32, cpu_tmp2_i32, op2);
            }
            tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
            break;
#endif
        default:
            if (is_right) {
                tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                tcg_gen_rotli_tl(cpu_T[0], cpu_T[0], op2);
            }
            break;
        case MO_8:
            mask = 7;
            goto do_shifts;
        case MO_16:
            mask = 15;
        do_shifts:
            shift = op2 & mask;
            if (is_right) {
                shift = mask + 1 - shift;
            }
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shli_tl(cpu_tmp0, cpu_T[0], shift);
            tcg_gen_shri_tl(cpu_T[0], cpu_T[0], mask + 1 - shift);
            tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
            break;
        }
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    if (op2 != 0) {
        /* Compute the flags into CC_SRC.  */
        gen_compute_eflags(s);

        /* The value that was "rotated out" is now present at the other end
           of the word.  Compute C into CC_DST and O into CC_SRC2.  Note that
           since we've computed the flags into CC_SRC, these variables are
           currently dead.  */
        if (is_right) {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask - 1);
            tcg_gen_shri_tl(cpu_cc_dst, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_cc_dst, 1);
        } else {
            tcg_gen_shri_tl(cpu_cc_src2, cpu_T[0], mask);
            tcg_gen_andi_tl(cpu_cc_dst, cpu_T[0], 1);
        }
        tcg_gen_andi_tl(cpu_cc_src2, cpu_cc_src2, 1);
        tcg_gen_xor_tl(cpu_cc_src2, cpu_cc_src2, cpu_cc_dst);
        set_cc_op(s, CC_OP_ADCOX);
    }
}

/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                           int is_right)
{
    gen_compute_eflags(s);
    assert(s->cc_op == CC_OP_EFLAGS);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    else
        gen_op_mov_v_reg(ot, cpu_T[0], op1);

    if (is_right) {
        switch (ot) {
        case MO_8:
            gen_helper_rcrb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rcrw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcrl(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rcrq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        default:
            tcg_abort();
        }
    } else {
        switch (ot) {
        case MO_8:
            gen_helper_rclb(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_16:
            gen_helper_rclw(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
        case MO_32:
            gen_helper_rcll(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#ifdef TARGET_X86_64
        case MO_64:
            gen_helper_rclq(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]);
            break;
#endif
        default:
            tcg_abort();
        }
    }
    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);
}

/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1(DisasContext *s, TCGMemOp ot, int op1,
                             bool is_right, TCGv count_in)
{
    target_ulong mask = (ot == MO_64 ? 63 : 31);
    TCGv count;

    /* load */
    if (op1 == OR_TMP0) {
        gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
    } else {
        gen_op_mov_v_reg(ot, cpu_T[0], op1);
    }

    count = tcg_temp_new();
    tcg_gen_andi_tl(count, count_in, mask);

    switch (ot) {
    case MO_16:
        /* Note: we implement the Intel behaviour for shift count > 16.
           This means "shrdw C, B, A" shifts A:B:A >> C.  Build the B:A
           portion by constructing it as a 32-bit value.  */
        if (is_right) {
            tcg_gen_deposit_tl(cpu_tmp0, cpu_T[0], cpu_T[1], 16, 16);
            tcg_gen_mov_tl(cpu_T[1], cpu_T[0]);
            tcg_gen_mov_tl(cpu_T[0], cpu_tmp0);
        } else {
            tcg_gen_deposit_tl(cpu_T[1], cpu_T[0], cpu_T[1], 16, 16);
        }
        /* FALLTHRU */
#ifdef TARGET_X86_64
    case MO_32:
        /* Concatenate the two 32-bit values and use a 64-bit shift.  */
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[0], cpu_T[1]);
            tcg_gen_shr_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shr_i64(cpu_T[0], cpu_T[0], count);
        } else {
            tcg_gen_concat_tl_i64(cpu_T[0], cpu_T[1], cpu_T[0]);
            tcg_gen_shl_i64(cpu_tmp0, cpu_T[0], cpu_tmp0);
            tcg_gen_shl_i64(cpu_T[0], cpu_T[0], count);
            tcg_gen_shri_i64(cpu_tmp0, cpu_tmp0, 32);
            tcg_gen_shri_i64(cpu_T[0], cpu_T[0], 32);
        }
        break;
#endif
    default:
        tcg_gen_subi_tl(cpu_tmp0, count, 1);
        if (is_right) {
            tcg_gen_shr_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        } else {
            tcg_gen_shl_tl(cpu_tmp0, cpu_T[0], cpu_tmp0);
            if (ot == MO_16) {
                /* Only needed if count > 16, for Intel behaviour.  */
                tcg_gen_subfi_tl(cpu_tmp4, 33, count);
                tcg_gen_shr_tl(cpu_tmp4, cpu_T[1], cpu_tmp4);
                tcg_gen_or_tl(cpu_tmp0, cpu_tmp0, cpu_tmp4);
            }

            tcg_gen_subfi_tl(cpu_tmp4, mask + 1, count);
            tcg_gen_shl_tl(cpu_T[0], cpu_T[0], count);
            tcg_gen_shr_tl(cpu_T[1], cpu_T[1], cpu_tmp4);
        }
        tcg_gen_movi_tl(cpu_tmp4, 0);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[1], count, cpu_tmp4,
                           cpu_tmp4, cpu_T[1]);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        break;
    }

    /* store */
    gen_op_st_rm_T0_A0(s, ot, op1);

    gen_shift_flags(s, ot, cpu_T[0], cpu_tmp0, count, is_right);
    tcg_temp_free(count);
}

static void gen_shift(DisasContext *s1, int op, TCGMemOp ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_v_reg(ot, cpu_T[1], s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}

static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        tcg_gen_movi_tl(cpu_T[1], c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}

static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int mod, rm, code, override, must_add_seg;
    TCGv sum;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        havesib = 0;
        base = rm;
        index = -1;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = cpu_ldub_code(env, s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            if (index == 4) {
                index = -1;  /* no index */
            }
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)cpu_ldl_code(env, s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int32_t)cpu_ldl_code(env, s->pc);
            s->pc += 4;
            break;
        }

        /* For correct popl handling with esp.  */
        if (base == R_ESP && s->popl_esp_hack) {
            disp += s->popl_esp_hack;
        }

        /* Compute the address, with a minimum number of TCG ops.  */
        TCGV_UNUSED(sum);
        if (index >= 0) {
            if (scale == 0) {
                sum = cpu_regs[index];
            } else {
                tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
                sum = cpu_A0;
            }
            if (base >= 0) {
                tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
                sum = cpu_A0;
            }
        } else if (base >= 0) {
            sum = cpu_regs[base];
        }
        if (TCGV_IS_UNUSED(sum)) {
            tcg_gen_movi_tl(cpu_A0, disp);
        } else {
            tcg_gen_addi_tl(cpu_A0, sum, disp);
        }

        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP) {
                    override = R_SS;
                } else {
                    override = R_DS;
                }
            }

            tcg_gen_ld_tl(cpu_tmp0, cpu_env,
                          offsetof(CPUX86State, segs[override].base));
            if (CODE64(s)) {
                if (s->aflag == MO_32) {
                    tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
                }
                tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
                return;
            }

            tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
        }

        if (s->aflag == MO_32) {
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
        break;

    case MO_16:
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = cpu_lduw_code(env, s->pc);
                s->pc += 2;
                tcg_gen_movi_tl(cpu_A0, disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)cpu_ldub_code(env, s->pc++);
            break;
        default:
        case 2:
            disp = (int16_t)cpu_lduw_code(env, s->pc);
            s->pc += 2;
            break;
        }

        sum = cpu_A0;
        switch (rm) {
        case 0:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
            break;
        case 1:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
            break;
        case 2:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
            break;
        case 3:
            tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
            break;
        case 4:
            sum = cpu_regs[R_ESI];
            break;
        case 5:
            sum = cpu_regs[R_EDI];
            break;
        case 6:
            sum = cpu_regs[R_EBP];
            break;
        default:
        case 7:
            sum = cpu_regs[R_EBX];
            break;
        }
        tcg_gen_addi_tl(cpu_A0, sum, disp);
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6) {
                    override = R_SS;
                } else {
                    override = R_DS;
                }
            }
            gen_op_addl_A0_seg(s, override);
        }
        break;

    default:
        tcg_abort();
    }
}
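
/* Worked example: for "mov 0x10(%eax,%ebx,4), %ecx" in 32-bit code,
   the bytes give mod = 1 and rm = 4, so a SIB byte follows with
   scale = 2, index = %ebx, base = %eax, then an 8-bit displacement of
   0x10.  The code above emits
       A0 = %ebx << 2;  A0 += %eax;  A0 += 0x10;
   adding the DS base afterwards only if must_add_seg is set.  */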

static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    switch (s->aflag) {
    case MO_64:
    case MO_32:
        base = rm;

        if (base == 4) {
            code = cpu_ldub_code(env, s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
        break;

    case MO_16:
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
        break;

    default:
        tcg_abort();
    }
}

/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(s, override);
        }
    }
}

/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm,
                           TCGMemOp ot, int reg, int is_store)
{
    int mod, rm;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_mov_reg_v(ot, rm, cpu_T[0]);
        } else {
            gen_op_mov_v_reg(ot, cpu_T[0], rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
    } else {
        gen_lea_modrm(env, s, modrm);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_v_reg(ot, cpu_T[0], reg);
            gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        } else {
            gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
            if (reg != OR_TMP0)
                gen_op_mov_reg_v(ot, reg, cpu_T[0]);
        }
    }
}

static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
{
    uint32_t ret;

    switch (ot) {
    case MO_8:
        ret = cpu_ldub_code(env, s->pc);
        s->pc++;
        break;
    case MO_16:
        ret = cpu_lduw_code(env, s->pc);
        s->pc += 2;
        break;
    case MO_32:
#ifdef TARGET_X86_64
    case MO_64:
#endif
        ret = cpu_ldl_code(env, s->pc);
        s->pc += 4;
        break;
    default:
        tcg_abort();
    }
    return ret;
}

static inline int insn_const_size(TCGMemOp ot)
{
    if (ot <= MO_32) {
        return 1 << ot;
    } else {
        return 4;
    }
}

static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((uintptr_t)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
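
/* tcg_gen_goto_tb()/tcg_gen_exit_tb() implement direct block
   chaining: exiting with "(uintptr_t)tb + tb_num" tells the execution
   loop which of this TB's two jump slots to patch, so once the target
   TB exists the generated code branches straight to it without going
   back through the main loop.  Jumps that cross a page boundary skip
   this so the target page can be revalidated.  */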

static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    TCGLabel *l1, *l2;

    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}

static void gen_cmovcc1(CPUX86State *env, DisasContext *s, TCGMemOp ot, int b,
                        int modrm, int reg)
{
    CCPrepare cc;

    gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);

    cc = gen_prepare_cc(s, b, cpu_T[1]);
    if (cc.mask != -1) {
        TCGv t0 = tcg_temp_new();
        tcg_gen_andi_tl(t0, cc.reg, cc.mask);
        cc.reg = t0;
    }
    if (!cc.use_reg2) {
        cc.reg2 = tcg_const_tl(cc.imm);
    }

    tcg_gen_movcond_tl(cc.cond, cpu_T[0], cc.reg, cc.reg2,
                       cpu_T[0], cpu_regs[reg]);
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);

    if (cc.mask != -1) {
        tcg_temp_free(cc.reg);
    }
    if (!cc.use_reg2) {
        tcg_temp_free(cc.reg2);
    }
}
static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State, segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State, segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State, segs[seg_reg].base));
}
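
/* Real mode and vm86 mode have no descriptor tables; the segment base
   is simply selector << 4, which is exactly what is stored above. */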
/* Move T0 to seg_reg and compute if the CPU state may change.  Never
   call this function with seg_reg == R_CS. */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg)
{
    if (s->pe && !s->vm86) {
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(cpu_env, tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* Abort translation because the addseg value may change or
           because ss32 may change.  For R_SS, translation must always
           stop, since special handling is needed to inhibit hardware
           interrupts for the next instruction. */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = DISAS_TB_JUMP;
    }
}
static inline int svm_is_rep(int prefixes)
{
    return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
}
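
/* The value 8 corresponds to the REP bit (bit 3) of the SVM IOIO
   intercept exit information, so callers can OR this result directly
   into the intercept parameter. */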
static inline void
gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
                              uint32_t type, uint64_t param)
{
    /* no SVM activated; fast case */
    if (likely(!(s->flags & HF_SVMI_MASK)))
        return;
    gen_update_cc_op(s);
    gen_jmp_im(pc_start - s->cs_base);
    gen_helper_svm_check_intercept_param(cpu_env, tcg_const_i32(type),
                                         tcg_const_i64(param));
}

static inline void
gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
{
    gen_svm_check_intercept_param(s, pc_start, type, 0);
}
static inline void gen_stack_update(DisasContext *s, int addend)
{
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_add_reg_im(MO_64, R_ESP, addend);
    } else
#endif
    if (s->ss32) {
        gen_op_add_reg_im(MO_32, R_ESP, addend);
    } else {
        gen_op_add_reg_im(MO_16, R_ESP, addend);
    }
}
/* Generate a push. It depends on ss32, addseg and dflag. */
static void gen_push_v(DisasContext *s, TCGv val)
{
    TCGMemOp a_ot, d_ot = mo_pushpop(s, s->dflag);
    int size = 1 << d_ot;
    TCGv new_esp = cpu_A0;

    tcg_gen_subi_tl(cpu_A0, cpu_regs[R_ESP], size);

    if (CODE64(s)) {
        a_ot = MO_64;
    } else if (s->ss32) {
        a_ot = MO_32;
        if (s->addseg) {
            new_esp = cpu_tmp4;
            tcg_gen_mov_tl(new_esp, cpu_A0);
            gen_op_addl_A0_seg(s, R_SS);
        } else {
            tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
        }
    } else {
        a_ot = MO_16;
        new_esp = cpu_tmp4;
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
        tcg_gen_mov_tl(new_esp, cpu_A0);
        gen_op_addl_A0_seg(s, R_SS);
    }

    gen_op_st_v(s, d_ot, val, cpu_A0);
    gen_op_mov_reg_v(a_ot, R_ESP, new_esp);
}
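
/* ESP is committed only after gen_op_st_v() has succeeded, so a push
   that faults (e.g. on a stack-segment limit violation) leaves ESP
   unchanged and the instruction can be restarted. */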
/* Two-step pop is necessary for precise exceptions. */
static TCGMemOp gen_pop_T0(DisasContext *s)
{
    TCGMemOp d_ot = mo_pushpop(s, s->dflag);
    TCGv addr = cpu_A0;

    if (CODE64(s)) {
        addr = cpu_regs[R_ESP];
    } else if (!s->ss32) {
        tcg_gen_ext16u_tl(cpu_A0, cpu_regs[R_ESP]);
        gen_op_addl_A0_seg(s, R_SS);
    } else if (s->addseg) {
        tcg_gen_mov_tl(cpu_A0, cpu_regs[R_ESP]);
        gen_op_addl_A0_seg(s, R_SS);
    } else {
        tcg_gen_ext32u_tl(cpu_A0, cpu_regs[R_ESP]);
    }

    gen_op_ld_v(s, d_ot, cpu_T[0], addr);
    return d_ot;
}
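
/* Callers pair this with gen_pop_update() below: the load happens
   first, and ESP is only adjusted once nothing can fault any more,
   which is what makes exceptions for POP precise. */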
static void gen_pop_update(DisasContext *s, TCGMemOp ot)
{
    gen_stack_update(s, 1 << ot);
}

static void gen_stack_A0(DisasContext *s)
{
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
}
/* NOTE: wrap-around in 16-bit mode is not fully handled. */
static void gen_pusha(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    gen_op_addl_A0_im(-8 << s->dflag);
    if (!s->ss32)
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for (i = 0; i < 8; i++) {
        gen_op_mov_v_reg(MO_32, cpu_T[0], 7 - i);
        gen_op_st_v(s, s->dflag, cpu_T[0], cpu_A0);
        gen_op_addl_A0_im(1 << s->dflag);
    }
    gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
}
/* NOTE: wrap-around in 16-bit mode is not fully handled. */
static void gen_popa(DisasContext *s)
{
    int i;
    gen_op_movl_A0_reg(R_ESP);
    if (!s->ss32)
        tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
    tcg_gen_mov_tl(cpu_T[1], cpu_A0);
    tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 8 << s->dflag);
    if (s->addseg)
        gen_op_addl_A0_seg(s, R_SS);
    for (i = 0; i < 8; i++) {
        /* ESP is not reloaded */
        if (i != 3) {
            gen_op_ld_v(s, s->dflag, cpu_T[0], cpu_A0);
            gen_op_mov_reg_v(s->dflag, 7 - i, cpu_T[0]);
        }
        gen_op_addl_A0_im(1 << s->dflag);
    }
    gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
}
static void gen_enter(DisasContext *s, int esp_addend, int level)
{
    TCGMemOp ot = mo_pushpop(s, s->dflag);
    int opsize = 1 << ot;

    level &= 0x1f;
#ifdef TARGET_X86_64
    if (CODE64(s)) {
        gen_op_movl_A0_reg(R_ESP);
        gen_op_addq_A0_im(-opsize);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);

        /* push bp */
        gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
        gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter64_level(cpu_env, tcg_const_i32(level),
                                     tcg_const_i32((ot == MO_64)),
                                     cpu_T[1]);
        }
        gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[1]);
    } else
#endif
    {
        gen_op_movl_A0_reg(R_ESP);
        gen_op_addl_A0_im(-opsize);
        if (!s->ss32)
            tcg_gen_ext16u_tl(cpu_A0, cpu_A0);
        tcg_gen_mov_tl(cpu_T[1], cpu_A0);
        if (s->addseg)
            gen_op_addl_A0_seg(s, R_SS);
        /* push bp */
        gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
        gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
        if (level) {
            /* XXX: must save state */
            gen_helper_enter_level(cpu_env, tcg_const_i32(level),
                                   tcg_const_i32(s->dflag - 1),
                                   cpu_T[1]);
        }
        gen_op_mov_reg_v(ot, R_EBP, cpu_T[1]);
        tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
        gen_op_mov_reg_v(MO_16 + s->ss32, R_ESP, cpu_T[1]);
    }
}
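
/* Rough equivalence, for "ENTER imm16, 0" with 32-bit operands:
       PUSH EBP
       MOV  EBP, ESP
       SUB  ESP, imm16
   Non-zero nesting levels additionally copy the enclosing frame
   pointers, which is delegated to the enter_level helpers above. */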
static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
    s->is_jmp = DISAS_TB_JUMP;
}

/* An interrupt is different from an exception because of the
   privilege checks. */
static void gen_interrupt(DisasContext *s, int intno,
                          target_ulong cur_eip, target_ulong next_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
                               tcg_const_i32(next_eip - cur_eip));
    s->is_jmp = DISAS_TB_JUMP;
}

static void gen_debug(DisasContext *s, target_ulong cur_eip)
{
    gen_update_cc_op(s);
    gen_jmp_im(cur_eip);
    gen_helper_debug(cpu_env);
    s->is_jmp = DISAS_TB_JUMP;
}
/* Generate a generic end of block.  A trace exception is also
   generated if needed. */
static void gen_eob(DisasContext *s)
{
    gen_update_cc_op(s);
    if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
        gen_helper_reset_inhibit_irq(cpu_env);
    }
    if (s->tb->flags & HF_RF_MASK) {
        gen_helper_reset_rf(cpu_env);
    }
    if (s->singlestep_enabled) {
        gen_helper_debug(cpu_env);
    } else if (s->tf) {
        gen_helper_single_step(cpu_env);
    } else {
        tcg_gen_exit_tb(0);
    }
    s->is_jmp = DISAS_TB_JUMP;
}
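
/* Unlike the chained exits in gen_goto_tb(), tcg_gen_exit_tb(0)
   always returns to the execution loop, which can then service
   pending interrupts before selecting the next TB. */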
/* Generate a jump to eip.  No segment change may happen before this,
   since a direct jump to the next block may occur. */
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
{
    gen_update_cc_op(s);
    set_cc_op(s, CC_OP_DYNAMIC);
    if (s->jmp_opt) {
        gen_goto_tb(s, tb_num, eip);
        s->is_jmp = DISAS_TB_JUMP;
    } else {
        gen_jmp_im(eip);
        gen_eob(s);
    }
}

static void gen_jmp(DisasContext *s, target_ulong eip)
{
    gen_jmp_tb(s, eip, 0);
}
static inline void gen_ldq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
}

static inline void gen_stq_env_A0(DisasContext *s, int offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
}

static inline void gen_ldo_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_sto_env_A0(DisasContext *s, int offset)
{
    int mem_index = s->mem_index;
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, mem_index, MO_LEQ);
    tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_tmp0, mem_index, MO_LEQ);
}

static inline void gen_op_movo(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(0)));
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + offsetof(XMMReg, XMM_Q(1)));
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + offsetof(XMMReg, XMM_Q(1)));
}

static inline void gen_op_movq(int d_offset, int s_offset)
{
    tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}

static inline void gen_op_movl(int d_offset, int s_offset)
{
    tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
    tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
}

static inline void gen_op_movq_env_0(int d_offset)
{
    tcg_gen_movi_i64(cpu_tmp1_i64, 0);
    tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
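
/* The gen_op_mov{o,q,l} helpers above shuffle XMM/MMX data entirely
   within CPUX86State via the scratch temporaries; they perform no
   guest memory access and so can never fault. */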
2633 typedef void (*SSEFunc_i_ep)(TCGv_i32 val, TCGv_ptr env, TCGv_ptr reg);
2634 typedef void (*SSEFunc_l_ep)(TCGv_i64 val, TCGv_ptr env, TCGv_ptr reg);
2635 typedef void (*SSEFunc_0_epi)(TCGv_ptr env, TCGv_ptr reg, TCGv_i32 val);
2636 typedef void (*SSEFunc_0_epl)(TCGv_ptr env, TCGv_ptr reg, TCGv_i64 val);
2637 typedef void (*SSEFunc_0_epp)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b);
2638 typedef void (*SSEFunc_0_eppi)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2639 TCGv_i32 val);
2640 typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
2641 typedef void (*SSEFunc_0_eppt)(TCGv_ptr env, TCGv_ptr reg_a, TCGv_ptr reg_b,
2642 TCGv val);
2644 #define SSE_SPECIAL ((void *)1)
2645 #define SSE_DUMMY ((void *)2)
2647 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2648 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2649 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
2651 static const SSEFunc_0_epp sse_op_table1[256][4] = {
2652 /* 3DNow! extensions */
2653 [0x0e] = { SSE_DUMMY }, /* femms */
2654 [0x0f] = { SSE_DUMMY }, /* pf... */
2655 /* pure SSE operations */
2656 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2657 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2658 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2659 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2660 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2661 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2662 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2663 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2665 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2666 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2667 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2668 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd, movntss, movntsd */
2669 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttsd2si, cvttss2si */
2670 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtsd2si, cvtss2si */
2671 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2672 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2673 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2674 [0x51] = SSE_FOP(sqrt),
2675 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2676 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2677 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2678 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2679 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2680 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2681 [0x58] = SSE_FOP(add),
2682 [0x59] = SSE_FOP(mul),
2683 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2684 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2685 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2686 [0x5c] = SSE_FOP(sub),
2687 [0x5d] = SSE_FOP(min),
2688 [0x5e] = SSE_FOP(div),
2689 [0x5f] = SSE_FOP(max),
2691 [0xc2] = SSE_FOP(cmpeq),
2692 [0xc6] = { (SSEFunc_0_epp)gen_helper_shufps,
2693 (SSEFunc_0_epp)gen_helper_shufpd }, /* XXX: casts */
2695 /* SSSE3, SSE4, MOVBE, CRC32, BMI1, BMI2, ADX. */
2696 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2697 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2699 /* MMX ops and their SSE extensions */
2700 [0x60] = MMX_OP2(punpcklbw),
2701 [0x61] = MMX_OP2(punpcklwd),
2702 [0x62] = MMX_OP2(punpckldq),
2703 [0x63] = MMX_OP2(packsswb),
2704 [0x64] = MMX_OP2(pcmpgtb),
2705 [0x65] = MMX_OP2(pcmpgtw),
2706 [0x66] = MMX_OP2(pcmpgtl),
2707 [0x67] = MMX_OP2(packuswb),
2708 [0x68] = MMX_OP2(punpckhbw),
2709 [0x69] = MMX_OP2(punpckhwd),
2710 [0x6a] = MMX_OP2(punpckhdq),
2711 [0x6b] = MMX_OP2(packssdw),
2712 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2713 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2714 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
    [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2716 [0x70] = { (SSEFunc_0_epp)gen_helper_pshufw_mmx,
2717 (SSEFunc_0_epp)gen_helper_pshufd_xmm,
2718 (SSEFunc_0_epp)gen_helper_pshufhw_xmm,
2719 (SSEFunc_0_epp)gen_helper_pshuflw_xmm }, /* XXX: casts */
2720 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2721 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2722 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2723 [0x74] = MMX_OP2(pcmpeqb),
2724 [0x75] = MMX_OP2(pcmpeqw),
2725 [0x76] = MMX_OP2(pcmpeql),
2726 [0x77] = { SSE_DUMMY }, /* emms */
2727 [0x78] = { NULL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* extrq_i, insertq_i */
2728 [0x79] = { NULL, gen_helper_extrq_r, NULL, gen_helper_insertq_r },
2729 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2730 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
    [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2732 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2733 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2734 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2735 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2736 [0xd1] = MMX_OP2(psrlw),
2737 [0xd2] = MMX_OP2(psrld),
2738 [0xd3] = MMX_OP2(psrlq),
2739 [0xd4] = MMX_OP2(paddq),
2740 [0xd5] = MMX_OP2(pmullw),
2741 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2742 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2743 [0xd8] = MMX_OP2(psubusb),
2744 [0xd9] = MMX_OP2(psubusw),
2745 [0xda] = MMX_OP2(pminub),
2746 [0xdb] = MMX_OP2(pand),
2747 [0xdc] = MMX_OP2(paddusb),
2748 [0xdd] = MMX_OP2(paddusw),
2749 [0xde] = MMX_OP2(pmaxub),
2750 [0xdf] = MMX_OP2(pandn),
2751 [0xe0] = MMX_OP2(pavgb),
2752 [0xe1] = MMX_OP2(psraw),
2753 [0xe2] = MMX_OP2(psrad),
2754 [0xe3] = MMX_OP2(pavgw),
2755 [0xe4] = MMX_OP2(pmulhuw),
2756 [0xe5] = MMX_OP2(pmulhw),
2757 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
    [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2759 [0xe8] = MMX_OP2(psubsb),
2760 [0xe9] = MMX_OP2(psubsw),
2761 [0xea] = MMX_OP2(pminsw),
2762 [0xeb] = MMX_OP2(por),
2763 [0xec] = MMX_OP2(paddsb),
2764 [0xed] = MMX_OP2(paddsw),
2765 [0xee] = MMX_OP2(pmaxsw),
2766 [0xef] = MMX_OP2(pxor),
2767 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2768 [0xf1] = MMX_OP2(psllw),
2769 [0xf2] = MMX_OP2(pslld),
2770 [0xf3] = MMX_OP2(psllq),
2771 [0xf4] = MMX_OP2(pmuludq),
2772 [0xf5] = MMX_OP2(pmaddwd),
2773 [0xf6] = MMX_OP2(psadbw),
2774 [0xf7] = { (SSEFunc_0_epp)gen_helper_maskmov_mmx,
2775 (SSEFunc_0_epp)gen_helper_maskmov_xmm }, /* XXX: casts */
2776 [0xf8] = MMX_OP2(psubb),
2777 [0xf9] = MMX_OP2(psubw),
2778 [0xfa] = MMX_OP2(psubl),
2779 [0xfb] = MMX_OP2(psubq),
2780 [0xfc] = MMX_OP2(paddb),
2781 [0xfd] = MMX_OP2(paddw),
    [0xfe] = MMX_OP2(paddl),
};
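
/* sse_op_table1 is indexed as [opcode][b1], where b1 encodes the
   mandatory prefix computed in gen_sse(): 0 = none, 1 = 66, 2 = F3,
   3 = F2.  Row 0x58, for instance, resolves to addps/addpd/addss/addsd
   in that order. */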
2785 static const SSEFunc_0_epp sse_op_table2[3 * 8][2] = {
2786 [0 + 2] = MMX_OP2(psrlw),
2787 [0 + 4] = MMX_OP2(psraw),
2788 [0 + 6] = MMX_OP2(psllw),
2789 [8 + 2] = MMX_OP2(psrld),
2790 [8 + 4] = MMX_OP2(psrad),
2791 [8 + 6] = MMX_OP2(pslld),
2792 [16 + 2] = MMX_OP2(psrlq),
2793 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2794 [16 + 6] = MMX_OP2(psllq),
    [16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
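
/* Indexed as (group * 8) + the modrm reg field, where groups 0/1/2
   correspond to opcodes 0x71/0x72/0x73.  E.g. "psrlw mm, imm8" encodes
   as 0F 71 /2 and selects entry [0 + 2]. */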
2798 static const SSEFunc_0_epi sse_op_table3ai[] = {
2799 gen_helper_cvtsi2ss,
    gen_helper_cvtsi2sd
};
2803 #ifdef TARGET_X86_64
2804 static const SSEFunc_0_epl sse_op_table3aq[] = {
2805 gen_helper_cvtsq2ss,
    gen_helper_cvtsq2sd
};
2808 #endif
2810 static const SSEFunc_i_ep sse_op_table3bi[] = {
2811 gen_helper_cvttss2si,
2812 gen_helper_cvtss2si,
2813 gen_helper_cvttsd2si,
    gen_helper_cvtsd2si
};
2817 #ifdef TARGET_X86_64
2818 static const SSEFunc_l_ep sse_op_table3bq[] = {
2819 gen_helper_cvttss2sq,
2820 gen_helper_cvtss2sq,
2821 gen_helper_cvttsd2sq,
    gen_helper_cvtsd2sq
};
2824 #endif
2826 static const SSEFunc_0_epp sse_op_table4[8][4] = {
2827 SSE_FOP(cmpeq),
2828 SSE_FOP(cmplt),
2829 SSE_FOP(cmple),
2830 SSE_FOP(cmpunord),
2831 SSE_FOP(cmpneq),
2832 SSE_FOP(cmpnlt),
2833 SSE_FOP(cmpnle),
    SSE_FOP(cmpord),
};
2837 static const SSEFunc_0_epp sse_op_table5[256] = {
2838 [0x0c] = gen_helper_pi2fw,
2839 [0x0d] = gen_helper_pi2fd,
2840 [0x1c] = gen_helper_pf2iw,
2841 [0x1d] = gen_helper_pf2id,
2842 [0x8a] = gen_helper_pfnacc,
2843 [0x8e] = gen_helper_pfpnacc,
2844 [0x90] = gen_helper_pfcmpge,
2845 [0x94] = gen_helper_pfmin,
2846 [0x96] = gen_helper_pfrcp,
2847 [0x97] = gen_helper_pfrsqrt,
2848 [0x9a] = gen_helper_pfsub,
2849 [0x9e] = gen_helper_pfadd,
2850 [0xa0] = gen_helper_pfcmpgt,
2851 [0xa4] = gen_helper_pfmax,
2852 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2853 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2854 [0xaa] = gen_helper_pfsubr,
2855 [0xae] = gen_helper_pfacc,
2856 [0xb0] = gen_helper_pfcmpeq,
2857 [0xb4] = gen_helper_pfmul,
2858 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2859 [0xb7] = gen_helper_pmulhrw_mmx,
2860 [0xbb] = gen_helper_pswapd,
    [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
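
/* 3DNow! instructions all share the 0F 0F opcode and carry the real
   operation selector in a trailing imm8 byte, which is what indexes
   this table. */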
2864 struct SSEOpHelper_epp {
2865 SSEFunc_0_epp op[2];
    uint32_t ext_mask;
};
2869 struct SSEOpHelper_eppi {
2870 SSEFunc_0_eppi op[2];
    uint32_t ext_mask;
};
2874 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2875 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2876 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2877 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
2878 #define PCLMULQDQ_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, \
2879 CPUID_EXT_PCLMULQDQ }
2880 #define AESNI_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_AES }
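
/* Each entry in the two tables below pairs the MMX/XMM handlers with
   the CPUID feature bit that must be present; gen_sse() rejects the
   insn with #UD when the bit is clear in cpuid_ext_features. */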
2882 static const struct SSEOpHelper_epp sse_op_table6[256] = {
2883 [0x00] = SSSE3_OP(pshufb),
2884 [0x01] = SSSE3_OP(phaddw),
2885 [0x02] = SSSE3_OP(phaddd),
2886 [0x03] = SSSE3_OP(phaddsw),
2887 [0x04] = SSSE3_OP(pmaddubsw),
2888 [0x05] = SSSE3_OP(phsubw),
2889 [0x06] = SSSE3_OP(phsubd),
2890 [0x07] = SSSE3_OP(phsubsw),
2891 [0x08] = SSSE3_OP(psignb),
2892 [0x09] = SSSE3_OP(psignw),
2893 [0x0a] = SSSE3_OP(psignd),
2894 [0x0b] = SSSE3_OP(pmulhrsw),
2895 [0x10] = SSE41_OP(pblendvb),
2896 [0x14] = SSE41_OP(blendvps),
2897 [0x15] = SSE41_OP(blendvpd),
2898 [0x17] = SSE41_OP(ptest),
2899 [0x1c] = SSSE3_OP(pabsb),
2900 [0x1d] = SSSE3_OP(pabsw),
2901 [0x1e] = SSSE3_OP(pabsd),
2902 [0x20] = SSE41_OP(pmovsxbw),
2903 [0x21] = SSE41_OP(pmovsxbd),
2904 [0x22] = SSE41_OP(pmovsxbq),
2905 [0x23] = SSE41_OP(pmovsxwd),
2906 [0x24] = SSE41_OP(pmovsxwq),
2907 [0x25] = SSE41_OP(pmovsxdq),
2908 [0x28] = SSE41_OP(pmuldq),
2909 [0x29] = SSE41_OP(pcmpeqq),
    [0x2a] = SSE41_SPECIAL, /* movntdqa */
2911 [0x2b] = SSE41_OP(packusdw),
2912 [0x30] = SSE41_OP(pmovzxbw),
2913 [0x31] = SSE41_OP(pmovzxbd),
2914 [0x32] = SSE41_OP(pmovzxbq),
2915 [0x33] = SSE41_OP(pmovzxwd),
2916 [0x34] = SSE41_OP(pmovzxwq),
2917 [0x35] = SSE41_OP(pmovzxdq),
2918 [0x37] = SSE42_OP(pcmpgtq),
2919 [0x38] = SSE41_OP(pminsb),
2920 [0x39] = SSE41_OP(pminsd),
2921 [0x3a] = SSE41_OP(pminuw),
2922 [0x3b] = SSE41_OP(pminud),
2923 [0x3c] = SSE41_OP(pmaxsb),
2924 [0x3d] = SSE41_OP(pmaxsd),
2925 [0x3e] = SSE41_OP(pmaxuw),
2926 [0x3f] = SSE41_OP(pmaxud),
2927 [0x40] = SSE41_OP(pmulld),
2928 [0x41] = SSE41_OP(phminposuw),
2929 [0xdb] = AESNI_OP(aesimc),
2930 [0xdc] = AESNI_OP(aesenc),
2931 [0xdd] = AESNI_OP(aesenclast),
2932 [0xde] = AESNI_OP(aesdec),
    [0xdf] = AESNI_OP(aesdeclast),
};
2936 static const struct SSEOpHelper_eppi sse_op_table7[256] = {
2937 [0x08] = SSE41_OP(roundps),
2938 [0x09] = SSE41_OP(roundpd),
2939 [0x0a] = SSE41_OP(roundss),
2940 [0x0b] = SSE41_OP(roundsd),
2941 [0x0c] = SSE41_OP(blendps),
2942 [0x0d] = SSE41_OP(blendpd),
2943 [0x0e] = SSE41_OP(pblendw),
2944 [0x0f] = SSSE3_OP(palignr),
2945 [0x14] = SSE41_SPECIAL, /* pextrb */
2946 [0x15] = SSE41_SPECIAL, /* pextrw */
2947 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2948 [0x17] = SSE41_SPECIAL, /* extractps */
2949 [0x20] = SSE41_SPECIAL, /* pinsrb */
2950 [0x21] = SSE41_SPECIAL, /* insertps */
2951 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2952 [0x40] = SSE41_OP(dpps),
2953 [0x41] = SSE41_OP(dppd),
2954 [0x42] = SSE41_OP(mpsadbw),
2955 [0x44] = PCLMULQDQ_OP(pclmulqdq),
2956 [0x60] = SSE42_OP(pcmpestrm),
2957 [0x61] = SSE42_OP(pcmpestri),
2958 [0x62] = SSE42_OP(pcmpistrm),
2959 [0x63] = SSE42_OP(pcmpistri),
    [0xdf] = AESNI_OP(aeskeygenassist),
};
static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                    target_ulong pc_start, int rex_r)
{
    int b1, op1_offset, op2_offset, is_xmm, val;
    int modrm, mod, rm, reg;
    SSEFunc_0_epp sse_fn_epp;
    SSEFunc_0_eppi sse_fn_eppi;
    SSEFunc_0_ppi sse_fn_ppi;
    SSEFunc_0_eppt sse_fn_eppt;
    TCGMemOp ot;

    b &= 0xff;
2975 if (s->prefix & PREFIX_DATA)
2976 b1 = 1;
2977 else if (s->prefix & PREFIX_REPZ)
2978 b1 = 2;
2979 else if (s->prefix & PREFIX_REPNZ)
2980 b1 = 3;
2981 else
2982 b1 = 0;
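    /* b1 now selects the prefix column used by the SSE tables:
       0 = no prefix, 1 = 66, 2 = F3, 3 = F2. */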
    sse_fn_epp = sse_op_table1[b][b1];
    if (!sse_fn_epp) {
        goto illegal_op;
    }
    if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
        is_xmm = 1;
    } else {
        if (b1 == 0) {
            /* MMX case */
            is_xmm = 0;
        } else {
            is_xmm = 1;
        }
    }
    /* simple MMX/SSE operation */
    if (s->flags & HF_TS_MASK) {
        gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
        return;
    }
    if (s->flags & HF_EM_MASK) {
    illegal_op:
        gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
        return;
    }
    if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
        if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
            goto illegal_op;
    if (b == 0x0e) {
        if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
            goto illegal_op;
        /* femms */
        gen_helper_emms(cpu_env);
        return;
    }
    if (b == 0x77) {
        /* emms */
        gen_helper_emms(cpu_env);
        return;
    }
    /* prepare MMX state (XXX: optimize by storing fptt and fptags in
       the static cpu state) */
    if (!is_xmm) {
        gen_helper_enter_mmx(cpu_env);
    }
3028 modrm = cpu_ldub_code(env, s->pc++);
3029 reg = ((modrm >> 3) & 7);
3030 if (is_xmm)
3031 reg |= rex_r;
3032 mod = (modrm >> 6) & 3;
3033 if (sse_fn_epp == SSE_SPECIAL) {
3034 b |= (b1 << 8);
3035 switch(b) {
3036 case 0x0e7: /* movntq */
3037 if (mod == 3)
3038 goto illegal_op;
3039 gen_lea_modrm(env, s, modrm);
3040 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3041 break;
3042 case 0x1e7: /* movntdq */
3043 case 0x02b: /* movntps */
        case 0x12b: /* movntpd */
3045 if (mod == 3)
3046 goto illegal_op;
3047 gen_lea_modrm(env, s, modrm);
3048 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3049 break;
3050 case 0x3f0: /* lddqu */
3051 if (mod == 3)
3052 goto illegal_op;
3053 gen_lea_modrm(env, s, modrm);
3054 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3055 break;
3056 case 0x22b: /* movntss */
3057 case 0x32b: /* movntsd */
3058 if (mod == 3)
3059 goto illegal_op;
3060 gen_lea_modrm(env, s, modrm);
3061 if (b1 & 1) {
3062 gen_stq_env_A0(s, offsetof(CPUX86State,
3063 xmm_regs[reg].XMM_Q(0)));
3064 } else {
3065 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3066 xmm_regs[reg].XMM_L(0)));
3067 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3069 break;
3070 case 0x6e: /* movd mm, ea */
3071 #ifdef TARGET_X86_64
3072 if (s->dflag == MO_64) {
3073 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3074 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3075 } else
3076 #endif
3078 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3079 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3080 offsetof(CPUX86State,fpregs[reg].mmx));
3081 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3082 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3084 break;
3085 case 0x16e: /* movd xmm, ea */
3086 #ifdef TARGET_X86_64
3087 if (s->dflag == MO_64) {
3088 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 0);
3089 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3090 offsetof(CPUX86State,xmm_regs[reg]));
3091 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3092 } else
3093 #endif
3095 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 0);
3096 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3097 offsetof(CPUX86State,xmm_regs[reg]));
3098 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3099 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3101 break;
3102 case 0x6f: /* movq mm, ea */
3103 if (mod != 3) {
3104 gen_lea_modrm(env, s, modrm);
3105 gen_ldq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3106 } else {
3107 rm = (modrm & 7);
3108 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3109 offsetof(CPUX86State,fpregs[rm].mmx));
3110 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3111 offsetof(CPUX86State,fpregs[reg].mmx));
3113 break;
3114 case 0x010: /* movups */
3115 case 0x110: /* movupd */
3116 case 0x028: /* movaps */
3117 case 0x128: /* movapd */
3118 case 0x16f: /* movdqa xmm, ea */
3119 case 0x26f: /* movdqu xmm, ea */
3120 if (mod != 3) {
3121 gen_lea_modrm(env, s, modrm);
3122 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3123 } else {
3124 rm = (modrm & 7) | REX_B(s);
3125 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3126 offsetof(CPUX86State,xmm_regs[rm]));
3128 break;
3129 case 0x210: /* movss xmm, ea */
3130 if (mod != 3) {
3131 gen_lea_modrm(env, s, modrm);
3132 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3133 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3134 tcg_gen_movi_tl(cpu_T[0], 0);
3135 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3136 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3137 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3138 } else {
3139 rm = (modrm & 7) | REX_B(s);
3140 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3141 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3143 break;
3144 case 0x310: /* movsd xmm, ea */
3145 if (mod != 3) {
3146 gen_lea_modrm(env, s, modrm);
3147 gen_ldq_env_A0(s, offsetof(CPUX86State,
3148 xmm_regs[reg].XMM_Q(0)));
3149 tcg_gen_movi_tl(cpu_T[0], 0);
3150 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3151 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3152 } else {
3153 rm = (modrm & 7) | REX_B(s);
3154 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3155 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3157 break;
3158 case 0x012: /* movlps */
3159 case 0x112: /* movlpd */
3160 if (mod != 3) {
3161 gen_lea_modrm(env, s, modrm);
3162 gen_ldq_env_A0(s, offsetof(CPUX86State,
3163 xmm_regs[reg].XMM_Q(0)));
3164 } else {
3165 /* movhlps */
3166 rm = (modrm & 7) | REX_B(s);
3167 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3168 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3170 break;
3171 case 0x212: /* movsldup */
3172 if (mod != 3) {
3173 gen_lea_modrm(env, s, modrm);
3174 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3175 } else {
3176 rm = (modrm & 7) | REX_B(s);
3177 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3178 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3179 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3180 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3182 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3183 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3184 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3185 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3186 break;
3187 case 0x312: /* movddup */
3188 if (mod != 3) {
3189 gen_lea_modrm(env, s, modrm);
3190 gen_ldq_env_A0(s, offsetof(CPUX86State,
3191 xmm_regs[reg].XMM_Q(0)));
3192 } else {
3193 rm = (modrm & 7) | REX_B(s);
3194 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3195 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3197 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3198 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3199 break;
3200 case 0x016: /* movhps */
3201 case 0x116: /* movhpd */
3202 if (mod != 3) {
3203 gen_lea_modrm(env, s, modrm);
3204 gen_ldq_env_A0(s, offsetof(CPUX86State,
3205 xmm_regs[reg].XMM_Q(1)));
3206 } else {
3207 /* movlhps */
3208 rm = (modrm & 7) | REX_B(s);
3209 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3210 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3212 break;
3213 case 0x216: /* movshdup */
3214 if (mod != 3) {
3215 gen_lea_modrm(env, s, modrm);
3216 gen_ldo_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3217 } else {
3218 rm = (modrm & 7) | REX_B(s);
3219 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3220 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3221 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3222 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3224 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3225 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3226 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3227 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3228 break;
3229 case 0x178:
3230 case 0x378:
3232 int bit_index, field_length;
3234 if (b1 == 1 && reg != 0)
3235 goto illegal_op;
3236 field_length = cpu_ldub_code(env, s->pc++) & 0x3F;
3237 bit_index = cpu_ldub_code(env, s->pc++) & 0x3F;
3238 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3239 offsetof(CPUX86State,xmm_regs[reg]));
3240 if (b1 == 1)
3241 gen_helper_extrq_i(cpu_env, cpu_ptr0,
3242 tcg_const_i32(bit_index),
3243 tcg_const_i32(field_length));
3244 else
3245 gen_helper_insertq_i(cpu_env, cpu_ptr0,
3246 tcg_const_i32(bit_index),
3247 tcg_const_i32(field_length));
3249 break;
3250 case 0x7e: /* movd ea, mm */
3251 #ifdef TARGET_X86_64
3252 if (s->dflag == MO_64) {
3253 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3254 offsetof(CPUX86State,fpregs[reg].mmx));
3255 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3256 } else
3257 #endif
3259 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3260 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3261 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3263 break;
3264 case 0x17e: /* movd ea, xmm */
3265 #ifdef TARGET_X86_64
3266 if (s->dflag == MO_64) {
3267 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3268 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3269 gen_ldst_modrm(env, s, modrm, MO_64, OR_TMP0, 1);
3270 } else
3271 #endif
3273 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3274 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3275 gen_ldst_modrm(env, s, modrm, MO_32, OR_TMP0, 1);
3277 break;
3278 case 0x27e: /* movq xmm, ea */
3279 if (mod != 3) {
3280 gen_lea_modrm(env, s, modrm);
3281 gen_ldq_env_A0(s, offsetof(CPUX86State,
3282 xmm_regs[reg].XMM_Q(0)));
3283 } else {
3284 rm = (modrm & 7) | REX_B(s);
3285 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3286 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3288 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3289 break;
3290 case 0x7f: /* movq ea, mm */
3291 if (mod != 3) {
3292 gen_lea_modrm(env, s, modrm);
3293 gen_stq_env_A0(s, offsetof(CPUX86State, fpregs[reg].mmx));
3294 } else {
3295 rm = (modrm & 7);
3296 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3297 offsetof(CPUX86State,fpregs[reg].mmx));
3299 break;
3300 case 0x011: /* movups */
3301 case 0x111: /* movupd */
3302 case 0x029: /* movaps */
3303 case 0x129: /* movapd */
3304 case 0x17f: /* movdqa ea, xmm */
3305 case 0x27f: /* movdqu ea, xmm */
3306 if (mod != 3) {
3307 gen_lea_modrm(env, s, modrm);
3308 gen_sto_env_A0(s, offsetof(CPUX86State, xmm_regs[reg]));
3309 } else {
3310 rm = (modrm & 7) | REX_B(s);
3311 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3312 offsetof(CPUX86State,xmm_regs[reg]));
3314 break;
3315 case 0x211: /* movss ea, xmm */
3316 if (mod != 3) {
3317 gen_lea_modrm(env, s, modrm);
3318 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3319 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
3320 } else {
3321 rm = (modrm & 7) | REX_B(s);
3322 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3323 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3325 break;
3326 case 0x311: /* movsd ea, xmm */
3327 if (mod != 3) {
3328 gen_lea_modrm(env, s, modrm);
3329 gen_stq_env_A0(s, offsetof(CPUX86State,
3330 xmm_regs[reg].XMM_Q(0)));
3331 } else {
3332 rm = (modrm & 7) | REX_B(s);
3333 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3334 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3336 break;
3337 case 0x013: /* movlps */
3338 case 0x113: /* movlpd */
3339 if (mod != 3) {
3340 gen_lea_modrm(env, s, modrm);
3341 gen_stq_env_A0(s, offsetof(CPUX86State,
3342 xmm_regs[reg].XMM_Q(0)));
3343 } else {
3344 goto illegal_op;
3346 break;
3347 case 0x017: /* movhps */
3348 case 0x117: /* movhpd */
3349 if (mod != 3) {
3350 gen_lea_modrm(env, s, modrm);
3351 gen_stq_env_A0(s, offsetof(CPUX86State,
3352 xmm_regs[reg].XMM_Q(1)));
3353 } else {
3354 goto illegal_op;
3356 break;
3357 case 0x71: /* shift mm, im */
3358 case 0x72:
3359 case 0x73:
3360 case 0x171: /* shift xmm, im */
3361 case 0x172:
3362 case 0x173:
3363 if (b1 >= 2) {
3364 goto illegal_op;
3366 val = cpu_ldub_code(env, s->pc++);
3367 if (is_xmm) {
3368 tcg_gen_movi_tl(cpu_T[0], val);
3369 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3370 tcg_gen_movi_tl(cpu_T[0], 0);
3371 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3372 op1_offset = offsetof(CPUX86State,xmm_t0);
3373 } else {
3374 tcg_gen_movi_tl(cpu_T[0], val);
3375 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3376 tcg_gen_movi_tl(cpu_T[0], 0);
3377 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3378 op1_offset = offsetof(CPUX86State,mmx_t0);
3380 sse_fn_epp = sse_op_table2[((b - 1) & 3) * 8 +
3381 (((modrm >> 3)) & 7)][b1];
3382 if (!sse_fn_epp) {
3383 goto illegal_op;
3385 if (is_xmm) {
3386 rm = (modrm & 7) | REX_B(s);
3387 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3388 } else {
3389 rm = (modrm & 7);
3390 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3392 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3393 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3394 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3395 break;
3396 case 0x050: /* movmskps */
3397 rm = (modrm & 7) | REX_B(s);
3398 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3399 offsetof(CPUX86State,xmm_regs[rm]));
3400 gen_helper_movmskps(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3401 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3402 break;
3403 case 0x150: /* movmskpd */
3404 rm = (modrm & 7) | REX_B(s);
3405 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3406 offsetof(CPUX86State,xmm_regs[rm]));
3407 gen_helper_movmskpd(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3408 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3409 break;
3410 case 0x02a: /* cvtpi2ps */
3411 case 0x12a: /* cvtpi2pd */
3412 gen_helper_enter_mmx(cpu_env);
3413 if (mod != 3) {
3414 gen_lea_modrm(env, s, modrm);
3415 op2_offset = offsetof(CPUX86State,mmx_t0);
3416 gen_ldq_env_A0(s, op2_offset);
3417 } else {
3418 rm = (modrm & 7);
3419 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3421 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3422 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3423 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3424 switch(b >> 8) {
3425 case 0x0:
3426 gen_helper_cvtpi2ps(cpu_env, cpu_ptr0, cpu_ptr1);
3427 break;
3428 default:
3429 case 0x1:
3430 gen_helper_cvtpi2pd(cpu_env, cpu_ptr0, cpu_ptr1);
3431 break;
3433 break;
3434 case 0x22a: /* cvtsi2ss */
3435 case 0x32a: /* cvtsi2sd */
3436 ot = mo_64_32(s->dflag);
3437 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3438 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3439 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3440 if (ot == MO_32) {
3441 SSEFunc_0_epi sse_fn_epi = sse_op_table3ai[(b >> 8) & 1];
3442 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3443 sse_fn_epi(cpu_env, cpu_ptr0, cpu_tmp2_i32);
3444 } else {
3445 #ifdef TARGET_X86_64
3446 SSEFunc_0_epl sse_fn_epl = sse_op_table3aq[(b >> 8) & 1];
3447 sse_fn_epl(cpu_env, cpu_ptr0, cpu_T[0]);
3448 #else
3449 goto illegal_op;
3450 #endif
3452 break;
3453 case 0x02c: /* cvttps2pi */
3454 case 0x12c: /* cvttpd2pi */
3455 case 0x02d: /* cvtps2pi */
3456 case 0x12d: /* cvtpd2pi */
3457 gen_helper_enter_mmx(cpu_env);
3458 if (mod != 3) {
3459 gen_lea_modrm(env, s, modrm);
3460 op2_offset = offsetof(CPUX86State,xmm_t0);
3461 gen_ldo_env_A0(s, op2_offset);
3462 } else {
3463 rm = (modrm & 7) | REX_B(s);
3464 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3466 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3467 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3468 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3469 switch(b) {
3470 case 0x02c:
3471 gen_helper_cvttps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3472 break;
3473 case 0x12c:
3474 gen_helper_cvttpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3475 break;
3476 case 0x02d:
3477 gen_helper_cvtps2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3478 break;
3479 case 0x12d:
3480 gen_helper_cvtpd2pi(cpu_env, cpu_ptr0, cpu_ptr1);
3481 break;
3483 break;
3484 case 0x22c: /* cvttss2si */
3485 case 0x32c: /* cvttsd2si */
3486 case 0x22d: /* cvtss2si */
3487 case 0x32d: /* cvtsd2si */
3488 ot = mo_64_32(s->dflag);
3489 if (mod != 3) {
3490 gen_lea_modrm(env, s, modrm);
3491 if ((b >> 8) & 1) {
3492 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_Q(0)));
3493 } else {
3494 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
3495 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3497 op2_offset = offsetof(CPUX86State,xmm_t0);
3498 } else {
3499 rm = (modrm & 7) | REX_B(s);
3500 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3502 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3503 if (ot == MO_32) {
3504 SSEFunc_i_ep sse_fn_i_ep =
3505 sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
3506 sse_fn_i_ep(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3507 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3508 } else {
3509 #ifdef TARGET_X86_64
3510 SSEFunc_l_ep sse_fn_l_ep =
3511 sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
3512 sse_fn_l_ep(cpu_T[0], cpu_env, cpu_ptr0);
3513 #else
3514 goto illegal_op;
3515 #endif
3517 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3518 break;
3519 case 0xc4: /* pinsrw */
3520 case 0x1c4:
3521 s->rip_offset = 1;
3522 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
3523 val = cpu_ldub_code(env, s->pc++);
3524 if (b1) {
3525 val &= 7;
3526 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3527 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3528 } else {
3529 val &= 3;
3530 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3531 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3533 break;
3534 case 0xc5: /* pextrw */
3535 case 0x1c5:
3536 if (mod != 3)
3537 goto illegal_op;
3538 ot = mo_64_32(s->dflag);
3539 val = cpu_ldub_code(env, s->pc++);
3540 if (b1) {
3541 val &= 7;
3542 rm = (modrm & 7) | REX_B(s);
3543 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3544 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3545 } else {
3546 val &= 3;
3547 rm = (modrm & 7);
3548 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3549 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3551 reg = ((modrm >> 3) & 7) | rex_r;
3552 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3553 break;
3554 case 0x1d6: /* movq ea, xmm */
3555 if (mod != 3) {
3556 gen_lea_modrm(env, s, modrm);
3557 gen_stq_env_A0(s, offsetof(CPUX86State,
3558 xmm_regs[reg].XMM_Q(0)));
3559 } else {
3560 rm = (modrm & 7) | REX_B(s);
3561 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3562 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3563 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3565 break;
3566 case 0x2d6: /* movq2dq */
3567 gen_helper_enter_mmx(cpu_env);
3568 rm = (modrm & 7);
3569 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3570 offsetof(CPUX86State,fpregs[rm].mmx));
3571 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3572 break;
3573 case 0x3d6: /* movdq2q */
3574 gen_helper_enter_mmx(cpu_env);
3575 rm = (modrm & 7) | REX_B(s);
3576 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3577 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3578 break;
3579 case 0xd7: /* pmovmskb */
3580 case 0x1d7:
3581 if (mod != 3)
3582 goto illegal_op;
3583 if (b1) {
3584 rm = (modrm & 7) | REX_B(s);
3585 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3586 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3587 } else {
3588 rm = (modrm & 7);
3589 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3590 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_env, cpu_ptr0);
3592 reg = ((modrm >> 3) & 7) | rex_r;
3593 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
3594 break;
3596 case 0x138:
3597 case 0x038:
3598 b = modrm;
3599 if ((b & 0xf0) == 0xf0) {
3600 goto do_0f_38_fx;
3602 modrm = cpu_ldub_code(env, s->pc++);
3603 rm = modrm & 7;
3604 reg = ((modrm >> 3) & 7) | rex_r;
3605 mod = (modrm >> 6) & 3;
3606 if (b1 >= 2) {
3607 goto illegal_op;
3610 sse_fn_epp = sse_op_table6[b].op[b1];
3611 if (!sse_fn_epp) {
3612 goto illegal_op;
3614 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3615 goto illegal_op;
3617 if (b1) {
3618 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3619 if (mod == 3) {
3620 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3621 } else {
3622 op2_offset = offsetof(CPUX86State,xmm_t0);
3623 gen_lea_modrm(env, s, modrm);
3624 switch (b) {
3625 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3626 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3627 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3628 gen_ldq_env_A0(s, op2_offset +
3629 offsetof(XMMReg, XMM_Q(0)));
3630 break;
3631 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3632 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3633 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
3634 s->mem_index, MO_LEUL);
3635 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3636 offsetof(XMMReg, XMM_L(0)));
3637 break;
3638 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3639 tcg_gen_qemu_ld_tl(cpu_tmp0, cpu_A0,
3640 s->mem_index, MO_LEUW);
3641 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3642 offsetof(XMMReg, XMM_W(0)));
3643 break;
                case 0x2a: /* movntdqa */
3645 gen_ldo_env_A0(s, op1_offset);
3646 return;
3647 default:
3648 gen_ldo_env_A0(s, op2_offset);
3651 } else {
3652 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3653 if (mod == 3) {
3654 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3655 } else {
3656 op2_offset = offsetof(CPUX86State,mmx_t0);
3657 gen_lea_modrm(env, s, modrm);
3658 gen_ldq_env_A0(s, op2_offset);
3661 if (sse_fn_epp == SSE_SPECIAL) {
3662 goto illegal_op;
3665 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3666 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3667 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
3669 if (b == 0x17) {
3670 set_cc_op(s, CC_OP_EFLAGS);
3672 break;
3674 case 0x238:
3675 case 0x338:
3676 do_0f_38_fx:
3677 /* Various integer extensions at 0f 38 f[0-f]. */
3678 b = modrm | (b1 << 8);
3679 modrm = cpu_ldub_code(env, s->pc++);
3680 reg = ((modrm >> 3) & 7) | rex_r;
3682 switch (b) {
3683 case 0x3f0: /* crc32 Gd,Eb */
3684 case 0x3f1: /* crc32 Gd,Ey */
3685 do_crc32:
3686 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42)) {
3687 goto illegal_op;
3689 if ((b & 0xff) == 0xf0) {
3690 ot = MO_8;
3691 } else if (s->dflag != MO_64) {
3692 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3693 } else {
3694 ot = MO_64;
3697 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[reg]);
3698 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3699 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3700 cpu_T[0], tcg_const_i32(8 << ot));
3702 ot = mo_64_32(s->dflag);
3703 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3704 break;
3706 case 0x1f0: /* crc32 or movbe */
3707 case 0x1f1:
            /* For these insns, the F3 prefix is supposed to take priority
               over the 66 prefix, but that is not how b1 was computed
               above. */
3711 if (s->prefix & PREFIX_REPNZ) {
3712 goto do_crc32;
3714 /* FALLTHRU */
3715 case 0x0f0: /* movbe Gy,My */
3716 case 0x0f1: /* movbe My,Gy */
3717 if (!(s->cpuid_ext_features & CPUID_EXT_MOVBE)) {
3718 goto illegal_op;
3720 if (s->dflag != MO_64) {
3721 ot = (s->prefix & PREFIX_DATA ? MO_16 : MO_32);
3722 } else {
3723 ot = MO_64;
3726 gen_lea_modrm(env, s, modrm);
3727 if ((b & 1) == 0) {
3728 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
3729 s->mem_index, ot | MO_BE);
3730 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3731 } else {
3732 tcg_gen_qemu_st_tl(cpu_regs[reg], cpu_A0,
3733 s->mem_index, ot | MO_BE);
3735 break;
3737 case 0x0f2: /* andn Gy, By, Ey */
3738 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3739 || !(s->prefix & PREFIX_VEX)
3740 || s->vex_l != 0) {
3741 goto illegal_op;
3743 ot = mo_64_32(s->dflag);
3744 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3745 tcg_gen_andc_tl(cpu_T[0], cpu_regs[s->vex_v], cpu_T[0]);
3746 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3747 gen_op_update1_cc();
3748 set_cc_op(s, CC_OP_LOGICB + ot);
3749 break;
3751 case 0x0f7: /* bextr Gy, Ey, By */
3752 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3753 || !(s->prefix & PREFIX_VEX)
3754 || s->vex_l != 0) {
3755 goto illegal_op;
3757 ot = mo_64_32(s->dflag);
3759 TCGv bound, zero;
3761 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3762 /* Extract START, and shift the operand.
3763 Shifts larger than operand size get zeros. */
3764 tcg_gen_ext8u_tl(cpu_A0, cpu_regs[s->vex_v]);
3765 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_A0);
3767 bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3768 zero = tcg_const_tl(0);
3769 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_T[0], cpu_A0, bound,
3770 cpu_T[0], zero);
3771 tcg_temp_free(zero);
3773 /* Extract the LEN into a mask. Lengths larger than
3774 operand size get all ones. */
3775 tcg_gen_shri_tl(cpu_A0, cpu_regs[s->vex_v], 8);
3776 tcg_gen_ext8u_tl(cpu_A0, cpu_A0);
3777 tcg_gen_movcond_tl(TCG_COND_LEU, cpu_A0, cpu_A0, bound,
3778 cpu_A0, bound);
3779 tcg_temp_free(bound);
3780 tcg_gen_movi_tl(cpu_T[1], 1);
3781 tcg_gen_shl_tl(cpu_T[1], cpu_T[1], cpu_A0);
3782 tcg_gen_subi_tl(cpu_T[1], cpu_T[1], 1);
3783 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3785 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3786 gen_op_update1_cc();
3787 set_cc_op(s, CC_OP_LOGICB + ot);
3789 break;
3791 case 0x0f5: /* bzhi Gy, Ey, By */
3792 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3793 || !(s->prefix & PREFIX_VEX)
3794 || s->vex_l != 0) {
3795 goto illegal_op;
3797 ot = mo_64_32(s->dflag);
3798 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3799 tcg_gen_ext8u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3801 TCGv bound = tcg_const_tl(ot == MO_64 ? 63 : 31);
3802 /* Note that since we're using BMILG (in order to get O
3803 cleared) we need to store the inverse into C. */
3804 tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src,
3805 cpu_T[1], bound);
3806 tcg_gen_movcond_tl(TCG_COND_GT, cpu_T[1], cpu_T[1],
3807 bound, bound, cpu_T[1]);
3808 tcg_temp_free(bound);
3810 tcg_gen_movi_tl(cpu_A0, -1);
3811 tcg_gen_shl_tl(cpu_A0, cpu_A0, cpu_T[1]);
3812 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_A0);
3813 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3814 gen_op_update1_cc();
3815 set_cc_op(s, CC_OP_BMILGB + ot);
3816 break;
3818 case 0x3f6: /* mulx By, Gy, rdx, Ey */
3819 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3820 || !(s->prefix & PREFIX_VEX)
3821 || s->vex_l != 0) {
3822 goto illegal_op;
3824 ot = mo_64_32(s->dflag);
3825 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3826 switch (ot) {
3827 default:
3828 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3829 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EDX]);
3830 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
3831 cpu_tmp2_i32, cpu_tmp3_i32);
3832 tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], cpu_tmp2_i32);
3833 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp3_i32);
3834 break;
3835 #ifdef TARGET_X86_64
3836 case MO_64:
3837 tcg_gen_mulu2_i64(cpu_regs[s->vex_v], cpu_regs[reg],
3838 cpu_T[0], cpu_regs[R_EDX]);
3839 break;
3840 #endif
3842 break;
3844 case 0x3f5: /* pdep Gy, By, Ey */
3845 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3846 || !(s->prefix & PREFIX_VEX)
3847 || s->vex_l != 0) {
3848 goto illegal_op;
3850 ot = mo_64_32(s->dflag);
3851 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3852 /* Note that by zero-extending the mask operand, we
3853 automatically handle zero-extending the result. */
3854 if (ot == MO_64) {
3855 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3856 } else {
3857 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3859 gen_helper_pdep(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3860 break;
3862 case 0x2f5: /* pext Gy, By, Ey */
3863 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3864 || !(s->prefix & PREFIX_VEX)
3865 || s->vex_l != 0) {
3866 goto illegal_op;
3868 ot = mo_64_32(s->dflag);
3869 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3870 /* Note that by zero-extending the mask operand, we
3871 automatically handle zero-extending the result. */
3872 if (ot == MO_64) {
3873 tcg_gen_mov_tl(cpu_T[1], cpu_regs[s->vex_v]);
3874 } else {
3875 tcg_gen_ext32u_tl(cpu_T[1], cpu_regs[s->vex_v]);
3877 gen_helper_pext(cpu_regs[reg], cpu_T[0], cpu_T[1]);
3878 break;
3880 case 0x1f6: /* adcx Gy, Ey */
3881 case 0x2f6: /* adox Gy, Ey */
3882 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX)) {
3883 goto illegal_op;
3884 } else {
3885 TCGv carry_in, carry_out, zero;
3886 int end_op;
3888 ot = mo_64_32(s->dflag);
3889 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3891 /* Re-use the carry-out from a previous round. */
3892 TCGV_UNUSED(carry_in);
3893 carry_out = (b == 0x1f6 ? cpu_cc_dst : cpu_cc_src2);
3894 switch (s->cc_op) {
3895 case CC_OP_ADCX:
3896 if (b == 0x1f6) {
3897 carry_in = cpu_cc_dst;
3898 end_op = CC_OP_ADCX;
3899 } else {
3900 end_op = CC_OP_ADCOX;
3902 break;
3903 case CC_OP_ADOX:
3904 if (b == 0x1f6) {
3905 end_op = CC_OP_ADCOX;
3906 } else {
3907 carry_in = cpu_cc_src2;
3908 end_op = CC_OP_ADOX;
3910 break;
3911 case CC_OP_ADCOX:
3912 end_op = CC_OP_ADCOX;
3913 carry_in = carry_out;
3914 break;
3915 default:
3916 end_op = (b == 0x1f6 ? CC_OP_ADCX : CC_OP_ADOX);
3917 break;
3919 /* If we can't reuse carry-out, get it out of EFLAGS. */
3920 if (TCGV_IS_UNUSED(carry_in)) {
3921 if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
3922 gen_compute_eflags(s);
3924 carry_in = cpu_tmp0;
3925 tcg_gen_shri_tl(carry_in, cpu_cc_src,
3926 ctz32(b == 0x1f6 ? CC_C : CC_O));
3927 tcg_gen_andi_tl(carry_in, carry_in, 1);
3930 switch (ot) {
3931 #ifdef TARGET_X86_64
3932 case MO_32:
3933 /* If we know TL is 64-bit, and we want a 32-bit
3934 result, just do everything in 64-bit arithmetic. */
3935 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_regs[reg]);
3936 tcg_gen_ext32u_i64(cpu_T[0], cpu_T[0]);
3937 tcg_gen_add_i64(cpu_T[0], cpu_T[0], cpu_regs[reg]);
3938 tcg_gen_add_i64(cpu_T[0], cpu_T[0], carry_in);
3939 tcg_gen_ext32u_i64(cpu_regs[reg], cpu_T[0]);
3940 tcg_gen_shri_i64(carry_out, cpu_T[0], 32);
3941 break;
3942 #endif
3943 default:
3944 /* Otherwise compute the carry-out in two steps. */
3945 zero = tcg_const_tl(0);
3946 tcg_gen_add2_tl(cpu_T[0], carry_out,
3947 cpu_T[0], zero,
3948 carry_in, zero);
3949 tcg_gen_add2_tl(cpu_regs[reg], carry_out,
3950 cpu_regs[reg], carry_out,
3951 cpu_T[0], zero);
3952 tcg_temp_free(zero);
3953 break;
3955 set_cc_op(s, end_op);
3957 break;
3959 case 0x1f7: /* shlx Gy, Ey, By */
3960 case 0x2f7: /* sarx Gy, Ey, By */
3961 case 0x3f7: /* shrx Gy, Ey, By */
3962 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
3963 || !(s->prefix & PREFIX_VEX)
3964 || s->vex_l != 0) {
3965 goto illegal_op;
3967 ot = mo_64_32(s->dflag);
3968 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
3969 if (ot == MO_64) {
3970 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 63);
3971 } else {
3972 tcg_gen_andi_tl(cpu_T[1], cpu_regs[s->vex_v], 31);
3974 if (b == 0x1f7) {
3975 tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3976 } else if (b == 0x2f7) {
3977 if (ot != MO_64) {
3978 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
3980 tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3981 } else {
3982 if (ot != MO_64) {
3983 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
3985 tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
3987 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
3988 break;
3990 case 0x0f3:
3991 case 0x1f3:
3992 case 0x2f3:
3993 case 0x3f3: /* Group 17 */
3994 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)
3995 || !(s->prefix & PREFIX_VEX)
3996 || s->vex_l != 0) {
3997 goto illegal_op;
3999 ot = mo_64_32(s->dflag);
4000 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4002 switch (reg & 7) {
4003 case 1: /* blsr By,Ey */
4004 tcg_gen_neg_tl(cpu_T[1], cpu_T[0]);
4005 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4006 gen_op_mov_reg_v(ot, s->vex_v, cpu_T[0]);
4007 gen_op_update2_cc();
4008 set_cc_op(s, CC_OP_BMILGB + ot);
4009 break;
4011 case 2: /* blsmsk By,Ey */
4012 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4013 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4014 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4015 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4016 set_cc_op(s, CC_OP_BMILGB + ot);
4017 break;
4019 case 3: /* blsi By, Ey */
4020 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4021 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], 1);
4022 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_cc_src);
4023 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4024 set_cc_op(s, CC_OP_BMILGB + ot);
4025 break;
4027 default:
4028 goto illegal_op;
4030 break;
4032 default:
4033 goto illegal_op;
4035 break;
4037 case 0x03a:
4038 case 0x13a:
4039 b = modrm;
4040 modrm = cpu_ldub_code(env, s->pc++);
4041 rm = modrm & 7;
4042 reg = ((modrm >> 3) & 7) | rex_r;
4043 mod = (modrm >> 6) & 3;
4044 if (b1 >= 2) {
4045 goto illegal_op;
4048 sse_fn_eppi = sse_op_table7[b].op[b1];
4049 if (!sse_fn_eppi) {
4050 goto illegal_op;
4052 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
4053 goto illegal_op;
4055 if (sse_fn_eppi == SSE_SPECIAL) {
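/* SSE_SPECIAL marks the SSE4.1 insert/extract ops handled by hand
   below: each moves data between an xmm lane and a general register
   or memory, with the lane selected by the imm8 fetched next. */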
4056 ot = mo_64_32(s->dflag);
4057 rm = (modrm & 7) | REX_B(s);
4058 if (mod != 3)
4059 gen_lea_modrm(env, s, modrm);
4060 reg = ((modrm >> 3) & 7) | rex_r;
4061 val = cpu_ldub_code(env, s->pc++);
4062 switch (b) {
4063 case 0x14: /* pextrb */
4064 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4065 xmm_regs[reg].XMM_B(val & 15)));
4066 if (mod == 3) {
4067 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4068 } else {
4069 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4070 s->mem_index, MO_UB);
4072 break;
4073 case 0x15: /* pextrw */
4074 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4075 xmm_regs[reg].XMM_W(val & 7)));
4076 if (mod == 3) {
4077 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4078 } else {
4079 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4080 s->mem_index, MO_LEUW);
4082 break;
4083 case 0x16:
4084 if (ot == MO_32) { /* pextrd */
4085 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4086 offsetof(CPUX86State,
4087 xmm_regs[reg].XMM_L(val & 3)));
4088 if (mod == 3) {
4089 tcg_gen_extu_i32_tl(cpu_regs[rm], cpu_tmp2_i32);
4090 } else {
4091 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
4092 s->mem_index, MO_LEUL);
4094 } else { /* pextrq */
4095 #ifdef TARGET_X86_64
4096 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
4097 offsetof(CPUX86State,
4098 xmm_regs[reg].XMM_Q(val & 1)));
4099 if (mod == 3) {
4100 tcg_gen_mov_i64(cpu_regs[rm], cpu_tmp1_i64);
4101 } else {
4102 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
4103 s->mem_index, MO_LEQ);
4105 #else
4106 goto illegal_op;
4107 #endif
4109 break;
4110 case 0x17: /* extractps */
4111 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4112 xmm_regs[reg].XMM_L(val & 3)));
4113 if (mod == 3) {
4114 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4115 } else {
4116 tcg_gen_qemu_st_tl(cpu_T[0], cpu_A0,
4117 s->mem_index, MO_LEUL);
4119 break;
4120 case 0x20: /* pinsrb */
4121 if (mod == 3) {
4122 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
4123 } else {
4124 tcg_gen_qemu_ld_tl(cpu_T[0], cpu_A0,
4125 s->mem_index, MO_UB);
4127 tcg_gen_st8_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
4128 xmm_regs[reg].XMM_B(val & 15)));
4129 break;
4130 case 0x21: /* insertps */
4131 if (mod == 3) {
4132 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
4133 offsetof(CPUX86State,xmm_regs[rm]
4134 .XMM_L((val >> 6) & 3)));
4135 } else {
4136 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4137 s->mem_index, MO_LEUL);
4139 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4140 offsetof(CPUX86State,xmm_regs[reg]
4141 .XMM_L((val >> 4) & 3)));
4142 if ((val >> 0) & 1)
4143 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4144 cpu_env, offsetof(CPUX86State,
4145 xmm_regs[reg].XMM_L(0)));
4146 if ((val >> 1) & 1)
4147 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4148 cpu_env, offsetof(CPUX86State,
4149 xmm_regs[reg].XMM_L(1)));
4150 if ((val >> 2) & 1)
4151 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4152 cpu_env, offsetof(CPUX86State,
4153 xmm_regs[reg].XMM_L(2)));
4154 if ((val >> 3) & 1)
4155 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
4156 cpu_env, offsetof(CPUX86State,
4157 xmm_regs[reg].XMM_L(3)));
4158 break;
4159 case 0x22:
4160 if (ot == MO_32) { /* pinsrd */
4161 if (mod == 3) {
4162 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[rm]);
4163 } else {
4164 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
4165 s->mem_index, MO_LEUL);
4167 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
4168 offsetof(CPUX86State,
4169 xmm_regs[reg].XMM_L(val & 3)));
4170 } else { /* pinsrq */
4171 #ifdef TARGET_X86_64
4172 if (mod == 3) {
4173 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
4174 } else {
4175 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
4176 s->mem_index, MO_LEQ);
4178 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
4179 offsetof(CPUX86State,
4180 xmm_regs[reg].XMM_Q(val & 1)));
4181 #else
4182 goto illegal_op;
4183 #endif
4185 break;
4186 }
4187 return;
4188 }
4189
4190 if (b1) {
4191 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4192 if (mod == 3) {
4193 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
4194 } else {
4195 op2_offset = offsetof(CPUX86State,xmm_t0);
4196 gen_lea_modrm(env, s, modrm);
4197 gen_ldo_env_A0(s, op2_offset);
4199 } else {
4200 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4201 if (mod == 3) {
4202 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4203 } else {
4204 op2_offset = offsetof(CPUX86State,mmx_t0);
4205 gen_lea_modrm(env, s, modrm);
4206 gen_ldq_env_A0(s, op2_offset);
4209 val = cpu_ldub_code(env, s->pc++);
4211 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
4212 set_cc_op(s, CC_OP_EFLAGS);
4214 if (s->dflag == MO_64) {
4215 /* The helper must use entire 64-bit gp registers */
4216 val |= 1 << 8;
4220 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4221 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4222 sse_fn_eppi(cpu_env, cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4223 break;
4225 case 0x33a:
4226 /* Various integer extensions at 0f 3a f[0-f]. */
4227 b = modrm | (b1 << 8);
4228 modrm = cpu_ldub_code(env, s->pc++);
4229 reg = ((modrm >> 3) & 7) | rex_r;
4231 switch (b) {
4232 case 0x3f0: /* rorx Gy,Ey, Ib */
4233 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2)
4234 || !(s->prefix & PREFIX_VEX)
4235 || s->vex_l != 0) {
4236 goto illegal_op;
4238 ot = mo_64_32(s->dflag);
4239 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4240 b = cpu_ldub_code(env, s->pc++);
4241 if (ot == MO_64) {
4242 tcg_gen_rotri_tl(cpu_T[0], cpu_T[0], b & 63);
4243 } else {
4244 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4245 tcg_gen_rotri_i32(cpu_tmp2_i32, cpu_tmp2_i32, b & 31);
4246 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
4248 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4249 break;
4251 default:
4252 goto illegal_op;
4254 break;
4256 default:
4257 goto illegal_op;
4259 } else {
4260 /* generic MMX or SSE operation */
4261 switch(b) {
4262 case 0x70: /* pshufx insn */
4263 case 0xc6: /* pshufx insn */
4264 case 0xc2: /* compare insns */
4265 s->rip_offset = 1;
4266 break;
4267 default:
4268 break;
4270 if (is_xmm) {
4271 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
4272 if (mod != 3) {
4273 int sz = 4;
4275 gen_lea_modrm(env, s, modrm);
4276 op2_offset = offsetof(CPUX86State,xmm_t0);
4278 switch (b) {
4279 case 0x50 ... 0x5a:
4280 case 0x5c ... 0x5f:
4281 case 0xc2:
4282 /* Most sse scalar operations. */
4283 if (b1 == 2) {
4284 sz = 2;
4285 } else if (b1 == 3) {
4286 sz = 3;
4288 break;
4290 case 0x2e: /* ucomis[sd] */
4291 case 0x2f: /* comis[sd] */
4292 if (b1 == 0) {
4293 sz = 2;
4294 } else {
4295 sz = 3;
4297 break;
4298 }
4299
4300 switch (sz) {
4301 case 2:
4302 /* 32 bit access */
4303 gen_op_ld_v(s, MO_32, cpu_T[0], cpu_A0);
4304 tcg_gen_st32_tl(cpu_T[0], cpu_env,
4305 offsetof(CPUX86State,xmm_t0.XMM_L(0)));
4306 break;
4307 case 3:
4308 /* 64 bit access */
4309 gen_ldq_env_A0(s, offsetof(CPUX86State, xmm_t0.XMM_D(0)));
4310 break;
4311 default:
4312 /* 128 bit access */
4313 gen_ldo_env_A0(s, op2_offset);
4314 break;
4315 }
4316 } else {
4317 rm = (modrm & 7) | REX_B(s);
4318 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
4320 } else {
4321 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
4322 if (mod != 3) {
4323 gen_lea_modrm(env, s, modrm);
4324 op2_offset = offsetof(CPUX86State,mmx_t0);
4325 gen_ldq_env_A0(s, op2_offset);
4326 } else {
4327 rm = (modrm & 7);
4328 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
4329 }
4330 }
4331 switch(b) {
4332 case 0x0f: /* 3DNow! data insns */
4333 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
4334 goto illegal_op;
4335 val = cpu_ldub_code(env, s->pc++);
4336 sse_fn_epp = sse_op_table5[val];
4337 if (!sse_fn_epp) {
4338 goto illegal_op;
4340 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4341 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4342 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4343 break;
4344 case 0x70: /* pshufx insn */
4345 case 0xc6: /* pshufx insn */
4346 val = cpu_ldub_code(env, s->pc++);
4347 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4348 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4349 /* XXX: introduce a new table? */
4350 sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_epp;
4351 sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
4352 break;
4353 case 0xc2:
4354 /* compare insns */
4355 val = cpu_ldub_code(env, s->pc++);
4356 if (val >= 8)
4357 goto illegal_op;
4358 sse_fn_epp = sse_op_table4[val][b1];
4360 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4361 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4362 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4363 break;
4364 case 0xf7:
4365 /* maskmov: we must prepare A0 from the implicit EDI operand */
4366 if (mod != 3)
4367 goto illegal_op;
4368 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EDI]);
4369 gen_extu(s->aflag, cpu_A0);
4370 gen_add_A0_ds_seg(s);
4372 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4373 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4374 /* XXX: introduce a new table? */
4375 sse_fn_eppt = (SSEFunc_0_eppt)sse_fn_epp;
4376 sse_fn_eppt(cpu_env, cpu_ptr0, cpu_ptr1, cpu_A0);
4377 break;
4378 default:
4379 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4380 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4381 sse_fn_epp(cpu_env, cpu_ptr0, cpu_ptr1);
4382 break;
4383 }
4384 if (b == 0x2e || b == 0x2f) {
4385 set_cc_op(s, CC_OP_EFLAGS);
4386 }
4387 }
4388 }
4389
4390 /* convert one instruction. s->is_jmp is set if the translation must
4391 be stopped. Return the next pc value */
4392 static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
4393 target_ulong pc_start)
4394 {
4395 int b, prefixes;
4396 int shift;
4397 TCGMemOp ot, aflag, dflag;
4398 int modrm, reg, rm, mod, op, opreg, val;
4399 target_ulong next_eip, tval;
4400 int rex_w, rex_r;
4402 s->pc = pc_start;
4403 prefixes = 0;
4404 s->override = -1;
4405 rex_w = -1;
4406 rex_r = 0;
4407 #ifdef TARGET_X86_64
4408 s->rex_x = 0;
4409 s->rex_b = 0;
4410 x86_64_hregs = 0;
4411 #endif
4412 s->rip_offset = 0; /* for relative ip address */
4413 s->vex_l = 0;
4414 s->vex_v = 0;
4415 next_byte:
4416 b = cpu_ldub_code(env, s->pc);
4417 s->pc++;
4418 /* Collect prefixes. */
4419 switch (b) {
4420 case 0xf3:
4421 prefixes |= PREFIX_REPZ;
4422 goto next_byte;
4423 case 0xf2:
4424 prefixes |= PREFIX_REPNZ;
4425 goto next_byte;
4426 case 0xf0:
4427 prefixes |= PREFIX_LOCK;
4428 goto next_byte;
4429 case 0x2e:
4430 s->override = R_CS;
4431 goto next_byte;
4432 case 0x36:
4433 s->override = R_SS;
4434 goto next_byte;
4435 case 0x3e:
4436 s->override = R_DS;
4437 goto next_byte;
4438 case 0x26:
4439 s->override = R_ES;
4440 goto next_byte;
4441 case 0x64:
4442 s->override = R_FS;
4443 goto next_byte;
4444 case 0x65:
4445 s->override = R_GS;
4446 goto next_byte;
4447 case 0x66:
4448 prefixes |= PREFIX_DATA;
4449 goto next_byte;
4450 case 0x67:
4451 prefixes |= PREFIX_ADR;
4452 goto next_byte;
4453 #ifdef TARGET_X86_64
4454 case 0x40 ... 0x4f:
4455 if (CODE64(s)) {
4456 /* REX prefix */
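/* The R, X and B bits are pre-shifted to bit 3 here so they can be
   OR-ed directly into the 3-bit register numbers decoded from the
   modrm and sib bytes; W is kept as a plain 0/1 flag. */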
4457 rex_w = (b >> 3) & 1;
4458 rex_r = (b & 0x4) << 1;
4459 s->rex_x = (b & 0x2) << 2;
4460 REX_B(s) = (b & 0x1) << 3;
4461 x86_64_hregs = 1; /* select uniform byte register addressing */
4462 goto next_byte;
4463 }
4464 break;
4465 #endif
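/* The VEX payload stores the R/X/B and vvvv fields inverted, hence
   the ~vex2/~vex3 extractions below.  A 2-byte VEX (c5) carries
   only R/vvvv/L/pp and implies a 0f leading byte, so the same byte
   is reused as vex3; a 3-byte VEX (c4) adds X/B, W and the opcode
   map selector. */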
4466 case 0xc5: /* 2-byte VEX */
4467 case 0xc4: /* 3-byte VEX */
4468 /* VEX prefixes can be used only in 32/64-bit protected mode;
4469    otherwise the c4/c5 bytes decode as the LES or LDS opcodes. */
4470 if (s->code32 && !s->vm86) {
4471 static const int pp_prefix[4] = {
4472 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
4473 };
4474 int vex3, vex2 = cpu_ldub_code(env, s->pc);
4475
4476 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
4477 /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
4478    otherwise the instruction is LES or LDS. */
4479 break;
4480 }
4481 s->pc++;
4482
4483 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
4484 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ
4485                 | PREFIX_LOCK | PREFIX_DATA)) {
4486 goto illegal_op;
4487 }
4488 #ifdef TARGET_X86_64
4489 if (x86_64_hregs) {
4490 goto illegal_op;
4491 }
4492 #endif
4493 rex_r = (~vex2 >> 4) & 8;
4494 if (b == 0xc5) {
4495 vex3 = vex2;
4496 b = cpu_ldub_code(env, s->pc++);
4497 } else {
4498 #ifdef TARGET_X86_64
4499 s->rex_x = (~vex2 >> 3) & 8;
4500 s->rex_b = (~vex2 >> 2) & 8;
4501 #endif
4502 vex3 = cpu_ldub_code(env, s->pc++);
4503 rex_w = (vex3 >> 7) & 1;
4504 switch (vex2 & 0x1f) {
4505 case 0x01: /* Implied 0f leading opcode bytes. */
4506 b = cpu_ldub_code(env, s->pc++) | 0x100;
4507 break;
4508 case 0x02: /* Implied 0f 38 leading opcode bytes. */
4509 b = 0x138;
4510 break;
4511 case 0x03: /* Implied 0f 3a leading opcode bytes. */
4512 b = 0x13a;
4513 break;
4514 default: /* Reserved for future use. */
4515 goto illegal_op;
4516 }
4517 }
4518 s->vex_v = (~vex3 >> 3) & 0xf;
4519 s->vex_l = (vex3 >> 2) & 1;
4520 prefixes |= pp_prefix[vex3 & 3] | PREFIX_VEX;
4521 }
4522 break;
4523 }
4524
4525 /* Post-process prefixes. */
4526 if (CODE64(s)) {
4527 /* In 64-bit mode, the default data size is 32-bit. Select 64-bit
4528 data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
4529 over 0x66 if both are present. */
4530 dflag = (rex_w > 0 ? MO_64 : prefixes & PREFIX_DATA ? MO_16 : MO_32);
4531 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
4532 aflag = (prefixes & PREFIX_ADR ? MO_32 : MO_64);
4533 } else {
4534 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
4535 if (s->code32 ^ ((prefixes & PREFIX_DATA) != 0)) {
4536 dflag = MO_32;
4537 } else {
4538 dflag = MO_16;
4539 }
4540 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
4541 if (s->code32 ^ ((prefixes & PREFIX_ADR) != 0)) {
4542 aflag = MO_32;
4543 } else {
4544 aflag = MO_16;
4545 }
4546 }
4547
4548 s->prefix = prefixes;
4549 s->aflag = aflag;
4550 s->dflag = dflag;
4552 /* lock generation */
4553 if (prefixes & PREFIX_LOCK)
4554 gen_helper_lock();
4556 /* now check op code */
4557 reswitch:
4558 switch(b) {
4559 case 0x0f:
4560 /**************************/
4561 /* extended op code */
4562 b = cpu_ldub_code(env, s->pc++) | 0x100;
4563 goto reswitch;
4565 /**************************/
4566 /* arith & logic */
4567 case 0x00 ... 0x05:
4568 case 0x08 ... 0x0d:
4569 case 0x10 ... 0x15:
4570 case 0x18 ... 0x1d:
4571 case 0x20 ... 0x25:
4572 case 0x28 ... 0x2d:
4573 case 0x30 ... 0x35:
4574 case 0x38 ... 0x3d:
4576 int op, f, val;
4577 op = (b >> 3) & 7;
4578 f = (b >> 1) & 3;
4580 ot = mo_b_d(b, dflag);
4582 switch(f) {
4583 case 0: /* OP Ev, Gv */
4584 modrm = cpu_ldub_code(env, s->pc++);
4585 reg = ((modrm >> 3) & 7) | rex_r;
4586 mod = (modrm >> 6) & 3;
4587 rm = (modrm & 7) | REX_B(s);
4588 if (mod != 3) {
4589 gen_lea_modrm(env, s, modrm);
4590 opreg = OR_TMP0;
4591 } else if (op == OP_XORL && rm == reg) {
4592 xor_zero:
4593 /* xor reg, reg optimisation */
4594 set_cc_op(s, CC_OP_CLR);
4595 tcg_gen_movi_tl(cpu_T[0], 0);
4596 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
4597 break;
4598 } else {
4599 opreg = rm;
4601 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4602 gen_op(s, op, ot, opreg);
4603 break;
4604 case 1: /* OP Gv, Ev */
4605 modrm = cpu_ldub_code(env, s->pc++);
4606 mod = (modrm >> 6) & 3;
4607 reg = ((modrm >> 3) & 7) | rex_r;
4608 rm = (modrm & 7) | REX_B(s);
4609 if (mod != 3) {
4610 gen_lea_modrm(env, s, modrm);
4611 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4612 } else if (op == OP_XORL && rm == reg) {
4613 goto xor_zero;
4614 } else {
4615 gen_op_mov_v_reg(ot, cpu_T[1], rm);
4617 gen_op(s, op, ot, reg);
4618 break;
4619 case 2: /* OP A, Iv */
4620 val = insn_get(env, s, ot);
4621 tcg_gen_movi_tl(cpu_T[1], val);
4622 gen_op(s, op, ot, OR_EAX);
4623 break;
4624 }
4625 }
4626 break;
4627
4628 case 0x82:
4629 if (CODE64(s))
4630 goto illegal_op;
4631 case 0x80: /* GRP1 */
4632 case 0x81:
4633 case 0x83:
4635 int val;
4637 ot = mo_b_d(b, dflag);
4639 modrm = cpu_ldub_code(env, s->pc++);
4640 mod = (modrm >> 6) & 3;
4641 rm = (modrm & 7) | REX_B(s);
4642 op = (modrm >> 3) & 7;
4644 if (mod != 3) {
4645 if (b == 0x83)
4646 s->rip_offset = 1;
4647 else
4648 s->rip_offset = insn_const_size(ot);
4649 gen_lea_modrm(env, s, modrm);
4650 opreg = OR_TMP0;
4651 } else {
4652 opreg = rm;
4655 switch(b) {
4656 default:
4657 case 0x80:
4658 case 0x81:
4659 case 0x82:
4660 val = insn_get(env, s, ot);
4661 break;
4662 case 0x83:
4663 val = (int8_t)insn_get(env, s, MO_8);
4664 break;
4666 tcg_gen_movi_tl(cpu_T[1], val);
4667 gen_op(s, op, ot, opreg);
4668 }
4669 break;
4671 /**************************/
4672 /* inc, dec, and other misc arith */
4673 case 0x40 ... 0x47: /* inc Gv */
4674 ot = dflag;
4675 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4676 break;
4677 case 0x48 ... 0x4f: /* dec Gv */
4678 ot = dflag;
4679 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4680 break;
4681 case 0xf6: /* GRP3 */
4682 case 0xf7:
4683 ot = mo_b_d(b, dflag);
4685 modrm = cpu_ldub_code(env, s->pc++);
4686 mod = (modrm >> 6) & 3;
4687 rm = (modrm & 7) | REX_B(s);
4688 op = (modrm >> 3) & 7;
4689 if (mod != 3) {
4690 if (op == 0)
4691 s->rip_offset = insn_const_size(ot);
4692 gen_lea_modrm(env, s, modrm);
4693 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4694 } else {
4695 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4698 switch(op) {
4699 case 0: /* test */
4700 val = insn_get(env, s, ot);
4701 tcg_gen_movi_tl(cpu_T[1], val);
4702 gen_op_testl_T0_T1_cc();
4703 set_cc_op(s, CC_OP_LOGICB + ot);
4704 break;
4705 case 2: /* not */
4706 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4707 if (mod != 3) {
4708 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4709 } else {
4710 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4712 break;
4713 case 3: /* neg */
4714 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4715 if (mod != 3) {
4716 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
4717 } else {
4718 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
4720 gen_op_update_neg_cc();
4721 set_cc_op(s, CC_OP_SUBB + ot);
4722 break;
4723 case 4: /* mul */
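/* Unsigned multiply of rAX by the operand.  The result is tracked
   in cc_dst and the high part of the product in cc_src, so
   CC_OP_MUL* can derive CF = OF = (high part != 0) lazily. */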
4724 switch(ot) {
4725 case MO_8:
4726 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4727 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4728 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4729 /* XXX: use 32 bit mul which could be faster */
4730 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4731 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4732 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4733 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4734 set_cc_op(s, CC_OP_MULB);
4735 break;
4736 case MO_16:
4737 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4738 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4739 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4740 /* XXX: use 32 bit mul which could be faster */
4741 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4742 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4743 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4744 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4745 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4746 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4747 set_cc_op(s, CC_OP_MULW);
4748 break;
4749 default:
4750 case MO_32:
4751 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4752 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4753 tcg_gen_mulu2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4754 cpu_tmp2_i32, cpu_tmp3_i32);
4755 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4756 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4757 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4758 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4759 set_cc_op(s, CC_OP_MULL);
4760 break;
4761 #ifdef TARGET_X86_64
4762 case MO_64:
4763 tcg_gen_mulu2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4764 cpu_T[0], cpu_regs[R_EAX]);
4765 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4766 tcg_gen_mov_tl(cpu_cc_src, cpu_regs[R_EDX]);
4767 set_cc_op(s, CC_OP_MULQ);
4768 break;
4769 #endif
4771 break;
4772 case 5: /* imul */
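/* Signed multiply.  cc_src is set to the result minus the
   sign-extension of its low half, which is non-zero exactly when
   the product overflows the destination size; CC_OP_MUL* turns
   that into CF = OF. */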
4773 switch(ot) {
4774 case MO_8:
4775 gen_op_mov_v_reg(MO_8, cpu_T[1], R_EAX);
4776 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4777 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4778 /* XXX: use 32 bit mul which could be faster */
4779 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4780 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4781 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4782 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4783 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4784 set_cc_op(s, CC_OP_MULB);
4785 break;
4786 case MO_16:
4787 gen_op_mov_v_reg(MO_16, cpu_T[1], R_EAX);
4788 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4789 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4790 /* XXX: use 32 bit mul which could be faster */
4791 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4792 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
4793 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4794 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4795 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4796 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4797 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
4798 set_cc_op(s, CC_OP_MULW);
4799 break;
4800 default:
4801 case MO_32:
4802 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4803 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_regs[R_EAX]);
4804 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
4805 cpu_tmp2_i32, cpu_tmp3_i32);
4806 tcg_gen_extu_i32_tl(cpu_regs[R_EAX], cpu_tmp2_i32);
4807 tcg_gen_extu_i32_tl(cpu_regs[R_EDX], cpu_tmp3_i32);
4808 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
4809 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4810 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
4811 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
4812 set_cc_op(s, CC_OP_MULL);
4813 break;
4814 #ifdef TARGET_X86_64
4815 case MO_64:
4816 tcg_gen_muls2_i64(cpu_regs[R_EAX], cpu_regs[R_EDX],
4817 cpu_T[0], cpu_regs[R_EAX]);
4818 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[R_EAX]);
4819 tcg_gen_sari_tl(cpu_cc_src, cpu_regs[R_EAX], 63);
4820 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_regs[R_EDX]);
4821 set_cc_op(s, CC_OP_MULQ);
4822 break;
4823 #endif
4825 break;
4826 case 6: /* div */
4827 switch(ot) {
4828 case MO_8:
4829 gen_helper_divb_AL(cpu_env, cpu_T[0]);
4830 break;
4831 case MO_16:
4832 gen_helper_divw_AX(cpu_env, cpu_T[0]);
4833 break;
4834 default:
4835 case MO_32:
4836 gen_helper_divl_EAX(cpu_env, cpu_T[0]);
4837 break;
4838 #ifdef TARGET_X86_64
4839 case MO_64:
4840 gen_helper_divq_EAX(cpu_env, cpu_T[0]);
4841 break;
4842 #endif
4844 break;
4845 case 7: /* idiv */
4846 switch(ot) {
4847 case MO_8:
4848 gen_helper_idivb_AL(cpu_env, cpu_T[0]);
4849 break;
4850 case MO_16:
4851 gen_helper_idivw_AX(cpu_env, cpu_T[0]);
4852 break;
4853 default:
4854 case MO_32:
4855 gen_helper_idivl_EAX(cpu_env, cpu_T[0]);
4856 break;
4857 #ifdef TARGET_X86_64
4858 case MO_64:
4859 gen_helper_idivq_EAX(cpu_env, cpu_T[0]);
4860 break;
4861 #endif
4863 break;
4864 default:
4865 goto illegal_op;
4867 break;
4869 case 0xfe: /* GRP4 */
4870 case 0xff: /* GRP5 */
4871 ot = mo_b_d(b, dflag);
4873 modrm = cpu_ldub_code(env, s->pc++);
4874 mod = (modrm >> 6) & 3;
4875 rm = (modrm & 7) | REX_B(s);
4876 op = (modrm >> 3) & 7;
4877 if (op >= 2 && b == 0xfe) {
4878 goto illegal_op;
4880 if (CODE64(s)) {
4881 if (op == 2 || op == 4) {
4882 /* operand size for jumps is 64 bit */
4883 ot = MO_64;
4884 } else if (op == 3 || op == 5) {
4885 ot = dflag != MO_16 ? MO_32 + (rex_w == 1) : MO_16;
4886 } else if (op == 6) {
4887 /* default push size is 64 bit */
4888 ot = mo_pushpop(s, dflag);
4891 if (mod != 3) {
4892 gen_lea_modrm(env, s, modrm);
4893 if (op >= 2 && op != 3 && op != 5)
4894 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
4895 } else {
4896 gen_op_mov_v_reg(ot, cpu_T[0], rm);
4899 switch(op) {
4900 case 0: /* inc Ev */
4901 if (mod != 3)
4902 opreg = OR_TMP0;
4903 else
4904 opreg = rm;
4905 gen_inc(s, ot, opreg, 1);
4906 break;
4907 case 1: /* dec Ev */
4908 if (mod != 3)
4909 opreg = OR_TMP0;
4910 else
4911 opreg = rm;
4912 gen_inc(s, ot, opreg, -1);
4913 break;
4914 case 2: /* call Ev */
4915 /* XXX: optimize if memory (no 'and' is necessary) */
4916 if (dflag == MO_16) {
4917 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4919 next_eip = s->pc - s->cs_base;
4920 tcg_gen_movi_tl(cpu_T[1], next_eip);
4921 gen_push_v(s, cpu_T[1]);
4922 gen_op_jmp_v(cpu_T[0]);
4923 gen_eob(s);
4924 break;
4925 case 3: /* lcall Ev */
4926 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4927 gen_add_A0_im(s, 1 << ot);
4928 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4929 do_lcall:
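/* Far calls are done in a helper: in protected mode it performs
   the full descriptor and privilege checks, in real/vm86 mode it
   just pushes CS:IP and loads the new CS selector. */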
4930 if (s->pe && !s->vm86) {
4931 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4932 gen_helper_lcall_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4933 tcg_const_i32(dflag - 1),
4934 tcg_const_tl(s->pc - s->cs_base));
4935 } else {
4936 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4937 gen_helper_lcall_real(cpu_env, cpu_tmp2_i32, cpu_T[1],
4938 tcg_const_i32(dflag - 1),
4939 tcg_const_i32(s->pc - s->cs_base));
4941 gen_eob(s);
4942 break;
4943 case 4: /* jmp Ev */
4944 if (dflag == MO_16) {
4945 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4947 gen_op_jmp_v(cpu_T[0]);
4948 gen_eob(s);
4949 break;
4950 case 5: /* ljmp Ev */
4951 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
4952 gen_add_A0_im(s, 1 << ot);
4953 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
4954 do_ljmp:
4955 if (s->pe && !s->vm86) {
4956 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4957 gen_helper_ljmp_protected(cpu_env, cpu_tmp2_i32, cpu_T[1],
4958 tcg_const_tl(s->pc - s->cs_base));
4959 } else {
4960 gen_op_movl_seg_T0_vm(R_CS);
4961 gen_op_jmp_v(cpu_T[1]);
4963 gen_eob(s);
4964 break;
4965 case 6: /* push Ev */
4966 gen_push_v(s, cpu_T[0]);
4967 break;
4968 default:
4969 goto illegal_op;
4971 break;
4973 case 0x84: /* test Ev, Gv */
4974 case 0x85:
4975 ot = mo_b_d(b, dflag);
4977 modrm = cpu_ldub_code(env, s->pc++);
4978 reg = ((modrm >> 3) & 7) | rex_r;
4980 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
4981 gen_op_mov_v_reg(ot, cpu_T[1], reg);
4982 gen_op_testl_T0_T1_cc();
4983 set_cc_op(s, CC_OP_LOGICB + ot);
4984 break;
4986 case 0xa8: /* test eAX, Iv */
4987 case 0xa9:
4988 ot = mo_b_d(b, dflag);
4989 val = insn_get(env, s, ot);
4991 gen_op_mov_v_reg(ot, cpu_T[0], OR_EAX);
4992 tcg_gen_movi_tl(cpu_T[1], val);
4993 gen_op_testl_T0_T1_cc();
4994 set_cc_op(s, CC_OP_LOGICB + ot);
4995 break;
4997 case 0x98: /* CWDE/CBW */
4998 switch (dflag) {
4999 #ifdef TARGET_X86_64
5000 case MO_64:
5001 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5002 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5003 gen_op_mov_reg_v(MO_64, R_EAX, cpu_T[0]);
5004 break;
5005 #endif
5006 case MO_32:
5007 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5008 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5009 gen_op_mov_reg_v(MO_32, R_EAX, cpu_T[0]);
5010 break;
5011 case MO_16:
5012 gen_op_mov_v_reg(MO_8, cpu_T[0], R_EAX);
5013 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5014 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
5015 break;
5016 default:
5017 tcg_abort();
5018 }
5019 break;
5020 case 0x99: /* CDQ/CWD */
5021 switch (dflag) {
5022 #ifdef TARGET_X86_64
5023 case MO_64:
5024 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EAX);
5025 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
5026 gen_op_mov_reg_v(MO_64, R_EDX, cpu_T[0]);
5027 break;
5028 #endif
5029 case MO_32:
5030 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EAX);
5031 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
5032 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
5033 gen_op_mov_reg_v(MO_32, R_EDX, cpu_T[0]);
5034 break;
5035 case MO_16:
5036 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EAX);
5037 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5038 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
5039 gen_op_mov_reg_v(MO_16, R_EDX, cpu_T[0]);
5040 break;
5041 default:
5042 tcg_abort();
5043 }
5044 break;
5045 case 0x1af: /* imul Gv, Ev */
5046 case 0x69: /* imul Gv, Ev, I */
5047 case 0x6b:
5048 ot = dflag;
5049 modrm = cpu_ldub_code(env, s->pc++);
5050 reg = ((modrm >> 3) & 7) | rex_r;
5051 if (b == 0x69)
5052 s->rip_offset = insn_const_size(ot);
5053 else if (b == 0x6b)
5054 s->rip_offset = 1;
5055 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5056 if (b == 0x69) {
5057 val = insn_get(env, s, ot);
5058 tcg_gen_movi_tl(cpu_T[1], val);
5059 } else if (b == 0x6b) {
5060 val = (int8_t)insn_get(env, s, MO_8);
5061 tcg_gen_movi_tl(cpu_T[1], val);
5062 } else {
5063 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5065 switch (ot) {
5066 #ifdef TARGET_X86_64
5067 case MO_64:
5068 tcg_gen_muls2_i64(cpu_regs[reg], cpu_T[1], cpu_T[0], cpu_T[1]);
5069 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5070 tcg_gen_sari_tl(cpu_cc_src, cpu_cc_dst, 63);
5071 tcg_gen_sub_tl(cpu_cc_src, cpu_cc_src, cpu_T[1]);
5072 break;
5073 #endif
5074 case MO_32:
5075 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5076 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
5077 tcg_gen_muls2_i32(cpu_tmp2_i32, cpu_tmp3_i32,
5078 cpu_tmp2_i32, cpu_tmp3_i32);
5079 tcg_gen_extu_i32_tl(cpu_regs[reg], cpu_tmp2_i32);
5080 tcg_gen_sari_i32(cpu_tmp2_i32, cpu_tmp2_i32, 31);
5081 tcg_gen_mov_tl(cpu_cc_dst, cpu_regs[reg]);
5082 tcg_gen_sub_i32(cpu_tmp2_i32, cpu_tmp2_i32, cpu_tmp3_i32);
5083 tcg_gen_extu_i32_tl(cpu_cc_src, cpu_tmp2_i32);
5084 break;
5085 default:
5086 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5087 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
5088 /* XXX: use 32 bit mul which could be faster */
5089 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5090 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
5091 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
5092 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
5093 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5094 break;
5096 set_cc_op(s, CC_OP_MULB + ot);
5097 break;
5098 case 0x1c0:
5099 case 0x1c1: /* xadd Ev, Gv */
5100 ot = mo_b_d(b, dflag);
5101 modrm = cpu_ldub_code(env, s->pc++);
5102 reg = ((modrm >> 3) & 7) | rex_r;
5103 mod = (modrm >> 6) & 3;
5104 if (mod == 3) {
5105 rm = (modrm & 7) | REX_B(s);
5106 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5107 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5108 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5109 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5110 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5111 } else {
5112 gen_lea_modrm(env, s, modrm);
5113 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5114 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5115 tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
5116 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5117 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5119 gen_op_update2_cc();
5120 set_cc_op(s, CC_OP_ADDB + ot);
5121 break;
5122 case 0x1b0:
5123 case 0x1b1: /* cmpxchg Ev, Gv */
5124 {
5125 TCGLabel *label1, *label2;
5126 TCGv t0, t1, t2, a0;
5128 ot = mo_b_d(b, dflag);
5129 modrm = cpu_ldub_code(env, s->pc++);
5130 reg = ((modrm >> 3) & 7) | rex_r;
5131 mod = (modrm >> 6) & 3;
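/* Local temporaries are needed here: their values must survive the
   brcond/label control flow below, which kills ordinary TCG
   temporaries. */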
5132 t0 = tcg_temp_local_new();
5133 t1 = tcg_temp_local_new();
5134 t2 = tcg_temp_local_new();
5135 a0 = tcg_temp_local_new();
5136 gen_op_mov_v_reg(ot, t1, reg);
5137 if (mod == 3) {
5138 rm = (modrm & 7) | REX_B(s);
5139 gen_op_mov_v_reg(ot, t0, rm);
5140 } else {
5141 gen_lea_modrm(env, s, modrm);
5142 tcg_gen_mov_tl(a0, cpu_A0);
5143 gen_op_ld_v(s, ot, t0, a0);
5144 rm = 0; /* avoid warning */
5146 label1 = gen_new_label();
5147 tcg_gen_mov_tl(t2, cpu_regs[R_EAX]);
5148 gen_extu(ot, t0);
5149 gen_extu(ot, t2);
5150 tcg_gen_brcond_tl(TCG_COND_EQ, t2, t0, label1);
5151 label2 = gen_new_label();
5152 if (mod == 3) {
5153 gen_op_mov_reg_v(ot, R_EAX, t0);
5154 tcg_gen_br(label2);
5155 gen_set_label(label1);
5156 gen_op_mov_reg_v(ot, rm, t1);
5157 } else {
5158 /* perform no-op store cycle like physical cpu; must be
5159 before changing accumulator to ensure idempotency if
5160 the store faults and the instruction is restarted */
5161 gen_op_st_v(s, ot, t0, a0);
5162 gen_op_mov_reg_v(ot, R_EAX, t0);
5163 tcg_gen_br(label2);
5164 gen_set_label(label1);
5165 gen_op_st_v(s, ot, t1, a0);
5167 gen_set_label(label2);
5168 tcg_gen_mov_tl(cpu_cc_src, t0);
5169 tcg_gen_mov_tl(cpu_cc_srcT, t2);
5170 tcg_gen_sub_tl(cpu_cc_dst, t2, t0);
5171 set_cc_op(s, CC_OP_SUBB + ot);
5172 tcg_temp_free(t0);
5173 tcg_temp_free(t1);
5174 tcg_temp_free(t2);
5175 tcg_temp_free(a0);
5176 }
5177 break;
5178 case 0x1c7: /* cmpxchg8b */
5179 modrm = cpu_ldub_code(env, s->pc++);
5180 mod = (modrm >> 6) & 3;
5181 if ((mod == 3) || ((modrm & 0x38) != 0x8))
5182 goto illegal_op;
5183 #ifdef TARGET_X86_64
5184 if (dflag == MO_64) {
5185 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
5186 goto illegal_op;
5187 gen_lea_modrm(env, s, modrm);
5188 gen_helper_cmpxchg16b(cpu_env, cpu_A0);
5189 } else
5190 #endif
5191 {
5192 if (!(s->cpuid_features & CPUID_CX8))
5193 goto illegal_op;
5194 gen_lea_modrm(env, s, modrm);
5195 gen_helper_cmpxchg8b(cpu_env, cpu_A0);
5196 }
5197 set_cc_op(s, CC_OP_EFLAGS);
5198 break;
5200 /**************************/
5201 /* push/pop */
5202 case 0x50 ... 0x57: /* push */
5203 gen_op_mov_v_reg(MO_32, cpu_T[0], (b & 7) | REX_B(s));
5204 gen_push_v(s, cpu_T[0]);
5205 break;
5206 case 0x58 ... 0x5f: /* pop */
5207 ot = gen_pop_T0(s);
5208 /* NOTE: order is important for pop %sp */
5209 gen_pop_update(s, ot);
5210 gen_op_mov_reg_v(ot, (b & 7) | REX_B(s), cpu_T[0]);
5211 break;
5212 case 0x60: /* pusha */
5213 if (CODE64(s))
5214 goto illegal_op;
5215 gen_pusha(s);
5216 break;
5217 case 0x61: /* popa */
5218 if (CODE64(s))
5219 goto illegal_op;
5220 gen_popa(s);
5221 break;
5222 case 0x68: /* push Iv */
5223 case 0x6a:
5224 ot = mo_pushpop(s, dflag);
5225 if (b == 0x68)
5226 val = insn_get(env, s, ot);
5227 else
5228 val = (int8_t)insn_get(env, s, MO_8);
5229 tcg_gen_movi_tl(cpu_T[0], val);
5230 gen_push_v(s, cpu_T[0]);
5231 break;
5232 case 0x8f: /* pop Ev */
5233 modrm = cpu_ldub_code(env, s->pc++);
5234 mod = (modrm >> 6) & 3;
5235 ot = gen_pop_T0(s);
5236 if (mod == 3) {
5237 /* NOTE: order is important for pop %sp */
5238 gen_pop_update(s, ot);
5239 rm = (modrm & 7) | REX_B(s);
5240 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5241 } else {
5242 /* NOTE: order is important too for MMU exceptions */
5243 s->popl_esp_hack = 1 << ot;
5244 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5245 s->popl_esp_hack = 0;
5246 gen_pop_update(s, ot);
5248 break;
5249 case 0xc8: /* enter */
5250 {
5251 int level;
5252 val = cpu_lduw_code(env, s->pc);
5253 s->pc += 2;
5254 level = cpu_ldub_code(env, s->pc++);
5255 gen_enter(s, val, level);
5256 }
5257 break;
5258 case 0xc9: /* leave */
5259 /* XXX: exception not precise (ESP is updated before potential exception) */
5260 if (CODE64(s)) {
5261 gen_op_mov_v_reg(MO_64, cpu_T[0], R_EBP);
5262 gen_op_mov_reg_v(MO_64, R_ESP, cpu_T[0]);
5263 } else if (s->ss32) {
5264 gen_op_mov_v_reg(MO_32, cpu_T[0], R_EBP);
5265 gen_op_mov_reg_v(MO_32, R_ESP, cpu_T[0]);
5266 } else {
5267 gen_op_mov_v_reg(MO_16, cpu_T[0], R_EBP);
5268 gen_op_mov_reg_v(MO_16, R_ESP, cpu_T[0]);
5270 ot = gen_pop_T0(s);
5271 gen_op_mov_reg_v(ot, R_EBP, cpu_T[0]);
5272 gen_pop_update(s, ot);
5273 break;
5274 case 0x06: /* push es */
5275 case 0x0e: /* push cs */
5276 case 0x16: /* push ss */
5277 case 0x1e: /* push ds */
5278 if (CODE64(s))
5279 goto illegal_op;
5280 gen_op_movl_T0_seg(b >> 3);
5281 gen_push_v(s, cpu_T[0]);
5282 break;
5283 case 0x1a0: /* push fs */
5284 case 0x1a8: /* push gs */
5285 gen_op_movl_T0_seg((b >> 3) & 7);
5286 gen_push_v(s, cpu_T[0]);
5287 break;
5288 case 0x07: /* pop es */
5289 case 0x17: /* pop ss */
5290 case 0x1f: /* pop ds */
5291 if (CODE64(s))
5292 goto illegal_op;
5293 reg = b >> 3;
5294 ot = gen_pop_T0(s);
5295 gen_movl_seg_T0(s, reg);
5296 gen_pop_update(s, ot);
5297 if (reg == R_SS) {
5298 /* if reg == SS, inhibit interrupts/trace. */
5299 /* If several consecutive instructions disable interrupts,
5300 only the first one takes effect */
5301 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5302 gen_helper_set_inhibit_irq(cpu_env);
5303 s->tf = 0;
5305 if (s->is_jmp) {
5306 gen_jmp_im(s->pc - s->cs_base);
5307 gen_eob(s);
5309 break;
5310 case 0x1a1: /* pop fs */
5311 case 0x1a9: /* pop gs */
5312 ot = gen_pop_T0(s);
5313 gen_movl_seg_T0(s, (b >> 3) & 7);
5314 gen_pop_update(s, ot);
5315 if (s->is_jmp) {
5316 gen_jmp_im(s->pc - s->cs_base);
5317 gen_eob(s);
5319 break;
5321 /**************************/
5322 /* mov */
5323 case 0x88:
5324 case 0x89: /* mov Gv, Ev */
5325 ot = mo_b_d(b, dflag);
5326 modrm = cpu_ldub_code(env, s->pc++);
5327 reg = ((modrm >> 3) & 7) | rex_r;
5329 /* generate a generic store */
5330 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
5331 break;
5332 case 0xc6:
5333 case 0xc7: /* mov Ev, Iv */
5334 ot = mo_b_d(b, dflag);
5335 modrm = cpu_ldub_code(env, s->pc++);
5336 mod = (modrm >> 6) & 3;
5337 if (mod != 3) {
5338 s->rip_offset = insn_const_size(ot);
5339 gen_lea_modrm(env, s, modrm);
5341 val = insn_get(env, s, ot);
5342 tcg_gen_movi_tl(cpu_T[0], val);
5343 if (mod != 3) {
5344 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5345 } else {
5346 gen_op_mov_reg_v(ot, (modrm & 7) | REX_B(s), cpu_T[0]);
5348 break;
5349 case 0x8a:
5350 case 0x8b: /* mov Ev, Gv */
5351 ot = mo_b_d(b, dflag);
5352 modrm = cpu_ldub_code(env, s->pc++);
5353 reg = ((modrm >> 3) & 7) | rex_r;
5355 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
5356 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5357 break;
5358 case 0x8e: /* mov seg, Gv */
5359 modrm = cpu_ldub_code(env, s->pc++);
5360 reg = (modrm >> 3) & 7;
5361 if (reg >= 6 || reg == R_CS)
5362 goto illegal_op;
5363 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
5364 gen_movl_seg_T0(s, reg);
5365 if (reg == R_SS) {
5366 /* if reg == SS, inhibit interrupts/trace */
5367 /* If several consecutive instructions disable interrupts,
5368 only the first one takes effect */
5369 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5370 gen_helper_set_inhibit_irq(cpu_env);
5371 s->tf = 0;
5373 if (s->is_jmp) {
5374 gen_jmp_im(s->pc - s->cs_base);
5375 gen_eob(s);
5377 break;
5378 case 0x8c: /* mov Gv, seg */
5379 modrm = cpu_ldub_code(env, s->pc++);
5380 reg = (modrm >> 3) & 7;
5381 mod = (modrm >> 6) & 3;
5382 if (reg >= 6)
5383 goto illegal_op;
5384 gen_op_movl_T0_seg(reg);
5385 ot = mod == 3 ? dflag : MO_16;
5386 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
5387 break;
5389 case 0x1b6: /* movzbS Gv, Eb */
5390 case 0x1b7: /* movzwS Gv, Eb */
5391 case 0x1be: /* movsbS Gv, Eb */
5392 case 0x1bf: /* movswS Gv, Eb */
5394 TCGMemOp d_ot;
5395 TCGMemOp s_ot;
5397 /* d_ot is the size of destination */
5398 d_ot = dflag;
5399 /* ot is the size of source */
5400 ot = (b & 1) + MO_8;
5401 /* s_ot is the sign+size of source */
5402 s_ot = b & 8 ? MO_SIGN | ot : ot;
5404 modrm = cpu_ldub_code(env, s->pc++);
5405 reg = ((modrm >> 3) & 7) | rex_r;
5406 mod = (modrm >> 6) & 3;
5407 rm = (modrm & 7) | REX_B(s);
5409 if (mod == 3) {
5410 gen_op_mov_v_reg(ot, cpu_T[0], rm);
5411 switch (s_ot) {
5412 case MO_UB:
5413 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5414 break;
5415 case MO_SB:
5416 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5417 break;
5418 case MO_UW:
5419 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5420 break;
5421 default:
5422 case MO_SW:
5423 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5424 break;
5426 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5427 } else {
5428 gen_lea_modrm(env, s, modrm);
5429 gen_op_ld_v(s, s_ot, cpu_T[0], cpu_A0);
5430 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
5433 break;
5435 case 0x8d: /* lea */
5436 ot = dflag;
5437 modrm = cpu_ldub_code(env, s->pc++);
5438 mod = (modrm >> 6) & 3;
5439 if (mod == 3)
5440 goto illegal_op;
5441 reg = ((modrm >> 3) & 7) | rex_r;
5442 /* we must ensure that no segment is added */
5443 s->override = -1;
5444 val = s->addseg;
5445 s->addseg = 0;
5446 gen_lea_modrm(env, s, modrm);
5447 s->addseg = val;
5448 gen_op_mov_reg_v(ot, reg, cpu_A0);
5449 break;
5451 case 0xa0: /* mov EAX, Ov */
5452 case 0xa1:
5453 case 0xa2: /* mov Ov, EAX */
5454 case 0xa3:
5456 target_ulong offset_addr;
5458 ot = mo_b_d(b, dflag);
5459 switch (s->aflag) {
5460 #ifdef TARGET_X86_64
5461 case MO_64:
5462 offset_addr = cpu_ldq_code(env, s->pc);
5463 s->pc += 8;
5464 break;
5465 #endif
5466 default:
5467 offset_addr = insn_get(env, s, s->aflag);
5468 break;
5470 tcg_gen_movi_tl(cpu_A0, offset_addr);
5471 gen_add_A0_ds_seg(s);
5472 if ((b & 2) == 0) {
5473 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
5474 gen_op_mov_reg_v(ot, R_EAX, cpu_T[0]);
5475 } else {
5476 gen_op_mov_v_reg(ot, cpu_T[0], R_EAX);
5477 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5480 break;
5481 case 0xd7: /* xlat */
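/* xlat: AL = DS:[(E)BX + zero-extended AL]; the sum is truncated
   to the current address size before the segment base is added. */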
5482 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EBX]);
5483 tcg_gen_ext8u_tl(cpu_T[0], cpu_regs[R_EAX]);
5484 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5485 gen_extu(s->aflag, cpu_A0);
5486 gen_add_A0_ds_seg(s);
5487 gen_op_ld_v(s, MO_8, cpu_T[0], cpu_A0);
5488 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
5489 break;
5490 case 0xb0 ... 0xb7: /* mov R, Ib */
5491 val = insn_get(env, s, MO_8);
5492 tcg_gen_movi_tl(cpu_T[0], val);
5493 gen_op_mov_reg_v(MO_8, (b & 7) | REX_B(s), cpu_T[0]);
5494 break;
5495 case 0xb8 ... 0xbf: /* mov R, Iv */
5496 #ifdef TARGET_X86_64
5497 if (dflag == MO_64) {
5498 uint64_t tmp;
5499 /* 64 bit case */
5500 tmp = cpu_ldq_code(env, s->pc);
5501 s->pc += 8;
5502 reg = (b & 7) | REX_B(s);
5503 tcg_gen_movi_tl(cpu_T[0], tmp);
5504 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
5505 } else
5506 #endif
5507 {
5508 ot = dflag;
5509 val = insn_get(env, s, ot);
5510 reg = (b & 7) | REX_B(s);
5511 tcg_gen_movi_tl(cpu_T[0], val);
5512 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
5513 }
5514 break;
5516 case 0x91 ... 0x97: /* xchg R, EAX */
5517 do_xchg_reg_eax:
5518 ot = dflag;
5519 reg = (b & 7) | REX_B(s);
5520 rm = R_EAX;
5521 goto do_xchg_reg;
5522 case 0x86:
5523 case 0x87: /* xchg Ev, Gv */
5524 ot = mo_b_d(b, dflag);
5525 modrm = cpu_ldub_code(env, s->pc++);
5526 reg = ((modrm >> 3) & 7) | rex_r;
5527 mod = (modrm >> 6) & 3;
5528 if (mod == 3) {
5529 rm = (modrm & 7) | REX_B(s);
5530 do_xchg_reg:
5531 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5532 gen_op_mov_v_reg(ot, cpu_T[1], rm);
5533 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
5534 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5535 } else {
5536 gen_lea_modrm(env, s, modrm);
5537 gen_op_mov_v_reg(ot, cpu_T[0], reg);
5538 /* for xchg, lock is implicit */
5539 if (!(prefixes & PREFIX_LOCK))
5540 gen_helper_lock();
5541 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5542 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
5543 if (!(prefixes & PREFIX_LOCK))
5544 gen_helper_unlock();
5545 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5547 break;
5548 case 0xc4: /* les Gv */
5549 /* In CODE64 this is VEX3; see above. */
5550 op = R_ES;
5551 goto do_lxx;
5552 case 0xc5: /* lds Gv */
5553 /* In CODE64 this is VEX2; see above. */
5554 op = R_DS;
5555 goto do_lxx;
5556 case 0x1b2: /* lss Gv */
5557 op = R_SS;
5558 goto do_lxx;
5559 case 0x1b4: /* lfs Gv */
5560 op = R_FS;
5561 goto do_lxx;
5562 case 0x1b5: /* lgs Gv */
5563 op = R_GS;
5564 do_lxx:
5565 ot = dflag != MO_16 ? MO_32 : MO_16;
5566 modrm = cpu_ldub_code(env, s->pc++);
5567 reg = ((modrm >> 3) & 7) | rex_r;
5568 mod = (modrm >> 6) & 3;
5569 if (mod == 3)
5570 goto illegal_op;
5571 gen_lea_modrm(env, s, modrm);
5572 gen_op_ld_v(s, ot, cpu_T[1], cpu_A0);
5573 gen_add_A0_im(s, 1 << ot);
5574 /* load the segment first to handle exceptions properly */
5575 gen_op_ld_v(s, MO_16, cpu_T[0], cpu_A0);
5576 gen_movl_seg_T0(s, op);
5577 /* then put the data */
5578 gen_op_mov_reg_v(ot, reg, cpu_T[1]);
5579 if (s->is_jmp) {
5580 gen_jmp_im(s->pc - s->cs_base);
5581 gen_eob(s);
5583 break;
5585 /************************/
5586 /* shifts */
5587 case 0xc0:
5588 case 0xc1:
5589 /* shift Ev,Ib */
5590 shift = 2;
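/* grp2 dispatch: shift == 0 takes the count from CL, shift == 1 is
   the implicit count of 1, and shift == 2 fetches an imm8 count
   below. */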
5591 grp2:
5592 {
5593 ot = mo_b_d(b, dflag);
5594 modrm = cpu_ldub_code(env, s->pc++);
5595 mod = (modrm >> 6) & 3;
5596 op = (modrm >> 3) & 7;
5598 if (mod != 3) {
5599 if (shift == 2) {
5600 s->rip_offset = 1;
5602 gen_lea_modrm(env, s, modrm);
5603 opreg = OR_TMP0;
5604 } else {
5605 opreg = (modrm & 7) | REX_B(s);
5608 /* simpler op */
5609 if (shift == 0) {
5610 gen_shift(s, op, ot, opreg, OR_ECX);
5611 } else {
5612 if (shift == 2) {
5613 shift = cpu_ldub_code(env, s->pc++);
5614 }
5615 gen_shifti(s, op, ot, opreg);
5616 }
5617 }
5618 break;
5619 case 0xd0:
5620 case 0xd1:
5621 /* shift Ev,1 */
5622 shift = 1;
5623 goto grp2;
5624 case 0xd2:
5625 case 0xd3:
5626 /* shift Ev,cl */
5627 shift = 0;
5628 goto grp2;
5630 case 0x1a4: /* shld imm */
5631 op = 0;
5632 shift = 1;
5633 goto do_shiftd;
5634 case 0x1a5: /* shld cl */
5635 op = 0;
5636 shift = 0;
5637 goto do_shiftd;
5638 case 0x1ac: /* shrd imm */
5639 op = 1;
5640 shift = 1;
5641 goto do_shiftd;
5642 case 0x1ad: /* shrd cl */
5643 op = 1;
5644 shift = 0;
5645 do_shiftd:
5646 ot = dflag;
5647 modrm = cpu_ldub_code(env, s->pc++);
5648 mod = (modrm >> 6) & 3;
5649 rm = (modrm & 7) | REX_B(s);
5650 reg = ((modrm >> 3) & 7) | rex_r;
5651 if (mod != 3) {
5652 gen_lea_modrm(env, s, modrm);
5653 opreg = OR_TMP0;
5654 } else {
5655 opreg = rm;
5657 gen_op_mov_v_reg(ot, cpu_T[1], reg);
5659 if (shift) {
5660 TCGv imm = tcg_const_tl(cpu_ldub_code(env, s->pc++));
5661 gen_shiftd_rm_T1(s, ot, opreg, op, imm);
5662 tcg_temp_free(imm);
5663 } else {
5664 gen_shiftd_rm_T1(s, ot, opreg, op, cpu_regs[R_ECX]);
5665 }
5666 break;
5668 /************************/
5669 /* floats */
5670 case 0xd8 ... 0xdf:
5671 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5672 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5673 /* XXX: what to do if illegal op ? */
5674 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5675 break;
5677 modrm = cpu_ldub_code(env, s->pc++);
5678 mod = (modrm >> 6) & 3;
5679 rm = modrm & 7;
5680 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
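/* Pack the low three opcode bits (d8..df) with the modrm reg field
   into a 6-bit index, so every ESC opcode/reg combination gets a
   single case below. */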
5681 if (mod != 3) {
5682 /* memory op */
5683 gen_lea_modrm(env, s, modrm);
5684 switch(op) {
5685 case 0x00 ... 0x07: /* fxxxs */
5686 case 0x10 ... 0x17: /* fixxxl */
5687 case 0x20 ... 0x27: /* fxxxl */
5688 case 0x30 ... 0x37: /* fixxx */
5690 int op1;
5691 op1 = op & 7;
5693 switch(op >> 4) {
5694 case 0:
5695 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5696 s->mem_index, MO_LEUL);
5697 gen_helper_flds_FT0(cpu_env, cpu_tmp2_i32);
5698 break;
5699 case 1:
5700 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5701 s->mem_index, MO_LEUL);
5702 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5703 break;
5704 case 2:
5705 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5706 s->mem_index, MO_LEQ);
5707 gen_helper_fldl_FT0(cpu_env, cpu_tmp1_i64);
5708 break;
5709 case 3:
5710 default:
5711 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5712 s->mem_index, MO_LESW);
5713 gen_helper_fildl_FT0(cpu_env, cpu_tmp2_i32);
5714 break;
5715 }
5716
5717 gen_helper_fp_arith_ST0_FT0(op1);
5718 if (op1 == 3) {
5719 /* fcomp needs pop */
5720 gen_helper_fpop(cpu_env);
5721 }
5722 }
5723 break;
5724 case 0x08: /* flds */
5725 case 0x0a: /* fsts */
5726 case 0x0b: /* fstps */
5727 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5728 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5729 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5730 switch(op & 7) {
5731 case 0:
5732 switch(op >> 4) {
5733 case 0:
5734 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5735 s->mem_index, MO_LEUL);
5736 gen_helper_flds_ST0(cpu_env, cpu_tmp2_i32);
5737 break;
5738 case 1:
5739 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5740 s->mem_index, MO_LEUL);
5741 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5742 break;
5743 case 2:
5744 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0,
5745 s->mem_index, MO_LEQ);
5746 gen_helper_fldl_ST0(cpu_env, cpu_tmp1_i64);
5747 break;
5748 case 3:
5749 default:
5750 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5751 s->mem_index, MO_LESW);
5752 gen_helper_fildl_ST0(cpu_env, cpu_tmp2_i32);
5753 break;
5755 break;
5756 case 1:
5757 /* XXX: the corresponding CPUID bit (SSE3, for fisttp) must be tested! */
5758 switch(op >> 4) {
5759 case 1:
5760 gen_helper_fisttl_ST0(cpu_tmp2_i32, cpu_env);
5761 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5762 s->mem_index, MO_LEUL);
5763 break;
5764 case 2:
5765 gen_helper_fisttll_ST0(cpu_tmp1_i64, cpu_env);
5766 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5767 s->mem_index, MO_LEQ);
5768 break;
5769 case 3:
5770 default:
5771 gen_helper_fistt_ST0(cpu_tmp2_i32, cpu_env);
5772 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5773 s->mem_index, MO_LEUW);
5774 break;
5776 gen_helper_fpop(cpu_env);
5777 break;
5778 default:
5779 switch(op >> 4) {
5780 case 0:
5781 gen_helper_fsts_ST0(cpu_tmp2_i32, cpu_env);
5782 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5783 s->mem_index, MO_LEUL);
5784 break;
5785 case 1:
5786 gen_helper_fistl_ST0(cpu_tmp2_i32, cpu_env);
5787 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5788 s->mem_index, MO_LEUL);
5789 break;
5790 case 2:
5791 gen_helper_fstl_ST0(cpu_tmp1_i64, cpu_env);
5792 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0,
5793 s->mem_index, MO_LEQ);
5794 break;
5795 case 3:
5796 default:
5797 gen_helper_fist_ST0(cpu_tmp2_i32, cpu_env);
5798 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5799 s->mem_index, MO_LEUW);
5800 break;
5802 if ((op & 7) == 3)
5803 gen_helper_fpop(cpu_env);
5804 break;
5806 break;
5807 case 0x0c: /* fldenv mem */
5808 gen_helper_fldenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5809 break;
5810 case 0x0d: /* fldcw mem */
5811 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
5812 s->mem_index, MO_LEUW);
5813 gen_helper_fldcw(cpu_env, cpu_tmp2_i32);
5814 break;
5815 case 0x0e: /* fnstenv mem */
5816 gen_helper_fstenv(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5817 break;
5818 case 0x0f: /* fnstcw mem */
5819 gen_helper_fnstcw(cpu_tmp2_i32, cpu_env);
5820 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5821 s->mem_index, MO_LEUW);
5822 break;
5823 case 0x1d: /* fldt mem */
5824 gen_helper_fldt_ST0(cpu_env, cpu_A0);
5825 break;
5826 case 0x1f: /* fstpt mem */
5827 gen_helper_fstt_ST0(cpu_env, cpu_A0);
5828 gen_helper_fpop(cpu_env);
5829 break;
5830 case 0x2c: /* frstor mem */
5831 gen_helper_frstor(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5832 break;
5833 case 0x2e: /* fnsave mem */
5834 gen_helper_fsave(cpu_env, cpu_A0, tcg_const_i32(dflag - 1));
5835 break;
5836 case 0x2f: /* fnstsw mem */
5837 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
5838 tcg_gen_qemu_st_i32(cpu_tmp2_i32, cpu_A0,
5839 s->mem_index, MO_LEUW);
5840 break;
5841 case 0x3c: /* fbld */
5842 gen_helper_fbld_ST0(cpu_env, cpu_A0);
5843 break;
5844 case 0x3e: /* fbstp */
5845 gen_helper_fbst_ST0(cpu_env, cpu_A0);
5846 gen_helper_fpop(cpu_env);
5847 break;
5848 case 0x3d: /* fildll */
5849 tcg_gen_qemu_ld_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5850 gen_helper_fildll_ST0(cpu_env, cpu_tmp1_i64);
5851 break;
5852 case 0x3f: /* fistpll */
5853 gen_helper_fistll_ST0(cpu_tmp1_i64, cpu_env);
5854 tcg_gen_qemu_st_i64(cpu_tmp1_i64, cpu_A0, s->mem_index, MO_LEQ);
5855 gen_helper_fpop(cpu_env);
5856 break;
5857 default:
5858 goto illegal_op;
5860 } else {
5861 /* register float ops */
5862 opreg = rm;
5864 switch(op) {
5865 case 0x08: /* fld sti */
5866 gen_helper_fpush(cpu_env);
5867 gen_helper_fmov_ST0_STN(cpu_env,
5868 tcg_const_i32((opreg + 1) & 7));
5869 break;
5870 case 0x09: /* fxchg sti */
5871 case 0x29: /* fxchg4 sti, undocumented op */
5872 case 0x39: /* fxchg7 sti, undocumented op */
5873 gen_helper_fxchg_ST0_STN(cpu_env, tcg_const_i32(opreg));
5874 break;
5875 case 0x0a: /* grp d9/2 */
5876 switch(rm) {
5877 case 0: /* fnop */
5878 /* check exceptions (FreeBSD FPU probe) */
5879 gen_helper_fwait(cpu_env);
5880 break;
5881 default:
5882 goto illegal_op;
5884 break;
5885 case 0x0c: /* grp d9/4 */
5886 switch(rm) {
5887 case 0: /* fchs */
5888 gen_helper_fchs_ST0(cpu_env);
5889 break;
5890 case 1: /* fabs */
5891 gen_helper_fabs_ST0(cpu_env);
5892 break;
5893 case 4: /* ftst */
5894 gen_helper_fldz_FT0(cpu_env);
5895 gen_helper_fcom_ST0_FT0(cpu_env);
5896 break;
5897 case 5: /* fxam */
5898 gen_helper_fxam_ST0(cpu_env);
5899 break;
5900 default:
5901 goto illegal_op;
5903 break;
5904 case 0x0d: /* grp d9/5 */
5906 switch(rm) {
5907 case 0:
5908 gen_helper_fpush(cpu_env);
5909 gen_helper_fld1_ST0(cpu_env);
5910 break;
5911 case 1:
5912 gen_helper_fpush(cpu_env);
5913 gen_helper_fldl2t_ST0(cpu_env);
5914 break;
5915 case 2:
5916 gen_helper_fpush(cpu_env);
5917 gen_helper_fldl2e_ST0(cpu_env);
5918 break;
5919 case 3:
5920 gen_helper_fpush(cpu_env);
5921 gen_helper_fldpi_ST0(cpu_env);
5922 break;
5923 case 4:
5924 gen_helper_fpush(cpu_env);
5925 gen_helper_fldlg2_ST0(cpu_env);
5926 break;
5927 case 5:
5928 gen_helper_fpush(cpu_env);
5929 gen_helper_fldln2_ST0(cpu_env);
5930 break;
5931 case 6:
5932 gen_helper_fpush(cpu_env);
5933 gen_helper_fldz_ST0(cpu_env);
5934 break;
5935 default:
5936 goto illegal_op;
5939 break;
5940 case 0x0e: /* grp d9/6 */
5941 switch(rm) {
5942 case 0: /* f2xm1 */
5943 gen_helper_f2xm1(cpu_env);
5944 break;
5945 case 1: /* fyl2x */
5946 gen_helper_fyl2x(cpu_env);
5947 break;
5948 case 2: /* fptan */
5949 gen_helper_fptan(cpu_env);
5950 break;
5951 case 3: /* fpatan */
5952 gen_helper_fpatan(cpu_env);
5953 break;
5954 case 4: /* fxtract */
5955 gen_helper_fxtract(cpu_env);
5956 break;
5957 case 5: /* fprem1 */
5958 gen_helper_fprem1(cpu_env);
5959 break;
5960 case 6: /* fdecstp */
5961 gen_helper_fdecstp(cpu_env);
5962 break;
5963 default:
5964 case 7: /* fincstp */
5965 gen_helper_fincstp(cpu_env);
5966 break;
5968 break;
5969 case 0x0f: /* grp d9/7 */
5970 switch(rm) {
5971 case 0: /* fprem */
5972 gen_helper_fprem(cpu_env);
5973 break;
5974 case 1: /* fyl2xp1 */
5975 gen_helper_fyl2xp1(cpu_env);
5976 break;
5977 case 2: /* fsqrt */
5978 gen_helper_fsqrt(cpu_env);
5979 break;
5980 case 3: /* fsincos */
5981 gen_helper_fsincos(cpu_env);
5982 break;
5983 case 5: /* fscale */
5984 gen_helper_fscale(cpu_env);
5985 break;
5986 case 4: /* frndint */
5987 gen_helper_frndint(cpu_env);
5988 break;
5989 case 6: /* fsin */
5990 gen_helper_fsin(cpu_env);
5991 break;
5992 default:
5993 case 7: /* fcos */
5994 gen_helper_fcos(cpu_env);
5995 break;
5997 break;
5998 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5999 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
6000 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
6002 int op1;
6004 op1 = op & 7;
6005 if (op >= 0x20) {
6006 gen_helper_fp_arith_STN_ST0(op1, opreg);
6007 if (op >= 0x30)
6008 gen_helper_fpop(cpu_env);
6009 } else {
6010 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6011 gen_helper_fp_arith_ST0_FT0(op1);
6014 break;
6015 case 0x02: /* fcom */
6016 case 0x22: /* fcom2, undocumented op */
6017 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6018 gen_helper_fcom_ST0_FT0(cpu_env);
6019 break;
6020 case 0x03: /* fcomp */
6021 case 0x23: /* fcomp3, undocumented op */
6022 case 0x32: /* fcomp5, undocumented op */
6023 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6024 gen_helper_fcom_ST0_FT0(cpu_env);
6025 gen_helper_fpop(cpu_env);
6026 break;
6027 case 0x15: /* da/5 */
6028 switch(rm) {
6029 case 1: /* fucompp */
6030 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6031 gen_helper_fucom_ST0_FT0(cpu_env);
6032 gen_helper_fpop(cpu_env);
6033 gen_helper_fpop(cpu_env);
6034 break;
6035 default:
6036 goto illegal_op;
6038 break;
6039 case 0x1c:
6040 switch(rm) {
6041 case 0: /* feni (287 only, just do nop here) */
6042 break;
6043 case 1: /* fdisi (287 only, just do nop here) */
6044 break;
6045 case 2: /* fclex */
6046 gen_helper_fclex(cpu_env);
6047 break;
6048 case 3: /* fninit */
6049 gen_helper_fninit(cpu_env);
6050 break;
6051 case 4: /* fsetpm (287 only, just do nop here) */
6052 break;
6053 default:
6054 goto illegal_op;
6056 break;
6057 case 0x1d: /* fucomi */
6058 if (!(s->cpuid_features & CPUID_CMOV)) {
6059 goto illegal_op;
6061 gen_update_cc_op(s);
6062 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6063 gen_helper_fucomi_ST0_FT0(cpu_env);
6064 set_cc_op(s, CC_OP_EFLAGS);
6065 break;
6066 case 0x1e: /* fcomi */
6067 if (!(s->cpuid_features & CPUID_CMOV)) {
6068 goto illegal_op;
6070 gen_update_cc_op(s);
6071 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6072 gen_helper_fcomi_ST0_FT0(cpu_env);
6073 set_cc_op(s, CC_OP_EFLAGS);
6074 break;
6075 case 0x28: /* ffree sti */
6076 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6077 break;
6078 case 0x2a: /* fst sti */
6079 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6080 break;
6081 case 0x2b: /* fstp sti */
6082 case 0x0b: /* fstp1 sti, undocumented op */
6083 case 0x3a: /* fstp8 sti, undocumented op */
6084 case 0x3b: /* fstp9 sti, undocumented op */
6085 gen_helper_fmov_STN_ST0(cpu_env, tcg_const_i32(opreg));
6086 gen_helper_fpop(cpu_env);
6087 break;
6088 case 0x2c: /* fucom st(i) */
6089 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6090 gen_helper_fucom_ST0_FT0(cpu_env);
6091 break;
6092 case 0x2d: /* fucomp st(i) */
6093 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6094 gen_helper_fucom_ST0_FT0(cpu_env);
6095 gen_helper_fpop(cpu_env);
6096 break;
6097 case 0x33: /* de/3 */
6098 switch(rm) {
6099 case 1: /* fcompp */
6100 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(1));
6101 gen_helper_fcom_ST0_FT0(cpu_env);
6102 gen_helper_fpop(cpu_env);
6103 gen_helper_fpop(cpu_env);
6104 break;
6105 default:
6106 goto illegal_op;
6108 break;
6109 case 0x38: /* ffreep sti, undocumented op */
6110 gen_helper_ffree_STN(cpu_env, tcg_const_i32(opreg));
6111 gen_helper_fpop(cpu_env);
6112 break;
6113 case 0x3c: /* df/4 */
6114 switch(rm) {
6115 case 0:
6116 gen_helper_fnstsw(cpu_tmp2_i32, cpu_env);
6117 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
6118 gen_op_mov_reg_v(MO_16, R_EAX, cpu_T[0]);
6119 break;
6120 default:
6121 goto illegal_op;
6123 break;
6124 case 0x3d: /* fucomip */
6125 if (!(s->cpuid_features & CPUID_CMOV)) {
6126 goto illegal_op;
6128 gen_update_cc_op(s);
6129 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6130 gen_helper_fucomi_ST0_FT0(cpu_env);
6131 gen_helper_fpop(cpu_env);
6132 set_cc_op(s, CC_OP_EFLAGS);
6133 break;
6134 case 0x3e: /* fcomip */
6135 if (!(s->cpuid_features & CPUID_CMOV)) {
6136 goto illegal_op;
6138 gen_update_cc_op(s);
6139 gen_helper_fmov_FT0_STN(cpu_env, tcg_const_i32(opreg));
6140 gen_helper_fcomi_ST0_FT0(cpu_env);
6141 gen_helper_fpop(cpu_env);
6142 set_cc_op(s, CC_OP_EFLAGS);
6143 break;
6144 case 0x10 ... 0x13: /* fcmovxx */
6145 case 0x18 ... 0x1b:
6146 {
6147 int op1;
6148 TCGLabel *l1;
6149 static const uint8_t fcmov_cc[8] = {
6150 (JCC_B << 1),
6151 (JCC_Z << 1),
6152 (JCC_BE << 1),
6153 (JCC_P << 1),
6154 };
6156 if (!(s->cpuid_features & CPUID_CMOV)) {
6157 goto illegal_op;
6158 }
6159 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
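/* fcmov_cc maps the low two opcode bits to the B/Z/BE/P jcc codes,
   shifted left so the low bit of op1 can request negation.  The
   branch below *skips* the fmov, so for 0x10-0x13 the sense is
   inverted and the move happens exactly when the fcmov condition
   holds; 0x18-0x1b are the fcmovncc forms. */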
6160 l1 = gen_new_label();
6161 gen_jcc1_noeob(s, op1, l1);
6162 gen_helper_fmov_ST0_STN(cpu_env, tcg_const_i32(opreg));
6163 gen_set_label(l1);
6164 }
6165 break;
6166 default:
6167 goto illegal_op;
6168 }
6169 }
6170 break;
6171 /************************/
6172 /* string ops */
6174 case 0xa4: /* movsS */
6175 case 0xa5:
6176 ot = mo_b_d(b, dflag);
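/* Even opcodes are the byte forms; odd ones use the current operand
   size (mo_b_d).  With a REP/REPNE prefix, the gen_repz_* helper
   emits an ECX-driven loop instead of a single iteration. */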
6177 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6178 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6179 } else {
6180 gen_movs(s, ot);
6181 }
6182 break;
6184 case 0xaa: /* stosS */
6185 case 0xab:
6186 ot = mo_b_d(b, dflag);
6187 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6188 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6189 } else {
6190 gen_stos(s, ot);
6191 }
6192 break;
6193 case 0xac: /* lodsS */
6194 case 0xad:
6195 ot = mo_b_d(b, dflag);
6196 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6197 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6198 } else {
6199 gen_lods(s, ot);
6200 }
6201 break;
6202 case 0xae: /* scasS */
6203 case 0xaf:
6204 ot = mo_b_d(b, dflag);
6205 if (prefixes & PREFIX_REPNZ) {
6206 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6207 } else if (prefixes & PREFIX_REPZ) {
6208 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6209 } else {
6210 gen_scas(s, ot);
6211 }
6212 break;
6214 case 0xa6: /* cmpsS */
6215 case 0xa7:
6216 ot = mo_b_d(b, dflag);
6217 if (prefixes & PREFIX_REPNZ) {
6218 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6219 } else if (prefixes & PREFIX_REPZ) {
6220 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6221 } else {
6222 gen_cmps(s, ot);
6223 }
6224 break;
6225 case 0x6c: /* insS */
6226 case 0x6d:
6227 ot = mo_b_d32(b, dflag);
6228 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6229 gen_check_io(s, ot, pc_start - s->cs_base,
6230 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
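/* The argument above builds the SVM IOIO exit info for the
   intercept check: SVM_IOIO_TYPE_MASK marks an IN (read) access,
   the "| 4" bit flags a string instruction, and svm_is_rep()
   contributes the REP bit, per the SVM IOIO exit info layout. */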
6231 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6232 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6233 } else {
6234 gen_ins(s, ot);
6235 if (s->tb->cflags & CF_USE_ICOUNT) {
6236 gen_jmp(s, s->pc - s->cs_base);
6237 }
6238 }
6239 break;
6240 case 0x6e: /* outsS */
6241 case 0x6f:
6242 ot = mo_b_d32(b, dflag);
6243 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6244 gen_check_io(s, ot, pc_start - s->cs_base,
6245 svm_is_rep(prefixes) | 4);
6246 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6247 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6248 } else {
6249 gen_outs(s, ot);
6250 if (s->tb->cflags & CF_USE_ICOUNT) {
6251 gen_jmp(s, s->pc - s->cs_base);
6252 }
6253 }
6254 break;
6256 /************************/
6257 /* port I/O */
6259 case 0xe4:
6260 case 0xe5:
6261 ot = mo_b_d32(b, dflag);
6262 val = cpu_ldub_code(env, s->pc++);
6263 tcg_gen_movi_tl(cpu_T[0], val);
6264 gen_check_io(s, ot, pc_start - s->cs_base,
6265 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
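/* When icount is active, device access must be bracketed by
   gen_io_start()/gen_io_end() and the TB ended afterwards so the
   virtual clock can be adjusted around the I/O. */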
6266 if (s->tb->cflags & CF_USE_ICOUNT) {
6267 gen_io_start();
6268 }
6269 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6270 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6271 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6272 if (s->tb->cflags & CF_USE_ICOUNT) {
6273 gen_io_end();
6274 gen_jmp(s, s->pc - s->cs_base);
6275 }
6276 break;
6277 case 0xe6:
6278 case 0xe7:
6279 ot = mo_b_d32(b, dflag);
6280 val = cpu_ldub_code(env, s->pc++);
6281 tcg_gen_movi_tl(cpu_T[0], val);
6282 gen_check_io(s, ot, pc_start - s->cs_base,
6283 svm_is_rep(prefixes));
6284 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6286 if (s->tb->cflags & CF_USE_ICOUNT) {
6287 gen_io_start();
6288 }
6289 tcg_gen_movi_i32(cpu_tmp2_i32, val);
6290 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6291 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6292 if (s->tb->cflags & CF_USE_ICOUNT) {
6293 gen_io_end();
6294 gen_jmp(s, s->pc - s->cs_base);
6295 }
6296 break;
6297 case 0xec:
6298 case 0xed:
6299 ot = mo_b_d32(b, dflag);
6300 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6301 gen_check_io(s, ot, pc_start - s->cs_base,
6302 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6303 if (s->tb->cflags & CF_USE_ICOUNT) {
6304 gen_io_start();
6305 }
6306 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6307 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6308 gen_op_mov_reg_v(ot, R_EAX, cpu_T[1]);
6309 if (s->tb->cflags & CF_USE_ICOUNT) {
6310 gen_io_end();
6311 gen_jmp(s, s->pc - s->cs_base);
6312 }
6313 break;
6314 case 0xee:
6315 case 0xef:
6316 ot = mo_b_d32(b, dflag);
6317 tcg_gen_ext16u_tl(cpu_T[0], cpu_regs[R_EDX]);
6318 gen_check_io(s, ot, pc_start - s->cs_base,
6319 svm_is_rep(prefixes));
6320 gen_op_mov_v_reg(ot, cpu_T[1], R_EAX);
6322 if (s->tb->cflags & CF_USE_ICOUNT) {
6323 gen_io_start();
6324 }
6325 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6326 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6327 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6328 if (s->tb->cflags & CF_USE_ICOUNT) {
6329 gen_io_end();
6330 gen_jmp(s, s->pc - s->cs_base);
6331 }
6332 break;
6334 /************************/
6335 /* control */
6336 case 0xc2: /* ret im */
6337 val = cpu_ldsw_code(env, s->pc);
6338 s->pc += 2;
6339 ot = gen_pop_T0(s);
6340 gen_stack_update(s, val + (1 << ot));
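/* The immediate releases val extra bytes of stack on top of the
   (1 << ot)-byte return address that was just popped. */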
6341 /* Note that gen_pop_T0 uses a zero-extending load. */
6342 gen_op_jmp_v(cpu_T[0]);
6343 gen_eob(s);
6344 break;
6345 case 0xc3: /* ret */
6346 ot = gen_pop_T0(s);
6347 gen_pop_update(s, ot);
6348 /* Note that gen_pop_T0 uses a zero-extending load. */
6349 gen_op_jmp_v(cpu_T[0]);
6350 gen_eob(s);
6351 break;
6352 case 0xca: /* lret im */
6353 val = cpu_ldsw_code(env, s->pc);
6354 s->pc += 2;
6355 do_lret:
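/* In protected mode the helper performs the full privilege and
   segment checks; otherwise the offset and CS selector are popped
   by hand and SP is advanced past them plus the immediate. */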
6356 if (s->pe && !s->vm86) {
6357 gen_update_cc_op(s);
6358 gen_jmp_im(pc_start - s->cs_base);
6359 gen_helper_lret_protected(cpu_env, tcg_const_i32(dflag - 1),
6360 tcg_const_i32(val));
6361 } else {
6362 gen_stack_A0(s);
6363 /* pop offset */
6364 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6365 /* NOTE: keeping EIP updated is not a problem in case of
6366 exception */
6367 gen_op_jmp_v(cpu_T[0]);
6368 /* pop selector */
6369 gen_op_addl_A0_im(1 << dflag);
6370 gen_op_ld_v(s, dflag, cpu_T[0], cpu_A0);
6371 gen_op_movl_seg_T0_vm(R_CS);
6372 /* add stack offset */
6373 gen_stack_update(s, val + (2 << dflag));
6374 }
6375 gen_eob(s);
6376 break;
6377 case 0xcb: /* lret */
6378 val = 0;
6379 goto do_lret;
6380 case 0xcf: /* iret */
6381 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6382 if (!s->pe) {
6383 /* real mode */
6384 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6385 set_cc_op(s, CC_OP_EFLAGS);
6386 } else if (s->vm86) {
6387 if (s->iopl != 3) {
6388 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6389 } else {
6390 gen_helper_iret_real(cpu_env, tcg_const_i32(dflag - 1));
6391 set_cc_op(s, CC_OP_EFLAGS);
6392 }
6393 } else {
6394 gen_helper_iret_protected(cpu_env, tcg_const_i32(dflag - 1),
6395 tcg_const_i32(s->pc - s->cs_base));
6396 set_cc_op(s, CC_OP_EFLAGS);
6397 }
6398 gen_eob(s);
6399 break;
6400 case 0xe8: /* call im */
6401 {
6402 if (dflag != MO_16) {
6403 tval = (int32_t)insn_get(env, s, MO_32);
6404 } else {
6405 tval = (int16_t)insn_get(env, s, MO_16);
6406 }
6407 next_eip = s->pc - s->cs_base;
6408 tval += next_eip;
6409 if (dflag == MO_16) {
6410 tval &= 0xffff;
6411 } else if (!CODE64(s)) {
6412 tval &= 0xffffffff;
6413 }
6414 tcg_gen_movi_tl(cpu_T[0], next_eip);
6415 gen_push_v(s, cpu_T[0]);
6416 gen_jmp(s, tval);
6417 }
6418 break;
6419 case 0x9a: /* lcall im */
6420 {
6421 unsigned int selector, offset;
6423 if (CODE64(s))
6424 goto illegal_op;
6425 ot = dflag;
6426 offset = insn_get(env, s, ot);
6427 selector = insn_get(env, s, MO_16);
6429 tcg_gen_movi_tl(cpu_T[0], selector);
6430 tcg_gen_movi_tl(cpu_T[1], offset);
6432 goto do_lcall;
6433 case 0xe9: /* jmp im */
6434 if (dflag != MO_16) {
6435 tval = (int32_t)insn_get(env, s, MO_32);
6436 } else {
6437 tval = (int16_t)insn_get(env, s, MO_16);
6438 }
6439 tval += s->pc - s->cs_base;
6440 if (dflag == MO_16) {
6441 tval &= 0xffff;
6442 } else if (!CODE64(s)) {
6443 tval &= 0xffffffff;
6444 }
6445 gen_jmp(s, tval);
6446 break;
6447 case 0xea: /* ljmp im */
6448 {
6449 unsigned int selector, offset;
6451 if (CODE64(s))
6452 goto illegal_op;
6453 ot = dflag;
6454 offset = insn_get(env, s, ot);
6455 selector = insn_get(env, s, MO_16);
6457 tcg_gen_movi_tl(cpu_T[0], selector);
6458 tcg_gen_movi_tl(cpu_T[1], offset);
6460 goto do_ljmp;
6461 case 0xeb: /* jmp Jb */
6462 tval = (int8_t)insn_get(env, s, MO_8);
6463 tval += s->pc - s->cs_base;
6464 if (dflag == MO_16) {
6465 tval &= 0xffff;
6466 }
6467 gen_jmp(s, tval);
6468 break;
6469 case 0x70 ... 0x7f: /* jcc Jb */
6470 tval = (int8_t)insn_get(env, s, MO_8);
6471 goto do_jcc;
6472 case 0x180 ... 0x18f: /* jcc Jv */
6473 if (dflag != MO_16) {
6474 tval = (int32_t)insn_get(env, s, MO_32);
6475 } else {
6476 tval = (int16_t)insn_get(env, s, MO_16);
6477 }
6478 do_jcc:
6479 next_eip = s->pc - s->cs_base;
6480 tval += next_eip;
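/* tval now holds the absolute target: the sign-extended
   displacement added to the address of the next instruction,
   masked below to 16 bits when the operand size is 16-bit. */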
6481 if (dflag == MO_16) {
6482 tval &= 0xffff;
6483 }
6484 gen_jcc(s, b, tval, next_eip);
6485 break;
6487 case 0x190 ... 0x19f: /* setcc Gv */
6488 modrm = cpu_ldub_code(env, s->pc++);
6489 gen_setcc1(s, b, cpu_T[0]);
6490 gen_ldst_modrm(env, s, modrm, MO_8, OR_TMP0, 1);
6491 break;
6492 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6493 if (!(s->cpuid_features & CPUID_CMOV)) {
6494 goto illegal_op;
6495 }
6496 ot = dflag;
6497 modrm = cpu_ldub_code(env, s->pc++);
6498 reg = ((modrm >> 3) & 7) | rex_r;
6499 gen_cmovcc1(env, s, ot, b, modrm, reg);
6500 break;
6502 /************************/
6503 /* flags */
6504 case 0x9c: /* pushf */
6505 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6506 if (s->vm86 && s->iopl != 3) {
6507 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6508 } else {
6509 gen_update_cc_op(s);
6510 gen_helper_read_eflags(cpu_T[0], cpu_env);
6511 gen_push_v(s, cpu_T[0]);
6512 }
6513 break;
6514 case 0x9d: /* popf */
6515 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6516 if (s->vm86 && s->iopl != 3) {
6517 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6518 } else {
6519 ot = gen_pop_T0(s);
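/* The writable EFLAGS mask depends on privilege: CPL 0 may change
   IOPL and IF; CPL <= IOPL may change IF but not IOPL; otherwise
   neither may change.  16-bit writes are further masked to the
   low half. */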
6520 if (s->cpl == 0) {
6521 if (dflag != MO_16) {
6522 gen_helper_write_eflags(cpu_env, cpu_T[0],
6523 tcg_const_i32((TF_MASK | AC_MASK |
6524 ID_MASK | NT_MASK |
6525 IF_MASK |
6526 IOPL_MASK)));
6527 } else {
6528 gen_helper_write_eflags(cpu_env, cpu_T[0],
6529 tcg_const_i32((TF_MASK | AC_MASK |
6530 ID_MASK | NT_MASK |
6531 IF_MASK | IOPL_MASK)
6532 & 0xffff));
6533 }
6534 } else {
6535 if (s->cpl <= s->iopl) {
6536 if (dflag != MO_16) {
6537 gen_helper_write_eflags(cpu_env, cpu_T[0],
6538 tcg_const_i32((TF_MASK |
6539 AC_MASK |
6540 ID_MASK |
6541 NT_MASK |
6542 IF_MASK)));
6543 } else {
6544 gen_helper_write_eflags(cpu_env, cpu_T[0],
6545 tcg_const_i32((TF_MASK |
6546 AC_MASK |
6547 ID_MASK |
6548 NT_MASK |
6549 IF_MASK)
6550 & 0xffff));
6551 }
6552 } else {
6553 if (dflag != MO_16) {
6554 gen_helper_write_eflags(cpu_env, cpu_T[0],
6555 tcg_const_i32((TF_MASK | AC_MASK |
6556 ID_MASK | NT_MASK)));
6557 } else {
6558 gen_helper_write_eflags(cpu_env, cpu_T[0],
6559 tcg_const_i32((TF_MASK | AC_MASK |
6560 ID_MASK | NT_MASK)
6561 & 0xffff));
6562 }
6563 }
6564 }
6565 gen_pop_update(s, ot);
6566 set_cc_op(s, CC_OP_EFLAGS);
6567 /* abort translation because TF/AC flag may change */
6568 gen_jmp_im(s->pc - s->cs_base);
6569 gen_eob(s);
6570 }
6571 break;
6572 case 0x9e: /* sahf */
6573 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6574 goto illegal_op;
6575 gen_op_mov_v_reg(MO_8, cpu_T[0], R_AH);
6576 gen_compute_eflags(s);
6577 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6578 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6579 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6580 break;
6581 case 0x9f: /* lahf */
6582 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6583 goto illegal_op;
6584 gen_compute_eflags(s);
6585 /* Note: gen_compute_eflags() only gives the condition codes */
6586 tcg_gen_ori_tl(cpu_T[0], cpu_cc_src, 0x02);
6587 gen_op_mov_reg_v(MO_8, R_AH, cpu_T[0]);
6588 break;
6589 case 0xf5: /* cmc */
6590 gen_compute_eflags(s);
6591 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6592 break;
6593 case 0xf8: /* clc */
6594 gen_compute_eflags(s);
6595 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6596 break;
6597 case 0xf9: /* stc */
6598 gen_compute_eflags(s);
6599 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6600 break;
6601 case 0xfc: /* cld */
6602 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6603 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6604 break;
6605 case 0xfd: /* std */
6606 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6607 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUX86State, df));
6608 break;
6610 /************************/
6611 /* bit operations */
6612 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6613 ot = dflag;
6614 modrm = cpu_ldub_code(env, s->pc++);
6615 op = (modrm >> 3) & 7;
6616 mod = (modrm >> 6) & 3;
6617 rm = (modrm & 7) | REX_B(s);
6618 if (mod != 3) {
6619 s->rip_offset = 1;
6620 gen_lea_modrm(env, s, modrm);
6621 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6622 } else {
6623 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6624 }
6625 /* load shift */
6626 val = cpu_ldub_code(env, s->pc++);
6627 tcg_gen_movi_tl(cpu_T[1], val);
6628 if (op < 4)
6629 goto illegal_op;
6630 op -= 4;
6631 goto bt_op;
6632 case 0x1a3: /* bt Gv, Ev */
6633 op = 0;
6634 goto do_btx;
6635 case 0x1ab: /* bts */
6636 op = 1;
6637 goto do_btx;
6638 case 0x1b3: /* btr */
6639 op = 2;
6640 goto do_btx;
6641 case 0x1bb: /* btc */
6642 op = 3;
6643 do_btx:
6644 ot = dflag;
6645 modrm = cpu_ldub_code(env, s->pc++);
6646 reg = ((modrm >> 3) & 7) | rex_r;
6647 mod = (modrm >> 6) & 3;
6648 rm = (modrm & 7) | REX_B(s);
6649 gen_op_mov_v_reg(MO_32, cpu_T[1], reg);
6650 if (mod != 3) {
6651 gen_lea_modrm(env, s, modrm);
6652 /* specific case: we need to add a displacement */
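/* The sign-extended bit index in T1 is split into a word index
   (sari by 3 + ot) scaled to a byte offset (shli by ot), so that
   bt on a memory operand can address bits outside the word at the
   effective address. */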
6653 gen_exts(ot, cpu_T[1]);
6654 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6655 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6656 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6657 gen_op_ld_v(s, ot, cpu_T[0], cpu_A0);
6658 } else {
6659 gen_op_mov_v_reg(ot, cpu_T[0], rm);
6660 }
6661 bt_op:
6662 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6663 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6664 switch(op) {
6665 case 0:
6666 break;
6667 case 1:
6668 tcg_gen_movi_tl(cpu_tmp0, 1);
6669 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6670 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6671 break;
6672 case 2:
6673 tcg_gen_movi_tl(cpu_tmp0, 1);
6674 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6675 tcg_gen_andc_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6676 break;
6677 default:
6678 case 3:
6679 tcg_gen_movi_tl(cpu_tmp0, 1);
6680 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6681 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6682 break;
6683 }
6684 if (op != 0) {
6685 if (mod != 3) {
6686 gen_op_st_v(s, ot, cpu_T[0], cpu_A0);
6687 } else {
6688 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
6689 }
6690 }
6692 /* Delay all CC updates until after the store above. Note that
6693 C is the result of the test, Z is unchanged, and the others
6694 are all undefined. */
6695 switch (s->cc_op) {
6696 case CC_OP_MULB ... CC_OP_MULQ:
6697 case CC_OP_ADDB ... CC_OP_ADDQ:
6698 case CC_OP_ADCB ... CC_OP_ADCQ:
6699 case CC_OP_SUBB ... CC_OP_SUBQ:
6700 case CC_OP_SBBB ... CC_OP_SBBQ:
6701 case CC_OP_LOGICB ... CC_OP_LOGICQ:
6702 case CC_OP_INCB ... CC_OP_INCQ:
6703 case CC_OP_DECB ... CC_OP_DECQ:
6704 case CC_OP_SHLB ... CC_OP_SHLQ:
6705 case CC_OP_SARB ... CC_OP_SARQ:
6706 case CC_OP_BMILGB ... CC_OP_BMILGQ:
6707 /* Z was going to be computed from the non-zero status of CC_DST.
6708 We can get that same Z value (and the new C value) by leaving
6709 CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
6710 same width. */
6711 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6712 set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
6713 break;
6714 default:
6715 /* Otherwise, generate EFLAGS and replace the C bit. */
6716 gen_compute_eflags(s);
6717 tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, cpu_tmp4,
6718 ctz32(CC_C), 1);
6719 break;
6720 }
6721 break;
6722 case 0x1bc: /* bsf / tzcnt */
6723 case 0x1bd: /* bsr / lzcnt */
6724 ot = dflag;
6725 modrm = cpu_ldub_code(env, s->pc++);
6726 reg = ((modrm >> 3) & 7) | rex_r;
6727 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
6728 gen_extu(ot, cpu_T[0]);
6730 /* Note that lzcnt and tzcnt are in different extensions. */
6731 if ((prefixes & PREFIX_REPZ)
6732 && (b & 1
6733 ? s->cpuid_ext3_features & CPUID_EXT3_ABM
6734 : s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1)) {
6735 int size = 8 << ot;
6736 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
6737 if (b & 1) {
6738 /* For lzcnt, reduce the target_ulong result by the
6739 number of zeros that we expect to find at the top. */
6740 gen_helper_clz(cpu_T[0], cpu_T[0]);
6741 tcg_gen_subi_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - size);
6742 } else {
6743 /* For tzcnt, a zero input must return the operand size:
6744 force all bits outside the operand size to 1. */
6745 target_ulong mask = (target_ulong)-2 << (size - 1);
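/* e.g. for a 16-bit operand on a 32-bit target the mask is
   0xffff0000, so ctz of a zero input yields 16, the operand
   size. */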
6746 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], mask);
6747 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6748 }
6749 /* For lzcnt/tzcnt, C and Z bits are defined and are
6750 related to the result. */
6751 gen_op_update1_cc();
6752 set_cc_op(s, CC_OP_BMILGB + ot);
6753 } else {
6754 /* For bsr/bsf, only the Z bit is defined and it is related
6755 to the input and not the result. */
6756 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
6757 set_cc_op(s, CC_OP_LOGICB + ot);
6758 if (b & 1) {
6759 /* For bsr, return the bit index of the first 1 bit,
6760 not the count of leading zeros. */
6761 gen_helper_clz(cpu_T[0], cpu_T[0]);
6762 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], TARGET_LONG_BITS - 1);
6763 } else {
6764 gen_helper_ctz(cpu_T[0], cpu_T[0]);
6765 }
6766 /* ??? The manual says that the output is undefined when the
6767 input is zero, but real hardware leaves it unchanged, and
6768 real programs appear to depend on that. */
6769 tcg_gen_movi_tl(cpu_tmp0, 0);
6770 tcg_gen_movcond_tl(TCG_COND_EQ, cpu_T[0], cpu_cc_dst, cpu_tmp0,
6771 cpu_regs[reg], cpu_T[0]);
6772 }
6773 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
6774 break;
6775 /************************/
6776 /* bcd */
6777 case 0x27: /* daa */
6778 if (CODE64(s))
6779 goto illegal_op;
6780 gen_update_cc_op(s);
6781 gen_helper_daa(cpu_env);
6782 set_cc_op(s, CC_OP_EFLAGS);
6783 break;
6784 case 0x2f: /* das */
6785 if (CODE64(s))
6786 goto illegal_op;
6787 gen_update_cc_op(s);
6788 gen_helper_das(cpu_env);
6789 set_cc_op(s, CC_OP_EFLAGS);
6790 break;
6791 case 0x37: /* aaa */
6792 if (CODE64(s))
6793 goto illegal_op;
6794 gen_update_cc_op(s);
6795 gen_helper_aaa(cpu_env);
6796 set_cc_op(s, CC_OP_EFLAGS);
6797 break;
6798 case 0x3f: /* aas */
6799 if (CODE64(s))
6800 goto illegal_op;
6801 gen_update_cc_op(s);
6802 gen_helper_aas(cpu_env);
6803 set_cc_op(s, CC_OP_EFLAGS);
6804 break;
6805 case 0xd4: /* aam */
6806 if (CODE64(s))
6807 goto illegal_op;
6808 val = cpu_ldub_code(env, s->pc++);
6809 if (val == 0) {
6810 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6811 } else {
6812 gen_helper_aam(cpu_env, tcg_const_i32(val));
6813 set_cc_op(s, CC_OP_LOGICB);
6814 }
6815 break;
6816 case 0xd5: /* aad */
6817 if (CODE64(s))
6818 goto illegal_op;
6819 val = cpu_ldub_code(env, s->pc++);
6820 gen_helper_aad(cpu_env, tcg_const_i32(val));
6821 set_cc_op(s, CC_OP_LOGICB);
6822 break;
6823 /************************/
6824 /* misc */
6825 case 0x90: /* nop */
6826 /* XXX: correct lock test for all insn */
6827 if (prefixes & PREFIX_LOCK) {
6828 goto illegal_op;
6829 }
6830 /* If REX_B is set, then this is xchg eax, r8d, not a nop. */
6831 if (REX_B(s)) {
6832 goto do_xchg_reg_eax;
6833 }
6834 if (prefixes & PREFIX_REPZ) {
6835 gen_update_cc_op(s);
6836 gen_jmp_im(pc_start - s->cs_base);
6837 gen_helper_pause(cpu_env, tcg_const_i32(s->pc - pc_start));
6838 s->is_jmp = DISAS_TB_JUMP;
6839 }
6840 break;
6841 case 0x9b: /* fwait */
6842 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6843 (HF_MP_MASK | HF_TS_MASK)) {
6844 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6845 } else {
6846 gen_helper_fwait(cpu_env);
6847 }
6848 break;
6849 case 0xcc: /* int3 */
6850 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6851 break;
6852 case 0xcd: /* int N */
6853 val = cpu_ldub_code(env, s->pc++);
6854 if (s->vm86 && s->iopl != 3) {
6855 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6856 } else {
6857 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6858 }
6859 break;
6860 case 0xce: /* into */
6861 if (CODE64(s))
6862 goto illegal_op;
6863 gen_update_cc_op(s);
6864 gen_jmp_im(pc_start - s->cs_base);
6865 gen_helper_into(cpu_env, tcg_const_i32(s->pc - pc_start));
6866 break;
6867 #ifdef WANT_ICEBP
6868 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6869 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6870 #if 1
6871 gen_debug(s, pc_start - s->cs_base);
6872 #else
6873 /* start debug */
6874 tb_flush(CPU(x86_env_get_cpu(env)));
6875 qemu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6876 #endif
6877 break;
6878 #endif
6879 case 0xfa: /* cli */
6880 if (!s->vm86) {
6881 if (s->cpl <= s->iopl) {
6882 gen_helper_cli(cpu_env);
6883 } else {
6884 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6885 }
6886 } else {
6887 if (s->iopl == 3) {
6888 gen_helper_cli(cpu_env);
6889 } else {
6890 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6891 }
6892 }
6893 break;
6894 case 0xfb: /* sti */
6895 if (!s->vm86) {
6896 if (s->cpl <= s->iopl) {
6897 gen_sti:
6898 gen_helper_sti(cpu_env);
6899 /* interrupts are recognized only after the first insn following sti */
6900 /* If several consecutive insns would inhibit interrupts, only the
6901 _first_ one sets the inhibit flag */
6902 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6903 gen_helper_set_inhibit_irq(cpu_env);
6904 /* give a chance to handle pending irqs */
6905 gen_jmp_im(s->pc - s->cs_base);
6906 gen_eob(s);
6907 } else {
6908 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6909 }
6910 } else {
6911 if (s->iopl == 3) {
6912 goto gen_sti;
6913 } else {
6914 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6915 }
6916 }
6917 break;
6918 case 0x62: /* bound */
6919 if (CODE64(s))
6920 goto illegal_op;
6921 ot = dflag;
6922 modrm = cpu_ldub_code(env, s->pc++);
6923 reg = (modrm >> 3) & 7;
6924 mod = (modrm >> 6) & 3;
6925 if (mod == 3)
6926 goto illegal_op;
6927 gen_op_mov_v_reg(ot, cpu_T[0], reg);
6928 gen_lea_modrm(env, s, modrm);
6929 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6930 if (ot == MO_16) {
6931 gen_helper_boundw(cpu_env, cpu_A0, cpu_tmp2_i32);
6932 } else {
6933 gen_helper_boundl(cpu_env, cpu_A0, cpu_tmp2_i32);
6934 }
6935 break;
6936 case 0x1c8 ... 0x1cf: /* bswap reg */
6937 reg = (b & 7) | REX_B(s);
6938 #ifdef TARGET_X86_64
6939 if (dflag == MO_64) {
6940 gen_op_mov_v_reg(MO_64, cpu_T[0], reg);
6941 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6942 gen_op_mov_reg_v(MO_64, reg, cpu_T[0]);
6943 } else
6944 #endif
6945 {
6946 gen_op_mov_v_reg(MO_32, cpu_T[0], reg);
6947 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6948 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6949 gen_op_mov_reg_v(MO_32, reg, cpu_T[0]);
6950 }
6951 break;
6952 case 0xd6: /* salc */
6953 if (CODE64(s))
6954 goto illegal_op;
6955 gen_compute_eflags_c(s, cpu_T[0]);
6956 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6957 gen_op_mov_reg_v(MO_8, R_EAX, cpu_T[0]);
6958 break;
6959 case 0xe0: /* loopnz */
6960 case 0xe1: /* loopz */
6961 case 0xe2: /* loop */
6962 case 0xe3: /* jecxz */
6963 {
6964 TCGLabel *l1, *l2, *l3;
6966 tval = (int8_t)insn_get(env, s, MO_8);
6967 next_eip = s->pc - s->cs_base;
6968 tval += next_eip;
6969 if (dflag == MO_16) {
6970 tval &= 0xffff;
6971 }
6973 l1 = gen_new_label();
6974 l2 = gen_new_label();
6975 l3 = gen_new_label();
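/* l1 receives the taken branch (jump to tval), l3 the explicit
   not-taken path (fall through to next_eip), and l2 joins both
   before gen_eob(). */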
6976 b &= 3;
6977 switch(b) {
6978 case 0: /* loopnz */
6979 case 1: /* loopz */
6980 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6981 gen_op_jz_ecx(s->aflag, l3);
6982 gen_jcc1(s, (JCC_Z << 1) | (b ^ 1), l1);
6983 break;
6984 case 2: /* loop */
6985 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6986 gen_op_jnz_ecx(s->aflag, l1);
6987 break;
6988 default:
6989 case 3: /* jecxz */
6990 gen_op_jz_ecx(s->aflag, l1);
6991 break;
6992 }
6994 gen_set_label(l3);
6995 gen_jmp_im(next_eip);
6996 tcg_gen_br(l2);
6998 gen_set_label(l1);
6999 gen_jmp_im(tval);
7000 gen_set_label(l2);
7001 gen_eob(s);
7002 }
7003 break;
7004 case 0x130: /* wrmsr */
7005 case 0x132: /* rdmsr */
7006 if (s->cpl != 0) {
7007 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7008 } else {
7009 gen_update_cc_op(s);
7010 gen_jmp_im(pc_start - s->cs_base);
7011 if (b & 2) {
7012 gen_helper_rdmsr(cpu_env);
7013 } else {
7014 gen_helper_wrmsr(cpu_env);
7015 }
7016 }
7017 break;
7018 case 0x131: /* rdtsc */
7019 gen_update_cc_op(s);
7020 gen_jmp_im(pc_start - s->cs_base);
7021 if (s->tb->cflags & CF_USE_ICOUNT) {
7022 gen_io_start();
7023 }
7024 gen_helper_rdtsc(cpu_env);
7025 if (s->tb->cflags & CF_USE_ICOUNT) {
7026 gen_io_end();
7027 gen_jmp(s, s->pc - s->cs_base);
7028 }
7029 break;
7030 case 0x133: /* rdpmc */
7031 gen_update_cc_op(s);
7032 gen_jmp_im(pc_start - s->cs_base);
7033 gen_helper_rdpmc(cpu_env);
7034 break;
7035 case 0x134: /* sysenter */
7036 /* On Intel CPUs, SYSENTER is valid even in 64-bit mode */
7037 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7038 goto illegal_op;
7039 if (!s->pe) {
7040 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7041 } else {
7042 gen_helper_sysenter(cpu_env);
7043 gen_eob(s);
7044 }
7045 break;
7046 case 0x135: /* sysexit */
7047 /* On Intel CPUs, SYSEXIT is valid even in 64-bit mode */
7048 if (CODE64(s) && env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
7049 goto illegal_op;
7050 if (!s->pe) {
7051 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7052 } else {
7053 gen_helper_sysexit(cpu_env, tcg_const_i32(dflag - 1));
7054 gen_eob(s);
7055 }
7056 break;
7057 #ifdef TARGET_X86_64
7058 case 0x105: /* syscall */
7059 /* XXX: is it usable in real mode ? */
7060 gen_update_cc_op(s);
7061 gen_jmp_im(pc_start - s->cs_base);
7062 gen_helper_syscall(cpu_env, tcg_const_i32(s->pc - pc_start));
7063 gen_eob(s);
7064 break;
7065 case 0x107: /* sysret */
7066 if (!s->pe) {
7067 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7068 } else {
7069 gen_helper_sysret(cpu_env, tcg_const_i32(dflag - 1));
7070 /* condition codes are modified only in long mode */
7071 if (s->lma) {
7072 set_cc_op(s, CC_OP_EFLAGS);
7073 }
7074 gen_eob(s);
7075 }
7076 break;
7077 #endif
7078 case 0x1a2: /* cpuid */
7079 gen_update_cc_op(s);
7080 gen_jmp_im(pc_start - s->cs_base);
7081 gen_helper_cpuid(cpu_env);
7082 break;
7083 case 0xf4: /* hlt */
7084 if (s->cpl != 0) {
7085 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7086 } else {
7087 gen_update_cc_op(s);
7088 gen_jmp_im(pc_start - s->cs_base);
7089 gen_helper_hlt(cpu_env, tcg_const_i32(s->pc - pc_start));
7090 s->is_jmp = DISAS_TB_JUMP;
7091 }
7092 break;
7093 case 0x100:
7094 modrm = cpu_ldub_code(env, s->pc++);
7095 mod = (modrm >> 6) & 3;
7096 op = (modrm >> 3) & 7;
7097 switch(op) {
7098 case 0: /* sldt */
7099 if (!s->pe || s->vm86)
7100 goto illegal_op;
7101 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
7102 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
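/* A register destination honours the operand size (the selector is
   zero-extended); a memory destination is always a 16-bit store. */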
7103 ot = mod == 3 ? dflag : MO_16;
7104 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7105 break;
7106 case 2: /* lldt */
7107 if (!s->pe || s->vm86)
7108 goto illegal_op;
7109 if (s->cpl != 0) {
7110 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7111 } else {
7112 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
7113 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7114 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7115 gen_helper_lldt(cpu_env, cpu_tmp2_i32);
7116 }
7117 break;
7118 case 1: /* str */
7119 if (!s->pe || s->vm86)
7120 goto illegal_op;
7121 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
7122 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
7123 ot = mod == 3 ? dflag : MO_16;
7124 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 1);
7125 break;
7126 case 3: /* ltr */
7127 if (!s->pe || s->vm86)
7128 goto illegal_op;
7129 if (s->cpl != 0) {
7130 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7131 } else {
7132 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
7133 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7134 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
7135 gen_helper_ltr(cpu_env, cpu_tmp2_i32);
7136 }
7137 break;
7138 case 4: /* verr */
7139 case 5: /* verw */
7140 if (!s->pe || s->vm86)
7141 goto illegal_op;
7142 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7143 gen_update_cc_op(s);
7144 if (op == 4) {
7145 gen_helper_verr(cpu_env, cpu_T[0]);
7146 } else {
7147 gen_helper_verw(cpu_env, cpu_T[0]);
7148 }
7149 set_cc_op(s, CC_OP_EFLAGS);
7150 break;
7151 default:
7152 goto illegal_op;
7153 }
7154 break;
7155 case 0x101:
7156 modrm = cpu_ldub_code(env, s->pc++);
7157 mod = (modrm >> 6) & 3;
7158 op = (modrm >> 3) & 7;
7159 rm = modrm & 7;
7160 switch(op) {
7161 case 0: /* sgdt */
7162 if (mod == 3)
7163 goto illegal_op;
7164 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
7165 gen_lea_modrm(env, s, modrm);
7166 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
7167 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7168 gen_add_A0_im(s, 2);
7169 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
7170 if (dflag == MO_16) {
7171 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7172 }
7173 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7174 break;
7175 case 1:
7176 if (mod == 3) {
7177 switch (rm) {
7178 case 0: /* monitor */
7179 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7180 s->cpl != 0)
7181 goto illegal_op;
7182 gen_update_cc_op(s);
7183 gen_jmp_im(pc_start - s->cs_base);
7184 tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
7185 gen_extu(s->aflag, cpu_A0);
7186 gen_add_A0_ds_seg(s);
7187 gen_helper_monitor(cpu_env, cpu_A0);
7188 break;
7189 case 1: /* mwait */
7190 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7191 s->cpl != 0)
7192 goto illegal_op;
7193 gen_update_cc_op(s);
7194 gen_jmp_im(pc_start - s->cs_base);
7195 gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
7196 gen_eob(s);
7197 break;
7198 case 2: /* clac */
7199 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7200 s->cpl != 0) {
7201 goto illegal_op;
7202 }
7203 gen_helper_clac(cpu_env);
7204 gen_jmp_im(s->pc - s->cs_base);
7205 gen_eob(s);
7206 break;
7207 case 3: /* stac */
7208 if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
7209 s->cpl != 0) {
7210 goto illegal_op;
7211 }
7212 gen_helper_stac(cpu_env);
7213 gen_jmp_im(s->pc - s->cs_base);
7214 gen_eob(s);
7215 break;
7216 default:
7217 goto illegal_op;
7218 }
7219 } else { /* sidt */
7220 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7221 gen_lea_modrm(env, s, modrm);
7222 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7223 gen_op_st_v(s, MO_16, cpu_T[0], cpu_A0);
7224 gen_add_A0_im(s, 2);
7225 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7226 if (dflag == MO_16) {
7227 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7228 }
7229 gen_op_st_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7230 }
7231 break;
7232 case 2: /* lgdt */
7233 case 3: /* lidt */
7234 if (mod == 3) {
7235 gen_update_cc_op(s);
7236 gen_jmp_im(pc_start - s->cs_base);
7237 switch(rm) {
7238 case 0: /* VMRUN */
7239 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7240 goto illegal_op;
7241 if (s->cpl != 0) {
7242 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7243 break;
7244 } else {
7245 gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
7246 tcg_const_i32(s->pc - pc_start));
7247 tcg_gen_exit_tb(0);
7248 s->is_jmp = DISAS_TB_JUMP;
7249 }
7250 break;
7251 case 1: /* VMMCALL */
7252 if (!(s->flags & HF_SVME_MASK))
7253 goto illegal_op;
7254 gen_helper_vmmcall(cpu_env);
7255 break;
7256 case 2: /* VMLOAD */
7257 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7258 goto illegal_op;
7259 if (s->cpl != 0) {
7260 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7261 break;
7262 } else {
7263 gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
7265 break;
7266 case 3: /* VMSAVE */
7267 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7268 goto illegal_op;
7269 if (s->cpl != 0) {
7270 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7271 break;
7272 } else {
7273 gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
7274 }
7275 break;
7276 case 4: /* STGI */
7277 if ((!(s->flags & HF_SVME_MASK) &&
7278 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7279 !s->pe)
7280 goto illegal_op;
7281 if (s->cpl != 0) {
7282 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7283 break;
7284 } else {
7285 gen_helper_stgi(cpu_env);
7286 }
7287 break;
7288 case 5: /* CLGI */
7289 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7290 goto illegal_op;
7291 if (s->cpl != 0) {
7292 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7293 break;
7294 } else {
7295 gen_helper_clgi(cpu_env);
7296 }
7297 break;
7298 case 6: /* SKINIT */
7299 if ((!(s->flags & HF_SVME_MASK) &&
7300 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7301 !s->pe)
7302 goto illegal_op;
7303 gen_helper_skinit(cpu_env);
7304 break;
7305 case 7: /* INVLPGA */
7306 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7307 goto illegal_op;
7308 if (s->cpl != 0) {
7309 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7310 break;
7311 } else {
7312 gen_helper_invlpga(cpu_env,
7313 tcg_const_i32(s->aflag - 1));
7314 }
7315 break;
7316 default:
7317 goto illegal_op;
7318 }
7319 } else if (s->cpl != 0) {
7320 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7321 } else {
7322 gen_svm_check_intercept(s, pc_start,
7323 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7324 gen_lea_modrm(env, s, modrm);
7325 gen_op_ld_v(s, MO_16, cpu_T[1], cpu_A0);
7326 gen_add_A0_im(s, 2);
7327 gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T[0], cpu_A0);
7328 if (dflag == MO_16) {
7329 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffffff);
7330 }
7331 if (op == 2) {
7332 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7333 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7334 } else {
7335 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7336 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7337 }
7338 }
7339 break;
7340 case 4: /* smsw */
7341 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
7342 #if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
7343 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7344 #else
7345 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7346 #endif
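/* On big-endian hosts the low 32 bits of the 64-bit cr[0] field
   live at byte offset 4, hence the adjusted offset above. */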
7347 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
7348 break;
7349 case 6: /* lmsw */
7350 if (s->cpl != 0) {
7351 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7352 } else {
7353 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7354 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7355 gen_helper_lmsw(cpu_env, cpu_T[0]);
7356 gen_jmp_im(s->pc - s->cs_base);
7357 gen_eob(s);
7358 }
7359 break;
7360 case 7:
7361 if (mod != 3) { /* invlpg */
7362 if (s->cpl != 0) {
7363 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7364 } else {
7365 gen_update_cc_op(s);
7366 gen_jmp_im(pc_start - s->cs_base);
7367 gen_lea_modrm(env, s, modrm);
7368 gen_helper_invlpg(cpu_env, cpu_A0);
7369 gen_jmp_im(s->pc - s->cs_base);
7370 gen_eob(s);
7371 }
7372 } else {
7373 switch (rm) {
7374 case 0: /* swapgs */
7375 #ifdef TARGET_X86_64
7376 if (CODE64(s)) {
7377 if (s->cpl != 0) {
7378 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7379 } else {
7380 tcg_gen_ld_tl(cpu_T[0], cpu_env,
7381 offsetof(CPUX86State,segs[R_GS].base));
7382 tcg_gen_ld_tl(cpu_T[1], cpu_env,
7383 offsetof(CPUX86State,kernelgsbase));
7384 tcg_gen_st_tl(cpu_T[1], cpu_env,
7385 offsetof(CPUX86State,segs[R_GS].base));
7386 tcg_gen_st_tl(cpu_T[0], cpu_env,
7387 offsetof(CPUX86State,kernelgsbase));
7388 }
7389 } else
7390 #endif
7391 {
7392 goto illegal_op;
7393 }
7394 break;
7395 case 1: /* rdtscp */
7396 if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
7397 goto illegal_op;
7398 gen_update_cc_op(s);
7399 gen_jmp_im(pc_start - s->cs_base);
7400 if (s->tb->cflags & CF_USE_ICOUNT) {
7401 gen_io_start();
7402 }
7403 gen_helper_rdtscp(cpu_env);
7404 if (s->tb->cflags & CF_USE_ICOUNT) {
7405 gen_io_end();
7406 gen_jmp(s, s->pc - s->cs_base);
7407 }
7408 break;
7409 default:
7410 goto illegal_op;
7411 }
7412 }
7413 break;
7414 default:
7415 goto illegal_op;
7416 }
7417 break;
7418 case 0x108: /* invd */
7419 case 0x109: /* wbinvd */
7420 if (s->cpl != 0) {
7421 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7422 } else {
7423 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7424 /* nothing to do */
7425 }
7426 break;
7427 case 0x63: /* arpl or movslS (x86_64) */
7428 #ifdef TARGET_X86_64
7429 if (CODE64(s)) {
7430 int d_ot;
7431 /* d_ot is the size of destination */
7432 d_ot = dflag;
7434 modrm = cpu_ldub_code(env, s->pc++);
7435 reg = ((modrm >> 3) & 7) | rex_r;
7436 mod = (modrm >> 6) & 3;
7437 rm = (modrm & 7) | REX_B(s);
7439 if (mod == 3) {
7440 gen_op_mov_v_reg(MO_32, cpu_T[0], rm);
7441 /* sign extend */
7442 if (d_ot == MO_64) {
7443 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7444 }
7445 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7446 } else {
7447 gen_lea_modrm(env, s, modrm);
7448 gen_op_ld_v(s, MO_32 | MO_SIGN, cpu_T[0], cpu_A0);
7449 gen_op_mov_reg_v(d_ot, reg, cpu_T[0]);
7450 }
7451 } else
7452 #endif
7453 {
7454 TCGLabel *label1;
7455 TCGv t0, t1, t2, a0;
7457 if (!s->pe || s->vm86)
7458 goto illegal_op;
7459 t0 = tcg_temp_local_new();
7460 t1 = tcg_temp_local_new();
7461 t2 = tcg_temp_local_new();
7462 ot = MO_16;
7463 modrm = cpu_ldub_code(env, s->pc++);
7464 reg = (modrm >> 3) & 7;
7465 mod = (modrm >> 6) & 3;
7466 rm = modrm & 7;
7467 if (mod != 3) {
7468 gen_lea_modrm(env, s, modrm);
7469 gen_op_ld_v(s, ot, t0, cpu_A0);
7470 a0 = tcg_temp_local_new();
7471 tcg_gen_mov_tl(a0, cpu_A0);
7472 } else {
7473 gen_op_mov_v_reg(ot, t0, rm);
7474 TCGV_UNUSED(a0);
7475 }
7476 gen_op_mov_v_reg(ot, t1, reg);
7477 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7478 tcg_gen_andi_tl(t1, t1, 3);
7479 tcg_gen_movi_tl(t2, 0);
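/* arpl: tmp0 and t1 hold the RPL fields of the destination and
   source selectors.  If dest RPL >= src RPL the branch below skips
   the adjustment and t2 stays 0 (ZF clear); otherwise the RPL is
   raised to the source's and t2 = CC_Z sets ZF. */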
7480 label1 = gen_new_label();
7481 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7482 tcg_gen_andi_tl(t0, t0, ~3);
7483 tcg_gen_or_tl(t0, t0, t1);
7484 tcg_gen_movi_tl(t2, CC_Z);
7485 gen_set_label(label1);
7486 if (mod != 3) {
7487 gen_op_st_v(s, ot, t0, a0);
7488 tcg_temp_free(a0);
7489 } else {
7490 gen_op_mov_reg_v(ot, rm, t0);
7491 }
7492 gen_compute_eflags(s);
7493 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7494 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7495 tcg_temp_free(t0);
7496 tcg_temp_free(t1);
7497 tcg_temp_free(t2);
7498 }
7499 break;
7500 case 0x102: /* lar */
7501 case 0x103: /* lsl */
7502 {
7503 TCGLabel *label1;
7504 TCGv t0;
7505 if (!s->pe || s->vm86)
7506 goto illegal_op;
7507 ot = dflag != MO_16 ? MO_32 : MO_16;
7508 modrm = cpu_ldub_code(env, s->pc++);
7509 reg = ((modrm >> 3) & 7) | rex_r;
7510 gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
7511 t0 = tcg_temp_local_new();
7512 gen_update_cc_op(s);
7513 if (b == 0x102) {
7514 gen_helper_lar(t0, cpu_env, cpu_T[0]);
7515 } else {
7516 gen_helper_lsl(t0, cpu_env, cpu_T[0]);
7517 }
7518 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7519 label1 = gen_new_label();
7520 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7521 gen_op_mov_reg_v(ot, reg, t0);
7522 gen_set_label(label1);
7523 set_cc_op(s, CC_OP_EFLAGS);
7524 tcg_temp_free(t0);
7525 }
7526 break;
7527 case 0x118:
7528 modrm = cpu_ldub_code(env, s->pc++);
7529 mod = (modrm >> 6) & 3;
7530 op = (modrm >> 3) & 7;
7531 switch(op) {
7532 case 0: /* prefetchnta */
7533 case 1: /* prefetcht0 */
7534 case 2: /* prefetcht1 */
7535 case 3: /* prefetcht2 */
7536 if (mod == 3)
7537 goto illegal_op;
7538 gen_lea_modrm(env, s, modrm);
7539 /* nothing more to do */
7540 break;
7541 default: /* nop (multi byte) */
7542 gen_nop_modrm(env, s, modrm);
7543 break;
7544 }
7545 break;
7546 case 0x119 ... 0x11f: /* nop (multi byte) */
7547 modrm = cpu_ldub_code(env, s->pc++);
7548 gen_nop_modrm(env, s, modrm);
7549 break;
7550 case 0x120: /* mov reg, crN */
7551 case 0x122: /* mov crN, reg */
7552 if (s->cpl != 0) {
7553 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7554 } else {
7555 modrm = cpu_ldub_code(env, s->pc++);
7556 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7557 * AMD documentation (24594.pdf) and testing of
7558 * Intel 386 and 486 processors all show that the mod bits
7559 * are assumed to be 1's, regardless of actual values.
7560 */
7561 rm = (modrm & 7) | REX_B(s);
7562 reg = ((modrm >> 3) & 7) | rex_r;
7563 if (CODE64(s))
7564 ot = MO_64;
7565 else
7566 ot = MO_32;
7567 if ((prefixes & PREFIX_LOCK) && (reg == 0) &&
7568 (s->cpuid_ext3_features & CPUID_EXT3_CR8LEG)) {
7569 reg = 8;
7570 }
7571 switch(reg) {
7572 case 0:
7573 case 2:
7574 case 3:
7575 case 4:
7576 case 8:
7577 gen_update_cc_op(s);
7578 gen_jmp_im(pc_start - s->cs_base);
7579 if (b & 2) {
7580 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7581 gen_helper_write_crN(cpu_env, tcg_const_i32(reg),
7582 cpu_T[0]);
7583 gen_jmp_im(s->pc - s->cs_base);
7584 gen_eob(s);
7585 } else {
7586 gen_helper_read_crN(cpu_T[0], cpu_env, tcg_const_i32(reg));
7587 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7588 }
7589 break;
7590 default:
7591 goto illegal_op;
7592 }
7593 }
7594 break;
7595 case 0x121: /* mov reg, drN */
7596 case 0x123: /* mov drN, reg */
7597 if (s->cpl != 0) {
7598 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7599 } else {
7600 modrm = cpu_ldub_code(env, s->pc++);
7601 /* Ignore the mod bits (assume (modrm&0xc0)==0xc0).
7602 * AMD documentation (24594.pdf) and testing of
7603 * Intel 386 and 486 processors all show that the mod bits
7604 * are assumed to be 1's, regardless of actual values.
7605 */
7606 rm = (modrm & 7) | REX_B(s);
7607 reg = ((modrm >> 3) & 7) | rex_r;
7608 if (CODE64(s))
7609 ot = MO_64;
7610 else
7611 ot = MO_32;
7612 /* XXX: do it dynamically with CR4.DE bit */
7613 if (reg == 4 || reg == 5 || reg >= 8)
7614 goto illegal_op;
7615 if (b & 2) {
7616 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7617 gen_op_mov_v_reg(ot, cpu_T[0], rm);
7618 gen_helper_movl_drN_T0(cpu_env, tcg_const_i32(reg), cpu_T[0]);
7619 gen_jmp_im(s->pc - s->cs_base);
7620 gen_eob(s);
7621 } else {
7622 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7623 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7624 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
7625 }
7626 }
7627 break;
7628 case 0x106: /* clts */
7629 if (s->cpl != 0) {
7630 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7631 } else {
7632 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7633 gen_helper_clts(cpu_env);
7634 /* abort block because static cpu state changed */
7635 gen_jmp_im(s->pc - s->cs_base);
7636 gen_eob(s);
7637 }
7638 break;
7639 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7640 case 0x1c3: /* MOVNTI reg, mem */
7641 if (!(s->cpuid_features & CPUID_SSE2))
7642 goto illegal_op;
7643 ot = mo_64_32(dflag);
7644 modrm = cpu_ldub_code(env, s->pc++);
7645 mod = (modrm >> 6) & 3;
7646 if (mod == 3)
7647 goto illegal_op;
7648 reg = ((modrm >> 3) & 7) | rex_r;
7649 /* generate a generic store */
7650 gen_ldst_modrm(env, s, modrm, ot, reg, 1);
7651 break;
7652 case 0x1ae:
7653 modrm = cpu_ldub_code(env, s->pc++);
7654 mod = (modrm >> 6) & 3;
7655 op = (modrm >> 3) & 7;
7656 switch(op) {
7657 case 0: /* fxsave */
7658 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7659 (s->prefix & PREFIX_LOCK))
7660 goto illegal_op;
7661 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7662 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7663 break;
7664 }
7665 gen_lea_modrm(env, s, modrm);
7666 gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7667 break;
7668 case 1: /* fxrstor */
7669 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7670 (s->prefix & PREFIX_LOCK))
7671 goto illegal_op;
7672 if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
7673 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7674 break;
7675 }
7676 gen_lea_modrm(env, s, modrm);
7677 gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
7678 break;
7679 case 2: /* ldmxcsr */
7680 case 3: /* stmxcsr */
7681 if (s->flags & HF_TS_MASK) {
7682 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7683 break;
7684 }
7685 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7686 mod == 3)
7687 goto illegal_op;
7688 gen_lea_modrm(env, s, modrm);
7689 if (op == 2) {
7690 tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
7691 s->mem_index, MO_LEUL);
7692 gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
7693 } else {
7694 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7695 gen_op_st_v(s, MO_32, cpu_T[0], cpu_A0);
7696 }
7697 break;
7698 case 5: /* lfence */
7699 case 6: /* mfence */
7700 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
7701 goto illegal_op;
7702 break;
7703 case 7: /* sfence / clflush */
7704 if ((modrm & 0xc7) == 0xc0) {
7705 /* sfence */
7706 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7707 if (!(s->cpuid_features & CPUID_SSE))
7708 goto illegal_op;
7709 } else {
7710 /* clflush */
7711 if (!(s->cpuid_features & CPUID_CLFLUSH))
7712 goto illegal_op;
7713 gen_lea_modrm(env, s, modrm);
7714 }
7715 break;
7716 default:
7717 goto illegal_op;
7718 }
7719 break;
7720 case 0x10d: /* 3DNow! prefetch(w) */
7721 modrm = cpu_ldub_code(env, s->pc++);
7722 mod = (modrm >> 6) & 3;
7723 if (mod == 3)
7724 goto illegal_op;
7725 gen_lea_modrm(env, s, modrm);
7726 /* ignore for now */
7727 break;
7728 case 0x1aa: /* rsm */
7729 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7730 if (!(s->flags & HF_SMM_MASK))
7731 goto illegal_op;
7732 gen_update_cc_op(s);
7733 gen_jmp_im(s->pc - s->cs_base);
7734 gen_helper_rsm(cpu_env);
7735 gen_eob(s);
7736 break;
7737 case 0x1b8: /* SSE4.2 popcnt */
7738 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7739 PREFIX_REPZ)
7740 goto illegal_op;
7741 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7742 goto illegal_op;
7744 modrm = cpu_ldub_code(env, s->pc++);
7745 reg = ((modrm >> 3) & 7) | rex_r;
7747 if (s->prefix & PREFIX_DATA) {
7748 ot = MO_16;
7749 } else {
7750 ot = mo_64_32(dflag);
7751 }
7753 gen_ldst_modrm(env, s, modrm, ot, OR_TMP0, 0);
7754 gen_helper_popcnt(cpu_T[0], cpu_env, cpu_T[0], tcg_const_i32(ot));
7755 gen_op_mov_reg_v(ot, reg, cpu_T[0]);
7757 set_cc_op(s, CC_OP_EFLAGS);
7758 break;
7759 case 0x10e ... 0x10f:
7760 /* 3DNow! instructions, ignore prefixes */
7761 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7762 case 0x110 ... 0x117:
7763 case 0x128 ... 0x12f:
7764 case 0x138 ... 0x13a:
7765 case 0x150 ... 0x179:
7766 case 0x17c ... 0x17f:
7767 case 0x1c2:
7768 case 0x1c4 ... 0x1c6:
7769 case 0x1d0 ... 0x1fe:
7770 gen_sse(env, s, b, pc_start, rex_r);
7771 break;
7772 default:
7773 goto illegal_op;
7774 }
7775 /* lock generation */
7776 if (s->prefix & PREFIX_LOCK)
7777 gen_helper_unlock();
7778 return s->pc;
7779 illegal_op:
7780 if (s->prefix & PREFIX_LOCK)
7781 gen_helper_unlock();
7782 /* XXX: ensure that no lock was generated */
7783 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7784 return s->pc;
7785 }
7787 void optimize_flags_init(void)
7788 {
7789 static const char reg_names[CPU_NB_REGS][4] = {
7790 #ifdef TARGET_X86_64
7791 [R_EAX] = "rax",
7792 [R_EBX] = "rbx",
7793 [R_ECX] = "rcx",
7794 [R_EDX] = "rdx",
7795 [R_ESI] = "rsi",
7796 [R_EDI] = "rdi",
7797 [R_EBP] = "rbp",
7798 [R_ESP] = "rsp",
7799 [8] = "r8",
7800 [9] = "r9",
7801 [10] = "r10",
7802 [11] = "r11",
7803 [12] = "r12",
7804 [13] = "r13",
7805 [14] = "r14",
7806 [15] = "r15",
7807 #else
7808 [R_EAX] = "eax",
7809 [R_EBX] = "ebx",
7810 [R_ECX] = "ecx",
7811 [R_EDX] = "edx",
7812 [R_ESI] = "esi",
7813 [R_EDI] = "edi",
7814 [R_EBP] = "ebp",
7815 [R_ESP] = "esp",
7816 #endif
7817 };
7818 int i;
7820 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7821 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7822 offsetof(CPUX86State, cc_op), "cc_op");
7823 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_dst),
7824 "cc_dst");
7825 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src),
7826 "cc_src");
7827 cpu_cc_src2 = tcg_global_mem_new(TCG_AREG0, offsetof(CPUX86State, cc_src2),
7828 "cc_src2");
7830 for (i = 0; i < CPU_NB_REGS; ++i) {
7831 cpu_regs[i] = tcg_global_mem_new(TCG_AREG0,
7832 offsetof(CPUX86State, regs[i]),
7833 reg_names[i]);
7834 }
7836 helper_lock_init();
7837 }
7839 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7840 basic block 'tb'. */
7841 void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
7842 {
7843 X86CPU *cpu = x86_env_get_cpu(env);
7844 CPUState *cs = CPU(cpu);
7845 DisasContext dc1, *dc = &dc1;
7846 target_ulong pc_ptr;
7847 uint64_t flags;
7848 target_ulong pc_start;
7849 target_ulong cs_base;
7850 int num_insns;
7851 int max_insns;
7853 /* generate intermediate code */
7854 pc_start = tb->pc;
7855 cs_base = tb->cs_base;
7856 flags = tb->flags;
7858 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7859 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7860 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7861 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7862 dc->f_st = 0;
7863 dc->vm86 = (flags >> VM_SHIFT) & 1;
7864 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7865 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7866 dc->tf = (flags >> TF_SHIFT) & 1;
7867 dc->singlestep_enabled = cs->singlestep_enabled;
7868 dc->cc_op = CC_OP_DYNAMIC;
7869 dc->cc_op_dirty = false;
7870 dc->cs_base = cs_base;
7871 dc->tb = tb;
7872 dc->popl_esp_hack = 0;
7873 /* select memory access functions */
7874 dc->mem_index = 0;
7875 if (flags & HF_SOFTMMU_MASK) {
7876 dc->mem_index = cpu_mmu_index(env, false);
7877 }
7878 dc->cpuid_features = env->features[FEAT_1_EDX];
7879 dc->cpuid_ext_features = env->features[FEAT_1_ECX];
7880 dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
7881 dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
7882 dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
7883 #ifdef TARGET_X86_64
7884 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7885 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7886 #endif
7887 dc->flags = flags;
7888 dc->jmp_opt = !(dc->tf || cs->singlestep_enabled ||
7889 (flags & HF_INHIBIT_IRQ_MASK)
7890 #ifndef CONFIG_SOFTMMU
7891 || (flags & HF_SOFTMMU_MASK)
7892 #endif
7893 );
7894 /* Do not optimize repz jumps at all in icount mode, because
7895 rep movsS instructions are executed along different paths
7896 in the !repz_opt and repz_opt modes. The first was always
7897 used except in single step mode. This setting disables the
7898 jump optimization so that the control paths become
7899 equivalent in normal run and single step modes.
7900 Thus there is no jump optimization for repz in
7901 record/replay modes, and there is always an additional
7902 step for ecx=0 when icount is enabled.
7903 */
7904 dc->repz_opt = !dc->jmp_opt && !(tb->cflags & CF_USE_ICOUNT);
7905 #if 0
7906 /* check addseg logic */
7907 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7908 printf("ERROR addseg\n");
7909 #endif
7911 cpu_T[0] = tcg_temp_new();
7912 cpu_T[1] = tcg_temp_new();
7913 cpu_A0 = tcg_temp_new();
7915 cpu_tmp0 = tcg_temp_new();
7916 cpu_tmp1_i64 = tcg_temp_new_i64();
7917 cpu_tmp2_i32 = tcg_temp_new_i32();
7918 cpu_tmp3_i32 = tcg_temp_new_i32();
7919 cpu_tmp4 = tcg_temp_new();
7920 cpu_ptr0 = tcg_temp_new_ptr();
7921 cpu_ptr1 = tcg_temp_new_ptr();
7922 cpu_cc_srcT = tcg_temp_local_new();
7924 dc->is_jmp = DISAS_NEXT;
7925 pc_ptr = pc_start;
7926 num_insns = 0;
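/* The CF_COUNT_MASK bits of cflags request a specific instruction
   budget (used by icount); zero means no request, and the budget is
   then clamped to TCG_MAX_INSNS. */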
7927 max_insns = tb->cflags & CF_COUNT_MASK;
7928 if (max_insns == 0) {
7929 max_insns = CF_COUNT_MASK;
7930 }
7931 if (max_insns > TCG_MAX_INSNS) {
7932 max_insns = TCG_MAX_INSNS;
7933 }
7935 gen_tb_start(tb);
7936 for(;;) {
7937 tcg_gen_insn_start(pc_ptr, dc->cc_op);
7938 num_insns++;
7940 /* If RF is set, suppress an internally generated breakpoint. */
7941 if (unlikely(cpu_breakpoint_test(cs, pc_ptr,
7942 tb->flags & HF_RF_MASK
7943 ? BP_GDB : BP_ANY))) {
7944 gen_debug(dc, pc_ptr - dc->cs_base);
7945 goto done_generating;
7946 }
7947 if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
7948 gen_io_start();
7949 }
7951 pc_ptr = disas_insn(env, dc, pc_ptr);
7952 /* stop translation if indicated */
7953 if (dc->is_jmp)
7954 break;
7955 /* In single step mode, we generate only one instruction and
7956 then raise an exception. */
7957 /* If irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7958 the flag and abort the translation to give the irqs a
7959 chance to happen. */
7960 if (dc->tf || dc->singlestep_enabled ||
7961 (flags & HF_INHIBIT_IRQ_MASK)) {
7962 gen_jmp_im(pc_ptr - dc->cs_base);
7963 gen_eob(dc);
7964 break;
7965 }
7966 /* Do not cross the boundary of the pages in icount mode,
7967 as it can cause an exception. Do it only when the boundary
7968 is crossed by the first instruction in the block.
7969 If the current instruction has already crossed the boundary,
7970 that is fine, because an exception has not stopped this code.
7971 */
7972 if ((tb->cflags & CF_USE_ICOUNT)
7973 && ((pc_ptr & TARGET_PAGE_MASK)
7974 != ((pc_ptr + TARGET_MAX_INSN_SIZE - 1) & TARGET_PAGE_MASK)
7975 || (pc_ptr & ~TARGET_PAGE_MASK) == 0)) {
7976 gen_jmp_im(pc_ptr - dc->cs_base);
7977 gen_eob(dc);
7978 break;
7979 }
7980 /* if the translation is getting too long, stop generation too */
7981 if (tcg_op_buf_full() ||
7982 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7983 num_insns >= max_insns) {
7984 gen_jmp_im(pc_ptr - dc->cs_base);
7985 gen_eob(dc);
7986 break;
7987 }
7988 if (singlestep) {
7989 gen_jmp_im(pc_ptr - dc->cs_base);
7990 gen_eob(dc);
7991 break;
7992 }
7993 }
7994 if (tb->cflags & CF_LAST_IO)
7995 gen_io_end();
7996 done_generating:
7997 gen_tb_end(tb, num_insns);
7999 #ifdef DEBUG_DISAS
8000 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
8001 int disas_flags;
8002 qemu_log("----------------\n");
8003 qemu_log("IN: %s\n", lookup_symbol(pc_start));
8004 #ifdef TARGET_X86_64
8005 if (dc->code64)
8006 disas_flags = 2;
8007 else
8008 #endif
8009 disas_flags = !dc->code32;
8010 log_target_disas(cs, pc_start, pc_ptr - pc_start, disas_flags);
8011 qemu_log("\n");
8012 }
8013 #endif
8015 tb->size = pc_ptr - pc_start;
8016 tb->icount = num_insns;
8017 }
8019 void restore_state_to_opc(CPUX86State *env, TranslationBlock *tb,
8020 target_ulong *data)
8021 {
8022 int cc_op = data[1];
8023 env->eip = data[0] - tb->cs_base;
8024 if (cc_op != CC_OP_DYNAMIC) {
8025 env->cc_op = cc_op;
8026 }
8027 }