2 * New-style decoder for i386 instructions
4 * Copyright (c) 2022 Red Hat, Inc.
6 * Author: Paolo Bonzini <pbonzini@redhat.com>
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
23 * The decoder is mostly based on tables copied from the Intel SDM. As
24 * a result, most operand load and writeback is done entirely in common
25 * table-driven code using the same operand type (X86_TYPE_*) and
26 * size (X86_SIZE_*) codes used in the manual.
28 * The main difference is that the V, U and W types are extended to
29 * cover MMX as well; if an instruction is like
34 * only the second row is included and the instruction is marked as a
35 * valid MMX instruction. The MMX flag directs the decoder to rewrite
36 * the V/U/H/W types to P/N/P/Q if there is no prefix, as well as changing
37 * "x" to "q" if there is no prefix.
39 * In addition, the ss/ps/sd/pd types are sometimes mushed together as "x"
40 * if the difference is expressed via prefixes. Individual instructions
41 * are separated by prefix in the generator functions.
43 * There are a couple cases in which instructions (e.g. MOVD) write the
44 * whole XMM or MM register but are listed incorrectly in the manual
45 * as "d" or "q". These have to be fixed for the decoder to work correctly.
/*
 * Table-entry constructor macros.  X86_OP_GROUP* build entries whose
 * .decode callback selects the real entry at decode time (used for
 * multi-byte or prefix-dependent opcodes); X86_OP_ENTRY* build leaf
 * entries whose .gen callback emits the translated code.  The opN_/sN_
 * arguments are pasted onto the X86_TYPE_/X86_SIZE_ enumerators with
 * glue().
 */
48 #define X86_OP_NONE { 0 },
/* Group entry with three explicit operands; extra initializers via "...". */
50 #define X86_OP_GROUP3(op, op0_, s0_, op1_, s1_, op2_, s2_, ...) { \
51 .decode = glue(decode_, op), \
52 .op0 = glue(X86_TYPE_, op0_), \
53 .s0 = glue(X86_SIZE_, s0_), \
54 .op1 = glue(X86_TYPE_, op1_), \
55 .s1 = glue(X86_SIZE_, s1_), \
56 .op2 = glue(X86_TYPE_, op2_), \
57 .s2 = glue(X86_SIZE_, s2_), \
/* Two-operand group: op0 doubles as a source ("2op" type), same size s0. */
62 #define X86_OP_GROUP2(op, op0, s0, op1, s1, ...) \
63 X86_OP_GROUP3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__)
/* Zero-operand group entry. */
64 #define X86_OP_GROUP0(op, ...) \
65 X86_OP_GROUP3(op, None, None, None, None, None, None, ## __VA_ARGS__)
/* Leaf entry: .gen emits code directly; operand encoding as for GROUP3. */
67 #define X86_OP_ENTRY3(op, op0_, s0_, op1_, s1_, op2_, s2_, ...) { \
68 .gen = glue(gen_, op), \
69 .op0 = glue(X86_TYPE_, op0_), \
70 .s0 = glue(X86_SIZE_, s0_), \
71 .op1 = glue(X86_TYPE_, op1_), \
72 .s1 = glue(X86_SIZE_, s1_), \
73 .op2 = glue(X86_TYPE_, op2_), \
74 .s2 = glue(X86_SIZE_, s2_), \
/* Four-operand leaf: operand 3 is always an immediate byte. */
78 #define X86_OP_ENTRY4(op, op0_, s0_, op1_, s1_, op2_, s2_, ...) \
79 X86_OP_ENTRY3(op, op0_, s0_, op1_, s1_, op2_, s2_, \
80 .op3 = X86_TYPE_I, .s3 = X86_SIZE_b, \
/* Two-operand leaf: op0 doubles as a source ("2op" type), same size s0. */
83 #define X86_OP_ENTRY2(op, op0, s0, op1, s1, ...) \
84 X86_OP_ENTRY3(op, op0, s0, 2op, s0, op1, s1, ## __VA_ARGS__)
/* Zero-operand leaf entry. */
85 #define X86_OP_ENTRY0(op, ...) \
86 X86_OP_ENTRY3(op, None, None, None, None, None, None, ## __VA_ARGS__)
/*
 * Shorthand attribute macros, appended to table entries after the
 * operand list; each expands to one or more designated initializers.
 */
88 #define cpuid(feat) .cpuid = X86_FEAT_##feat,
/* i64/o64: instruction is invalid in, resp. exclusive to, 64-bit mode. */
89 #define i64 .special = X86_SPECIAL_i64,
90 #define o64 .special = X86_SPECIAL_o64,
/* Locked: op0 in memory gets LOCK semantics (see disas_insn_new). */
91 #define xchg .special = X86_SPECIAL_Locked,
/* MMX: V/U/H/W operands are remapped to MMX forms when no 66/F2/F3 prefix. */
92 #define mmx .special = X86_SPECIAL_MMX,
/* zext0/zext2: register (non-memory) operand 0/2 is widened to 32 bits. */
93 #define zext0 .special = X86_SPECIAL_ZExtOp0,
94 #define zext2 .special = X86_SPECIAL_ZExtOp2,
/*
 * vexN: SSE/AVX exception class checked by validate_vex().  Classes 1-8
 * presumably follow the SDM numbering; 11-13 look like decoder-internal
 * classes (12 is used for VSIB loads in disas_insn_new) -- TODO confirm.
 */
96 #define vex1 .vex_class = 1,
97 #define vex1_rep3 .vex_class = 1, .vex_special = X86_VEX_REPScalar,
98 #define vex2 .vex_class = 2,
99 #define vex2_rep3 .vex_class = 2, .vex_special = X86_VEX_REPScalar,
100 #define vex3 .vex_class = 3,
101 #define vex4 .vex_class = 4,
102 #define vex4_unal .vex_class = 4, .vex_special = X86_VEX_SSEUnaligned,
103 #define vex5 .vex_class = 5,
104 #define vex6 .vex_class = 6,
105 #define vex7 .vex_class = 7,
106 #define vex8 .vex_class = 8,
107 #define vex11 .vex_class = 11,
108 #define vex12 .vex_class = 12,
109 #define vex13 .vex_class = 13,
/* Entry needs AVX2 when VEX.L=1 (checked in validate_vex). */
111 #define avx2_256 .vex_special = X86_VEX_AVX2_256,
/*
 * Fetch the instruction's modrm byte, caching it in s->modrm so that
 * repeated calls during operand decoding read it only once.
 */
113 static uint8_t get_modrm(DisasContext *s, CPUX86State *env)
116 s->modrm = x86_ldub_code(env, s);
/* 0F 38 xx two-level opcode space: 00-EF is a flat table... */
122 static const X86OpEntry opcodes_0F38_00toEF[240] = {
125 /* five rows for no prefix, 66, F3, F2, 66+F2 */
126 static const X86OpEntry opcodes_0F38_F0toFF[16][5] = {
/*
 * Select the entry for a 0F 38 xx opcode.  Reads the third opcode byte;
 * F0-FF entries additionally depend on the 66/F2/F3 prefix combination
 * (the "row" of opcodes_0F38_F0toFF).
 */
129 static void decode_0F38(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
131 *b = x86_ldub_code(env, s);
133 *entry = opcodes_0F38_00toEF[*b];
136 if (s->prefix & PREFIX_REPZ) {
137 /* The REPZ (F3) prefix has priority over 66 */
140 row += s->prefix & PREFIX_REPNZ ? 3 : 0;
141 row += s->prefix & PREFIX_DATA ? 1 : 0;
143 *entry = opcodes_0F38_F0toFF[*b & 15][row];
147 static const X86OpEntry opcodes_0F3A[256] = {
/* Select the entry for a 0F 3A xx opcode from the flat 256-entry table. */
150 static void decode_0F3A(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
152 *b = x86_ldub_code(env, s);
153 *entry = opcodes_0F3A[*b];
/* 0F xx opcode space; 0F 38 / 0F 3A chain into their own decoders. */
156 static const X86OpEntry opcodes_0F[256] = {
157 [0x38] = X86_OP_GROUP0(0F38),
158 [0x3a] = X86_OP_GROUP0(0F3A),
/*
 * Table lookup with the second opcode byte already in *b (used directly
 * when a VEX prefix implied the 0F escape).
 */
161 static void do_decode_0F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
163 *entry = opcodes_0F[*b];
/* Read the second opcode byte, then look it up in the 0F table. */
166 static void decode_0F(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
168 *b = x86_ldub_code(env, s);
169 do_decode_0F(s, env, entry, b);
/* One-byte opcode space; 0F chains into the two-byte table. */
172 static const X86OpEntry opcodes_root[256] = {
173 [0x0F] = X86_OP_GROUP0(0F),
191 * Decode the fixed part of the opcode and place the last
194 static void decode_root(DisasContext *s, CPUX86State *env, X86OpEntry *entry, uint8_t *b)
196 *entry = opcodes_root[*b];
/*
 * Decode the r/m part of the modrm byte into *op: mod == 3 names a
 * register (LOCK with a register operand is illegal and is routed to
 * gen_illegal), otherwise the effective address is computed into
 * decode->mem via gen_lea_modrm_0().
 */
200 static int decode_modrm(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
201 X86DecodedOp *op, X86OpType type)
203 int modrm = get_modrm(s, env);
204 if ((modrm >> 6) == 3) {
205 if (s->prefix & PREFIX_LOCK) {
206 decode->e.gen = gen_illegal;
/* Q and N are MMX register operands; everything else here is XMM/GPR. */
210 if (type != X86_TYPE_Q && type != X86_TYPE_N) {
216 decode->mem = gen_lea_modrm_0(env, s, get_modrm(s, env));
/*
 * Translate an X86_SIZE_* code into a concrete MemOp for one operand,
 * applying the operand-size prefixes (s->dflag), VEX.L (s->vex_l) and
 * the MMX remapping for X86_SPECIAL_MMX entries.  Returns a bool that
 * the caller (decode_insn) checks; failure paths are in elided lines.
 */
221 static bool decode_op_size(DisasContext *s, X86OpEntry *e, X86OpSize size, MemOp *ot)
224 case X86_SIZE_b: /* byte */
228 case X86_SIZE_d: /* 32-bit */
229 case X86_SIZE_ss: /* SSE/AVX scalar single precision */
233 case X86_SIZE_p: /* Far pointer, return offset size */
234 case X86_SIZE_s: /* Descriptor, return offset size */
235 case X86_SIZE_v: /* 16/32/64-bit, based on operand size */
239 case X86_SIZE_pi: /* MMX */
240 case X86_SIZE_q: /* 64-bit */
241 case X86_SIZE_sd: /* SSE/AVX scalar double precision */
245 case X86_SIZE_w: /* 16-bit */
249 case X86_SIZE_y: /* 32/64-bit, based on operand size */
250 *ot = s->dflag == MO_16 ? MO_32 : s->dflag;
253 case X86_SIZE_z: /* 16-bit for 16-bit operand size, else 32-bit */
254 *ot = s->dflag == MO_16 ? MO_16 : MO_32;
257 case X86_SIZE_dq: /* SSE/AVX 128-bit */
/* MMX form of a "dq" operand: no 66/F2/F3 prefix present. */
258 if (e->special == X86_SPECIAL_MMX &&
259 !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
/* VEX.L=1 on a 128-bit operand is only valid if another operand is qq. */
263 if (s->vex_l && e->s0 != X86_SIZE_qq && e->s1 != X86_SIZE_qq) {
269 case X86_SIZE_qq: /* AVX 256-bit */
276 case X86_SIZE_x: /* 128/256-bit, based on operand size */
277 if (e->special == X86_SPECIAL_MMX &&
278 !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
283 case X86_SIZE_ps: /* SSE/AVX packed single precision */
284 case X86_SIZE_pd: /* SSE/AVX packed double precision */
285 *ot = s->vex_l ? MO_256 : MO_128;
288 case X86_SIZE_d64: /* Default to 64-bit in 64-bit mode */
289 *ot = CODE64(s) && s->dflag == MO_32 ? MO_64 : s->dflag;
292 case X86_SIZE_f64: /* Ignore size override prefix in 64-bit mode */
293 *ot = CODE64(s) ? MO_64 : s->dflag;
/*
 * Decode one operand of the given X86_TYPE_* into *op, consuming
 * modrm/immediate/address bytes from the instruction stream as needed.
 * b is the last opcode byte (used by X86_TYPE_LoBits).  Returns a bool
 * checked by decode_insn; failure paths are in elided lines.
 */
302 static bool decode_op(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
303 X86DecodedOp *op, X86OpType type, int b)
308 case X86_TYPE_None: /* Implicit or absent */
309 case X86_TYPE_A: /* Implicit */
310 case X86_TYPE_F: /* EFLAGS/RFLAGS */
313 case X86_TYPE_B: /* VEX.vvvv selects a GPR */
314 op->unit = X86_OP_INT;
318 case X86_TYPE_C: /* REG in the modrm byte selects a control register */
319 op->unit = X86_OP_CR;
322 case X86_TYPE_D: /* REG in the modrm byte selects a debug register */
323 op->unit = X86_OP_DR;
326 case X86_TYPE_G: /* REG in the modrm byte selects a GPR */
327 op->unit = X86_OP_INT;
330 case X86_TYPE_S: /* reg selects a segment register */
331 op->unit = X86_OP_SEG;
335 op->unit = X86_OP_MMX;
338 case X86_TYPE_V: /* reg in the modrm byte selects an XMM/YMM register */
/* X86_SPECIAL_MMX without 66/F2/F3: the V operand becomes an MMX reg. */
339 if (decode->e.special == X86_SPECIAL_MMX &&
340 !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
341 op->unit = X86_OP_MMX;
343 op->unit = X86_OP_SSE;
/* Register number = modrm.reg, extended by REX.R/VEX.R. */
346 op->n = ((get_modrm(s, env) >> 3) & 7) | REX_R(s);
349 case X86_TYPE_E: /* ALU modrm operand */
350 op->unit = X86_OP_INT;
353 case X86_TYPE_Q: /* MMX modrm operand */
354 op->unit = X86_OP_MMX;
357 case X86_TYPE_W: /* XMM/YMM modrm operand */
358 if (decode->e.special == X86_SPECIAL_MMX &&
359 !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
360 op->unit = X86_OP_MMX;
362 op->unit = X86_OP_SSE;
366 case X86_TYPE_N: /* R/M in the modrm byte selects an MMX register */
367 op->unit = X86_OP_MMX;
370 case X86_TYPE_U: /* R/M in the modrm byte selects an XMM/YMM register */
371 if (decode->e.special == X86_SPECIAL_MMX &&
372 !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
373 op->unit = X86_OP_MMX;
375 op->unit = X86_OP_SSE;
379 case X86_TYPE_R: /* R/M in the modrm byte selects a register */
380 op->unit = X86_OP_INT;
/* R requires mod == 3 (register form only). */
382 modrm = get_modrm(s, env);
383 if ((modrm >> 6) != 3) {
388 case X86_TYPE_M: /* modrm byte selects a memory operand */
/* M requires mod != 3 (memory form only). */
389 modrm = get_modrm(s, env);
390 if ((modrm >> 6) == 3) {
394 decode_modrm(s, env, decode, op, type);
397 case X86_TYPE_O: /* Absolute address encoded in the instruction */
398 op->unit = X86_OP_INT;
401 decode->mem = (AddressParts) {
405 .disp = insn_get_addr(env, s, s->aflag)
409 case X86_TYPE_H: /* For AVX, VEX.vvvv selects an XMM/YMM register */
410 if ((s->prefix & PREFIX_VEX)) {
411 op->unit = X86_OP_SSE;
/* No VEX: H degenerates to the other operand's encoding. */
415 if (op == &decode->op[0]) {
416 /* shifts place the destination in VEX.vvvv, use modrm */
417 return decode_op(s, env, decode, op, decode->e.op1, b);
419 return decode_op(s, env, decode, op, decode->e.op0, b);
422 case X86_TYPE_I: /* Immediate */
423 op->unit = X86_OP_IMM;
424 decode->immediate = insn_get_signed(env, s, op->ot);
427 case X86_TYPE_J: /* Relative offset for a jump */
428 op->unit = X86_OP_IMM;
429 decode->immediate = insn_get_signed(env, s, op->ot);
/* Make the target absolute, then truncate to the operand size. */
430 decode->immediate += s->pc - s->cs_base;
431 if (s->dflag == MO_16) {
432 decode->immediate &= 0xffff;
433 } else if (!CODE64(s)) {
434 decode->immediate &= 0xffffffffu;
438 case X86_TYPE_L: /* The upper 4 bits of the immediate select a 128-bit register */
439 op->n = insn_get(env, s, op->ot) >> 4;
442 case X86_TYPE_X: /* string source */
444 decode->mem = (AddressParts) {
451 case X86_TYPE_Y: /* string destination */
453 decode->mem = (AddressParts) {
464 case X86_TYPE_LoBits:
/* Register encoded in the low 3 opcode bits, extended by REX.B. */
465 op->n = (b & 7) | REX_B(s);
466 op->unit = X86_OP_INT;
469 case X86_TYPE_0 ... X86_TYPE_7:
470 op->n = type - X86_TYPE_0;
471 op->unit = X86_OP_INT;
474 case X86_TYPE_ES ... X86_TYPE_GS:
475 op->n = type - X86_TYPE_ES;
476 op->unit = X86_OP_SEG;
/*
 * Run the table walk for one instruction: follow chained .decode
 * callbacks until a leaf entry is reached, compute all operand sizes
 * (needed up front so s->rip_offset accounts for trailing immediates),
 * then decode each operand.  Returns false (elided paths) on an
 * invalid encoding.
 */
483 static bool decode_insn(DisasContext *s, CPUX86State *env, X86DecodeFunc decode_func,
484 X86DecodedInsn *decode)
486 X86OpEntry *e = &decode->e;
488 decode_func(s, env, e, &decode->b);
/* Chained group entries overwrite *e until a leaf (is_decode == false). */
489 while (e->is_decode) {
490 e->is_decode = false;
491 e->decode(s, env, e, &decode->b);
494 /* First compute size of operands in order to initialize s->rip_offset. */
495 if (e->op0 != X86_TYPE_None) {
496 if (!decode_op_size(s, e, e->s0, &decode->op[0].ot)) {
499 if (e->op0 == X86_TYPE_I) {
500 s->rip_offset += 1 << decode->op[0].ot;
503 if (e->op1 != X86_TYPE_None) {
504 if (!decode_op_size(s, e, e->s1, &decode->op[1].ot)) {
507 if (e->op1 == X86_TYPE_I) {
508 s->rip_offset += 1 << decode->op[1].ot;
511 if (e->op2 != X86_TYPE_None) {
512 if (!decode_op_size(s, e, e->s2, &decode->op[2].ot)) {
515 if (e->op2 == X86_TYPE_I) {
516 s->rip_offset += 1 << decode->op[2].ot;
/* Operand 3, when present, is always an immediate byte (X86_OP_ENTRY4). */
519 if (e->op3 != X86_TYPE_None) {
520 assert(e->op3 == X86_TYPE_I && e->s3 == X86_SIZE_b);
524 if (e->op0 != X86_TYPE_None &&
525 !decode_op(s, env, decode, &decode->op[0], e->op0, decode->b)) {
529 if (e->op1 != X86_TYPE_None &&
530 !decode_op(s, env, decode, &decode->op[1], e->op1, decode->b)) {
534 if (e->op2 != X86_TYPE_None &&
535 !decode_op(s, env, decode, &decode->op[2], e->op2, decode->b)) {
539 if (e->op3 != X86_TYPE_None) {
540 decode->immediate = insn_get_signed(env, s, MO_8);
/*
 * Check whether the vCPU advertises the CPUID feature required by a
 * table entry (X86_FEAT_*).  Unknown feature codes are a decoder bug
 * (g_assert_not_reached).
 */
546 static bool has_cpuid_feature(DisasContext *s, X86CPUIDFeature cpuid)
552 return (s->cpuid_ext_features & CPUID_EXT_MOVBE);
553 case X86_FEAT_PCLMULQDQ:
554 return (s->cpuid_ext_features & CPUID_EXT_PCLMULQDQ);
/*
 * NOTE(review): CPUID_SSE/CPUID_SSE2 are CPUID[1].EDX bits; testing
 * them against cpuid_ext_features (the ECX word, as used for the
 * CPUID_EXT_* flags above) looks wrong -- verify this should be
 * s->cpuid_features instead.
 */
556 return (s->cpuid_ext_features & CPUID_SSE);
558 return (s->cpuid_ext_features & CPUID_SSE2);
560 return (s->cpuid_ext_features & CPUID_EXT_SSE3);
562 return (s->cpuid_ext_features & CPUID_EXT_SSSE3);
564 return (s->cpuid_ext_features & CPUID_EXT_SSE41);
566 return (s->cpuid_ext_features & CPUID_EXT_SSE42);
/* AES/VAES: legacy form needs AES; VEX form additionally needs AVX,
 * and the 256-bit form needs VAES. */
568 if (!(s->cpuid_ext_features & CPUID_EXT_AES)) {
570 } else if (!(s->prefix & PREFIX_VEX)) {
572 } else if (!(s->cpuid_ext_features & CPUID_EXT_AVX)) {
575 return !s->vex_l || (s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_VAES);
579 return (s->cpuid_ext_features & CPUID_EXT_AVX);
582 return (s->cpuid_ext3_features & CPUID_EXT3_SSE4A);
585 return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_ADX);
587 return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI1);
589 return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_BMI2);
591 return (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_AVX2);
593 g_assert_not_reached();
/*
 * Validate VEX/SSE constraints for the decoded instruction: prefix
 * legality per exception class, CR0.TS/CR0.EM and OSFXSR/AVX enables,
 * VSIB encoding and register-overlap rules, and VEX.vvvv being unused
 * where required.  Returns false after raising the appropriate fault
 * (the gen_illegal_opcode at the end is one such exit).
 */
596 static bool validate_vex(DisasContext *s, X86DecodedInsn *decode)
598 X86OpEntry *e = &decode->e;
600 switch (e->vex_special) {
601 case X86_VEX_REPScalar:
603 * Instructions which differ between 00/66 and F2/F3 in the
604 * exception classification and the size of the memory operand.
606 assert(e->vex_class == 1 || e->vex_class == 2);
607 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) {
/* Scalar form: shrink the memory operand to 32 (F3) or 64 (F2) bits. */
612 assert(decode->e.s2 == X86_SIZE_x);
613 if (decode->op[2].has_ea) {
614 decode->op[2].ot = s->prefix & PREFIX_REPZ ? MO_32 : MO_64;
619 case X86_VEX_SSEUnaligned:
620 /* handled in sse_needs_alignment. */
623 case X86_VEX_AVX2_256:
624 if ((s->prefix & PREFIX_VEX) && s->vex_l && !has_cpuid_feature(s, X86_FEAT_AVX2)) {
629 /* TODO: instructions that require VEX.W=0 (Table 2-16) */
631 switch (e->vex_class) {
633 if (s->prefix & PREFIX_VEX) {
643 if (s->prefix & PREFIX_VEX) {
644 if (!(s->flags & HF_AVX_EN_MASK)) {
648 if (!(s->flags & HF_OSFXSR_MASK)) {
654 /* Must have a VSIB byte and no address prefix. */
655 assert(s->has_modrm);
656 if ((s->modrm & 7) != 4 || s->aflag == MO_16) {
660 /* Check no overlap between registers. */
661 if (!decode->op[0].has_ea &&
662 (decode->op[0].n == decode->mem.index || decode->op[0].n == decode->op[1].n)) {
665 assert(!decode->op[1].has_ea);
666 if (decode->op[1].n == decode->mem.index) {
669 if (!decode->op[2].has_ea &&
670 (decode->op[2].n == decode->mem.index || decode->op[2].n == decode->op[1].n)) {
676 if (!(s->prefix & PREFIX_VEX)) {
679 if (!(s->flags & HF_AVX_EN_MASK)) {
684 if (!(s->prefix & PREFIX_VEX)) {
688 if (!(s->flags & HF_AVX_EN_MASK)) {
693 if (!(s->prefix & PREFIX_VEX)) {
699 /* All integer instructions use VEX.vvvv, so exit. */
/* Otherwise, VEX.vvvv must be zero unless some operand consumes it. */
704 e->op0 != X86_TYPE_H && e->op0 != X86_TYPE_B &&
705 e->op1 != X86_TYPE_H && e->op1 != X86_TYPE_B &&
706 e->op2 != X86_TYPE_H && e->op2 != X86_TYPE_B) {
/* CR0.TS set: defer to #NM; CR0.EM set: SSE is disabled. */
710 if (s->flags & HF_TS_MASK) {
713 if (s->flags & HF_EM_MASK) {
722 gen_illegal_opcode(s);
/* Free the TCG pointer temporary backing one decoded operand, if any. */
726 static void decode_temp_free(X86DecodedOp *op)
729 tcg_temp_free_ptr(op->v_ptr);
/* Release the temporaries of all three decoded operands. */
733 static void decode_temps_free(X86DecodedInsn *decode)
735 decode_temp_free(&decode->op[0]);
736 decode_temp_free(&decode->op[1]);
737 decode_temp_free(&decode->op[2]);
/*
 * Top-level entry point: translate one instruction.  Collects legacy,
 * REX and VEX prefixes, runs the table-driven decode (decode_insn),
 * applies per-entry X86_SPECIAL_* fixups, validates VEX constraints,
 * then loads operands, calls the entry's .gen emitter and writes back
 * the result.
 */
741 * Convert one instruction. s->base.is_jmp is set if the translation must
744 static void disas_insn_new(DisasContext *s, CPUState *cpu, int b)
746 CPUX86State *env = cpu->env_ptr;
748 X86DecodedInsn decode;
749 X86DecodeFunc decode_func = decode_root;
751 #ifdef CONFIG_USER_ONLY
752 if (limit) { --limit; }
754 s->has_modrm = false;
760 b = x86_ldub_code(env, s);
762 /* Collect prefixes. */
/* F3 and F2 are mutually exclusive; the last one seen wins. */
765 s->prefix |= PREFIX_REPZ;
766 s->prefix &= ~PREFIX_REPNZ;
769 s->prefix |= PREFIX_REPNZ;
770 s->prefix &= ~PREFIX_REPZ;
773 s->prefix |= PREFIX_LOCK;
794 s->prefix |= PREFIX_DATA;
797 s->prefix |= PREFIX_ADR;
/* REX prefix: W in bit 3, R/X/B widened into bit 3 of reg numbers. */
803 s->prefix |= PREFIX_REX;
804 s->vex_w = (b >> 3) & 1;
805 s->rex_r = (b & 0x4) << 1;
806 s->rex_x = (b & 0x2) << 2;
807 s->rex_b = (b & 0x1) << 3;
812 case 0xc5: /* 2-byte VEX */
813 case 0xc4: /* 3-byte VEX */
815 * VEX prefixes cannot be used except in 32-bit mode.
816 * Otherwise the instruction is LES or LDS.
818 if (CODE32(s) && !VM86(s)) {
/* VEX "pp" field maps to an implied 66/F3/F2 prefix. */
819 static const int pp_prefix[4] = {
820 0, PREFIX_DATA, PREFIX_REPZ, PREFIX_REPNZ
822 int vex3, vex2 = x86_ldub_code(env, s);
824 if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
826 * 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
827 * otherwise the instruction is LES or LDS.
829 s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
833 /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
834 if (s->prefix & (PREFIX_REPZ | PREFIX_REPNZ
835 | PREFIX_LOCK | PREFIX_DATA | PREFIX_REX)) {
/* VEX bits are inverted in the encoding, hence the ~. */
839 s->rex_r = (~vex2 >> 4) & 8;
842 /* 2-byte VEX prefix: RVVVVlpp, implied 0f leading opcode byte */
844 decode_func = decode_0F;
846 /* 3-byte VEX prefix: RXBmmmmm wVVVVlpp */
847 vex3 = x86_ldub_code(env, s);
849 s->rex_x = (~vex2 >> 3) & 8;
850 s->rex_b = (~vex2 >> 2) & 8;
852 s->vex_w = (vex3 >> 7) & 1;
/* m-mmmm field selects the implied escape bytes. */
853 switch (vex2 & 0x1f) {
854 case 0x01: /* Implied 0f leading opcode bytes. */
855 decode_func = decode_0F;
857 case 0x02: /* Implied 0f 38 leading opcode bytes. */
858 decode_func = decode_0F38;
860 case 0x03: /* Implied 0f 3a leading opcode bytes. */
861 decode_func = decode_0F3A;
863 default: /* Reserved for future use. */
867 s->vex_v = (~vex3 >> 3) & 0xf;
868 s->vex_l = (vex3 >> 2) & 1;
869 s->prefix |= pp_prefix[vex3 & 3] | PREFIX_VEX;
875 decode_func = do_decode_0F;
880 /* Post-process prefixes. */
883 * In 64-bit mode, the default data size is 32-bit. Select 64-bit
884 * data with rex_w, and 16-bit data with 0x66; rex_w takes precedence
885 * over 0x66 if both are present.
887 s->dflag = (REX_W(s) ? MO_64 : s->prefix & PREFIX_DATA ? MO_16 : MO_32);
888 /* In 64-bit mode, 0x67 selects 32-bit addressing. */
889 s->aflag = (s->prefix & PREFIX_ADR ? MO_32 : MO_64);
891 /* In 16/32-bit mode, 0x66 selects the opposite data size. */
892 if (CODE32(s) ^ ((s->prefix & PREFIX_DATA) != 0)) {
897 /* In 16/32-bit mode, 0x67 selects the opposite addressing. */
898 if (CODE32(s) ^ ((s->prefix & PREFIX_ADR) != 0)) {
905 memset(&decode, 0, sizeof(decode));
907 if (!decode_insn(s, env, decode_func, &decode)) {
914 if (!has_cpuid_feature(s, decode.e.cpuid)) {
/* Per-entry fixups requested via the X86_SPECIAL_* attribute. */
918 switch (decode.e.special) {
919 case X86_SPECIAL_None:
922 case X86_SPECIAL_Locked:
/* Force LOCK semantics when the destination is in memory. */
923 if (decode.op[0].has_ea) {
924 s->prefix |= PREFIX_LOCK;
928 case X86_SPECIAL_ProtMode:
929 if (!PE(s) || VM86(s)) {
934 case X86_SPECIAL_i64:
939 case X86_SPECIAL_o64:
/* ZExtOp0/ZExtOp2: register operand is handled as a 32-bit value. */
945 case X86_SPECIAL_ZExtOp0:
946 assert(decode.op[0].unit == X86_OP_INT);
947 if (!decode.op[0].has_ea) {
948 decode.op[0].ot = MO_32;
952 case X86_SPECIAL_ZExtOp2:
953 assert(decode.op[2].unit == X86_OP_INT);
954 if (!decode.op[2].has_ea) {
955 decode.op[2].ot = MO_32;
/* MMX form (no 66/F2/F3): enter MMX state before the operation. */
959 case X86_SPECIAL_MMX:
960 if (!(s->prefix & (PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA))) {
961 gen_helper_enter_mmx(cpu_env);
966 if (!validate_vex(s, &decode)) {
969 if (decode.op[0].has_ea || decode.op[1].has_ea || decode.op[2].has_ea) {
970 gen_load_ea(s, &decode.mem, decode.e.vex_class == 12);
/* LOCKed path: only integer memory destinations are allowed. */
972 if (s->prefix & PREFIX_LOCK) {
973 if (decode.op[0].unit != X86_OP_INT || !decode.op[0].has_ea) {
976 gen_load(s, &decode, 2, s->T1);
977 decode.e.gen(s, env, &decode);
979 if (decode.op[0].unit == X86_OP_MMX) {
980 compute_mmx_offset(&decode.op[0]);
981 } else if (decode.op[0].unit == X86_OP_SSE) {
982 compute_xmm_offset(&decode.op[0]);
/* Normal path: load op1/op2, run the emitter, write back op0. */
984 gen_load(s, &decode, 1, s->T0);
985 gen_load(s, &decode, 2, s->T1);
986 decode.e.gen(s, env, &decode);
987 gen_writeback(s, &decode, 0, s->T0);
989 decode_temps_free(&decode);
992 gen_illegal_opcode(s);
995 gen_unknown_opcode(env, s);