/*
 * New-style TCG opcode generator for i386 instructions
 *
 * Copyright (c) 2022 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 static void gen_NM_exception(DisasContext *s)
24 gen_exception(s, EXCP07_PREX);
27 static void gen_illegal(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
29 gen_illegal_opcode(s);
32 static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib)
34 TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib);
35 gen_lea_v_seg(s, s->aflag, ea, mem->def_seg, s->override);
38 static inline int mmx_offset(MemOp ot)
42 return offsetof(MMXReg, MMX_B(0));
44 return offsetof(MMXReg, MMX_W(0));
46 return offsetof(MMXReg, MMX_L(0));
48 return offsetof(MMXReg, MMX_Q(0));
50 g_assert_not_reached();
54 static inline int xmm_offset(MemOp ot)
58 return offsetof(ZMMReg, ZMM_B(0));
60 return offsetof(ZMMReg, ZMM_W(0));
62 return offsetof(ZMMReg, ZMM_L(0));
64 return offsetof(ZMMReg, ZMM_Q(0));
66 return offsetof(ZMMReg, ZMM_X(0));
68 return offsetof(ZMMReg, ZMM_Y(0));
70 g_assert_not_reached();
74 static void compute_mmx_offset(X86DecodedOp *op)
77 op->offset = offsetof(CPUX86State, fpregs[op->n].mmx) + mmx_offset(op->ot);
79 op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot);
83 static void compute_xmm_offset(X86DecodedOp *op)
86 op->offset = ZMM_OFFSET(op->n) + xmm_offset(op->ot);
88 op->offset = offsetof(CPUX86State, xmm_t0) + xmm_offset(op->ot);
92 static void gen_load_sse(DisasContext *s, TCGv temp, MemOp ot, int dest_ofs, bool aligned)
96 gen_op_ld_v(s, MO_8, temp, s->A0);
97 tcg_gen_st8_tl(temp, cpu_env, dest_ofs);
100 gen_op_ld_v(s, MO_16, temp, s->A0);
101 tcg_gen_st16_tl(temp, cpu_env, dest_ofs);
104 gen_op_ld_v(s, MO_32, temp, s->A0);
105 tcg_gen_st32_tl(temp, cpu_env, dest_ofs);
108 gen_ldq_env_A0(s, dest_ofs);
111 gen_ldo_env_A0(s, dest_ofs, aligned);
114 gen_ldy_env_A0(s, dest_ofs, aligned);
117 g_assert_not_reached();
121 static bool sse_needs_alignment(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
123 switch (decode->e.vex_class) {
126 if ((s->prefix & PREFIX_VEX) ||
127 decode->e.vex_special == X86_VEX_SSEUnaligned) {
128 /* MOST legacy SSE instructions require aligned memory operands, but not all. */
140 static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
142 X86DecodedOp *op = &decode->op[opn];
148 tcg_gen_ld32u_tl(v, cpu_env,
149 offsetof(CPUX86State,segs[op->n].selector));
152 tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, cr[op->n]));
155 tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, dr[op->n]));
159 gen_op_ld_v(s, op->ot, v, s->A0);
161 gen_op_mov_v_reg(s, op->ot, v, op->n);
165 tcg_gen_movi_tl(v, decode->immediate);
169 compute_mmx_offset(op);
173 compute_xmm_offset(op);
176 bool aligned = sse_needs_alignment(s, decode, op->ot);
177 gen_load_sse(s, v, op->ot, op->offset, aligned);
182 g_assert_not_reached();
186 static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
188 X86DecodedOp *op = &decode->op[opn];
193 /* Note that gen_movl_seg_T0 takes care of interrupt shadow and TF. */
194 gen_movl_seg_T0(s, op->n);
198 gen_op_st_v(s, op->ot, v, s->A0);
200 gen_op_mov_reg_v(s, op->ot, op->n, v);
206 if ((s->prefix & PREFIX_VEX) && op->ot == MO_128) {
207 tcg_gen_gvec_dup_imm(MO_64,
208 offsetof(CPUX86State, xmm_regs[op->n].ZMM_X(1)),
215 g_assert_not_reached();