/*
 * New-style TCG opcode generator for i386 instructions
 *
 * Copyright (c) 2022 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

static void gen_NM_exception(DisasContext *s)
{
    gen_exception(s, EXCP07_PREX);
}

static void gen_illegal(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_illegal_opcode(s);
}

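/* Compute the effective address of a decoded memory operand into s->A0. */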
static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib)
{
    TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib);
    gen_lea_v_seg(s, s->aflag, ea, mem->def_seg, s->override);
}

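/*
 * mmx_offset/xmm_offset return the byte offset of the lowest element of
 * the given width inside an MMXReg/ZMMReg, so that op->offset can point
 * directly at the element an instruction operates on.
 */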
static inline int mmx_offset(MemOp ot)
{
    switch (ot) {
    case MO_8:
        return offsetof(MMXReg, MMX_B(0));
    case MO_16:
        return offsetof(MMXReg, MMX_W(0));
    case MO_32:
        return offsetof(MMXReg, MMX_L(0));
    case MO_64:
        return offsetof(MMXReg, MMX_Q(0));
    default:
        g_assert_not_reached();
    }
}

static inline int xmm_offset(MemOp ot)
{
    switch (ot) {
    case MO_8:
        return offsetof(ZMMReg, ZMM_B(0));
    case MO_16:
        return offsetof(ZMMReg, ZMM_W(0));
    case MO_32:
        return offsetof(ZMMReg, ZMM_L(0));
    case MO_64:
        return offsetof(ZMMReg, ZMM_Q(0));
    case MO_128:
        return offsetof(ZMMReg, ZMM_X(0));
    case MO_256:
        return offsetof(ZMMReg, ZMM_Y(0));
    default:
        g_assert_not_reached();
    }
}

static int vector_reg_offset(X86DecodedOp *op)
{
    assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE);

    if (op->unit == X86_OP_MMX) {
        return op->offset - mmx_offset(op->ot);
    } else {
        return op->offset - xmm_offset(op->ot);
    }
}

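/* Offset of element N of width OT inside the register backing operand OP. */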
static int vector_elem_offset(X86DecodedOp *op, MemOp ot, int n)
{
    int base_ofs = vector_reg_offset(op);

    switch (ot) {
    case MO_8:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_B(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_B(n));
        }
    case MO_16:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_W(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_W(n));
        }
    case MO_32:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_L(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_L(n));
        }
    case MO_64:
        if (op->unit == X86_OP_MMX) {
            return base_ofs;
        }
        return base_ofs + offsetof(ZMMReg, ZMM_Q(n));
    case MO_128:
        assert(op->unit == X86_OP_SSE);
        return base_ofs + offsetof(ZMMReg, ZMM_X(n));
    case MO_256:
        assert(op->unit == X86_OP_SSE);
        return base_ofs + offsetof(ZMMReg, ZMM_Y(n));
    default:
        g_assert_not_reached();
    }
}

static void compute_mmx_offset(X86DecodedOp *op)
{
    if (!op->has_ea) {
        op->offset = offsetof(CPUX86State, fpregs[op->n].mmx) + mmx_offset(op->ot);
    } else {
        op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot);
    }
}

static void compute_xmm_offset(X86DecodedOp *op)
{
    if (!op->has_ea) {
        op->offset = ZMM_OFFSET(op->n) + xmm_offset(op->ot);
    } else {
        op->offset = offsetof(CPUX86State, xmm_t0) + xmm_offset(op->ot);
    }
}

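/* Load a vector value of width OT from the address in s->A0 into the CPUX86State field at dest_ofs. */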
static void gen_load_sse(DisasContext *s, TCGv temp, MemOp ot, int dest_ofs, bool aligned)
{
    switch (ot) {
    case MO_8:
        gen_op_ld_v(s, MO_8, temp, s->A0);
        tcg_gen_st8_tl(temp, cpu_env, dest_ofs);
        break;
    case MO_16:
        gen_op_ld_v(s, MO_16, temp, s->A0);
        tcg_gen_st16_tl(temp, cpu_env, dest_ofs);
        break;
    case MO_32:
        gen_op_ld_v(s, MO_32, temp, s->A0);
        tcg_gen_st32_tl(temp, cpu_env, dest_ofs);
        break;
    case MO_64:
        gen_ldq_env_A0(s, dest_ofs);
        break;
    case MO_128:
        gen_ldo_env_A0(s, dest_ofs, aligned);
        break;
    case MO_256:
        gen_ldy_env_A0(s, dest_ofs, aligned);
        break;
    default:
        g_assert_not_reached();
    }
}

static bool sse_needs_alignment(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
{
    switch (decode->e.vex_class) {
    case 1:
        if ((s->prefix & PREFIX_VEX) ||
            decode->e.vex_special == X86_VEX_SSEUnaligned) {
            /* MOST legacy SSE instructions require aligned memory operands, but not all. */
            return false;
        }
        /* fall through */
    case 2:
        return ot >= MO_128;

    default:
        return false;
    }
}

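/*
 * Load operand OPN of the decoded instruction: general registers,
 * segment selectors, CR/DR and immediates go into V; MMX/SSE operands
 * only have their CPUX86State offset computed, loading memory operands
 * into the mmx_t0/xmm_t0 scratch area first.
 */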
static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
{
    X86DecodedOp *op = &decode->op[opn];

    switch (op->unit) {
    case X86_OP_SKIP:
        return;
    case X86_OP_SEG:
        tcg_gen_ld32u_tl(v, cpu_env,
                         offsetof(CPUX86State,segs[op->n].selector));
        break;
    case X86_OP_CR:
        tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, cr[op->n]));
        break;
    case X86_OP_DR:
        tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, dr[op->n]));
        break;
    case X86_OP_INT:
        if (op->has_ea) {
            gen_op_ld_v(s, op->ot, v, s->A0);
        } else {
            gen_op_mov_v_reg(s, op->ot, v, op->n);
        }
        break;
    case X86_OP_IMM:
        tcg_gen_movi_tl(v, decode->immediate);
        break;

    case X86_OP_MMX:
        compute_mmx_offset(op);
        goto load_vector;

    case X86_OP_SSE:
        compute_xmm_offset(op);
    load_vector:
        if (op->has_ea) {
            bool aligned = sse_needs_alignment(s, decode, op->ot);
            gen_load_sse(s, v, op->ot, op->offset, aligned);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static TCGv_ptr op_ptr(X86DecodedInsn *decode, int opn)
{
    X86DecodedOp *op = &decode->op[opn];

    if (op->v_ptr) {
        return op->v_ptr;
    }
    op->v_ptr = tcg_temp_new_ptr();

    /* The temporary points to the MMXReg or ZMMReg. */
    tcg_gen_addi_ptr(op->v_ptr, cpu_env, vector_reg_offset(op));
    return op->v_ptr;
}

#define OP_PTR0 op_ptr(decode, 0)
#define OP_PTR1 op_ptr(decode, 1)
#define OP_PTR2 op_ptr(decode, 2)

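/* Write back operand OPN of the decoded instruction after execution. */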
static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
{
    X86DecodedOp *op = &decode->op[opn];

    switch (op->unit) {
    case X86_OP_SKIP:
        break;
    case X86_OP_SEG:
        /* Note that gen_movl_seg_T0 takes care of interrupt shadow and TF. */
        gen_movl_seg_T0(s, op->n);
        break;
    case X86_OP_INT:
        if (op->has_ea) {
            gen_op_st_v(s, op->ot, v, s->A0);
        } else {
            gen_op_mov_reg_v(s, op->ot, op->n, v);
        }
        break;
    case X86_OP_MMX:
        break;
    case X86_OP_SSE:
        if ((s->prefix & PREFIX_VEX) && op->ot == MO_128) {
            tcg_gen_gvec_dup_imm(MO_64,
                                 offsetof(CPUX86State, xmm_regs[op->n].ZMM_X(1)),
                                 16, 16, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

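/* Vector operand length in bytes: 8 for MMX, 16 for XMM, 32 for YMM. */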
static inline int vector_len(DisasContext *s, X86DecodedInsn *decode)
{
    if (decode->e.special == X86_SPECIAL_MMX &&
        !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
        return 8;
    }
    return s->vex_l ? 32 : 16;
}

static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs)
{
    MemOp ot = decode->op[0].ot;
    int vec_len = vector_len(s, decode);
    bool aligned = sse_needs_alignment(s, decode, ot);

    if (!decode->op[0].has_ea) {
        tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, vec_len, vec_len);
        return;
    }

    switch (ot) {
    case MO_64:
        gen_stq_env_A0(s, src_ofs);
        break;
    case MO_128:
        gen_sto_env_A0(s, src_ofs, aligned);
        break;
    case MO_256:
        gen_sty_env_A0(s, src_ofs, aligned);
        break;
    default:
        g_assert_not_reached();
    }
}

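/*
 * Emit a binary integer vector operation through a gvec expander; the
 * variadic arguments are prepended to the usual dofs/aofs/bofs/oprsz/maxsz
 * parameters.
 */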
#define BINARY_INT_GVEC(uname, func, ...)                                          \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{                                                                                   \
    int vec_len = vector_len(s, decode);                                            \
    func(__VA_ARGS__,                                                                \
         decode->op[0].offset, decode->op[1].offset,                                \
         decode->op[2].offset, vec_len, vec_len);                                    \
}

BINARY_INT_GVEC(PCMPGTB, tcg_gen_gvec_cmp, TCG_COND_GT, MO_8)
BINARY_INT_GVEC(PCMPGTW, tcg_gen_gvec_cmp, TCG_COND_GT, MO_16)
BINARY_INT_GVEC(PCMPGTD, tcg_gen_gvec_cmp, TCG_COND_GT, MO_32)

/*
 * 00 = p*  Pq, Qq (if mmx not NULL; no VEX)
 * 66 = vp* Vx, Hx, Wx
 *
 * These are really the same encoding, because 1) V is the same as P when VEX.V
 * is not present 2) P and Q are the same as H and W apart from MM/XMM
 */
static inline void gen_binary_int_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
                                      SSEFunc_0_eppp mmx, SSEFunc_0_eppp xmm, SSEFunc_0_eppp ymm)
{
    assert(!!mmx == !!(decode->e.special == X86_SPECIAL_MMX));

    if (mmx && (s->prefix & PREFIX_VEX) && !(s->prefix & PREFIX_DATA)) {
        /* VEX encoding is not applicable to MMX instructions. */
        gen_illegal_opcode(s);
        return;
    }
    if (!(s->prefix & PREFIX_DATA)) {
        mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else if (!s->vex_l) {
        xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    }
}

#define BINARY_INT_MMX(uname, lname)                                                \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)  \
{                                                                                    \
    gen_binary_int_sse(s, env, decode,                                               \
                       gen_helper_##lname##_mmx,                                     \
                       gen_helper_##lname##_xmm,                                     \
                       gen_helper_##lname##_ymm);                                    \
}
BINARY_INT_MMX(PUNPCKLBW, punpcklbw)
BINARY_INT_MMX(PUNPCKLWD, punpcklwd)
BINARY_INT_MMX(PUNPCKLDQ, punpckldq)
BINARY_INT_MMX(PACKSSWB, packsswb)
BINARY_INT_MMX(PACKUSWB, packuswb)
BINARY_INT_MMX(PUNPCKHBW, punpckhbw)
BINARY_INT_MMX(PUNPCKHWD, punpckhwd)
BINARY_INT_MMX(PUNPCKHDQ, punpckhdq)
BINARY_INT_MMX(PACKSSDW, packssdw)

/* Instructions with no MMX equivalent. */
#define BINARY_INT_SSE(uname, lname)                                                \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)  \
{                                                                                    \
    gen_binary_int_sse(s, env, decode,                                               \
                       NULL,                                                         \
                       gen_helper_##lname##_xmm,                                     \
                       gen_helper_##lname##_ymm);                                    \
}

BINARY_INT_SSE(PUNPCKLQDQ, punpcklqdq)
BINARY_INT_SSE(PUNPCKHQDQ, punpckhqdq)

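/*
 * ADCX and ADOX share one body: add with the carry chain in CF (ADCX)
 * or OF (ADOX), leaving the other flags untouched.  Back-to-back
 * ADCX/ADOX pairs keep both carry chains live via CC_OP_ADCOX.
 */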
static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op)
{
    TCGv carry_in = NULL;
    TCGv carry_out = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2);
    TCGv zero;

    if (cc_op == s->cc_op || s->cc_op == CC_OP_ADCOX) {
        /* Re-use the carry-out from a previous round. */
        carry_in = carry_out;
        cc_op = s->cc_op;
    } else if (s->cc_op == CC_OP_ADCX || s->cc_op == CC_OP_ADOX) {
        /* Merge with the carry-out from the opposite instruction. */
        cc_op = CC_OP_ADCOX;
    }

    /* If we don't have a carry-in, get it out of EFLAGS. */
    if (!carry_in) {
        if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
            gen_compute_eflags(s);
        }
        carry_in = s->tmp0;
        tcg_gen_extract_tl(carry_in, cpu_cc_src,
            ctz32(cc_op == CC_OP_ADCX ? CC_C : CC_O), 1);
    }

    switch (ot) {
#ifdef TARGET_X86_64
    case MO_32:
        /* If TL is 64-bit just do everything in 64-bit arithmetic. */
        tcg_gen_add_i64(s->T0, s->T0, s->T1);
        tcg_gen_add_i64(s->T0, s->T0, carry_in);
        tcg_gen_shri_i64(carry_out, s->T0, 32);
        break;
#endif
    default:
        zero = tcg_constant_tl(0);
        tcg_gen_add2_tl(s->T0, carry_out, s->T0, zero, carry_in, zero);
        tcg_gen_add2_tl(s->T0, carry_out, s->T0, carry_out, s->T1, zero);
        break;
    }
    set_cc_op(s, cc_op);
}

static void gen_ADCX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADCX);
}

static void gen_ADOX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADOX);
}

static void gen_ANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    tcg_gen_andc_tl(s->T0, s->T1, s->T0);
    gen_op_update1_cc(s);
    set_cc_op(s, CC_OP_LOGICB + ot);
}

static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    TCGv bound, zero;

    /*
     * Extract START, and shift the operand.
     * Shifts larger than operand size get zeros.
     */
    tcg_gen_ext8u_tl(s->A0, s->T1);
    tcg_gen_shr_tl(s->T0, s->T0, s->A0);

    bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);
    zero = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero);

    /*
     * Extract the LEN into a mask.  Lengths larger than
     * operand size get all ones.
     */
    tcg_gen_extract_tl(s->A0, s->T1, 8, 8);
    tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->A0, bound, s->A0, bound);

    tcg_gen_movi_tl(s->T1, 1);
    tcg_gen_shl_tl(s->T1, s->T1, s->A0);
    tcg_gen_subi_tl(s->T1, s->T1, 1);
    tcg_gen_and_tl(s->T0, s->T0, s->T1);

    gen_op_update1_cc(s);
    set_cc_op(s, CC_OP_LOGICB + ot);
}

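/* BLSI: isolate the lowest set bit, i.e. dst = src & -src. */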
static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    tcg_gen_neg_tl(s->T1, s->T0);
    tcg_gen_and_tl(s->T0, s->T0, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    set_cc_op(s, CC_OP_BMILGB + ot);
}

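/* BLSMSK: mask up to and including the lowest set bit, i.e. dst = src ^ (src - 1). */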
static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    tcg_gen_subi_tl(s->T1, s->T0, 1);
    tcg_gen_xor_tl(s->T0, s->T0, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    set_cc_op(s, CC_OP_BMILGB + ot);
}

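/* BLSR: clear the lowest set bit, i.e. dst = src & (src - 1). */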
static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    tcg_gen_subi_tl(s->T1, s->T0, 1);
    tcg_gen_and_tl(s->T0, s->T0, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    set_cc_op(s, CC_OP_BMILGB + ot);
}

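/* BZHI: zero all bits of the source from the bit index in VEX.vvvv upwards. */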
static void gen_BZHI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    TCGv bound;

    tcg_gen_ext8u_tl(s->T1, cpu_regs[s->vex_v]);
    bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);

    /*
     * Note that since we're using BMILG (in order to get O
     * cleared) we need to store the inverse into C.
     */
    tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src, s->T1, bound);
    tcg_gen_movcond_tl(TCG_COND_GT, s->T1, s->T1, bound, bound, s->T1);

    tcg_gen_movi_tl(s->A0, -1);
    tcg_gen_shl_tl(s->A0, s->A0, s->T1);
    tcg_gen_andc_tl(s->T0, s->T0, s->A0);

    gen_op_update1_cc(s);
    set_cc_op(s, CC_OP_BMILGB + ot);
}

static void gen_CRC32(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[2].ot;

    tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
    gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot));
}

static void gen_MOVBE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    /* M operand type does not load/store */
    if (decode->e.op0 == X86_TYPE_M) {
        tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
    } else {
        tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
    }
}

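/* MOVD/MOVQ to a vector register: clear the destination, then store the low element. */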
static void gen_MOVD_to(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[2].ot;
    int vec_len = vector_len(s, decode);
    int lo_ofs = vector_elem_offset(&decode->op[0], ot, 0);

    tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);

    switch (ot) {
    case MO_32:
#ifdef TARGET_X86_64
        tcg_gen_st32_tl(s->T1, cpu_env, lo_ofs);
        break;
    case MO_64:
#endif
        tcg_gen_st_tl(s->T1, cpu_env, lo_ofs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_MOVDQ(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_store_sse(s, decode, decode->op[2].offset);
}

static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    /* low part of result in VEX.vvvv, high in MODRM */
    switch (ot) {
    default:
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                          s->tmp2_i32, s->tmp3_i32);
        tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32);
        tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mulu2_i64(cpu_regs[s->vex_v], s->T0, s->T0, s->T1);
        break;
#endif
    }
}

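/* PDEP/PEXT scatter, resp. gather, bits of the source according to a mask operand (BMI2). */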
static void gen_PDEP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    if (ot < MO_64) {
        tcg_gen_ext32u_tl(s->T0, s->T0);
    }
    gen_helper_pdep(s->T0, s->T0, s->T1);
}

static void gen_PEXT(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;

    if (ot < MO_64) {
        tcg_gen_ext32u_tl(s->T0, s->T0);
    }
    gen_helper_pext(s->T0, s->T0, s->T1);
}

static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    int b = decode->immediate;

    if (ot == MO_64) {
        tcg_gen_rotri_tl(s->T0, s->T0, b & 63);
    } else {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31);
        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
    }
}

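/* SARX/SHLX/SHRX: BMI2 shifts with an explicit count operand and no flag updates. */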
static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    int mask;

    mask = ot == MO_64 ? 63 : 31;
    tcg_gen_andi_tl(s->T1, s->T1, mask);
    if (ot != MO_64) {
        tcg_gen_ext32s_tl(s->T0, s->T0);
    }
    tcg_gen_sar_tl(s->T0, s->T0, s->T1);
}

static void gen_SHLX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    int mask;

    mask = ot == MO_64 ? 63 : 31;
    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_shl_tl(s->T0, s->T0, s->T1);
}

static void gen_SHRX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    int mask;

    mask = ot == MO_64 ? 63 : 31;
    tcg_gen_andi_tl(s->T1, s->T1, mask);
    if (ot != MO_64) {
        tcg_gen_ext32u_tl(s->T0, s->T0);
    }
    tcg_gen_shr_tl(s->T0, s->T0, s->T1);
}