2 * New-style TCG opcode generator for i386 instructions
4 * Copyright (c) 2022 Red Hat, Inc.
6 * Author: Paolo Bonzini <pbonzini@redhat.com>
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2.1 of the License, or (at your option) any later version.
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
/* Raise the #NM (device-not-available, EXCP07) exception. */
22 static void gen_NM_exception(DisasContext *s)
24     gen_exception(s, EXCP07_PREX);
/* Generic emitter that raises #UD for an undecodable/illegal instruction. */
27 static void gen_illegal(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
29     gen_illegal_opcode(s);
/*
 * Compute the effective address described by MEM (honoring VSIB addressing
 * when IS_VSIB is true) and apply the segment base/override, leaving the
 * final linear address in s->A0 via gen_lea_v_seg.
 */
32 static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib)
34     TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib);
35     gen_lea_v_seg(s, s->aflag, ea, mem->def_seg, s->override);
/*
 * Byte offset of element 0 of size OT inside an MMXReg.
 * NOTE(review): the switch labels (presumably MO_8/MO_16/MO_32/MO_64)
 * are elided in this view; each return below is one MemOp case.
 */
38 static inline int mmx_offset(MemOp ot)
42         return offsetof(MMXReg, MMX_B(0));
44         return offsetof(MMXReg, MMX_W(0));
46         return offsetof(MMXReg, MMX_L(0));
48         return offsetof(MMXReg, MMX_Q(0));
50         g_assert_not_reached();
/*
 * Byte offset of element 0 of size OT inside a ZMMReg.
 * NOTE(review): switch labels elided in this view; the cases cover byte
 * through 256-bit (ZMM_Y) element sizes.
 */
54 static inline int xmm_offset(MemOp ot)
58         return offsetof(ZMMReg, ZMM_B(0));
60         return offsetof(ZMMReg, ZMM_W(0));
62         return offsetof(ZMMReg, ZMM_L(0));
64         return offsetof(ZMMReg, ZMM_Q(0));
66         return offsetof(ZMMReg, ZMM_X(0));
68         return offsetof(ZMMReg, ZMM_Y(0));
70         g_assert_not_reached();
/*
 * Return the CPUX86State offset of the *start* of the vector register that
 * OP refers to, by undoing the element-0 adjustment that compute_mmx_offset/
 * compute_xmm_offset added for op->ot.  Only MMX and SSE operands are valid.
 */
74 static int vector_reg_offset(X86DecodedOp *op)
76     assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE);
78     if (op->unit == X86_OP_MMX) {
79         return op->offset - mmx_offset(op->ot);
81         return op->offset - xmm_offset(op->ot);
/*
 * Return the CPUX86State offset of element N of size OT within the vector
 * register of OP.  MMX operands use MMXReg element macros, SSE operands use
 * ZMMReg; 128/256-bit elements (ZMM_X/ZMM_Y) only exist for SSE.
 * NOTE(review): switch labels on OT are elided in this view.
 */
85 static int vector_elem_offset(X86DecodedOp *op, MemOp ot, int n)
87     int base_ofs = vector_reg_offset(op);
90     if (op->unit == X86_OP_MMX) {
91         return base_ofs + offsetof(MMXReg, MMX_B(n));
93     return base_ofs + offsetof(ZMMReg, ZMM_B(n));
96     if (op->unit == X86_OP_MMX) {
97         return base_ofs + offsetof(MMXReg, MMX_W(n));
99     return base_ofs + offsetof(ZMMReg, ZMM_W(n));
102     if (op->unit == X86_OP_MMX) {
103         return base_ofs + offsetof(MMXReg, MMX_L(n));
105     return base_ofs + offsetof(ZMMReg, ZMM_L(n));
108     if (op->unit == X86_OP_MMX) {
111     return base_ofs + offsetof(ZMMReg, ZMM_Q(n));
114     assert(op->unit == X86_OP_SSE);
115     return base_ofs + offsetof(ZMMReg, ZMM_X(n));
117     assert(op->unit == X86_OP_SSE);
118     return base_ofs + offsetof(ZMMReg, ZMM_Y(n));
120     g_assert_not_reached();
/*
 * Set op->offset for an MMX operand: either fpregs[op->n].mmx for a register
 * operand, or the mmx_t0 scratch area (where memory operands are staged) —
 * the selecting if/else is elided in this view.  The mmx_offset() term points
 * the offset at element 0 of size op->ot.
 */
124 static void compute_mmx_offset(X86DecodedOp *op)
127         op->offset = offsetof(CPUX86State, fpregs[op->n].mmx) + mmx_offset(op->ot);
129         op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot);
/*
 * Set op->offset for an SSE/AVX operand: either the architectural vector
 * register (ZMM_OFFSET(op->n)) or the xmm_t0 scratch for memory operands —
 * the selecting if/else is elided in this view.  xmm_offset() points at
 * element 0 of size op->ot.
 */
133 static void compute_xmm_offset(X86DecodedOp *op)
136         op->offset = ZMM_OFFSET(op->n) + xmm_offset(op->ot);
138         op->offset = offsetof(CPUX86State, xmm_t0) + xmm_offset(op->ot);
/*
 * Load a vector value of size OT from guest memory at s->A0 into the CPU
 * state at DEST_OFS.  Sub-64-bit sizes go through TEMP with a scalar load +
 * store into env; 64/128/256-bit sizes use the dedicated env-A0 helpers.
 * ALIGNED selects the alignment-checking variants for 128/256-bit accesses.
 * NOTE(review): the switch labels on OT are elided in this view.
 */
142 static void gen_load_sse(DisasContext *s, TCGv temp, MemOp ot, int dest_ofs, bool aligned)
146         gen_op_ld_v(s, MO_8, temp, s->A0);
147         tcg_gen_st8_tl(temp, cpu_env, dest_ofs);
150         gen_op_ld_v(s, MO_16, temp, s->A0);
151         tcg_gen_st16_tl(temp, cpu_env, dest_ofs);
154         gen_op_ld_v(s, MO_32, temp, s->A0);
155         tcg_gen_st32_tl(temp, cpu_env, dest_ofs);
158         gen_ldq_env_A0(s, dest_ofs);
161         gen_ldo_env_A0(s, dest_ofs, aligned);
164         gen_ldy_env_A0(s, dest_ofs, aligned);
167         g_assert_not_reached();
/*
 * Decide whether a memory operand of size OT must be aligned, based on the
 * instruction's VEX class.  VEX-encoded accesses and X86_VEX_SSEUnaligned
 * instructions are exempt.  NOTE(review): the rest of this function (the
 * remaining vex_class cases and the return statements) is elided in this
 * view — the visible prefix only shows the start of one class's check.
 */
171 static bool sse_needs_alignment(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
173     switch (decode->e.vex_class) {
176         if ((s->prefix & PREFIX_VEX) ||
177             decode->e.vex_special == X86_VEX_SSEUnaligned) {
178             /* MOST legacy SSE instructions require aligned memory operands, but not all. */
/*
 * Load operand OPN of the decoded instruction.  Scalar operands (segment
 * selector, CR/DR, general register, memory, immediate) are placed in V;
 * MMX/SSE operands only have their env offset computed (and memory-sourced
 * vectors are staged into the scratch register via gen_load_sse).
 * NOTE(review): the switch on op->unit and some case labels are elided in
 * this view.
 */
190 static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
192     X86DecodedOp *op = &decode->op[opn];
198         tcg_gen_ld32u_tl(v, cpu_env,
199                          offsetof(CPUX86State,segs[op->n].selector));
202         tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, cr[op->n]));
205         tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, dr[op->n]));
209             gen_op_ld_v(s, op->ot, v, s->A0);
211             gen_op_mov_v_reg(s, op->ot, v, op->n);
215         tcg_gen_movi_tl(v, decode->immediate);
219         compute_mmx_offset(op);
223         compute_xmm_offset(op);
226             bool aligned = sse_needs_alignment(s, decode, op->ot);
227             gen_load_sse(s, v, op->ot, op->offset, aligned);
232         g_assert_not_reached();
/*
 * Return a TCGv_ptr addressing the MMXReg/ZMMReg backing operand OPN,
 * creating and caching it in op->v_ptr.  The pointer targets the start of
 * the whole register (vector_reg_offset), not element 0 of op->ot.
 */
236 static TCGv_ptr op_ptr(X86DecodedInsn *decode, int opn)
238     X86DecodedOp *op = &decode->op[opn];
242     op->v_ptr = tcg_temp_new_ptr();
244     /* The temporary points to the MMXReg or ZMMReg. */
245     tcg_gen_addi_ptr(op->v_ptr, cpu_env, vector_reg_offset(op));
/* Shorthands for pointers to the three decoded operands (dest, src1, src2). */
249 #define OP_PTR0 op_ptr(decode, 0)
250 #define OP_PTR1 op_ptr(decode, 1)
251 #define OP_PTR2 op_ptr(decode, 2)
/*
 * Write back operand OPN after execution: segment registers go through
 * gen_movl_seg_T0, general registers / memory store V, and for VEX-encoded
 * 128-bit SSE results the upper half of the YMM register is zeroed as the
 * architecture requires.  NOTE(review): the switch on op->unit and several
 * case labels are elided in this view.
 */
253 static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
255     X86DecodedOp *op = &decode->op[opn];
260         /* Note that gen_movl_seg_T0 takes care of interrupt shadow and TF.  */
261         gen_movl_seg_T0(s, op->n);
265             gen_op_st_v(s, op->ot, v, s->A0);
267             gen_op_mov_reg_v(s, op->ot, op->n, v);
273         if ((s->prefix & PREFIX_VEX) && op->ot == MO_128) {
274             tcg_gen_gvec_dup_imm(MO_64,
275                                  offsetof(CPUX86State, xmm_regs[op->n].ZMM_X(1)),
282         g_assert_not_reached();
/*
 * Operating width of the vector operation in bytes: 16 or 32 depending on
 * VEX.L.  MMX-capable instructions without a 66/F3/F2 prefix operate on MMX
 * registers instead — the 8-byte return for that branch is elided in this
 * view (NOTE(review): confirm against full source).
 */
286 static inline int vector_len(DisasContext *s, X86DecodedInsn *decode)
288     if (decode->e.special == X86_SPECIAL_MMX &&
289         !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
292     return s->vex_l ? 32 : 16;
/*
 * Store the vector at env offset SRC_OFS into operand 0: a register-to-
 * register gvec move when the destination is a register, otherwise a
 * 64/128/256-bit store to memory at s->A0 (with alignment checking as
 * required).  NOTE(review): the switch labels on OT are elided in this view.
 */
295 static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs)
297     MemOp ot = decode->op[0].ot;
298     int vec_len = vector_len(s, decode);
299     bool aligned = sse_needs_alignment(s, decode, ot);
301     if (!decode->op[0].has_ea) {
302         tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, vec_len, vec_len);
308         gen_stq_env_A0(s, src_ofs);
311         gen_sto_env_A0(s, src_ofs, aligned);
314         gen_sty_env_A0(s, src_ofs, aligned);
317         g_assert_not_reached();
/*
 * Define gen_UNAME as a thin wrapper that emits a gvec binary integer
 * operation FUNC (with any extra leading arguments, e.g. element size or a
 * TCG condition) on op[1] and op[2], writing op[0], at the current vector
 * length.
 */
321 #define BINARY_INT_GVEC(uname, func, ...)                                          \
322 static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
324     int vec_len = vector_len(s, decode);                                           \
327          decode->op[0].offset, decode->op[1].offset,                               \
328          decode->op[2].offset, vec_len, vec_len);                                  \
/* MMX/SSE/AVX integer ops that map 1:1 onto generic gvec expanders. */
331 BINARY_INT_GVEC(PADDB,   tcg_gen_gvec_add, MO_8)
332 BINARY_INT_GVEC(PADDW,   tcg_gen_gvec_add, MO_16)
333 BINARY_INT_GVEC(PADDD,   tcg_gen_gvec_add, MO_32)
334 BINARY_INT_GVEC(PADDSB,  tcg_gen_gvec_ssadd, MO_8)
335 BINARY_INT_GVEC(PADDSW,  tcg_gen_gvec_ssadd, MO_16)
336 BINARY_INT_GVEC(PADDUSB, tcg_gen_gvec_usadd, MO_8)
337 BINARY_INT_GVEC(PADDUSW, tcg_gen_gvec_usadd, MO_16)
338 BINARY_INT_GVEC(PAND,    tcg_gen_gvec_and, MO_64)
339 BINARY_INT_GVEC(PCMPGTB, tcg_gen_gvec_cmp, TCG_COND_GT, MO_8)
340 BINARY_INT_GVEC(PCMPGTW, tcg_gen_gvec_cmp, TCG_COND_GT, MO_16)
341 BINARY_INT_GVEC(PCMPGTD, tcg_gen_gvec_cmp, TCG_COND_GT, MO_32)
342 BINARY_INT_GVEC(PMAXSW,  tcg_gen_gvec_smax, MO_16)
343 BINARY_INT_GVEC(PMAXUB,  tcg_gen_gvec_umax, MO_8)
344 BINARY_INT_GVEC(PMINSW,  tcg_gen_gvec_smin, MO_16)
345 BINARY_INT_GVEC(PMINUB,  tcg_gen_gvec_umin, MO_8)
346 BINARY_INT_GVEC(POR,     tcg_gen_gvec_or, MO_64)
347 BINARY_INT_GVEC(PSUBB,   tcg_gen_gvec_sub, MO_8)
348 BINARY_INT_GVEC(PSUBW,   tcg_gen_gvec_sub, MO_16)
349 BINARY_INT_GVEC(PSUBD,   tcg_gen_gvec_sub, MO_32)
350 BINARY_INT_GVEC(PSUBQ,   tcg_gen_gvec_sub, MO_64)
351 BINARY_INT_GVEC(PSUBSB,  tcg_gen_gvec_sssub, MO_8)
352 BINARY_INT_GVEC(PSUBSW,  tcg_gen_gvec_sssub, MO_16)
353 BINARY_INT_GVEC(PSUBUSB, tcg_gen_gvec_ussub, MO_8)
354 BINARY_INT_GVEC(PSUBUSW, tcg_gen_gvec_ussub, MO_16)
355 BINARY_INT_GVEC(PXOR,    tcg_gen_gvec_xor, MO_64)
359  * 00 = p*   Pq, Qq (if mmx not NULL; no VEX)
360  * 66 = vp*  Vx, Hx, Wx
362  * These are really the same encoding, because 1) V is the same as P when VEX.V
363  * is not present 2) P and Q are the same as H and W apart from MM/XMM
/*
 * Dispatch a binary integer SSE operation to the right helper based on the
 * prefix (no 66h prefix -> MMX form) and VEX.L (XMM vs YMM).  MMX may be
 * NULL exactly when the instruction has no MMX form (X86_SPECIAL_MMX not
 * set); VEX encoding of the MMX form raises #UD.
 */
365 static inline void gen_binary_int_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
366                                       SSEFunc_0_eppp mmx, SSEFunc_0_eppp xmm, SSEFunc_0_eppp ymm)
368     assert(!!mmx == !!(decode->e.special == X86_SPECIAL_MMX));
370     if (mmx && (s->prefix & PREFIX_VEX) && !(s->prefix & PREFIX_DATA)) {
371         /* VEX encoding is not applicable to MMX instructions.  */
372         gen_illegal_opcode(s);
375     if (!(s->prefix & PREFIX_DATA)) {
376         mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
377     } else if (!s->vex_l) {
378         xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
380         ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
/*
 * Define gen_UNAME for a binary integer instruction that has MMX, XMM and
 * YMM helper variants (gen_helper_<lname>_{mmx,xmm,ymm}).
 */
385 #define BINARY_INT_MMX(uname, lname)                                               \
386 static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
388     gen_binary_int_sse(s, env, decode,                                             \
389                        gen_helper_##lname##_mmx,                                   \
390                        gen_helper_##lname##_xmm,                                   \
391                        gen_helper_##lname##_ymm);                                  \
/* Pack/unpack instructions that exist in both MMX and SSE/AVX forms. */
393 BINARY_INT_MMX(PUNPCKLBW,  punpcklbw)
394 BINARY_INT_MMX(PUNPCKLWD,  punpcklwd)
395 BINARY_INT_MMX(PUNPCKLDQ,  punpckldq)
396 BINARY_INT_MMX(PACKSSWB,   packsswb)
397 BINARY_INT_MMX(PACKUSWB,   packuswb)
398 BINARY_INT_MMX(PUNPCKHBW,  punpckhbw)
399 BINARY_INT_MMX(PUNPCKHWD,  punpckhwd)
400 BINARY_INT_MMX(PUNPCKHDQ,  punpckhdq)
401 BINARY_INT_MMX(PACKSSDW,   packssdw)
403 /* Instructions with no MMX equivalent.  */
/* Same as BINARY_INT_MMX but passes NULL for the (nonexistent) MMX helper. */
404 #define BINARY_INT_SSE(uname, lname)                                               \
405 static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
407     gen_binary_int_sse(s, env, decode,                                             \
409                        gen_helper_##lname##_xmm,                                   \
410                        gen_helper_##lname##_ymm);                                  \
/* 64-bit-lane unpacks: SSE2/AVX only, no MMX form. */
413 BINARY_INT_SSE(PUNPCKLQDQ, punpcklqdq)
414 BINARY_INT_SSE(PUNPCKHQDQ, punpckhqdq)
/*
 * Common body for ADCX/ADOX: add T1 and a carry (CF for ADCX, OF for ADOX)
 * into T0, producing the new carry in cpu_cc_dst (ADCX) or cpu_cc_src2
 * (ADOX).  The carry-in is reused from a prior ADCX/ADOX round when the
 * tracked cc_op allows it, otherwise extracted from materialized EFLAGS.
 * NOTE(review): several statements (carry_in allocation, the 32-bit
 * mask/extract on the TARGET_X86_64 path, and the final set_cc_op) are
 * elided in this view.
 */
416 static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op)
418     TCGv carry_in = NULL;
419     TCGv carry_out = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2);
422     if (cc_op == s->cc_op || s->cc_op == CC_OP_ADCOX) {
423         /* Re-use the carry-out from a previous round.  */
424         carry_in = carry_out;
426     } else if (s->cc_op == CC_OP_ADCX || s->cc_op == CC_OP_ADOX) {
427         /* Merge with the carry-out from the opposite instruction.  */
431     /* If we don't have a carry-in, get it out of EFLAGS.  */
433         if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
434             gen_compute_eflags(s);
437         tcg_gen_extract_tl(carry_in, cpu_cc_src,
438             ctz32(cc_op == CC_OP_ADCX ? CC_C : CC_O), 1);
444         /* If TL is 64-bit just do everything in 64-bit arithmetic.  */
445         tcg_gen_add_i64(s->T0, s->T0, s->T1);
446         tcg_gen_add_i64(s->T0, s->T0, carry_in);
447         tcg_gen_shri_i64(carry_out, s->T0, 32);
451         zero = tcg_constant_tl(0);
452         tcg_gen_add2_tl(s->T0, carry_out, s->T0, zero, carry_in, zero);
453         tcg_gen_add2_tl(s->T0, carry_out, s->T0, carry_out, s->T1, zero);
/* ADCX: unsigned add with carry-in/out through CF only. */
459 static void gen_ADCX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
461     gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADCX);
/* ADOX: unsigned add with carry-in/out through OF only. */
464 static void gen_ADOX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
466     gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADOX);
/* BMI1 ANDN: T0 = T1 & ~T0, updating ZF/SF (logic-op flag group). */
469 static void gen_ANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
471     MemOp ot = decode->op[0].ot;
473     tcg_gen_andc_tl(s->T0, s->T1, s->T0);
474     gen_op_update1_cc(s);
475     set_cc_op(s, CC_OP_LOGICB + ot);
/*
 * BMI1 BEXTR: extract a bit field from T0.  T1 holds START in bits 0-7 and
 * LEN in bits 8-15.  Shift right by START (shifts beyond the operand width
 * yield zero), then mask with (1 << LEN) - 1 (LEN clamped to the operand
 * width yields an all-ones mask).  Flags set as for a logic op.
 */
478 static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
480     MemOp ot = decode->op[0].ot;
484      * Extract START, and shift the operand.
485      * Shifts larger than operand size get zeros.
487     tcg_gen_ext8u_tl(s->A0, s->T1);
488     tcg_gen_shr_tl(s->T0, s->T0, s->A0);
490     bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);
491     zero = tcg_constant_tl(0);
    /* Zero the result if START exceeded the operand's bit width. */
492     tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero);
495      * Extract the LEN into a mask.  Lengths larger than
496      * operand size get all ones.
498     tcg_gen_extract_tl(s->A0, s->T1, 8, 8);
499     tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->A0, bound, s->A0, bound);
    /* Build (1 << LEN) - 1 and apply it. */
501     tcg_gen_movi_tl(s->T1, 1);
502     tcg_gen_shl_tl(s->T1, s->T1, s->A0);
503     tcg_gen_subi_tl(s->T1, s->T1, 1);
504     tcg_gen_and_tl(s->T0, s->T0, s->T1);
506     gen_op_update1_cc(s);
507     set_cc_op(s, CC_OP_LOGICB + ot);
/* BMI1 BLSI: isolate the lowest set bit, T0 = T0 & -T0. */
510 static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
512     MemOp ot = decode->op[0].ot;
514     tcg_gen_neg_tl(s->T1, s->T0);
515     tcg_gen_and_tl(s->T0, s->T0, s->T1);
516     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
517     set_cc_op(s, CC_OP_BMILGB + ot);
/* BMI1 BLSMSK: mask up to the lowest set bit, T0 = T0 ^ (T0 - 1). */
520 static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
522     MemOp ot = decode->op[0].ot;
524     tcg_gen_subi_tl(s->T1, s->T0, 1);
525     tcg_gen_xor_tl(s->T0, s->T0, s->T1);
526     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
527     set_cc_op(s, CC_OP_BMILGB + ot);
/* BMI1 BLSR: clear the lowest set bit, T0 = T0 & (T0 - 1). */
530 static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
532     MemOp ot = decode->op[0].ot;
534     tcg_gen_subi_tl(s->T1, s->T0, 1);
535     tcg_gen_and_tl(s->T0, s->T0, s->T1);
536     tcg_gen_mov_tl(cpu_cc_dst, s->T0);
537     set_cc_op(s, CC_OP_BMILGB + ot);
/*
 * BMI2 BZHI: zero the bits of T0 above the index in VEX.vvvv[7:0].
 * The index is clamped to the operand's bit width; CF reflects whether the
 * requested index was out of range (stored inverted because the BMILG
 * flag-op variant is used so that OF reads as cleared).
 */
540 static void gen_BZHI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
542     MemOp ot = decode->op[0].ot;
545     tcg_gen_ext8u_tl(s->T1, cpu_regs[s->vex_v]);
546     bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);
549      * Note that since we're using BMILG (in order to get O
550      * cleared) we need to store the inverse into C.
552     tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src, s->T1, bound);
553     tcg_gen_movcond_tl(TCG_COND_GT, s->T1, s->T1, bound, bound, s->T1);
    /* Mask off bits [width-1 : index] with ~(-1 << index). */
555     tcg_gen_movi_tl(s->A0, -1);
556     tcg_gen_shl_tl(s->A0, s->A0, s->T1);
557     tcg_gen_andc_tl(s->T0, s->T0, s->A0);
559     gen_op_update1_cc(s);
560     set_cc_op(s, CC_OP_BMILGB + ot);
/*
 * SSE4.2 CRC32: accumulate T1 (8 << ot bits wide, i.e. the source operand
 * size in bits) into the running CRC in T0 via the crc32 helper.
 */
563 static void gen_CRC32(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
565     MemOp ot = decode->op[2].ot;
567     tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
568     gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot));
/*
 * MOVBE: byte-swapping load or store.  When the destination operand is the
 * memory form (X86_TYPE_M) this is the store direction; otherwise a
 * big-endian load into T0.  The generic load/store writeback is bypassed
 * (M operand type does not load/store).
 */
571 static void gen_MOVBE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
573     MemOp ot = decode->op[0].ot;
575     /* M operand type does not load/store */
576     if (decode->e.op0 == X86_TYPE_M) {
577         tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
579         tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
/*
 * MOVD/MOVQ to a vector register: zero the whole destination vector, then
 * store T1 (32 or 64 bits, per the source operand size) into its low
 * element.  NOTE(review): the switch labels on OT and the 64-bit-only
 * guard are elided in this view.
 */
583 static void gen_MOVD_to(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
585     MemOp ot = decode->op[2].ot;
586     int vec_len = vector_len(s, decode);
587     int lo_ofs = vector_elem_offset(&decode->op[0], ot, 0);
589     tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);
594         tcg_gen_st32_tl(s->T1, cpu_env, lo_ofs);
598         tcg_gen_st_tl(s->T1, cpu_env, lo_ofs);
601         g_assert_not_reached();
/* MOVDQA/MOVDQU and friends: copy the source vector to the destination. */
605 static void gen_MOVDQ(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
607     gen_store_sse(s, decode, decode->op[2].offset);
/*
 * BMI2 MULX: flag-less unsigned multiply of T0 by T1; the low half of the
 * product goes to the VEX.vvvv register and the high half to T0 (written
 * back to the ModRM register).  32-bit operands use a 32x32->64 multiply on
 * i32 temporaries; 64-bit uses mulu2.  NOTE(review): the switch on OT is
 * elided in this view.
 */
610 static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
612     MemOp ot = decode->op[0].ot;
614     /* low part of result in VEX.vvvv, high in MODRM */
617         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
618         tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
619         tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
620                           s->tmp2_i32, s->tmp3_i32);
621         tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32);
622         tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32);
626         tcg_gen_mulu2_i64(cpu_regs[s->vex_v], s->T0, s->T0, s->T1);
/* PANDN: dest = src2 & ~src1 — note the andc operand order is reversed. */
633 static void gen_PANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
635     int vec_len = vector_len(s, decode);
637     /* Careful, operand order is reversed!  */
638     tcg_gen_gvec_andc(MO_64,
639                       decode->op[0].offset, decode->op[2].offset,
640                       decode->op[1].offset, vec_len, vec_len);
/*
 * BMI2 PDEP: deposit bits of T0 into the positions selected by mask T1.
 * The 32-bit form zero-extends the source first (guard elided in this view).
 */
643 static void gen_PDEP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
645     MemOp ot = decode->op[1].ot;
647         tcg_gen_ext32u_tl(s->T0, s->T0);
649     gen_helper_pdep(s->T0, s->T0, s->T1);
/*
 * BMI2 PEXT: extract the bits of T0 selected by mask T1 into contiguous low
 * bits.  The 32-bit form zero-extends the source first (guard elided in
 * this view).
 */
652 static void gen_PEXT(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
654     MemOp ot = decode->op[1].ot;
656         tcg_gen_ext32u_tl(s->T0, s->T0);
658     gen_helper_pext(s->T0, s->T0, s->T1);
/*
 * BMI2 RORX: rotate right by an immediate without touching flags.  64-bit
 * form rotates in TL; 32-bit form rotates an i32 temporary and zero-extends
 * back.  NOTE(review): the if/else on OT is elided in this view.
 */
661 static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
663     MemOp ot = decode->op[0].ot;
664     int b = decode->immediate;
667         tcg_gen_rotri_tl(s->T0, s->T0, b & 63);
669         tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
670         tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31);
671         tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
/*
 * BMI2 SARX: arithmetic shift right by T1 (masked to the operand's width)
 * without touching flags; the 32-bit form sign-extends the source first
 * (guard elided in this view).
 */
675 static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
677     MemOp ot = decode->op[0].ot;
680     mask = ot == MO_64 ? 63 : 31;
681     tcg_gen_andi_tl(s->T1, s->T1, mask);
683         tcg_gen_ext32s_tl(s->T0, s->T0);
685     tcg_gen_sar_tl(s->T0, s->T0, s->T1);
/* BMI2 SHLX: shift left by T1 (masked to operand width), flags untouched. */
688 static void gen_SHLX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
690     MemOp ot = decode->op[0].ot;
693     mask = ot == MO_64 ? 63 : 31;
694     tcg_gen_andi_tl(s->T1, s->T1, mask);
695     tcg_gen_shl_tl(s->T0, s->T0, s->T1);
/*
 * BMI2 SHRX: logical shift right by T1 (masked to the operand's width)
 * without touching flags; the 32-bit form zero-extends the source first
 * (guard elided in this view).
 */
698 static void gen_SHRX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
700     MemOp ot = decode->op[0].ot;
703     mask = ot == MO_64 ? 63 : 31;
704     tcg_gen_andi_tl(s->T1, s->T1, mask);
706         tcg_gen_ext32u_tl(s->T0, s->T0);
708     tcg_gen_shr_tl(s->T0, s->T0, s->T1);