/*
 * New-style TCG opcode generator for i386 instructions
 *
 * Copyright (c) 2022 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

static void gen_NM_exception(DisasContext *s)
{
    gen_exception(s, EXCP07_PREX);
}

static void gen_illegal(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_illegal_opcode(s);
}

static void gen_load_ea(DisasContext *s, AddressParts *mem, bool is_vsib)
{
    TCGv ea = gen_lea_modrm_1(s, *mem, is_vsib);
    gen_lea_v_seg(s, s->aflag, ea, mem->def_seg, s->override);
}

static inline int mmx_offset(MemOp ot)
{
    switch (ot) {
    case MO_8:
        return offsetof(MMXReg, MMX_B(0));
    case MO_16:
        return offsetof(MMXReg, MMX_W(0));
    case MO_32:
        return offsetof(MMXReg, MMX_L(0));
    case MO_64:
        return offsetof(MMXReg, MMX_Q(0));
    default:
        g_assert_not_reached();
    }
}

static inline int xmm_offset(MemOp ot)
{
    switch (ot) {
    case MO_8:
        return offsetof(ZMMReg, ZMM_B(0));
    case MO_16:
        return offsetof(ZMMReg, ZMM_W(0));
    case MO_32:
        return offsetof(ZMMReg, ZMM_L(0));
    case MO_64:
        return offsetof(ZMMReg, ZMM_Q(0));
    case MO_128:
        return offsetof(ZMMReg, ZMM_X(0));
    case MO_256:
        return offsetof(ZMMReg, ZMM_Y(0));
    default:
        g_assert_not_reached();
    }
}

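/*
 * op->offset always points at element 0 of the operand, so e.g. a 32-bit
 * operand in XMM3 has offset ZMM_OFFSET(3) + offsetof(ZMMReg, ZMM_L(0)).
 * mmx_offset/xmm_offset return that element-0 displacement (which is
 * nonzero on big-endian hosts); subtracting it recovers the offset of
 * the register itself.
 */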
static int vector_reg_offset(X86DecodedOp *op)
{
    assert(op->unit == X86_OP_MMX || op->unit == X86_OP_SSE);

    if (op->unit == X86_OP_MMX) {
        return op->offset - mmx_offset(op->ot);
    } else {
        return op->offset - xmm_offset(op->ot);
    }
}

static int vector_elem_offset(X86DecodedOp *op, MemOp ot, int n)
{
    int base_ofs = vector_reg_offset(op);

    switch (ot) {
    case MO_8:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_B(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_B(n));
        }
    case MO_16:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_W(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_W(n));
        }
    case MO_32:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_L(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_L(n));
        }
    case MO_64:
        if (op->unit == X86_OP_MMX) {
            return base_ofs + offsetof(MMXReg, MMX_Q(n));
        } else {
            return base_ofs + offsetof(ZMMReg, ZMM_Q(n));
        }
    case MO_128:
        assert(op->unit == X86_OP_SSE);
        return base_ofs + offsetof(ZMMReg, ZMM_X(n));
    case MO_256:
        assert(op->unit == X86_OP_SSE);
        return base_ofs + offsetof(ZMMReg, ZMM_Y(n));
    default:
        g_assert_not_reached();
    }
}

static void compute_mmx_offset(X86DecodedOp *op)
{
    if (!op->has_ea) {
        op->offset = offsetof(CPUX86State, fpregs[op->n].mmx) + mmx_offset(op->ot);
    } else {
        op->offset = offsetof(CPUX86State, mmx_t0) + mmx_offset(op->ot);
    }
}

static void compute_xmm_offset(X86DecodedOp *op)
{
    if (!op->has_ea) {
        op->offset = ZMM_OFFSET(op->n) + xmm_offset(op->ot);
    } else {
        op->offset = offsetof(CPUX86State, xmm_t0) + xmm_offset(op->ot);
    }
}

static void gen_load_sse(DisasContext *s, TCGv temp, MemOp ot, int dest_ofs, bool aligned)
{
    switch (ot) {
    case MO_8:
        gen_op_ld_v(s, MO_8, temp, s->A0);
        tcg_gen_st8_tl(temp, cpu_env, dest_ofs);
        break;
    case MO_16:
        gen_op_ld_v(s, MO_16, temp, s->A0);
        tcg_gen_st16_tl(temp, cpu_env, dest_ofs);
        break;
    case MO_32:
        gen_op_ld_v(s, MO_32, temp, s->A0);
        tcg_gen_st32_tl(temp, cpu_env, dest_ofs);
        break;
    case MO_64:
        gen_ldq_env_A0(s, dest_ofs);
        break;
    case MO_128:
        gen_ldo_env_A0(s, dest_ofs, aligned);
        break;
    case MO_256:
        gen_ldy_env_A0(s, dest_ofs, aligned);
        break;
    default:
        g_assert_not_reached();
    }
}

static bool sse_needs_alignment(DisasContext *s, X86DecodedInsn *decode, MemOp ot)
{
    switch (decode->e.vex_class) {
    case 2:
    case 4:
        if ((s->prefix & PREFIX_VEX) ||
            decode->e.vex_special == X86_VEX_SSEUnaligned) {
            /* MOST legacy SSE instructions require aligned memory operands, but not all.  */
            return false;
        }
        /* fall through */
    case 1:
        return ot >= MO_128;

    default:
        return false;
    }
}

static void gen_load(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
{
    X86DecodedOp *op = &decode->op[opn];

    switch (op->unit) {
    case X86_OP_SKIP:
        return;
    case X86_OP_SEG:
        tcg_gen_ld32u_tl(v, cpu_env,
                         offsetof(CPUX86State,segs[op->n].selector));
        break;
    case X86_OP_CR:
        tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, cr[op->n]));
        break;
    case X86_OP_DR:
        tcg_gen_ld_tl(v, cpu_env, offsetof(CPUX86State, dr[op->n]));
        break;
    case X86_OP_INT:
        if (op->has_ea) {
            gen_op_ld_v(s, op->ot, v, s->A0);
        } else {
            gen_op_mov_v_reg(s, op->ot, v, op->n);
        }
        break;
    case X86_OP_IMM:
        tcg_gen_movi_tl(v, decode->immediate);
        break;

    case X86_OP_MMX:
        compute_mmx_offset(op);
        goto load_vector;

    case X86_OP_SSE:
        compute_xmm_offset(op);
    load_vector:
        if (op->has_ea) {
            bool aligned = sse_needs_alignment(s, decode, op->ot);
            gen_load_sse(s, v, op->ot, op->offset, aligned);
        }
        break;

    default:
        g_assert_not_reached();
    }
}

static TCGv_ptr op_ptr(X86DecodedInsn *decode, int opn)
{
    X86DecodedOp *op = &decode->op[opn];

    if (op->v_ptr) {
        return op->v_ptr;
    }
    op->v_ptr = tcg_temp_new_ptr();

    /* The temporary points to the MMXReg or ZMMReg.  */
    tcg_gen_addi_ptr(op->v_ptr, cpu_env, vector_reg_offset(op));
    return op->v_ptr;
}

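/*
 * Shorthands for passing the three decoded operands (0 = destination,
 * 1 and 2 = sources) to SSE helpers as pointers into CPUX86State.
 */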
#define OP_PTR0 op_ptr(decode, 0)
#define OP_PTR1 op_ptr(decode, 1)
#define OP_PTR2 op_ptr(decode, 2)

static void gen_writeback(DisasContext *s, X86DecodedInsn *decode, int opn, TCGv v)
{
    X86DecodedOp *op = &decode->op[opn];

    switch (op->unit) {
    case X86_OP_SKIP:
        break;
    case X86_OP_SEG:
        /* Note that gen_movl_seg_T0 takes care of interrupt shadow and TF.  */
        gen_movl_seg_T0(s, op->n);
        break;
    case X86_OP_INT:
        if (op->has_ea) {
            gen_op_st_v(s, op->ot, v, s->A0);
        } else {
            gen_op_mov_reg_v(s, op->ot, op->n, v);
        }
        break;
    case X86_OP_MMX:
        break;
    case X86_OP_SSE:
        /* VEX-encoded 128-bit operations zero the high half of the register.  */
        if ((s->prefix & PREFIX_VEX) && op->ot == MO_128) {
            tcg_gen_gvec_dup_imm(MO_64,
                                 offsetof(CPUX86State, xmm_regs[op->n].ZMM_X(1)),
                                 16, 16, 0);
        }
        break;
    case X86_OP_CR:
    case X86_OP_DR:
    default:
        g_assert_not_reached();
    }
}

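/*
 * Size in bytes of the vector that an instruction operates on: 8 for the
 * MMX form of an MMX-capable instruction (no 66/F3/F2 prefix), otherwise
 * 16 or 32 depending on VEX.L.
 */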
static inline int vector_len(DisasContext *s, X86DecodedInsn *decode)
{
    if (decode->e.special == X86_SPECIAL_MMX &&
        !(s->prefix & (PREFIX_DATA | PREFIX_REPZ | PREFIX_REPNZ))) {
        return 8;
    }

    return s->vex_l ? 32 : 16;
}

static void gen_store_sse(DisasContext *s, X86DecodedInsn *decode, int src_ofs)
{
    MemOp ot = decode->op[0].ot;
    int vec_len = vector_len(s, decode);
    bool aligned = sse_needs_alignment(s, decode, ot);

    if (!decode->op[0].has_ea) {
        tcg_gen_gvec_mov(MO_64, decode->op[0].offset, src_ofs, vec_len, vec_len);
        return;
    }

    switch (ot) {
    case MO_64:
        gen_stq_env_A0(s, src_ofs);
        break;
    case MO_128:
        gen_sto_env_A0(s, src_ofs, aligned);
        break;
    case MO_256:
        gen_sty_env_A0(s, src_ofs, aligned);
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * 00 = v*ps Vps, Hps, Wps
 * 66 = v*pd Vpd, Hpd, Wpd
 * f3 = v*ss Vss, Hss, Wss
 * f2 = v*sd Vsd, Hsd, Wsd
 */
static inline void gen_unary_fp_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
                                    SSEFunc_0_epp pd_xmm, SSEFunc_0_epp ps_xmm,
                                    SSEFunc_0_epp pd_ymm, SSEFunc_0_epp ps_ymm,
                                    SSEFunc_0_eppp sd, SSEFunc_0_eppp ss)
{
    if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) != 0) {
        SSEFunc_0_eppp fn = s->prefix & PREFIX_REPZ ? ss : sd;
        if (!fn) {
            gen_illegal_opcode(s);
            return;
        }
        fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        SSEFunc_0_epp ps, pd, fn;
        ps = s->vex_l ? ps_ymm : ps_xmm;
        pd = s->vex_l ? pd_ymm : pd_xmm;
        fn = s->prefix & PREFIX_DATA ? pd : ps;
        if (!fn) {
            gen_illegal_opcode(s);
            return;
        }
        fn(cpu_env, OP_PTR0, OP_PTR2);
    }
}

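/*
 * For example, for VSQRT below an F3 prefix selects gen_helper_sqrtss;
 * the scalar helpers take three register pointers because the upper
 * elements of the result are merged from the first source.  With no
 * prefix, gen_helper_sqrtps_xmm or _ymm is chosen according to VEX.L.
 */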
#define UNARY_FP_SSE(uname, lname)                                                 \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{                                                                                  \
    gen_unary_fp_sse(s, env, decode,                                               \
                     gen_helper_##lname##pd_xmm,                                   \
                     gen_helper_##lname##ps_xmm,                                   \
                     gen_helper_##lname##pd_ymm,                                   \
                     gen_helper_##lname##ps_ymm,                                   \
                     gen_helper_##lname##sd,                                       \
                     gen_helper_##lname##ss);                                      \
}
UNARY_FP_SSE(VSQRT, sqrt)

/*
 * 00 = v*ps Vps, Hps, Wps
 * 66 = v*pd Vpd, Hpd, Wpd
 * f3 = v*ss Vss, Hss, Wss
 * f2 = v*sd Vsd, Hsd, Wsd
 */
static inline void gen_fp_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
                              SSEFunc_0_eppp pd_xmm, SSEFunc_0_eppp ps_xmm,
                              SSEFunc_0_eppp pd_ymm, SSEFunc_0_eppp ps_ymm,
                              SSEFunc_0_eppp sd, SSEFunc_0_eppp ss)
{
    SSEFunc_0_eppp ps, pd, fn;
    if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ)) != 0) {
        fn = s->prefix & PREFIX_REPZ ? ss : sd;
    } else {
        ps = s->vex_l ? ps_ymm : ps_xmm;
        pd = s->vex_l ? pd_ymm : pd_xmm;
        fn = s->prefix & PREFIX_DATA ? pd : ps;
    }
    if (fn) {
        fn(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        gen_illegal_opcode(s);
    }
}

#define FP_SSE(uname, lname)                                                       \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{                                                                                  \
    gen_fp_sse(s, env, decode,                                                     \
               gen_helper_##lname##pd_xmm,                                         \
               gen_helper_##lname##ps_xmm,                                         \
               gen_helper_##lname##pd_ymm,                                         \
               gen_helper_##lname##ps_ymm,                                         \
               gen_helper_##lname##sd,                                             \
               gen_helper_##lname##ss);                                            \
}
FP_SSE(VADD, add)
FP_SSE(VMUL, mul)
FP_SSE(VSUB, sub)
FP_SSE(VMIN, min)
FP_SSE(VDIV, div)
FP_SSE(VMAX, max)

static inline void gen_unary_fp32_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
                                      SSEFunc_0_epp ps_xmm,
                                      SSEFunc_0_epp ps_ymm,
                                      SSEFunc_0_eppp ss)
{
    if ((s->prefix & (PREFIX_DATA | PREFIX_REPNZ)) != 0) {
        goto illegal_op;
    } else if (s->prefix & PREFIX_REPZ) {
        if (!ss) {
            goto illegal_op;
        }
        ss(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        SSEFunc_0_epp fn = s->vex_l ? ps_ymm : ps_xmm;
        if (!fn) {
            goto illegal_op;
        }
        fn(cpu_env, OP_PTR0, OP_PTR2);
    }
    return;

illegal_op:
    gen_illegal_opcode(s);
}

#define UNARY_FP32_SSE(uname, lname)                                               \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{                                                                                  \
    gen_unary_fp32_sse(s, env, decode,                                             \
                       gen_helper_##lname##ps_xmm,                                 \
                       gen_helper_##lname##ps_ymm,                                 \
                       gen_helper_##lname##ss);                                    \
}
UNARY_FP32_SSE(VRSQRT, rsqrt)
UNARY_FP32_SSE(VRCP, rcp)

#define BINARY_INT_GVEC(uname, func, ...)                                          \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{                                                                                  \
    int vec_len = vector_len(s, decode);                                           \
                                                                                   \
    func(__VA_ARGS__,                                                              \
         decode->op[0].offset, decode->op[1].offset,                               \
         decode->op[2].offset, vec_len, vec_len);                                  \
}

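/*
 * The variadic arguments expand before the offsets, so callers pass just
 * the element size, or a TCGCond plus the element size for the
 * tcg_gen_gvec_cmp expansions.  Purely bitwise operations (PAND, POR,
 * PXOR) use MO_64 because the element size does not matter for them.
 */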
BINARY_INT_GVEC(PADDB,   tcg_gen_gvec_add, MO_8)
BINARY_INT_GVEC(PADDW,   tcg_gen_gvec_add, MO_16)
BINARY_INT_GVEC(PADDD,   tcg_gen_gvec_add, MO_32)
BINARY_INT_GVEC(PADDSB,  tcg_gen_gvec_ssadd, MO_8)
BINARY_INT_GVEC(PADDSW,  tcg_gen_gvec_ssadd, MO_16)
BINARY_INT_GVEC(PADDUSB, tcg_gen_gvec_usadd, MO_8)
BINARY_INT_GVEC(PADDUSW, tcg_gen_gvec_usadd, MO_16)
BINARY_INT_GVEC(PAND,    tcg_gen_gvec_and, MO_64)
BINARY_INT_GVEC(PCMPGTB, tcg_gen_gvec_cmp, TCG_COND_GT, MO_8)
BINARY_INT_GVEC(PCMPGTW, tcg_gen_gvec_cmp, TCG_COND_GT, MO_16)
BINARY_INT_GVEC(PCMPGTD, tcg_gen_gvec_cmp, TCG_COND_GT, MO_32)
BINARY_INT_GVEC(PMAXSW,  tcg_gen_gvec_smax, MO_16)
BINARY_INT_GVEC(PMAXUB,  tcg_gen_gvec_umax, MO_8)
BINARY_INT_GVEC(PMINSW,  tcg_gen_gvec_smin, MO_16)
BINARY_INT_GVEC(PMINUB,  tcg_gen_gvec_umin, MO_8)
BINARY_INT_GVEC(POR,     tcg_gen_gvec_or, MO_64)
BINARY_INT_GVEC(PSUBB,   tcg_gen_gvec_sub, MO_8)
BINARY_INT_GVEC(PSUBW,   tcg_gen_gvec_sub, MO_16)
BINARY_INT_GVEC(PSUBD,   tcg_gen_gvec_sub, MO_32)
BINARY_INT_GVEC(PSUBQ,   tcg_gen_gvec_sub, MO_64)
BINARY_INT_GVEC(PSUBSB,  tcg_gen_gvec_sssub, MO_8)
BINARY_INT_GVEC(PSUBSW,  tcg_gen_gvec_sssub, MO_16)
BINARY_INT_GVEC(PSUBUSB, tcg_gen_gvec_ussub, MO_8)
BINARY_INT_GVEC(PSUBUSW, tcg_gen_gvec_ussub, MO_16)
BINARY_INT_GVEC(PXOR,    tcg_gen_gvec_xor, MO_64)

/*
 * 00 = p*  Pq, Qq (if mmx not NULL; no VEX)
 * 66 = vp* Vx, Hx, Wx
 *
 * These are really the same encoding, because 1) V is the same as P when VEX.V
 * is not present 2) P and Q are the same as H and W apart from MM/XMM
 */
static inline void gen_binary_int_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
                                      SSEFunc_0_eppp mmx, SSEFunc_0_eppp xmm, SSEFunc_0_eppp ymm)
{
    assert(!!mmx == !!(decode->e.special == X86_SPECIAL_MMX));

    if (mmx && (s->prefix & PREFIX_VEX) && !(s->prefix & PREFIX_DATA)) {
        /* VEX encoding is not applicable to MMX instructions.  */
        gen_illegal_opcode(s);
        return;
    }
    if (!(s->prefix & PREFIX_DATA)) {
        mmx(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else if (!s->vex_l) {
        xmm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    } else {
        ymm(cpu_env, OP_PTR0, OP_PTR1, OP_PTR2);
    }
}

#define BINARY_INT_MMX(uname, lname)                                               \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{                                                                                  \
    gen_binary_int_sse(s, env, decode,                                             \
                       gen_helper_##lname##_mmx,                                   \
                       gen_helper_##lname##_xmm,                                   \
                       gen_helper_##lname##_ymm);                                  \
}
BINARY_INT_MMX(PUNPCKLBW, punpcklbw)
BINARY_INT_MMX(PUNPCKLWD, punpcklwd)
BINARY_INT_MMX(PUNPCKLDQ, punpckldq)
BINARY_INT_MMX(PACKSSWB,  packsswb)
BINARY_INT_MMX(PACKUSWB,  packuswb)
BINARY_INT_MMX(PUNPCKHBW, punpckhbw)
BINARY_INT_MMX(PUNPCKHWD, punpckhwd)
BINARY_INT_MMX(PUNPCKHDQ, punpckhdq)
BINARY_INT_MMX(PACKSSDW,  packssdw)

/* Instructions with no MMX equivalent.  */
#define BINARY_INT_SSE(uname, lname)                                               \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{                                                                                  \
    gen_binary_int_sse(s, env, decode,                                             \
                       NULL,                                                       \
                       gen_helper_##lname##_xmm,                                   \
                       gen_helper_##lname##_ymm);                                  \
}

BINARY_INT_SSE(PUNPCKLQDQ, punpcklqdq)
BINARY_INT_SSE(PUNPCKHQDQ, punpckhqdq)

static inline void gen_unary_int_sse(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode,
                                     SSEFunc_0_epp xmm, SSEFunc_0_epp ymm)
{
    if (!s->vex_l) {
        xmm(cpu_env, OP_PTR0, OP_PTR2);
    } else {
        ymm(cpu_env, OP_PTR0, OP_PTR2);
    }
}

#define UNARY_INT_SSE(uname, lname)                                                \
static void gen_##uname(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode) \
{                                                                                  \
    gen_unary_int_sse(s, env, decode,                                              \
                      gen_helper_##lname##_xmm,                                    \
                      gen_helper_##lname##_ymm);                                   \
}

UNARY_INT_SSE(VCVTDQ2PS, cvtdq2ps)
UNARY_INT_SSE(VCVTPS2DQ, cvtps2dq)
UNARY_INT_SSE(VCVTTPS2DQ, cvttps2dq)

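/*
 * ADCX and ADOX propagate a carry through CF resp. OF only, leaving all
 * other flags untouched so that the two carry chains can be interleaved
 * (e.g. in multi-precision multiplication loops).  The carry-out is kept
 * live in cpu_cc_dst for ADCX and cpu_cc_src2 for ADOX; CC_OP_ADCOX
 * marks the state in which both chains are valid at once.
 */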
static void gen_ADCOX(DisasContext *s, CPUX86State *env, MemOp ot, int cc_op)
{
    TCGv carry_in = NULL;
    TCGv carry_out = (cc_op == CC_OP_ADCX ? cpu_cc_dst : cpu_cc_src2);
    TCGv zero;

    if (cc_op == s->cc_op || s->cc_op == CC_OP_ADCOX) {
        /* Re-use the carry-out from a previous round.  */
        carry_in = carry_out;
        cc_op = s->cc_op;
    } else if (s->cc_op == CC_OP_ADCX || s->cc_op == CC_OP_ADOX) {
        /* Merge with the carry-out from the opposite instruction.  */
        cc_op = CC_OP_ADCOX;
    }

    /* If we don't have a carry-in, get it out of EFLAGS.  */
    if (!carry_in) {
        if (s->cc_op != CC_OP_ADCX && s->cc_op != CC_OP_ADOX) {
            gen_compute_eflags(s);
        }
        carry_in = s->tmp0;
        tcg_gen_extract_tl(carry_in, cpu_cc_src,
            ctz32(cc_op == CC_OP_ADCX ? CC_C : CC_O), 1);
    }

    switch (ot) {
#ifdef TARGET_X86_64
    case MO_32:
        /* If TL is 64-bit just do everything in 64-bit arithmetic.  */
        tcg_gen_add_i64(s->T0, s->T0, s->T1);
        tcg_gen_add_i64(s->T0, s->T0, carry_in);
        tcg_gen_shri_i64(carry_out, s->T0, 32);
        break;
#endif
    default:
        /* Chain two add2 ops; carry_out accumulates the final carry.  */
        zero = tcg_constant_tl(0);
        tcg_gen_add2_tl(s->T0, carry_out, s->T0, zero, carry_in, zero);
        tcg_gen_add2_tl(s->T0, carry_out, s->T0, carry_out, s->T1, zero);
        break;
    }
    set_cc_op(s, cc_op);
}

static void gen_ADCX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADCX);
}

static void gen_ADOX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_ADCOX(s, env, decode->op[0].ot, CC_OP_ADOX);
}

static void gen_ANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    tcg_gen_andc_tl(s->T0, s->T1, s->T0);
    gen_op_update1_cc(s);
    set_cc_op(s, CC_OP_LOGICB + ot);
}

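/*
 * BEXTR's control word (second source) holds the start bit position in
 * bits 0-7 and the field length in bits 8-15.  For example, a control
 * value of 0x0408 extracts four bits starting at bit 8, i.e. it
 * computes (src >> 8) & 0xf.
 */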
static void gen_BEXTR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    TCGv bound, zero;

    /*
     * Extract START, and shift the operand.
     * Shifts larger than operand size get zeros.
     */
    tcg_gen_ext8u_tl(s->A0, s->T1);
    tcg_gen_shr_tl(s->T0, s->T0, s->A0);

    bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);
    zero = tcg_constant_tl(0);
    tcg_gen_movcond_tl(TCG_COND_LEU, s->T0, s->A0, bound, s->T0, zero);

    /*
     * Extract the LEN into a mask.  Lengths larger than
     * operand size get all ones.
     */
    tcg_gen_extract_tl(s->A0, s->T1, 8, 8);
    tcg_gen_movcond_tl(TCG_COND_LEU, s->A0, s->A0, bound, s->A0, bound);

    tcg_gen_movi_tl(s->T1, 1);
    tcg_gen_shl_tl(s->T1, s->T1, s->A0);
    tcg_gen_subi_tl(s->T1, s->T1, 1);
    tcg_gen_and_tl(s->T0, s->T0, s->T1);

    gen_op_update1_cc(s);
    set_cc_op(s, CC_OP_LOGICB + ot);
}

static void gen_BLSI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    /* Isolate the lowest set bit: x & -x.  */
    tcg_gen_neg_tl(s->T1, s->T0);
    tcg_gen_and_tl(s->T0, s->T0, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    set_cc_op(s, CC_OP_BMILGB + ot);
}

static void gen_BLSMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    /* Mask up to and including the lowest set bit: x ^ (x - 1).  */
    tcg_gen_subi_tl(s->T1, s->T0, 1);
    tcg_gen_xor_tl(s->T0, s->T0, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    set_cc_op(s, CC_OP_BMILGB + ot);
}

static void gen_BLSR(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    /* Reset the lowest set bit: x & (x - 1).  */
    tcg_gen_subi_tl(s->T1, s->T0, 1);
    tcg_gen_and_tl(s->T0, s->T0, s->T1);
    tcg_gen_mov_tl(cpu_cc_dst, s->T0);
    set_cc_op(s, CC_OP_BMILGB + ot);
}

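/*
 * BZHI zeroes all bits of the source whose index is greater than or
 * equal to the count in the low 8 bits of the VEX.vvvv register (e.g. a
 * count of 8 keeps only the low byte).  This is src & ~(-1 << count),
 * with the count saturated to the operand size.
 */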
static void gen_BZHI(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    TCGv bound;

    tcg_gen_ext8u_tl(s->T1, cpu_regs[s->vex_v]);
    bound = tcg_constant_tl(ot == MO_64 ? 63 : 31);

    /*
     * Note that since we're using BMILG (in order to get O
     * cleared) we need to store the inverse into C.
     */
    tcg_gen_setcond_tl(TCG_COND_LT, cpu_cc_src, s->T1, bound);
    tcg_gen_movcond_tl(TCG_COND_GT, s->T1, s->T1, bound, bound, s->T1);

    tcg_gen_movi_tl(s->A0, -1);
    tcg_gen_shl_tl(s->A0, s->A0, s->T1);
    tcg_gen_andc_tl(s->T0, s->T0, s->A0);

    gen_op_update1_cc(s);
    set_cc_op(s, CC_OP_BMILGB + ot);
}

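/*
 * SSE4.2 CRC32 accumulates into the destination using the Castagnoli
 * polynomial (CRC-32C); 8 << ot is the width of the source operand in
 * bits, so e.g. a byte source passes 8 to the helper.
 */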
static void gen_CRC32(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[2].ot;

    tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
    gen_helper_crc32(s->T0, s->tmp2_i32, s->T1, tcg_constant_i32(8 << ot));
}

static void gen_MOVBE(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    /* M operand type does not load/store */
    if (decode->e.op0 == X86_TYPE_M) {
        tcg_gen_qemu_st_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
    } else {
        tcg_gen_qemu_ld_tl(s->T0, s->A0, s->mem_index, ot | MO_BE);
    }
}

static void gen_MOVD_to(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[2].ot;
    int vec_len = vector_len(s, decode);
    int lo_ofs = vector_elem_offset(&decode->op[0], ot, 0);

    /* Zero the whole destination first, then store the low element.  */
    tcg_gen_gvec_dup_imm(MO_64, decode->op[0].offset, vec_len, vec_len, 0);

    switch (ot) {
    case MO_32:
#ifdef TARGET_X86_64
        tcg_gen_st32_tl(s->T1, cpu_env, lo_ofs);
        break;
    case MO_64:
#endif
        tcg_gen_st_tl(s->T1, cpu_env, lo_ofs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_MOVDQ(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_store_sse(s, decode, decode->op[2].offset);
}

static void gen_MOVMSK(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    typeof(gen_helper_movmskps_ymm) *ps, *pd, *fn;
    ps = s->vex_l ? gen_helper_movmskps_ymm : gen_helper_movmskps_xmm;
    pd = s->vex_l ? gen_helper_movmskpd_ymm : gen_helper_movmskpd_xmm;
    fn = s->prefix & PREFIX_DATA ? pd : ps;
    fn(s->tmp2_i32, cpu_env, OP_PTR2);
    tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
}

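/*
 * MULX performs an unsigned widening multiply of its two sources without
 * touching any flags (hence no set_cc_op call below); the low half of
 * the product goes to the VEX.vvvv register and the high half is left in
 * s->T0 for the normal writeback path.
 */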
static void gen_MULX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;

    /* low part of result in VEX.vvvv, high in MODRM */
    switch (ot) {
    case MO_32:
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T1);
        tcg_gen_mulu2_i32(s->tmp2_i32, s->tmp3_i32,
                          s->tmp2_i32, s->tmp3_i32);
        tcg_gen_extu_i32_tl(cpu_regs[s->vex_v], s->tmp2_i32);
        tcg_gen_extu_i32_tl(s->T0, s->tmp3_i32);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        tcg_gen_mulu2_i64(cpu_regs[s->vex_v], s->T0, s->T0, s->T1);
        break;
#endif
    default:
        g_assert_not_reached();
    }
}

static void gen_PANDN(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    int vec_len = vector_len(s, decode);

    /* Careful, operand order is reversed!  */
    tcg_gen_gvec_andc(MO_64,
                      decode->op[0].offset, decode->op[2].offset,
                      decode->op[1].offset, vec_len, vec_len);
}

static void gen_PDEP(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;
    if (ot < MO_64) {
        tcg_gen_ext32u_tl(s->T0, s->T0);
    }
    gen_helper_pdep(s->T0, s->T0, s->T1);
}

static void gen_PEXT(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[1].ot;
    if (ot < MO_64) {
        tcg_gen_ext32u_tl(s->T0, s->T0);
    }
    gen_helper_pext(s->T0, s->T0, s->T1);
}

static void gen_RORX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    int b = decode->immediate;

    if (ot == MO_64) {
        tcg_gen_rotri_tl(s->T0, s->T0, b & 63);
    } else {
        tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
        tcg_gen_rotri_i32(s->tmp2_i32, s->tmp2_i32, b & 31);
        tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
    }
}

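/*
 * SARX, SHLX and SHRX shift by the count in the second source register,
 * masked to 5 bits (6 for 64-bit operands) exactly like the legacy
 * shifts, but leave the flags alone; note the absence of any cc update.
 */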
static void gen_SARX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    int mask;

    mask = ot == MO_64 ? 63 : 31;
    tcg_gen_andi_tl(s->T1, s->T1, mask);
    if (ot != MO_64) {
        tcg_gen_ext32s_tl(s->T0, s->T0);
    }
    tcg_gen_sar_tl(s->T0, s->T0, s->T1);
}

static void gen_SHLX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    int mask;

    mask = ot == MO_64 ? 63 : 31;
    tcg_gen_andi_tl(s->T1, s->T1, mask);
    tcg_gen_shl_tl(s->T0, s->T0, s->T1);
}

static void gen_SHRX(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    MemOp ot = decode->op[0].ot;
    int mask;

    mask = ot == MO_64 ? 63 : 31;
    tcg_gen_andi_tl(s->T1, s->T1, mask);
    if (ot != MO_64) {
        tcg_gen_ext32u_tl(s->T0, s->T0);
    }
    tcg_gen_shr_tl(s->T0, s->T0, s->T1);
}

static void gen_VCVTfp2fp(DisasContext *s, CPUX86State *env, X86DecodedInsn *decode)
{
    gen_unary_fp_sse(s, env, decode,
                     gen_helper_cvtpd2ps_xmm, gen_helper_cvtps2pd_xmm,
                     gen_helper_cvtpd2ps_ymm, gen_helper_cvtps2pd_ymm,
                     gen_helper_cvtsd2ss, gen_helper_cvtss2sd);
}