/*
 * RISC-V translation routines for the RVV Standard Extension.
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

static inline bool is_overlapped(const int8_t astart, int8_t asize,
                                 const int8_t bstart, int8_t bsize)
{
    const int8_t aend = astart + asize;
    const int8_t bend = bstart + bsize;

    return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
}

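/*
 * For example, the group v0-v3 (astart = 0, asize = 4) and the group v2-v3
 * (bstart = 2, bsize = 2) give MAX(4, 4) - MIN(0, 2) = 4, which is less than
 * 4 + 2 = 6, so they overlap; two disjoint groups always span at least
 * asize + bsize registers between the lowest start and the highest end.
 */
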
static bool require_rvv(DisasContext *s)
{
    return s->mstatus_vs != EXT_STATUS_DISABLED;
}

static bool require_rvf(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_16:
        return s->cfg_ptr->ext_zvfh;
    case MO_32:
        return s->cfg_ptr->ext_zve32f;
    case MO_64:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

static bool require_scale_rvf(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_8:
        return s->cfg_ptr->ext_zvfh;
    case MO_16:
        return s->cfg_ptr->ext_zve32f;
    case MO_32:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

static bool require_scale_rvfmin(DisasContext *s)
{
    if (s->mstatus_fs == EXT_STATUS_DISABLED) {
        return false;
    }

    switch (s->sew) {
    case MO_8:
        return s->cfg_ptr->ext_zvfhmin;
    case MO_16:
        return s->cfg_ptr->ext_zve32f;
    case MO_32:
        return s->cfg_ptr->ext_zve64d;
    default:
        return false;
    }
}

/* Destination vector register group cannot overlap source mask register. */
static bool require_vm(int vm, int vd)
{
    return (vm != 0 || vd != 0);
}

static bool require_nf(int vd, int nf, int lmul)
{
    int size = nf << MAX(lmul, 0);
    return size <= 8 && vd + size <= 32;
}

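/*
 * For example, a segment access with nf = 4 and LMUL = 2 (lmul = 1) touches
 * size = 4 << 1 = 8 registers, so it passes the EMUL * NFIELDS <= 8 test but
 * still requires vd <= 24 so the group does not run past v31.
 */
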
/*
 * Vector register should be aligned with the passed-in LMUL (EMUL).
 * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
 */
static bool require_align(const int8_t val, const int8_t lmul)
{
    return lmul <= 0 || extract32(val, 0, lmul) == 0;
}

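/*
 * E.g. with LMUL = 4 (lmul = 2) the two low bits of the register number
 * must be zero, so only v0, v4, v8, ... name a valid register group.
 */
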
/*
 * A destination vector register group can overlap a source vector
 * register group only if one of the following holds:
 *  1. The destination EEW equals the source EEW.
 *  2. The destination EEW is smaller than the source EEW and the overlap
 *     is in the lowest-numbered part of the source register group.
 *  3. The destination EEW is greater than the source EEW, the source EMUL
 *     is at least 1, and the overlap is in the highest-numbered part of
 *     the destination register group.
 * (Section 5.2)
 *
 * This function returns true if one of the following holds:
 *  * Destination vector register group does not overlap a source vector
 *    register group.
 *  * Rule 3 met.
 * For rule 1, overlap is allowed so this function doesn't need to be called.
 * For rule 2, (vd == vs). Caller has to check whether: (vd != vs) before
 * calling this function.
 */
static bool require_noover(const int8_t dst, const int8_t dst_lmul,
                           const int8_t src, const int8_t src_lmul)
{
    int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
    int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;

    /* Destination EEW is greater than the source EEW, check rule 3. */
    if (dst_size > src_size) {
        if (dst < src &&
            src_lmul >= 0 &&
            is_overlapped(dst, dst_size, src, src_size) &&
            !is_overlapped(dst, dst_size, src + src_size, src_size)) {
            return true;
        }
    }

    return !is_overlapped(dst, dst_size, src, src_size);
}

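/*
 * As a rule 3 example: a widening op with destination group v2-v3
 * (dst = 2, dst_lmul = 1) may take its source from v3 (src = 3,
 * src_lmul = 0), because the single source register overlaps only the
 * highest-numbered register of the destination group.
 */
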
static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
{
    TCGv s1, dst;

    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
        return false;
    }

    dst = dest_gpr(s, rd);

    if (rd == 0 && rs1 == 0) {
        s1 = tcg_temp_new();
        tcg_gen_mov_tl(s1, cpu_vl);
    } else if (rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_constant_tl(RV_VLEN_MAX);
    } else {
        s1 = get_gpr(s, rs1, EXT_ZERO);
    }

    gen_helper_vsetvl(dst, tcg_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    finalize_rvv_inst(s);

    gen_update_pc(s, s->cur_insn_len);
    lookup_and_goto_ptr(s);
    s->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2)
{
    TCGv dst;

    if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) {
        return false;
    }

    dst = dest_gpr(s, rd);

    gen_helper_vsetvl(dst, tcg_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    finalize_rvv_inst(s);
    gen_update_pc(s, s->cur_insn_len);
    lookup_and_goto_ptr(s);
    s->base.is_jmp = DISAS_NORETURN;
    return true;
}

static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
{
    TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}

static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
{
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}

static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a)
{
    TCGv s1 = tcg_constant_tl(a->rs1);
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetivli(s, a->rd, s1, s2);
}

/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
    return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlenb;
}

/* check functions */

/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * store check function.
 *
 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. Destination vector register number is a multiple of EMUL.
 *      (Section 3.4.2, 7.3)
 *   3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
 *   4. Vector register numbers accessed by the segment load or store
 *      cannot increment past 31. (Section 7.8)
 */
static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    return (emul >= -3 && emul <= 3) &&
           require_align(vd, emul) &&
           require_nf(vd, nf, emul);
}

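/*
 * emul here is log2(EMUL): e.g. a byte access (eew = MO_8 = 0) under
 * SEW = 32 (s->sew = 2) and LMUL = 1 (s->lmul = 0) gives
 * emul = 0 - 2 + 0 = -2, i.e. EMUL = 1/4, well inside the
 * 1/8 <= EMUL <= 8 window that the -3..3 range encodes.
 */
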
/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * load check function.
 *
 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply
 *      to load instructions.
 *   2. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
                            uint8_t eew)
{
    return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
}

/*
 * Vector indexed, indexed segment store check function.
 *
 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. Index vector register number is a multiple of EMUL.
 *      (Section 3.4.2, 7.3)
 *   3. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2, 7.3)
 *   4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
 *   5. Vector register numbers accessed by the segment load or store
 *      cannot increment past 31. (Section 7.8)
 */
static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
                                uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = (emul >= -3 && emul <= 3) &&
               require_align(vs2, emul) &&
               require_align(vd, s->lmul) &&
               require_nf(vd, nf, s->lmul);

    /*
     * V extension supports all vector load and store instructions,
     * except V extension does not support EEW=64 for index values
     * when XLEN=32. (Section 18.3)
     */
    if (get_xl(s) == MXL_RV32) {
        ret &= (eew != MO_64);
    }

    return ret;
}

/*
 * Vector indexed, indexed segment load check function.
 *
 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply
 *      to load instructions.
 *   2. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   3. Destination vector register cannot overlap a source vector
 *      register (vs2) group.
 *      (Section 5.2)
 *   4. Destination vector register groups cannot overlap
 *      the source vector register (vs2) group for
 *      indexed segment load instructions. (Section 7.8.3)
 */
static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
                                int nf, int vm, uint8_t eew)
{
    int8_t seg_vd;
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
               require_vm(vm, vd);

    /* Each segment register group has to follow overlap rules. */
    for (int i = 0; i < nf; ++i) {
        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;

        if (eew > s->sew) {
            if (seg_vd != vs2) {
                ret &= require_noover(seg_vd, s->lmul, vs2, emul);
            }
        } else if (eew < s->sew) {
            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
        }

        /*
         * Destination vector register groups cannot overlap
         * the source vector register (vs2) group for
         * indexed segment load instructions.
         */
        if (nf > 1) {
            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
                                  vs2, 1 << MAX(emul, 0));
        }
    }
    return ret;
}

static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
    return require_vm(vm, vd) &&
           require_align(vd, s->lmul) &&
           require_align(vs, s->lmul);
}

/*
 * Check function for vector instruction with format:
 * single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   2. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   3. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 */
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ss(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul);
}

static bool vext_check_ms(DisasContext *s, int vd, int vs)
{
    bool ret = require_align(vs, s->lmul);
    if (vd != vs) {
        ret &= require_noover(vd, 0, vs, s->lmul);
    }
    return ret;
}

/*
 * Check function for maskable vector instruction with format:
 * single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 *   2. Destination vector register cannot overlap a source vector
 *      register (vs2, vs1) group.
 *      (Section 5.2)
 *   3. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0),
 *      unless the destination vector register is being written
 *      with a mask value (e.g., comparisons) or the scalar result
 *      of a reduction. (Section 5.3)
 */
static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
{
    bool ret = vext_check_ms(s, vd, vs2) &&
               require_align(vs1, s->lmul);
    if (vd != vs1) {
        ret &= require_noover(vd, 0, vs1, s->lmul);
    }
    return ret;
}

/*
 * Common check function for vector widening instructions
 * of double-width result (2*SEW).
 *
 * Rules to be checked here:
 *   1. The largest vector register group used by an instruction
 *      can not be greater than 8 vector registers (Section 5.2):
 *      => LMUL < 8.
 *      => SEW < 64.
 *   2. Double-width SEW cannot be greater than ELEN.
 *   3. Destination vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   4. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
{
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
           require_align(vd, s->lmul + 1) &&
           require_vm(vm, vd);
}

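/*
 * The ELEN test works on log2 values: s->sew + 1 is log2 of the double-width
 * SEW in bytes, and elen >> 4 is ELEN / 16. E.g. widening from SEW = 32
 * (s->sew = 2) needs 2 + 1 = 3 <= elen >> 4, which holds for ELEN = 64
 * (64 >> 4 = 4) but not for ELEN = 32 (32 >> 4 = 2).
 */
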
/*
 * Common check function for vector narrowing instructions
 * of single-width result (SEW) and double-width source (2*SEW).
 *
 * Rules to be checked here:
 *   1. The largest vector register group used by an instruction
 *      can not be greater than 8 vector registers (Section 5.2):
 *      => LMUL < 8.
 *      => SEW < 64.
 *   2. Double-width SEW cannot be greater than ELEN.
 *   3. Source vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   4. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   5. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 */
static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
                                     int vm)
{
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) &&
           require_align(vs2, s->lmul + 1) &&
           require_align(vd, s->lmul) &&
           require_vm(vm, vd);
}

static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
           require_align(vs, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs, s->lmul);
}

static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
           require_align(vs, s->lmul + 1);
}

/*
 * Check function for vector instruction with format:
 * double-width result and single-width sources (2*SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the widen common rules apply.
 *   2. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 *      (Section 3.4.2)
 *   3. Destination vector register cannot overlap a source vector
 *      register (vs2, vs1) group.
 *      (Section 5.2)
 */
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs1, s->lmul);
}

/*
 * Check function for vector instruction with format:
 * double-width result and double-width source1 and single-width
 * source2 (2*SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the widen common rules apply.
 *   2. Source 1 (vs2) vector register number is a multiple of 2 * LMUL.
 *      (Section 3.4.2)
 *   3. Source 2 (vs1) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   4. Destination vector register cannot overlap a source vector
 *      register (vs1) group.
 *      (Section 5.2)
 */
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_ds(s, vd, vs1, vm) &&
           require_align(vs2, s->lmul + 1);
}

static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
    bool ret = vext_narrow_check_common(s, vd, vs, vm);
    if (vd != vs) {
        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
    }
    return ret;
}

/*
 * Check function for vector instruction with format:
 * single-width result and double-width source 1 and single-width
 * source 2 (SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the narrow common rules apply.
 *   2. Destination vector register cannot overlap a source vector
 *      register (vs2) group.
 *      (Section 5.2)
 *   3. Source 2 (vs1) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 */
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    return vext_check_sd(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul);
}

/*
 * Check function for vector reduction instructions.
 *
 * Rules to be checked here:
 *   1. Source 1 (vs2) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 */
static bool vext_check_reduction(DisasContext *s, int vs2)
{
    return require_align(vs2, s->lmul) && s->vstart_eq_zero;
}

/*
 * Check function for vector slide instructions.
 *
 * Rules to be checked here:
 *   1. Source 1 (vs2) vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   2. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2)
 *   3. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *      (Section 5.3)
 *   4. The destination vector register group for vslideup, vslide1up,
 *      vfslide1up, cannot overlap the source vector register (vs2) group.
 *      (Section 5.2, 16.3.1, 16.3.3)
 */
static bool vext_check_slide(DisasContext *s, int vd, int vs2,
                             int vm, bool is_over)
{
    bool ret = require_align(vs2, s->lmul) &&
               require_align(vd, s->lmul) &&
               require_vm(vm, vd);
    if (is_over) {
        ret &= (vd != vs2);
    }
    return ret;
}

/*
 * cpu_get_tb_cpu_state() sets VILL if RVV is not present.
 * So RVV is also checked by this function.
 */
static bool vext_check_isa_ill(DisasContext *s)
{
    return !s->vill;
}

/* common translation macro */
#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK)        \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
{                                                            \
    if (CHECK(s, a, EEW)) {                                  \
        return OP(s, a, EEW);                                \
    }                                                        \
    return false;                                            \
}

static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
{
    int8_t emul = eew - s->sew + s->lmul;
    return emul < 0 ? 0 : emul;
}

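/*
 * Fractional EMUL (emul < 0) is clamped to 0 before being stored in the
 * VDATA LMUL field: e.g. eew = MO_8, s->sew = MO_64, s->lmul = 0 gives
 * emul = -3 (EMUL = 1/8), which is encoded as 0 because, for sizing
 * purposes, a fractional group still occupies one whole vector register.
 */
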
/*
 *** unit stride load and store
 */
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
                                TCGv_env, TCGv_i32);

static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s,
                          bool is_store)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);

    /*
     * As simd_desc supports at most 2048 bytes, and in this implementation,
     * the max vector group length is 4096 bytes. So split it into two parts.
     *
     * The first part is vlen in bytes (vlenb), encoded in maxsz of simd_desc.
     * The second part is lmul, encoded in data of simd_desc.
     */
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    /*
     * According to the specification
     *
     * Additionally, if the Ztso extension is implemented, then vector memory
     * instructions in the V extension and Zve family of extensions follow
     * RVTSO at the instruction level. The Ztso extension does not
     * strengthen the ordering of intra-instruction element accesses.
     *
     * as a result neither ordered nor unordered accesses from the V
     * instructions need ordering within the loop but we do still need barriers
     * on the boundaries.
     */
    if (is_store && s->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }

    mark_vs_dirty(s);

    fn(dest, mask, base, tcg_env, desc);

    if (!is_store && s->ztso) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }

    finalize_rvv_inst(s);
    return true;
}

static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride load */
        { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
          gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
        /* unmasked unit stride load */
        { gen_helper_vle8_v, gen_helper_vle16_v,
          gen_helper_vle32_v, gen_helper_vle64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    /*
     * Vector load/store instructions have the EEW encoded
     * directly in the instructions. The maximum vector size is
     * calculated with EMUL rather than LMUL.
     */
    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}

static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vle8_v, MO_8, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)

static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride store */
        { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
          gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
        /* unmasked unit stride store */
        { gen_helper_vse8_v, gen_helper_vse16_v,
          gen_helper_vse32_v, gen_helper_vse64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}

static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}

GEN_VEXT_TRANS(vse8_v, MO_8, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)

/*
 *** unit stride mask load and store
 */
static bool ld_us_mask_op(DisasContext *s, arg_vlm_v *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn = gen_helper_vlm_v;

    /* EMUL = 1, NFIELDS = 1 */
    data = FIELD_DP32(data, VDATA, LMUL, 0);
    data = FIELD_DP32(data, VDATA, NF, 1);
    /* Mask destination registers are always tail-agnostic */
    data = FIELD_DP32(data, VDATA, VTA, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}

static bool ld_us_mask_check(DisasContext *s, arg_vlm_v *a, uint8_t eew)
{
    /* EMUL = 1, NFIELDS = 1 */
    return require_rvv(s) && vext_check_isa_ill(s);
}

static bool st_us_mask_op(DisasContext *s, arg_vsm_v *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn = gen_helper_vsm_v;

    /* EMUL = 1, NFIELDS = 1 */
    data = FIELD_DP32(data, VDATA, LMUL, 0);
    data = FIELD_DP32(data, VDATA, NF, 1);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}

static bool st_us_mask_check(DisasContext *s, arg_vsm_v *a, uint8_t eew)
{
    /* EMUL = 1, NFIELDS = 1 */
    return require_rvv(s) && vext_check_isa_ill(s);
}

GEN_VEXT_TRANS(vlm_v, MO_8, vlm_v, ld_us_mask_op, ld_us_mask_check)
GEN_VEXT_TRANS(vsm_v, MO_8, vsm_v, st_us_mask_op, st_us_mask_check)

/*
 *** stride load and store
 */
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
                                    TCGv, TCGv_env, TCGv_i32);

static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base, stride;
    TCGv_i32 desc;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    stride = get_gpr(s, rs2, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    mark_vs_dirty(s);

    fn(dest, mask, base, stride, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        gen_helper_vlse8_v, gen_helper_vlse16_v,
        gen_helper_vlse32_v, gen_helper_vlse64_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vlse8_v, MO_8, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)

static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        /* masked stride store */
        gen_helper_vsse8_v, gen_helper_vsse16_v,
        gen_helper_vsse32_v, gen_helper_vsse64_v
    };

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}

GEN_VEXT_TRANS(vsse8_v, MO_8, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)

/*
 *** index load and store
 */
typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
                                   TCGv_ptr, TCGv_env, TCGv_i32);

static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, tcg_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    mark_vs_dirty(s);

    fn(dest, mask, base, index, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v,
          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
    };

    fn = fns[eew][s->sew];

    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
}

GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)

static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v,
          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
    };

    fn = fns[eew][s->sew];

    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
}

GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)

/*
 *** unit stride fault-only-first load
 */
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    fn(dest, mask, base, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[4] = {
        gen_helper_vle8ff_v, gen_helper_vle16ff_v,
        gen_helper_vle32ff_v, gen_helper_vle64ff_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    return ldff_trans(a->rd, a->rs1, data, fn, s);
}

GEN_VEXT_TRANS(vle8ff_v, MO_8, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)

/*
 * load and store whole register instructions
 */
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);

static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
                             gen_helper_ldst_whole *fn,
                             DisasContext *s)
{
    TCGv_ptr dest;
    TCGv base;
    TCGv_i32 desc;

    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
    dest = tcg_temp_new_ptr();
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    base = get_gpr(s, rs1, EXT_NONE);
    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));

    mark_vs_dirty(s);

    fn(dest, base, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

/*
 * load and store whole register instructions ignore vtype and vl setting.
 * Thus, we don't need to check vill bit. (Section 7.9)
 */
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF)                \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
{                                                         \
    if (require_rvv(s) &&                                 \
        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                 \
        return ldst_whole_trans(a->rd, a->rs1, ARG_NF,    \
                                gen_helper_##NAME, s);    \
    }                                                     \
    return false;                                         \
}

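/*
 * The QEMU_IS_ALIGNED(a->rd, ARG_NF) test enforces the register alignment
 * rule for whole register moves: e.g. vl8re8_v (ARG_NF = 8) only accepts
 * v0, v8, v16 or v24 as its destination.
 */
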
GEN_LDST_WHOLE_TRANS(vl1re8_v, 1)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1)
GEN_LDST_WHOLE_TRANS(vl2re8_v, 2)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2)
GEN_LDST_WHOLE_TRANS(vl4re8_v, 4)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4)
GEN_LDST_WHOLE_TRANS(vl8re8_v, 8)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8)

/*
 * The vector whole register store instructions are encoded similarly to
 * unmasked unit-stride stores of elements with EEW=8.
 */
GEN_LDST_WHOLE_TRANS(vs1r_v, 1)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8)

/*
 *** Vector Integer Arithmetic Instructions
 */

/*
 * MAXSZ returns the maximum vector size that can be operated on, in bytes,
 * which is used in GVEC IR when the vl_eq_vlmax flag is set to true
 * to accelerate vector operations.
 */
static inline uint32_t MAXSZ(DisasContext *s)
{
    int max_sz = s->cfg_ptr->vlenb * 8;
    return max_sz >> (3 - s->lmul);
}

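/*
 * max_sz is the size of an LMUL = 8 group, so the shift reduces it by the
 * distance from LMUL = 8: e.g. with VLEN = 128 (vlenb = 16) and LMUL = 2
 * (s->lmul = 1), MAXSZ = 128 >> (3 - 1) = 32 bytes, i.e. two registers.
 */
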
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

static inline bool
do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
              gen_helper_gvec_4_ptr *fn)
{
    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd),
                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
                MAXSZ(s), MAXSZ(s));
    } else {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fn);
    }
    finalize_rvv_inst(s);
    return true;
}

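/*
 * The fast path requires an unmasked operation (a->vm), vl equal to VLMAX,
 * and no tail elements needing agnostic handling (tail-agnostic with
 * fractional LMUL leaves part of the register as tail); only then can the
 * operation be emitted as inline GVEC IR over the whole register group
 * instead of calling the out-of-line helper.
 */
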
/* OPIVV with GVEC IR */
#define GEN_OPIVV_GVEC_TRANS(NAME, SUF)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    static gen_helper_gvec_4_ptr * const fns[4] = {              \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,            \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,            \
    };                                                           \
    if (!opivv_check(s, a)) {                                    \
        return false;                                            \
    }                                                            \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
}

GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)

typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                              TCGv_env, TCGv_i32);

static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = get_gpr(s, rs1, EXT_SIGN);

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

static bool opivx_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
                         uint32_t, uint32_t);

static inline bool
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
              gen_helper_opivx *fn)
{
    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        TCGv_i64 src1 = tcg_temp_new_i64();

        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        finalize_rvv_inst(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

/* OPIVX with GVEC IR */
#define GEN_OPIVX_GVEC_TRANS(NAME, SUF)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
{                                                                \
    static gen_helper_opivx * const fns[4] = {                   \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,            \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,            \
    };                                                           \
    if (!opivx_check(s, a)) {                                    \
        return false;                                            \
    }                                                            \
    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
}

GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)

static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub8_i64(d, b, a);
}

static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub16_i64(d, b, a);
}

static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    tcg_gen_sub_i32(ret, arg2, arg1);
}

static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_sub_i64(ret, arg2, arg1);
}

static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_sub_vec(vece, r, b, a);
}

static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
    static const GVecGen2s rsub_op[4] = {
        { .fni8 = gen_vec_rsub8_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_vec_rsub16_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_rsub_i32,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_rsub_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
}

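/*
 * The generic gvec layer only provides "vector - scalar" subtraction, so
 * reverse-subtract (vd[i] = x[rs1] - vs2[i]) is built by swapping the two
 * operands in each of the gen_rsub* expanders above; e.g. gen_rsub_i64
 * emits arg2 - arg1 where a plain subs expansion would emit arg1 - arg2.
 */
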
GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)

typedef enum {
    IMM_ZX,         /* Zero-extended */
    IMM_SX,         /* Sign-extended */
    IMM_TRUNC_SEW,  /* Truncate to log(SEW) bits */
    IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */
} imm_mode_t;

static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
{
    switch (imm_mode) {
    case IMM_ZX:
        return extract64(imm, 0, 5);
    case IMM_SX:
        return sextract64(imm, 0, 5);
    case IMM_TRUNC_SEW:
        return extract64(imm, 0, s->sew + 3);
    case IMM_TRUNC_2SEW:
        return extract64(imm, 0, s->sew + 4);
    default:
        g_assert_not_reached();
    }
}

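/*
 * IMM_TRUNC_SEW keeps s->sew + 3 = log2(SEW) low bits, which is exactly the
 * range of a legal shift amount: e.g. for SEW = 16 (s->sew = 1) a vsll.vi
 * immediate is truncated to 4 bits, i.e. shifts of 0..15.
 */
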
static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s,
                        imm_mode_t imm_mode)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
                                      s->cfg_ptr->vlenb, data));

    tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, tcg_env, desc);

    finalize_rvv_inst(s);
    return true;
}

typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);

static inline bool
do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
              gen_helper_opivx *fn, imm_mode_t imm_mode)
{
    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
        finalize_rvv_inst(s);
        return true;
    }
    return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
}

/* OPIVI with GVEC IR */
#define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF)   \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)     \
{                                                          \
    static gen_helper_opivx * const fns[4] = {             \
        gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,    \
        gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,    \
    };                                                     \
    if (!opivx_check(s, a)) {                              \
        return false;                                      \
    }                                                      \
    return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,         \
                         fns[s->sew], IMM_MODE);           \
}

GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)

static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
                               int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_constant_i64(c);
    tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
}

GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)

/* Vector Widening Integer Add/Subtract */

/* OPIVV with WIDEN */
static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
}

static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn,
                           bool (*checkfn)(DisasContext *, arg_rmrr *))
{
    if (checkfn(s, a)) {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fn);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK)             \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{                                                      \
    static gen_helper_gvec_4_ptr * const fns[3] = {    \
        gen_helper_##NAME##_b,                         \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w                          \
    };                                                 \
    return do_opivv_widen(s, a, fns[s->sew], CHECK);   \
}

GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)

/* OPIVX with WIDEN */
static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ds(s, a->rd, a->rs2, a->vm);
}

#define GEN_OPIVX_WIDEN_TRANS(NAME, CHECK)                                \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    if (CHECK(s, a)) {                                                    \
        static gen_helper_opivx * const fns[3] = {                        \
            gen_helper_##NAME##_b,                                        \
            gen_helper_##NAME##_h,                                        \
            gen_helper_##NAME##_w                                         \
        };                                                                \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
    }                                                                     \
    return false;                                                         \
}

GEN_OPIVX_WIDEN_TRANS(vwaddu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwadd_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwsubu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwsub_vx, opivx_widen_check)

/* WIDEN OPIVV with WIDEN */
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
}

static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn)
{
    if (opiwv_widen_check(s, a)) {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        data = FIELD_DP32(data, VDATA, VTA, s->vta);
        data = FIELD_DP32(data, VDATA, VMA, s->vma);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           tcg_env, s->cfg_ptr->vlenb,
                           s->cfg_ptr->vlenb, data, fn);
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

#define GEN_OPIWV_WIDEN_TRANS(NAME)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{                                                      \
    static gen_helper_gvec_4_ptr * const fns[3] = {    \
        gen_helper_##NAME##_b,                         \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w                          \
    };                                                 \
    return do_opiwv_widen(s, a, fns[s->sew]);          \
}

GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
GEN_OPIWV_WIDEN_TRANS(vwsub_wv)

/* WIDEN OPIVX with WIDEN */
static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_dd(s, a->rd, a->rs2, a->vm);
}

static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opiwx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIWX_WIDEN_TRANS(NAME)                    \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
{                                                      \
    static gen_helper_opivx * const fns[3] = {         \
        gen_helper_##NAME##_b,                         \
        gen_helper_##NAME##_h,                         \
        gen_helper_##NAME##_w                          \
    };                                                 \
    return do_opiwx_widen(s, a, fns[s->sew]);          \
}

GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)

static bool opivv_trans(uint32_t vd, uint32_t vs1, uint32_t vs2, uint32_t vm,
                        gen_helper_gvec_4_ptr *fn, DisasContext *s)
{
    uint32_t data = 0;

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, VTA, s->vta);
    data = FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);
    data = FIELD_DP32(data, VDATA, VMA, s->vma);
    tcg_gen_gvec_4_ptr(vreg_ofs(s, vd), vreg_ofs(s, 0), vreg_ofs(s, vs1),
                       vreg_ofs(s, vs2), tcg_env, s->cfg_ptr->vlenb,
                       s->cfg_ptr->vlenb, data, fn);
    finalize_rvv_inst(s);
    return true;
}

/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
/* OPIVV without GVEC IR */
#define GEN_OPIVV_TRANS(NAME, CHECK)                                      \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    if (CHECK(s, a)) {                                                    \
        static gen_helper_gvec_4_ptr * const fns[4] = {                   \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                 \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                 \
        };                                                                \
        return opivv_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
    }                                                                     \
    return false;                                                         \
}

/*
 * For vadc and vsbc, an illegal instruction exception is raised if the
 * destination vector register is v0 and LMUL > 1. (Section 11.4)
 */
static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}

GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)

/*
 * For vmadc and vmsbc, an illegal instruction exception is raised if the
 * destination vector register overlaps a source vector register group.
 * (Section 11.4)
 */
static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)

static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           (a->rd != 0) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}

/* OPIVX without GVEC IR */
#define GEN_OPIVX_TRANS(NAME, CHECK)                                      \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    if (CHECK(s, a)) {                                                    \
        static gen_helper_opivx * const fns[4] = {                        \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                 \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                 \
        };                                                                \
                                                                          \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
    }                                                                     \
    return false;                                                         \
}

GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)

static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)

/* OPIVI without GVEC IR */
#define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK)          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)         \
{                                                              \
    if (CHECK(s, a)) {                                         \
        static gen_helper_opivx * const fns[4] = {             \
            gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,    \
            gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,    \
        };                                                     \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,       \
                           fns[s->sew], s, IMM_MODE);          \
    }                                                          \
    return false;                                              \
}

GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)

/* Vector Bitwise Logical Instructions */
GEN_OPIVV_GVEC_TRANS(vand_vv, and)
GEN_OPIVV_GVEC_TRANS(vor_vv, or)
GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori)
GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)

/* Vector Single-Width Bit Shift Instructions */
GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv)
GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv)

typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
                           uint32_t, uint32_t);

static inline bool
do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
                    gen_helper_opivx *fn)
{
    if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
        TCGv_i32 src1 = tcg_temp_new_i32();

        tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
        tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        finalize_rvv_inst(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

#define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                 \
{                                                                      \
    static gen_helper_opivx * const fns[4] = {                         \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,                  \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,                  \
    };                                                                 \
    if (!opivx_check(s, a)) {                                          \
        return false;                                                  \
    }                                                                  \
    return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
}

GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)

GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)

/* Vector Narrowing Integer Right Shift Instructions */
static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
}

/* OPIVV with NARROW */
#define GEN_OPIWV_NARROW_TRANS(NAME)                           \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)         \
{                                                              \
    if (opiwv_narrow_check(s, a)) {                            \
        uint32_t data = 0;                                     \
        static gen_helper_gvec_4_ptr * const fns[3] = {        \
            gen_helper_##NAME##_b,                             \
            gen_helper_##NAME##_h,                             \
            gen_helper_##NAME##_w,                             \
        };                                                     \
                                                               \
        data = FIELD_DP32(data, VDATA, VM, a->vm);             \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);         \
        data = FIELD_DP32(data, VDATA, VTA, s->vta);           \
        data = FIELD_DP32(data, VDATA, VMA, s->vma);           \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
                           vreg_ofs(s, a->rs1),                \
                           vreg_ofs(s, a->rs2), tcg_env,       \
                           s->cfg_ptr->vlenb,                  \
                           s->cfg_ptr->vlenb, data,            \
                           fns[s->sew]);                       \
        finalize_rvv_inst(s);                                  \
        return true;                                           \
    }                                                          \
    return false;                                              \
}

GEN_OPIWV_NARROW_TRANS(vnsra_wv)
GEN_OPIWV_NARROW_TRANS(vnsrl_wv)

static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sd(s, a->rd, a->rs2, a->vm);
}

/* OPIVX with NARROW */
#define GEN_OPIWX_NARROW_TRANS(NAME)                                      \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    if (opiwx_narrow_check(s, a)) {                                       \
        static gen_helper_opivx * const fns[3] = {                        \
            gen_helper_##NAME##_b,                                        \
            gen_helper_##NAME##_h,                                        \
            gen_helper_##NAME##_w,                                        \
        };                                                                \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s); \
    }                                                                     \
    return false;                                                         \
}

GEN_OPIWX_NARROW_TRANS(vnsra_wx)
GEN_OPIWX_NARROW_TRANS(vnsrl_wx)

/* OPIWI with NARROW */
#define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX)          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)         \
{                                                              \
    if (opiwx_narrow_check(s, a)) {                            \
        static gen_helper_opivx * const fns[3] = {             \
            gen_helper_##OPIVX##_b,                            \
            gen_helper_##OPIVX##_h,                            \
            gen_helper_##OPIVX##_w,                            \
        };                                                     \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,       \
                           fns[s->sew], s, IMM_MODE);          \
    }                                                          \
    return false;                                              \
}

GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)

/* Vector Integer Comparison Instructions */
/*
 * For all comparison instructions, an illegal instruction exception is raised
 * if the destination vector register overlaps a source vector register group
 * and LMUL > 1. (Section 11.8)
 */
static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_mss(s, a->rd, a->rs1, a->rs2);
}

GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)

static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ms(s, a->rd, a->rs2);
}

GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)

GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check)
GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)

/* Vector Integer Min/Max Instructions */
GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
GEN_OPIVV_GVEC_TRANS(vmin_vv, smin)
GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
GEN_OPIVV_GVEC_TRANS(vmax_vv, smax)
GEN_OPIVX_TRANS(vminu_vx, opivx_check)
GEN_OPIVX_TRANS(vmin_vx, opivx_check)
GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
GEN_OPIVX_TRANS(vmax_vx, opivx_check)

/* Vector Single-Width Integer Multiply Instructions */

static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector integer instructions,
     * except that the vmulh integer multiply variants
     * that return the high word of the product
     * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
     * are not included for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivv_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
{
    /*
     * All Zve* extensions support all vector integer instructions,
     * except that the vmulh integer multiply variants
     * that return the high word of the product
     * (vmulh.vv, vmulh.vx, vmulhu.vv, vmulhu.vx, vmulhsu.vv, vmulhsu.vx)
     * are not included for EEW=64 in Zve64*. (Section 18.2)
     */
    return opivx_check(s, a) &&
           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
}

GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
GEN_OPIVV_TRANS(vmulh_vv, vmulh_vv_check)
GEN_OPIVV_TRANS(vmulhu_vv, vmulh_vv_check)
GEN_OPIVV_TRANS(vmulhsu_vv, vmulh_vv_check)
GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
GEN_OPIVX_TRANS(vmulh_vx, vmulh_vx_check)
GEN_OPIVX_TRANS(vmulhu_vx, vmulh_vx_check)
GEN_OPIVX_TRANS(vmulhsu_vx, vmulh_vx_check)

/* Vector Integer Divide Instructions */
GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
GEN_OPIVV_TRANS(vremu_vv, opivv_check)
GEN_OPIVV_TRANS(vrem_vv, opivv_check)
GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
GEN_OPIVX_TRANS(vremu_vx, opivx_check)
GEN_OPIVX_TRANS(vrem_vx, opivx_check)

/* Vector Widening Integer Multiply Instructions */
GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmul_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmulu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx, opivx_widen_check)

/* Vector Single-Width Integer Multiply-Add Instructions */
GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)

/* Vector Widening Integer Multiply-Add Instructions */
GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmacc_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx, opivx_widen_check)
GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx, opivx_widen_check)

/* Vector Integer Merge and Move Instructions */
static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
{
    if (require_rvv(s) &&
        vext_check_isa_ill(s) &&
        /* vmv.v.v has rs2 = 0 and vm = 1 */
        vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
        if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
            tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
                             vreg_ofs(s, a->rs1),
                             MAXSZ(s), MAXSZ(s));
        } else {
            uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
            data = FIELD_DP32(data, VDATA, VTA, s->vta);
            static gen_helper_gvec_2_ptr * const fns[4] = {
                gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
                gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
            };

            tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
                               tcg_env, s->cfg_ptr->vlenb,
                               s->cfg_ptr->vlenb, data,
                               fns[s->sew]);
        }
        finalize_rvv_inst(s);
        return true;
    }
    return false;
}

2017 typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
2018 static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
2020 if (require_rvv(s) &&
2021 vext_check_isa_ill(s) &&
2022 /* vmv.v.x has rs2 = 0 and vm = 1 */
2023 vext_check_ss(s, a->rd, 0, 1)) {
2026 s1 = get_gpr(s, a->rs1, EXT_SIGN);
2028 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2029 if (get_xl(s) == MXL_RV32 && s->sew == MO_64) {
2030 TCGv_i64 s1_i64 = tcg_temp_new_i64();
2031 tcg_gen_ext_tl_i64(s1_i64, s1);
2032 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2033 MAXSZ(s), MAXSZ(s), s1_i64);
2035 tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
2036 MAXSZ(s), MAXSZ(s), s1);
2040 TCGv_i64 s1_i64 = tcg_temp_new_i64();
2041 TCGv_ptr dest = tcg_temp_new_ptr();
2042 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2043 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2044 static gen_helper_vmv_vx * const fns[4] = {
2045 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
2046 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
2049 tcg_gen_ext_tl_i64(s1_i64, s1);
2050 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
2051 s->cfg_ptr->vlenb, data));
2052 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2053 fns[s->sew](dest, s1_i64, tcg_env, desc);
2056 finalize_rvv_inst(s);
2062 static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
2064 if (require_rvv(s) &&
2065 vext_check_isa_ill(s) &&
2066 /* vmv.v.i has rs2 = 0 and vm = 1 */
2067 vext_check_ss(s, a->rd, 0, 1)) {
2068 int64_t simm = sextract64(a->rs1, 0, 5);
2069 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2070 tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
2071 MAXSZ(s), MAXSZ(s), simm);
2076 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2077 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2078 static gen_helper_vmv_vx * const fns[4] = {
2079 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
2080 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
2083 s1 = tcg_constant_i64(simm);
2084 dest = tcg_temp_new_ptr();
2085 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
2086 s->cfg_ptr->vlenb, data));
2087 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2088 fns[s->sew](dest, s1, tcg_env, desc);
2090 finalize_rvv_inst(s);
2096 GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
2097 GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
2098 GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)
2101 *** Vector Fixed-Point Arithmetic Instructions
2104 /* Vector Single-Width Saturating Add and Subtract */
2105 GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
2106 GEN_OPIVV_TRANS(vsadd_vv, opivv_check)
2107 GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
2108 GEN_OPIVV_TRANS(vssub_vv, opivv_check)
2109 GEN_OPIVX_TRANS(vsaddu_vx, opivx_check)
2110 GEN_OPIVX_TRANS(vsadd_vx, opivx_check)
2111 GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
2112 GEN_OPIVX_TRANS(vssub_vx, opivx_check)
2113 GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check)
2114 GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
2116 /* Vector Single-Width Averaging Add and Subtract */
2117 GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
2118 GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
2119 GEN_OPIVV_TRANS(vasub_vv, opivv_check)
2120 GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
2121 GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
2122 GEN_OPIVX_TRANS(vaaddu_vx, opivx_check)
2123 GEN_OPIVX_TRANS(vasub_vx, opivx_check)
2124 GEN_OPIVX_TRANS(vasubu_vx, opivx_check)
2126 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
2128 static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
2131 * All Zve* extensions support all vector fixed-point arithmetic
2132 * instructions, except that vsmul.vv and vsmul.vx are not supported
2133 * for EEW=64 in Zve64*. (Section 18.2)
2135 return opivv_check(s, a) &&
2136 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
2139 static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
2142 * All Zve* extensions support all vector fixed-point arithmetic
2143 * instructions, except that vsmul.vv and vsmul.vx are not supported
2144 * for EEW=64 in Zve64*. (Section 18.2)
2146 return opivx_check(s, a) &&
2147 (!has_ext(s, RVV) ? s->sew != MO_64 : true);
2150 GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
2151 GEN_OPIVX_TRANS(vsmul_vx, vsmul_vx_check)
2153 /* Vector Single-Width Scaling Shift Instructions */
2154 GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
2155 GEN_OPIVV_TRANS(vssra_vv, opivv_check)
2156 GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
2157 GEN_OPIVX_TRANS(vssra_vx, opivx_check)
2158 GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check)
2159 GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check)
2161 /* Vector Narrowing Fixed-Point Clip Instructions */
2162 GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
2163 GEN_OPIWV_NARROW_TRANS(vnclip_wv)
2164 GEN_OPIWX_NARROW_TRANS(vnclipu_wx)
2165 GEN_OPIWX_NARROW_TRANS(vnclip_wx)
2166 GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx)
2167 GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx)
2170 *** Vector Floating-Point Arithmetic Instructions
2174 * As RVF-only CPUs always have values NaN-boxed to 64 bits,
2175 * RVF and RVD can be treated equally.
2176 * We don't have to deal with the case of SEW > FLEN.
2178 * If SEW < FLEN, check whether the input fp register is a valid
2179 * NaN-boxed value; if so, the least-significant SEW bits
2180 * of the f register are used, else the canonical NaN value is used.
2182 static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
2186 gen_check_nanbox_h(out, in);
2189 gen_check_nanbox_s(out, in);
2192 tcg_gen_mov_i64(out, in);
2195 g_assert_not_reached();
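/*
 * Illustrative sketch (not part of the translator): the value-level
 * behaviour that gen_check_nanbox_h emits as TCG ops, i.e. what
 * do_nanbox computes for SEW=16. 0x7e00 is the canonical binary16
 * NaN; uint64_t is assumed to come from <stdint.h>.
 */
static inline uint64_t nanbox_check_h_sketch(uint64_t in)
{
    /* A properly boxed half-precision value has ones in bits [63:16]. */
    if (in >= 0xffffffffffff0000ull) {
        return in;                    /* low 16 bits hold a valid half */
    }
    return 0xffffffffffff7e00ull;     /* boxed canonical NaN */
}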
2199 /* Vector Single-Width Floating-Point Add/Subtract Instructions */
2202 * If the current SEW does not correspond to a supported IEEE floating-point
2203 * type, an illegal instruction exception is raised.
2205 static bool opfvv_check(DisasContext *s, arg_rmrr *a)
2207 return require_rvv(s) &&
2209 vext_check_isa_ill(s) &&
2210 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
2213 /* OPFVV without GVEC IR */
2214 #define GEN_OPFVV_TRANS(NAME, CHECK) \
2215 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2217 if (CHECK(s, a)) { \
2218 uint32_t data = 0; \
2219 static gen_helper_gvec_4_ptr * const fns[3] = { \
2220 gen_helper_##NAME##_h, \
2221 gen_helper_##NAME##_w, \
2222 gen_helper_##NAME##_d, \
2224 gen_set_rm(s, RISCV_FRM_DYN); \
2226 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2227 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2228 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2229 data = \
2230 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
2231 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2232 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2233 vreg_ofs(s, a->rs1), \
2234 vreg_ofs(s, a->rs2), tcg_env, \
2235 s->cfg_ptr->vlenb, \
2236 s->cfg_ptr->vlenb, data, \
2238 finalize_rvv_inst(s); \
2243 GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
2244 GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
2246 typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
2247 TCGv_env, TCGv_i32);
2249 static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
2250 uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
2252 TCGv_ptr dest, src2, mask;
2256 dest = tcg_temp_new_ptr();
2257 mask = tcg_temp_new_ptr();
2258 src2 = tcg_temp_new_ptr();
2259 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
2260 s->cfg_ptr->vlenb, data));
2262 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, vd));
2263 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, vs2));
2264 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
2266 /* NaN-box f[rs1] */
2267 t1 = tcg_temp_new_i64();
2268 do_nanbox(s, t1, cpu_fpr[rs1]);
2270 fn(dest, mask, t1, src2, tcg_env, desc);
2272 finalize_rvv_inst(s);
2277 * If the current SEW does not correspond to a supported IEEE floating-point
2278 * type, an illegal instruction exception is raised.
2280 static bool opfvf_check(DisasContext *s, arg_rmrr *a)
2282 return require_rvv(s) &&
2284 vext_check_isa_ill(s) &&
2285 vext_check_ss(s, a->rd, a->rs2, a->vm);
2288 /* OPFVF without GVEC IR */
2289 #define GEN_OPFVF_TRANS(NAME, CHECK) \
2290 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2292 if (CHECK(s, a)) { \
2293 uint32_t data = 0; \
2294 static gen_helper_opfvf *const fns[3] = { \
2295 gen_helper_##NAME##_h, \
2296 gen_helper_##NAME##_w, \
2297 gen_helper_##NAME##_d, \
2299 gen_set_rm(s, RISCV_FRM_DYN); \
2300 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2301 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2302 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2303 data = FIELD_DP32(data, VDATA, VTA_ALL_1S, \
2304 s->cfg_vta_all_1s); \
2305 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2306 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2307 fns[s->sew - 1], s); \
2312 GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
2313 GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
2314 GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
2316 /* Vector Widening Floating-Point Add/Subtract Instructions */
2317 static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
2319 return require_rvv(s) &&
2320 require_scale_rvf(s) &&
2322 vext_check_isa_ill(s) &&
2323 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
2326 /* OPFVV with WIDEN */
2327 #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
2328 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2330 if (CHECK(s, a)) { \
2331 uint32_t data = 0; \
2332 static gen_helper_gvec_4_ptr * const fns[2] = { \
2333 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2335 gen_set_rm(s, RISCV_FRM_DYN); \
2337 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2338 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2339 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2340 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2341 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2342 vreg_ofs(s, a->rs1), \
2343 vreg_ofs(s, a->rs2), tcg_env, \
2344 s->cfg_ptr->vlenb, \
2345 s->cfg_ptr->vlenb, data, \
2347 finalize_rvv_inst(s); \
2353 GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
2354 GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
2356 static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
2358 return require_rvv(s) &&
2359 require_scale_rvf(s) &&
2361 vext_check_isa_ill(s) &&
2362 vext_check_ds(s, a->rd, a->rs2, a->vm);
2365 /* OPFVF with WIDEN */
2366 #define GEN_OPFVF_WIDEN_TRANS(NAME) \
2367 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2369 if (opfvf_widen_check(s, a)) { \
2370 uint32_t data = 0; \
2371 static gen_helper_opfvf *const fns[2] = { \
2372 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2374 gen_set_rm(s, RISCV_FRM_DYN); \
2375 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2376 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2377 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2378 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2379 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2380 fns[s->sew - 1], s); \
2385 GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
2386 GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
2388 static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
2390 return require_rvv(s) &&
2391 require_scale_rvf(s) &&
2393 vext_check_isa_ill(s) &&
2394 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
2397 /* WIDEN OPFVV with WIDEN */
2398 #define GEN_OPFWV_WIDEN_TRANS(NAME) \
2399 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2401 if (opfwv_widen_check(s, a)) { \
2402 uint32_t data = 0; \
2403 static gen_helper_gvec_4_ptr * const fns[2] = { \
2404 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2406 gen_set_rm(s, RISCV_FRM_DYN); \
2408 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2409 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2410 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2411 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2412 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2413 vreg_ofs(s, a->rs1), \
2414 vreg_ofs(s, a->rs2), tcg_env, \
2415 s->cfg_ptr->vlenb, \
2416 s->cfg_ptr->vlenb, data, \
2418 finalize_rvv_inst(s); \
2424 GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
2425 GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
2427 static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2429 return require_rvv(s) &&
2430 require_scale_rvf(s) &&
2432 vext_check_isa_ill(s) &&
2433 vext_check_dd(s, a->rd, a->rs2, a->vm);
2436 /* WIDEN OPFVF with WIDEN */
2437 #define GEN_OPFWF_WIDEN_TRANS(NAME) \
2438 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2440 if (opfwf_widen_check(s, a)) { \
2441 uint32_t data = 0; \
2442 static gen_helper_opfvf *const fns[2] = { \
2443 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2445 gen_set_rm(s, RISCV_FRM_DYN); \
2446 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2447 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2448 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2449 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2450 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2451 fns[s->sew - 1], s); \
2456 GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
2457 GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
2459 /* Vector Single-Width Floating-Point Multiply/Divide Instructions */
2460 GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
2461 GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
2462 GEN_OPFVF_TRANS(vfmul_vf, opfvf_check)
2463 GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check)
2464 GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
2466 /* Vector Widening Floating-Point Multiply */
2467 GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
2468 GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
2470 /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
2471 GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
2472 GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
2473 GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
2474 GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
2475 GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
2476 GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
2477 GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
2478 GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
2479 GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
2480 GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
2481 GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
2482 GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
2483 GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
2484 GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
2485 GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
2486 GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
2488 /* Vector Widening Floating-Point Fused Multiply-Add Instructions */
2489 GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
2490 GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
2491 GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
2492 GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
2493 GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
2494 GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
2495 GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
2496 GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
2498 /* Vector Floating-Point Square-Root Instruction */
2501 * If the current SEW does not correspond to a supported IEEE floating-point
2502 * type, an illegal instruction exception is raised.
2504 static bool opfv_check(DisasContext *s, arg_rmr *a)
2506 return require_rvv(s) &&
2508 vext_check_isa_ill(s) &&
2509 /* OPFV instructions ignore vs1 check */
2510 vext_check_ss(s, a->rd, a->rs2, a->vm);
2513 static bool do_opfv(DisasContext *s, arg_rmr *a,
2514 gen_helper_gvec_3_ptr *fn,
2515 bool (*checkfn)(DisasContext *, arg_rmr *),
2518 if (checkfn(s, a)) {
2520 gen_set_rm_chkfrm(s, rm);
2522 data = FIELD_DP32(data, VDATA, VM, a->vm);
2523 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2524 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2525 data = FIELD_DP32(data, VDATA, VMA, s->vma);
2526 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2527 vreg_ofs(s, a->rs2), tcg_env,
2529 s->cfg_ptr->vlenb, data, fn);
2530 finalize_rvv_inst(s);
2536 #define GEN_OPFV_TRANS(NAME, CHECK, FRM) \
2537 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2539 static gen_helper_gvec_3_ptr * const fns[3] = { \
2540 gen_helper_##NAME##_h, \
2541 gen_helper_##NAME##_w, \
2542 gen_helper_##NAME##_d \
2544 return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \
2547 GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
2548 GEN_OPFV_TRANS(vfrsqrt7_v, opfv_check, RISCV_FRM_DYN)
2549 GEN_OPFV_TRANS(vfrec7_v, opfv_check, RISCV_FRM_DYN)
2551 /* Vector Floating-Point MIN/MAX Instructions */
2552 GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
2553 GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
2554 GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
2555 GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
2557 /* Vector Floating-Point Sign-Injection Instructions */
2558 GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
2559 GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
2560 GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
2561 GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
2562 GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
2563 GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
2565 /* Vector Floating-Point Compare Instructions */
2566 static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2568 return require_rvv(s) &&
2570 vext_check_isa_ill(s) &&
2571 vext_check_mss(s, a->rd, a->rs1, a->rs2);
2574 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
2575 GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
2576 GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
2577 GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
2579 static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2581 return require_rvv(s) &&
2583 vext_check_isa_ill(s) &&
2584 vext_check_ms(s, a->rd, a->rs2);
2587 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
2588 GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
2589 GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
2590 GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
2591 GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
2592 GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
2594 /* Vector Floating-Point Classify Instruction */
2595 GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN)
2597 /* Vector Floating-Point Merge Instruction */
2598 GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check)
2600 static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2602 if (require_rvv(s) &&
2604 vext_check_isa_ill(s) &&
2605 require_align(a->rd, s->lmul)) {
2606 gen_set_rm(s, RISCV_FRM_DYN);
2610 if (s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
2611 t1 = tcg_temp_new_i64();
2612 /* NaN-box f[rs1] */
2613 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2615 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2616 MAXSZ(s), MAXSZ(s), t1);
2620 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2621 data = FIELD_DP32(data, VDATA, VTA, s->vta);
2622 data = FIELD_DP32(data, VDATA, VMA, s->vma);
2623 static gen_helper_vmv_vx * const fns[3] = {
2624 gen_helper_vmv_v_x_h,
2625 gen_helper_vmv_v_x_w,
2626 gen_helper_vmv_v_x_d,
2629 t1 = tcg_temp_new_i64();
2630 /* NaN-box f[rs1] */
2631 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2633 dest = tcg_temp_new_ptr();
2634 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
2635 s->cfg_ptr->vlenb, data));
2636 tcg_gen_addi_ptr(dest, tcg_env, vreg_ofs(s, a->rd));
2638 fns[s->sew - 1](dest, t1, tcg_env, desc);
2640 finalize_rvv_inst(s);
2646 /* Single-Width Floating-Point/Integer Type-Convert Instructions */
2647 #define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM) \
2648 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2650 static gen_helper_gvec_3_ptr * const fns[3] = { \
2651 gen_helper_##HELPER##_h, \
2652 gen_helper_##HELPER##_w, \
2653 gen_helper_##HELPER##_d \
2655 return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \
2658 GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN)
2659 GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN)
2660 GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN)
2661 GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN)
2662 /* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */
2663 GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ)
2664 GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ)
2666 /* Widening Floating-Point/Integer Type-Convert Instructions */
2669 * If the current SEW does not correspond to a supported IEEE floating-point
2670 * type, an illegal instruction exception is raised.
2672 static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2674 return require_rvv(s) &&
2675 vext_check_isa_ill(s) &&
2676 vext_check_ds(s, a->rd, a->rs2, a->vm);
2679 static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
2681 return opfv_widen_check(s, a) &&
2685 static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
2687 return opfv_widen_check(s, a) &&
2688 require_scale_rvfmin(s) &&
2692 #define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
2693 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2695 if (CHECK(s, a)) { \
2696 uint32_t data = 0; \
2697 static gen_helper_gvec_3_ptr * const fns[2] = { \
2698 gen_helper_##HELPER##_h, \
2699 gen_helper_##HELPER##_w, \
2701 gen_set_rm_chkfrm(s, FRM); \
2703 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2704 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2705 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2706 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2707 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2708 vreg_ofs(s, a->rs2), tcg_env, \
2709 s->cfg_ptr->vlenb, \
2710 s->cfg_ptr->vlenb, data, \
2712 finalize_rvv_inst(s); \
2718 GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
2720 GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
2722 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, opffv_widen_check, vfwcvt_f_f_v,
2724 /* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
2725 GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, opxfv_widen_check, vfwcvt_xu_f_v,
2727 GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, opxfv_widen_check, vfwcvt_x_f_v,
2730 static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
2732 return require_rvv(s) &&
2733 require_scale_rvf(s) &&
2734 vext_check_isa_ill(s) &&
2735 /* OPFV widening instructions ignore vs1 check */
2736 vext_check_ds(s, a->rd, a->rs2, a->vm);
2739 #define GEN_OPFXV_WIDEN_TRANS(NAME) \
2740 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2742 if (opfxv_widen_check(s, a)) { \
2743 uint32_t data = 0; \
2744 static gen_helper_gvec_3_ptr * const fns[3] = { \
2745 gen_helper_##NAME##_b, \
2746 gen_helper_##NAME##_h, \
2747 gen_helper_##NAME##_w, \
2749 gen_set_rm(s, RISCV_FRM_DYN); \
2751 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2752 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2753 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2754 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2755 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2756 vreg_ofs(s, a->rs2), tcg_env, \
2757 s->cfg_ptr->vlenb, \
2758 s->cfg_ptr->vlenb, data, \
2760 finalize_rvv_inst(s); \
2766 GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v)
2767 GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v)
2769 /* Narrowing Floating-Point/Integer Type-Convert Instructions */
2772 * If the current SEW does not correspond to a supported IEEE floating-point
2773 * type, an illegal instruction exception is raised.
2775 static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2777 return require_rvv(s) &&
2778 vext_check_isa_ill(s) &&
2779 /* OPFV narrowing instructions ignore vs1 check */
2780 vext_check_sd(s, a->rd, a->rs2, a->vm);
2783 static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
2785 return opfv_narrow_check(s, a) &&
2790 static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
2792 return opfv_narrow_check(s, a) &&
2793 require_scale_rvfmin(s) &&
2797 static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
2799 return opfv_narrow_check(s, a) &&
2800 require_scale_rvf(s) &&
2804 #define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
2805 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2807 if (CHECK(s, a)) { \
2808 uint32_t data = 0; \
2809 static gen_helper_gvec_3_ptr * const fns[2] = { \
2810 gen_helper_##HELPER##_h, \
2811 gen_helper_##HELPER##_w, \
2813 gen_set_rm_chkfrm(s, FRM); \
2815 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2816 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2817 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2818 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2819 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2820 vreg_ofs(s, a->rs2), tcg_env, \
2821 s->cfg_ptr->vlenb, \
2822 s->cfg_ptr->vlenb, data, \
2824 finalize_rvv_inst(s); \
2830 GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, opfxv_narrow_check, vfncvt_f_xu_w,
2832 GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
2834 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
2836 /* Reuse the helper function from vfncvt.f.f.w */
2837 GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w,
2840 static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
2842 return require_rvv(s) &&
2843 require_scale_rvf(s) &&
2844 vext_check_isa_ill(s) &&
2845 /* OPFV narrowing instructions ignore vs1 check */
2846 vext_check_sd(s, a->rd, a->rs2, a->vm);
2849 #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
2850 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2852 if (opxfv_narrow_check(s, a)) { \
2853 uint32_t data = 0; \
2854 static gen_helper_gvec_3_ptr * const fns[3] = { \
2855 gen_helper_##HELPER##_b, \
2856 gen_helper_##HELPER##_h, \
2857 gen_helper_##HELPER##_w, \
2859 gen_set_rm_chkfrm(s, FRM); \
2861 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2862 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2863 data = FIELD_DP32(data, VDATA, VTA, s->vta); \
2864 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
2865 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2866 vreg_ofs(s, a->rs2), tcg_env, \
2867 s->cfg_ptr->vlenb, \
2868 s->cfg_ptr->vlenb, data, \
2870 finalize_rvv_inst(s); \
2876 GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN)
2877 GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN)
2878 /* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */
2879 GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ)
2880 GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ)
2883 *** Vector Reduction Operations
2885 /* Vector Single-Width Integer Reduction Instructions */
2886 static bool reduction_check(DisasContext *s, arg_rmrr *a)
2888 return require_rvv(s) &&
2889 vext_check_isa_ill(s) &&
2890 vext_check_reduction(s, a->rs2);
2893 GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
2894 GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
2895 GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
2896 GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
2897 GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
2898 GEN_OPIVV_TRANS(vredand_vs, reduction_check)
2899 GEN_OPIVV_TRANS(vredor_vs, reduction_check)
2900 GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
2902 /* Vector Widening Integer Reduction Instructions */
2903 static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
2905 return reduction_check(s, a) && (s->sew < MO_64) &&
2906 ((s->sew + 1) <= (s->cfg_ptr->elen >> 4));
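/*
 * The shift trick: the widened EEW is 8 << (sew + 1), so requiring
 * (sew + 1) <= elen >> 4 permits SEW=8/16 when ELEN=32 (32 >> 4 = 2).
 * For ELEN=64 (64 >> 4 = 4) it is the explicit s->sew < MO_64 check
 * that caps the widened EEW at 64.
 */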
2909 GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
2910 GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
2912 /* Vector Single-Width Floating-Point Reduction Instructions */
2913 static bool freduction_check(DisasContext *s, arg_rmrr *a)
2915 return reduction_check(s, a) &&
2919 GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
2920 GEN_OPFVV_TRANS(vfredosum_vs, freduction_check)
2921 GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
2922 GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
2924 /* Vector Widening Floating-Point Reduction Instructions */
2925 static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
2927 return reduction_widen_check(s, a) &&
2928 require_scale_rvf(s) &&
2932 GEN_OPFVV_WIDEN_TRANS(vfwredusum_vs, freduction_widen_check)
2933 GEN_OPFVV_WIDEN_TRANS(vfwredosum_vs, freduction_widen_check)
2936 *** Vector Mask Operations
2939 /* Vector Mask-Register Logical Instructions */
2940 #define GEN_MM_TRANS(NAME) \
2941 static bool trans_##NAME(DisasContext *s, arg_r *a) \
2943 if (require_rvv(s) && \
2944 vext_check_isa_ill(s)) { \
2945 uint32_t data = 0; \
2946 gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
2948 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2949 data = \
2950 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
2951 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2952 vreg_ofs(s, a->rs1), \
2953 vreg_ofs(s, a->rs2), tcg_env, \
2954 s->cfg_ptr->vlenb, \
2955 s->cfg_ptr->vlenb, data, fn); \
2956 finalize_rvv_inst(s); \
2962 GEN_MM_TRANS(vmand_mm)
2963 GEN_MM_TRANS(vmnand_mm)
2964 GEN_MM_TRANS(vmandn_mm)
2965 GEN_MM_TRANS(vmxor_mm)
2966 GEN_MM_TRANS(vmor_mm)
2967 GEN_MM_TRANS(vmnor_mm)
2968 GEN_MM_TRANS(vmorn_mm)
2969 GEN_MM_TRANS(vmxnor_mm)
2971 /* Vector count population in mask vcpop */
2972 static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
2974 if (require_rvv(s) &&
2975 vext_check_isa_ill(s) &&
2976 s->vstart_eq_zero) {
2977 TCGv_ptr src2, mask;
2981 data = FIELD_DP32(data, VDATA, VM, a->vm);
2982 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2984 mask = tcg_temp_new_ptr();
2985 src2 = tcg_temp_new_ptr();
2986 dst = dest_gpr(s, a->rd);
2987 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
2988 s->cfg_ptr->vlenb, data));
2990 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
2991 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
2993 gen_helper_vcpop_m(dst, mask, src2, tcg_env, desc);
2994 gen_set_gpr(s, a->rd, dst);
3000 /* vfirst find-first-set mask bit */
3001 static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
3003 if (require_rvv(s) &&
3004 vext_check_isa_ill(s) &&
3005 s->vstart_eq_zero) {
3006 TCGv_ptr src2, mask;
3010 data = FIELD_DP32(data, VDATA, VM, a->vm);
3011 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3013 mask = tcg_temp_new_ptr();
3014 src2 = tcg_temp_new_ptr();
3015 dst = dest_gpr(s, a->rd);
3016 desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlenb,
3017 s->cfg_ptr->vlenb, data));
3019 tcg_gen_addi_ptr(src2, tcg_env, vreg_ofs(s, a->rs2));
3020 tcg_gen_addi_ptr(mask, tcg_env, vreg_ofs(s, 0));
3022 gen_helper_vfirst_m(dst, mask, src2, tcg_env, desc);
3023 gen_set_gpr(s, a->rd, dst);
3030 * vmsbf.m set-before-first mask bit
3031 * vmsif.m set-including-first mask bit
3032 * vmsof.m set-only-first mask bit
3034 #define GEN_M_TRANS(NAME) \
3035 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3037 if (require_rvv(s) && \
3038 vext_check_isa_ill(s) && \
3039 require_vm(a->vm, a->rd) && \
3040 (a->rd != a->rs2) && \
3041 s->vstart_eq_zero) { \
3042 uint32_t data = 0; \
3043 gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \
3045 data = FIELD_DP32(data, VDATA, VM, a->vm); \
3046 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
3047 data = \
3048 FIELD_DP32(data, VDATA, VTA_ALL_1S, s->cfg_vta_all_1s);\
3049 data = FIELD_DP32(data, VDATA, VMA, s->vma); \
3050 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
3051 vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
3052 tcg_env, s->cfg_ptr->vlenb, \
3053 s->cfg_ptr->vlenb, \
3055 finalize_rvv_inst(s); \
3061 GEN_M_TRANS(vmsbf_m)
3062 GEN_M_TRANS(vmsif_m)
3063 GEN_M_TRANS(vmsof_m)
3066 * Vector Iota Instruction
3068 * 1. The destination register cannot overlap the source register.
3069 * 2. If masked, cannot overlap the mask register ('v0').
3070 * 3. An illegal instruction exception is raised if vstart is non-zero.
3072 static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
3074 if (require_rvv(s) &&
3075 vext_check_isa_ill(s) &&
3076 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
3077 require_vm(a->vm, a->rd) &&
3078 require_align(a->rd, s->lmul) &&
3079 s->vstart_eq_zero) {
3082 data = FIELD_DP32(data, VDATA, VM, a->vm);
3083 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3084 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3085 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3086 static gen_helper_gvec_3_ptr * const fns[4] = {
3087 gen_helper_viota_m_b, gen_helper_viota_m_h,
3088 gen_helper_viota_m_w, gen_helper_viota_m_d,
3090 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3091 vreg_ofs(s, a->rs2), tcg_env,
3093 s->cfg_ptr->vlenb, data, fns[s->sew]);
3094 finalize_rvv_inst(s);
3100 /* Vector Element Index Instruction */
3101 static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
3103 if (require_rvv(s) &&
3104 vext_check_isa_ill(s) &&
3105 require_align(a->rd, s->lmul) &&
3106 require_vm(a->vm, a->rd)) {
3109 data = FIELD_DP32(data, VDATA, VM, a->vm);
3110 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3111 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3112 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3113 static gen_helper_gvec_2_ptr * const fns[4] = {
3114 gen_helper_vid_v_b, gen_helper_vid_v_h,
3115 gen_helper_vid_v_w, gen_helper_vid_v_d,
3117 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3118 tcg_env, s->cfg_ptr->vlenb,
3121 finalize_rvv_inst(s);
3128 *** Vector Permutation Instructions
3131 static void load_element(TCGv_i64 dest, TCGv_ptr base,
3132 int ofs, int sew, bool sign)
3137 tcg_gen_ld8u_i64(dest, base, ofs);
3139 tcg_gen_ld8s_i64(dest, base, ofs);
3144 tcg_gen_ld16u_i64(dest, base, ofs);
3146 tcg_gen_ld16s_i64(dest, base, ofs);
3151 tcg_gen_ld32u_i64(dest, base, ofs);
3153 tcg_gen_ld32s_i64(dest, base, ofs);
3157 tcg_gen_ld_i64(dest, base, ofs);
3160 g_assert_not_reached();
3165 /* byte offset of element idx within vector register r */
3166 static uint32_t endian_ofs(DisasContext *s, int r, int idx)
3169 return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
3171 return vreg_ofs(s, r) + (idx << s->sew);
3175 /* adjust the index according to host endianness */
3176 static void endian_adjust(TCGv_i32 ofs, int sew)
3179 tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
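/*
 * Worked example: on a big-endian host, element idx of size 1 << sew
 * within each 64-bit unit sits at byte offset ((idx ^ (7 >> sew)) << sew);
 * e.g. for sew == MO_16, element 0 is at byte 6 ((0 ^ 3) << 1).
 * Little-endian hosts use idx << sew directly.
 */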
3183 /* Load idx >= VLMAX ? 0 : vreg[idx] */
3184 static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
3185 int vreg, TCGv idx, int vlmax)
3187 TCGv_i32 ofs = tcg_temp_new_i32();
3188 TCGv_ptr base = tcg_temp_new_ptr();
3189 TCGv_i64 t_idx = tcg_temp_new_i64();
3190 TCGv_i64 t_vlmax, t_zero;
3193 * Mask the index to the length so that we do
3194 * not produce an out-of-range load.
3196 tcg_gen_trunc_tl_i32(ofs, idx);
3197 tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
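/* vlmax is a power of two, so vlmax - 1 is an exact index mask. */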
3199 /* Convert the index to an offset. */
3200 endian_adjust(ofs, s->sew);
3201 tcg_gen_shli_i32(ofs, ofs, s->sew);
3203 /* Convert the index to a pointer. */
3204 tcg_gen_ext_i32_ptr(base, ofs);
3205 tcg_gen_add_ptr(base, base, tcg_env);
3207 /* Perform the load. */
3208 load_element(dest, base,
3209 vreg_ofs(s, vreg), s->sew, false);
3211 /* Flush out-of-range indexing to zero. */
3212 t_vlmax = tcg_constant_i64(vlmax);
3213 t_zero = tcg_constant_i64(0);
3214 tcg_gen_extu_tl_i64(t_idx, idx);
3216 tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
3217 t_vlmax, dest, t_zero);
3220 static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
3221 int vreg, int idx, bool sign)
3223 load_element(dest, tcg_env, endian_ofs(s, vreg, idx), s->sew, sign);
3226 /* Integer Scalar Move Instruction */
3228 static void store_element(TCGv_i64 val, TCGv_ptr base,
3233 tcg_gen_st8_i64(val, base, ofs);
3236 tcg_gen_st16_i64(val, base, ofs);
3239 tcg_gen_st32_i64(val, base, ofs);
3242 tcg_gen_st_i64(val, base, ofs);
3245 g_assert_not_reached();
3251 * Store vreg[idx] = val.
3252 * The index must be in the range [0, VLMAX).
3254 static void vec_element_storei(DisasContext *s, int vreg,
3255 int idx, TCGv_i64 val)
3257 store_element(val, tcg_env, endian_ofs(s, vreg, idx), s->sew);
3260 /* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
3261 static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
3263 if (require_rvv(s) &&
3264 vext_check_isa_ill(s)) {
3268 t1 = tcg_temp_new_i64();
3269 dest = tcg_temp_new();
3271 * load vreg and sign-extend to 64 bits,
3272 * then truncate to XLEN bits before storing to gpr.
3274 vec_element_loadi(s, t1, a->rs2, 0, true);
3275 tcg_gen_trunc_i64_tl(dest, t1);
3276 gen_set_gpr(s, a->rd, dest);
3277 tcg_gen_movi_tl(cpu_vstart, 0);
3278 finalize_rvv_inst(s);
3284 /* vmv.s.x vd, rs1 # vd[0] = rs1 */
3285 static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
3287 if (require_rvv(s) &&
3288 vext_check_isa_ill(s)) {
3289 /* This instruction ignores LMUL and vector register groups */
3292 TCGLabel *over = gen_new_label();
3294 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3296 t1 = tcg_temp_new_i64();
3299 * load gpr and sign-extend to 64 bits,
3300 * then truncate to SEW bits when storing to vreg.
3302 s1 = get_gpr(s, a->rs1, EXT_NONE);
3303 tcg_gen_ext_tl_i64(t1, s1);
3304 vec_element_storei(s, a->rd, 0, t1);
3305 gen_set_label(over);
3306 tcg_gen_movi_tl(cpu_vstart, 0);
3307 finalize_rvv_inst(s);
3313 /* Floating-Point Scalar Move Instructions */
3314 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
3316 if (require_rvv(s) &&
3318 vext_check_isa_ill(s)) {
3319 gen_set_rm(s, RISCV_FRM_DYN);
3321 unsigned int ofs = (8 << s->sew);
3322 unsigned int len = 64 - ofs;
3325 vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
3326 /* NaN-box f[rd] as necessary for SEW */
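/* e.g. SEW=16: ofs = 16, len = 48, so bits [63:16] are set to ones */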
3328 t_nan = tcg_constant_i64(UINT64_MAX);
3329 tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
3334 tcg_gen_movi_tl(cpu_vstart, 0);
3335 finalize_rvv_inst(s);
3341 /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
3342 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
3344 if (require_rvv(s) &&
3346 vext_check_isa_ill(s)) {
3347 gen_set_rm(s, RISCV_FRM_DYN);
3349 /* This instruction ignores LMUL and vector register groups. */
3351 TCGLabel *over = gen_new_label();
3353 /* if vstart >= vl, skip vector register write back */
3354 tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
3356 /* NaN-box f[rs1] */
3357 t1 = tcg_temp_new_i64();
3358 do_nanbox(s, t1, cpu_fpr[a->rs1]);
3360 vec_element_storei(s, a->rd, 0, t1);
3362 gen_set_label(over);
3363 tcg_gen_movi_tl(cpu_vstart, 0);
3364 finalize_rvv_inst(s);
3370 /* Vector Slide Instructions */
3371 static bool slideup_check(DisasContext *s, arg_rmrr *a)
3373 return require_rvv(s) &&
3374 vext_check_isa_ill(s) &&
3375 vext_check_slide(s, a->rd, a->rs2, a->vm, true);
3378 GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
3379 GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
3380 GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
3382 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
3384 return require_rvv(s) &&
3385 vext_check_isa_ill(s) &&
3386 vext_check_slide(s, a->rd, a->rs2, a->vm, false);
3389 GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
3390 GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
3391 GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
3393 /* Vector Floating-Point Slide Instructions */
3394 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
3396 return slideup_check(s, a) &&
3400 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
3402 return slidedown_check(s, a) &&
3406 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
3407 GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)
3409 /* Vector Register Gather Instruction */
3410 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
3412 return require_rvv(s) &&
3413 vext_check_isa_ill(s) &&
3414 require_align(a->rd, s->lmul) &&
3415 require_align(a->rs1, s->lmul) &&
3416 require_align(a->rs2, s->lmul) &&
3417 (a->rd != a->rs2 && a->rd != a->rs1) &&
3418 require_vm(a->vm, a->rd);
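/*
 * vrgatherei16 (checked below) always uses EEW=16 indices, so the
 * index operand has its own EMUL = (16 / SEW) * LMUL, i.e.
 * emul = MO_16 - sew + lmul in log2 terms. E.g. SEW=64, LMUL=1
 * gives emul = -2, an EMUL of 1/4.
 */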
3421 static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
3423 int8_t emul = MO_16 - s->sew + s->lmul;
3424 return require_rvv(s) &&
3425 vext_check_isa_ill(s) &&
3426 (emul >= -3 && emul <= 3) &&
3427 require_align(a->rd, s->lmul) &&
3428 require_align(a->rs1, emul) &&
3429 require_align(a->rs2, s->lmul) &&
3430 (a->rd != a->rs2 && a->rd != a->rs1) &&
3431 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3432 a->rs1, 1 << MAX(emul, 0)) &&
3433 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3434 a->rs2, 1 << MAX(s->lmul, 0)) &&
3435 require_vm(a->vm, a->rd);
3438 GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
3439 GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check)
3441 static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
3443 return require_rvv(s) &&
3444 vext_check_isa_ill(s) &&
3445 require_align(a->rd, s->lmul) &&
3446 require_align(a->rs2, s->lmul) &&
3447 (a->rd != a->rs2) &&
3448 require_vm(a->vm, a->rd);
3451 /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
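/*
 * Fast path below: when unmasked and vl == VLMAX, every destination
 * element receives the same value vs2[x[rs1]], so the translation is
 * a single element load followed by a gvec splat (dup).
 */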
3452 static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
3454 if (!vrgather_vx_check(s, a)) {
3458 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
3459 int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul);
3460 TCGv_i64 dest = tcg_temp_new_i64();
3463 vec_element_loadi(s, dest, a->rs2, 0, false);
3465 vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
3468 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
3469 MAXSZ(s), MAXSZ(s), dest);
3470 finalize_rvv_inst(s);
3472 static gen_helper_opivx * const fns[4] = {
3473 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3474 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3476 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
3481 /* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
3482 static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
3484 if (!vrgather_vx_check(s, a)) {
3488 if (a->vm && s->vl_eq_vlmax && !(s->vta && s->lmul < 0)) {
3489 int vlmax = vext_get_vlmax(s->cfg_ptr->vlenb, s->sew, s->lmul);
3490 if (a->rs1 >= vlmax) {
3491 tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
3492 MAXSZ(s), MAXSZ(s), 0);
3494 tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
3495 endian_ofs(s, a->rs2, a->rs1),
3496 MAXSZ(s), MAXSZ(s));
3498 finalize_rvv_inst(s);
3500 static gen_helper_opivx * const fns[4] = {
3501 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3502 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3504 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
3511 * Vector Compress Instruction
3513 * The destination vector register group cannot overlap the
3514 * source vector register group or the source mask register.
3516 static bool vcompress_vm_check(DisasContext *s, arg_r *a)
3518 return require_rvv(s) &&
3519 vext_check_isa_ill(s) &&
3520 require_align(a->rd, s->lmul) &&
3521 require_align(a->rs2, s->lmul) &&
3522 (a->rd != a->rs2) &&
3523 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1) &&
3527 static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
3529 if (vcompress_vm_check(s, a)) {
3531 static gen_helper_gvec_4_ptr * const fns[4] = {
3532 gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
3533 gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
3536 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3537 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3538 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3539 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
3540 tcg_env, s->cfg_ptr->vlenb,
3541 s->cfg_ptr->vlenb, data,
3543 finalize_rvv_inst(s);
3550 * Whole Vector Register Move Instructions depend on the vtype register (vsew).
3551 * Thus, we need to check the vill bit. (Section 16.6)
3553 #define GEN_VMV_WHOLE_TRANS(NAME, LEN) \
3554 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
3556 if (require_rvv(s) && \
3557 vext_check_isa_ill(s) && \
3558 QEMU_IS_ALIGNED(a->rd, LEN) && \
3559 QEMU_IS_ALIGNED(a->rs2, LEN)) { \
3560 uint32_t maxsz = s->cfg_ptr->vlenb * LEN; \
3561 if (s->vstart_eq_zero) { \
3562 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd), \
3563 vreg_ofs(s, a->rs2), maxsz, maxsz); \
3565 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2), \
3566 tcg_env, maxsz, maxsz, 0, gen_helper_vmvr_v); \
3568 finalize_rvv_inst(s); \
3574 GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
3575 GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
3576 GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
3577 GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
3579 static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
3581 uint8_t from = (s->sew + 3) - div;
3582 bool ret = require_rvv(s) &&
3583 (from >= 3 && from <= 8) &&
3584 (a->rd != a->rs2) &&
3585 require_align(a->rd, s->lmul) &&
3586 require_align(a->rs2, s->lmul - div) &&
3587 require_vm(a->vm, a->rd) &&
3588 require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
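/*
 * 'from' is log2 of the source EEW in bits: e.g. vzext.vf4 (div = 2)
 * at SEW=32 gives from = (2 + 3) - 2 = 3, i.e. a source EEW of 8.
 * The source operand uses EMUL = LMUL / (1 << div), hence the
 * lmul - div alignment and overlap checks above.
 */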
3592 static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
3595 gen_helper_gvec_3_ptr *fn;
3597 static gen_helper_gvec_3_ptr * const fns[6][4] = {
3599 NULL, gen_helper_vzext_vf2_h,
3600 gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
3604 gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
3608 NULL, gen_helper_vzext_vf8_d
3611 NULL, gen_helper_vsext_vf2_h,
3612 gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
3616 gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
3620 NULL, gen_helper_vsext_vf8_d
3624 fn = fns[seq][s->sew];
3629 data = FIELD_DP32(data, VDATA, VM, a->vm);
3630 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3631 data = FIELD_DP32(data, VDATA, VTA, s->vta);
3632 data = FIELD_DP32(data, VDATA, VMA, s->vma);
3634 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3635 vreg_ofs(s, a->rs2), tcg_env,
3637 s->cfg_ptr->vlenb, data, fn);
3639 finalize_rvv_inst(s);
3643 /* Vector Integer Extension */
3644 #define GEN_INT_EXT_TRANS(NAME, DIV, SEQ) \
3645 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3647 if (int_ext_check(s, a, DIV)) { \
3648 return int_ext_op(s, a, SEQ); \
3653 GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
3654 GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
3655 GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
3656 GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
3657 GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
3658 GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)