2 * RISC-V translation routines for the RVV Standard Extension.
4 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
18 #include "tcg/tcg-op-gvec.h"
19 #include "tcg/tcg-gvec-desc.h"
20 #include "internals.h"
22 static inline bool is_overlapped(const int8_t astart, int8_t asize,
23 const int8_t bstart, int8_t bsize)
25 const int8_t aend = astart + asize;
26 const int8_t bend = bstart + bsize;
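    /*
     * Two register groups are disjoint iff the span covering both is at
     * least as large as the sum of their sizes.  E.g. astart = 8, asize = 4
     * (v8..v11) and bstart = 10, bsize = 2 (v10..v11):
     * MAX(12, 12) - MIN(8, 10) = 4 < 4 + 2, so the groups overlap.
     */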
28 return MAX(aend, bend) - MIN(astart, bstart) < asize + bsize;
31 static bool require_rvv(DisasContext *s)
33 return s->mstatus_vs != 0;
36 static bool require_rvf(DisasContext *s)
38 if (s->mstatus_fs == 0) {
45 return has_ext(s, RVF);
47 return has_ext(s, RVD);
53 static bool require_scale_rvf(DisasContext *s)
55 if (s->mstatus_fs == 0) {
62 return has_ext(s, RVF);
64 return has_ext(s, RVD);
70 /* Destination vector register group cannot overlap source mask register. */
71 static bool require_vm(int vm, int vd)
73 return (vm != 0 || vd != 0);
76 static bool require_nf(int vd, int nf, int lmul)
78 int size = nf << MAX(lmul, 0);
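    /*
     * size is the number of registers covered by the whole NFIELDS group,
     * e.g. nf = 4 with lmul = 1 (LMUL = 2) gives size = 8, so the group
     * may not span more than 8 registers and vd may be at most v24.
     */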
79 return size <= 8 && vd + size <= 32;
83 * Vector register should be aligned with the passed-in LMUL (EMUL).
84 * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
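 * E.g. with LMUL = 4 (lmul = 2), only register numbers that are multiples
 * of 4 pass the check.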
86 static bool require_align(const int8_t val, const int8_t lmul)
88 return lmul <= 0 || extract32(val, 0, lmul) == 0;
92 * A destination vector register group can overlap a source vector
93 * register group only if one of the following holds:
94 * 1. The destination EEW equals the source EEW.
95 * 2. The destination EEW is smaller than the source EEW and the overlap
96 * is in the lowest-numbered part of the source register group.
97 * 3. The destination EEW is greater than the source EEW, the source EMUL
98 * is at least 1, and the overlap is in the highest-numbered part of
99 * the destination register group.
102 * This function returns true if one of the following holds:
103 * * Destination vector register group does not overlap a source vector
106 * For rule 1, overlap is allowed so this function doesn't need to be called.
107 * For rule 2, the only permitted overlap is (vd == vs); the caller has to
108 * check that (vd != vs) before calling this function.
110 static bool require_noover(const int8_t dst, const int8_t dst_lmul,
111 const int8_t src, const int8_t src_lmul)
113 int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
114 int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;
116 /* Destination EEW is greater than the source EEW, check rule 3. */
117 if (dst_size > src_size) {
120 is_overlapped(dst, dst_size, src, src_size) &&
121 !is_overlapped(dst, dst_size, src + src_size, src_size)) {
126 return !is_overlapped(dst, dst_size, src, src_size);
129 static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
133 if (!require_rvv(s) || !has_ext(s, RVV)) {
137 dst = dest_gpr(s, rd);
139 if (rd == 0 && rs1 == 0) {
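        /* rd = x0, rs1 = x0: keep the existing vl and only update vtype. */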
141 tcg_gen_mov_tl(s1, cpu_vl);
142 } else if (rs1 == 0) {
143 /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
144 s1 = tcg_constant_tl(RV_VLEN_MAX);
146 s1 = get_gpr(s, rs1, EXT_ZERO);
149 gen_helper_vsetvl(dst, cpu_env, s1, s2);
150 gen_set_gpr(s, rd, dst);
153 tcg_gen_movi_tl(cpu_pc, s->pc_succ_insn);
154 tcg_gen_lookup_and_goto_ptr();
155 s->base.is_jmp = DISAS_NORETURN;
157 if (rd == 0 && rs1 == 0) {
164 static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
166 TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
167 return do_vsetvl(s, a->rd, a->rs1, s2);
170 static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
172 TCGv s2 = tcg_constant_tl(a->zimm);
173 return do_vsetvl(s, a->rd, a->rs1, s2);
176 /* vector register offset from env */
177 static uint32_t vreg_ofs(DisasContext *s, int reg)
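    /*
     * Each vector register takes s->vlen / 8 bytes in the vreg array,
     * e.g. with VLEN = 128 bits, v3 starts at byte offset 3 * 16 = 48.
     */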
179 return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
182 /* check functions */
185 * Vector unit-stride, strided, unit-stride segment, strided segment
186 * store check function.
188 * Rules to be checked here:
189 * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
190 * 2. Destination vector register number is a multiple of EMUL.
191 * (Section 3.4.2, 7.3)
192 * 3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
193 * 4. Vector register numbers accessed by the segment load or store
194 * cannot increment past 31. (Section 7.8)
196 static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
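    /*
     * eew, s->sew and s->lmul are log2 values (MO_8..MO_64 map to 0..3,
     * LMUL 1/8..8 maps to -3..3), so e.g. EEW = 8, SEW = 32, LMUL = 1
     * gives emul = 0 - 2 + 0 = -2, i.e. EMUL = 1/4.
     */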
198 int8_t emul = eew - s->sew + s->lmul;
199 return (emul >= -3 && emul <= 3) &&
200 require_align(vd, emul) &&
201 require_nf(vd, nf, emul);
205 * Vector unit-stride, strided, unit-stride segment, strided segment
206 * load check function.
208 * Rules to be checked here:
209 * 1. All rules that apply to store instructions also apply
210 * to load instructions.
211 * 2. Destination vector register group for a masked vector
212 * instruction cannot overlap the source mask register (v0).
215 static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
218 return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
222 * Vector indexed, indexed segment store check function.
224 * Rules to be checked here:
225 * 1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
226 * 2. Index vector register number is a multiple of EMUL.
227 * (Section 3.4.2, 7.3)
228 * 3. Destination vector register number is a multiple of LMUL.
229 * (Section 3.4.2, 7.3)
230 * 4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
231 * 5. Vector register numbers accessed by the segment load or store
232 * cannot increment past 31. (Section 7.8)
234 static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
237 int8_t emul = eew - s->sew + s->lmul;
238 return (emul >= -3 && emul <= 3) &&
239 require_align(vs2, emul) &&
240 require_align(vd, s->lmul) &&
241 require_nf(vd, nf, s->lmul);
245 * Vector indexed, indexed segment load check function.
247 * Rules to be checked here:
248 * 1. All rules that apply to store instructions also apply
249 * to load instructions.
250 * 2. Destination vector register group for a masked vector
251 * instruction cannot overlap the source mask register (v0).
253 * 3. Destination vector register cannot overlap a source vector
254 * register (vs2) group.
256 * 4. Destination vector register groups cannot overlap
257 * the source vector register (vs2) group for
258 * indexed segment load instructions. (Section 7.8.3)
260 static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
261 int nf, int vm, uint8_t eew)
264 int8_t emul = eew - s->sew + s->lmul;
265 bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
268 /* Each segment register group has to follow overlap rules. */
269 for (int i = 0; i < nf; ++i) {
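        /*
         * The i-th field's register group starts MAX(LMUL, 1) registers
         * after the previous one.
         */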
270 seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;
274 ret &= require_noover(seg_vd, s->lmul, vs2, emul);
276 } else if (eew < s->sew) {
277 ret &= require_noover(seg_vd, s->lmul, vs2, emul);
281 * Destination vector register groups cannot overlap
282 * the source vector register (vs2) group for
283 * indexed segment load instructions.
286 ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
287 vs2, 1 << MAX(emul, 0));
293 static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
295 return require_vm(vm, vd) &&
296 require_align(vd, s->lmul) &&
297 require_align(vs, s->lmul);
301 * Check function for vector instruction with format:
302 * single-width result and single-width sources (SEW = SEW op SEW)
304 * Rules to be checked here:
305 * 1. Destination vector register group for a masked vector
306 * instruction cannot overlap the source mask register (v0).
308 * 2. Destination vector register number is a multiple of LMUL.
310 * 3. Source (vs2, vs1) vector register numbers are multiples of LMUL.
313 static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
315 return vext_check_ss(s, vd, vs2, vm) &&
316 require_align(vs1, s->lmul);
319 static bool vext_check_ms(DisasContext *s, int vd, int vs)
321 bool ret = require_align(vs, s->lmul);
323 ret &= require_noover(vd, 0, vs, s->lmul);
329 * Check function for maskable vector instruction with format:
330 * single-width result and single-width sources (SEW = SEW op SEW)
332 * Rules to be checked here:
333 * 1. Source (vs2, vs1) vector register numbers are multiples of LMUL.
335 * 2. Destination vector register cannot overlap a source vector
336 * register (vs2, vs1) group.
338 * 3. The destination vector register group for a masked vector
339 * instruction cannot overlap the source mask register (v0),
340 * unless the destination vector register is being written
341 * with a mask value (e.g., comparisons) or the scalar result
342 * of a reduction. (Section 5.3)
344 static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
346 bool ret = vext_check_ms(s, vd, vs2) &&
347 require_align(vs1, s->lmul);
349 ret &= require_noover(vd, 0, vs1, s->lmul);
355 * Common check function for vector widening instructions
356 * of double-width result (2*SEW).
358 * Rules to be checked here:
359 * 1. The largest vector register group used by an instruction
360 * cannot be greater than 8 vector registers (Section 5.2):
363 * 2. Destination vector register number is a multiple of 2 * LMUL.
365 * 3. Destination vector register group for a masked vector
366 * instruction cannot overlap the source mask register (v0).
369 static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
371 return (s->lmul <= 2) &&
373 require_align(vd, s->lmul + 1) &&
378 * Common check function for vector narrowing instructions
379 * of single-width result (SEW) and double-width source (2*SEW).
381 * Rules to be checked here:
382 * 1. The largest vector register group used by an instruction
383 * cannot be greater than 8 vector registers (Section 5.2):
386 * 2. Source vector register number is a multiple of 2 * LMUL.
388 * 3. Destination vector register number is a multiple of LMUL.
390 * 4. Destination vector register group for a masked vector
391 * instruction cannot overlap the source mask register (v0).
394 static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
397 return (s->lmul <= 2) &&
399 require_align(vs2, s->lmul + 1) &&
400 require_align(vd, s->lmul) &&
404 static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
406 return vext_wide_check_common(s, vd, vm) &&
407 require_align(vs, s->lmul) &&
408 require_noover(vd, s->lmul + 1, vs, s->lmul);
411 static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
413 return vext_wide_check_common(s, vd, vm) &&
414 require_align(vs, s->lmul + 1);
418 * Check function for vector instruction with format:
419 * double-width result and single-width sources (2*SEW = SEW op SEW)
421 * Rules to be checked here:
422 * 1. All rules defined in the widen common rules apply.
423 * 2. Source (vs2, vs1) vector register numbers are multiples of LMUL.
425 * 3. Destination vector register cannot overlap a source vector
426 * register (vs2, vs1) group.
429 static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
431 return vext_check_ds(s, vd, vs2, vm) &&
432 require_align(vs1, s->lmul) &&
433 require_noover(vd, s->lmul + 1, vs1, s->lmul);
437 * Check function for vector instruction with format:
438 * double-width result and double-width source1 and single-width
439 * source2 (2*SEW = 2*SEW op SEW)
441 * Rules to be checked here:
442 * 1. All rules defined in the widen common rules apply.
443 * 2. Source 1 (vs2) vector register number is a multiple of 2 * LMUL.
445 * 3. Source 2 (vs1) vector register number is a multiple of LMUL.
447 * 4. Destination vector register cannot overlap a source vector
448 * register (vs1) group.
451 static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
453 return vext_check_ds(s, vd, vs1, vm) &&
454 require_align(vs2, s->lmul + 1);
457 static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
459 bool ret = vext_narrow_check_common(s, vd, vs, vm);
461 ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
467 * Check function for vector instruction with format:
468 * single-width result and double-width source 1 and single-width
469 * source 2 (SEW = 2*SEW op SEW)
471 * Rules to be checked here:
472 * 1. All rules defined in the narrow common rules apply.
473 * 2. Destination vector register cannot overlap a source vector
474 * register (vs2) group.
476 * 3. Source 2 (vs1) vector register number is a multiple of LMUL.
479 static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
481 return vext_check_sd(s, vd, vs2, vm) &&
482 require_align(vs1, s->lmul);
486 * Check function for vector reduction instructions.
488 * Rules to be checked here:
489 * 1. Source 1 (vs2) vector register number is a multiple of LMUL.
492 static bool vext_check_reduction(DisasContext *s, int vs2)
494 return require_align(vs2, s->lmul);
498 * Check function for vector slide instructions.
500 * Rules to be checked here:
501 * 1. Source 1 (vs2) vector register number is a multiple of LMUL.
503 * 2. Destination vector register number is a multiple of LMUL.
505 * 3. Destination vector register group for a masked vector
506 * instruction cannot overlap the source mask register (v0).
508 * 4. The destination vector register group for vslideup, vslide1up,
509 * vfslide1up, cannot overlap the source vector register (vs2) group.
510 * (Section 5.2, 16.3.1, 16.3.3)
512 static bool vext_check_slide(DisasContext *s, int vd, int vs2,
513 int vm, bool is_over)
515 bool ret = require_align(vs2, s->lmul) &&
516 require_align(vd, s->lmul) &&
525 * cpu_get_tb_cpu_state() sets VILL if RVV is not present,
526 * so RVV is effectively also checked by this function.
528 static bool vext_check_isa_ill(DisasContext *s)
533 /* common translation macro */
534 #define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK) \
535 static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
537 if (CHECK(s, a, EEW)) { \
538 return OP(s, a, EEW); \
543 static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
545 int8_t emul = eew - s->sew + s->lmul;
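    /*
     * E.g. EEW = 64, SEW = 8, LMUL = 1 gives emul = 3, i.e. EMUL = 8.
     * Negative (fractional) results are clamped to 0, i.e. a single register.
     */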
546 return emul < 0 ? 0 : emul;
550 *** unit stride load and store
552 typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
555 static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
556 gen_helper_ldst_us *fn, DisasContext *s,
563 TCGLabel *over = gen_new_label();
564 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
566 dest = tcg_temp_new_ptr();
567 mask = tcg_temp_new_ptr();
568 base = get_gpr(s, rs1, EXT_NONE);
571 * simd_desc supports at most 256 bytes, but in this implementation the
572 * max vector group length is 2048 bytes, so split it into two parts.
574 * The first part is vlen in bytes, encoded in maxsz of simd_desc.
575 * The second part is lmul, encoded in data of simd_desc.
577 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
579 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
580 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
582 fn(dest, mask, base, cpu_env, desc);
584 tcg_temp_free_ptr(dest);
585 tcg_temp_free_ptr(mask);
595 static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
598 gen_helper_ldst_us *fn;
599 static gen_helper_ldst_us * const fns[2][4] = {
600 /* masked unit stride load */
601 { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
602 gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
603 /* unmasked unit stride load */
604 { gen_helper_vle8_v, gen_helper_vle16_v,
605 gen_helper_vle32_v, gen_helper_vle64_v }
608 fn = fns[a->vm][eew];
614 * Vector load/store instructions have the EEW encoded
615 * directly in the instructions. The maximum vector size is
616 * calculated with EMUL rather than LMUL.
618 uint8_t emul = vext_get_emul(s, eew);
619 data = FIELD_DP32(data, VDATA, VM, a->vm);
620 data = FIELD_DP32(data, VDATA, LMUL, emul);
621 data = FIELD_DP32(data, VDATA, NF, a->nf);
622 return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
625 static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
627 return require_rvv(s) &&
628 vext_check_isa_ill(s) &&
629 vext_check_load(s, a->rd, a->nf, a->vm, eew);
632 GEN_VEXT_TRANS(vle8_v, MO_8, r2nfvm, ld_us_op, ld_us_check)
633 GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
634 GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
635 GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)
637 static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
640 gen_helper_ldst_us *fn;
641 static gen_helper_ldst_us * const fns[2][4] = {
642 /* masked unit stride store */
643 { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
644 gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
645 /* unmasked unit stride store */
646 { gen_helper_vse8_v, gen_helper_vse16_v,
647 gen_helper_vse32_v, gen_helper_vse64_v }
650 fn = fns[a->vm][eew];
655 uint8_t emul = vext_get_emul(s, eew);
656 data = FIELD_DP32(data, VDATA, VM, a->vm);
657 data = FIELD_DP32(data, VDATA, LMUL, emul);
658 data = FIELD_DP32(data, VDATA, NF, a->nf);
659 return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
662 static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
664 return require_rvv(s) &&
665 vext_check_isa_ill(s) &&
666 vext_check_store(s, a->rd, a->nf, eew);
669 GEN_VEXT_TRANS(vse8_v, MO_8, r2nfvm, st_us_op, st_us_check)
670 GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
671 GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
672 GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)
675 *** stride load and store
677 typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
678 TCGv, TCGv_env, TCGv_i32);
680 static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
681 uint32_t data, gen_helper_ldst_stride *fn,
682 DisasContext *s, bool is_store)
688 TCGLabel *over = gen_new_label();
689 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
691 dest = tcg_temp_new_ptr();
692 mask = tcg_temp_new_ptr();
693 base = get_gpr(s, rs1, EXT_NONE);
694 stride = get_gpr(s, rs2, EXT_NONE);
695 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
697 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
698 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
700 fn(dest, mask, base, stride, cpu_env, desc);
702 tcg_temp_free_ptr(dest);
703 tcg_temp_free_ptr(mask);
713 static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
716 gen_helper_ldst_stride *fn;
717 static gen_helper_ldst_stride * const fns[4] = {
718 gen_helper_vlse8_v, gen_helper_vlse16_v,
719 gen_helper_vlse32_v, gen_helper_vlse64_v
727 uint8_t emul = vext_get_emul(s, eew);
728 data = FIELD_DP32(data, VDATA, VM, a->vm);
729 data = FIELD_DP32(data, VDATA, LMUL, emul);
730 data = FIELD_DP32(data, VDATA, NF, a->nf);
731 return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
734 static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
736 return require_rvv(s) &&
737 vext_check_isa_ill(s) &&
738 vext_check_load(s, a->rd, a->nf, a->vm, eew);
741 GEN_VEXT_TRANS(vlse8_v, MO_8, rnfvm, ld_stride_op, ld_stride_check)
742 GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
743 GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
744 GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
746 static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
749 gen_helper_ldst_stride *fn;
750 static gen_helper_ldst_stride * const fns[4] = {
751 /* masked stride store */
752 gen_helper_vsse8_v, gen_helper_vsse16_v,
753 gen_helper_vsse32_v, gen_helper_vsse64_v
756 uint8_t emul = vext_get_emul(s, eew);
757 data = FIELD_DP32(data, VDATA, VM, a->vm);
758 data = FIELD_DP32(data, VDATA, LMUL, emul);
759 data = FIELD_DP32(data, VDATA, NF, a->nf);
765 return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
768 static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
770 return require_rvv(s) &&
771 vext_check_isa_ill(s) &&
772 vext_check_store(s, a->rd, a->nf, eew);
775 GEN_VEXT_TRANS(vsse8_v, MO_8, rnfvm, st_stride_op, st_stride_check)
776 GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
777 GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
778 GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)
781 *** index load and store
783 typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
784 TCGv_ptr, TCGv_env, TCGv_i32);
786 static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
787 uint32_t data, gen_helper_ldst_index *fn,
788 DisasContext *s, bool is_store)
790 TCGv_ptr dest, mask, index;
794 TCGLabel *over = gen_new_label();
795 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
797 dest = tcg_temp_new_ptr();
798 mask = tcg_temp_new_ptr();
799 index = tcg_temp_new_ptr();
800 base = get_gpr(s, rs1, EXT_NONE);
801 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
803 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
804 tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
805 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
807 fn(dest, mask, base, index, cpu_env, desc);
809 tcg_temp_free_ptr(dest);
810 tcg_temp_free_ptr(mask);
811 tcg_temp_free_ptr(index);
821 static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
824 gen_helper_ldst_index *fn;
825 static gen_helper_ldst_index * const fns[4][4] = {
827 * offset vector register group EEW = 8,
828 * data vector register group EEW = SEW
830 { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v,
831 gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
833 * offset vector register group EEW = 16,
834 * data vector register group EEW = SEW
836 { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
837 gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
839 * offset vector register group EEW = 32,
840 * data vector register group EEW = SEW
842 { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
843 gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
845 * offset vector register group EEW = 64,
846 * data vector register group EEW = SEW
848 { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
849 gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
852 fn = fns[eew][s->sew];
854 uint8_t emul = vext_get_emul(s, s->sew);
855 data = FIELD_DP32(data, VDATA, VM, a->vm);
856 data = FIELD_DP32(data, VDATA, LMUL, emul);
857 data = FIELD_DP32(data, VDATA, NF, a->nf);
858 return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
861 static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
863 return require_rvv(s) &&
864 vext_check_isa_ill(s) &&
865 vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
868 GEN_VEXT_TRANS(vlxei8_v, MO_8, rnfvm, ld_index_op, ld_index_check)
869 GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
870 GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
871 GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)
873 static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
876 gen_helper_ldst_index *fn;
877 static gen_helper_ldst_index * const fns[4][4] = {
879 * offset vector register group EEW = 8,
880 * data vector register group EEW = SEW
882 { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v,
883 gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
885 * offset vector register group EEW = 16,
886 * data vector register group EEW = SEW
888 { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
889 gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
891 * offset vector register group EEW = 32,
892 * data vector register group EEW = SEW
894 { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
895 gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
897 * offset vector register group EEW = 64,
898 * data vector register group EEW = SEW
900 { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
901 gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
904 fn = fns[eew][s->sew];
906 uint8_t emul = vext_get_emul(s, s->sew);
907 data = FIELD_DP32(data, VDATA, VM, a->vm);
908 data = FIELD_DP32(data, VDATA, LMUL, emul);
909 data = FIELD_DP32(data, VDATA, NF, a->nf);
910 return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
913 static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
915 return require_rvv(s) &&
916 vext_check_isa_ill(s) &&
917 vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
920 GEN_VEXT_TRANS(vsxei8_v, MO_8, rnfvm, st_index_op, st_index_check)
921 GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
922 GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
923 GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)
926 *** unit stride fault-only-first load
928 static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
929 gen_helper_ldst_us *fn, DisasContext *s)
935 TCGLabel *over = gen_new_label();
936 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
938 dest = tcg_temp_new_ptr();
939 mask = tcg_temp_new_ptr();
940 base = get_gpr(s, rs1, EXT_NONE);
941 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
943 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
944 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
946 fn(dest, mask, base, cpu_env, desc);
948 tcg_temp_free_ptr(dest);
949 tcg_temp_free_ptr(mask);
955 static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
958 gen_helper_ldst_us *fn;
959 static gen_helper_ldst_us * const fns[4] = {
960 gen_helper_vle8ff_v, gen_helper_vle16ff_v,
961 gen_helper_vle32ff_v, gen_helper_vle64ff_v
969 uint8_t emul = vext_get_emul(s, eew);
970 data = FIELD_DP32(data, VDATA, VM, a->vm);
971 data = FIELD_DP32(data, VDATA, LMUL, emul);
972 data = FIELD_DP32(data, VDATA, NF, a->nf);
973 return ldff_trans(a->rd, a->rs1, data, fn, s);
976 GEN_VEXT_TRANS(vle8ff_v, MO_8, r2nfvm, ldff_op, ld_us_check)
977 GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
978 GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
979 GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
982 * load and store whole register instructions
984 typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
986 static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
987 gen_helper_ldst_whole *fn, DisasContext *s,
994 uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
995 dest = tcg_temp_new_ptr();
996 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
998 base = get_gpr(s, rs1, EXT_NONE);
999 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1001 fn(dest, base, cpu_env, desc);
1003 tcg_temp_free_ptr(dest);
1013 * Load and store whole register instructions ignore the vtype and vl settings.
1014 * Thus, we don't need to check the vill bit. (Section 7.9)
1016 #define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, IS_STORE) \
1017 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
1019 if (require_rvv(s) && \
1020 QEMU_IS_ALIGNED(a->rd, ARG_NF)) { \
1021 return ldst_whole_trans(a->rd, a->rs1, ARG_NF, gen_helper_##NAME, \
1027 GEN_LDST_WHOLE_TRANS(vl1re8_v, 1, false)
1028 GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, false)
1029 GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, false)
1030 GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, false)
1031 GEN_LDST_WHOLE_TRANS(vl2re8_v, 2, false)
1032 GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, false)
1033 GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, false)
1034 GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, false)
1035 GEN_LDST_WHOLE_TRANS(vl4re8_v, 4, false)
1036 GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, false)
1037 GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, false)
1038 GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, false)
1039 GEN_LDST_WHOLE_TRANS(vl8re8_v, 8, false)
1040 GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, false)
1041 GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, false)
1042 GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, false)
1044 GEN_LDST_WHOLE_TRANS(vs1r_v, 1, true)
1045 GEN_LDST_WHOLE_TRANS(vs2r_v, 2, true)
1046 GEN_LDST_WHOLE_TRANS(vs4r_v, 4, true)
1047 GEN_LDST_WHOLE_TRANS(vs8r_v, 8, true)
1050 *** Vector Integer Arithmetic Instructions
1054 * MAXSZ returns the maximum vector size, in bytes, that can be operated on,
1055 * which is used in the GVEC IR when the vl_eq_vlmax flag is set to true
1056 * to accelerate vector operations.
1058 static inline uint32_t MAXSZ(DisasContext *s)
1060 int scale = s->lmul - 3;
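    /*
     * s->vlen is in bits, so e.g. VLEN = 128 with LMUL = 1 (lmul = 0)
     * gives scale = -3 and a maximum size of 128 >> 3 = 16 bytes.
     */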
1061 return scale < 0 ? s->vlen >> -scale : s->vlen << scale;
1064 static bool opivv_check(DisasContext *s, arg_rmrr *a)
1066 return require_rvv(s) &&
1067 vext_check_isa_ill(s) &&
1068 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1071 typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
1072 uint32_t, uint32_t, uint32_t);
1075 do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
1076 gen_helper_gvec_4_ptr *fn)
1078 TCGLabel *over = gen_new_label();
1079 if (!opivv_check(s, a)) {
1083 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1085 if (a->vm && s->vl_eq_vlmax) {
1086 gvec_fn(s->sew, vreg_ofs(s, a->rd),
1087 vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
1088 MAXSZ(s), MAXSZ(s));
1092 data = FIELD_DP32(data, VDATA, VM, a->vm);
1093 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1094 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1095 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
1096 cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
1099 gen_set_label(over);
1103 /* OPIVV with GVEC IR */
1104 #define GEN_OPIVV_GVEC_TRANS(NAME, SUF) \
1105 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1107 static gen_helper_gvec_4_ptr * const fns[4] = { \
1108 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1109 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1111 return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1114 GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
1115 GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)
1117 typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
1118 TCGv_env, TCGv_i32);
1120 static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
1121 gen_helper_opivx *fn, DisasContext *s)
1123 TCGv_ptr dest, src2, mask;
1128 TCGLabel *over = gen_new_label();
1129 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1131 dest = tcg_temp_new_ptr();
1132 mask = tcg_temp_new_ptr();
1133 src2 = tcg_temp_new_ptr();
1134 src1 = get_gpr(s, rs1, EXT_NONE);
1136 data = FIELD_DP32(data, VDATA, VM, vm);
1137 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1138 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1140 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1141 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
1142 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1144 fn(dest, mask, src1, src2, cpu_env, desc);
1146 tcg_temp_free_ptr(dest);
1147 tcg_temp_free_ptr(mask);
1148 tcg_temp_free_ptr(src2);
1150 gen_set_label(over);
1154 static bool opivx_check(DisasContext *s, arg_rmrr *a)
1156 return require_rvv(s) &&
1157 vext_check_isa_ill(s) &&
1158 vext_check_ss(s, a->rd, a->rs2, a->vm);
1161 typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
1162 uint32_t, uint32_t);
1165 do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
1166 gen_helper_opivx *fn)
1168 if (!opivx_check(s, a)) {
1172 if (a->vm && s->vl_eq_vlmax) {
1173 TCGv_i64 src1 = tcg_temp_new_i64();
1175 tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
1176 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1177 src1, MAXSZ(s), MAXSZ(s));
1179 tcg_temp_free_i64(src1);
1183 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1186 /* OPIVX with GVEC IR */
1187 #define GEN_OPIVX_GVEC_TRANS(NAME, SUF) \
1188 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1190 static gen_helper_opivx * const fns[4] = { \
1191 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1192 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1194 return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1197 GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
1198 GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)
1200 static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1202 tcg_gen_vec_sub8_i64(d, b, a);
1205 static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1207 tcg_gen_vec_sub16_i64(d, b, a);
1210 static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1212 tcg_gen_sub_i32(ret, arg2, arg1);
1215 static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1217 tcg_gen_sub_i64(ret, arg2, arg1);
1220 static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
1222 tcg_gen_sub_vec(vece, r, b, a);
1225 static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
1226 TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
1228 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
1229 static const GVecGen2s rsub_op[4] = {
1230 { .fni8 = gen_vec_rsub8_i64,
1231 .fniv = gen_rsub_vec,
1232 .fno = gen_helper_vec_rsubs8,
1233 .opt_opc = vecop_list,
1235 { .fni8 = gen_vec_rsub16_i64,
1236 .fniv = gen_rsub_vec,
1237 .fno = gen_helper_vec_rsubs16,
1238 .opt_opc = vecop_list,
1240 { .fni4 = gen_rsub_i32,
1241 .fniv = gen_rsub_vec,
1242 .fno = gen_helper_vec_rsubs32,
1243 .opt_opc = vecop_list,
1245 { .fni8 = gen_rsub_i64,
1246 .fniv = gen_rsub_vec,
1247 .fno = gen_helper_vec_rsubs64,
1248 .opt_opc = vecop_list,
1249 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1253 tcg_debug_assert(vece <= MO_64);
1254 tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
1257 GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
1260 IMM_ZX, /* Zero-extended */
1261 IMM_SX, /* Sign-extended */
1262 IMM_TRUNC_SEW, /* Truncate to log(SEW) bits */
1263 IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */
1266 static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
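    /*
     * s->sew is log2(SEW) - 3, so keeping s->sew + 3 bits truncates the
     * immediate to log2(SEW) bits (IMM_TRUNC_SEW) and s->sew + 4 bits to
     * log2(2*SEW) bits (IMM_TRUNC_2SEW), e.g. 4 and 5 bits for SEW = 16.
     */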
1270 return extract64(imm, 0, 5);
1272 return sextract64(imm, 0, 5);
1274 return extract64(imm, 0, s->sew + 3);
1275 case IMM_TRUNC_2SEW:
1276 return extract64(imm, 0, s->sew + 4);
1278 g_assert_not_reached();
1282 static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
1283 gen_helper_opivx *fn, DisasContext *s,
1284 imm_mode_t imm_mode)
1286 TCGv_ptr dest, src2, mask;
1291 TCGLabel *over = gen_new_label();
1292 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1294 dest = tcg_temp_new_ptr();
1295 mask = tcg_temp_new_ptr();
1296 src2 = tcg_temp_new_ptr();
1297 src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));
1299 data = FIELD_DP32(data, VDATA, VM, vm);
1300 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1301 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1303 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1304 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
1305 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1307 fn(dest, mask, src1, src2, cpu_env, desc);
1309 tcg_temp_free_ptr(dest);
1310 tcg_temp_free_ptr(mask);
1311 tcg_temp_free_ptr(src2);
1313 gen_set_label(over);
1317 typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
1318 uint32_t, uint32_t);
1321 do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
1322 gen_helper_opivx *fn, imm_mode_t imm_mode)
1324 if (!opivx_check(s, a)) {
1328 if (a->vm && s->vl_eq_vlmax) {
1329 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1330 extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
1334 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
1337 /* OPIVI with GVEC IR */
1338 #define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \
1339 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1341 static gen_helper_opivx * const fns[4] = { \
1342 gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
1343 gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
1345 return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
1346 fns[s->sew], IMM_MODE); \
1349 GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)
1351 static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
1352 int64_t c, uint32_t oprsz, uint32_t maxsz)
1354 TCGv_i64 tmp = tcg_constant_i64(c);
1355 tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
1358 GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)
1360 /* Vector Widening Integer Add/Subtract */
1362 /* OPIVV with WIDEN */
1363 static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
1365 return require_rvv(s) &&
1366 vext_check_isa_ill(s) &&
1367 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
1370 static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
1371 gen_helper_gvec_4_ptr *fn,
1372 bool (*checkfn)(DisasContext *, arg_rmrr *))
1374 if (checkfn(s, a)) {
1376 TCGLabel *over = gen_new_label();
1377 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1379 data = FIELD_DP32(data, VDATA, VM, a->vm);
1380 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1381 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1382 vreg_ofs(s, a->rs1),
1383 vreg_ofs(s, a->rs2),
1384 cpu_env, s->vlen / 8, s->vlen / 8,
1387 gen_set_label(over);
1393 #define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
1394 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1396 static gen_helper_gvec_4_ptr * const fns[3] = { \
1397 gen_helper_##NAME##_b, \
1398 gen_helper_##NAME##_h, \
1399 gen_helper_##NAME##_w \
1401 return do_opivv_widen(s, a, fns[s->sew], CHECK); \
1404 GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
1405 GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
1406 GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
1407 GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
1409 /* OPIVX with WIDEN */
1410 static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
1412 return require_rvv(s) &&
1413 vext_check_isa_ill(s) &&
1414 vext_check_ds(s, a->rd, a->rs2, a->vm);
1417 static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
1418 gen_helper_opivx *fn)
1420 if (opivx_widen_check(s, a)) {
1421 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1426 #define GEN_OPIVX_WIDEN_TRANS(NAME) \
1427 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1429 static gen_helper_opivx * const fns[3] = { \
1430 gen_helper_##NAME##_b, \
1431 gen_helper_##NAME##_h, \
1432 gen_helper_##NAME##_w \
1434 return do_opivx_widen(s, a, fns[s->sew]); \
1437 GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
1438 GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
1439 GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
1440 GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
1442 /* WIDEN OPIVV with WIDEN */
1443 static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
1445 return require_rvv(s) &&
1446 vext_check_isa_ill(s) &&
1447 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
1450 static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
1451 gen_helper_gvec_4_ptr *fn)
1453 if (opiwv_widen_check(s, a)) {
1455 TCGLabel *over = gen_new_label();
1456 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1458 data = FIELD_DP32(data, VDATA, VM, a->vm);
1459 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1460 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1461 vreg_ofs(s, a->rs1),
1462 vreg_ofs(s, a->rs2),
1463 cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
1465 gen_set_label(over);
1471 #define GEN_OPIWV_WIDEN_TRANS(NAME) \
1472 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1474 static gen_helper_gvec_4_ptr * const fns[3] = { \
1475 gen_helper_##NAME##_b, \
1476 gen_helper_##NAME##_h, \
1477 gen_helper_##NAME##_w \
1479 return do_opiwv_widen(s, a, fns[s->sew]); \
1482 GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
1483 GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
1484 GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
1485 GEN_OPIWV_WIDEN_TRANS(vwsub_wv)
1487 /* WIDEN OPIVX with WIDEN */
1488 static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
1490 return require_rvv(s) &&
1491 vext_check_isa_ill(s) &&
1492 vext_check_dd(s, a->rd, a->rs2, a->vm);
1495 static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
1496 gen_helper_opivx *fn)
1498 if (opiwx_widen_check(s, a)) {
1499 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1504 #define GEN_OPIWX_WIDEN_TRANS(NAME) \
1505 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1507 static gen_helper_opivx * const fns[3] = { \
1508 gen_helper_##NAME##_b, \
1509 gen_helper_##NAME##_h, \
1510 gen_helper_##NAME##_w \
1512 return do_opiwx_widen(s, a, fns[s->sew]); \
1515 GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
1516 GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
1517 GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
1518 GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
1520 /* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
1521 /* OPIVV without GVEC IR */
1522 #define GEN_OPIVV_TRANS(NAME, CHECK) \
1523 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1525 if (CHECK(s, a)) { \
1526 uint32_t data = 0; \
1527 static gen_helper_gvec_4_ptr * const fns[4] = { \
1528 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1529 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1531 TCGLabel *over = gen_new_label(); \
1532 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
1534 data = FIELD_DP32(data, VDATA, VM, a->vm); \
1535 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
1536 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
1537 vreg_ofs(s, a->rs1), \
1538 vreg_ofs(s, a->rs2), cpu_env, \
1539 s->vlen / 8, s->vlen / 8, data, \
1542 gen_set_label(over); \
1549 * For vadc and vsbc, an illegal instruction exception is raised if the
1550 * destination vector register is v0 and LMUL > 1. (Section 12.3)
1552 static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
1554 return require_rvv(s) &&
1555 vext_check_isa_ill(s) &&
1557 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1560 GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
1561 GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)
1564 * For vmadc and vmsbc, an illegal instruction exception is raised if the
1565 * destination vector register overlaps a source vector register group.
1567 static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
1569 return require_rvv(s) &&
1570 vext_check_isa_ill(s) &&
1571 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1574 GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
1575 GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)
1577 static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
1579 return require_rvv(s) &&
1580 vext_check_isa_ill(s) &&
1582 vext_check_ss(s, a->rd, a->rs2, a->vm);
1585 /* OPIVX without GVEC IR */
1586 #define GEN_OPIVX_TRANS(NAME, CHECK) \
1587 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1589 if (CHECK(s, a)) { \
1590 static gen_helper_opivx * const fns[4] = { \
1591 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1592 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1595 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1600 GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
1601 GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)
1603 static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
1605 return require_rvv(s) &&
1606 vext_check_isa_ill(s) &&
1607 vext_check_ms(s, a->rd, a->rs2);
1610 GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
1611 GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)
1613 /* OPIVI without GVEC IR */
1614 #define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \
1615 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1617 if (CHECK(s, a)) { \
1618 static gen_helper_opivx * const fns[4] = { \
1619 gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
1620 gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
1622 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1623 fns[s->sew], s, IMM_MODE); \
1628 GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
1629 GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)
1631 /* Vector Bitwise Logical Instructions */
1632 GEN_OPIVV_GVEC_TRANS(vand_vv, and)
1633 GEN_OPIVV_GVEC_TRANS(vor_vv, or)
1634 GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
1635 GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
1636 GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
1637 GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
1638 GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
1639 GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori)
1640 GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)
1642 /* Vector Single-Width Bit Shift Instructions */
1643 GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
1644 GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv)
1645 GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv)
1647 typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
1648 uint32_t, uint32_t);
1651 do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
1652 gen_helper_opivx *fn)
1654 if (!opivx_check(s, a)) {
1658 if (a->vm && s->vl_eq_vlmax) {
1659 TCGv_i32 src1 = tcg_temp_new_i32();
1661 tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
1662 tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
1663 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1664 src1, MAXSZ(s), MAXSZ(s));
1666 tcg_temp_free_i32(src1);
1670 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1673 #define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
1674 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1676 static gen_helper_opivx * const fns[4] = { \
1677 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1678 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1681 return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1684 GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
1685 GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
1686 GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)
1688 GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_ZX, vsll_vx, shli)
1689 GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_ZX, vsrl_vx, shri)
1690 GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_ZX, vsra_vx, sari)
1692 /* Vector Narrowing Integer Right Shift Instructions */
1693 static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
1695 return require_rvv(s) &&
1696 vext_check_isa_ill(s) &&
1697 vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
1700 /* OPIVV with NARROW */
1701 #define GEN_OPIVV_NARROW_TRANS(NAME) \
1702 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1704 if (opivv_narrow_check(s, a)) { \
1705 uint32_t data = 0; \
1706 static gen_helper_gvec_4_ptr * const fns[3] = { \
1707 gen_helper_##NAME##_b, \
1708 gen_helper_##NAME##_h, \
1709 gen_helper_##NAME##_w, \
1711 TCGLabel *over = gen_new_label(); \
1712 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
1714 data = FIELD_DP32(data, VDATA, VM, a->vm); \
1715 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
1716 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
1717 vreg_ofs(s, a->rs1), \
1718 vreg_ofs(s, a->rs2), cpu_env, \
1719 s->vlen / 8, s->vlen / 8, data, \
1722 gen_set_label(over); \
1727 GEN_OPIVV_NARROW_TRANS(vnsra_vv)
1728 GEN_OPIVV_NARROW_TRANS(vnsrl_vv)
1730 static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
1732 return require_rvv(s) &&
1733 vext_check_isa_ill(s) &&
1734 vext_check_sd(s, a->rd, a->rs2, a->vm);
1737 /* OPIVX with NARROW */
1738 #define GEN_OPIVX_NARROW_TRANS(NAME) \
1739 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1741 if (opivx_narrow_check(s, a)) { \
1742 static gen_helper_opivx * const fns[3] = { \
1743 gen_helper_##NAME##_b, \
1744 gen_helper_##NAME##_h, \
1745 gen_helper_##NAME##_w, \
1747 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1752 GEN_OPIVX_NARROW_TRANS(vnsra_vx)
1753 GEN_OPIVX_NARROW_TRANS(vnsrl_vx)
1755 /* OPIVI with NARROW */
1756 #define GEN_OPIVI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \
1757 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1759 if (opivx_narrow_check(s, a)) { \
1760 static gen_helper_opivx * const fns[3] = { \
1761 gen_helper_##OPIVX##_b, \
1762 gen_helper_##OPIVX##_h, \
1763 gen_helper_##OPIVX##_w, \
1765 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1766 fns[s->sew], s, IMM_MODE); \
1771 GEN_OPIVI_NARROW_TRANS(vnsra_vi, IMM_ZX, vnsra_vx)
1772 GEN_OPIVI_NARROW_TRANS(vnsrl_vi, IMM_ZX, vnsrl_vx)
1774 /* Vector Integer Comparison Instructions */
1776 * For all comparison instructions, an illegal instruction exception is raised
1777 * if the destination vector register overlaps a source vector register group
1780 static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
1782 return require_rvv(s) &&
1783 vext_check_isa_ill(s) &&
1784 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1787 GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
1788 GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
1789 GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
1790 GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
1791 GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
1792 GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
1794 static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
1796 return require_rvv(s) &&
1797 vext_check_isa_ill(s) &&
1798 vext_check_ms(s, a->rd, a->rs2);
1801 GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
1802 GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
1803 GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
1804 GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
1805 GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
1806 GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
1807 GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
1808 GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
1810 GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
1811 GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
1812 GEN_OPIVI_TRANS(vmsleu_vi, IMM_ZX, vmsleu_vx, opivx_cmp_check)
1813 GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
1814 GEN_OPIVI_TRANS(vmsgtu_vi, IMM_ZX, vmsgtu_vx, opivx_cmp_check)
1815 GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)
1817 /* Vector Integer Min/Max Instructions */
1818 GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
1819 GEN_OPIVV_GVEC_TRANS(vmin_vv, smin)
1820 GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
1821 GEN_OPIVV_GVEC_TRANS(vmax_vv, smax)
1822 GEN_OPIVX_TRANS(vminu_vx, opivx_check)
1823 GEN_OPIVX_TRANS(vmin_vx, opivx_check)
1824 GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
1825 GEN_OPIVX_TRANS(vmax_vx, opivx_check)
1827 /* Vector Single-Width Integer Multiply Instructions */
1828 GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
1829 GEN_OPIVV_TRANS(vmulh_vv, opivv_check)
1830 GEN_OPIVV_TRANS(vmulhu_vv, opivv_check)
1831 GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check)
1832 GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
1833 GEN_OPIVX_TRANS(vmulh_vx, opivx_check)
1834 GEN_OPIVX_TRANS(vmulhu_vx, opivx_check)
1835 GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check)
1837 /* Vector Integer Divide Instructions */
1838 GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
1839 GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
1840 GEN_OPIVV_TRANS(vremu_vv, opivv_check)
1841 GEN_OPIVV_TRANS(vrem_vv, opivv_check)
1842 GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
1843 GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
1844 GEN_OPIVX_TRANS(vremu_vx, opivx_check)
1845 GEN_OPIVX_TRANS(vrem_vx, opivx_check)
1847 /* Vector Widening Integer Multiply Instructions */
1848 GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
1849 GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
1850 GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
1851 GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
1852 GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
1853 GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
1855 /* Vector Single-Width Integer Multiply-Add Instructions */
1856 GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
1857 GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
1858 GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
1859 GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
1860 GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
1861 GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
1862 GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
1863 GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
1865 /* Vector Widening Integer Multiply-Add Instructions */
1866 GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
1867 GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
1868 GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
1869 GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
1870 GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
1871 GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
1872 GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
1874 /* Vector Integer Merge and Move Instructions */
1875 static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
1877 if (require_rvv(s) &&
1878 vext_check_isa_ill(s) &&
1879 /* vmv.v.v has rs2 = 0 and vm = 1 */
1880 vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
1881 if (s->vl_eq_vlmax) {
1882 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
1883 vreg_ofs(s, a->rs1),
1884 MAXSZ(s), MAXSZ(s));
1886 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1887 static gen_helper_gvec_2_ptr * const fns[4] = {
1888 gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
1889 gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
1891 TCGLabel *over = gen_new_label();
1892 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1894 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
1895 cpu_env, s->vlen / 8, s->vlen / 8, data,
1897 gen_set_label(over);
1905 typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
1906 static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
1908 if (require_rvv(s) &&
1909 vext_check_isa_ill(s) &&
1910 /* vmv.v.x has rs2 = 0 and vm = 1 */
1911 vext_check_ss(s, a->rd, 0, 1)) {
1913 TCGLabel *over = gen_new_label();
1914 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1916 s1 = get_gpr(s, a->rs1, EXT_SIGN);
1918 if (s->vl_eq_vlmax) {
1919 tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
1920 MAXSZ(s), MAXSZ(s), s1);
1923 TCGv_i64 s1_i64 = tcg_temp_new_i64();
1924 TCGv_ptr dest = tcg_temp_new_ptr();
1925 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1926 static gen_helper_vmv_vx * const fns[4] = {
1927 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1928 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1931 tcg_gen_ext_tl_i64(s1_i64, s1);
1932 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1933 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1934 fns[s->sew](dest, s1_i64, cpu_env, desc);
1936 tcg_temp_free_ptr(dest);
1937 tcg_temp_free_i64(s1_i64);
1941 gen_set_label(over);
1947 static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
1949 if (require_rvv(s) &&
1950 vext_check_isa_ill(s) &&
1951 /* vmv.v.i has rs2 = 0 and vm = 1 */
1952 vext_check_ss(s, a->rd, 0, 1)) {
1953 int64_t simm = sextract64(a->rs1, 0, 5);
1954 if (s->vl_eq_vlmax) {
1955 tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
1956 MAXSZ(s), MAXSZ(s), simm);
1962 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1963 static gen_helper_vmv_vx * const fns[4] = {
1964 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1965 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1967 TCGLabel *over = gen_new_label();
1968 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1970 s1 = tcg_constant_i64(simm);
1971 dest = tcg_temp_new_ptr();
1972 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1973 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1974 fns[s->sew](dest, s1, cpu_env, desc);
1976 tcg_temp_free_ptr(dest);
1978 gen_set_label(over);
1985 GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
1986 GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
1987 GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)
1990 *** Vector Fixed-Point Arithmetic Instructions
1993 /* Vector Single-Width Saturating Add and Subtract */
1994 GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
1995 GEN_OPIVV_TRANS(vsadd_vv, opivv_check)
1996 GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
1997 GEN_OPIVV_TRANS(vssub_vv, opivv_check)
1998 GEN_OPIVX_TRANS(vsaddu_vx, opivx_check)
1999 GEN_OPIVX_TRANS(vsadd_vx, opivx_check)
2000 GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
2001 GEN_OPIVX_TRANS(vssub_vx, opivx_check)
2002 GEN_OPIVI_TRANS(vsaddu_vi, IMM_ZX, vsaddu_vx, opivx_check)
2003 GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
2005 /* Vector Single-Width Averaging Add and Subtract */
2006 GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
2007 GEN_OPIVV_TRANS(vasub_vv, opivv_check)
2008 GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
2009 GEN_OPIVX_TRANS(vasub_vx, opivx_check)
2010 GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
2012 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
2013 GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
2014 GEN_OPIVX_TRANS(vsmul_vx, opivx_check)
2016 /* Vector Widening Saturating Scaled Multiply-Add */
2017 GEN_OPIVV_WIDEN_TRANS(vwsmaccu_vv, opivv_widen_check)
2018 GEN_OPIVV_WIDEN_TRANS(vwsmacc_vv, opivv_widen_check)
2019 GEN_OPIVV_WIDEN_TRANS(vwsmaccsu_vv, opivv_widen_check)
2020 GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx)
2021 GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx)
2022 GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx)
2023 GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx)
2025 /* Vector Single-Width Scaling Shift Instructions */
2026 GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
2027 GEN_OPIVV_TRANS(vssra_vv, opivv_check)
2028 GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
2029 GEN_OPIVX_TRANS(vssra_vx, opivx_check)
2030 GEN_OPIVI_TRANS(vssrl_vi, IMM_ZX, vssrl_vx, opivx_check)
2031 GEN_OPIVI_TRANS(vssra_vi, IMM_SX, vssra_vx, opivx_check)
2033 /* Vector Narrowing Fixed-Point Clip Instructions */
2034 GEN_OPIVV_NARROW_TRANS(vnclipu_vv)
2035 GEN_OPIVV_NARROW_TRANS(vnclip_vv)
2036 GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
2037 GEN_OPIVX_NARROW_TRANS(vnclip_vx)
2038 GEN_OPIVI_NARROW_TRANS(vnclipu_vi, IMM_ZX, vnclipu_vx)
2039 GEN_OPIVI_NARROW_TRANS(vnclip_vi, IMM_ZX, vnclip_vx)
2042 *** Vector Floating-Point Arithmetic Instructions
2046 * As RVF-only CPUs always have values NaN-boxed to 64 bits,
2047 * RVF and RVD can be treated equally.
2048 * We don't have to deal with the case of SEW > FLEN.
2050 * If SEW < FLEN, check whether the input fp register is a valid
2051 * NaN-boxed value, in which case the least-significant SEW bits
2052 * of the f register are used; otherwise the canonical NaN value is used.
2054 static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
2058 gen_check_nanbox_h(out, in);
2061 gen_check_nanbox_s(out, in);
2064 tcg_gen_mov_i64(out, in);
2067 g_assert_not_reached();
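/*
 * For example, with SEW = 32 a scalar operand is a valid NaN-boxed
 * single only if its upper 32 bits are all ones; otherwise
 * gen_check_nanbox_s() substitutes the canonical NaN (0x7fc00000).
 */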
2071 /* Vector Single-Width Floating-Point Add/Subtract Instructions */
2074 * If the current SEW does not correspond to a supported IEEE floating-point
2075 * type, an illegal instruction exception is raised.
2077 static bool opfvv_check(DisasContext *s, arg_rmrr *a)
2079 return require_rvv(s) &&
2081 vext_check_isa_ill(s) &&
2082 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
2085 /* OPFVV without GVEC IR */
2086 #define GEN_OPFVV_TRANS(NAME, CHECK) \
2087 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2089 if (CHECK(s, a)) { \
2090 uint32_t data = 0; \
2091 static gen_helper_gvec_4_ptr * const fns[3] = { \
2092 gen_helper_##NAME##_h, \
2093 gen_helper_##NAME##_w, \
2094 gen_helper_##NAME##_d, \
2096 TCGLabel *over = gen_new_label(); \
2098 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2100 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2101 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2102 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2103 vreg_ofs(s, a->rs1), \
2104 vreg_ofs(s, a->rs2), cpu_env, \
2105 s->vlen / 8, s->vlen / 8, data, \
2108 gen_set_label(over); \
2113 GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
2114 GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
2116 typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
2117 TCGv_env, TCGv_i32);
2119 static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
2120 uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
2122 TCGv_ptr dest, src2, mask;
2126 TCGLabel *over = gen_new_label();
2127 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2129 dest = tcg_temp_new_ptr();
2130 mask = tcg_temp_new_ptr();
2131 src2 = tcg_temp_new_ptr();
2132 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2134 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
2135 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
2136 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2138 /* NaN-box f[rs1] */
2139 t1 = tcg_temp_new_i64();
2140 do_nanbox(s, t1, cpu_fpr[rs1]);
2142 fn(dest, mask, t1, src2, cpu_env, desc);
2144 tcg_temp_free_ptr(dest);
2145 tcg_temp_free_ptr(mask);
2146 tcg_temp_free_ptr(src2);
2147 tcg_temp_free_i64(t1);
2149 gen_set_label(over);
2154 * If the current SEW does not correspond to a supported IEEE floating-point
2155 * type, an illegal instruction exception is raised.
2157 static bool opfvf_check(DisasContext *s, arg_rmrr *a)
2159 return require_rvv(s) &&
2161 vext_check_isa_ill(s) &&
2162 vext_check_ss(s, a->rd, a->rs2, a->vm);
2165 /* OPFVF without GVEC IR */
2166 #define GEN_OPFVF_TRANS(NAME, CHECK) \
2167 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2169 if (CHECK(s, a)) { \
2170 uint32_t data = 0; \
2171 static gen_helper_opfvf *const fns[3] = { \
2172 gen_helper_##NAME##_h, \
2173 gen_helper_##NAME##_w, \
2174 gen_helper_##NAME##_d, \
2177 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2178 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2179 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2180 fns[s->sew - 1], s); \
2185 GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
2186 GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
2187 GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
2189 /* Vector Widening Floating-Point Add/Subtract Instructions */
2190 static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
2192 return require_rvv(s) &&
2194 vext_check_isa_ill(s) &&
2195 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
2198 /* OPFVV with WIDEN */
2199 #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
2200 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2202 if (CHECK(s, a)) { \
2203 uint32_t data = 0; \
2204 static gen_helper_gvec_4_ptr * const fns[2] = { \
2205 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2207 TCGLabel *over = gen_new_label(); \
2209 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2211 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2212 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2213 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2214 vreg_ofs(s, a->rs1), \
2215 vreg_ofs(s, a->rs2), cpu_env, \
2216 s->vlen / 8, s->vlen / 8, data, \
2219 gen_set_label(over); \
2225 GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
2226 GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
2228 static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
2230 return require_rvv(s) &&
2232 vext_check_isa_ill(s) &&
2233 vext_check_ds(s, a->rd, a->rs2, a->vm);
2236 /* OPFVF with WIDEN */
2237 #define GEN_OPFVF_WIDEN_TRANS(NAME) \
2238 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2240 if (opfvf_widen_check(s, a)) { \
2241 uint32_t data = 0; \
2242 static gen_helper_opfvf *const fns[2] = { \
2243 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2246 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2247 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2248 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2249 fns[s->sew - 1], s); \
2254 GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
2255 GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
2257 static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
2259 return require_rvv(s) &&
2261 vext_check_isa_ill(s) &&
2262 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
2265 /* WIDEN OPFVV with WIDEN */
2266 #define GEN_OPFWV_WIDEN_TRANS(NAME) \
2267 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2269 if (opfwv_widen_check(s, a)) { \
2270 uint32_t data = 0; \
2271 static gen_helper_gvec_4_ptr * const fns[2] = { \
2272 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2274 TCGLabel *over = gen_new_label(); \
2276 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2278 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2279 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2280 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2281 vreg_ofs(s, a->rs1), \
2282 vreg_ofs(s, a->rs2), cpu_env, \
2283 s->vlen / 8, s->vlen / 8, data, \
2286 gen_set_label(over); \
2292 GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
2293 GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
2295 static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2297 return require_rvv(s) &&
2299 vext_check_isa_ill(s) &&
2300 vext_check_dd(s, a->rd, a->rs2, a->vm);
2303 /* WIDEN OPFVF with WIDEN */
2304 #define GEN_OPFWF_WIDEN_TRANS(NAME) \
2305 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2307 if (opfwf_widen_check(s, a)) { \
2308 uint32_t data = 0; \
2309 static gen_helper_opfvf *const fns[2] = { \
2310 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2313 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2314 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2315 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2316 fns[s->sew - 1], s); \
2321 GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
2322 GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
2324 /* Vector Single-Width Floating-Point Multiply/Divide Instructions */
2325 GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
2326 GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
2327 GEN_OPFVF_TRANS(vfmul_vf, opfvf_check)
2328 GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check)
2329 GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
2331 /* Vector Widening Floating-Point Multiply */
2332 GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
2333 GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
2335 /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
2336 GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
2337 GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
2338 GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
2339 GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
2340 GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
2341 GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
2342 GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
2343 GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
2344 GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
2345 GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
2346 GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
2347 GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
2348 GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
2349 GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
2350 GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
2351 GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
2353 /* Vector Widening Floating-Point Fused Multiply-Add Instructions */
2354 GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
2355 GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
2356 GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
2357 GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
2358 GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
2359 GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
2360 GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
2361 GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
2363 /* Vector Floating-Point Square-Root Instruction */
2366 * If the current SEW does not correspond to a supported IEEE floating-point
2367 * type, an illegal instruction exception is raised.
2369 static bool opfv_check(DisasContext *s, arg_rmr *a)
2371 return require_rvv(s) &&
2373 vext_check_isa_ill(s) &&
2374 /* OPFV instructions ignore vs1 check */
2375 vext_check_ss(s, a->rd, a->rs2, a->vm);
2378 #define GEN_OPFV_TRANS(NAME, CHECK) \
2379 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2381 if (CHECK(s, a)) { \
2382 uint32_t data = 0; \
2383 static gen_helper_gvec_3_ptr * const fns[3] = { \
2384 gen_helper_##NAME##_h, \
2385 gen_helper_##NAME##_w, \
2386 gen_helper_##NAME##_d, \
2388 TCGLabel *over = gen_new_label(); \
2390 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2392 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2393 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2394 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2395 vreg_ofs(s, a->rs2), cpu_env, \
2396 s->vlen / 8, s->vlen / 8, data, \
2399 gen_set_label(over); \
2405 GEN_OPFV_TRANS(vfsqrt_v, opfv_check)
2407 /* Vector Floating-Point MIN/MAX Instructions */
2408 GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
2409 GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
2410 GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
2411 GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
2413 /* Vector Floating-Point Sign-Injection Instructions */
2414 GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
2415 GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
2416 GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
2417 GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
2418 GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
2419 GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
2421 /* Vector Floating-Point Compare Instructions */
2422 static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2424 return require_rvv(s) &&
2426 vext_check_isa_ill(s) &&
2427 vext_check_mss(s, a->rd, a->rs1, a->rs2);
2430 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
2431 GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
2432 GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
2433 GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
2434 GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
2436 static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2438 return require_rvv(s) &&
2440 vext_check_isa_ill(s) &&
2441 vext_check_ms(s, a->rd, a->rs2);
2444 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
2445 GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
2446 GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
2447 GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
2448 GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
2449 GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
2450 GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check)
2452 /* Vector Floating-Point Classify Instruction */
2453 GEN_OPFV_TRANS(vfclass_v, opfv_check)
2455 /* Vector Floating-Point Merge Instruction */
2456 GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check)
2458 static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2460 if (require_rvv(s) &&
2462 vext_check_isa_ill(s) &&
2463 require_align(a->rd, s->lmul)) {
2464 if (s->vl_eq_vlmax) {
2465 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2466 MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]);
2471 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2472 static gen_helper_vmv_vx * const fns[3] = {
2473 gen_helper_vmv_v_x_h,
2474 gen_helper_vmv_v_x_w,
2475 gen_helper_vmv_v_x_d,
2477 TCGLabel *over = gen_new_label();
2478 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2480 dest = tcg_temp_new_ptr();
2481 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2482 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
2483 fns[s->sew - 1](dest, cpu_fpr[a->rs1], cpu_env, desc);
2485 tcg_temp_free_ptr(dest);
2487 gen_set_label(over);
2494 /* Single-Width Floating-Point/Integer Type-Convert Instructions */
2495 GEN_OPFV_TRANS(vfcvt_xu_f_v, opfv_check)
2496 GEN_OPFV_TRANS(vfcvt_x_f_v, opfv_check)
2497 GEN_OPFV_TRANS(vfcvt_f_xu_v, opfv_check)
2498 GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check)
2500 /* Widening Floating-Point/Integer Type-Convert Instructions */
2503 * If the current SEW does not correspond to a supported IEEE floating-point
2504 * type, an illegal instruction exception is raised.
2506 static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2508 return require_rvv(s) &&
2509 require_scale_rvf(s) &&
2511 vext_check_isa_ill(s) &&
2512 vext_check_ds(s, a->rd, a->rs2, a->vm);
2515 #define GEN_OPFV_WIDEN_TRANS(NAME) \
2516 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2518 if (opfv_widen_check(s, a)) { \
2519 uint32_t data = 0; \
2520 static gen_helper_gvec_3_ptr * const fns[2] = { \
2521 gen_helper_##NAME##_h, \
2522 gen_helper_##NAME##_w, \
2524 TCGLabel *over = gen_new_label(); \
2526 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2528 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2529 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2530 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2531 vreg_ofs(s, a->rs2), cpu_env, \
2532 s->vlen / 8, s->vlen / 8, data, \
2535 gen_set_label(over); \
2541 GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v)
2542 GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v)
2543 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_xu_v)
2544 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_x_v)
2545 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v)
2547 /* Narrowing Floating-Point/Integer Type-Convert Instructions */
2550 * If the current SEW does not correspond to a supported IEEE floating-point
2551 * type, an illegal instruction exception is raised.
2553 static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2555 return require_rvv(s) &&
2557 (s->sew != MO_64) &&
2558 vext_check_isa_ill(s) &&
2559 /* OPFV narrowing instructions ignore vs1 check */
2560 vext_check_sd(s, a->rd, a->rs2, a->vm);
2563 #define GEN_OPFV_NARROW_TRANS(NAME) \
2564 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2566 if (opfv_narrow_check(s, a)) { \
2567 uint32_t data = 0; \
2568 static gen_helper_gvec_3_ptr * const fns[2] = { \
2569 gen_helper_##NAME##_h, \
2570 gen_helper_##NAME##_w, \
2572 TCGLabel *over = gen_new_label(); \
2574 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2576 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2577 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2578 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2579 vreg_ofs(s, a->rs2), cpu_env, \
2580 s->vlen / 8, s->vlen / 8, data, \
2583 gen_set_label(over); \
2589 GEN_OPFV_NARROW_TRANS(vfncvt_xu_f_v)
2590 GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v)
2591 GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v)
2592 GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v)
2593 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
2596 *** Vector Reduction Operations
2598 /* Vector Single-Width Integer Reduction Instructions */
2599 static bool reduction_check(DisasContext *s, arg_rmrr *a)
2601 return require_rvv(s) &&
2602 vext_check_isa_ill(s) &&
2603 vext_check_reduction(s, a->rs2);
2606 GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
2607 GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
2608 GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
2609 GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
2610 GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
2611 GEN_OPIVV_TRANS(vredand_vs, reduction_check)
2612 GEN_OPIVV_TRANS(vredor_vs, reduction_check)
2613 GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
2615 /* Vector Widening Integer Reduction Instructions */
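/*
 * The widening form accumulates into a 2*SEW-bit element, so SEW must
 * be narrower than 64 bits.
 */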
2616 static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
2618 return reduction_check(s, a) && (s->sew < MO_64);
2621 GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
2622 GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
2624 /* Vector Single-Width Floating-Point Reduction Instructions */
2625 GEN_OPFVV_TRANS(vfredsum_vs, reduction_check)
2626 GEN_OPFVV_TRANS(vfredmax_vs, reduction_check)
2627 GEN_OPFVV_TRANS(vfredmin_vs, reduction_check)
2629 /* Vector Widening Floating-Point Reduction Instructions */
2630 GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
2633 *** Vector Mask Operations
2636 /* Vector Mask-Register Logical Instructions */
2637 #define GEN_MM_TRANS(NAME) \
2638 static bool trans_##NAME(DisasContext *s, arg_r *a) \
2640 if (vext_check_isa_ill(s)) { \
2641 uint32_t data = 0; \
2642 gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
2643 TCGLabel *over = gen_new_label(); \
2644 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2646 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2647 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2648 vreg_ofs(s, a->rs1), \
2649 vreg_ofs(s, a->rs2), cpu_env, \
2650 s->vlen / 8, s->vlen / 8, data, fn); \
2652 gen_set_label(over); \
2658 GEN_MM_TRANS(vmand_mm)
2659 GEN_MM_TRANS(vmnand_mm)
2660 GEN_MM_TRANS(vmandnot_mm)
2661 GEN_MM_TRANS(vmxor_mm)
2662 GEN_MM_TRANS(vmor_mm)
2663 GEN_MM_TRANS(vmnor_mm)
2664 GEN_MM_TRANS(vmornot_mm)
2665 GEN_MM_TRANS(vmxnor_mm)
2667 /* Vector mask population count vmpopc */
2668 static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
2670 if (require_rvv(s) &&
2671 vext_check_isa_ill(s)) {
2672 TCGv_ptr src2, mask;
2676 data = FIELD_DP32(data, VDATA, VM, a->vm);
2677 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2679 mask = tcg_temp_new_ptr();
2680 src2 = tcg_temp_new_ptr();
2681 dst = dest_gpr(s, a->rd);
2682 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2684 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2685 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2687 gen_helper_vmpopc_m(dst, mask, src2, cpu_env, desc);
2688 gen_set_gpr(s, a->rd, dst);
2690 tcg_temp_free_ptr(mask);
2691 tcg_temp_free_ptr(src2);
2697 /* vmfirst find-first-set mask bit */
2698 static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
2700 if (require_rvv(s) &&
2701 vext_check_isa_ill(s)) {
2702 TCGv_ptr src2, mask;
2706 data = FIELD_DP32(data, VDATA, VM, a->vm);
2707 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2709 mask = tcg_temp_new_ptr();
2710 src2 = tcg_temp_new_ptr();
2711 dst = dest_gpr(s, a->rd);
2712 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2714 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2715 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2717 gen_helper_vmfirst_m(dst, mask, src2, cpu_env, desc);
2718 gen_set_gpr(s, a->rd, dst);
2720 tcg_temp_free_ptr(mask);
2721 tcg_temp_free_ptr(src2);
2727 /* vmsbf.m set-before-first mask bit */
2728 /* vmsif.m set-include-first mask bit */
2729 /* vmsof.m set-only-first mask bit */
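/*
 * vmsbf.m sets the mask bits strictly before the first set bit of vs2,
 * vmsif.m additionally includes that first set bit, and vmsof.m sets
 * only that bit; all three share the translation pattern below.
 */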
2730 #define GEN_M_TRANS(NAME) \
2731 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2733 if (vext_check_isa_ill(s)) { \
2734 uint32_t data = 0; \
2735 gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \
2736 TCGLabel *over = gen_new_label(); \
2737 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2739 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2740 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2741 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
2742 vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
2743 cpu_env, s->vlen / 8, s->vlen / 8, \
2746 gen_set_label(over); \
2752 GEN_M_TRANS(vmsbf_m)
2753 GEN_M_TRANS(vmsif_m)
2754 GEN_M_TRANS(vmsof_m)
2756 /* Vector Iota Instruction */
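/*
 * viota.m writes to each active element of vd the number of set mask
 * bits in vs2 with a lower index, i.e. an exclusive prefix sum of the
 * mask.
 */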
2757 static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
2759 if (require_rvv(s) &&
2760 vext_check_isa_ill(s) &&
2761 require_noover(a->rd, s->lmul, a->rs2, 0) &&
2762 require_vm(a->vm, a->rd) &&
2763 require_align(a->rd, s->lmul)) {
2765 TCGLabel *over = gen_new_label();
2766 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2768 data = FIELD_DP32(data, VDATA, VM, a->vm);
2769 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2770 static gen_helper_gvec_3_ptr * const fns[4] = {
2771 gen_helper_viota_m_b, gen_helper_viota_m_h,
2772 gen_helper_viota_m_w, gen_helper_viota_m_d,
2774 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2775 vreg_ofs(s, a->rs2), cpu_env,
2776 s->vlen / 8, s->vlen / 8, data, fns[s->sew]);
2778 gen_set_label(over);
2784 /* Vector Element Index Instruction */
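/* vid.v writes its own element index to each active element of vd. */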
2785 static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
2787 if (require_rvv(s) &&
2788 vext_check_isa_ill(s) &&
2789 require_align(a->rd, s->lmul) &&
2790 require_vm(a->vm, a->rd)) {
2792 TCGLabel *over = gen_new_label();
2793 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2795 data = FIELD_DP32(data, VDATA, VM, a->vm);
2796 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2797 static gen_helper_gvec_2_ptr * const fns[4] = {
2798 gen_helper_vid_v_b, gen_helper_vid_v_h,
2799 gen_helper_vid_v_w, gen_helper_vid_v_d,
2801 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2802 cpu_env, s->vlen / 8, s->vlen / 8,
2805 gen_set_label(over);
2812 *** Vector Permutation Instructions
2815 /* Integer Extract Instruction */
2817 static void load_element(TCGv_i64 dest, TCGv_ptr base,
2822 tcg_gen_ld8u_i64(dest, base, ofs);
2825 tcg_gen_ld16u_i64(dest, base, ofs);
2828 tcg_gen_ld32u_i64(dest, base, ofs);
2831 tcg_gen_ld_i64(dest, base, ofs);
2834 g_assert_not_reached();
2839 /* offset of the idx element within base register r */
2840 static uint32_t endian_ofs(DisasContext *s, int r, int idx)
2842 #ifdef HOST_WORDS_BIGENDIAN
2843 return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
2845 return vreg_ofs(s, r) + (idx << s->sew);
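/*
 * For example, with byte elements (s->sew == 0) on a big-endian host
 * the element index is XORed with 7, so elements 0..7 map to bytes
 * 7..0 of the host-order uint64_t that holds them.
 */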
2849 /* adjust the index according to the host endianness */
2850 static void endian_adjust(TCGv_i32 ofs, int sew)
2852 #ifdef HOST_WORDS_BIGENDIAN
2853 tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
2857 /* Load idx >= VLMAX ? 0 : vreg[idx] */
2858 static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
2859 int vreg, TCGv idx, int vlmax)
2861 TCGv_i32 ofs = tcg_temp_new_i32();
2862 TCGv_ptr base = tcg_temp_new_ptr();
2863 TCGv_i64 t_idx = tcg_temp_new_i64();
2864 TCGv_i64 t_vlmax, t_zero;
2867 * Mask the index to the length so that we do
2868 * not produce an out-of-range load.
2870 tcg_gen_trunc_tl_i32(ofs, idx);
2871 tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
2873 /* Convert the index to an offset. */
2874 endian_adjust(ofs, s->sew);
2875 tcg_gen_shli_i32(ofs, ofs, s->sew);
2877 /* Convert the index to a pointer. */
2878 tcg_gen_ext_i32_ptr(base, ofs);
2879 tcg_gen_add_ptr(base, base, cpu_env);
2881 /* Perform the load. */
2882 load_element(dest, base,
2883 vreg_ofs(s, vreg), s->sew);
2884 tcg_temp_free_ptr(base);
2885 tcg_temp_free_i32(ofs);
2887 /* Flush out-of-range indexing to zero. */
2888 t_vlmax = tcg_constant_i64(vlmax);
2889 t_zero = tcg_constant_i64(0);
2890 tcg_gen_extu_tl_i64(t_idx, idx);
2892 tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
2893 t_vlmax, dest, t_zero);
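/*
 * Note that the load above always used an in-range (masked) offset, so
 * this movcond fix-up replaces a conditional branch in the generated
 * code.
 */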
2895 tcg_temp_free_i64(t_idx);
2898 static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
2901 load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew);
2904 static bool trans_vext_x_v(DisasContext *s, arg_r *a)
2906 TCGv_i64 tmp = tcg_temp_new_i64();
2907 TCGv dest = dest_gpr(s, a->rd);
2910 /* Special case vmv.x.s rd, vs2. */
2911 vec_element_loadi(s, tmp, a->rs2, 0);
2913 /* This instruction ignores LMUL and vector register groups */
2914 int vlmax = s->vlen >> (3 + s->sew);
2915 vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax);
2918 tcg_gen_trunc_i64_tl(dest, tmp);
2919 gen_set_gpr(s, a->rd, dest);
2921 tcg_temp_free_i64(tmp);
2925 /* Integer Scalar Move Instruction */
2927 static void store_element(TCGv_i64 val, TCGv_ptr base,
2932 tcg_gen_st8_i64(val, base, ofs);
2935 tcg_gen_st16_i64(val, base, ofs);
2938 tcg_gen_st32_i64(val, base, ofs);
2941 tcg_gen_st_i64(val, base, ofs);
2944 g_assert_not_reached();
2950 * Store vreg[idx] = val.
2951 * The index must be in the range [0, VLMAX).
2953 static void vec_element_storei(DisasContext *s, int vreg,
2954 int idx, TCGv_i64 val)
2956 store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
2959 /* vmv.s.x vd, rs1 # vd[0] = rs1 */
2960 static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
2962 if (vext_check_isa_ill(s)) {
2963 /* This instruction ignores LMUL and vector register groups */
2964 int maxsz = s->vlen >> 3;
2966 TCGLabel *over = gen_new_label();
2968 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
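/* Zero the whole destination register first, then write x[rs1] into element 0. */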
2969 tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), maxsz, maxsz, 0);
2974 t1 = tcg_temp_new_i64();
2975 tcg_gen_extu_tl_i64(t1, cpu_gpr[a->rs1]);
2976 vec_element_storei(s, a->rd, 0, t1);
2977 tcg_temp_free_i64(t1);
2980 gen_set_label(over);
2986 /* Floating-Point Scalar Move Instructions */
2987 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
2989 if (!s->vill && has_ext(s, RVF) &&
2990 (s->mstatus_fs != 0) && (s->sew != 0)) {
2991 unsigned int len = 8 << s->sew;
2993 vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0);
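/* For SEW < 64, NaN-box the scalar result by filling the bits above SEW with ones. */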
2995 tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
2996 MAKE_64BIT_MASK(len, 64 - len));
3005 /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
3006 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
3008 if (!s->vill && has_ext(s, RVF) && (s->sew != 0)) {
3010 /* This instruction ignores LMUL and vector register groups. */
3011 uint32_t vlmax = s->vlen >> 3;
3013 /* if vl == 0, skip vector register write back */
3014 TCGLabel *over = gen_new_label();
3015 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3017 /* zero all elements */
3018 tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), vlmax, vlmax, 0);
3020 /* NaN-box f[rs1] as necessary for SEW */
3021 t1 = tcg_temp_new_i64();
3022 if (s->sew == MO_64 && !has_ext(s, RVD)) {
3023 tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32));
3025 tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]);
3027 vec_element_storei(s, a->rd, 0, t1);
3028 tcg_temp_free_i64(t1);
3030 gen_set_label(over);
3036 /* Vector Slide Instructions */
3037 static bool slideup_check(DisasContext *s, arg_rmrr *a)
3039 return require_rvv(s) &&
3040 vext_check_isa_ill(s) &&
3041 vext_check_slide(s, a->rd, a->rs2, a->vm, true);
3044 GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
3045 GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
3046 GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
3048 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
3050 return require_rvv(s) &&
3051 vext_check_isa_ill(s) &&
3052 vext_check_slide(s, a->rd, a->rs2, a->vm, false);
3055 GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
3056 GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
3057 GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
3059 /* Vector Register Gather Instruction */
3060 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
3062 return require_rvv(s) &&
3063 vext_check_isa_ill(s) &&
3064 require_align(a->rd, s->lmul) &&
3065 require_align(a->rs1, s->lmul) &&
3066 require_align(a->rs2, s->lmul) &&
3067 (a->rd != a->rs2 && a->rd != a->rs1) &&
3068 require_vm(a->vm, a->rd);
3071 GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
3073 static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
3075 return require_rvv(s) &&
3076 vext_check_isa_ill(s) &&
3077 require_align(a->rd, s->lmul) &&
3078 require_align(a->rs2, s->lmul) &&
3079 (a->rd != a->rs2) &&
3080 require_vm(a->vm, a->rd);
3083 /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
3084 static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
3086 if (!vrgather_vx_check(s, a)) {
3090 if (a->vm && s->vl_eq_vlmax) {
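/*
 * With vm=1 and vl == VLMAX every destination element receives the same
 * value, vs2[x[rs1]] (or zero when the index is out of range), so the
 * gather degenerates into one element load followed by a splat.
 */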
3091 int vlmax = s->vlen;
3092 TCGv_i64 dest = tcg_temp_new_i64();
3095 vec_element_loadi(s, dest, a->rs2, 0);
3097 vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
3100 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
3101 MAXSZ(s), MAXSZ(s), dest);
3102 tcg_temp_free_i64(dest);
3105 static gen_helper_opivx * const fns[4] = {
3106 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3107 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3109 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
3114 /* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
3115 static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
3117 if (!vrgather_vx_check(s, a)) {
3121 if (a->vm && s->vl_eq_vlmax) {
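/*
 * Here the index is an immediate, so the out-of-range case can be
 * decided at translate time: splat zero, or splat element rs1 of vs2
 * directly from its in-memory offset.
 */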
3122 if (a->rs1 >= s->vlen) {
3123 tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd),
3124 MAXSZ(s), MAXSZ(s), 0);
3126 tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
3127 endian_ofs(s, a->rs2, a->rs1),
3128 MAXSZ(s), MAXSZ(s));
3132 static gen_helper_opivx * const fns[4] = {
3133 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3134 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3136 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
3143 * Vector Compress Instruction
3145 * The destination vector register group cannot overlap the
3146 * source vector register group or the source mask register.
3148 static bool vcompress_vm_check(DisasContext *s, arg_r *a)
3150 return require_rvv(s) &&
3151 vext_check_isa_ill(s) &&
3152 require_align(a->rd, s->lmul) &&
3153 require_align(a->rs2, s->lmul) &&
3154 (a->rd != a->rs2) &&
3155 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1);
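/*
 * vcompress.vm packs the elements of vs2 selected by mask register vs1
 * contiguously into vd, starting at element 0.
 */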
3158 static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
3160 if (vcompress_vm_check(s, a)) {
3162 static gen_helper_gvec_4_ptr * const fns[4] = {
3163 gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
3164 gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
3166 TCGLabel *over = gen_new_label();
3167 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3169 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3170 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3171 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
3172 cpu_env, s->vlen / 8, s->vlen / 8, data,
3175 gen_set_label(over);