3 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2 or later, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
17 #include "tcg/tcg-op-gvec.h"
18 #include "tcg/tcg-gvec-desc.h"
19 #include "internals.h"
/*
 * Return true when the register range [astart, astart + asize) intersects
 * the range [bstart, bstart + bsize).
 */
static inline bool is_overlapped(const int8_t astart, int8_t asize,
                                 const int8_t bstart, int8_t bsize)
{
    const int8_t aend = astart + asize;
    const int8_t bend = bstart + bsize;
    const int8_t span_end = aend > bend ? aend : bend;
    const int8_t span_start = astart < bstart ? astart : bstart;

    /* The ranges overlap iff their union is shorter than their total size. */
    return span_end - span_start < asize + bsize;
}
/* The vector unit must be enabled (mstatus.VS != Off) for any vector insn. */
static bool require_rvv(DisasContext *s)
{
    return s->mstatus_vs != 0;
}
/*
 * Floating-point unit must be on, and the FP extension matching the
 * current SEW must be present (RVF for 16/32-bit, RVD for 64-bit).
 */
static bool require_rvf(DisasContext *s)
{
    if (s->mstatus_fs == 0) {
        return false;
    }

    switch (s->sew) {
    case MO_16:
    case MO_32:
        return has_ext(s, RVF);
    case MO_64:
        return has_ext(s, RVD);
    default:
        return false;
    }
}
/*
 * As require_rvf(), but for widening ops whose effective FP width is
 * 2 * SEW, so the check is shifted down one SEW step.
 */
static bool require_scale_rvf(DisasContext *s)
{
    if (s->mstatus_fs == 0) {
        return false;
    }

    switch (s->sew) {
    case MO_8:
    case MO_16:
        return has_ext(s, RVF);
    case MO_32:
        return has_ext(s, RVD);
    default:
        return false;
    }
}
/* Destination vector register group cannot overlap source mask register. */
static bool require_vm(int vm, int vd)
{
    /* Only masked operations (vm == 0) read v0, so vd merely must not be v0. */
    if (vm != 0) {
        return true;
    }
    return vd != 0;
}
/*
 * A segment group of nf fields, each occupying max(lmul, 0) register bits
 * worth of registers, must use at most 8 registers and not run past v31.
 */
static bool require_nf(int vd, int nf, int lmul)
{
    int group = nf << (lmul < 0 ? 0 : lmul);
    return group <= 8 && vd + group <= 32;
}
/*
 * Vector register should be aligned with the passed-in LMUL (EMUL).
 * If LMUL < 0, i.e. fractional LMUL, any vector register is allowed.
 */
static bool require_align(const int8_t val, const int8_t lmul)
{
    if (lmul <= 0) {
        return true;
    }
    /* Register number must be a multiple of 2^lmul: low lmul bits clear. */
    return (val & ((1 << lmul) - 1)) == 0;
}
91 * A destination vector register group can overlap a source vector
92 * register group only if one of the following holds:
93 * 1. The destination EEW equals the source EEW.
94 * 2. The destination EEW is smaller than the source EEW and the overlap
95 * is in the lowest-numbered part of the source register group.
96 * 3. The destination EEW is greater than the source EEW, the source EMUL
97 * is at least 1, and the overlap is in the highest-numbered part of
98 * the destination register group.
101 * This function returns true if one of the following holds:
102 * * Destination vector register group does not overlap a source vector
105 * For rule 1, overlap is allowed so this function doesn't need to be called.
106 * For rule 2, (vd == vs). Caller has to check whether: (vd != vs) before
107 * calling this function.
/* Implements the overlap rules documented above (rules 2 and 3). */
static bool require_noover(const int8_t dst, const int8_t dst_lmul,
                           const int8_t src, const int8_t src_lmul)
{
    /* A fractional LMUL still occupies one whole register. */
    int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
    int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;

    /* Destination EEW is greater than the source EEW, check rule 3. */
    if (dst_size > src_size) {
        if (dst < src &&
            src_lmul >= 0 &&
            is_overlapped(dst, dst_size, src, src_size) &&
            !is_overlapped(dst, dst_size, src + src_size, src_size)) {
            return true;
        }
    }

    return !is_overlapped(dst, dst_size, src, src_size);
}
/*
 * Common translation for vsetvl/vsetvli: compute the new vl via helper,
 * write it to rd, and end the TB since vtype/vl affect later translation.
 */
static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
{
    TCGv s1, dst;

    if (!require_rvv(s) || !has_ext(s, RVV)) {
        return false;
    }

    dst = dest_gpr(s, rd);

    if (rd == 0 && rs1 == 0) {
        /* rd=x0, rs1=x0: keep the existing vl. */
        s1 = tcg_temp_new();
        tcg_gen_mov_tl(s1, cpu_vl);
    } else if (rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_constant_tl(RV_VLEN_MAX);
    } else {
        s1 = get_gpr(s, rs1, EXT_ZERO);
    }

    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    mark_vs_dirty(s);

    /* Changing vtype/vl invalidates cached translation state: exit the TB. */
    tcg_gen_movi_tl(cpu_pc, s->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();
    s->base.is_jmp = DISAS_NORETURN;

    if (rd == 0 && rs1 == 0) {
        tcg_temp_free(s1);
    }

    return true;
}
/* vsetvl: new vtype comes from register rs2. */
static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
{
    TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}
/* vsetvli: new vtype comes from the zimm immediate. */
static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
{
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetvl(s, a->rd, a->rs1, s2);
}
/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
    /* Each register is vlen bits = vlen/8 bytes within the vreg array. */
    return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
}
181 /* check functions */
184 * Vector unit-stride, strided, unit-stride segment, strided segment
185 * store check function.
187 * Rules to be checked here:
188 * 1. EMUL must within the range: 1/8 <= EMUL <= 8. (Section 7.3)
189 * 2. Destination vector register number is multiples of EMUL.
190 * (Section 3.4.2, 7.3)
191 * 3. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
192 * 4. Vector register numbers accessed by the segment load or store
193 * cannot increment past 31. (Section 7.8)
static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
{
    /* EMUL = (EEW / SEW) * LMUL, computed in log2 form. */
    int8_t emul = eew - s->sew + s->lmul;
    return (emul >= -3 && emul <= 3) &&
            require_align(vd, emul) &&
            require_nf(vd, nf, emul);
}
204 * Vector unit-stride, strided, unit-stride segment, strided segment
205 * load check function.
207 * Rules to be checked here:
208 * 1. All rules applies to store instructions are applies
209 * to load instructions.
210 * 2. Destination vector register group for a masked vector
211 * instruction cannot overlap the source mask register (v0).
static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
                            uint8_t eew)
{
    /* Loads additionally must not clobber the mask register v0 when masked. */
    return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
}
221 * Vector indexed, indexed segment store check function.
223 * Rules to be checked here:
224 * 1. EMUL must within the range: 1/8 <= EMUL <= 8. (Section 7.3)
225 * 2. Index vector register number is multiples of EMUL.
226 * (Section 3.4.2, 7.3)
227 * 3. Destination vector register number is multiples of LMUL.
228 * (Section 3.4.2, 7.3)
229 * 4. The EMUL setting must be such that EMUL * NFIELDS ≤ 8. (Section 7.8)
230 * 5. Vector register numbers accessed by the segment load or store
231 * cannot increment past 31. (Section 7.8)
static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
                                uint8_t eew)
{
    /* Index (offset) register group uses EMUL; data group uses LMUL. */
    int8_t emul = eew - s->sew + s->lmul;
    return (emul >= -3 && emul <= 3) &&
            require_align(vs2, emul) &&
            require_align(vd, s->lmul) &&
            require_nf(vd, nf, s->lmul);
}
244 * Vector indexed, indexed segment load check function.
246 * Rules to be checked here:
247 * 1. All rules applies to store instructions are applies
248 * to load instructions.
249 * 2. Destination vector register group for a masked vector
250 * instruction cannot overlap the source mask register (v0).
252 * 3. Destination vector register cannot overlap a source vector
253 * register (vs2) group.
255 * 4. Destination vector register groups cannot overlap
256 * the source vector register (vs2) group for
257 * indexed segment load instructions. (Section 7.8.3)
static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
                                int nf, int vm, uint8_t eew)
{
    int8_t seg_vd;
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
        require_vm(vm, vd);

    /* Each segment register group has to follow overlap rules. */
    for (int i = 0; i < nf; ++i) {
        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;

        if (eew > s->sew) {
            if (seg_vd != vs2) {
                ret &= require_noover(seg_vd, s->lmul, vs2, emul);
            }
        } else if (eew < s->sew) {
            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
        }

        /*
         * Destination vector register groups cannot overlap
         * the source vector register (vs2) group for
         * indexed segment load instructions.
         */
        if (nf > 1) {
            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
                                  vs2, 1 << MAX(emul, 0));
        }
    }
    return ret;
}
/* Single-width destination and one single-width source, LMUL-aligned. */
static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
{
    return require_vm(vm, vd) &&
        require_align(vd, s->lmul) &&
        require_align(vs, s->lmul);
}
300 * Check function for vector instruction with format:
301 * single-width result and single-width sources (SEW = SEW op SEW)
303 * Rules to be checked here:
304 * 1. Destination vector register group for a masked vector
305 * instruction cannot overlap the source mask register (v0).
307 * 2. Destination vector register number is multiples of LMUL.
309 * 3. Source (vs2, vs1) vector register number are multiples of LMUL.
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    /* Same as the single-source check, plus alignment of vs1. */
    return vext_check_ss(s, vd, vs2, vm) &&
        require_align(vs1, s->lmul);
}
/* Mask-producing op with one vector source (destination EEW = 1 bit). */
static bool vext_check_ms(DisasContext *s, int vd, int vs)
{
    bool ret = require_align(vs, s->lmul);
    if (vd != vs) {
        ret &= require_noover(vd, 0, vs, s->lmul);
    }
    return ret;
}
328 * Check function for maskable vector instruction with format:
329 * single-width result and single-width sources (SEW = SEW op SEW)
331 * Rules to be checked here:
332 * 1. Source (vs2, vs1) vector register number are multiples of LMUL.
334 * 2. Destination vector register cannot overlap a source vector
335 * register (vs2, vs1) group.
337 * 3. The destination vector register group for a masked vector
338 * instruction cannot overlap the source mask register (v0),
339 * unless the destination vector register is being written
340 * with a mask value (e.g., comparisons) or the scalar result
341 * of a reduction. (Section 5.3)
static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
{
    bool ret = vext_check_ms(s, vd, vs2) &&
        require_align(vs1, s->lmul);
    if (vd != vs1) {
        ret &= require_noover(vd, 0, vs1, s->lmul);
    }
    return ret;
}
354 * Common check function for vector widening instructions
355 * of double-width result (2*SEW).
357 * Rules to be checked here:
358 * 1. The largest vector register group used by an instruction
359 * can not be greater than 8 vector registers (Section 5.2):
362 * 2. Destination vector register number is multiples of 2 * LMUL.
364 * 3. Destination vector register group for a masked vector
365 * instruction cannot overlap the source mask register (v0).
static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
{
    /* 2 * LMUL must still fit in 8 registers and 2 * SEW must exist. */
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           require_align(vd, s->lmul + 1) &&
           require_vm(vm, vd);
}
377 * Common check function for vector narrowing instructions
378 * of single-width result (SEW) and double-width source (2*SEW).
380 * Rules to be checked here:
381 * 1. The largest vector register group used by an instruction
382 * can not be greater than 8 vector registers (Section 5.2):
385 * 2. Source vector register number is multiples of 2 * LMUL.
387 * 3. Destination vector register number is multiples of LMUL.
389 * 4. Destination vector register group for a masked vector
390 * instruction cannot overlap the source mask register (v0).
static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
                                     int vm)
{
    /* The 2*SEW source group uses 2*LMUL alignment; dest uses LMUL. */
    return (s->lmul <= 2) &&
           (s->sew < MO_64) &&
           require_align(vs2, s->lmul + 1) &&
           require_align(vd, s->lmul) &&
           require_vm(vm, vd);
}
/* Double-width destination, one single-width source. */
static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
        require_align(vs, s->lmul) &&
        require_noover(vd, s->lmul + 1, vs, s->lmul);
}
/* Double-width destination, one double-width source. */
static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
{
    return vext_wide_check_common(s, vd, vm) &&
        require_align(vs, s->lmul + 1);
}
417 * Check function for vector instruction with format:
418 * double-width result and single-width sources (2*SEW = SEW op SEW)
420 * Rules to be checked here:
421 * 1. All rules in defined in widen common rules are applied.
422 * 2. Source (vs2, vs1) vector register number are multiples of LMUL.
424 * 3. Destination vector register cannot overlap a source vector
425 * register (vs2, vs1) group.
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    /* vs2 handled by vext_check_ds; add the same constraints for vs1. */
    return vext_check_ds(s, vd, vs2, vm) &&
        require_align(vs1, s->lmul) &&
        require_noover(vd, s->lmul + 1, vs1, s->lmul);
}
436 * Check function for vector instruction with format:
437 * double-width result and double-width source1 and single-width
438 * source2 (2*SEW = 2*SEW op SEW)
440 * Rules to be checked here:
441 * 1. All rules in defined in widen common rules are applied.
442 * 2. Source 1 (vs2) vector register number is multiples of 2 * LMUL.
444 * 3. Source 2 (vs1) vector register number is multiples of LMUL.
446 * 4. Destination vector register cannot overlap a source vector
447 * register (vs1) group.
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    /* vs1 is the single-width operand here; vs2 is double-width. */
    return vext_check_ds(s, vd, vs1, vm) &&
        require_align(vs2, s->lmul + 1);
}
/* Single-width destination, one double-width source (narrowing). */
static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
{
    bool ret = vext_narrow_check_common(s, vd, vs, vm);
    if (vd != vs) {
        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
    }
    return ret;
}
466 * Check function for vector instruction with format:
467 * single-width result and double-width source 1 and single-width
468 * source 2 (SEW = 2*SEW op SEW)
470 * Rules to be checked here:
471 * 1. All rules in defined in narrow common rules are applied.
472 * 2. Destination vector register cannot overlap a source vector
473 * register (vs2) group.
475 * 3. Source 2 (vs1) vector register number is multiples of LMUL.
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
{
    /* vs2 is double-width (handled by vext_check_sd); vs1 is single-width. */
    return vext_check_sd(s, vd, vs2, vm) &&
        require_align(vs1, s->lmul);
}
485 * Check function for vector reduction instructions.
487 * Rules to be checked here:
488 * 1. Source 1 (vs2) vector register number is multiples of LMUL.
static bool vext_check_reduction(DisasContext *s, int vs2)
{
    /* Only the vector operand needs alignment; vd/vs1 are scalars (element 0). */
    return require_align(vs2, s->lmul);
}
497 * Check function for vector slide instructions.
499 * Rules to be checked here:
500 * 1. Source 1 (vs2) vector register number is multiples of LMUL.
502 * 2. Destination vector register number is multiples of LMUL.
504 * 3. Destination vector register group for a masked vector
505 * instruction cannot overlap the source mask register (v0).
507 * 4. The destination vector register group for vslideup, vslide1up,
508 * vfslide1up, cannot overlap the source vector register (vs2) group.
509 * (Section 5.2, 16.3.1, 16.3.3)
static bool vext_check_slide(DisasContext *s, int vd, int vs2,
                             int vm, bool is_over)
{
    bool ret = require_align(vs2, s->lmul) &&
               require_align(vd, s->lmul) &&
               require_vm(vm, vd);
    /* Slide-up variants additionally forbid any vd/vs2 overlap (rule 4). */
    if (is_over) {
        ret &= (vd != vs2);
    }
    return ret;
}
524 * In cpu_get_tb_cpu_state(), set VILL if RVV was not present.
525 * So RVV is also be checked in this function.
527 static bool vext_check_isa_ill(DisasContext *s)
532 /* common translation macro */
/* common translation macro: run CHECK, then OP, both parameterized by EEW */
#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK)        \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
{                                                            \
    if (CHECK(s, a, EEW)) {                                  \
        return OP(s, a, EEW);                                \
    }                                                        \
    return false;                                            \
}
static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
{
    /* EMUL (log2) clamped at 0: fractional EMUL still uses one register. */
    int8_t emul = eew - s->sew + s->lmul;
    return emul < 0 ? 0 : emul;
}
549 *** unit stride load and store
551 typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
/* Emit a unit-stride load/store via the out-of-line helper fn. */
static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s,
                          bool is_store)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    /* Skip the body entirely when vl == 0. */
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);

    /*
     * As simd_desc supports at most 2048 bytes, and in this implementation,
     * the max vector group length is 4096 bytes. So split it into two parts.
     *
     * The first part is vlen in bytes, encoded in maxsz of simd_desc.
     * The second part is lmul, encoded in data of simd_desc.
     */
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}
static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride load */
        { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
          gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
        /* unmasked unit stride load */
        { gen_helper_vle8_v, gen_helper_vle16_v,
          gen_helper_vle32_v, gen_helper_vle64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    /*
     * Vector load/store instructions have the EEW encoded
     * directly in the instructions. The maximum vector size is
     * calculated with EMUL rather than LMUL.
     */
    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
}
static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}
/* Unit-stride loads; EEW is encoded directly in each opcode. */
GEN_VEXT_TRANS(vle8_v,  MO_8,  r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)
static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride store */
        { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
          gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
        /* unmasked unit stride store */
        { gen_helper_vse8_v, gen_helper_vse16_v,
          gen_helper_vse32_v, gen_helper_vse64_v }
    };

    fn = fns[a->vm][eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
}
static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
{
    /* Stores have no vd overlap rule, so no vm check is needed. */
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}
/* Unit-stride stores. */
GEN_VEXT_TRANS(vse8_v,  MO_8,  r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)
674 *** stride load and store
676 typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
677 TCGv, TCGv_env, TCGv_i32);
/* Emit a strided load/store via the out-of-line helper fn. */
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s, bool is_store)
{
    TCGv_ptr dest, mask;
    TCGv base, stride;
    TCGv_i32 desc;

    /* Skip the body entirely when vl == 0. */
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    stride = get_gpr(s, rs2, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, stride, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        gen_helper_vlse8_v, gen_helper_vlse16_v,
        gen_helper_vlse32_v, gen_helper_vlse64_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);
}
/* Strided loads. */
GEN_VEXT_TRANS(vlse8_v,  MO_8,  rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        /* masked stride store */
        gen_helper_vsse8_v,  gen_helper_vsse16_v,
        gen_helper_vsse32_v,  gen_helper_vsse64_v
    };

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);
}
/* Strided stores. */
GEN_VEXT_TRANS(vsse8_v,  MO_8,  rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)
780 *** index load and store
782 typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
783 TCGv_ptr, TCGv_env, TCGv_i32);
/* Emit an indexed load/store; the index operand is a vector register group. */
static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s, bool is_store)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    /* Skip the body entirely when vl == 0. */
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    gen_set_label(over);
    return true;
}
static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    /* Indexed by [offset EEW][data SEW]. */
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei8_8_v,  gen_helper_vlxei8_16_v,
          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }
    };

    fn = fns[eew][s->sew];

    /* The data group uses SEW/LMUL, so EMUL is derived from SEW here. */
    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
}
static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);
}
/* Indexed loads; EEW here is the offset element width. */
GEN_VEXT_TRANS(vlxei8_v,  MO_8,  rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)
static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    /* Indexed by [offset EEW][data SEW]. */
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei8_8_v,  gen_helper_vsxei8_16_v,
          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }
    };

    fn = fns[eew][s->sew];

    /* The data group uses SEW/LMUL, so EMUL is derived from SEW here. */
    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
}
static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);
}
/* Indexed stores. */
GEN_VEXT_TRANS(vsxei8_v,  MO_8,  rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)
925 *** unit stride fault-only-first load
/* Emit a fault-only-first unit-stride load (always a load, never a store). */
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    /* Skip the body entirely when vl == 0. */
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}
static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[4] = {
        gen_helper_vle8ff_v, gen_helper_vle16ff_v,
        gen_helper_vle32ff_v, gen_helper_vle64ff_v
    };

    fn = fns[eew];
    if (fn == NULL) {
        return false;
    }

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldff_trans(a->rd, a->rs1, data, fn, s);
}
/* Fault-only-first loads share the unit-stride load check. */
GEN_VEXT_TRANS(vle8ff_v,  MO_8,  r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
981 * load and store whole register instructions
983 typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);
/* Emit a whole-register load/store; ignores vl/vtype, no mask operand. */
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
                             gen_helper_ldst_whole *fn, DisasContext *s,
                             bool is_store)
{
    TCGv_ptr dest;
    TCGv base;
    TCGv_i32 desc;

    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
    dest = tcg_temp_new_ptr();
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    base = get_gpr(s, rs1, EXT_NONE);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));

    fn(dest, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);

    if (!is_store) {
        mark_vs_dirty(s);
    }

    return true;
}
1012 * load and store whole register instructions ignore vtype and vl setting.
1013 * Thus, we don't need to check vill bit. (Section 7.9)
/* Whole-register accesses only require RVV and an NF-aligned vd. */
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, IS_STORE)                      \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a)                 \
{                                                                         \
    if (require_rvv(s) &&                                                 \
        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                                 \
        return ldst_whole_trans(a->rd, a->rs1, ARG_NF, gen_helper_##NAME, \
                                s, IS_STORE);                             \
    }                                                                     \
    return false;                                                         \
}
/* Whole-register loads (1/2/4/8 registers) and stores. */
GEN_LDST_WHOLE_TRANS(vl1re8_v,  1, false)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl2re8_v,  2, false)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl4re8_v,  4, false)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl8re8_v,  8, false)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, false)

GEN_LDST_WHOLE_TRANS(vs1r_v, 1, true)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2, true)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4, true)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8, true)
1049 *** Vector Integer Arithmetic Instructions
1053 * MAXSZ returns the maximum vector size can be operated in bytes,
1054 * which is used in GVEC IR when vl_eq_vlmax flag is set to true
1055 * to accerlate vector operation.
static inline uint32_t MAXSZ(DisasContext *s)
{
    /* Group size in bytes: vlen * LMUL / 8 == vlen << (lmul - 3). */
    int scale = s->lmul - 3;
    return scale < 0 ? s->vlen >> -scale : s->vlen << scale;
}
static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
}
1070 typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
1071 uint32_t, uint32_t, uint32_t);
/*
 * OPIVV dispatch: use inline GVEC IR when the whole group is active
 * (vm set and vl == vlmax), otherwise fall back to the out-of-line helper.
 */
static inline bool
do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
              gen_helper_gvec_4_ptr *fn)
{
    TCGLabel *over = gen_new_label();
    if (!opivv_check(s, a)) {
        return false;
    }

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    if (a->vm && s->vl_eq_vlmax) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd),
                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
                MAXSZ(s), MAXSZ(s));
        mark_vs_dirty(s);
    } else {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
        mark_vs_dirty(s);
    }
    gen_set_label(over);
    return true;
}
/* OPIVV with GVEC IR */
#define GEN_OPIVV_GVEC_TRANS(NAME, SUF)                            \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_gvec_4_ptr * const fns[4] = {                \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
    };                                                             \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
}
/* vector-vector integer add/sub */
GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)
1116 typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
1117 TCGv_env, TCGv_i32);
/* Out-of-line OPIVX (vector-scalar) translation via helper fn. */
static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    /* Skip the body entirely when vl == 0. */
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = get_gpr(s, rs1, EXT_NONE);

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(src2);
    mark_vs_dirty(s);
    gen_set_label(over);
    return true;
}
static bool opivx_check(DisasContext *s, arg_rmrr *a)
{
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
}
1160 typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
1161 uint32_t, uint32_t);
/*
 * OPIVX dispatch: use inline GVEC IR with a broadcast scalar when the
 * whole group is active, otherwise the out-of-line helper.
 */
static inline bool
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
              gen_helper_opivx *fn)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        TCGv_i64 src1 = tcg_temp_new_i64();

        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        tcg_temp_free_i64(src1);
        mark_vs_dirty(s);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}
/* OPIVX with GVEC IR */
#define GEN_OPIVX_GVEC_TRANS(NAME, SUF)                            \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_opivx * const fns[4] = {                     \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
    };                                                             \
    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
}
1196 GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
1197 GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)
1199 static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1201 tcg_gen_vec_sub8_i64(d, b, a);
1204 static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1206 tcg_gen_vec_sub16_i64(d, b, a);
1209 static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1211 tcg_gen_sub_i32(ret, arg2, arg1);
1214 static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1216 tcg_gen_sub_i64(ret, arg2, arg1);
1219 static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
1221 tcg_gen_sub_vec(vece, r, b, a);
1224 static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
1225 TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
1227 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
1228 static const GVecGen2s rsub_op[4] = {
1229 { .fni8 = gen_vec_rsub8_i64,
1230 .fniv = gen_rsub_vec,
1231 .fno = gen_helper_vec_rsubs8,
1232 .opt_opc = vecop_list,
1234 { .fni8 = gen_vec_rsub16_i64,
1235 .fniv = gen_rsub_vec,
1236 .fno = gen_helper_vec_rsubs16,
1237 .opt_opc = vecop_list,
1239 { .fni4 = gen_rsub_i32,
1240 .fniv = gen_rsub_vec,
1241 .fno = gen_helper_vec_rsubs32,
1242 .opt_opc = vecop_list,
1244 { .fni8 = gen_rsub_i64,
1245 .fniv = gen_rsub_vec,
1246 .fno = gen_helper_vec_rsubs64,
1247 .opt_opc = vecop_list,
1248 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1252 tcg_debug_assert(vece <= MO_64);
1253 tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
1256 GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
1259 IMM_ZX, /* Zero-extended */
1260 IMM_SX, /* Sign-extended */
1261 IMM_TRUNC_SEW, /* Truncate to log(SEW) bits */
1262 IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */
1265 static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
1269 return extract64(imm, 0, 5);
1271 return sextract64(imm, 0, 5);
1273 return extract64(imm, 0, s->sew + 3);
1274 case IMM_TRUNC_2SEW:
1275 return extract64(imm, 0, s->sew + 4);
1277 g_assert_not_reached();
1281 static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
1282 gen_helper_opivx *fn, DisasContext *s,
1283 imm_mode_t imm_mode)
1285 TCGv_ptr dest, src2, mask;
1290 TCGLabel *over = gen_new_label();
1291 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1293 dest = tcg_temp_new_ptr();
1294 mask = tcg_temp_new_ptr();
1295 src2 = tcg_temp_new_ptr();
1296 src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));
1298 data = FIELD_DP32(data, VDATA, VM, vm);
1299 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1300 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1302 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1303 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
1304 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1306 fn(dest, mask, src1, src2, cpu_env, desc);
1308 tcg_temp_free_ptr(dest);
1309 tcg_temp_free_ptr(mask);
1310 tcg_temp_free_ptr(src2);
1312 gen_set_label(over);
1316 typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
1317 uint32_t, uint32_t);
1320 do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
1321 gen_helper_opivx *fn, imm_mode_t imm_mode)
1323 if (!opivx_check(s, a)) {
1327 if (a->vm && s->vl_eq_vlmax) {
1328 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1329 extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
1333 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
1336 /* OPIVI with GVEC IR */
1337 #define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \
1338 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1340 static gen_helper_opivx * const fns[4] = { \
1341 gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
1342 gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
1344 return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
1345 fns[s->sew], IMM_MODE); \
1348 GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)
1350 static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
1351 int64_t c, uint32_t oprsz, uint32_t maxsz)
1353 TCGv_i64 tmp = tcg_constant_i64(c);
1354 tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
1357 GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)
1359 /* Vector Widening Integer Add/Subtract */
1361 /* OPIVV with WIDEN */
1362 static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
1364 return require_rvv(s) &&
1365 vext_check_isa_ill(s) &&
1366 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
1369 static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
1370 gen_helper_gvec_4_ptr *fn,
1371 bool (*checkfn)(DisasContext *, arg_rmrr *))
1373 if (checkfn(s, a)) {
1375 TCGLabel *over = gen_new_label();
1376 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1378 data = FIELD_DP32(data, VDATA, VM, a->vm);
1379 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1380 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1381 vreg_ofs(s, a->rs1),
1382 vreg_ofs(s, a->rs2),
1383 cpu_env, s->vlen / 8, s->vlen / 8,
1386 gen_set_label(over);
1392 #define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
1393 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1395 static gen_helper_gvec_4_ptr * const fns[3] = { \
1396 gen_helper_##NAME##_b, \
1397 gen_helper_##NAME##_h, \
1398 gen_helper_##NAME##_w \
1400 return do_opivv_widen(s, a, fns[s->sew], CHECK); \
1403 GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
1404 GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
1405 GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
1406 GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
1408 /* OPIVX with WIDEN */
1409 static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
1411 return require_rvv(s) &&
1412 vext_check_isa_ill(s) &&
1413 vext_check_ds(s, a->rd, a->rs2, a->vm);
1416 static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
1417 gen_helper_opivx *fn)
1419 if (opivx_widen_check(s, a)) {
1420 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1425 #define GEN_OPIVX_WIDEN_TRANS(NAME) \
1426 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1428 static gen_helper_opivx * const fns[3] = { \
1429 gen_helper_##NAME##_b, \
1430 gen_helper_##NAME##_h, \
1431 gen_helper_##NAME##_w \
1433 return do_opivx_widen(s, a, fns[s->sew]); \
1436 GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
1437 GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
1438 GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
1439 GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
1441 /* WIDEN OPIVV with WIDEN */
1442 static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
1444 return require_rvv(s) &&
1445 vext_check_isa_ill(s) &&
1446 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
1449 static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
1450 gen_helper_gvec_4_ptr *fn)
1452 if (opiwv_widen_check(s, a)) {
1454 TCGLabel *over = gen_new_label();
1455 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1457 data = FIELD_DP32(data, VDATA, VM, a->vm);
1458 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1459 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1460 vreg_ofs(s, a->rs1),
1461 vreg_ofs(s, a->rs2),
1462 cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
1464 gen_set_label(over);
1470 #define GEN_OPIWV_WIDEN_TRANS(NAME) \
1471 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1473 static gen_helper_gvec_4_ptr * const fns[3] = { \
1474 gen_helper_##NAME##_b, \
1475 gen_helper_##NAME##_h, \
1476 gen_helper_##NAME##_w \
1478 return do_opiwv_widen(s, a, fns[s->sew]); \
1481 GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
1482 GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
1483 GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
1484 GEN_OPIWV_WIDEN_TRANS(vwsub_wv)
1486 /* WIDEN OPIVX with WIDEN */
1487 static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
1489 return require_rvv(s) &&
1490 vext_check_isa_ill(s) &&
1491 vext_check_dd(s, a->rd, a->rs2, a->vm);
1494 static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
1495 gen_helper_opivx *fn)
1497 if (opiwx_widen_check(s, a)) {
1498 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1503 #define GEN_OPIWX_WIDEN_TRANS(NAME) \
1504 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1506 static gen_helper_opivx * const fns[3] = { \
1507 gen_helper_##NAME##_b, \
1508 gen_helper_##NAME##_h, \
1509 gen_helper_##NAME##_w \
1511 return do_opiwx_widen(s, a, fns[s->sew]); \
1514 GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
1515 GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
1516 GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
1517 GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
/* OPIVV without GVEC IR */
#define GEN_OPIVV_TRANS(NAME, CHECK)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[4] = {            \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,          \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,          \
        };                                                         \
        TCGLabel *over = gen_new_label();                          \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
                                                                   \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), cpu_env,           \
                           s->vlen / 8, s->vlen / 8, data,         \
                           fns[s->sew]);                           \
        mark_vs_dirty(s);                                          \
        gen_set_label(over);                                       \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}
1548 * For vadc and vsbc, an illegal instruction exception is raised if the
1549 * destination vector register is v0 and LMUL > 1. (Section 12.4)
1551 static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
1553 return require_rvv(s) &&
1554 vext_check_isa_ill(s) &&
1556 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1559 GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
1560 GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)
1563 * For vmadc and vmsbc, an illegal instruction exception is raised if the
1564 * destination vector register overlaps a source vector register group.
1566 static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
1568 return require_rvv(s) &&
1569 vext_check_isa_ill(s) &&
1570 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1573 GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
1574 GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)
1576 static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
1578 return require_rvv(s) &&
1579 vext_check_isa_ill(s) &&
1581 vext_check_ss(s, a->rd, a->rs2, a->vm);
1584 /* OPIVX without GVEC IR */
1585 #define GEN_OPIVX_TRANS(NAME, CHECK) \
1586 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1588 if (CHECK(s, a)) { \
1589 static gen_helper_opivx * const fns[4] = { \
1590 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1591 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1594 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1599 GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
1600 GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)
1602 static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
1604 return require_rvv(s) &&
1605 vext_check_isa_ill(s) &&
1606 vext_check_ms(s, a->rd, a->rs2);
1609 GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
1610 GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)
1612 /* OPIVI without GVEC IR */
1613 #define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \
1614 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1616 if (CHECK(s, a)) { \
1617 static gen_helper_opivx * const fns[4] = { \
1618 gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
1619 gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
1621 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1622 fns[s->sew], s, IMM_MODE); \
1627 GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
1628 GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)
1630 /* Vector Bitwise Logical Instructions */
1631 GEN_OPIVV_GVEC_TRANS(vand_vv, and)
1632 GEN_OPIVV_GVEC_TRANS(vor_vv, or)
1633 GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
1634 GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
1635 GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
1636 GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
1637 GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
1638 GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori)
1639 GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)
1641 /* Vector Single-Width Bit Shift Instructions */
1642 GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
1643 GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv)
1644 GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv)
1646 typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
1647 uint32_t, uint32_t);
1650 do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
1651 gen_helper_opivx *fn)
1653 if (!opivx_check(s, a)) {
1657 if (a->vm && s->vl_eq_vlmax) {
1658 TCGv_i32 src1 = tcg_temp_new_i32();
1660 tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
1661 tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
1662 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1663 src1, MAXSZ(s), MAXSZ(s));
1665 tcg_temp_free_i32(src1);
1669 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1672 #define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
1673 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1675 static gen_helper_opivx * const fns[4] = { \
1676 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1677 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1680 return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1683 GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
1684 GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
1685 GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)
1687 GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
1688 GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
1689 GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)
1691 /* Vector Narrowing Integer Right Shift Instructions */
1692 static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
1694 return require_rvv(s) &&
1695 vext_check_isa_ill(s) &&
1696 vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
1699 /* OPIVV with NARROW */
1700 #define GEN_OPIWV_NARROW_TRANS(NAME) \
1701 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1703 if (opiwv_narrow_check(s, a)) { \
1704 uint32_t data = 0; \
1705 static gen_helper_gvec_4_ptr * const fns[3] = { \
1706 gen_helper_##NAME##_b, \
1707 gen_helper_##NAME##_h, \
1708 gen_helper_##NAME##_w, \
1710 TCGLabel *over = gen_new_label(); \
1711 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
1713 data = FIELD_DP32(data, VDATA, VM, a->vm); \
1714 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
1715 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
1716 vreg_ofs(s, a->rs1), \
1717 vreg_ofs(s, a->rs2), cpu_env, \
1718 s->vlen / 8, s->vlen / 8, data, \
1721 gen_set_label(over); \
1726 GEN_OPIWV_NARROW_TRANS(vnsra_wv)
1727 GEN_OPIWV_NARROW_TRANS(vnsrl_wv)
1729 static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
1731 return require_rvv(s) &&
1732 vext_check_isa_ill(s) &&
1733 vext_check_sd(s, a->rd, a->rs2, a->vm);
1736 /* OPIVX with NARROW */
1737 #define GEN_OPIWX_NARROW_TRANS(NAME) \
1738 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1740 if (opiwx_narrow_check(s, a)) { \
1741 static gen_helper_opivx * const fns[3] = { \
1742 gen_helper_##NAME##_b, \
1743 gen_helper_##NAME##_h, \
1744 gen_helper_##NAME##_w, \
1746 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1751 GEN_OPIWX_NARROW_TRANS(vnsra_wx)
1752 GEN_OPIWX_NARROW_TRANS(vnsrl_wx)
1754 /* OPIWI with NARROW */
1755 #define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \
1756 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1758 if (opiwx_narrow_check(s, a)) { \
1759 static gen_helper_opivx * const fns[3] = { \
1760 gen_helper_##OPIVX##_b, \
1761 gen_helper_##OPIVX##_h, \
1762 gen_helper_##OPIVX##_w, \
1764 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1765 fns[s->sew], s, IMM_MODE); \
1770 GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
1771 GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)
1773 /* Vector Integer Comparison Instructions */
1775 * For all comparison instructions, an illegal instruction exception is raised
1776 * if the destination vector register overlaps a source vector register group
1779 static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
1781 return require_rvv(s) &&
1782 vext_check_isa_ill(s) &&
1783 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1786 GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
1787 GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
1788 GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
1789 GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
1790 GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
1791 GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
1793 static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
1795 return require_rvv(s) &&
1796 vext_check_isa_ill(s) &&
1797 vext_check_ms(s, a->rd, a->rs2);
1800 GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
1801 GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
1802 GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
1803 GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
1804 GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
1805 GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
1806 GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
1807 GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
1809 GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
1810 GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
1811 GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check)
1812 GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
1813 GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check)
1814 GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)
1816 /* Vector Integer Min/Max Instructions */
1817 GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
1818 GEN_OPIVV_GVEC_TRANS(vmin_vv, smin)
1819 GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
1820 GEN_OPIVV_GVEC_TRANS(vmax_vv, smax)
1821 GEN_OPIVX_TRANS(vminu_vx, opivx_check)
1822 GEN_OPIVX_TRANS(vmin_vx, opivx_check)
1823 GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
1824 GEN_OPIVX_TRANS(vmax_vx, opivx_check)
1826 /* Vector Single-Width Integer Multiply Instructions */
1827 GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
1828 GEN_OPIVV_TRANS(vmulh_vv, opivv_check)
1829 GEN_OPIVV_TRANS(vmulhu_vv, opivv_check)
1830 GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check)
1831 GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
1832 GEN_OPIVX_TRANS(vmulh_vx, opivx_check)
1833 GEN_OPIVX_TRANS(vmulhu_vx, opivx_check)
1834 GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check)
1836 /* Vector Integer Divide Instructions */
1837 GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
1838 GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
1839 GEN_OPIVV_TRANS(vremu_vv, opivv_check)
1840 GEN_OPIVV_TRANS(vrem_vv, opivv_check)
1841 GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
1842 GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
1843 GEN_OPIVX_TRANS(vremu_vx, opivx_check)
1844 GEN_OPIVX_TRANS(vrem_vx, opivx_check)
1846 /* Vector Widening Integer Multiply Instructions */
1847 GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
1848 GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
1849 GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
1850 GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
1851 GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
1852 GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
1854 /* Vector Single-Width Integer Multiply-Add Instructions */
1855 GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
1856 GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
1857 GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
1858 GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
1859 GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
1860 GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
1861 GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
1862 GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
1864 /* Vector Widening Integer Multiply-Add Instructions */
1865 GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
1866 GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
1867 GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
1868 GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
1869 GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
1870 GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
1871 GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
1873 /* Vector Integer Merge and Move Instructions */
1874 static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
1876 if (require_rvv(s) &&
1877 vext_check_isa_ill(s) &&
1878 /* vmv.v.v has rs2 = 0 and vm = 1 */
1879 vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
1880 if (s->vl_eq_vlmax) {
1881 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
1882 vreg_ofs(s, a->rs1),
1883 MAXSZ(s), MAXSZ(s));
1885 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1886 static gen_helper_gvec_2_ptr * const fns[4] = {
1887 gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
1888 gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
1890 TCGLabel *over = gen_new_label();
1891 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1893 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
1894 cpu_env, s->vlen / 8, s->vlen / 8, data,
1896 gen_set_label(over);
1904 typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
1905 static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
1907 if (require_rvv(s) &&
1908 vext_check_isa_ill(s) &&
1909 /* vmv.v.x has rs2 = 0 and vm = 1 */
1910 vext_check_ss(s, a->rd, 0, 1)) {
1912 TCGLabel *over = gen_new_label();
1913 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1915 s1 = get_gpr(s, a->rs1, EXT_SIGN);
1917 if (s->vl_eq_vlmax) {
1918 tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
1919 MAXSZ(s), MAXSZ(s), s1);
1922 TCGv_i64 s1_i64 = tcg_temp_new_i64();
1923 TCGv_ptr dest = tcg_temp_new_ptr();
1924 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1925 static gen_helper_vmv_vx * const fns[4] = {
1926 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1927 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1930 tcg_gen_ext_tl_i64(s1_i64, s1);
1931 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1932 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1933 fns[s->sew](dest, s1_i64, cpu_env, desc);
1935 tcg_temp_free_ptr(dest);
1936 tcg_temp_free_i64(s1_i64);
1940 gen_set_label(over);
1946 static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
1948 if (require_rvv(s) &&
1949 vext_check_isa_ill(s) &&
1950 /* vmv.v.i has rs2 = 0 and vm = 1 */
1951 vext_check_ss(s, a->rd, 0, 1)) {
1952 int64_t simm = sextract64(a->rs1, 0, 5);
1953 if (s->vl_eq_vlmax) {
1954 tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
1955 MAXSZ(s), MAXSZ(s), simm);
1961 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1962 static gen_helper_vmv_vx * const fns[4] = {
1963 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1964 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1966 TCGLabel *over = gen_new_label();
1967 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1969 s1 = tcg_constant_i64(simm);
1970 dest = tcg_temp_new_ptr();
1971 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1972 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1973 fns[s->sew](dest, s1, cpu_env, desc);
1975 tcg_temp_free_ptr(dest);
1977 gen_set_label(over);
1984 GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
1985 GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
1986 GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)
1989 *** Vector Fixed-Point Arithmetic Instructions
1992 /* Vector Single-Width Saturating Add and Subtract */
1993 GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
1994 GEN_OPIVV_TRANS(vsadd_vv, opivv_check)
1995 GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
1996 GEN_OPIVV_TRANS(vssub_vv, opivv_check)
1997 GEN_OPIVX_TRANS(vsaddu_vx, opivx_check)
1998 GEN_OPIVX_TRANS(vsadd_vx, opivx_check)
1999 GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
2000 GEN_OPIVX_TRANS(vssub_vx, opivx_check)
2001 GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check)
2002 GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
2004 /* Vector Single-Width Averaging Add and Subtract */
2005 GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
2006 GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
2007 GEN_OPIVV_TRANS(vasub_vv, opivv_check)
2008 GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
2009 GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
2010 GEN_OPIVX_TRANS(vaaddu_vx, opivx_check)
2011 GEN_OPIVX_TRANS(vasub_vx, opivx_check)
2012 GEN_OPIVX_TRANS(vasubu_vx, opivx_check)
2014 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
2015 GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
2016 GEN_OPIVX_TRANS(vsmul_vx, opivx_check)
2018 /* Vector Single-Width Scaling Shift Instructions */
2019 GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
2020 GEN_OPIVV_TRANS(vssra_vv, opivv_check)
2021 GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
2022 GEN_OPIVX_TRANS(vssra_vx, opivx_check)
2023 GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check)
2024 GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check)
2026 /* Vector Narrowing Fixed-Point Clip Instructions */
2027 GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
2028 GEN_OPIWV_NARROW_TRANS(vnclip_wv)
2029 GEN_OPIWX_NARROW_TRANS(vnclipu_wx)
2030 GEN_OPIWX_NARROW_TRANS(vnclip_wx)
2031 GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx)
2032 GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx)
 *** Vector Floating-Point Arithmetic Instructions
2039 * As RVF-only cpus always have values NaN-boxed to 64-bits,
2040 * RVF and RVD can be treated equally.
2041 * We don't have to deal with the cases of: SEW > FLEN.
2043 * If SEW < FLEN, check whether input fp register is a valid
2044 * NaN-boxed value, in which case the least-significant SEW bits
 * of the f register are used, else the canonical NaN value is used.
2047 static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
2051 gen_check_nanbox_h(out, in);
2054 gen_check_nanbox_s(out, in);
2057 tcg_gen_mov_i64(out, in);
2060 g_assert_not_reached();
2064 /* Vector Single-Width Floating-Point Add/Subtract Instructions */
2067 * If the current SEW does not correspond to a supported IEEE floating-point
2068 * type, an illegal instruction exception is raised.
2070 static bool opfvv_check(DisasContext *s, arg_rmrr *a)
2072 return require_rvv(s) &&
2074 vext_check_isa_ill(s) &&
2075 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
2078 /* OPFVV without GVEC IR */
2079 #define GEN_OPFVV_TRANS(NAME, CHECK) \
2080 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2082 if (CHECK(s, a)) { \
2083 uint32_t data = 0; \
2084 static gen_helper_gvec_4_ptr * const fns[3] = { \
2085 gen_helper_##NAME##_h, \
2086 gen_helper_##NAME##_w, \
2087 gen_helper_##NAME##_d, \
2089 TCGLabel *over = gen_new_label(); \
2090 gen_set_rm(s, RISCV_FRM_DYN); \
2091 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2093 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2094 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2095 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2096 vreg_ofs(s, a->rs1), \
2097 vreg_ofs(s, a->rs2), cpu_env, \
2098 s->vlen / 8, s->vlen / 8, data, \
2101 gen_set_label(over); \
2106 GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
2107 GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
2109 typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
2110 TCGv_env, TCGv_i32);
2112 static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
2113 uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
2115 TCGv_ptr dest, src2, mask;
2119 TCGLabel *over = gen_new_label();
2120 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2122 dest = tcg_temp_new_ptr();
2123 mask = tcg_temp_new_ptr();
2124 src2 = tcg_temp_new_ptr();
2125 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2127 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
2128 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
2129 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2131 /* NaN-box f[rs1] */
2132 t1 = tcg_temp_new_i64();
2133 do_nanbox(s, t1, cpu_fpr[rs1]);
2135 fn(dest, mask, t1, src2, cpu_env, desc);
2137 tcg_temp_free_ptr(dest);
2138 tcg_temp_free_ptr(mask);
2139 tcg_temp_free_ptr(src2);
2140 tcg_temp_free_i64(t1);
2142 gen_set_label(over);
2147 * If the current SEW does not correspond to a supported IEEE floating-point
2148 * type, an illegal instruction exception is raised
2150 static bool opfvf_check(DisasContext *s, arg_rmrr *a)
2152 return require_rvv(s) &&
2154 vext_check_isa_ill(s) &&
2155 vext_check_ss(s, a->rd, a->rs2, a->vm);
2158 /* OPFVF without GVEC IR */
2159 #define GEN_OPFVF_TRANS(NAME, CHECK) \
2160 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2162 if (CHECK(s, a)) { \
2163 uint32_t data = 0; \
2164 static gen_helper_opfvf *const fns[3] = { \
2165 gen_helper_##NAME##_h, \
2166 gen_helper_##NAME##_w, \
2167 gen_helper_##NAME##_d, \
2169 gen_set_rm(s, RISCV_FRM_DYN); \
2170 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2171 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2172 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2173 fns[s->sew - 1], s); \
2178 GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
2179 GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
2180 GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
2182 /* Vector Widening Floating-Point Add/Subtract Instructions */
2183 static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
2185 return require_rvv(s) &&
2187 vext_check_isa_ill(s) &&
2188 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
2191 /* OPFVV with WIDEN */
2192 #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
2193 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2195 if (CHECK(s, a)) { \
2196 uint32_t data = 0; \
2197 static gen_helper_gvec_4_ptr * const fns[2] = { \
2198 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2200 TCGLabel *over = gen_new_label(); \
2201 gen_set_rm(s, RISCV_FRM_DYN); \
2202 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2204 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2205 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2206 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2207 vreg_ofs(s, a->rs1), \
2208 vreg_ofs(s, a->rs2), cpu_env, \
2209 s->vlen / 8, s->vlen / 8, data, \
2212 gen_set_label(over); \
2218 GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
2219 GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
2221 static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
2223 return require_rvv(s) &&
2225 vext_check_isa_ill(s) &&
2226 vext_check_ds(s, a->rd, a->rs2, a->vm);
2229 /* OPFVF with WIDEN */
2230 #define GEN_OPFVF_WIDEN_TRANS(NAME) \
2231 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2233 if (opfvf_widen_check(s, a)) { \
2234 uint32_t data = 0; \
2235 static gen_helper_opfvf *const fns[2] = { \
2236 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2238 gen_set_rm(s, RISCV_FRM_DYN); \
2239 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2240 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2241 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2242 fns[s->sew - 1], s); \
2247 GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
2248 GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
2250 static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
2252 return require_rvv(s) &&
2254 vext_check_isa_ill(s) &&
2255 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
2258 /* WIDEN OPFVV with WIDEN */
2259 #define GEN_OPFWV_WIDEN_TRANS(NAME) \
2260 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2262 if (opfwv_widen_check(s, a)) { \
2263 uint32_t data = 0; \
2264 static gen_helper_gvec_4_ptr * const fns[2] = { \
2265 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2267 TCGLabel *over = gen_new_label(); \
2268 gen_set_rm(s, RISCV_FRM_DYN); \
2269 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2271 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2272 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2273 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2274 vreg_ofs(s, a->rs1), \
2275 vreg_ofs(s, a->rs2), cpu_env, \
2276 s->vlen / 8, s->vlen / 8, data, \
2279 gen_set_label(over); \
2285 GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
2286 GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
2288 static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2290 return require_rvv(s) &&
2292 vext_check_isa_ill(s) &&
2293 vext_check_dd(s, a->rd, a->rs2, a->vm);
2296 /* WIDEN OPFVF with WIDEN */
2297 #define GEN_OPFWF_WIDEN_TRANS(NAME) \
2298 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2300 if (opfwf_widen_check(s, a)) { \
2301 uint32_t data = 0; \
2302 static gen_helper_opfvf *const fns[2] = { \
2303 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2305 gen_set_rm(s, RISCV_FRM_DYN); \
2306 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2307 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2308 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2309 fns[s->sew - 1], s); \
2314 GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
2315 GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
/* Vector Single-Width Floating-Point Multiply/Divide Instructions */
GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmul_vf, opfvf_check)
GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check)
/* vfrdiv.vf: reverse divide, vd[i] = f[rs1] / vs2[i] */
GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
/* Vector Widening Floating-Point Multiply */
GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
/* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
/* Vector Widening Floating-Point Fused Multiply-Add Instructions */
GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
2356 /* Vector Floating-Point Square-Root Instruction */
2359 * If the current SEW does not correspond to a supported IEEE floating-point
2360 * type, an illegal instruction exception is raised
2362 static bool opfv_check(DisasContext *s, arg_rmr *a)
2364 return require_rvv(s) &&
2366 vext_check_isa_ill(s) &&
2367 /* OPFV instructions ignore vs1 check */
2368 vext_check_ss(s, a->rd, a->rs2, a->vm);
2371 static bool do_opfv(DisasContext *s, arg_rmr *a,
2372 gen_helper_gvec_3_ptr *fn,
2373 bool (*checkfn)(DisasContext *, arg_rmr *),
2376 if (checkfn(s, a)) {
2378 TCGLabel *over = gen_new_label();
2380 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2382 data = FIELD_DP32(data, VDATA, VM, a->vm);
2383 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2384 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2385 vreg_ofs(s, a->rs2), cpu_env,
2386 s->vlen / 8, s->vlen / 8, data, fn);
2388 gen_set_label(over);
2394 #define GEN_OPFV_TRANS(NAME, CHECK, FRM) \
2395 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2397 static gen_helper_gvec_3_ptr * const fns[3] = { \
2398 gen_helper_##NAME##_h, \
2399 gen_helper_##NAME##_w, \
2400 gen_helper_##NAME##_d \
2402 return do_opfv(s, a, fns[s->sew - 1], CHECK, FRM); \
2405 GEN_OPFV_TRANS(vfsqrt_v, opfv_check, RISCV_FRM_DYN)
/* Vector Floating-Point MIN/MAX Instructions */
GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
/* Vector Floating-Point Sign-Injection Instructions */
GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
2421 /* Vector Floating-Point Compare Instructions */
2422 static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2424 return require_rvv(s) &&
2426 vext_check_isa_ill(s) &&
2427 vext_check_mss(s, a->rd, a->rs1, a->rs2);
2430 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
2431 GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
2432 GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
2433 GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
2435 static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2437 return require_rvv(s) &&
2439 vext_check_isa_ill(s) &&
2440 vext_check_ms(s, a->rd, a->rs2);
2443 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
2444 GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
2445 GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
2446 GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
2447 GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
2448 GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
2450 /* Vector Floating-Point Classify Instruction */
2451 GEN_OPFV_TRANS(vfclass_v, opfv_check, RISCV_FRM_DYN)
2453 /* Vector Floating-Point Merge Instruction */
2454 GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check)
2456 static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2458 if (require_rvv(s) &&
2460 vext_check_isa_ill(s) &&
2461 require_align(a->rd, s->lmul)) {
2464 if (s->vl_eq_vlmax) {
2465 t1 = tcg_temp_new_i64();
2466 /* NaN-box f[rs1] */
2467 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2469 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2470 MAXSZ(s), MAXSZ(s), t1);
2475 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2476 static gen_helper_vmv_vx * const fns[3] = {
2477 gen_helper_vmv_v_x_h,
2478 gen_helper_vmv_v_x_w,
2479 gen_helper_vmv_v_x_d,
2481 TCGLabel *over = gen_new_label();
2482 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2484 t1 = tcg_temp_new_i64();
2485 /* NaN-box f[rs1] */
2486 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2488 dest = tcg_temp_new_ptr();
2489 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2490 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
2492 fns[s->sew - 1](dest, t1, cpu_env, desc);
2494 tcg_temp_free_ptr(dest);
2496 gen_set_label(over);
2498 tcg_temp_free_i64(t1);
2504 /* Single-Width Floating-Point/Integer Type-Convert Instructions */
2505 #define GEN_OPFV_CVT_TRANS(NAME, HELPER, FRM) \
2506 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2508 static gen_helper_gvec_3_ptr * const fns[3] = { \
2509 gen_helper_##HELPER##_h, \
2510 gen_helper_##HELPER##_w, \
2511 gen_helper_##HELPER##_d \
2513 return do_opfv(s, a, fns[s->sew - 1], opfv_check, FRM); \
2516 GEN_OPFV_CVT_TRANS(vfcvt_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_DYN)
2517 GEN_OPFV_CVT_TRANS(vfcvt_x_f_v, vfcvt_x_f_v, RISCV_FRM_DYN)
2518 GEN_OPFV_CVT_TRANS(vfcvt_f_xu_v, vfcvt_f_xu_v, RISCV_FRM_DYN)
2519 GEN_OPFV_CVT_TRANS(vfcvt_f_x_v, vfcvt_f_x_v, RISCV_FRM_DYN)
2520 /* Reuse the helper functions from vfcvt.xu.f.v and vfcvt.x.f.v */
2521 GEN_OPFV_CVT_TRANS(vfcvt_rtz_xu_f_v, vfcvt_xu_f_v, RISCV_FRM_RTZ)
2522 GEN_OPFV_CVT_TRANS(vfcvt_rtz_x_f_v, vfcvt_x_f_v, RISCV_FRM_RTZ)
2524 /* Widening Floating-Point/Integer Type-Convert Instructions */
2527 * If the current SEW does not correspond to a supported IEEE floating-point
2528 * type, an illegal instruction exception is raised
2530 static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2532 return require_rvv(s) &&
2533 require_scale_rvf(s) &&
2535 vext_check_isa_ill(s) &&
2536 vext_check_ds(s, a->rd, a->rs2, a->vm);
2539 #define GEN_OPFV_WIDEN_TRANS(NAME, HELPER, FRM) \
2540 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2542 if (opfv_widen_check(s, a)) { \
2543 uint32_t data = 0; \
2544 static gen_helper_gvec_3_ptr * const fns[2] = { \
2545 gen_helper_##HELPER##_h, \
2546 gen_helper_##HELPER##_w, \
2548 TCGLabel *over = gen_new_label(); \
2549 gen_set_rm(s, FRM); \
2550 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2552 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2553 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2554 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2555 vreg_ofs(s, a->rs2), cpu_env, \
2556 s->vlen / 8, s->vlen / 8, data, \
2559 gen_set_label(over); \
2565 GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v, vfwcvt_xu_f_v, RISCV_FRM_DYN)
2566 GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v, vfwcvt_x_f_v, RISCV_FRM_DYN)
2567 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v, vfwcvt_f_f_v, RISCV_FRM_DYN)
2568 /* Reuse the helper functions from vfwcvt.xu.f.v and vfwcvt.x.f.v */
2569 GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_xu_f_v, vfwcvt_xu_f_v, RISCV_FRM_RTZ)
2570 GEN_OPFV_WIDEN_TRANS(vfwcvt_rtz_x_f_v, vfwcvt_x_f_v, RISCV_FRM_RTZ)
2572 static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
2574 return require_rvv(s) &&
2575 require_scale_rvf(s) &&
2576 vext_check_isa_ill(s) &&
2577 /* OPFV widening instructions ignore vs1 check */
2578 vext_check_ds(s, a->rd, a->rs2, a->vm);
2581 #define GEN_OPFXV_WIDEN_TRANS(NAME) \
2582 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2584 if (opfxv_widen_check(s, a)) { \
2585 uint32_t data = 0; \
2586 static gen_helper_gvec_3_ptr * const fns[3] = { \
2587 gen_helper_##NAME##_b, \
2588 gen_helper_##NAME##_h, \
2589 gen_helper_##NAME##_w, \
2591 TCGLabel *over = gen_new_label(); \
2592 gen_set_rm(s, RISCV_FRM_DYN); \
2593 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2595 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2596 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2597 vreg_ofs(s, a->rs2), cpu_env, \
2598 s->vlen / 8, s->vlen / 8, data, \
2601 gen_set_label(over); \
2607 GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_xu_v)
2608 GEN_OPFXV_WIDEN_TRANS(vfwcvt_f_x_v)
2610 /* Narrowing Floating-Point/Integer Type-Convert Instructions */
2613 * If the current SEW does not correspond to a supported IEEE floating-point
2614 * type, an illegal instruction exception is raised
2616 static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2618 return require_rvv(s) &&
2620 (s->sew != MO_64) &&
2621 vext_check_isa_ill(s) &&
2622 /* OPFV narrowing instructions ignore vs1 check */
2623 vext_check_sd(s, a->rd, a->rs2, a->vm);
2626 #define GEN_OPFV_NARROW_TRANS(NAME, HELPER, FRM) \
2627 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2629 if (opfv_narrow_check(s, a)) { \
2630 uint32_t data = 0; \
2631 static gen_helper_gvec_3_ptr * const fns[2] = { \
2632 gen_helper_##HELPER##_h, \
2633 gen_helper_##HELPER##_w, \
2635 TCGLabel *over = gen_new_label(); \
2636 gen_set_rm(s, FRM); \
2637 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2639 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2640 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2641 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2642 vreg_ofs(s, a->rs2), cpu_env, \
2643 s->vlen / 8, s->vlen / 8, data, \
2646 gen_set_label(over); \
2652 GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_w, vfncvt_f_xu_w, RISCV_FRM_DYN)
2653 GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, vfncvt_f_x_w, RISCV_FRM_DYN)
2654 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, vfncvt_f_f_w, RISCV_FRM_DYN)
2655 /* Reuse the helper function from vfncvt.f.f.w */
2656 GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, vfncvt_f_f_w, RISCV_FRM_ROD)
2658 static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
2660 return require_rvv(s) &&
2661 require_scale_rvf(s) &&
2662 vext_check_isa_ill(s) &&
2663 /* OPFV narrowing instructions ignore vs1 check */
2664 vext_check_sd(s, a->rd, a->rs2, a->vm);
2667 #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
2668 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2670 if (opxfv_narrow_check(s, a)) { \
2671 uint32_t data = 0; \
2672 static gen_helper_gvec_3_ptr * const fns[3] = { \
2673 gen_helper_##HELPER##_b, \
2674 gen_helper_##HELPER##_h, \
2675 gen_helper_##HELPER##_w, \
2677 TCGLabel *over = gen_new_label(); \
2678 gen_set_rm(s, FRM); \
2679 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2681 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2682 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2683 vreg_ofs(s, a->rs2), cpu_env, \
2684 s->vlen / 8, s->vlen / 8, data, \
2687 gen_set_label(over); \
2693 GEN_OPXFV_NARROW_TRANS(vfncvt_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_DYN)
2694 GEN_OPXFV_NARROW_TRANS(vfncvt_x_f_w, vfncvt_x_f_w, RISCV_FRM_DYN)
2695 /* Reuse the helper functions from vfncvt.xu.f.w and vfncvt.x.f.w */
2696 GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_xu_f_w, vfncvt_xu_f_w, RISCV_FRM_RTZ)
2697 GEN_OPXFV_NARROW_TRANS(vfncvt_rtz_x_f_w, vfncvt_x_f_w, RISCV_FRM_RTZ)
2700 *** Vector Reduction Operations
2702 /* Vector Single-Width Integer Reduction Instructions */
2703 static bool reduction_check(DisasContext *s, arg_rmrr *a)
2705 return require_rvv(s) &&
2706 vext_check_isa_ill(s) &&
2707 vext_check_reduction(s, a->rs2);
2710 GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
2711 GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
2712 GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
2713 GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
2714 GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
2715 GEN_OPIVV_TRANS(vredand_vs, reduction_check)
2716 GEN_OPIVV_TRANS(vredor_vs, reduction_check)
2717 GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
2719 /* Vector Widening Integer Reduction Instructions */
2720 static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
2722 return reduction_check(s, a) && (s->sew < MO_64);
2725 GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
2726 GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
2728 /* Vector Single-Width Floating-Point Reduction Instructions */
2729 static bool freduction_check(DisasContext *s, arg_rmrr *a)
2731 return reduction_check(s, a) &&
2735 GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
2736 GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
2737 GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
2739 /* Vector Widening Floating-Point Reduction Instructions */
2740 static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
2742 return reduction_widen_check(s, a) &&
2743 require_scale_rvf(s) &&
2747 GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check)
2750 *** Vector Mask Operations
2753 /* Vector Mask-Register Logical Instructions */
2754 #define GEN_MM_TRANS(NAME) \
2755 static bool trans_##NAME(DisasContext *s, arg_r *a) \
2757 if (require_rvv(s) && \
2758 vext_check_isa_ill(s)) { \
2759 uint32_t data = 0; \
2760 gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
2761 TCGLabel *over = gen_new_label(); \
2762 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2764 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2765 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2766 vreg_ofs(s, a->rs1), \
2767 vreg_ofs(s, a->rs2), cpu_env, \
2768 s->vlen / 8, s->vlen / 8, data, fn); \
2770 gen_set_label(over); \
2776 GEN_MM_TRANS(vmand_mm)
2777 GEN_MM_TRANS(vmnand_mm)
2778 GEN_MM_TRANS(vmandnot_mm)
2779 GEN_MM_TRANS(vmxor_mm)
2780 GEN_MM_TRANS(vmor_mm)
2781 GEN_MM_TRANS(vmnor_mm)
2782 GEN_MM_TRANS(vmornot_mm)
2783 GEN_MM_TRANS(vmxnor_mm)
2785 /* Vector count population in mask vcpop */
2786 static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
2788 if (require_rvv(s) &&
2789 vext_check_isa_ill(s)) {
2790 TCGv_ptr src2, mask;
2794 data = FIELD_DP32(data, VDATA, VM, a->vm);
2795 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2797 mask = tcg_temp_new_ptr();
2798 src2 = tcg_temp_new_ptr();
2799 dst = dest_gpr(s, a->rd);
2800 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2802 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2803 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2805 gen_helper_vcpop_m(dst, mask, src2, cpu_env, desc);
2806 gen_set_gpr(s, a->rd, dst);
2808 tcg_temp_free_ptr(mask);
2809 tcg_temp_free_ptr(src2);
2816 /* vmfirst find-first-set mask bit */
2817 static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
2819 if (require_rvv(s) &&
2820 vext_check_isa_ill(s)) {
2821 TCGv_ptr src2, mask;
2825 data = FIELD_DP32(data, VDATA, VM, a->vm);
2826 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2828 mask = tcg_temp_new_ptr();
2829 src2 = tcg_temp_new_ptr();
2830 dst = dest_gpr(s, a->rd);
2831 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2833 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2834 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2836 gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc);
2837 gen_set_gpr(s, a->rd, dst);
2839 tcg_temp_free_ptr(mask);
2840 tcg_temp_free_ptr(src2);
2846 /* vmsbf.m set-before-first mask bit */
2847 /* vmsif.m set-includ-first mask bit */
2848 /* vmsof.m set-only-first mask bit */
2849 #define GEN_M_TRANS(NAME) \
2850 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2852 if (require_rvv(s) && \
2853 vext_check_isa_ill(s) && \
2854 require_vm(a->vm, a->rd) && \
2855 (a->rd != a->rs2)) { \
2856 uint32_t data = 0; \
2857 gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \
2858 TCGLabel *over = gen_new_label(); \
2859 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2861 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2862 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2863 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
2864 vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
2865 cpu_env, s->vlen / 8, s->vlen / 8, \
2868 gen_set_label(over); \
2874 GEN_M_TRANS(vmsbf_m)
2875 GEN_M_TRANS(vmsif_m)
2876 GEN_M_TRANS(vmsof_m)
2879 * Vector Iota Instruction
2881 * 1. The destination register cannot overlap the source register.
2882 * 2. If masked, cannot overlap the mask register ('v0').
2883 * 3. An illegal instruction exception is raised if vstart is non-zero.
2885 static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
2887 if (require_rvv(s) &&
2888 vext_check_isa_ill(s) &&
2889 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
2890 require_vm(a->vm, a->rd) &&
2891 require_align(a->rd, s->lmul)) {
2893 TCGLabel *over = gen_new_label();
2894 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2896 data = FIELD_DP32(data, VDATA, VM, a->vm);
2897 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2898 static gen_helper_gvec_3_ptr * const fns[4] = {
2899 gen_helper_viota_m_b, gen_helper_viota_m_h,
2900 gen_helper_viota_m_w, gen_helper_viota_m_d,
2902 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2903 vreg_ofs(s, a->rs2), cpu_env,
2904 s->vlen / 8, s->vlen / 8, data, fns[s->sew]);
2906 gen_set_label(over);
2912 /* Vector Element Index Instruction */
2913 static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
2915 if (require_rvv(s) &&
2916 vext_check_isa_ill(s) &&
2917 require_align(a->rd, s->lmul) &&
2918 require_vm(a->vm, a->rd)) {
2920 TCGLabel *over = gen_new_label();
2921 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2923 data = FIELD_DP32(data, VDATA, VM, a->vm);
2924 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2925 static gen_helper_gvec_2_ptr * const fns[4] = {
2926 gen_helper_vid_v_b, gen_helper_vid_v_h,
2927 gen_helper_vid_v_w, gen_helper_vid_v_d,
2929 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2930 cpu_env, s->vlen / 8, s->vlen / 8,
2933 gen_set_label(over);
2940 *** Vector Permutation Instructions
2943 static void load_element(TCGv_i64 dest, TCGv_ptr base,
2944 int ofs, int sew, bool sign)
2949 tcg_gen_ld8u_i64(dest, base, ofs);
2951 tcg_gen_ld8s_i64(dest, base, ofs);
2956 tcg_gen_ld16u_i64(dest, base, ofs);
2958 tcg_gen_ld16s_i64(dest, base, ofs);
2963 tcg_gen_ld32u_i64(dest, base, ofs);
2965 tcg_gen_ld32s_i64(dest, base, ofs);
2969 tcg_gen_ld_i64(dest, base, ofs);
2972 g_assert_not_reached();
2977 /* offset of the idx element with base regsiter r */
2978 static uint32_t endian_ofs(DisasContext *s, int r, int idx)
2980 #ifdef HOST_WORDS_BIGENDIAN
2981 return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
2983 return vreg_ofs(s, r) + (idx << s->sew);
2987 /* adjust the index according to the endian */
2988 static void endian_adjust(TCGv_i32 ofs, int sew)
2990 #ifdef HOST_WORDS_BIGENDIAN
2991 tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
2995 /* Load idx >= VLMAX ? 0 : vreg[idx] */
2996 static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
2997 int vreg, TCGv idx, int vlmax)
2999 TCGv_i32 ofs = tcg_temp_new_i32();
3000 TCGv_ptr base = tcg_temp_new_ptr();
3001 TCGv_i64 t_idx = tcg_temp_new_i64();
3002 TCGv_i64 t_vlmax, t_zero;
3005 * Mask the index to the length so that we do
3006 * not produce an out-of-range load.
3008 tcg_gen_trunc_tl_i32(ofs, idx);
3009 tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
3011 /* Convert the index to an offset. */
3012 endian_adjust(ofs, s->sew);
3013 tcg_gen_shli_i32(ofs, ofs, s->sew);
3015 /* Convert the index to a pointer. */
3016 tcg_gen_ext_i32_ptr(base, ofs);
3017 tcg_gen_add_ptr(base, base, cpu_env);
3019 /* Perform the load. */
3020 load_element(dest, base,
3021 vreg_ofs(s, vreg), s->sew, false);
3022 tcg_temp_free_ptr(base);
3023 tcg_temp_free_i32(ofs);
3025 /* Flush out-of-range indexing to zero. */
3026 t_vlmax = tcg_constant_i64(vlmax);
3027 t_zero = tcg_constant_i64(0);
3028 tcg_gen_extu_tl_i64(t_idx, idx);
3030 tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
3031 t_vlmax, dest, t_zero);
3033 tcg_temp_free_i64(t_idx);
3036 static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
3037 int vreg, int idx, bool sign)
3039 load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew, sign);
3042 /* Integer Scalar Move Instruction */
3044 static void store_element(TCGv_i64 val, TCGv_ptr base,
3049 tcg_gen_st8_i64(val, base, ofs);
3052 tcg_gen_st16_i64(val, base, ofs);
3055 tcg_gen_st32_i64(val, base, ofs);
3058 tcg_gen_st_i64(val, base, ofs);
3061 g_assert_not_reached();
3067 * Store vreg[idx] = val.
3068 * The index must be in range of VLMAX.
3070 static void vec_element_storei(DisasContext *s, int vreg,
3071 int idx, TCGv_i64 val)
3073 store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
3076 /* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
3077 static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
3079 if (require_rvv(s) &&
3080 vext_check_isa_ill(s)) {
3084 t1 = tcg_temp_new_i64();
3085 dest = tcg_temp_new();
3087 * load vreg and sign-extend to 64 bits,
3088 * then truncate to XLEN bits before storing to gpr.
3090 vec_element_loadi(s, t1, a->rs2, 0, true);
3091 tcg_gen_trunc_i64_tl(dest, t1);
3092 gen_set_gpr(s, a->rd, dest);
3093 tcg_temp_free_i64(t1);
3094 tcg_temp_free(dest);
3101 /* vmv.s.x vd, rs1 # vd[0] = rs1 */
3102 static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
3104 if (require_rvv(s) &&
3105 vext_check_isa_ill(s)) {
3106 /* This instruction ignores LMUL and vector register groups */
3109 TCGLabel *over = gen_new_label();
3111 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3113 t1 = tcg_temp_new_i64();
3116 * load gpr and sign-extend to 64 bits,
3117 * then truncate to SEW bits when storing to vreg.
3119 s1 = get_gpr(s, a->rs1, EXT_NONE);
3120 tcg_gen_ext_tl_i64(t1, s1);
3121 vec_element_storei(s, a->rd, 0, t1);
3122 tcg_temp_free_i64(t1);
3124 gen_set_label(over);
3130 /* Floating-Point Scalar Move Instructions */
3131 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
3133 if (require_rvv(s) &&
3135 vext_check_isa_ill(s)) {
3136 unsigned int ofs = (8 << s->sew);
3137 unsigned int len = 64 - ofs;
3140 vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
3141 /* NaN-box f[rd] as necessary for SEW */
3143 t_nan = tcg_constant_i64(UINT64_MAX);
3144 tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
3154 /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
3155 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
3157 if (require_rvv(s) &&
3159 vext_check_isa_ill(s)) {
3160 /* The instructions ignore LMUL and vector register group. */
3162 TCGLabel *over = gen_new_label();
3164 /* if vl == 0, skip vector register write back */
3165 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3167 /* NaN-box f[rs1] */
3168 t1 = tcg_temp_new_i64();
3169 do_nanbox(s, t1, cpu_fpr[a->rs1]);
3171 vec_element_storei(s, a->rd, 0, t1);
3172 tcg_temp_free_i64(t1);
3174 gen_set_label(over);
3180 /* Vector Slide Instructions */
3181 static bool slideup_check(DisasContext *s, arg_rmrr *a)
3183 return require_rvv(s) &&
3184 vext_check_isa_ill(s) &&
3185 vext_check_slide(s, a->rd, a->rs2, a->vm, true);
3188 GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
3189 GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
3190 GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
3192 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
3194 return require_rvv(s) &&
3195 vext_check_isa_ill(s) &&
3196 vext_check_slide(s, a->rd, a->rs2, a->vm, false);
3199 GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
3200 GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
3201 GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
3203 /* Vector Floating-Point Slide Instructions */
3204 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
3206 return slideup_check(s, a) &&
3210 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
3212 return slidedown_check(s, a) &&
3216 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
3217 GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)
3219 /* Vector Register Gather Instruction */
3220 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
3222 return require_rvv(s) &&
3223 vext_check_isa_ill(s) &&
3224 require_align(a->rd, s->lmul) &&
3225 require_align(a->rs1, s->lmul) &&
3226 require_align(a->rs2, s->lmul) &&
3227 (a->rd != a->rs2 && a->rd != a->rs1) &&
3228 require_vm(a->vm, a->rd);
3231 static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
3233 int8_t emul = MO_16 - s->sew + s->lmul;
3234 return require_rvv(s) &&
3235 vext_check_isa_ill(s) &&
3236 (emul >= -3 && emul <= 3) &&
3237 require_align(a->rd, s->lmul) &&
3238 require_align(a->rs1, emul) &&
3239 require_align(a->rs2, s->lmul) &&
3240 (a->rd != a->rs2 && a->rd != a->rs1) &&
3241 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3242 a->rs1, 1 << MAX(emul, 0)) &&
3243 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3244 a->rs2, 1 << MAX(s->lmul, 0)) &&
3245 require_vm(a->vm, a->rd);
3248 GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
3249 GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check)
3251 static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
3253 return require_rvv(s) &&
3254 vext_check_isa_ill(s) &&
3255 require_align(a->rd, s->lmul) &&
3256 require_align(a->rs2, s->lmul) &&
3257 (a->rd != a->rs2) &&
3258 require_vm(a->vm, a->rd);
3261 /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
3262 static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
3264 if (!vrgather_vx_check(s, a)) {
3268 if (a->vm && s->vl_eq_vlmax) {
3269 int scale = s->lmul - (s->sew + 3);
3270 int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale;
3271 TCGv_i64 dest = tcg_temp_new_i64();
3274 vec_element_loadi(s, dest, a->rs2, 0, false);
3276 vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
3279 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
3280 MAXSZ(s), MAXSZ(s), dest);
3281 tcg_temp_free_i64(dest);
3284 static gen_helper_opivx * const fns[4] = {
3285 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3286 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3288 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
3293 /* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
3294 static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
3296 if (!vrgather_vx_check(s, a)) {
3300 if (a->vm && s->vl_eq_vlmax) {
3301 int scale = s->lmul - (s->sew + 3);
3302 int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale;
3303 if (a->rs1 >= vlmax) {
3304 tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
3305 MAXSZ(s), MAXSZ(s), 0);
3307 tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
3308 endian_ofs(s, a->rs2, a->rs1),
3309 MAXSZ(s), MAXSZ(s));
3313 static gen_helper_opivx * const fns[4] = {
3314 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3315 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3317 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
3324 * Vector Compress Instruction
3326 * The destination vector register group cannot overlap the
3327 * source vector register group or the source mask register.
3329 static bool vcompress_vm_check(DisasContext *s, arg_r *a)
3331 return require_rvv(s) &&
3332 vext_check_isa_ill(s) &&
3333 require_align(a->rd, s->lmul) &&
3334 require_align(a->rs2, s->lmul) &&
3335 (a->rd != a->rs2) &&
3336 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1);
3339 static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
3341 if (vcompress_vm_check(s, a)) {
3343 static gen_helper_gvec_4_ptr * const fns[4] = {
3344 gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
3345 gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
3347 TCGLabel *over = gen_new_label();
3348 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3350 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3351 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3352 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
3353 cpu_env, s->vlen / 8, s->vlen / 8, data,
3356 gen_set_label(over);
3363 * Whole Vector Register Move Instructions ignore vtype and vl setting.
3364 * Thus, we don't need to check vill bit. (Section 16.6)
3366 #define GEN_VMV_WHOLE_TRANS(NAME, LEN) \
3367 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
3369 if (require_rvv(s) && \
3370 QEMU_IS_ALIGNED(a->rd, LEN) && \
3371 QEMU_IS_ALIGNED(a->rs2, LEN)) { \
3373 tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \
3374 vreg_ofs(s, a->rs2), \
3375 s->vlen / 8 * LEN, s->vlen / 8 * LEN); \
3382 GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
3383 GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
3384 GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
3385 GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
3387 static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
3389 uint8_t from = (s->sew + 3) - div;
3390 bool ret = require_rvv(s) &&
3391 (from >= 3 && from <= 8) &&
3392 (a->rd != a->rs2) &&
3393 require_align(a->rd, s->lmul) &&
3394 require_align(a->rs2, s->lmul - div) &&
3395 require_vm(a->vm, a->rd) &&
3396 require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
3400 static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
3403 gen_helper_gvec_3_ptr *fn;
3404 TCGLabel *over = gen_new_label();
3405 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3407 static gen_helper_gvec_3_ptr * const fns[6][4] = {
3409 NULL, gen_helper_vzext_vf2_h,
3410 gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
3414 gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
3418 NULL, gen_helper_vzext_vf8_d
3421 NULL, gen_helper_vsext_vf2_h,
3422 gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
3426 gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
3430 NULL, gen_helper_vsext_vf8_d
3434 fn = fns[seq][s->sew];
3439 data = FIELD_DP32(data, VDATA, VM, a->vm);
3441 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3442 vreg_ofs(s, a->rs2), cpu_env,
3443 s->vlen / 8, s->vlen / 8, data, fn);
3446 gen_set_label(over);
3450 /* Vector Integer Extension */
3451 #define GEN_INT_EXT_TRANS(NAME, DIV, SEQ) \
3452 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3454 if (int_ext_check(s, a, DIV)) { \
3455 return int_ext_op(s, a, SEQ); \
3460 GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
3461 GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
3462 GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
3463 GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
3464 GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
3465 GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)