2 * RISC-V translation routines for the RVV Standard Extension.
4 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
18 #include "tcg/tcg-op-gvec.h"
19 #include "tcg/tcg-gvec-desc.h"
20 #include "internals.h"
/*
 * Return true when register range [astart, astart + asize) shares at
 * least one register with range [bstart, bstart + bsize).
 */
static inline bool is_overlapped(const int8_t astart, int8_t asize,
                                 const int8_t bstart, int8_t bsize)
{
    const int8_t lo = MIN(astart, bstart);
    const int8_t hi = MAX(astart + asize, bstart + bsize);

    /*
     * The ranges overlap iff the combined span is narrower than the
     * sum of the individual sizes.
     */
    return hi - lo < asize + bsize;
}
/*
 * Vector instructions are only legal while mstatus.VS is non-zero,
 * i.e. the vector unit has not been switched off.
 */
static bool require_rvv(DisasContext *s)
    return s->mstatus_vs != 0;
/*
 * Floating-point vector ops additionally need a live FP unit
 * (mstatus.FS != 0) plus the scalar FP extension matching the current
 * element width: RVF or RVD.
 * NOTE(review): the SEW dispatch between the two has_ext() calls is
 * elided in this excerpt — confirm against the full source.
 */
static bool require_rvf(DisasContext *s)
    if (s->mstatus_fs == 0) {
        return has_ext(s, RVF);
        return has_ext(s, RVD);
/*
 * Like require_rvf(), but for widening ops that compute at 2*SEW, so
 * the required scalar FP extension corresponds to the doubled width.
 * NOTE(review): the SEW dispatch between the two has_ext() calls is
 * elided in this excerpt — confirm against the full source.
 */
static bool require_scale_rvf(DisasContext *s)
    if (s->mstatus_fs == 0) {
        return has_ext(s, RVF);
        return has_ext(s, RVD);
/*
 * Destination vector register group cannot overlap the source mask
 * register: a masked op (vm == 0) reads v0 as the mask, so vd may not
 * be v0.  Unmasked ops (vm != 0) may write any register.
 */
static bool require_vm(int vm, int vd)
{
    return vm != 0 || vd != 0;
}
/*
 * Segment loads/stores touch nf register groups of EMUL registers
 * each.  The total may not exceed 8 registers and the accessed
 * registers may not run past v31 (RVV spec, Section 7.8).
 */
static bool require_nf(int vd, int nf, int lmul)
{
    /* A fractional EMUL (lmul < 0) still occupies one whole register. */
    int size = lmul > 0 ? nf << lmul : nf;

    return size <= 8 && vd + size <= 32;
}
/*
 * Vector register numbers must be aligned to the passed-in LMUL
 * (or EMUL).  If LMUL <= 0 (fractional or unit LMUL), any vector
 * register is allowed.
 */
static bool require_align(const int8_t val, const int8_t lmul)
{
    /* A multiple of 2^lmul has its low lmul bits clear. */
    return lmul <= 0 || (val & ((1 << lmul) - 1)) == 0;
}
/*
 * A destination vector register group can overlap a source vector
 * register group only if one of the following holds:
 *   1. The destination EEW equals the source EEW.
 *   2. The destination EEW is smaller than the source EEW and the overlap
 *      is in the lowest-numbered part of the source register group.
 *   3. The destination EEW is greater than the source EEW, the source EMUL
 *      is at least 1, and the overlap is in the highest-numbered part of
 *      the destination register group.
 *
 * This function returns true if one of the following holds:
 *   * Destination vector register group does not overlap a source vector
 *     register group.
 *   * Rule 3 is met.
 * For rule 1, overlap is allowed so this function doesn't need to be called.
 * For rule 2, (vd == vs). Caller has to check whether: (vd != vs) before
 * calling this function.
 */
static bool require_noover(const int8_t dst, const int8_t dst_lmul,
                           const int8_t src, const int8_t src_lmul)
    /* Group sizes in registers; fractional LMUL occupies one register. */
    int8_t dst_size = dst_lmul <= 0 ? 1 : 1 << dst_lmul;
    int8_t src_size = src_lmul <= 0 ? 1 : 1 << src_lmul;

    /* Destination EEW is greater than the source EEW, check rule 3. */
    if (dst_size > src_size) {
        /*
         * NOTE(review): the leading operand(s) of this && chain are
         * elided in this excerpt.  The visible clauses require the
         * source group to overlap the destination while sitting at its
         * highest-numbered end — confirm the full condition upstream.
         */
            is_overlapped(dst, dst_size, src, src_size) &&
            !is_overlapped(dst, dst_size, src + src_size, src_size)) {

    /* Otherwise any overlap at all is illegal. */
    return !is_overlapped(dst, dst_size, src, src_size);
/*
 * Common translation for vsetvl/vsetvli: compute the new vl via the
 * helper, write it to rd, then end the TB because vtype has changed.
 * NOTE(review): several lines (declarations, return paths, the
 * special-case tail for rd == rs1 == 0) are elided in this excerpt.
 */
static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2)
    if (!require_rvv(s) || !has_ext(s, RVV)) {
    dst = dest_gpr(s, rd);
    if (rd == 0 && rs1 == 0) {
        /* Keep the existing vl ("change vtype only" form). */
        tcg_gen_mov_tl(s1, cpu_vl);
    } else if (rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_constant_tl(RV_VLEN_MAX);
        s1 = get_gpr(s, rs1, EXT_ZERO);
    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(s, rd, dst);
    /* New vtype invalidates cached translation state: exit this TB. */
    tcg_gen_movi_tl(cpu_pc, s->pc_succ_insn);
    tcg_gen_lookup_and_goto_ptr();
    s->base.is_jmp = DISAS_NORETURN;
    if (rd == 0 && rs1 == 0) {
/* vsetvl: the new vtype value comes from register rs2. */
static bool trans_vsetvl(DisasContext *s, arg_vsetvl *a)
    TCGv s2 = get_gpr(s, a->rs2, EXT_ZERO);
    return do_vsetvl(s, a->rd, a->rs1, s2);
/* vsetvli: the new vtype value comes from the zimm immediate. */
static bool trans_vsetvli(DisasContext *s, arg_vsetvli *a)
    TCGv s2 = tcg_constant_tl(a->zimm);
    return do_vsetvl(s, a->rd, a->rs1, s2);
/*
 * Byte offset of vector register 'reg' within CPURISCVState: the
 * register file starts at 'vreg' and each register is vlen/8 bytes.
 */
static uint32_t vreg_ofs(DisasContext *s, int reg)
    return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
/* check functions */

/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * store check function.
 *
 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. Destination vector register number is a multiple of EMUL.
 *      (Section 3.4.2, 7.3)
 *   3. The EMUL setting must be such that EMUL * NFIELDS <= 8. (Section 7.8)
 *   4. Vector register numbers accessed by the segment load or store
 *      cannot increment past 31. (Section 7.8)
 */
static bool vext_check_store(DisasContext *s, int vd, int nf, uint8_t eew)
    /* All quantities are log2: emul = eew - sew + lmul. */
    int8_t emul = eew - s->sew + s->lmul;
    return (emul >= -3 && emul <= 3) &&
           require_align(vd, emul) &&
           require_nf(vd, nf, emul);
/*
 * Vector unit-stride, strided, unit-stride segment, strided segment
 * load check function.
 *
 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply to loads.
 *   2. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 */
static bool vext_check_load(DisasContext *s, int vd, int nf, int vm,
    return vext_check_store(s, vd, nf, eew) && require_vm(vm, vd);
/*
 * Vector indexed, indexed segment store check function.
 *
 * Rules to be checked here:
 *   1. EMUL must be within the range: 1/8 <= EMUL <= 8. (Section 7.3)
 *   2. Index vector register number is a multiple of EMUL.
 *      (Section 3.4.2, 7.3)
 *   3. Destination vector register number is a multiple of LMUL.
 *      (Section 3.4.2, 7.3)
 *   4. The EMUL setting must be such that EMUL * NFIELDS <= 8. (Section 7.8)
 *   5. Vector register numbers accessed by the segment load or store
 *      cannot increment past 31. (Section 7.8)
 */
static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf,
    /* EMUL (log2) applies to the index vector; data uses LMUL. */
    int8_t emul = eew - s->sew + s->lmul;
    return (emul >= -3 && emul <= 3) &&
           require_align(vs2, emul) &&
           require_align(vd, s->lmul) &&
           require_nf(vd, nf, s->lmul);
/*
 * Vector indexed, indexed segment load check function.
 *
 * Rules to be checked here:
 *   1. All rules that apply to store instructions also apply to loads.
 *   2. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *   3. Destination vector register cannot overlap a source vector
 *      register (vs2) group.
 *   4. Destination vector register groups cannot overlap
 *      the source vector register (vs2) group for
 *      indexed segment load instructions. (Section 7.8.3)
 */
static bool vext_check_ld_index(DisasContext *s, int vd, int vs2,
                                int nf, int vm, uint8_t eew)
    int8_t emul = eew - s->sew + s->lmul;
    bool ret = vext_check_st_index(s, vd, vs2, nf, eew) &&
    /* Each segment register group has to follow overlap rules. */
    for (int i = 0; i < nf; ++i) {
        /* Starting register of the i-th field's data group. */
        seg_vd = vd + (1 << MAX(s->lmul, 0)) * i;
        /*
         * NOTE(review): the branch conditions around these two
         * require_noover() calls (the eew > sew guard and its
         * companions) are elided in this excerpt — confirm upstream.
         */
            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
        } else if (eew < s->sew) {
            ret &= require_noover(seg_vd, s->lmul, vs2, emul);
            /*
             * Destination vector register groups cannot overlap
             * the source vector register (vs2) group for
             * indexed segment load instructions.
             */
            ret &= !is_overlapped(seg_vd, 1 << MAX(s->lmul, 0),
                                  vs2, 1 << MAX(emul, 0));
/*
 * Common single-width check: mask-overlap rule for vd plus LMUL
 * alignment of both the destination and the single source group.
 */
static bool vext_check_ss(DisasContext *s, int vd, int vs, int vm)
    return require_vm(vm, vd) &&
           require_align(vd, s->lmul) &&
           require_align(vs, s->lmul);
/*
 * Check function for vector instruction with format:
 * single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 *   2. Destination vector register number is a multiple of LMUL.
 *   3. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 */
static bool vext_check_sss(DisasContext *s, int vd, int vs1, int vs2, int vm)
    return vext_check_ss(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul);
/*
 * Common check for mask-producing ops with one vector source: vs must
 * be LMUL-aligned and the single-register destination must not
 * overlap the source group.
 * NOTE(review): the guard around require_noover() (and the trailing
 * 'return ret;') are elided in this excerpt — confirm upstream.
 */
static bool vext_check_ms(DisasContext *s, int vd, int vs)
    bool ret = require_align(vs, s->lmul);
        ret &= require_noover(vd, 0, vs, s->lmul);
/*
 * Check function for maskable vector instruction with format:
 * single-width result and single-width sources (SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 *   2. Destination vector register cannot overlap a source vector
 *      register (vs2, vs1) group.
 *   3. The destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0),
 *      unless the destination vector register is being written
 *      with a mask value (e.g., comparisons) or the scalar result
 *      of a reduction. (Section 5.3)
 */
static bool vext_check_mss(DisasContext *s, int vd, int vs1, int vs2)
    bool ret = vext_check_ms(s, vd, vs2) &&
               require_align(vs1, s->lmul);
        /* NOTE(review): the guard for this clause is elided here. */
        ret &= require_noover(vd, 0, vs1, s->lmul);
/*
 * Common check function for vector widening instructions
 * of double-width result (2*SEW).
 *
 * Rules to be checked here:
 *   1. The largest vector register group used by an instruction
 *      can not be greater than 8 vector registers (Section 5.2),
 *      hence LMUL <= 4 (log2 value 2).
 *   2. Destination vector register number is a multiple of 2 * LMUL.
 *   3. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 * NOTE(review): the final conjunct(s) of the return expression are
 * elided in this excerpt.
 */
static bool vext_wide_check_common(DisasContext *s, int vd, int vm)
    return (s->lmul <= 2) &&
           require_align(vd, s->lmul + 1) &&
/*
 * Common check function for vector narrowing instructions
 * of single-width result (SEW) and double-width source (2*SEW).
 *
 * Rules to be checked here:
 *   1. The largest vector register group used by an instruction
 *      can not be greater than 8 vector registers (Section 5.2).
 *   2. Source vector register number is a multiple of 2 * LMUL.
 *   3. Destination vector register number is a multiple of LMUL.
 *   4. Destination vector register group for a masked vector
 *      instruction cannot overlap the source mask register (v0).
 * NOTE(review): the final conjunct(s) of the return expression are
 * elided in this excerpt.
 */
static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2,
    return (s->lmul <= 2) &&
           require_align(vs2, s->lmul + 1) &&
           require_align(vd, s->lmul) &&
/*
 * Widening check with a single-width source: the source group is
 * LMUL-aligned and may not overlap the double-width destination.
 */
static bool vext_check_ds(DisasContext *s, int vd, int vs, int vm)
    return vext_wide_check_common(s, vd, vm) &&
           require_align(vs, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs, s->lmul);
/*
 * Widening check with a double-width source: the source group is
 * aligned to 2 * LMUL, same as the destination.
 */
static bool vext_check_dd(DisasContext *s, int vd, int vs, int vm)
    return vext_wide_check_common(s, vd, vm) &&
           require_align(vs, s->lmul + 1);
/*
 * Check function for vector instruction with format:
 * double-width result and single-width sources (2*SEW = SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the widen common rules are applied.
 *   2. Source (vs2, vs1) vector register numbers are multiples of LMUL.
 *   3. Destination vector register cannot overlap a source vector
 *      register (vs2, vs1) group.
 */
static bool vext_check_dss(DisasContext *s, int vd, int vs1, int vs2, int vm)
    return vext_check_ds(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul) &&
           require_noover(vd, s->lmul + 1, vs1, s->lmul);
/*
 * Check function for vector instruction with format:
 * double-width result, double-width source 2 and single-width
 * source 1 (2*SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the widen common rules are applied.
 *   2. The double-width source (vs2) register number is a multiple
 *      of 2 * LMUL.
 *   3. The single-width source (vs1) register number is a multiple
 *      of LMUL.
 *   4. Destination vector register cannot overlap the single-width
 *      source (vs1) group.
 */
static bool vext_check_dds(DisasContext *s, int vd, int vs1, int vs2, int vm)
    /* vs1 is the single-width operand, hence the _ds check on it. */
    return vext_check_ds(s, vd, vs1, vm) &&
           require_align(vs2, s->lmul + 1);
/*
 * Narrowing check with a double-width source: destination must not
 * overlap the 2*LMUL-wide source group.
 * NOTE(review): the guard around require_noover() and the trailing
 * 'return ret;' are elided in this excerpt — confirm upstream.
 */
static bool vext_check_sd(DisasContext *s, int vd, int vs, int vm)
    bool ret = vext_narrow_check_common(s, vd, vs, vm);
        ret &= require_noover(vd, s->lmul, vs, s->lmul + 1);
/*
 * Check function for vector instruction with format:
 * single-width result, double-width source 2 and single-width
 * source 1 (SEW = 2*SEW op SEW)
 *
 * Rules to be checked here:
 *   1. All rules defined in the narrow common rules are applied.
 *   2. Destination vector register cannot overlap the double-width
 *      source (vs2) group.
 *   3. The single-width source (vs1) register number is a multiple
 *      of LMUL.
 */
static bool vext_check_sds(DisasContext *s, int vd, int vs1, int vs2, int vm)
    return vext_check_sd(s, vd, vs2, vm) &&
           require_align(vs1, s->lmul);
/*
 * Check function for vector reduction instructions.
 *
 * Rules to be checked here:
 *   1. The vector source (vs2) register number is a multiple of LMUL.
 * (vd and vs1 hold scalars, so they need no alignment check.)
 */
static bool vext_check_reduction(DisasContext *s, int vs2)
    return require_align(vs2, s->lmul);
/*
 * Check function for vector slide instructions.
 *
 * Rules to be checked here:
 *   1. The source (vs2) register number is a multiple of LMUL.
 *   2. The destination register number is a multiple of LMUL.
 *   3. A masked destination group cannot overlap the mask register (v0).
 *   4. The destination vector register group for vslideup, vslide1up,
 *      vfslide1up, cannot overlap the source vector register (vs2) group.
 *      (Section 5.2, 16.3.1, 16.3.3)
 * NOTE(review): the require_vm()/is_over handling and 'return ret;'
 * are elided in this excerpt — confirm upstream.
 */
static bool vext_check_slide(DisasContext *s, int vd, int vs2,
                             int vm, bool is_over)
    bool ret = require_align(vs2, s->lmul) &&
               require_align(vd, s->lmul) &&
/*
 * In cpu_get_tb_cpu_state(), VILL is set if RVV was not present,
 * so checking vill here also covers the RVV-present check.
 * NOTE(review): the function body is elided in this excerpt
 * (presumably 'return !s->vill;' — confirm upstream).
 */
static bool vext_check_isa_ill(DisasContext *s)
/*
 * Common translation macro: stamp out a trans_NAME() entry point that
 * runs CHECK and, on success, OP — both at fixed element width EEW.
 */
#define GEN_VEXT_TRANS(NAME, EEW, ARGTYPE, OP, CHECK)        \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE * a) \
    if (CHECK(s, a, EEW)) {                                  \
        return OP(s, a, EEW);                                \
543 static uint8_t vext_get_emul(DisasContext *s, uint8_t eew)
545 int8_t emul = eew - s->sew + s->lmul;
546 return emul < 0 ? 0 : emul;
/*
 *** unit stride load and store
 */
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
/* Emit a unit-stride load/store: skip entirely when vl == 0. */
static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s,
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);

    /*
     * As simd_desc supports at most 256 bytes, and in this implementation,
     * the max vector group length is 2048 bytes. So split it into two parts.
     *
     * The first part is vlen in bytes, encoded in maxsz of simd_desc.
     * The second part is lmul, encoded in data of simd_desc.
     */
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    /* NOTE(review): tail (dirty-marking, label, return) elided here. */
/* Select the unit-stride load helper by mask state and EEW, then emit. */
static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride load */
        { gen_helper_vle8_v_mask, gen_helper_vle16_v_mask,
          gen_helper_vle32_v_mask, gen_helper_vle64_v_mask },
        /* unmasked unit stride load */
        { gen_helper_vle8_v, gen_helper_vle16_v,
          gen_helper_vle32_v, gen_helper_vle64_v }

    fn = fns[a->vm][eew];
    /*
     * Vector load/store instructions have the EEW encoded
     * directly in the instructions. The maximum vector size is
     * calculated with EMUL rather than LMUL.
     */
    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, false);
/* Legality check for unit-stride loads (RVV on, vtype valid, rules ok). */
static bool ld_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);

/* Unit-stride loads for each EEW. */
GEN_VEXT_TRANS(vle8_v,  MO_8,  r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle16_v, MO_16, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle32_v, MO_32, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle64_v, MO_64, r2nfvm, ld_us_op, ld_us_check)
/* Select the unit-stride store helper by mask state and EEW, then emit. */
static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4] = {
        /* masked unit stride store */
        { gen_helper_vse8_v_mask, gen_helper_vse16_v_mask,
          gen_helper_vse32_v_mask, gen_helper_vse64_v_mask },
        /* unmasked unit stride store */
        { gen_helper_vse8_v, gen_helper_vse16_v,
          gen_helper_vse32_v, gen_helper_vse64_v }

    fn = fns[a->vm][eew];
    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s, true);
/* Stores have no mask-overlap rule, hence no vm argument here. */
static bool st_us_check(DisasContext *s, arg_r2nfvm* a, uint8_t eew)
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);

/* Unit-stride stores for each EEW. */
GEN_VEXT_TRANS(vse8_v,  MO_8,  r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse16_v, MO_16, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse32_v, MO_32, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse64_v, MO_64, r2nfvm, st_us_op, st_us_check)
/*
 *** stride load and store
 */
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
                                    TCGv, TCGv_env, TCGv_i32);

/* Emit a strided load/store; rs2 supplies the byte stride. */
static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s, bool is_store)
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    stride = get_gpr(s, rs2, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, stride, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    /* NOTE(review): tail (dirty-marking, label, return) elided here. */
/* Select the strided load helper by EEW, then emit. */
static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        gen_helper_vlse8_v, gen_helper_vlse16_v,
        gen_helper_vlse32_v, gen_helper_vlse64_v

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
/* Strided loads share the unit-stride load legality rules. */
static bool ld_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_load(s, a->rd, a->nf, a->vm, eew);

/* Strided loads for each EEW. */
GEN_VEXT_TRANS(vlse8_v,  MO_8,  rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse16_v, MO_16, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse32_v, MO_32, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse64_v, MO_64, rnfvm, ld_stride_op, ld_stride_check)
/* Select the strided store helper by EEW, then emit. */
static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4] = {
        /* masked stride store */
        gen_helper_vsse8_v, gen_helper_vsse16_v,
        gen_helper_vsse32_v, gen_helper_vsse64_v

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
/* Strided stores share the unit-stride store legality rules. */
static bool st_stride_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_store(s, a->rd, a->nf, eew);

/* Strided stores for each EEW. */
GEN_VEXT_TRANS(vsse8_v,  MO_8,  rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse16_v, MO_16, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse32_v, MO_32, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse64_v, MO_64, rnfvm, st_stride_op, st_stride_check)
/*
 *** index load and store
 */
typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
                                   TCGv_ptr, TCGv_env, TCGv_i32);

/* Emit an indexed load/store; vs2 is the offset vector. */
static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s, bool is_store)
    TCGv_ptr dest, mask, index;
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);
    /* NOTE(review): tail (dirty-marking, label, return) elided here. */
/*
 * Translate indexed loads: the fns row is the offset-vector EEW from
 * the instruction; the column is the data EEW, i.e. the current SEW.
 */
static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei8_8_v, gen_helper_vlxei8_16_v,
          gen_helper_vlxei8_32_v, gen_helper_vlxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei16_8_v, gen_helper_vlxei16_16_v,
          gen_helper_vlxei16_32_v, gen_helper_vlxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei32_8_v, gen_helper_vlxei32_16_v,
          gen_helper_vlxei32_32_v, gen_helper_vlxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vlxei64_8_v, gen_helper_vlxei64_16_v,
          gen_helper_vlxei64_32_v, gen_helper_vlxei64_64_v }

    fn = fns[eew][s->sew];

    /* The data group's EMUL derives from SEW, not the index EEW. */
    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, false);
/* Legality check for indexed loads (includes segment overlap rules). */
static bool ld_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ld_index(s, a->rd, a->rs2, a->nf, a->vm, eew);

/* Indexed loads for each offset-vector EEW. */
GEN_VEXT_TRANS(vlxei8_v,  MO_8,  rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei16_v, MO_16, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei32_v, MO_32, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxei64_v, MO_64, rnfvm, ld_index_op, ld_index_check)
/*
 * Translate indexed stores: fns is indexed [offset EEW][data SEW],
 * mirroring ld_index_op above.
 */
static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t eew)
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        /*
         * offset vector register group EEW = 8,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei8_8_v, gen_helper_vsxei8_16_v,
          gen_helper_vsxei8_32_v, gen_helper_vsxei8_64_v },
        /*
         * offset vector register group EEW = 16,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei16_8_v, gen_helper_vsxei16_16_v,
          gen_helper_vsxei16_32_v, gen_helper_vsxei16_64_v },
        /*
         * offset vector register group EEW = 32,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei32_8_v, gen_helper_vsxei32_16_v,
          gen_helper_vsxei32_32_v, gen_helper_vsxei32_64_v },
        /*
         * offset vector register group EEW = 64,
         * data vector register group EEW = SEW
         */
        { gen_helper_vsxei64_8_v, gen_helper_vsxei64_16_v,
          gen_helper_vsxei64_32_v, gen_helper_vsxei64_64_v }

    fn = fns[eew][s->sew];

    /* The data group's EMUL derives from SEW, not the index EEW. */
    uint8_t emul = vext_get_emul(s, s->sew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s, true);
/* Legality check for indexed stores (no mask-overlap rule needed). */
static bool st_index_check(DisasContext *s, arg_rnfvm* a, uint8_t eew)
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_st_index(s, a->rd, a->rs2, a->nf, eew);

/* Indexed stores for each offset-vector EEW. */
GEN_VEXT_TRANS(vsxei8_v,  MO_8,  rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei16_v, MO_16, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei32_v, MO_32, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxei64_v, MO_64, rnfvm, st_index_op, st_index_check)
/*
 *** unit stride fault-only-first load
 */
/* Emit a fault-only-first load; traps past element 0 just shrink vl. */
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = get_gpr(s, rs1, EXT_NONE);
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    /* NOTE(review): tail (dirty-marking, label, return) elided here. */
/* Select the fault-only-first load helper by EEW, then emit. */
static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t eew)
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[4] = {
        gen_helper_vle8ff_v, gen_helper_vle16ff_v,
        gen_helper_vle32ff_v, gen_helper_vle64ff_v

    uint8_t emul = vext_get_emul(s, eew);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, emul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldff_trans(a->rd, a->rs1, data, fn, s);

/* Fault-only-first loads reuse the plain unit-stride load checks. */
GEN_VEXT_TRANS(vle8ff_v,  MO_8,  r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle16ff_v, MO_16, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle32ff_v, MO_32, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vle64ff_v, MO_64, r2nfvm, ldff_op, ld_us_check)
/*
 * load and store whole register instructions
 */
typedef void gen_helper_ldst_whole(TCGv_ptr, TCGv, TCGv_env, TCGv_i32);

/* Emit a whole-register load/store of nf registers; no masking, no vl. */
static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf,
                             gen_helper_ldst_whole *fn, DisasContext *s,
    uint32_t data = FIELD_DP32(0, VDATA, NF, nf);
    dest = tcg_temp_new_ptr();
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    base = get_gpr(s, rs1, EXT_NONE);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));

    fn(dest, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    /* NOTE(review): tail (dirty-marking, return) elided here. */
/*
 * load and store whole register instructions ignore vtype and vl setting.
 * Thus, we don't need to check vill bit. (Section 7.9)
 */
#define GEN_LDST_WHOLE_TRANS(NAME, ARG_NF, IS_STORE)                      \
static bool trans_##NAME(DisasContext *s, arg_##NAME * a)                 \
    /* vd must be aligned to the register-group size ARG_NF. */           \
    if (require_rvv(s) &&                                                 \
        QEMU_IS_ALIGNED(a->rd, ARG_NF)) {                                 \
        return ldst_whole_trans(a->rd, a->rs1, ARG_NF, gen_helper_##NAME, \
/* Whole-register loads: 1, 2, 4 or 8 registers at each hint EEW. */
GEN_LDST_WHOLE_TRANS(vl1re8_v,  1, false)
GEN_LDST_WHOLE_TRANS(vl1re16_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re32_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl1re64_v, 1, false)
GEN_LDST_WHOLE_TRANS(vl2re8_v,  2, false)
GEN_LDST_WHOLE_TRANS(vl2re16_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re32_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl2re64_v, 2, false)
GEN_LDST_WHOLE_TRANS(vl4re8_v,  4, false)
GEN_LDST_WHOLE_TRANS(vl4re16_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re32_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl4re64_v, 4, false)
GEN_LDST_WHOLE_TRANS(vl8re8_v,  8, false)
GEN_LDST_WHOLE_TRANS(vl8re16_v, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re32_v, 8, false)
GEN_LDST_WHOLE_TRANS(vl8re64_v, 8, false)

/* Whole-register stores (EEW-agnostic). */
GEN_LDST_WHOLE_TRANS(vs1r_v, 1, true)
GEN_LDST_WHOLE_TRANS(vs2r_v, 2, true)
GEN_LDST_WHOLE_TRANS(vs4r_v, 4, true)
GEN_LDST_WHOLE_TRANS(vs8r_v, 8, true)
/*
 *** Vector Integer Arithmetic Instructions
 */

/*
 * MAXSZ returns the maximum vector size that can be operated on, in
 * bytes, which is used in GVEC IR when the vl_eq_vlmax flag is true
 * to accelerate vector operation.
 */
static inline uint32_t MAXSZ(DisasContext *s)
    /* lmul is log2; vlen is in bits, so scale by lmul - 3 (log2 of 8). */
    int scale = s->lmul - 3;
    return scale < 0 ? s->vlen >> -scale : s->vlen << scale;
/* Legality check for OPIVV (vector-vector, single-width) ops. */
static bool opivv_check(DisasContext *s, arg_rmrr *a)
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

/*
 * OPIVV: take the inline GVEC fast path when the op is unmasked and
 * vl equals VLMAX, otherwise fall back to the out-of-line helper.
 */
do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
              gen_helper_gvec_4_ptr *fn)
    TCGLabel *over = gen_new_label();
    if (!opivv_check(s, a)) {
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    if (a->vm && s->vl_eq_vlmax) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd),
                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
                MAXSZ(s), MAXSZ(s));
        /* NOTE(review): else-branch opener elided in this excerpt. */
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
    gen_set_label(over);
/* OPIVV with GVEC IR */
#define GEN_OPIVV_GVEC_TRANS(NAME, SUF)                        \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)         \
    /* one helper per SEW (byte/half/word/double) */           \
    static gen_helper_gvec_4_ptr * const fns[4] = {            \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,          \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,          \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \

GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)
typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                              TCGv_env, TCGv_i32);

/* Emit an OPIVX (vector-scalar) op via the out-of-line helper. */
static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s)
    TCGv_ptr dest, src2, mask;
    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = get_gpr(s, rs1, EXT_NONE);

    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(src2);
    gen_set_label(over);
/* Legality check for OPIVX ops (only one vector source to align). */
static bool opivx_check(DisasContext *s, arg_rmrr *a)
    return require_rvv(s) &&
           vext_check_isa_ill(s) &&
           vext_check_ss(s, a->rd, a->rs2, a->vm);
typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
                         uint32_t, uint32_t);

/*
 * OPIVX: GVEC fast path (scalar broadcast) when unmasked and vl equals
 * VLMAX, otherwise the generic opivx_trans() helper path.
 */
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
              gen_helper_opivx *fn)
    if (!opivx_check(s, a)) {

    if (a->vm && s->vl_eq_vlmax) {
        TCGv_i64 src1 = tcg_temp_new_i64();

        /* The scalar operand is sign-extended to 64 bits for GVEC. */
        tcg_gen_ext_tl_i64(src1, get_gpr(s, a->rs1, EXT_SIGN));
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        tcg_temp_free_i64(src1);
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
/* OPIVX with GVEC IR */
#define GEN_OPIVX_GVEC_TRANS(NAME, SUF)                        \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)         \
    /* one helper per SEW (byte/half/word/double) */           \
    static gen_helper_opivx * const fns[4] = {                 \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,          \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,          \
    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \

GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)
1200 static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1202 tcg_gen_vec_sub8_i64(d, b, a);
1205 static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
1207 tcg_gen_vec_sub16_i64(d, b, a);
1210 static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
1212 tcg_gen_sub_i32(ret, arg2, arg1);
1215 static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
1217 tcg_gen_sub_i64(ret, arg2, arg1);
1220 static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
1222 tcg_gen_sub_vec(vece, r, b, a);
1225 static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
1226 TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
1228 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
1229 static const GVecGen2s rsub_op[4] = {
1230 { .fni8 = gen_vec_rsub8_i64,
1231 .fniv = gen_rsub_vec,
1232 .fno = gen_helper_vec_rsubs8,
1233 .opt_opc = vecop_list,
1235 { .fni8 = gen_vec_rsub16_i64,
1236 .fniv = gen_rsub_vec,
1237 .fno = gen_helper_vec_rsubs16,
1238 .opt_opc = vecop_list,
1240 { .fni4 = gen_rsub_i32,
1241 .fniv = gen_rsub_vec,
1242 .fno = gen_helper_vec_rsubs32,
1243 .opt_opc = vecop_list,
1245 { .fni8 = gen_rsub_i64,
1246 .fniv = gen_rsub_vec,
1247 .fno = gen_helper_vec_rsubs64,
1248 .opt_opc = vecop_list,
1249 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1253 tcg_debug_assert(vece <= MO_64);
1254 tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
1257 GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)
1260 IMM_ZX, /* Zero-extended */
1261 IMM_SX, /* Sign-extended */
1262 IMM_TRUNC_SEW, /* Truncate to log(SEW) bits */
1263 IMM_TRUNC_2SEW, /* Truncate to log(2*SEW) bits */
1266 static int64_t extract_imm(DisasContext *s, uint32_t imm, imm_mode_t imm_mode)
1270 return extract64(imm, 0, 5);
1272 return sextract64(imm, 0, 5);
1274 return extract64(imm, 0, s->sew + 3);
1275 case IMM_TRUNC_2SEW:
1276 return extract64(imm, 0, s->sew + 4);
1278 g_assert_not_reached();
1282 static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
1283 gen_helper_opivx *fn, DisasContext *s,
1284 imm_mode_t imm_mode)
1286 TCGv_ptr dest, src2, mask;
1291 TCGLabel *over = gen_new_label();
1292 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1294 dest = tcg_temp_new_ptr();
1295 mask = tcg_temp_new_ptr();
1296 src2 = tcg_temp_new_ptr();
1297 src1 = tcg_constant_tl(extract_imm(s, imm, imm_mode));
1299 data = FIELD_DP32(data, VDATA, VM, vm);
1300 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1301 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1303 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1304 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
1305 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1307 fn(dest, mask, src1, src2, cpu_env, desc);
1309 tcg_temp_free_ptr(dest);
1310 tcg_temp_free_ptr(mask);
1311 tcg_temp_free_ptr(src2);
1313 gen_set_label(over);
1317 typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
1318 uint32_t, uint32_t);
1321 do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
1322 gen_helper_opivx *fn, imm_mode_t imm_mode)
1324 if (!opivx_check(s, a)) {
1328 if (a->vm && s->vl_eq_vlmax) {
1329 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1330 extract_imm(s, a->rs1, imm_mode), MAXSZ(s), MAXSZ(s));
1334 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, imm_mode);
1337 /* OPIVI with GVEC IR */
1338 #define GEN_OPIVI_GVEC_TRANS(NAME, IMM_MODE, OPIVX, SUF) \
1339 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1341 static gen_helper_opivx * const fns[4] = { \
1342 gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
1343 gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
1345 return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF, \
1346 fns[s->sew], IMM_MODE); \
1349 GEN_OPIVI_GVEC_TRANS(vadd_vi, IMM_SX, vadd_vx, addi)
1351 static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
1352 int64_t c, uint32_t oprsz, uint32_t maxsz)
1354 TCGv_i64 tmp = tcg_constant_i64(c);
1355 tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
1358 GEN_OPIVI_GVEC_TRANS(vrsub_vi, IMM_SX, vrsub_vx, rsubi)
1360 /* Vector Widening Integer Add/Subtract */
1362 /* OPIVV with WIDEN */
1363 static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
1365 return require_rvv(s) &&
1366 vext_check_isa_ill(s) &&
1367 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
1370 static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
1371 gen_helper_gvec_4_ptr *fn,
1372 bool (*checkfn)(DisasContext *, arg_rmrr *))
1374 if (checkfn(s, a)) {
1376 TCGLabel *over = gen_new_label();
1377 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1379 data = FIELD_DP32(data, VDATA, VM, a->vm);
1380 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1381 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1382 vreg_ofs(s, a->rs1),
1383 vreg_ofs(s, a->rs2),
1384 cpu_env, s->vlen / 8, s->vlen / 8,
1387 gen_set_label(over);
1393 #define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
1394 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1396 static gen_helper_gvec_4_ptr * const fns[3] = { \
1397 gen_helper_##NAME##_b, \
1398 gen_helper_##NAME##_h, \
1399 gen_helper_##NAME##_w \
1401 return do_opivv_widen(s, a, fns[s->sew], CHECK); \
1404 GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
1405 GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
1406 GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
1407 GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)
1409 /* OPIVX with WIDEN */
1410 static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
1412 return require_rvv(s) &&
1413 vext_check_isa_ill(s) &&
1414 vext_check_ds(s, a->rd, a->rs2, a->vm);
1417 static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
1418 gen_helper_opivx *fn)
1420 if (opivx_widen_check(s, a)) {
1421 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1426 #define GEN_OPIVX_WIDEN_TRANS(NAME) \
1427 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1429 static gen_helper_opivx * const fns[3] = { \
1430 gen_helper_##NAME##_b, \
1431 gen_helper_##NAME##_h, \
1432 gen_helper_##NAME##_w \
1434 return do_opivx_widen(s, a, fns[s->sew]); \
1437 GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
1438 GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
1439 GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
1440 GEN_OPIVX_WIDEN_TRANS(vwsub_vx)
1442 /* WIDEN OPIVV with WIDEN */
1443 static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
1445 return require_rvv(s) &&
1446 vext_check_isa_ill(s) &&
1447 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
1450 static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
1451 gen_helper_gvec_4_ptr *fn)
1453 if (opiwv_widen_check(s, a)) {
1455 TCGLabel *over = gen_new_label();
1456 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1458 data = FIELD_DP32(data, VDATA, VM, a->vm);
1459 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
1460 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
1461 vreg_ofs(s, a->rs1),
1462 vreg_ofs(s, a->rs2),
1463 cpu_env, s->vlen / 8, s->vlen / 8, data, fn);
1465 gen_set_label(over);
1471 #define GEN_OPIWV_WIDEN_TRANS(NAME) \
1472 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1474 static gen_helper_gvec_4_ptr * const fns[3] = { \
1475 gen_helper_##NAME##_b, \
1476 gen_helper_##NAME##_h, \
1477 gen_helper_##NAME##_w \
1479 return do_opiwv_widen(s, a, fns[s->sew]); \
1482 GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
1483 GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
1484 GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
1485 GEN_OPIWV_WIDEN_TRANS(vwsub_wv)
1487 /* WIDEN OPIVX with WIDEN */
1488 static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
1490 return require_rvv(s) &&
1491 vext_check_isa_ill(s) &&
1492 vext_check_dd(s, a->rd, a->rs2, a->vm);
1495 static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
1496 gen_helper_opivx *fn)
1498 if (opiwx_widen_check(s, a)) {
1499 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1504 #define GEN_OPIWX_WIDEN_TRANS(NAME) \
1505 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1507 static gen_helper_opivx * const fns[3] = { \
1508 gen_helper_##NAME##_b, \
1509 gen_helper_##NAME##_h, \
1510 gen_helper_##NAME##_w \
1512 return do_opiwx_widen(s, a, fns[s->sew]); \
1515 GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
1516 GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
1517 GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
1518 GEN_OPIWX_WIDEN_TRANS(vwsub_wx)
/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */

/* OPIVV without GVEC IR: always go through the out-of-line helper. */
#define GEN_OPIVV_TRANS(NAME, CHECK)                                   \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                 \
{                                                                      \
    if (CHECK(s, a)) {                                                 \
        uint32_t data = 0;                                             \
        static gen_helper_gvec_4_ptr * const fns[4] = {                \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
        };                                                             \
        TCGLabel *over = gen_new_label();                              \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);              \
                                                                       \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                     \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);                 \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),         \
                           vreg_ofs(s, a->rs1),                        \
                           vreg_ofs(s, a->rs2), cpu_env,               \
                           s->vlen / 8, s->vlen / 8, data,             \
                           fns[s->sew]);                               \
        mark_vs_dirty(s);                                              \
        gen_set_label(over);                                           \
        return true;                                                   \
    }                                                                  \
    return false;                                                      \
}
1549 * For vadc and vsbc, an illegal instruction exception is raised if the
1550 * destination vector register is v0 and LMUL > 1. (Section 12.4)
1552 static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
1554 return require_rvv(s) &&
1555 vext_check_isa_ill(s) &&
1557 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
1560 GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
1561 GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)
1564 * For vmadc and vmsbc, an illegal instruction exception is raised if the
1565 * destination vector register overlaps a source vector register group.
1567 static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
1569 return require_rvv(s) &&
1570 vext_check_isa_ill(s) &&
1571 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1574 GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
1575 GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)
1577 static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
1579 return require_rvv(s) &&
1580 vext_check_isa_ill(s) &&
1582 vext_check_ss(s, a->rd, a->rs2, a->vm);
1585 /* OPIVX without GVEC IR */
1586 #define GEN_OPIVX_TRANS(NAME, CHECK) \
1587 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1589 if (CHECK(s, a)) { \
1590 static gen_helper_opivx * const fns[4] = { \
1591 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1592 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1595 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1600 GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
1601 GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)
1603 static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
1605 return require_rvv(s) &&
1606 vext_check_isa_ill(s) &&
1607 vext_check_ms(s, a->rd, a->rs2);
1610 GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
1611 GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)
1613 /* OPIVI without GVEC IR */
1614 #define GEN_OPIVI_TRANS(NAME, IMM_MODE, OPIVX, CHECK) \
1615 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1617 if (CHECK(s, a)) { \
1618 static gen_helper_opivx * const fns[4] = { \
1619 gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h, \
1620 gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d, \
1622 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1623 fns[s->sew], s, IMM_MODE); \
1628 GEN_OPIVI_TRANS(vadc_vim, IMM_SX, vadc_vxm, opivx_vadc_check)
1629 GEN_OPIVI_TRANS(vmadc_vim, IMM_SX, vmadc_vxm, opivx_vmadc_check)
1631 /* Vector Bitwise Logical Instructions */
1632 GEN_OPIVV_GVEC_TRANS(vand_vv, and)
1633 GEN_OPIVV_GVEC_TRANS(vor_vv, or)
1634 GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
1635 GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
1636 GEN_OPIVX_GVEC_TRANS(vor_vx, ors)
1637 GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
1638 GEN_OPIVI_GVEC_TRANS(vand_vi, IMM_SX, vand_vx, andi)
1639 GEN_OPIVI_GVEC_TRANS(vor_vi, IMM_SX, vor_vx, ori)
1640 GEN_OPIVI_GVEC_TRANS(vxor_vi, IMM_SX, vxor_vx, xori)
1642 /* Vector Single-Width Bit Shift Instructions */
1643 GEN_OPIVV_GVEC_TRANS(vsll_vv, shlv)
1644 GEN_OPIVV_GVEC_TRANS(vsrl_vv, shrv)
1645 GEN_OPIVV_GVEC_TRANS(vsra_vv, sarv)
1647 typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
1648 uint32_t, uint32_t);
1651 do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
1652 gen_helper_opivx *fn)
1654 if (!opivx_check(s, a)) {
1658 if (a->vm && s->vl_eq_vlmax) {
1659 TCGv_i32 src1 = tcg_temp_new_i32();
1661 tcg_gen_trunc_tl_i32(src1, get_gpr(s, a->rs1, EXT_NONE));
1662 tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
1663 gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
1664 src1, MAXSZ(s), MAXSZ(s));
1666 tcg_temp_free_i32(src1);
1670 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
1673 #define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
1674 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1676 static gen_helper_opivx * const fns[4] = { \
1677 gen_helper_##NAME##_b, gen_helper_##NAME##_h, \
1678 gen_helper_##NAME##_w, gen_helper_##NAME##_d, \
1681 return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]); \
1684 GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx, shls)
1685 GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx, shrs)
1686 GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx, sars)
1688 GEN_OPIVI_GVEC_TRANS(vsll_vi, IMM_TRUNC_SEW, vsll_vx, shli)
1689 GEN_OPIVI_GVEC_TRANS(vsrl_vi, IMM_TRUNC_SEW, vsrl_vx, shri)
1690 GEN_OPIVI_GVEC_TRANS(vsra_vi, IMM_TRUNC_SEW, vsra_vx, sari)
1692 /* Vector Narrowing Integer Right Shift Instructions */
1693 static bool opiwv_narrow_check(DisasContext *s, arg_rmrr *a)
1695 return require_rvv(s) &&
1696 vext_check_isa_ill(s) &&
1697 vext_check_sds(s, a->rd, a->rs1, a->rs2, a->vm);
1700 /* OPIVV with NARROW */
1701 #define GEN_OPIWV_NARROW_TRANS(NAME) \
1702 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1704 if (opiwv_narrow_check(s, a)) { \
1705 uint32_t data = 0; \
1706 static gen_helper_gvec_4_ptr * const fns[3] = { \
1707 gen_helper_##NAME##_b, \
1708 gen_helper_##NAME##_h, \
1709 gen_helper_##NAME##_w, \
1711 TCGLabel *over = gen_new_label(); \
1712 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
1714 data = FIELD_DP32(data, VDATA, VM, a->vm); \
1715 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
1716 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
1717 vreg_ofs(s, a->rs1), \
1718 vreg_ofs(s, a->rs2), cpu_env, \
1719 s->vlen / 8, s->vlen / 8, data, \
1722 gen_set_label(over); \
1727 GEN_OPIWV_NARROW_TRANS(vnsra_wv)
1728 GEN_OPIWV_NARROW_TRANS(vnsrl_wv)
1730 static bool opiwx_narrow_check(DisasContext *s, arg_rmrr *a)
1732 return require_rvv(s) &&
1733 vext_check_isa_ill(s) &&
1734 vext_check_sd(s, a->rd, a->rs2, a->vm);
1737 /* OPIVX with NARROW */
1738 #define GEN_OPIWX_NARROW_TRANS(NAME) \
1739 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1741 if (opiwx_narrow_check(s, a)) { \
1742 static gen_helper_opivx * const fns[3] = { \
1743 gen_helper_##NAME##_b, \
1744 gen_helper_##NAME##_h, \
1745 gen_helper_##NAME##_w, \
1747 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
1752 GEN_OPIWX_NARROW_TRANS(vnsra_wx)
1753 GEN_OPIWX_NARROW_TRANS(vnsrl_wx)
1755 /* OPIWI with NARROW */
1756 #define GEN_OPIWI_NARROW_TRANS(NAME, IMM_MODE, OPIVX) \
1757 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
1759 if (opiwx_narrow_check(s, a)) { \
1760 static gen_helper_opivx * const fns[3] = { \
1761 gen_helper_##OPIVX##_b, \
1762 gen_helper_##OPIVX##_h, \
1763 gen_helper_##OPIVX##_w, \
1765 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, \
1766 fns[s->sew], s, IMM_MODE); \
1771 GEN_OPIWI_NARROW_TRANS(vnsra_wi, IMM_ZX, vnsra_wx)
1772 GEN_OPIWI_NARROW_TRANS(vnsrl_wi, IMM_ZX, vnsrl_wx)
1774 /* Vector Integer Comparison Instructions */
1776 * For all comparison instructions, an illegal instruction exception is raised
1777 * if the destination vector register overlaps a source vector register group
1780 static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
1782 return require_rvv(s) &&
1783 vext_check_isa_ill(s) &&
1784 vext_check_mss(s, a->rd, a->rs1, a->rs2);
1787 GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
1788 GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
1789 GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
1790 GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
1791 GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
1792 GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
1794 static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
1796 return require_rvv(s) &&
1797 vext_check_isa_ill(s) &&
1798 vext_check_ms(s, a->rd, a->rs2);
1801 GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
1802 GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
1803 GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
1804 GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
1805 GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
1806 GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
1807 GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
1808 GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
1810 GEN_OPIVI_TRANS(vmseq_vi, IMM_SX, vmseq_vx, opivx_cmp_check)
1811 GEN_OPIVI_TRANS(vmsne_vi, IMM_SX, vmsne_vx, opivx_cmp_check)
1812 GEN_OPIVI_TRANS(vmsleu_vi, IMM_SX, vmsleu_vx, opivx_cmp_check)
1813 GEN_OPIVI_TRANS(vmsle_vi, IMM_SX, vmsle_vx, opivx_cmp_check)
1814 GEN_OPIVI_TRANS(vmsgtu_vi, IMM_SX, vmsgtu_vx, opivx_cmp_check)
1815 GEN_OPIVI_TRANS(vmsgt_vi, IMM_SX, vmsgt_vx, opivx_cmp_check)
1817 /* Vector Integer Min/Max Instructions */
1818 GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
1819 GEN_OPIVV_GVEC_TRANS(vmin_vv, smin)
1820 GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
1821 GEN_OPIVV_GVEC_TRANS(vmax_vv, smax)
1822 GEN_OPIVX_TRANS(vminu_vx, opivx_check)
1823 GEN_OPIVX_TRANS(vmin_vx, opivx_check)
1824 GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
1825 GEN_OPIVX_TRANS(vmax_vx, opivx_check)
1827 /* Vector Single-Width Integer Multiply Instructions */
1828 GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
1829 GEN_OPIVV_TRANS(vmulh_vv, opivv_check)
1830 GEN_OPIVV_TRANS(vmulhu_vv, opivv_check)
1831 GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check)
1832 GEN_OPIVX_GVEC_TRANS(vmul_vx, muls)
1833 GEN_OPIVX_TRANS(vmulh_vx, opivx_check)
1834 GEN_OPIVX_TRANS(vmulhu_vx, opivx_check)
1835 GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check)
1837 /* Vector Integer Divide Instructions */
1838 GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
1839 GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
1840 GEN_OPIVV_TRANS(vremu_vv, opivv_check)
1841 GEN_OPIVV_TRANS(vrem_vv, opivv_check)
1842 GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
1843 GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
1844 GEN_OPIVX_TRANS(vremu_vx, opivx_check)
1845 GEN_OPIVX_TRANS(vrem_vx, opivx_check)
1847 /* Vector Widening Integer Multiply Instructions */
1848 GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
1849 GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
1850 GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
1851 GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
1852 GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
1853 GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
1855 /* Vector Single-Width Integer Multiply-Add Instructions */
1856 GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
1857 GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
1858 GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
1859 GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
1860 GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
1861 GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
1862 GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
1863 GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
1865 /* Vector Widening Integer Multiply-Add Instructions */
1866 GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
1867 GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
1868 GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
1869 GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
1870 GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
1871 GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
1872 GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
1874 /* Vector Integer Merge and Move Instructions */
1875 static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
1877 if (require_rvv(s) &&
1878 vext_check_isa_ill(s) &&
1879 /* vmv.v.v has rs2 = 0 and vm = 1 */
1880 vext_check_sss(s, a->rd, a->rs1, 0, 1)) {
1881 if (s->vl_eq_vlmax) {
1882 tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
1883 vreg_ofs(s, a->rs1),
1884 MAXSZ(s), MAXSZ(s));
1886 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1887 static gen_helper_gvec_2_ptr * const fns[4] = {
1888 gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
1889 gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
1891 TCGLabel *over = gen_new_label();
1892 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1894 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
1895 cpu_env, s->vlen / 8, s->vlen / 8, data,
1897 gen_set_label(over);
1905 typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
1906 static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
1908 if (require_rvv(s) &&
1909 vext_check_isa_ill(s) &&
1910 /* vmv.v.x has rs2 = 0 and vm = 1 */
1911 vext_check_ss(s, a->rd, 0, 1)) {
1913 TCGLabel *over = gen_new_label();
1914 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1916 s1 = get_gpr(s, a->rs1, EXT_SIGN);
1918 if (s->vl_eq_vlmax) {
1919 tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
1920 MAXSZ(s), MAXSZ(s), s1);
1923 TCGv_i64 s1_i64 = tcg_temp_new_i64();
1924 TCGv_ptr dest = tcg_temp_new_ptr();
1925 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1926 static gen_helper_vmv_vx * const fns[4] = {
1927 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1928 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1931 tcg_gen_ext_tl_i64(s1_i64, s1);
1932 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1933 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1934 fns[s->sew](dest, s1_i64, cpu_env, desc);
1936 tcg_temp_free_ptr(dest);
1937 tcg_temp_free_i64(s1_i64);
1941 gen_set_label(over);
1947 static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
1949 if (require_rvv(s) &&
1950 vext_check_isa_ill(s) &&
1951 /* vmv.v.i has rs2 = 0 and vm = 1 */
1952 vext_check_ss(s, a->rd, 0, 1)) {
1953 int64_t simm = sextract64(a->rs1, 0, 5);
1954 if (s->vl_eq_vlmax) {
1955 tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
1956 MAXSZ(s), MAXSZ(s), simm);
1962 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1963 static gen_helper_vmv_vx * const fns[4] = {
1964 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1965 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1967 TCGLabel *over = gen_new_label();
1968 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1970 s1 = tcg_constant_i64(simm);
1971 dest = tcg_temp_new_ptr();
1972 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
1973 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1974 fns[s->sew](dest, s1, cpu_env, desc);
1976 tcg_temp_free_ptr(dest);
1978 gen_set_label(over);
1985 GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
1986 GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
1987 GEN_OPIVI_TRANS(vmerge_vim, IMM_SX, vmerge_vxm, opivx_vadc_check)
1990 *** Vector Fixed-Point Arithmetic Instructions
1993 /* Vector Single-Width Saturating Add and Subtract */
1994 GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
1995 GEN_OPIVV_TRANS(vsadd_vv, opivv_check)
1996 GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
1997 GEN_OPIVV_TRANS(vssub_vv, opivv_check)
1998 GEN_OPIVX_TRANS(vsaddu_vx, opivx_check)
1999 GEN_OPIVX_TRANS(vsadd_vx, opivx_check)
2000 GEN_OPIVX_TRANS(vssubu_vx, opivx_check)
2001 GEN_OPIVX_TRANS(vssub_vx, opivx_check)
2002 GEN_OPIVI_TRANS(vsaddu_vi, IMM_SX, vsaddu_vx, opivx_check)
2003 GEN_OPIVI_TRANS(vsadd_vi, IMM_SX, vsadd_vx, opivx_check)
2005 /* Vector Single-Width Averaging Add and Subtract */
2006 GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
2007 GEN_OPIVV_TRANS(vaaddu_vv, opivv_check)
2008 GEN_OPIVV_TRANS(vasub_vv, opivv_check)
2009 GEN_OPIVV_TRANS(vasubu_vv, opivv_check)
2010 GEN_OPIVX_TRANS(vaadd_vx, opivx_check)
2011 GEN_OPIVX_TRANS(vaaddu_vx, opivx_check)
2012 GEN_OPIVX_TRANS(vasub_vx, opivx_check)
2013 GEN_OPIVX_TRANS(vasubu_vx, opivx_check)
2015 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
2016 GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
2017 GEN_OPIVX_TRANS(vsmul_vx, opivx_check)
2019 /* Vector Widening Saturating Scaled Multiply-Add */
2020 GEN_OPIVV_WIDEN_TRANS(vwsmaccu_vv, opivv_widen_check)
2021 GEN_OPIVV_WIDEN_TRANS(vwsmacc_vv, opivv_widen_check)
2022 GEN_OPIVV_WIDEN_TRANS(vwsmaccsu_vv, opivv_widen_check)
2023 GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx)
2024 GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx)
2025 GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx)
2026 GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx)
2028 /* Vector Single-Width Scaling Shift Instructions */
2029 GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
2030 GEN_OPIVV_TRANS(vssra_vv, opivv_check)
2031 GEN_OPIVX_TRANS(vssrl_vx, opivx_check)
2032 GEN_OPIVX_TRANS(vssra_vx, opivx_check)
2033 GEN_OPIVI_TRANS(vssrl_vi, IMM_TRUNC_SEW, vssrl_vx, opivx_check)
2034 GEN_OPIVI_TRANS(vssra_vi, IMM_TRUNC_SEW, vssra_vx, opivx_check)
2036 /* Vector Narrowing Fixed-Point Clip Instructions */
2037 GEN_OPIWV_NARROW_TRANS(vnclipu_wv)
2038 GEN_OPIWV_NARROW_TRANS(vnclip_wv)
2039 GEN_OPIWX_NARROW_TRANS(vnclipu_wx)
2040 GEN_OPIWX_NARROW_TRANS(vnclip_wx)
2041 GEN_OPIWI_NARROW_TRANS(vnclipu_wi, IMM_ZX, vnclipu_wx)
2042 GEN_OPIWI_NARROW_TRANS(vnclip_wi, IMM_ZX, vnclip_wx)
2045 *** Vector Float Point Arithmetic Instructions
2049 * As RVF-only cpus always have values NaN-boxed to 64-bits,
2050 * RVF and RVD can be treated equally.
2051 * We don't have to deal with the cases of: SEW > FLEN.
2053 * If SEW < FLEN, check whether input fp register is a valid
2054 * NaN-boxed value, in which case the least-significant SEW bits
2055 * of the f regsiter are used, else the canonical NaN value is used.
2057 static void do_nanbox(DisasContext *s, TCGv_i64 out, TCGv_i64 in)
2061 gen_check_nanbox_h(out, in);
2064 gen_check_nanbox_s(out, in);
2067 tcg_gen_mov_i64(out, in);
2070 g_assert_not_reached();
2074 /* Vector Single-Width Floating-Point Add/Subtract Instructions */
2077 * If the current SEW does not correspond to a supported IEEE floating-point
2078 * type, an illegal instruction exception is raised.
2080 static bool opfvv_check(DisasContext *s, arg_rmrr *a)
2082 return require_rvv(s) &&
2084 vext_check_isa_ill(s) &&
2085 vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
2088 /* OPFVV without GVEC IR */
2089 #define GEN_OPFVV_TRANS(NAME, CHECK) \
2090 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2092 if (CHECK(s, a)) { \
2093 uint32_t data = 0; \
2094 static gen_helper_gvec_4_ptr * const fns[3] = { \
2095 gen_helper_##NAME##_h, \
2096 gen_helper_##NAME##_w, \
2097 gen_helper_##NAME##_d, \
2099 TCGLabel *over = gen_new_label(); \
2101 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2103 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2104 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2105 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2106 vreg_ofs(s, a->rs1), \
2107 vreg_ofs(s, a->rs2), cpu_env, \
2108 s->vlen / 8, s->vlen / 8, data, \
2111 gen_set_label(over); \
2116 GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
2117 GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
2119 typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
2120 TCGv_env, TCGv_i32);
2122 static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
2123 uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
2125 TCGv_ptr dest, src2, mask;
2129 TCGLabel *over = gen_new_label();
2130 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2132 dest = tcg_temp_new_ptr();
2133 mask = tcg_temp_new_ptr();
2134 src2 = tcg_temp_new_ptr();
2135 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2137 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
2138 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
2139 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2141 /* NaN-box f[rs1] */
2142 t1 = tcg_temp_new_i64();
2143 do_nanbox(s, t1, cpu_fpr[rs1]);
2145 fn(dest, mask, t1, src2, cpu_env, desc);
2147 tcg_temp_free_ptr(dest);
2148 tcg_temp_free_ptr(mask);
2149 tcg_temp_free_ptr(src2);
2150 tcg_temp_free_i64(t1);
2152 gen_set_label(over);
2157 * If the current SEW does not correspond to a supported IEEE floating-point
2158 * type, an illegal instruction exception is raised
2160 static bool opfvf_check(DisasContext *s, arg_rmrr *a)
2162 return require_rvv(s) &&
2164 vext_check_isa_ill(s) &&
2165 vext_check_ss(s, a->rd, a->rs2, a->vm);
2168 /* OPFVF without GVEC IR */
2169 #define GEN_OPFVF_TRANS(NAME, CHECK) \
2170 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2172 if (CHECK(s, a)) { \
2173 uint32_t data = 0; \
2174 static gen_helper_opfvf *const fns[3] = { \
2175 gen_helper_##NAME##_h, \
2176 gen_helper_##NAME##_w, \
2177 gen_helper_##NAME##_d, \
2180 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2181 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2182 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2183 fns[s->sew - 1], s); \
2188 GEN_OPFVF_TRANS(vfadd_vf, opfvf_check)
2189 GEN_OPFVF_TRANS(vfsub_vf, opfvf_check)
2190 GEN_OPFVF_TRANS(vfrsub_vf, opfvf_check)
2192 /* Vector Widening Floating-Point Add/Subtract Instructions */
2193 static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
2195 return require_rvv(s) &&
2197 vext_check_isa_ill(s) &&
2198 vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
2201 /* OPFVV with WIDEN */
2202 #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK) \
2203 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2205 if (CHECK(s, a)) { \
2206 uint32_t data = 0; \
2207 static gen_helper_gvec_4_ptr * const fns[2] = { \
2208 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2210 TCGLabel *over = gen_new_label(); \
2212 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2214 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2215 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2216 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2217 vreg_ofs(s, a->rs1), \
2218 vreg_ofs(s, a->rs2), cpu_env, \
2219 s->vlen / 8, s->vlen / 8, data, \
2222 gen_set_label(over); \
2228 GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
2229 GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
2231 static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
2233 return require_rvv(s) &&
2235 vext_check_isa_ill(s) &&
2236 vext_check_ds(s, a->rd, a->rs2, a->vm);
2239 /* OPFVF with WIDEN */
2240 #define GEN_OPFVF_WIDEN_TRANS(NAME) \
2241 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2243 if (opfvf_widen_check(s, a)) { \
2244 uint32_t data = 0; \
2245 static gen_helper_opfvf *const fns[2] = { \
2246 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2249 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2250 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2251 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2252 fns[s->sew - 1], s); \
2257 GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
2258 GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
2260 static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
2262 return require_rvv(s) &&
2264 vext_check_isa_ill(s) &&
2265 vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
2268 /* WIDEN OPFVV with WIDEN */
2269 #define GEN_OPFWV_WIDEN_TRANS(NAME) \
2270 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2272 if (opfwv_widen_check(s, a)) { \
2273 uint32_t data = 0; \
2274 static gen_helper_gvec_4_ptr * const fns[2] = { \
2275 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2277 TCGLabel *over = gen_new_label(); \
2279 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2281 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2282 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2283 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2284 vreg_ofs(s, a->rs1), \
2285 vreg_ofs(s, a->rs2), cpu_env, \
2286 s->vlen / 8, s->vlen / 8, data, \
2289 gen_set_label(over); \
2295 GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
2296 GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
2298 static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2300 return require_rvv(s) &&
2302 vext_check_isa_ill(s) &&
2303 vext_check_dd(s, a->rd, a->rs2, a->vm);
2306 /* WIDEN OPFVF with WIDEN */
2307 #define GEN_OPFWF_WIDEN_TRANS(NAME) \
2308 static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \
2310 if (opfwf_widen_check(s, a)) { \
2311 uint32_t data = 0; \
2312 static gen_helper_opfvf *const fns[2] = { \
2313 gen_helper_##NAME##_h, gen_helper_##NAME##_w, \
2316 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2317 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2318 return opfvf_trans(a->rd, a->rs1, a->rs2, data, \
2319 fns[s->sew - 1], s); \
2324 GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
2325 GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
2327 /* Vector Single-Width Floating-Point Multiply/Divide Instructions */
2328 GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
2329 GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
2330 GEN_OPFVF_TRANS(vfmul_vf, opfvf_check)
2331 GEN_OPFVF_TRANS(vfdiv_vf, opfvf_check)
2332 GEN_OPFVF_TRANS(vfrdiv_vf, opfvf_check)
2334 /* Vector Widening Floating-Point Multiply */
2335 GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
2336 GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
2338 /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
2339 GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
2340 GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
2341 GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
2342 GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
2343 GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
2344 GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
2345 GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
2346 GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
2347 GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
2348 GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
2349 GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
2350 GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
2351 GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
2352 GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
2353 GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
2354 GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
2356 /* Vector Widening Floating-Point Fused Multiply-Add Instructions */
2357 GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
2358 GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
2359 GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
2360 GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
2361 GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
2362 GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
2363 GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
2364 GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
2366 /* Vector Floating-Point Square-Root Instruction */
2369 * If the current SEW does not correspond to a supported IEEE floating-point
2370 * type, an illegal instruction exception is raised
2372 static bool opfv_check(DisasContext *s, arg_rmr *a)
2374 return require_rvv(s) &&
2376 vext_check_isa_ill(s) &&
2377 /* OPFV instructions ignore vs1 check */
2378 vext_check_ss(s, a->rd, a->rs2, a->vm);
2381 #define GEN_OPFV_TRANS(NAME, CHECK) \
2382 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2384 if (CHECK(s, a)) { \
2385 uint32_t data = 0; \
2386 static gen_helper_gvec_3_ptr * const fns[3] = { \
2387 gen_helper_##NAME##_h, \
2388 gen_helper_##NAME##_w, \
2389 gen_helper_##NAME##_d, \
2391 TCGLabel *over = gen_new_label(); \
2393 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2395 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2396 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2397 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2398 vreg_ofs(s, a->rs2), cpu_env, \
2399 s->vlen / 8, s->vlen / 8, data, \
2402 gen_set_label(over); \
2408 GEN_OPFV_TRANS(vfsqrt_v, opfv_check)
2410 /* Vector Floating-Point MIN/MAX Instructions */
2411 GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
2412 GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
2413 GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
2414 GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
2416 /* Vector Floating-Point Sign-Injection Instructions */
2417 GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
2418 GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
2419 GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
2420 GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
2421 GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
2422 GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
2424 /* Vector Floating-Point Compare Instructions */
2425 static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2427 return require_rvv(s) &&
2429 vext_check_isa_ill(s) &&
2430 vext_check_mss(s, a->rd, a->rs1, a->rs2);
2433 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
2434 GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
2435 GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
2436 GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
2437 GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
2439 static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2441 return require_rvv(s) &&
2443 vext_check_isa_ill(s) &&
2444 vext_check_ms(s, a->rd, a->rs2);
2447 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
2448 GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
2449 GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
2450 GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
2451 GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
2452 GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
2453 GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check)
2455 /* Vector Floating-Point Classify Instruction */
2456 GEN_OPFV_TRANS(vfclass_v, opfv_check)
2458 /* Vector Floating-Point Merge Instruction */
2459 GEN_OPFVF_TRANS(vfmerge_vfm, opfvf_check)
2461 static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2463 if (require_rvv(s) &&
2465 vext_check_isa_ill(s) &&
2466 require_align(a->rd, s->lmul)) {
2469 if (s->vl_eq_vlmax) {
2470 t1 = tcg_temp_new_i64();
2471 /* NaN-box f[rs1] */
2472 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2474 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2475 MAXSZ(s), MAXSZ(s), t1);
2480 uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2481 static gen_helper_vmv_vx * const fns[3] = {
2482 gen_helper_vmv_v_x_h,
2483 gen_helper_vmv_v_x_w,
2484 gen_helper_vmv_v_x_d,
2486 TCGLabel *over = gen_new_label();
2487 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2489 t1 = tcg_temp_new_i64();
2490 /* NaN-box f[rs1] */
2491 do_nanbox(s, t1, cpu_fpr[a->rs1]);
2493 dest = tcg_temp_new_ptr();
2494 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2495 tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
2497 fns[s->sew - 1](dest, t1, cpu_env, desc);
2499 tcg_temp_free_ptr(dest);
2501 gen_set_label(over);
2503 tcg_temp_free_i64(t1);
2509 /* Single-Width Floating-Point/Integer Type-Convert Instructions */
2510 GEN_OPFV_TRANS(vfcvt_xu_f_v, opfv_check)
2511 GEN_OPFV_TRANS(vfcvt_x_f_v, opfv_check)
2512 GEN_OPFV_TRANS(vfcvt_f_xu_v, opfv_check)
2513 GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check)
2515 /* Widening Floating-Point/Integer Type-Convert Instructions */
2518 * If the current SEW does not correspond to a supported IEEE floating-point
2519 * type, an illegal instruction exception is raised
2521 static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2523 return require_rvv(s) &&
2524 require_scale_rvf(s) &&
2526 vext_check_isa_ill(s) &&
2527 vext_check_ds(s, a->rd, a->rs2, a->vm);
2530 #define GEN_OPFV_WIDEN_TRANS(NAME) \
2531 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2533 if (opfv_widen_check(s, a)) { \
2534 uint32_t data = 0; \
2535 static gen_helper_gvec_3_ptr * const fns[2] = { \
2536 gen_helper_##NAME##_h, \
2537 gen_helper_##NAME##_w, \
2539 TCGLabel *over = gen_new_label(); \
2541 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2543 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2544 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2545 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2546 vreg_ofs(s, a->rs2), cpu_env, \
2547 s->vlen / 8, s->vlen / 8, data, \
2550 gen_set_label(over); \
2556 GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v)
2557 GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v)
2558 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_xu_v)
2559 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_x_v)
2560 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v)
2562 /* Narrowing Floating-Point/Integer Type-Convert Instructions */
2565 * If the current SEW does not correspond to a supported IEEE floating-point
2566 * type, an illegal instruction exception is raised
2568 static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2570 return require_rvv(s) &&
2572 (s->sew != MO_64) &&
2573 vext_check_isa_ill(s) &&
2574 /* OPFV narrowing instructions ignore vs1 check */
2575 vext_check_sd(s, a->rd, a->rs2, a->vm);
2578 #define GEN_OPFV_NARROW_TRANS(NAME) \
2579 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2581 if (opfv_narrow_check(s, a)) { \
2582 uint32_t data = 0; \
2583 static gen_helper_gvec_3_ptr * const fns[2] = { \
2584 gen_helper_##NAME##_h, \
2585 gen_helper_##NAME##_w, \
2587 TCGLabel *over = gen_new_label(); \
2589 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2591 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2592 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2593 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2594 vreg_ofs(s, a->rs2), cpu_env, \
2595 s->vlen / 8, s->vlen / 8, data, \
2598 gen_set_label(over); \
2604 GEN_OPFV_NARROW_TRANS(vfncvt_xu_f_v)
2605 GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v)
2606 GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v)
2607 GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v)
2608 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
2611 *** Vector Reduction Operations
2613 /* Vector Single-Width Integer Reduction Instructions */
2614 static bool reduction_check(DisasContext *s, arg_rmrr *a)
2616 return require_rvv(s) &&
2617 vext_check_isa_ill(s) &&
2618 vext_check_reduction(s, a->rs2);
2621 GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
2622 GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
2623 GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
2624 GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
2625 GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
2626 GEN_OPIVV_TRANS(vredand_vs, reduction_check)
2627 GEN_OPIVV_TRANS(vredor_vs, reduction_check)
2628 GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
2630 /* Vector Widening Integer Reduction Instructions */
2631 static bool reduction_widen_check(DisasContext *s, arg_rmrr *a)
2633 return reduction_check(s, a) && (s->sew < MO_64);
2636 GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check)
2637 GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
2639 /* Vector Single-Width Floating-Point Reduction Instructions */
2640 static bool freduction_check(DisasContext *s, arg_rmrr *a)
2642 return reduction_check(s, a) &&
2646 GEN_OPFVV_TRANS(vfredsum_vs, freduction_check)
2647 GEN_OPFVV_TRANS(vfredmax_vs, freduction_check)
2648 GEN_OPFVV_TRANS(vfredmin_vs, freduction_check)
2650 /* Vector Widening Floating-Point Reduction Instructions */
2651 static bool freduction_widen_check(DisasContext *s, arg_rmrr *a)
2653 return reduction_widen_check(s, a) &&
2654 require_scale_rvf(s) &&
2658 GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, freduction_widen_check)
2661 *** Vector Mask Operations
2664 /* Vector Mask-Register Logical Instructions */
2665 #define GEN_MM_TRANS(NAME) \
2666 static bool trans_##NAME(DisasContext *s, arg_r *a) \
2668 if (require_rvv(s) && \
2669 vext_check_isa_ill(s)) { \
2670 uint32_t data = 0; \
2671 gen_helper_gvec_4_ptr *fn = gen_helper_##NAME; \
2672 TCGLabel *over = gen_new_label(); \
2673 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2675 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2676 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \
2677 vreg_ofs(s, a->rs1), \
2678 vreg_ofs(s, a->rs2), cpu_env, \
2679 s->vlen / 8, s->vlen / 8, data, fn); \
2681 gen_set_label(over); \
2687 GEN_MM_TRANS(vmand_mm)
2688 GEN_MM_TRANS(vmnand_mm)
2689 GEN_MM_TRANS(vmandnot_mm)
2690 GEN_MM_TRANS(vmxor_mm)
2691 GEN_MM_TRANS(vmor_mm)
2692 GEN_MM_TRANS(vmnor_mm)
2693 GEN_MM_TRANS(vmornot_mm)
2694 GEN_MM_TRANS(vmxnor_mm)
2696 /* Vector count population in mask vcpop */
2697 static bool trans_vcpop_m(DisasContext *s, arg_rmr *a)
2699 if (require_rvv(s) &&
2700 vext_check_isa_ill(s)) {
2701 TCGv_ptr src2, mask;
2705 data = FIELD_DP32(data, VDATA, VM, a->vm);
2706 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2708 mask = tcg_temp_new_ptr();
2709 src2 = tcg_temp_new_ptr();
2710 dst = dest_gpr(s, a->rd);
2711 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2713 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2714 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2716 gen_helper_vcpop_m(dst, mask, src2, cpu_env, desc);
2717 gen_set_gpr(s, a->rd, dst);
2719 tcg_temp_free_ptr(mask);
2720 tcg_temp_free_ptr(src2);
2727 /* vmfirst find-first-set mask bit */
2728 static bool trans_vfirst_m(DisasContext *s, arg_rmr *a)
2730 if (require_rvv(s) &&
2731 vext_check_isa_ill(s)) {
2732 TCGv_ptr src2, mask;
2736 data = FIELD_DP32(data, VDATA, VM, a->vm);
2737 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2739 mask = tcg_temp_new_ptr();
2740 src2 = tcg_temp_new_ptr();
2741 dst = dest_gpr(s, a->rd);
2742 desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data));
2744 tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2745 tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2747 gen_helper_vfirst_m(dst, mask, src2, cpu_env, desc);
2748 gen_set_gpr(s, a->rd, dst);
2750 tcg_temp_free_ptr(mask);
2751 tcg_temp_free_ptr(src2);
2757 /* vmsbf.m set-before-first mask bit */
2758 /* vmsif.m set-includ-first mask bit */
2759 /* vmsof.m set-only-first mask bit */
2760 #define GEN_M_TRANS(NAME) \
2761 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
2763 if (require_rvv(s) && \
2764 vext_check_isa_ill(s) && \
2765 require_vm(a->vm, a->rd) && \
2766 (a->rd != a->rs2)) { \
2767 uint32_t data = 0; \
2768 gen_helper_gvec_3_ptr *fn = gen_helper_##NAME; \
2769 TCGLabel *over = gen_new_label(); \
2770 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); \
2772 data = FIELD_DP32(data, VDATA, VM, a->vm); \
2773 data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \
2774 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \
2775 vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \
2776 cpu_env, s->vlen / 8, s->vlen / 8, \
2779 gen_set_label(over); \
2785 GEN_M_TRANS(vmsbf_m)
2786 GEN_M_TRANS(vmsif_m)
2787 GEN_M_TRANS(vmsof_m)
2790 * Vector Iota Instruction
2792 * 1. The destination register cannot overlap the source register.
2793 * 2. If masked, cannot overlap the mask register ('v0').
2794 * 3. An illegal instruction exception is raised if vstart is non-zero.
2796 static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
2798 if (require_rvv(s) &&
2799 vext_check_isa_ill(s) &&
2800 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs2, 1) &&
2801 require_vm(a->vm, a->rd) &&
2802 require_align(a->rd, s->lmul)) {
2804 TCGLabel *over = gen_new_label();
2805 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2807 data = FIELD_DP32(data, VDATA, VM, a->vm);
2808 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2809 static gen_helper_gvec_3_ptr * const fns[4] = {
2810 gen_helper_viota_m_b, gen_helper_viota_m_h,
2811 gen_helper_viota_m_w, gen_helper_viota_m_d,
2813 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2814 vreg_ofs(s, a->rs2), cpu_env,
2815 s->vlen / 8, s->vlen / 8, data, fns[s->sew]);
2817 gen_set_label(over);
2823 /* Vector Element Index Instruction */
2824 static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
2826 if (require_rvv(s) &&
2827 vext_check_isa_ill(s) &&
2828 require_align(a->rd, s->lmul) &&
2829 require_vm(a->vm, a->rd)) {
2831 TCGLabel *over = gen_new_label();
2832 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2834 data = FIELD_DP32(data, VDATA, VM, a->vm);
2835 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2836 static gen_helper_gvec_2_ptr * const fns[4] = {
2837 gen_helper_vid_v_b, gen_helper_vid_v_h,
2838 gen_helper_vid_v_w, gen_helper_vid_v_d,
2840 tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2841 cpu_env, s->vlen / 8, s->vlen / 8,
2844 gen_set_label(over);
2851 *** Vector Permutation Instructions
2854 /* Integer Extract Instruction */
2856 static void load_element(TCGv_i64 dest, TCGv_ptr base,
2857 int ofs, int sew, bool sign)
2862 tcg_gen_ld8u_i64(dest, base, ofs);
2864 tcg_gen_ld8s_i64(dest, base, ofs);
2869 tcg_gen_ld16u_i64(dest, base, ofs);
2871 tcg_gen_ld16s_i64(dest, base, ofs);
2876 tcg_gen_ld32u_i64(dest, base, ofs);
2878 tcg_gen_ld32s_i64(dest, base, ofs);
2882 tcg_gen_ld_i64(dest, base, ofs);
2885 g_assert_not_reached();
2890 /* offset of the idx element with base regsiter r */
2891 static uint32_t endian_ofs(DisasContext *s, int r, int idx)
2893 #ifdef HOST_WORDS_BIGENDIAN
2894 return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
2896 return vreg_ofs(s, r) + (idx << s->sew);
2900 /* adjust the index according to the endian */
2901 static void endian_adjust(TCGv_i32 ofs, int sew)
2903 #ifdef HOST_WORDS_BIGENDIAN
2904 tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
2908 /* Load idx >= VLMAX ? 0 : vreg[idx] */
2909 static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
2910 int vreg, TCGv idx, int vlmax)
2912 TCGv_i32 ofs = tcg_temp_new_i32();
2913 TCGv_ptr base = tcg_temp_new_ptr();
2914 TCGv_i64 t_idx = tcg_temp_new_i64();
2915 TCGv_i64 t_vlmax, t_zero;
2918 * Mask the index to the length so that we do
2919 * not produce an out-of-range load.
2921 tcg_gen_trunc_tl_i32(ofs, idx);
2922 tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
2924 /* Convert the index to an offset. */
2925 endian_adjust(ofs, s->sew);
2926 tcg_gen_shli_i32(ofs, ofs, s->sew);
2928 /* Convert the index to a pointer. */
2929 tcg_gen_ext_i32_ptr(base, ofs);
2930 tcg_gen_add_ptr(base, base, cpu_env);
2932 /* Perform the load. */
2933 load_element(dest, base,
2934 vreg_ofs(s, vreg), s->sew, false);
2935 tcg_temp_free_ptr(base);
2936 tcg_temp_free_i32(ofs);
2938 /* Flush out-of-range indexing to zero. */
2939 t_vlmax = tcg_constant_i64(vlmax);
2940 t_zero = tcg_constant_i64(0);
2941 tcg_gen_extu_tl_i64(t_idx, idx);
2943 tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
2944 t_vlmax, dest, t_zero);
2946 tcg_temp_free_i64(t_idx);
2949 static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
2950 int vreg, int idx, bool sign)
2952 load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew, sign);
2955 static bool trans_vext_x_v(DisasContext *s, arg_r *a)
2957 TCGv_i64 tmp = tcg_temp_new_i64();
2958 TCGv dest = dest_gpr(s, a->rd);
2961 /* Special case vmv.x.s rd, vs2. */
2962 vec_element_loadi(s, tmp, a->rs2, 0, false);
2964 /* This instruction ignores LMUL and vector register groups */
2965 int vlmax = s->vlen >> (3 + s->sew);
2966 vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax);
2969 tcg_gen_trunc_i64_tl(dest, tmp);
2970 gen_set_gpr(s, a->rd, dest);
2972 tcg_temp_free_i64(tmp);
2976 /* Integer Scalar Move Instruction */
2978 static void store_element(TCGv_i64 val, TCGv_ptr base,
2983 tcg_gen_st8_i64(val, base, ofs);
2986 tcg_gen_st16_i64(val, base, ofs);
2989 tcg_gen_st32_i64(val, base, ofs);
2992 tcg_gen_st_i64(val, base, ofs);
2995 g_assert_not_reached();
3001 * Store vreg[idx] = val.
3002 * The index must be in range of VLMAX.
3004 static void vec_element_storei(DisasContext *s, int vreg,
3005 int idx, TCGv_i64 val)
3007 store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
3010 /* vmv.x.s rd, vs2 # x[rd] = vs2[0] */
3011 static bool trans_vmv_x_s(DisasContext *s, arg_vmv_x_s *a)
3013 if (require_rvv(s) &&
3014 vext_check_isa_ill(s)) {
3018 t1 = tcg_temp_new_i64();
3019 dest = tcg_temp_new();
3021 * load vreg and sign-extend to 64 bits,
3022 * then truncate to XLEN bits before storing to gpr.
3024 vec_element_loadi(s, t1, a->rs2, 0, true);
3025 tcg_gen_trunc_i64_tl(dest, t1);
3026 gen_set_gpr(s, a->rd, dest);
3027 tcg_temp_free_i64(t1);
3028 tcg_temp_free(dest);
3035 /* vmv.s.x vd, rs1 # vd[0] = rs1 */
3036 static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
3038 if (require_rvv(s) &&
3039 vext_check_isa_ill(s)) {
3040 /* This instruction ignores LMUL and vector register groups */
3043 TCGLabel *over = gen_new_label();
3045 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3047 t1 = tcg_temp_new_i64();
3050 * load gpr and sign-extend to 64 bits,
3051 * then truncate to SEW bits when storing to vreg.
3053 s1 = get_gpr(s, a->rs1, EXT_NONE);
3054 tcg_gen_ext_tl_i64(t1, s1);
3055 vec_element_storei(s, a->rd, 0, t1);
3056 tcg_temp_free_i64(t1);
3058 gen_set_label(over);
3064 /* Floating-Point Scalar Move Instructions */
3065 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
3067 if (require_rvv(s) &&
3069 vext_check_isa_ill(s)) {
3070 unsigned int ofs = (8 << s->sew);
3071 unsigned int len = 64 - ofs;
3074 vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0, false);
3075 /* NaN-box f[rd] as necessary for SEW */
3077 t_nan = tcg_constant_i64(UINT64_MAX);
3078 tcg_gen_deposit_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
3088 /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
3089 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
3091 if (require_rvv(s) &&
3093 vext_check_isa_ill(s)) {
3094 /* The instructions ignore LMUL and vector register group. */
3096 TCGLabel *over = gen_new_label();
3098 /* if vl == 0, skip vector register write back */
3099 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3101 /* NaN-box f[rs1] */
3102 t1 = tcg_temp_new_i64();
3103 do_nanbox(s, t1, cpu_fpr[a->rs1]);
3105 vec_element_storei(s, a->rd, 0, t1);
3106 tcg_temp_free_i64(t1);
3108 gen_set_label(over);
3114 /* Vector Slide Instructions */
3115 static bool slideup_check(DisasContext *s, arg_rmrr *a)
3117 return require_rvv(s) &&
3118 vext_check_isa_ill(s) &&
3119 vext_check_slide(s, a->rd, a->rs2, a->vm, true);
3122 GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
3123 GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
3124 GEN_OPIVI_TRANS(vslideup_vi, IMM_ZX, vslideup_vx, slideup_check)
3126 static bool slidedown_check(DisasContext *s, arg_rmrr *a)
3128 return require_rvv(s) &&
3129 vext_check_isa_ill(s) &&
3130 vext_check_slide(s, a->rd, a->rs2, a->vm, false);
3133 GEN_OPIVX_TRANS(vslidedown_vx, slidedown_check)
3134 GEN_OPIVX_TRANS(vslide1down_vx, slidedown_check)
3135 GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
3137 /* Vector Floating-Point Slide Instructions */
3138 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
3140 return slideup_check(s, a) &&
3144 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
3146 return slidedown_check(s, a) &&
3150 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
3151 GEN_OPFVF_TRANS(vfslide1down_vf, fslidedown_check)
3153 /* Vector Register Gather Instruction */
3154 static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
3156 return require_rvv(s) &&
3157 vext_check_isa_ill(s) &&
3158 require_align(a->rd, s->lmul) &&
3159 require_align(a->rs1, s->lmul) &&
3160 require_align(a->rs2, s->lmul) &&
3161 (a->rd != a->rs2 && a->rd != a->rs1) &&
3162 require_vm(a->vm, a->rd);
3165 static bool vrgatherei16_vv_check(DisasContext *s, arg_rmrr *a)
3167 int8_t emul = MO_16 - s->sew + s->lmul;
3168 return require_rvv(s) &&
3169 vext_check_isa_ill(s) &&
3170 (emul >= -3 && emul <= 3) &&
3171 require_align(a->rd, s->lmul) &&
3172 require_align(a->rs1, emul) &&
3173 require_align(a->rs2, s->lmul) &&
3174 (a->rd != a->rs2 && a->rd != a->rs1) &&
3175 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3176 a->rs1, 1 << MAX(emul, 0)) &&
3177 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0),
3178 a->rs2, 1 << MAX(s->lmul, 0)) &&
3179 require_vm(a->vm, a->rd);
3182 GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)
3183 GEN_OPIVV_TRANS(vrgatherei16_vv, vrgatherei16_vv_check)
3185 static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
3187 return require_rvv(s) &&
3188 vext_check_isa_ill(s) &&
3189 require_align(a->rd, s->lmul) &&
3190 require_align(a->rs2, s->lmul) &&
3191 (a->rd != a->rs2) &&
3192 require_vm(a->vm, a->rd);
3195 /* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
3196 static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
3198 if (!vrgather_vx_check(s, a)) {
3202 if (a->vm && s->vl_eq_vlmax) {
3203 int scale = s->lmul - (s->sew + 3);
3204 int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale;
3205 TCGv_i64 dest = tcg_temp_new_i64();
3208 vec_element_loadi(s, dest, a->rs2, 0, false);
3210 vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
3213 tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
3214 MAXSZ(s), MAXSZ(s), dest);
3215 tcg_temp_free_i64(dest);
3218 static gen_helper_opivx * const fns[4] = {
3219 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3220 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3222 return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
3227 /* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
3228 static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
3230 if (!vrgather_vx_check(s, a)) {
3234 if (a->vm && s->vl_eq_vlmax) {
3235 int scale = s->lmul - (s->sew + 3);
3236 int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale;
3237 if (a->rs1 >= vlmax) {
3238 tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd),
3239 MAXSZ(s), MAXSZ(s), 0);
3241 tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
3242 endian_ofs(s, a->rs2, a->rs1),
3243 MAXSZ(s), MAXSZ(s));
3247 static gen_helper_opivx * const fns[4] = {
3248 gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
3249 gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
3251 return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew],
3258 * Vector Compress Instruction
3260 * The destination vector register group cannot overlap the
3261 * source vector register group or the source mask register.
3263 static bool vcompress_vm_check(DisasContext *s, arg_r *a)
3265 return require_rvv(s) &&
3266 vext_check_isa_ill(s) &&
3267 require_align(a->rd, s->lmul) &&
3268 require_align(a->rs2, s->lmul) &&
3269 (a->rd != a->rs2) &&
3270 !is_overlapped(a->rd, 1 << MAX(s->lmul, 0), a->rs1, 1);
3273 static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
3275 if (vcompress_vm_check(s, a)) {
3277 static gen_helper_gvec_4_ptr * const fns[4] = {
3278 gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
3279 gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
3281 TCGLabel *over = gen_new_label();
3282 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3284 data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
3285 tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3286 vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
3287 cpu_env, s->vlen / 8, s->vlen / 8, data,
3290 gen_set_label(over);
3297 * Whole Vector Register Move Instructions ignore vtype and vl setting.
3298 * Thus, we don't need to check vill bit. (Section 16.6)
3300 #define GEN_VMV_WHOLE_TRANS(NAME, LEN) \
3301 static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \
3303 if (require_rvv(s) && \
3304 QEMU_IS_ALIGNED(a->rd, LEN) && \
3305 QEMU_IS_ALIGNED(a->rs2, LEN)) { \
3307 tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \
3308 vreg_ofs(s, a->rs2), \
3309 s->vlen / 8 * LEN, s->vlen / 8 * LEN); \
3316 GEN_VMV_WHOLE_TRANS(vmv1r_v, 1)
3317 GEN_VMV_WHOLE_TRANS(vmv2r_v, 2)
3318 GEN_VMV_WHOLE_TRANS(vmv4r_v, 4)
3319 GEN_VMV_WHOLE_TRANS(vmv8r_v, 8)
3321 static bool int_ext_check(DisasContext *s, arg_rmr *a, uint8_t div)
3323 uint8_t from = (s->sew + 3) - div;
3324 bool ret = require_rvv(s) &&
3325 (from >= 3 && from <= 8) &&
3326 (a->rd != a->rs2) &&
3327 require_align(a->rd, s->lmul) &&
3328 require_align(a->rs2, s->lmul - div) &&
3329 require_vm(a->vm, a->rd) &&
3330 require_noover(a->rd, s->lmul, a->rs2, s->lmul - div);
3334 static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq)
3337 gen_helper_gvec_3_ptr *fn;
3338 TCGLabel *over = gen_new_label();
3339 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
3341 static gen_helper_gvec_3_ptr * const fns[6][4] = {
3343 NULL, gen_helper_vzext_vf2_h,
3344 gen_helper_vzext_vf2_w, gen_helper_vzext_vf2_d
3348 gen_helper_vzext_vf4_w, gen_helper_vzext_vf4_d,
3352 NULL, gen_helper_vzext_vf8_d
3355 NULL, gen_helper_vsext_vf2_h,
3356 gen_helper_vsext_vf2_w, gen_helper_vsext_vf2_d
3360 gen_helper_vsext_vf4_w, gen_helper_vsext_vf4_d,
3364 NULL, gen_helper_vsext_vf8_d
3368 fn = fns[seq][s->sew];
3373 data = FIELD_DP32(data, VDATA, VM, a->vm);
3375 tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
3376 vreg_ofs(s, a->rs2), cpu_env,
3377 s->vlen / 8, s->vlen / 8, data, fn);
3380 gen_set_label(over);
3384 /* Vector Integer Extension */
3385 #define GEN_INT_EXT_TRANS(NAME, DIV, SEQ) \
3386 static bool trans_##NAME(DisasContext *s, arg_rmr *a) \
3388 if (int_ext_check(s, a, DIV)) { \
3389 return int_ext_op(s, a, SEQ); \
3394 GEN_INT_EXT_TRANS(vzext_vf2, 1, 0)
3395 GEN_INT_EXT_TRANS(vzext_vf4, 2, 1)
3396 GEN_INT_EXT_TRANS(vzext_vf8, 3, 2)
3397 GEN_INT_EXT_TRANS(vsext_vf2, 1, 3)
3398 GEN_INT_EXT_TRANS(vsext_vf4, 2, 4)
3399 GEN_INT_EXT_TRANS(vsext_vf8, 3, 5)