target/riscv/insn_trans/trans_rvv.c.inc

/*
 * RISC-V translation routines for the RVV Standard Extension.
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
{
    TCGv s1, s2, dst;

    if (!has_ext(ctx, RVV)) {
        return false;
    }

    s2 = tcg_temp_new();
    dst = tcg_temp_new();

    /* Using x0 as the rs1 register specifier encodes an infinite AVL */
    if (a->rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_const_tl(RV_VLEN_MAX);
    } else {
        s1 = tcg_temp_new();
        gen_get_gpr(s1, a->rs1);
    }
    gen_get_gpr(s2, a->rs2);
    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(a->rd, dst);
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    lookup_and_goto_ptr(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    tcg_temp_free(s1);
    tcg_temp_free(s2);
    tcg_temp_free(dst);
    return true;
}

static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
{
    TCGv s1, s2, dst;

    if (!has_ext(ctx, RVV)) {
        return false;
    }

    s2 = tcg_const_tl(a->zimm);
    dst = tcg_temp_new();

    /* Using x0 as the rs1 register specifier encodes an infinite AVL */
    if (a->rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_const_tl(RV_VLEN_MAX);
    } else {
        s1 = tcg_temp_new();
        gen_get_gpr(s1, a->rs1);
    }
    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(a->rd, dst);
    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
    ctx->base.is_jmp = DISAS_NORETURN;

    tcg_temp_free(s1);
    tcg_temp_free(s2);
    tcg_temp_free(dst);
    return true;
}
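
/*
 * Editorial note: both forms end the translation block because the new
 * vl/vtype settings change how later vector instructions are translated.
 * The likely reason the endings differ is that vsetvli's configuration is
 * an immediate, known at translate time, so direct chaining with
 * gen_goto_tb() is safe, while vsetvl reads its vtype from a run-time
 * register value and therefore uses lookup_and_goto_ptr().
 */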

/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
    return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
}
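
/*
 * For illustration: with VLEN = 128 bits (s->vlen == 128) each vector
 * register occupies 128 / 8 = 16 bytes of env->vreg[], so e.g. v8 lives
 * at offsetof(CPURISCVState, vreg) + 8 * 16 = +128 bytes.
 */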

/* check functions */

/*
 * In cpu_get_tb_cpu_state(), VILL is set if RVV is not present, so RVV
 * is also checked by this function.
 */
static bool vext_check_isa_ill(DisasContext *s)
{
    return !s->vill;
}

/*
 * There are two rules checked here.
 *
 * 1. Vector register numbers are multiples of LMUL. (Section 3.2)
 *
 * 2. For all widening instructions, the destination LMUL value must also be
 *    a supported LMUL value. (Section 11.2)
 */
static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
{
    /*
     * The destination vector register group results are arranged as if both
     * SEW and LMUL were at twice their current settings. (Section 11.2).
     */
    int legal = widen ? 2 << s->lmul : 1 << s->lmul;

    return !((s->lmul == 0x3 && widen) || (reg % legal));
}
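
/*
 * For example, with LMUL = 2 (s->lmul == 1) a widening destination is
 * treated as LMUL = 4, so "legal" is 4 and the register number must be a
 * multiple of 4; LMUL = 8 (s->lmul == 3) has no wider setting, so widening
 * is rejected outright.
 */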

/*
 * There are two rules checked here.
 *
 * 1. The destination vector register group for a masked vector instruction can
 *    only overlap the source mask register (v0) when LMUL=1. (Section 5.3)
 *
 * 2. For widening instructions and some other instructions, such as
 *    vslideup.vx, there is no need to check whether LMUL=1.
 */
static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
    bool force)
{
    return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
}
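
/*
 * E.g. a masked instruction (vm == 0) writing v0 passes this check only
 * when LMUL = 1 (s->lmul == 0) and force is false; with force = true
 * (widening ops, vslideup.vx, ...) the v0 overlap is rejected even at
 * LMUL = 1.
 */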

/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */
static bool vext_check_nf(DisasContext *s, uint32_t nf)
{
    return (1 << s->lmul) * nf <= 8;
}
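
/*
 * E.g. nf == 4 passes for LMUL <= 2 (2 * 4 == 8) and fails for LMUL = 4
 * (4 * 4 > 8).
 */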

/*
 * The destination vector register group cannot overlap a source vector register
 * group of a different element width. (Section 11.2)
 */
static inline bool vext_check_overlap_group(int rd, int dlen, int rs, int slen)
{
    return ((rd >= rs + slen) || (rs >= rd + dlen));
}
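
/*
 * The register ranges [rd, rd + dlen) and [rs, rs + slen) must be
 * disjoint; e.g. rd = 8 with dlen = 4 and rs = 10 with slen = 2 overlap
 * and are rejected, while rs = 12 would be accepted.
 */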

/* common translation macro */
#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK)      \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
{                                                          \
    if (CHECK(s, a)) {                                     \
        return OP(s, a, SEQ);                              \
    }                                                      \
    return false;                                          \
}
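
/*
 * For example, GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
 * below expands to a trans_vlb_v() that runs ld_us_check() and, on
 * success, ld_us_op() with seq == 0 (the vlb row of the helper tables).
 */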

/*
 *** unit stride load and store
 */
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
                                TCGv_env, TCGv_i32);

static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();

    /*
     * simd_desc() supports at most 256 bytes, while in this implementation
     * the maximum vector group length is 2048 bytes, so the descriptor is
     * split into two parts:
     *
     * The first part is vlen in bytes, encoded in maxsz of simd_desc.
     * The second part is lmul, encoded in data of simd_desc.
     */
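    /*
     * E.g. with VLEN = 512 bits, s->vlen / 8 == 64 goes into the maxsz
     * field while the LMUL factor travels in the VDATA bits of the data
     * argument, presumably letting the helper recover the full group size.
     */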
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][7][4] = {
        /* masked unit stride load */
        { { gen_helper_vlb_v_b_mask,  gen_helper_vlb_v_h_mask,
            gen_helper_vlb_v_w_mask,  gen_helper_vlb_v_d_mask },
          { NULL,                     gen_helper_vlh_v_h_mask,
            gen_helper_vlh_v_w_mask,  gen_helper_vlh_v_d_mask },
          { NULL,                     NULL,
            gen_helper_vlw_v_w_mask,  gen_helper_vlw_v_d_mask },
          { gen_helper_vle_v_b_mask,  gen_helper_vle_v_h_mask,
            gen_helper_vle_v_w_mask,  gen_helper_vle_v_d_mask },
          { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask,
            gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask },
          { NULL,                     gen_helper_vlhu_v_h_mask,
            gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask },
          { NULL,                     NULL,
            gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } },
        /* unmasked unit stride load */
        { { gen_helper_vlb_v_b,  gen_helper_vlb_v_h,
            gen_helper_vlb_v_w,  gen_helper_vlb_v_d },
          { NULL,                gen_helper_vlh_v_h,
            gen_helper_vlh_v_w,  gen_helper_vlh_v_d },
          { NULL,                NULL,
            gen_helper_vlw_v_w,  gen_helper_vlw_v_d },
          { gen_helper_vle_v_b,  gen_helper_vle_v_h,
            gen_helper_vle_v_w,  gen_helper_vle_v_d },
          { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h,
            gen_helper_vlbu_v_w, gen_helper_vlbu_v_d },
          { NULL,                gen_helper_vlhu_v_h,
            gen_helper_vlhu_v_w, gen_helper_vlhu_v_d },
          { NULL,                NULL,
            gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } }
    };
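    /*
     * Indexing example: a->vm == 0 (masked), seq == 3 (vle, see the
     * GEN_VEXT_TRANS list below) and s->sew == 2 (SEW = 32) selects
     * gen_helper_vle_v_w_mask; NULL entries are combinations where SEW is
     * narrower than the memory element width and so have no helper.
     */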

    fn = fns[a->vm][seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
}

static bool ld_us_check(DisasContext *s, arg_r2nfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check)

static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4][4] = {
        /* masked unit stride store */
        { { gen_helper_vsb_v_b_mask,  gen_helper_vsb_v_h_mask,
            gen_helper_vsb_v_w_mask,  gen_helper_vsb_v_d_mask },
          { NULL,                     gen_helper_vsh_v_h_mask,
            gen_helper_vsh_v_w_mask,  gen_helper_vsh_v_d_mask },
          { NULL,                     NULL,
            gen_helper_vsw_v_w_mask,  gen_helper_vsw_v_d_mask },
          { gen_helper_vse_v_b_mask,  gen_helper_vse_v_h_mask,
            gen_helper_vse_v_w_mask,  gen_helper_vse_v_d_mask } },
        /* unmasked unit stride store */
        { { gen_helper_vsb_v_b,  gen_helper_vsb_v_h,
            gen_helper_vsb_v_w,  gen_helper_vsb_v_d },
          { NULL,                gen_helper_vsh_v_h,
            gen_helper_vsh_v_w,  gen_helper_vsh_v_d },
          { NULL,                NULL,
            gen_helper_vsw_v_w,  gen_helper_vsw_v_d },
          { gen_helper_vse_v_b,  gen_helper_vse_v_h,
            gen_helper_vse_v_w,  gen_helper_vse_v_d } }
    };

    fn = fns[a->vm][seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
}

static bool st_us_check(DisasContext *s, arg_r2nfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check)

/*
 *** stride load and store
 */
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
                                    TCGv, TCGv_env, TCGv_i32);

static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base, stride;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();
    stride = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    gen_get_gpr(stride, rs2);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, stride, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free(stride);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[7][4] = {
        { gen_helper_vlsb_v_b,  gen_helper_vlsb_v_h,
          gen_helper_vlsb_v_w,  gen_helper_vlsb_v_d },
        { NULL,                 gen_helper_vlsh_v_h,
          gen_helper_vlsh_v_w,  gen_helper_vlsh_v_d },
        { NULL,                 NULL,
          gen_helper_vlsw_v_w,  gen_helper_vlsw_v_d },
        { gen_helper_vlse_v_b,  gen_helper_vlse_v_h,
          gen_helper_vlse_v_w,  gen_helper_vlse_v_d },
        { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h,
          gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d },
        { NULL,                 gen_helper_vlshu_v_h,
          gen_helper_vlshu_v_w, gen_helper_vlshu_v_d },
        { NULL,                 NULL,
          gen_helper_vlswu_v_w, gen_helper_vlswu_v_d },
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_stride_check(DisasContext *s, arg_rnfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check)

static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4][4] = {
        /* masked stride store */
        { gen_helper_vssb_v_b,  gen_helper_vssb_v_h,
          gen_helper_vssb_v_w,  gen_helper_vssb_v_d },
        { NULL,                 gen_helper_vssh_v_h,
          gen_helper_vssh_v_w,  gen_helper_vssh_v_d },
        { NULL,                 NULL,
          gen_helper_vssw_v_w,  gen_helper_vssw_v_d },
        { gen_helper_vsse_v_b,  gen_helper_vsse_v_h,
          gen_helper_vsse_v_w,  gen_helper_vsse_v_d }
    };

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_stride_check(DisasContext *s, arg_rnfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check)

/*
 *** index load and store
 */
typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
                                   TCGv_ptr, TCGv_env, TCGv_i32);

static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[7][4] = {
        { gen_helper_vlxb_v_b,  gen_helper_vlxb_v_h,
          gen_helper_vlxb_v_w,  gen_helper_vlxb_v_d },
        { NULL,                 gen_helper_vlxh_v_h,
          gen_helper_vlxh_v_w,  gen_helper_vlxh_v_d },
        { NULL,                 NULL,
          gen_helper_vlxw_v_w,  gen_helper_vlxw_v_d },
        { gen_helper_vlxe_v_b,  gen_helper_vlxe_v_h,
          gen_helper_vlxe_v_w,  gen_helper_vlxe_v_d },
        { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h,
          gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d },
        { NULL,                 gen_helper_vlxhu_v_h,
          gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d },
        { NULL,                 NULL,
          gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d },
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

/*
 * For vector indexed segment loads, the destination vector register
 * groups cannot overlap the source vector register group (specified by
 * `vs2`), else an illegal instruction exception is raised.
 */
static bool ld_index_check(DisasContext *s, arg_rnfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_nf(s, a->nf) &&
            ((a->nf == 1) ||
             vext_check_overlap_group(a->rd, a->nf << s->lmul,
                                      a->rs2, 1 << s->lmul)));
}

GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check)

static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        { gen_helper_vsxb_v_b,  gen_helper_vsxb_v_h,
          gen_helper_vsxb_v_w,  gen_helper_vsxb_v_d },
        { NULL,                 gen_helper_vsxh_v_h,
          gen_helper_vsxh_v_w,  gen_helper_vsxh_v_d },
        { NULL,                 NULL,
          gen_helper_vsxw_v_w,  gen_helper_vsxw_v_d },
        { gen_helper_vsxe_v_b,  gen_helper_vsxe_v_h,
          gen_helper_vsxe_v_w,  gen_helper_vsxe_v_d }
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_index_check(DisasContext *s, arg_rnfvm* a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check)

/*
 *** unit stride fault-only-first load
 */
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[7][4] = {
        { gen_helper_vlbff_v_b,  gen_helper_vlbff_v_h,
          gen_helper_vlbff_v_w,  gen_helper_vlbff_v_d },
        { NULL,                  gen_helper_vlhff_v_h,
          gen_helper_vlhff_v_w,  gen_helper_vlhff_v_d },
        { NULL,                  NULL,
          gen_helper_vlwff_v_w,  gen_helper_vlwff_v_d },
        { gen_helper_vleff_v_b,  gen_helper_vleff_v_h,
          gen_helper_vleff_v_w,  gen_helper_vleff_v_d },
        { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h,
          gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d },
        { NULL,                  gen_helper_vlhuff_v_h,
          gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d },
        { NULL,                  NULL,
          gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d }
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldff_trans(a->rd, a->rs1, data, fn, s);
}

GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check)

/*
 *** vector atomic operation
 */
typedef void gen_helper_amo(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                            TCGv_env, TCGv_i32);

static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                      uint32_t data, gen_helper_amo *fn, DisasContext *s)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_amo *fn;
    static gen_helper_amo *const fnsw[9] = {
        /* no atomic operation */
        gen_helper_vamoswapw_v_w,
        gen_helper_vamoaddw_v_w,
        gen_helper_vamoxorw_v_w,
        gen_helper_vamoandw_v_w,
        gen_helper_vamoorw_v_w,
        gen_helper_vamominw_v_w,
        gen_helper_vamomaxw_v_w,
        gen_helper_vamominuw_v_w,
        gen_helper_vamomaxuw_v_w
    };
#ifdef TARGET_RISCV64
    static gen_helper_amo *const fnsd[18] = {
        gen_helper_vamoswapw_v_d,
        gen_helper_vamoaddw_v_d,
        gen_helper_vamoxorw_v_d,
        gen_helper_vamoandw_v_d,
        gen_helper_vamoorw_v_d,
        gen_helper_vamominw_v_d,
        gen_helper_vamomaxw_v_d,
        gen_helper_vamominuw_v_d,
        gen_helper_vamomaxuw_v_d,
        gen_helper_vamoswapd_v_d,
        gen_helper_vamoaddd_v_d,
        gen_helper_vamoxord_v_d,
        gen_helper_vamoandd_v_d,
        gen_helper_vamoord_v_d,
        gen_helper_vamomind_v_d,
        gen_helper_vamomaxd_v_d,
        gen_helper_vamominud_v_d,
        gen_helper_vamomaxud_v_d
    };
#endif

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
        s->base.is_jmp = DISAS_NORETURN;
        return true;
    } else {
        if (s->sew == 3) {
#ifdef TARGET_RISCV64
            fn = fnsd[seq];
#else
            /* Check done in amo_check(). */
            g_assert_not_reached();
#endif
        } else {
            assert(seq < ARRAY_SIZE(fnsw));
            fn = fnsw[seq];
        }
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, WD, a->wd);
    return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

/*
 * There are two rules checked here.
 *
 * 1. SEW must be at least as wide as the AMO memory element size.
 *
 * 2. If SEW is greater than XLEN, an illegal instruction exception is raised.
 */
static bool amo_check(DisasContext *s, arg_rwdvm* a)
{
    return (!s->vill && has_ext(s, RVA) &&
            (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            ((1 << s->sew) <= sizeof(target_ulong)) &&
            ((1 << s->sew) >= 4));
}
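
/*
 * (1 << s->sew) is the element size in bytes, so the last two terms
 * restrict SEW to 32 bits on RV32 and to 32 or 64 bits on RV64, matching
 * the vamo*w/vamo*d helper tables above.
 */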

GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check)
#ifdef TARGET_RISCV64
GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check)
#endif

/*
 *** Vector Integer Arithmetic Instructions
 */
#define MAXSZ(s) (s->vlen >> (3 - s->lmul))
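
/*
 * MAXSZ() is the vector register group size in bytes: e.g. with
 * VLEN = 128 bits, LMUL = 1 (s->lmul == 0) gives 128 >> 3 == 16 bytes
 * and LMUL = 8 (s->lmul == 3) gives 128 bytes.
 */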

static bool opivv_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_reg(s, a->rs1, false));
}

typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);

static inline bool
do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn,
              gen_helper_gvec_4_ptr *fn)
{
    TCGLabel *over = gen_new_label();
    if (!opivv_check(s, a)) {
        return false;
    }

    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    if (a->vm && s->vl_eq_vlmax) {
        gvec_fn(s->sew, vreg_ofs(s, a->rd),
                vreg_ofs(s, a->rs2), vreg_ofs(s, a->rs1),
                MAXSZ(s), MAXSZ(s));
    } else {
        uint32_t data = 0;

        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           cpu_env, 0, s->vlen / 8, data, fn);
    }
    gen_set_label(over);
    return true;
}

/* OPIVV with GVEC IR */
#define GEN_OPIVV_GVEC_TRANS(NAME, SUF) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_gvec_4_ptr * const fns[4] = {                \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
    };                                                             \
    return do_opivv_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
}

GEN_OPIVV_GVEC_TRANS(vadd_vv, add)
GEN_OPIVV_GVEC_TRANS(vsub_vv, sub)

typedef void gen_helper_opivx(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                              TCGv_env, TCGv_i32);

static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    src1 = tcg_temp_new();
    gen_get_gpr(src1, rs1);

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(src2);
    tcg_temp_free(src1);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool opivx_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false));
}

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t, TCGv_i64,
                         uint32_t, uint32_t);

static inline bool
do_opivx_gvec(DisasContext *s, arg_rmrr *a, GVecGen2sFn *gvec_fn,
              gen_helper_opivx *fn)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        TCGv_i64 src1 = tcg_temp_new_i64();
        TCGv tmp = tcg_temp_new();

        gen_get_gpr(tmp, a->rs1);
        tcg_gen_ext_tl_i64(src1, tmp);
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        tcg_temp_free_i64(src1);
        tcg_temp_free(tmp);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

/* OPIVX with GVEC IR */
#define GEN_OPIVX_GVEC_TRANS(NAME, SUF) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_opivx * const fns[4] = {                     \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,              \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,              \
    };                                                             \
    return do_opivx_gvec(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);   \
}

GEN_OPIVX_GVEC_TRANS(vadd_vx, adds)
GEN_OPIVX_GVEC_TRANS(vsub_vx, subs)

static void gen_vec_rsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub8_i64(d, b, a);
}

static void gen_vec_rsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_vec_sub16_i64(d, b, a);
}

static void gen_rsub_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
{
    tcg_gen_sub_i32(ret, arg2, arg1);
}

static void gen_rsub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
{
    tcg_gen_sub_i64(ret, arg2, arg1);
}

static void gen_rsub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_sub_vec(vece, r, b, a);
}
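
/*
 * The gen_*rsub* callbacks above simply swap the subtraction operands,
 * so the gvec expansion effectively computes scalar - vector element,
 * i.e. the reverse subtract used by vrsub.vx/vrsub.vi below.
 */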

static void tcg_gen_gvec_rsubs(unsigned vece, uint32_t dofs, uint32_t aofs,
                               TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
    static const GVecGen2s rsub_op[4] = {
        { .fni8 = gen_vec_rsub8_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = gen_vec_rsub16_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = gen_rsub_i32,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = gen_rsub_i64,
          .fniv = gen_rsub_vec,
          .fno = gen_helper_vec_rsubs64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &rsub_op[vece]);
}

GEN_OPIVX_GVEC_TRANS(vrsub_vx, rsubs)

static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm,
                        gen_helper_opivx *fn, DisasContext *s, int zx)
{
    TCGv_ptr dest, src2, mask;
    TCGv src1;
    TCGv_i32 desc;
    uint32_t data = 0;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    src2 = tcg_temp_new_ptr();
    if (zx) {
        src1 = tcg_const_tl(imm);
    } else {
        src1 = tcg_const_tl(sextract64(imm, 0, 5));
    }
    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, src1, src2, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(src2);
    tcg_temp_free(src1);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);

static inline bool
do_opivi_gvec(DisasContext *s, arg_rmrr *a, GVecGen2iFn *gvec_fn,
              gen_helper_opivx *fn, int zx)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        if (zx) {
            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                    extract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
        } else {
            gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                    sextract64(a->rs1, 0, 5), MAXSZ(s), MAXSZ(s));
        }
    } else {
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s, zx);
    }
    return true;
}

/* OPIVI with GVEC IR */
#define GEN_OPIVI_GVEC_TRANS(NAME, ZX, OPIVX, SUF) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    static gen_helper_opivx * const fns[4] = {                     \
        gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,            \
        gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,            \
    };                                                             \
    return do_opivi_gvec(s, a, tcg_gen_gvec_##SUF,                 \
                         fns[s->sew], ZX);                         \
}

GEN_OPIVI_GVEC_TRANS(vadd_vi, 0, vadd_vx, addi)

static void tcg_gen_gvec_rsubi(unsigned vece, uint32_t dofs, uint32_t aofs,
                               int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_rsubs(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}

GEN_OPIVI_GVEC_TRANS(vrsub_vi, 0, vrsub_vx, rsubi)

/* Vector Widening Integer Add/Subtract */

/* OPIVV with WIDEN */
static bool opivv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, true) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_reg(s, a->rs1, false) &&
            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
                                     1 << s->lmul) &&
            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
                                     1 << s->lmul) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}
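
/*
 * The destination of a widening op is written with 2 * SEW / 2 * LMUL,
 * hence rd is validated with widen = true (alignment to 2 << s->lmul),
 * must not overlap the narrower rs1/rs2 groups, and both LMUL = 8 and
 * SEW = 64 are rejected because they have no wider setting.
 */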

static bool do_opivv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn,
                           bool (*checkfn)(DisasContext *, arg_rmrr *))
{
    if (checkfn(s, a)) {
        uint32_t data = 0;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           cpu_env, 0, s->vlen / 8,
                           data, fn);
        gen_set_label(over);
        return true;
    }
    return false;
}

#define GEN_OPIVV_WIDEN_TRANS(NAME, CHECK) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_gvec_4_ptr * const fns[3] = {          \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opivv_widen(s, a, fns[s->sew], CHECK);         \
}

GEN_OPIVV_WIDEN_TRANS(vwaddu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwadd_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsubu_vv, opivv_widen_check)
GEN_OPIVV_WIDEN_TRANS(vwsub_vv, opivv_widen_check)

/* OPIVX with WIDEN */
static bool opivx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, true) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
                                     1 << s->lmul) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}

static bool do_opivx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opivx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIVX_WIDEN_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_opivx * const fns[3] = {               \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opivx_widen(s, a, fns[s->sew]);                \
}

GEN_OPIVX_WIDEN_TRANS(vwaddu_vx)
GEN_OPIVX_WIDEN_TRANS(vwadd_vx)
GEN_OPIVX_WIDEN_TRANS(vwsubu_vx)
GEN_OPIVX_WIDEN_TRANS(vwsub_vx)

/* WIDEN OPIVV with WIDEN */
static bool opiwv_widen_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, true) &&
            vext_check_reg(s, a->rs2, true) &&
            vext_check_reg(s, a->rs1, false) &&
            vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
                                     1 << s->lmul) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}

static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_gvec_4_ptr *fn)
{
    if (opiwv_widen_check(s, a)) {
        uint32_t data = 0;
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
        data = FIELD_DP32(data, VDATA, VM, a->vm);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1),
                           vreg_ofs(s, a->rs2),
                           cpu_env, 0, s->vlen / 8, data, fn);
        gen_set_label(over);
        return true;
    }
    return false;
}

#define GEN_OPIWV_WIDEN_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_gvec_4_ptr * const fns[3] = {          \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opiwv_widen(s, a, fns[s->sew]);                \
}

GEN_OPIWV_WIDEN_TRANS(vwaddu_wv)
GEN_OPIWV_WIDEN_TRANS(vwadd_wv)
GEN_OPIWV_WIDEN_TRANS(vwsubu_wv)
GEN_OPIWV_WIDEN_TRANS(vwsub_wv)

/* WIDEN OPIVX with WIDEN */
static bool opiwx_widen_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, true) &&
            vext_check_reg(s, a->rs2, true) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}

static bool do_opiwx_widen(DisasContext *s, arg_rmrr *a,
                           gen_helper_opivx *fn)
{
    if (opiwx_widen_check(s, a)) {
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
    }
    return false;
}

#define GEN_OPIWX_WIDEN_TRANS(NAME) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)       \
{                                                            \
    static gen_helper_opivx * const fns[3] = {               \
        gen_helper_##NAME##_b,                               \
        gen_helper_##NAME##_h,                               \
        gen_helper_##NAME##_w                                \
    };                                                       \
    return do_opiwx_widen(s, a, fns[s->sew]);                \
}

GEN_OPIWX_WIDEN_TRANS(vwaddu_wx)
GEN_OPIWX_WIDEN_TRANS(vwadd_wx)
GEN_OPIWX_WIDEN_TRANS(vwsubu_wx)
GEN_OPIWX_WIDEN_TRANS(vwsub_wx)

/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
/* OPIVV without GVEC IR */
#define GEN_OPIVV_TRANS(NAME, CHECK)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (CHECK(s, a)) {                                             \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[4] = {            \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,          \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,          \
        };                                                         \
        TCGLabel *over = gen_new_label();                          \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
                                                                   \
        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), cpu_env, 0,        \
                           s->vlen / 8, data, fns[s->sew]);        \
        gen_set_label(over);                                       \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

/*
 * For vadc and vsbc, an illegal instruction exception is raised if the
 * destination vector register is v0 and LMUL > 1. (Section 12.3)
 */
static bool opivv_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_reg(s, a->rs1, false) &&
            ((a->rd != 0) || (s->lmul == 0)));
}

GEN_OPIVV_TRANS(vadc_vvm, opivv_vadc_check)
GEN_OPIVV_TRANS(vsbc_vvm, opivv_vadc_check)

/*
 * For vmadc and vmsbc, an illegal instruction exception is raised if the
 * destination vector register overlaps a source vector register group.
 */
static bool opivv_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_reg(s, a->rs1, false) &&
            vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
            vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
}
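
/*
 * vmadc/vmsbc produce a mask, which occupies a single vector register
 * regardless of LMUL, so the overlap checks above use a destination
 * length of 1 against the full 1 << s->lmul source groups.
 */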

GEN_OPIVV_TRANS(vmadc_vvm, opivv_vmadc_check)
GEN_OPIVV_TRANS(vmsbc_vvm, opivv_vmadc_check)

static bool opivx_vadc_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            ((a->rd != 0) || (s->lmul == 0)));
}

/* OPIVX without GVEC IR */
#define GEN_OPIVX_TRANS(NAME, CHECK)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##NAME##_b, gen_helper_##NAME##_h,                \
            gen_helper_##NAME##_w, gen_helper_##NAME##_d,                \
        };                                                               \
                                                                         \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVX_TRANS(vadc_vxm, opivx_vadc_check)
GEN_OPIVX_TRANS(vsbc_vxm, opivx_vadc_check)

static bool opivx_vmadc_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul));
}

GEN_OPIVX_TRANS(vmadc_vxm, opivx_vmadc_check)
GEN_OPIVX_TRANS(vmsbc_vxm, opivx_vmadc_check)

/* OPIVI without GVEC IR */
#define GEN_OPIVI_TRANS(NAME, ZX, OPIVX, CHECK)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (CHECK(s, a)) {                                                   \
        static gen_helper_opivx * const fns[4] = {                       \
            gen_helper_##OPIVX##_b, gen_helper_##OPIVX##_h,              \
            gen_helper_##OPIVX##_w, gen_helper_##OPIVX##_d,              \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, ZX);                          \
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVI_TRANS(vadc_vim, 0, vadc_vxm, opivx_vadc_check)
GEN_OPIVI_TRANS(vmadc_vim, 0, vmadc_vxm, opivx_vmadc_check)

/* Vector Bitwise Logical Instructions */
GEN_OPIVV_GVEC_TRANS(vand_vv, and)
GEN_OPIVV_GVEC_TRANS(vor_vv,  or)
GEN_OPIVV_GVEC_TRANS(vxor_vv, xor)
GEN_OPIVX_GVEC_TRANS(vand_vx, ands)
GEN_OPIVX_GVEC_TRANS(vor_vx,  ors)
GEN_OPIVX_GVEC_TRANS(vxor_vx, xors)
GEN_OPIVI_GVEC_TRANS(vand_vi, 0, vand_vx, andi)
GEN_OPIVI_GVEC_TRANS(vor_vi, 0, vor_vx,  ori)
GEN_OPIVI_GVEC_TRANS(vxor_vi, 0, vxor_vx, xori)

/* Vector Single-Width Bit Shift Instructions */
GEN_OPIVV_GVEC_TRANS(vsll_vv,  shlv)
GEN_OPIVV_GVEC_TRANS(vsrl_vv,  shrv)
GEN_OPIVV_GVEC_TRANS(vsra_vv,  sarv)

typedef void GVecGen2sFn32(unsigned, uint32_t, uint32_t, TCGv_i32,
                           uint32_t, uint32_t);

static inline bool
do_opivx_gvec_shift(DisasContext *s, arg_rmrr *a, GVecGen2sFn32 *gvec_fn,
                    gen_helper_opivx *fn)
{
    if (!opivx_check(s, a)) {
        return false;
    }

    if (a->vm && s->vl_eq_vlmax) {
        TCGv_i32 src1 = tcg_temp_new_i32();
        TCGv tmp = tcg_temp_new();

        gen_get_gpr(tmp, a->rs1);
        tcg_gen_trunc_tl_i32(src1, tmp);
        tcg_gen_extract_i32(src1, src1, 0, s->sew + 3);
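        /*
         * The shift amount is truncated to log2(SEW) bits, e.g. to the
         * low 5 bits for SEW = 32 (s->sew == 2, so s->sew + 3 == 5).
         */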
        gvec_fn(s->sew, vreg_ofs(s, a->rd), vreg_ofs(s, a->rs2),
                src1, MAXSZ(s), MAXSZ(s));

        tcg_temp_free_i32(src1);
        tcg_temp_free(tmp);
        return true;
    }
    return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fn, s);
}

#define GEN_OPIVX_GVEC_SHIFT_TRANS(NAME, SUF) \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                    \
{                                                                         \
    static gen_helper_opivx * const fns[4] = {                            \
        gen_helper_##NAME##_b, gen_helper_##NAME##_h,                     \
        gen_helper_##NAME##_w, gen_helper_##NAME##_d,                     \
    };                                                                    \
                                                                          \
    return do_opivx_gvec_shift(s, a, tcg_gen_gvec_##SUF, fns[s->sew]);    \
}

GEN_OPIVX_GVEC_SHIFT_TRANS(vsll_vx,  shls)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsrl_vx,  shrs)
GEN_OPIVX_GVEC_SHIFT_TRANS(vsra_vx,  sars)

GEN_OPIVI_GVEC_TRANS(vsll_vi, 1, vsll_vx,  shli)
GEN_OPIVI_GVEC_TRANS(vsrl_vi, 1, vsrl_vx,  shri)
GEN_OPIVI_GVEC_TRANS(vsra_vi, 1, vsra_vx,  sari)

/* Vector Narrowing Integer Right Shift Instructions */
static bool opivv_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, true) &&
            vext_check_reg(s, a->rs1, false) &&
            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
                2 << s->lmul) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}
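
/*
 * Narrowing shifts read a 2 * SEW / 2 * LMUL wide vs2 group, so vs2 is
 * validated with widen = true and the overlap check treats the source
 * group as 2 << s->lmul registers wide.
 */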

/* OPIVV with NARROW */
#define GEN_OPIVV_NARROW_TRANS(NAME)                               \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
{                                                                  \
    if (opivv_narrow_check(s, a)) {                                \
        uint32_t data = 0;                                         \
        static gen_helper_gvec_4_ptr * const fns[3] = {            \
            gen_helper_##NAME##_b,                                 \
            gen_helper_##NAME##_h,                                 \
            gen_helper_##NAME##_w,                                 \
        };                                                         \
        TCGLabel *over = gen_new_label();                          \
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
                                                                   \
        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
        data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
                           vreg_ofs(s, a->rs1),                    \
                           vreg_ofs(s, a->rs2), cpu_env, 0,        \
                           s->vlen / 8, data, fns[s->sew]);        \
        gen_set_label(over);                                       \
        return true;                                               \
    }                                                              \
    return false;                                                  \
}

GEN_OPIVV_NARROW_TRANS(vnsra_vv)
GEN_OPIVV_NARROW_TRANS(vnsrl_vv)

static bool opivx_narrow_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, true) &&
            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
                2 << s->lmul) &&
            (s->lmul < 0x3) && (s->sew < 0x3));
}

/* OPIVX with NARROW */
#define GEN_OPIVX_NARROW_TRANS(NAME)                                     \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (opivx_narrow_check(s, a)) {                                      \
        static gen_helper_opivx * const fns[3] = {                       \
            gen_helper_##NAME##_b,                                       \
            gen_helper_##NAME##_h,                                       \
            gen_helper_##NAME##_w,                                       \
        };                                                               \
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);\
    }                                                                    \
    return false;                                                        \
}

GEN_OPIVX_NARROW_TRANS(vnsra_vx)
GEN_OPIVX_NARROW_TRANS(vnsrl_vx)

/* OPIVI with NARROW */
#define GEN_OPIVI_NARROW_TRANS(NAME, ZX, OPIVX)                          \
static bool trans_##NAME(DisasContext *s, arg_rmrr *a)                   \
{                                                                        \
    if (opivx_narrow_check(s, a)) {                                      \
        static gen_helper_opivx * const fns[3] = {                       \
            gen_helper_##OPIVX##_b,                                      \
            gen_helper_##OPIVX##_h,                                      \
            gen_helper_##OPIVX##_w,                                      \
        };                                                               \
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm,                 \
                           fns[s->sew], s, ZX);                          \
    }                                                                    \
    return false;                                                        \
}
1530 GEN_OPIVI_NARROW_TRANS(vnsra_vi, 1, vnsra_vx)
1531 GEN_OPIVI_NARROW_TRANS(vnsrl_vi, 1, vnsrl_vx)
1533 /* Vector Integer Comparison Instructions */
1535  * For all comparison instructions, an illegal instruction exception is raised
1536  * if the destination vector register overlaps a source vector register group
1537  * and LMUL > 1.
1538  */
1539 static bool opivv_cmp_check(DisasContext *s, arg_rmrr *a)
1541     return (vext_check_isa_ill(s) &&
1542             vext_check_reg(s, a->rs2, false) &&
1543             vext_check_reg(s, a->rs1, false) &&
1544             ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
1545               vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
1546              (s->lmul == 0)));
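/*
 * Example for the overlap rule above: a compare writes a mask, so vd
 * always occupies a single register (group size 1).  With LMUL = 4
 * (s->lmul == 2) each source spans 1 << 2 == 4 registers and the check
 * only passes if vd lies outside both source groups; with LMUL = 1
 * (s->lmul == 0) any overlap is allowed.
 */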
1548 GEN_OPIVV_TRANS(vmseq_vv, opivv_cmp_check)
1549 GEN_OPIVV_TRANS(vmsne_vv, opivv_cmp_check)
1550 GEN_OPIVV_TRANS(vmsltu_vv, opivv_cmp_check)
1551 GEN_OPIVV_TRANS(vmslt_vv, opivv_cmp_check)
1552 GEN_OPIVV_TRANS(vmsleu_vv, opivv_cmp_check)
1553 GEN_OPIVV_TRANS(vmsle_vv, opivv_cmp_check)
1555 static bool opivx_cmp_check(DisasContext *s, arg_rmrr *a)
1557     return (vext_check_isa_ill(s) &&
1558             vext_check_reg(s, a->rs2, false) &&
1559             (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
1560              (s->lmul == 0)));
1563 GEN_OPIVX_TRANS(vmseq_vx, opivx_cmp_check)
1564 GEN_OPIVX_TRANS(vmsne_vx, opivx_cmp_check)
1565 GEN_OPIVX_TRANS(vmsltu_vx, opivx_cmp_check)
1566 GEN_OPIVX_TRANS(vmslt_vx, opivx_cmp_check)
1567 GEN_OPIVX_TRANS(vmsleu_vx, opivx_cmp_check)
1568 GEN_OPIVX_TRANS(vmsle_vx, opivx_cmp_check)
1569 GEN_OPIVX_TRANS(vmsgtu_vx, opivx_cmp_check)
1570 GEN_OPIVX_TRANS(vmsgt_vx, opivx_cmp_check)
1572 GEN_OPIVI_TRANS(vmseq_vi, 0, vmseq_vx, opivx_cmp_check)
1573 GEN_OPIVI_TRANS(vmsne_vi, 0, vmsne_vx, opivx_cmp_check)
1574 GEN_OPIVI_TRANS(vmsleu_vi, 1, vmsleu_vx, opivx_cmp_check)
1575 GEN_OPIVI_TRANS(vmsle_vi, 0, vmsle_vx, opivx_cmp_check)
1576 GEN_OPIVI_TRANS(vmsgtu_vi, 1, vmsgtu_vx, opivx_cmp_check)
1577 GEN_OPIVI_TRANS(vmsgt_vi, 0, vmsgt_vx, opivx_cmp_check)
1579 /* Vector Integer Min/Max Instructions */
1580 GEN_OPIVV_GVEC_TRANS(vminu_vv, umin)
1581 GEN_OPIVV_GVEC_TRANS(vmin_vv,  smin)
1582 GEN_OPIVV_GVEC_TRANS(vmaxu_vv, umax)
1583 GEN_OPIVV_GVEC_TRANS(vmax_vv,  smax)
1584 GEN_OPIVX_TRANS(vminu_vx, opivx_check)
1585 GEN_OPIVX_TRANS(vmin_vx,  opivx_check)
1586 GEN_OPIVX_TRANS(vmaxu_vx, opivx_check)
1587 GEN_OPIVX_TRANS(vmax_vx,  opivx_check)
1589 /* Vector Single-Width Integer Multiply Instructions */
1590 GEN_OPIVV_GVEC_TRANS(vmul_vv,  mul)
1591 GEN_OPIVV_TRANS(vmulh_vv, opivv_check)
1592 GEN_OPIVV_TRANS(vmulhu_vv, opivv_check)
1593 GEN_OPIVV_TRANS(vmulhsu_vv, opivv_check)
1594 GEN_OPIVX_GVEC_TRANS(vmul_vx,  muls)
1595 GEN_OPIVX_TRANS(vmulh_vx, opivx_check)
1596 GEN_OPIVX_TRANS(vmulhu_vx, opivx_check)
1597 GEN_OPIVX_TRANS(vmulhsu_vx, opivx_check)
1599 /* Vector Integer Divide Instructions */
1600 GEN_OPIVV_TRANS(vdivu_vv, opivv_check)
1601 GEN_OPIVV_TRANS(vdiv_vv, opivv_check)
1602 GEN_OPIVV_TRANS(vremu_vv, opivv_check)
1603 GEN_OPIVV_TRANS(vrem_vv, opivv_check)
1604 GEN_OPIVX_TRANS(vdivu_vx, opivx_check)
1605 GEN_OPIVX_TRANS(vdiv_vx, opivx_check)
1606 GEN_OPIVX_TRANS(vremu_vx, opivx_check)
1607 GEN_OPIVX_TRANS(vrem_vx, opivx_check)
1609 /* Vector Widening Integer Multiply Instructions */
1610 GEN_OPIVV_WIDEN_TRANS(vwmul_vv, opivv_widen_check)
1611 GEN_OPIVV_WIDEN_TRANS(vwmulu_vv, opivv_widen_check)
1612 GEN_OPIVV_WIDEN_TRANS(vwmulsu_vv, opivv_widen_check)
1613 GEN_OPIVX_WIDEN_TRANS(vwmul_vx)
1614 GEN_OPIVX_WIDEN_TRANS(vwmulu_vx)
1615 GEN_OPIVX_WIDEN_TRANS(vwmulsu_vx)
1617 /* Vector Single-Width Integer Multiply-Add Instructions */
1618 GEN_OPIVV_TRANS(vmacc_vv, opivv_check)
1619 GEN_OPIVV_TRANS(vnmsac_vv, opivv_check)
1620 GEN_OPIVV_TRANS(vmadd_vv, opivv_check)
1621 GEN_OPIVV_TRANS(vnmsub_vv, opivv_check)
1622 GEN_OPIVX_TRANS(vmacc_vx, opivx_check)
1623 GEN_OPIVX_TRANS(vnmsac_vx, opivx_check)
1624 GEN_OPIVX_TRANS(vmadd_vx, opivx_check)
1625 GEN_OPIVX_TRANS(vnmsub_vx, opivx_check)
1627 /* Vector Widening Integer Multiply-Add Instructions */
1628 GEN_OPIVV_WIDEN_TRANS(vwmaccu_vv, opivv_widen_check)
1629 GEN_OPIVV_WIDEN_TRANS(vwmacc_vv, opivv_widen_check)
1630 GEN_OPIVV_WIDEN_TRANS(vwmaccsu_vv, opivv_widen_check)
1631 GEN_OPIVX_WIDEN_TRANS(vwmaccu_vx)
1632 GEN_OPIVX_WIDEN_TRANS(vwmacc_vx)
1633 GEN_OPIVX_WIDEN_TRANS(vwmaccsu_vx)
1634 GEN_OPIVX_WIDEN_TRANS(vwmaccus_vx)
1636 /* Vector Integer Merge and Move Instructions */
1637 static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a)
1639     if (vext_check_isa_ill(s) &&
1640         vext_check_reg(s, a->rd, false) &&
1641         vext_check_reg(s, a->rs1, false)) {
1643         if (s->vl_eq_vlmax) {
1644             tcg_gen_gvec_mov(s->sew, vreg_ofs(s, a->rd),
1645                              vreg_ofs(s, a->rs1),
1646                              MAXSZ(s), MAXSZ(s));
1647         } else {
1648             uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1649             static gen_helper_gvec_2_ptr * const fns[4] = {
1650                 gen_helper_vmv_v_v_b, gen_helper_vmv_v_v_h,
1651                 gen_helper_vmv_v_v_w, gen_helper_vmv_v_v_d,
1652             };
1653             TCGLabel *over = gen_new_label();
1654             tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1656             tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1),
1657                                cpu_env, 0, s->vlen / 8, data, fns[s->sew]);
1658             gen_set_label(over);
1659         }
1660         return true;
1661     }
1662     return false;
1665 typedef void gen_helper_vmv_vx(TCGv_ptr, TCGv_i64, TCGv_env, TCGv_i32);
1666 static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a)
1668     if (vext_check_isa_ill(s) &&
1669         vext_check_reg(s, a->rd, false)) {
1671         TCGv s1;
1672         TCGLabel *over = gen_new_label();
1673         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1675         s1 = tcg_temp_new();
1676         gen_get_gpr(s1, a->rs1);
1678         if (s->vl_eq_vlmax) {
1679             tcg_gen_gvec_dup_tl(s->sew, vreg_ofs(s, a->rd),
1680                                 MAXSZ(s), MAXSZ(s), s1);
1681         } else {
1682             TCGv_i32 desc;
1683             TCGv_i64 s1_i64 = tcg_temp_new_i64();
1684             TCGv_ptr dest = tcg_temp_new_ptr();
1685             uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1686             static gen_helper_vmv_vx * const fns[4] = {
1687                 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1688                 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1689             };
1691             tcg_gen_ext_tl_i64(s1_i64, s1);
1692             desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
1693             tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1694             fns[s->sew](dest, s1_i64, cpu_env, desc);
1696             tcg_temp_free_ptr(dest);
1697             tcg_temp_free_i32(desc);
1698             tcg_temp_free_i64(s1_i64);
1699         }
1701         tcg_temp_free(s1);
1702         gen_set_label(over);
1703         return true;
1704     }
1705     return false;
1708 static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a)
1710     if (vext_check_isa_ill(s) &&
1711         vext_check_reg(s, a->rd, false)) {
1713         int64_t simm = sextract64(a->rs1, 0, 5);
1714         if (s->vl_eq_vlmax) {
1715             tcg_gen_gvec_dup_imm(s->sew, vreg_ofs(s, a->rd),
1716                                  MAXSZ(s), MAXSZ(s), simm);
1717         } else {
1718             TCGv_i32 desc;
1719             TCGv_i64 s1;
1720             TCGv_ptr dest;
1721             uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
1722             static gen_helper_vmv_vx * const fns[4] = {
1723                 gen_helper_vmv_v_x_b, gen_helper_vmv_v_x_h,
1724                 gen_helper_vmv_v_x_w, gen_helper_vmv_v_x_d,
1725             };
1726             TCGLabel *over = gen_new_label();
1727             tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1729             s1 = tcg_const_i64(simm);
1730             dest = tcg_temp_new_ptr();
1731             desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
1732             tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
1733             fns[s->sew](dest, s1, cpu_env, desc);
1735             tcg_temp_free_ptr(dest);
1736             tcg_temp_free_i32(desc);
1737             tcg_temp_free_i64(s1);
1738             gen_set_label(over);
1739         }
1740         return true;
1741     }
1742     return false;
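/*
 * The vl_eq_vlmax paths above are a fast case: when vl covers the whole
 * register group, vmv.v.v/v.x/v.i can be lowered to a plain gvec
 * mov/dup over MAXSZ(s) bytes (roughly VLEN*LMUL/8, e.g. 16 bytes for
 * VLEN = 128 and LMUL = 1) instead of calling the out-of-line helper.
 */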
1745 GEN_OPIVV_TRANS(vmerge_vvm, opivv_vadc_check)
1746 GEN_OPIVX_TRANS(vmerge_vxm, opivx_vadc_check)
1747 GEN_OPIVI_TRANS(vmerge_vim, 0, vmerge_vxm, opivx_vadc_check)
1750  *** Vector Fixed-Point Arithmetic Instructions
1751  */
1753 /* Vector Single-Width Saturating Add and Subtract */
1754 GEN_OPIVV_TRANS(vsaddu_vv, opivv_check)
1755 GEN_OPIVV_TRANS(vsadd_vv,  opivv_check)
1756 GEN_OPIVV_TRANS(vssubu_vv, opivv_check)
1757 GEN_OPIVV_TRANS(vssub_vv,  opivv_check)
1758 GEN_OPIVX_TRANS(vsaddu_vx,  opivx_check)
1759 GEN_OPIVX_TRANS(vsadd_vx,  opivx_check)
1760 GEN_OPIVX_TRANS(vssubu_vx,  opivx_check)
1761 GEN_OPIVX_TRANS(vssub_vx,  opivx_check)
1762 GEN_OPIVI_TRANS(vsaddu_vi, 1, vsaddu_vx, opivx_check)
1763 GEN_OPIVI_TRANS(vsadd_vi, 0, vsadd_vx, opivx_check)
1765 /* Vector Single-Width Averaging Add and Subtract */
1766 GEN_OPIVV_TRANS(vaadd_vv, opivv_check)
1767 GEN_OPIVV_TRANS(vasub_vv, opivv_check)
1768 GEN_OPIVX_TRANS(vaadd_vx,  opivx_check)
1769 GEN_OPIVX_TRANS(vasub_vx,  opivx_check)
1770 GEN_OPIVI_TRANS(vaadd_vi, 0, vaadd_vx, opivx_check)
1772 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
1773 GEN_OPIVV_TRANS(vsmul_vv, opivv_check)
1774 GEN_OPIVX_TRANS(vsmul_vx,  opivx_check)
1776 /* Vector Widening Saturating Scaled Multiply-Add */
1777 GEN_OPIVV_WIDEN_TRANS(vwsmaccu_vv, opivv_widen_check)
1778 GEN_OPIVV_WIDEN_TRANS(vwsmacc_vv, opivv_widen_check)
1779 GEN_OPIVV_WIDEN_TRANS(vwsmaccsu_vv, opivv_widen_check)
1780 GEN_OPIVX_WIDEN_TRANS(vwsmaccu_vx)
1781 GEN_OPIVX_WIDEN_TRANS(vwsmacc_vx)
1782 GEN_OPIVX_WIDEN_TRANS(vwsmaccsu_vx)
1783 GEN_OPIVX_WIDEN_TRANS(vwsmaccus_vx)
1785 /* Vector Single-Width Scaling Shift Instructions */
1786 GEN_OPIVV_TRANS(vssrl_vv, opivv_check)
1787 GEN_OPIVV_TRANS(vssra_vv, opivv_check)
1788 GEN_OPIVX_TRANS(vssrl_vx,  opivx_check)
1789 GEN_OPIVX_TRANS(vssra_vx,  opivx_check)
1790 GEN_OPIVI_TRANS(vssrl_vi, 1, vssrl_vx, opivx_check)
1791 GEN_OPIVI_TRANS(vssra_vi, 0, vssra_vx, opivx_check)
1793 /* Vector Narrowing Fixed-Point Clip Instructions */
1794 GEN_OPIVV_NARROW_TRANS(vnclipu_vv)
1795 GEN_OPIVV_NARROW_TRANS(vnclip_vv)
1796 GEN_OPIVX_NARROW_TRANS(vnclipu_vx)
1797 GEN_OPIVX_NARROW_TRANS(vnclip_vx)
1798 GEN_OPIVI_NARROW_TRANS(vnclipu_vi, 1, vnclipu_vx)
1799 GEN_OPIVI_NARROW_TRANS(vnclip_vi, 1, vnclip_vx)
1802  *** Vector Floating-Point Arithmetic Instructions
1803  */
1804 /* Vector Single-Width Floating-Point Add/Subtract Instructions */
1807  * If the current SEW does not correspond to a supported IEEE floating-point
1808  * type, an illegal instruction exception is raised.
1809  */
1810 static bool opfvv_check(DisasContext *s, arg_rmrr *a)
1812     return (vext_check_isa_ill(s) &&
1813             vext_check_overlap_mask(s, a->rd, a->vm, false) &&
1814             vext_check_reg(s, a->rd, false) &&
1815             vext_check_reg(s, a->rs2, false) &&
1816             vext_check_reg(s, a->rs1, false) &&
1817             (s->sew != 0));
1820 /* OPFVV without GVEC IR */
1821 #define GEN_OPFVV_TRANS(NAME, CHECK)                               \
1822 static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
1823 {                                                                  \
1824     if (CHECK(s, a)) {                                             \
1825         uint32_t data = 0;                                         \
1826         static gen_helper_gvec_4_ptr * const fns[3] = {            \
1827             gen_helper_##NAME##_h,                                 \
1828             gen_helper_##NAME##_w,                                 \
1829             gen_helper_##NAME##_d,                                 \
1830         };                                                         \
1831         TCGLabel *over = gen_new_label();                          \
1832         gen_set_rm(s, 7);                                          \
1833         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
1834                                                                    \
1835         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
1836         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
1837         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
1838         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
1839                            vreg_ofs(s, a->rs1),                    \
1840                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
1841                            s->vlen / 8, data, fns[s->sew - 1]);    \
1842         gen_set_label(over);                                       \
1843         return true;                                               \
1844     }                                                              \
1845     return false;                                                  \
1847 GEN_OPFVV_TRANS(vfadd_vv, opfvv_check)
1848 GEN_OPFVV_TRANS(vfsub_vv, opfvv_check)
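/*
 * Note on the helper table indexing above: s->sew encodes log2(SEW/8),
 * so 0/1/2/3 stand for SEW = 8/16/32/64.  The floating-point checks
 * reject s->sew == 0, which is why the FP helper tables only carry
 * _h/_w/_d entries and are indexed with s->sew - 1.
 */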
1850 typedef void gen_helper_opfvf(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_ptr,
1851                               TCGv_env, TCGv_i32);
1853 static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
1854                         uint32_t data, gen_helper_opfvf *fn, DisasContext *s)
1856     TCGv_ptr dest, src2, mask;
1857     TCGv_i32 desc;
1859     TCGLabel *over = gen_new_label();
1860     tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
1862     dest = tcg_temp_new_ptr();
1863     mask = tcg_temp_new_ptr();
1864     src2 = tcg_temp_new_ptr();
1865     desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
1867     tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
1868     tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2));
1869     tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
1871     fn(dest, mask, cpu_fpr[rs1], src2, cpu_env, desc);
1873     tcg_temp_free_ptr(dest);
1874     tcg_temp_free_ptr(mask);
1875     tcg_temp_free_ptr(src2);
1876     tcg_temp_free_i32(desc);
1877     gen_set_label(over);
1878     return true;
1881 static bool opfvf_check(DisasContext *s, arg_rmrr *a)
1884  * If the current SEW does not correspond to a supported IEEE floating-point
1885  * type, an illegal instruction exception is raised
1886  */
1887     return (vext_check_isa_ill(s) &&
1888             vext_check_overlap_mask(s, a->rd, a->vm, false) &&
1889             vext_check_reg(s, a->rd, false) &&
1890             vext_check_reg(s, a->rs2, false) &&
1891             (s->sew != 0));
1894 /* OPFVF without GVEC IR */
1895 #define GEN_OPFVF_TRANS(NAME, CHECK)                              \
1896 static bool trans_##NAME(DisasContext *s, arg_rmrr *a)            \
1897 {                                                                 \
1898     if (CHECK(s, a)) {                                            \
1899         uint32_t data = 0;                                        \
1900         static gen_helper_opfvf *const fns[3] = {                 \
1901             gen_helper_##NAME##_h,                                \
1902             gen_helper_##NAME##_w,                                \
1903             gen_helper_##NAME##_d,                                \
1904         };                                                        \
1905         gen_set_rm(s, 7);                                         \
1906         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);            \
1907         data = FIELD_DP32(data, VDATA, VM, a->vm);                \
1908         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);            \
1909         return opfvf_trans(a->rd, a->rs1, a->rs2, data,           \
1910                            fns[s->sew - 1], s);                   \
1911     }                                                             \
1912     return false;                                                 \
1915 GEN_OPFVF_TRANS(vfadd_vf,  opfvf_check)
1916 GEN_OPFVF_TRANS(vfsub_vf,  opfvf_check)
1917 GEN_OPFVF_TRANS(vfrsub_vf,  opfvf_check)
1919 /* Vector Widening Floating-Point Add/Subtract Instructions */
1920 static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
1922     return (vext_check_isa_ill(s) &&
1923             vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1924             vext_check_reg(s, a->rd, true) &&
1925             vext_check_reg(s, a->rs2, false) &&
1926             vext_check_reg(s, a->rs1, false) &&
1927             vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
1928                                      1 << s->lmul) &&
1929             vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
1930                                      1 << s->lmul) &&
1931             (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
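/*
 * Example of the widening layout checked above (assuming LMUL = 2,
 * s->lmul == 1): vd is written at 2*SEW and spans 2 << 1 == 4
 * registers, each source spans 1 << 1 == 2 registers, and the
 * vext_check_overlap_group() calls require vd's wider group to be
 * disjoint from both source groups.
 */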
1934 /* OPFVV with WIDEN */
1935 #define GEN_OPFVV_WIDEN_TRANS(NAME, CHECK)                       \
1936 static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
1937 {                                                                \
1938     if (CHECK(s, a)) {                                           \
1939         uint32_t data = 0;                                       \
1940         static gen_helper_gvec_4_ptr * const fns[2] = {          \
1941             gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
1942         };                                                       \
1943         TCGLabel *over = gen_new_label();                        \
1944         gen_set_rm(s, 7);                                        \
1945         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);        \
1946                                                                  \
1947         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
1948         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
1949         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
1950         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),   \
1951                            vreg_ofs(s, a->rs1),                  \
1952                            vreg_ofs(s, a->rs2), cpu_env, 0,      \
1953                            s->vlen / 8, data, fns[s->sew - 1]);  \
1954         gen_set_label(over);                                     \
1955         return true;                                             \
1956     }                                                            \
1957     return false;                                                \
1960 GEN_OPFVV_WIDEN_TRANS(vfwadd_vv, opfvv_widen_check)
1961 GEN_OPFVV_WIDEN_TRANS(vfwsub_vv, opfvv_widen_check)
1963 static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
1965     return (vext_check_isa_ill(s) &&
1966             vext_check_overlap_mask(s, a->rd, a->vm, true) &&
1967             vext_check_reg(s, a->rd, true) &&
1968             vext_check_reg(s, a->rs2, false) &&
1969             vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
1970                                      1 << s->lmul) &&
1971             (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
1974 /* OPFVF with WIDEN */
1975 #define GEN_OPFVF_WIDEN_TRANS(NAME)                              \
1976 static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
1977 {                                                                \
1978     if (opfvf_widen_check(s, a)) {                               \
1979         uint32_t data = 0;                                       \
1980         static gen_helper_opfvf *const fns[2] = {                \
1981             gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
1982         };                                                       \
1983         gen_set_rm(s, 7);                                        \
1984         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
1985         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
1986         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
1987         return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
1988                            fns[s->sew - 1], s);                  \
1989     }                                                            \
1990     return false;                                                \
1993 GEN_OPFVF_WIDEN_TRANS(vfwadd_vf)
1994 GEN_OPFVF_WIDEN_TRANS(vfwsub_vf)
1996 static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
1998     return (vext_check_isa_ill(s) &&
1999             vext_check_overlap_mask(s, a->rd, a->vm, true) &&
2000             vext_check_reg(s, a->rd, true) &&
2001             vext_check_reg(s, a->rs2, true) &&
2002             vext_check_reg(s, a->rs1, false) &&
2003             vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs1,
2004                                      1 << s->lmul) &&
2005             (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
2008 /* OPFVV with WIDEN, vs2 already at 2*SEW (.wv form) */
2009 #define GEN_OPFWV_WIDEN_TRANS(NAME)                                \
2010 static bool trans_##NAME(DisasContext *s, arg_rmrr *a)             \
2011 {                                                                  \
2012     if (opfwv_widen_check(s, a)) {                                 \
2013         uint32_t data = 0;                                         \
2014         static gen_helper_gvec_4_ptr * const fns[2] = {            \
2015             gen_helper_##NAME##_h, gen_helper_##NAME##_w,          \
2016         };                                                         \
2017         TCGLabel *over = gen_new_label();                          \
2018         gen_set_rm(s, 7);                                          \
2019         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2020                                                                    \
2021         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2022         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2023         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2024         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2025                            vreg_ofs(s, a->rs1),                    \
2026                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
2027                            s->vlen / 8, data, fns[s->sew - 1]);    \
2028         gen_set_label(over);                                       \
2029         return true;                                               \
2030     }                                                              \
2031     return false;                                                  \
2034 GEN_OPFWV_WIDEN_TRANS(vfwadd_wv)
2035 GEN_OPFWV_WIDEN_TRANS(vfwsub_wv)
2037 static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
2039     return (vext_check_isa_ill(s) &&
2040             vext_check_overlap_mask(s, a->rd, a->vm, true) &&
2041             vext_check_reg(s, a->rd, true) &&
2042             vext_check_reg(s, a->rs2, true) &&
2043             (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
2046 /* OPFVF with WIDEN, vs2 already at 2*SEW (.wf form) */
2047 #define GEN_OPFWF_WIDEN_TRANS(NAME)                              \
2048 static bool trans_##NAME(DisasContext *s, arg_rmrr *a)           \
2049 {                                                                \
2050     if (opfwf_widen_check(s, a)) {                               \
2051         uint32_t data = 0;                                       \
2052         static gen_helper_opfvf *const fns[2] = {                \
2053             gen_helper_##NAME##_h, gen_helper_##NAME##_w,        \
2054         };                                                       \
2055         gen_set_rm(s, 7);                                        \
2056         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);           \
2057         data = FIELD_DP32(data, VDATA, VM, a->vm);               \
2058         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);           \
2059         return opfvf_trans(a->rd, a->rs1, a->rs2, data,          \
2060                            fns[s->sew - 1], s);                  \
2061     }                                                            \
2062     return false;                                                \
2065 GEN_OPFWF_WIDEN_TRANS(vfwadd_wf)
2066 GEN_OPFWF_WIDEN_TRANS(vfwsub_wf)
2068 /* Vector Single-Width Floating-Point Multiply/Divide Instructions */
2069 GEN_OPFVV_TRANS(vfmul_vv, opfvv_check)
2070 GEN_OPFVV_TRANS(vfdiv_vv, opfvv_check)
2071 GEN_OPFVF_TRANS(vfmul_vf,  opfvf_check)
2072 GEN_OPFVF_TRANS(vfdiv_vf,  opfvf_check)
2073 GEN_OPFVF_TRANS(vfrdiv_vf,  opfvf_check)
2075 /* Vector Widening Floating-Point Multiply */
2076 GEN_OPFVV_WIDEN_TRANS(vfwmul_vv, opfvv_widen_check)
2077 GEN_OPFVF_WIDEN_TRANS(vfwmul_vf)
2079 /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
2080 GEN_OPFVV_TRANS(vfmacc_vv, opfvv_check)
2081 GEN_OPFVV_TRANS(vfnmacc_vv, opfvv_check)
2082 GEN_OPFVV_TRANS(vfmsac_vv, opfvv_check)
2083 GEN_OPFVV_TRANS(vfnmsac_vv, opfvv_check)
2084 GEN_OPFVV_TRANS(vfmadd_vv, opfvv_check)
2085 GEN_OPFVV_TRANS(vfnmadd_vv, opfvv_check)
2086 GEN_OPFVV_TRANS(vfmsub_vv, opfvv_check)
2087 GEN_OPFVV_TRANS(vfnmsub_vv, opfvv_check)
2088 GEN_OPFVF_TRANS(vfmacc_vf, opfvf_check)
2089 GEN_OPFVF_TRANS(vfnmacc_vf, opfvf_check)
2090 GEN_OPFVF_TRANS(vfmsac_vf, opfvf_check)
2091 GEN_OPFVF_TRANS(vfnmsac_vf, opfvf_check)
2092 GEN_OPFVF_TRANS(vfmadd_vf, opfvf_check)
2093 GEN_OPFVF_TRANS(vfnmadd_vf, opfvf_check)
2094 GEN_OPFVF_TRANS(vfmsub_vf, opfvf_check)
2095 GEN_OPFVF_TRANS(vfnmsub_vf, opfvf_check)
2097 /* Vector Widening Floating-Point Fused Multiply-Add Instructions */
2098 GEN_OPFVV_WIDEN_TRANS(vfwmacc_vv, opfvv_widen_check)
2099 GEN_OPFVV_WIDEN_TRANS(vfwnmacc_vv, opfvv_widen_check)
2100 GEN_OPFVV_WIDEN_TRANS(vfwmsac_vv, opfvv_widen_check)
2101 GEN_OPFVV_WIDEN_TRANS(vfwnmsac_vv, opfvv_widen_check)
2102 GEN_OPFVF_WIDEN_TRANS(vfwmacc_vf)
2103 GEN_OPFVF_WIDEN_TRANS(vfwnmacc_vf)
2104 GEN_OPFVF_WIDEN_TRANS(vfwmsac_vf)
2105 GEN_OPFVF_WIDEN_TRANS(vfwnmsac_vf)
2107 /* Vector Floating-Point Square-Root Instruction */
2110  * If the current SEW does not correspond to a supported IEEE floating-point
2111  * type, an illegal instruction exception is raised
2112  */
2113 static bool opfv_check(DisasContext *s, arg_rmr *a)
2115     return (vext_check_isa_ill(s) &&
2116             vext_check_overlap_mask(s, a->rd, a->vm, false) &&
2117             vext_check_reg(s, a->rd, false) &&
2118             vext_check_reg(s, a->rs2, false) &&
2119             (s->sew != 0));
2122 #define GEN_OPFV_TRANS(NAME, CHECK)                                \
2123 static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
2124 {                                                                  \
2125     if (CHECK(s, a)) {                                             \
2126         uint32_t data = 0;                                         \
2127         static gen_helper_gvec_3_ptr * const fns[3] = {            \
2128             gen_helper_##NAME##_h,                                 \
2129             gen_helper_##NAME##_w,                                 \
2130             gen_helper_##NAME##_d,                                 \
2131         };                                                         \
2132         TCGLabel *over = gen_new_label();                          \
2133         gen_set_rm(s, 7);                                          \
2134         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2135                                                                    \
2136         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2137         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2138         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2139         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2140                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
2141                            s->vlen / 8, data, fns[s->sew - 1]);    \
2142         gen_set_label(over);                                       \
2143         return true;                                               \
2144     }                                                              \
2145     return false;                                                  \
2148 GEN_OPFV_TRANS(vfsqrt_v, opfv_check)
2150 /* Vector Floating-Point MIN/MAX Instructions */
2151 GEN_OPFVV_TRANS(vfmin_vv, opfvv_check)
2152 GEN_OPFVV_TRANS(vfmax_vv, opfvv_check)
2153 GEN_OPFVF_TRANS(vfmin_vf, opfvf_check)
2154 GEN_OPFVF_TRANS(vfmax_vf, opfvf_check)
2156 /* Vector Floating-Point Sign-Injection Instructions */
2157 GEN_OPFVV_TRANS(vfsgnj_vv, opfvv_check)
2158 GEN_OPFVV_TRANS(vfsgnjn_vv, opfvv_check)
2159 GEN_OPFVV_TRANS(vfsgnjx_vv, opfvv_check)
2160 GEN_OPFVF_TRANS(vfsgnj_vf, opfvf_check)
2161 GEN_OPFVF_TRANS(vfsgnjn_vf, opfvf_check)
2162 GEN_OPFVF_TRANS(vfsgnjx_vf, opfvf_check)
2164 /* Vector Floating-Point Compare Instructions */
2165 static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
2167     return (vext_check_isa_ill(s) &&
2168             vext_check_reg(s, a->rs2, false) &&
2169             vext_check_reg(s, a->rs1, false) &&
2170             (s->sew != 0) &&
2171             ((vext_check_overlap_group(a->rd, 1, a->rs1, 1 << s->lmul) &&
2172               vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul)) ||
2173              (s->lmul == 0)));
2176 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
2177 GEN_OPFVV_TRANS(vmfne_vv, opfvv_cmp_check)
2178 GEN_OPFVV_TRANS(vmflt_vv, opfvv_cmp_check)
2179 GEN_OPFVV_TRANS(vmfle_vv, opfvv_cmp_check)
2180 GEN_OPFVV_TRANS(vmford_vv, opfvv_cmp_check)
2182 static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
2184     return (vext_check_isa_ill(s) &&
2185             vext_check_reg(s, a->rs2, false) &&
2186             (s->sew != 0) &&
2187             (vext_check_overlap_group(a->rd, 1, a->rs2, 1 << s->lmul) ||
2188              (s->lmul == 0)));
2191 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
2192 GEN_OPFVF_TRANS(vmfne_vf, opfvf_cmp_check)
2193 GEN_OPFVF_TRANS(vmflt_vf, opfvf_cmp_check)
2194 GEN_OPFVF_TRANS(vmfle_vf, opfvf_cmp_check)
2195 GEN_OPFVF_TRANS(vmfgt_vf, opfvf_cmp_check)
2196 GEN_OPFVF_TRANS(vmfge_vf, opfvf_cmp_check)
2197 GEN_OPFVF_TRANS(vmford_vf, opfvf_cmp_check)
2199 /* Vector Floating-Point Classify Instruction */
2200 GEN_OPFV_TRANS(vfclass_v, opfv_check)
2202 /* Vector Floating-Point Merge Instruction */
2203 GEN_OPFVF_TRANS(vfmerge_vfm,  opfvf_check)
2205 static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
2207     if (vext_check_isa_ill(s) &&
2208         vext_check_reg(s, a->rd, false) &&
2209         (s->sew != 0)) {
2211         if (s->vl_eq_vlmax) {
2212             tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
2213                                  MAXSZ(s), MAXSZ(s), cpu_fpr[a->rs1]);
2214         } else {
2215             TCGv_ptr dest;
2216             TCGv_i32 desc;
2217             uint32_t data = FIELD_DP32(0, VDATA, LMUL, s->lmul);
2218             static gen_helper_vmv_vx * const fns[3] = {
2219                 gen_helper_vmv_v_x_h,
2220                 gen_helper_vmv_v_x_w,
2221                 gen_helper_vmv_v_x_d,
2222             };
2223             TCGLabel *over = gen_new_label();
2224             tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2226             dest = tcg_temp_new_ptr();
2227             desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
2228             tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd));
2229             fns[s->sew - 1](dest, cpu_fpr[a->rs1], cpu_env, desc);
2231             tcg_temp_free_ptr(dest);
2232             tcg_temp_free_i32(desc);
2233             gen_set_label(over);
2234         }
2235         return true;
2236     }
2237     return false;
2240 /* Single-Width Floating-Point/Integer Type-Convert Instructions */
2241 GEN_OPFV_TRANS(vfcvt_xu_f_v, opfv_check)
2242 GEN_OPFV_TRANS(vfcvt_x_f_v, opfv_check)
2243 GEN_OPFV_TRANS(vfcvt_f_xu_v, opfv_check)
2244 GEN_OPFV_TRANS(vfcvt_f_x_v, opfv_check)
2246 /* Widening Floating-Point/Integer Type-Convert Instructions */
2249  * If the current SEW does not correspond to a supported IEEE floating-point
2250  * type, an illegal instruction exception is raised
2251  */
2252 static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
2254     return (vext_check_isa_ill(s) &&
2255             vext_check_overlap_mask(s, a->rd, a->vm, true) &&
2256             vext_check_reg(s, a->rd, true) &&
2257             vext_check_reg(s, a->rs2, false) &&
2258             vext_check_overlap_group(a->rd, 2 << s->lmul, a->rs2,
2259                                      1 << s->lmul) &&
2260             (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
2263 #define GEN_OPFV_WIDEN_TRANS(NAME)                                 \
2264 static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
2265 {                                                                  \
2266     if (opfv_widen_check(s, a)) {                                  \
2267         uint32_t data = 0;                                         \
2268         static gen_helper_gvec_3_ptr * const fns[2] = {            \
2269             gen_helper_##NAME##_h,                                 \
2270             gen_helper_##NAME##_w,                                 \
2271         };                                                         \
2272         TCGLabel *over = gen_new_label();                          \
2273         gen_set_rm(s, 7);                                          \
2274         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2275                                                                    \
2276         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2277         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2278         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2279         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2280                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
2281                            s->vlen / 8, data, fns[s->sew - 1]);    \
2282         gen_set_label(over);                                       \
2283         return true;                                               \
2284     }                                                              \
2285     return false;                                                  \
2288 GEN_OPFV_WIDEN_TRANS(vfwcvt_xu_f_v)
2289 GEN_OPFV_WIDEN_TRANS(vfwcvt_x_f_v)
2290 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_xu_v)
2291 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_x_v)
2292 GEN_OPFV_WIDEN_TRANS(vfwcvt_f_f_v)
2294 /* Narrowing Floating-Point/Integer Type-Convert Instructions */
2297  * If the current SEW does not correspond to a supported IEEE floating-point
2298  * type, an illegal instruction exception is raised
2299  */
2300 static bool opfv_narrow_check(DisasContext *s, arg_rmr *a)
2302     return (vext_check_isa_ill(s) &&
2303             vext_check_overlap_mask(s, a->rd, a->vm, false) &&
2304             vext_check_reg(s, a->rd, false) &&
2305             vext_check_reg(s, a->rs2, true) &&
2306             vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2,
2307                                      2 << s->lmul) &&
2308             (s->lmul < 0x3) && (s->sew < 0x3) && (s->sew != 0));
2311 #define GEN_OPFV_NARROW_TRANS(NAME)                                \
2312 static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
2313 {                                                                  \
2314     if (opfv_narrow_check(s, a)) {                                 \
2315         uint32_t data = 0;                                         \
2316         static gen_helper_gvec_3_ptr * const fns[2] = {            \
2317             gen_helper_##NAME##_h,                                 \
2318             gen_helper_##NAME##_w,                                 \
2319         };                                                         \
2320         TCGLabel *over = gen_new_label();                          \
2321         gen_set_rm(s, 7);                                          \
2322         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2323                                                                    \
2324         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2325         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2326         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2327         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2328                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
2329                            s->vlen / 8, data, fns[s->sew - 1]);    \
2330         gen_set_label(over);                                       \
2331         return true;                                               \
2332     }                                                              \
2333     return false;                                                  \
2336 GEN_OPFV_NARROW_TRANS(vfncvt_xu_f_v)
2337 GEN_OPFV_NARROW_TRANS(vfncvt_x_f_v)
2338 GEN_OPFV_NARROW_TRANS(vfncvt_f_xu_v)
2339 GEN_OPFV_NARROW_TRANS(vfncvt_f_x_v)
2340 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_v)
2343  *** Vector Reduction Operations
2344  */
2345 /* Vector Single-Width Integer Reduction Instructions */
2346 static bool reduction_check(DisasContext *s, arg_rmrr *a)
2348     return vext_check_isa_ill(s) && vext_check_reg(s, a->rs2, false);
2351 GEN_OPIVV_TRANS(vredsum_vs, reduction_check)
2352 GEN_OPIVV_TRANS(vredmaxu_vs, reduction_check)
2353 GEN_OPIVV_TRANS(vredmax_vs, reduction_check)
2354 GEN_OPIVV_TRANS(vredminu_vs, reduction_check)
2355 GEN_OPIVV_TRANS(vredmin_vs, reduction_check)
2356 GEN_OPIVV_TRANS(vredand_vs, reduction_check)
2357 GEN_OPIVV_TRANS(vredor_vs, reduction_check)
2358 GEN_OPIVV_TRANS(vredxor_vs, reduction_check)
2360 /* Vector Widening Integer Reduction Instructions */
2361 GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_check)
2362 GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_check)
2364 /* Vector Single-Width Floating-Point Reduction Instructions */
2365 GEN_OPFVV_TRANS(vfredsum_vs, reduction_check)
2366 GEN_OPFVV_TRANS(vfredmax_vs, reduction_check)
2367 GEN_OPFVV_TRANS(vfredmin_vs, reduction_check)
2369 /* Vector Widening Floating-Point Reduction Instructions */
2370 GEN_OPFVV_WIDEN_TRANS(vfwredsum_vs, reduction_check)
2373  *** Vector Mask Operations
2374  */
2376 /* Vector Mask-Register Logical Instructions */
2377 #define GEN_MM_TRANS(NAME)                                         \
2378 static bool trans_##NAME(DisasContext *s, arg_r *a)                \
2379 {                                                                  \
2380     if (vext_check_isa_ill(s)) {                                   \
2381         uint32_t data = 0;                                         \
2382         gen_helper_gvec_4_ptr *fn = gen_helper_##NAME;             \
2383         TCGLabel *over = gen_new_label();                          \
2384         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2385                                                                    \
2386         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2387         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2388         tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),     \
2389                            vreg_ofs(s, a->rs1),                    \
2390                            vreg_ofs(s, a->rs2), cpu_env, 0,        \
2391                            s->vlen / 8, data, fn);                 \
2392         gen_set_label(over);                                       \
2393         return true;                                               \
2394     }                                                              \
2395     return false;                                                  \
2398 GEN_MM_TRANS(vmand_mm)
2399 GEN_MM_TRANS(vmnand_mm)
2400 GEN_MM_TRANS(vmandnot_mm)
2401 GEN_MM_TRANS(vmxor_mm)
2402 GEN_MM_TRANS(vmor_mm)
2403 GEN_MM_TRANS(vmnor_mm)
2404 GEN_MM_TRANS(vmornot_mm)
2405 GEN_MM_TRANS(vmxnor_mm)
2407 /* Vector mask population count vmpopc */
2408 static bool trans_vmpopc_m(DisasContext *s, arg_rmr *a)
2410     if (vext_check_isa_ill(s)) {
2411         TCGv_ptr src2, mask;
2412         TCGv dst;
2413         TCGv_i32 desc;
2414         uint32_t data = 0;
2415         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2416         data = FIELD_DP32(data, VDATA, VM, a->vm);
2417         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2419         mask = tcg_temp_new_ptr();
2420         src2 = tcg_temp_new_ptr();
2421         dst = tcg_temp_new();
2422         desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
2424         tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2425         tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2427         gen_helper_vmpopc_m(dst, mask, src2, cpu_env, desc);
2428         gen_set_gpr(a->rd, dst);
2430         tcg_temp_free_ptr(mask);
2431         tcg_temp_free_ptr(src2);
2432         tcg_temp_free(dst);
2433         tcg_temp_free_i32(desc);
2434         return true;
2435     }
2436     return false;
2439 /* vmfirst find-first-set mask bit */
2440 static bool trans_vmfirst_m(DisasContext *s, arg_rmr *a)
2442     if (vext_check_isa_ill(s)) {
2443         TCGv_ptr src2, mask;
2444         TCGv dst;
2445         TCGv_i32 desc;
2446         uint32_t data = 0;
2447         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2448         data = FIELD_DP32(data, VDATA, VM, a->vm);
2449         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2451         mask = tcg_temp_new_ptr();
2452         src2 = tcg_temp_new_ptr();
2453         dst = tcg_temp_new();
2454         desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));
2456         tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2));
2457         tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));
2459         gen_helper_vmfirst_m(dst, mask, src2, cpu_env, desc);
2460         gen_set_gpr(a->rd, dst);
2462         tcg_temp_free_ptr(mask);
2463         tcg_temp_free_ptr(src2);
2464         tcg_temp_free(dst);
2465         tcg_temp_free_i32(desc);
2466         return true;
2467     }
2468     return false;
2471 /* vmsbf.m set-before-first mask bit */
2472 /* vmsif.m set-include-first mask bit */
2473 /* vmsof.m set-only-first mask bit */
2474 #define GEN_M_TRANS(NAME)                                          \
2475 static bool trans_##NAME(DisasContext *s, arg_rmr *a)              \
2476 {                                                                  \
2477     if (vext_check_isa_ill(s)) {                                   \
2478         uint32_t data = 0;                                         \
2479         gen_helper_gvec_3_ptr *fn = gen_helper_##NAME;             \
2480         TCGLabel *over = gen_new_label();                          \
2481         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);          \
2482                                                                    \
2483         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);             \
2484         data = FIELD_DP32(data, VDATA, VM, a->vm);                 \
2485         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);             \
2486         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd),                     \
2487                            vreg_ofs(s, 0), vreg_ofs(s, a->rs2),    \
2488                            cpu_env, 0, s->vlen / 8, data, fn);     \
2489         gen_set_label(over);                                       \
2490         return true;                                               \
2491     }                                                              \
2492     return false;                                                  \
2495 GEN_M_TRANS(vmsbf_m)
2496 GEN_M_TRANS(vmsif_m)
2497 GEN_M_TRANS(vmsof_m)
2499 /* Vector Iota Instruction */
2500 static bool trans_viota_m(DisasContext *s, arg_viota_m *a)
2502     if (vext_check_isa_ill(s) &&
2503         vext_check_reg(s, a->rd, false) &&
2504         vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs2, 1) &&
2505         (a->vm != 0 || a->rd != 0)) {
2506         uint32_t data = 0;
2507         TCGLabel *over = gen_new_label();
2508         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2510         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2511         data = FIELD_DP32(data, VDATA, VM, a->vm);
2512         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2513         static gen_helper_gvec_3_ptr * const fns[4] = {
2514             gen_helper_viota_m_b, gen_helper_viota_m_h,
2515             gen_helper_viota_m_w, gen_helper_viota_m_d,
2516         };
2517         tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2518                            vreg_ofs(s, a->rs2), cpu_env, 0,
2519                            s->vlen / 8, data, fns[s->sew]);
2520         gen_set_label(over);
2521         return true;
2522     }
2523     return false;
2526 /* Vector Element Index Instruction */
2527 static bool trans_vid_v(DisasContext *s, arg_vid_v *a)
2529     if (vext_check_isa_ill(s) &&
2530         vext_check_reg(s, a->rd, false) &&
2531         vext_check_overlap_mask(s, a->rd, a->vm, false)) {
2532         uint32_t data = 0;
2533         TCGLabel *over = gen_new_label();
2534         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2536         data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
2537         data = FIELD_DP32(data, VDATA, VM, a->vm);
2538         data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
2539         static gen_helper_gvec_2_ptr * const fns[4] = {
2540             gen_helper_vid_v_b, gen_helper_vid_v_h,
2541             gen_helper_vid_v_w, gen_helper_vid_v_d,
2542         };
2543         tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
2544                            cpu_env, 0, s->vlen / 8, data, fns[s->sew]);
2545         gen_set_label(over);
2546         return true;
2547     }
2548     return false;
2552  *** Vector Permutation Instructions
2553  */
2555 /* Integer Extract Instruction */
2557 static void load_element(TCGv_i64 dest, TCGv_ptr base,
2558                          int ofs, int sew)
2560     switch (sew) {
2561     case MO_8:
2562         tcg_gen_ld8u_i64(dest, base, ofs);
2563         break;
2564     case MO_16:
2565         tcg_gen_ld16u_i64(dest, base, ofs);
2566         break;
2567     case MO_32:
2568         tcg_gen_ld32u_i64(dest, base, ofs);
2569         break;
2570     case MO_64:
2571         tcg_gen_ld_i64(dest, base, ofs);
2572         break;
2573     default:
2574         g_assert_not_reached();
2575         break;
2576     }
2579 /* offset of the idx element with base register r */
2580 static uint32_t endian_ofs(DisasContext *s, int r, int idx)
2582 #ifdef HOST_WORDS_BIGENDIAN
2583     return vreg_ofs(s, r) + ((idx ^ (7 >> s->sew)) << s->sew);
2584 #else
2585     return vreg_ofs(s, r) + (idx << s->sew);
2586 #endif
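/*
 * Worked example for endian_ofs() above: on a little-endian host with
 * s->sew == MO_16 the offset of element idx is simply
 * vreg_ofs(s, r) + idx * 2.  On a big-endian host the elements are
 * laid out in the opposite order within each 64-bit host word, so the
 * index is first flipped with idx ^ (7 >> 1), i.e. idx ^ 3, before
 * being scaled.
 */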
2589 /* adjust the index according to host endianness */
2590 static void endian_adjust(TCGv_i32 ofs, int sew)
2592 #ifdef HOST_WORDS_BIGENDIAN
2593     tcg_gen_xori_i32(ofs, ofs, 7 >> sew);
2594 #endif
2597 /* Load idx >= VLMAX ? 0 : vreg[idx] */
2598 static void vec_element_loadx(DisasContext *s, TCGv_i64 dest,
2599                               int vreg, TCGv idx, int vlmax)
2601     TCGv_i32 ofs = tcg_temp_new_i32();
2602     TCGv_ptr base = tcg_temp_new_ptr();
2603     TCGv_i64 t_idx = tcg_temp_new_i64();
2604     TCGv_i64 t_vlmax, t_zero;
2606     /*
2607      * Mask the index to the length so that we do
2608      * not produce an out-of-range load.
2609      */
2610     tcg_gen_trunc_tl_i32(ofs, idx);
2611     tcg_gen_andi_i32(ofs, ofs, vlmax - 1);
2613     /* Convert the index to an offset. */
2614     endian_adjust(ofs, s->sew);
2615     tcg_gen_shli_i32(ofs, ofs, s->sew);
2617     /* Convert the index to a pointer. */
2618     tcg_gen_ext_i32_ptr(base, ofs);
2619     tcg_gen_add_ptr(base, base, cpu_env);
2621     /* Perform the load. */
2622     load_element(dest, base,
2623                  vreg_ofs(s, vreg), s->sew);
2624     tcg_temp_free_ptr(base);
2625     tcg_temp_free_i32(ofs);
2627     /* Flush out-of-range indexing to zero.  */
2628     t_vlmax = tcg_const_i64(vlmax);
2629     t_zero = tcg_const_i64(0);
2630     tcg_gen_extu_tl_i64(t_idx, idx);
2632     tcg_gen_movcond_i64(TCG_COND_LTU, dest, t_idx,
2633                         t_vlmax, dest, t_zero);
2635     tcg_temp_free_i64(t_vlmax);
2636     tcg_temp_free_i64(t_zero);
2637     tcg_temp_free_i64(t_idx);
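/*
 * Example of the two-step clamp above (assuming vlmax == 16): an index
 * of 20 is first masked to 20 & 15 == 4 so the load itself stays in
 * range, and the final movcond then replaces the loaded value with 0
 * because 20 >= vlmax.
 */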
2640 static void vec_element_loadi(DisasContext *s, TCGv_i64 dest,
2641                               int vreg, int idx)
2643     load_element(dest, cpu_env, endian_ofs(s, vreg, idx), s->sew);
2646 static bool trans_vext_x_v(DisasContext *s, arg_r *a)
2648     TCGv_i64 tmp = tcg_temp_new_i64();
2649     TCGv dest = tcg_temp_new();
2651     if (a->rs1 == 0) {
2652         /* Special case vmv.x.s rd, vs2. */
2653         vec_element_loadi(s, tmp, a->rs2, 0);
2654     } else {
2655         /* This instruction ignores LMUL and vector register groups */
2656         int vlmax = s->vlen >> (3 + s->sew);
2657         vec_element_loadx(s, tmp, a->rs2, cpu_gpr[a->rs1], vlmax);
2658     }
2659     tcg_gen_trunc_i64_tl(dest, tmp);
2660     gen_set_gpr(a->rd, dest);
2662     tcg_temp_free(dest);
2663     tcg_temp_free_i64(tmp);
2664     return true;
2667 /* Integer Scalar Move Instruction */
2669 static void store_element(TCGv_i64 val, TCGv_ptr base,
2670                           int ofs, int sew)
2672     switch (sew) {
2673     case MO_8:
2674         tcg_gen_st8_i64(val, base, ofs);
2675         break;
2676     case MO_16:
2677         tcg_gen_st16_i64(val, base, ofs);
2678         break;
2679     case MO_32:
2680         tcg_gen_st32_i64(val, base, ofs);
2681         break;
2682     case MO_64:
2683         tcg_gen_st_i64(val, base, ofs);
2684         break;
2685     default:
2686         g_assert_not_reached();
2687         break;
2688     }
2692  * Store vreg[idx] = val.
2693  * The index must be in range of VLMAX.
2694  */
2695 static void vec_element_storei(DisasContext *s, int vreg,
2696                                int idx, TCGv_i64 val)
2698     store_element(val, cpu_env, endian_ofs(s, vreg, idx), s->sew);
2701 /* vmv.s.x vd, rs1 # vd[0] = rs1 */
2702 static bool trans_vmv_s_x(DisasContext *s, arg_vmv_s_x *a)
2704     if (vext_check_isa_ill(s)) {
2705         /* This instruction ignores LMUL and vector register groups */
2706         int maxsz = s->vlen >> 3;
2707         TCGv_i64 t1;
2708         TCGLabel *over = gen_new_label();
2710         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2711         tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), maxsz, maxsz, 0);
2712         if (a->rs1 == 0) {
2713             goto done;
2714         }
2716         t1 = tcg_temp_new_i64();
2717         tcg_gen_extu_tl_i64(t1, cpu_gpr[a->rs1]);
2718         vec_element_storei(s, a->rd, 0, t1);
2719         tcg_temp_free_i64(t1);
2720     done:
2721         gen_set_label(over);
2722         return true;
2723     }
2724     return false;
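/*
 * Sketch of the behaviour implemented above (per the draft spec this
 * code targets): vmv.s.x first zeroes the whole destination register
 * (maxsz == VLEN/8 bytes) and then stores the x-register value into
 * element 0; with rs1 == x0 the store is skipped and vd is left all
 * zeroes, and nothing is written at all when vl == 0.
 */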
2727 /* Floating-Point Scalar Move Instructions */
2728 static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
2730     if (!s->vill && has_ext(s, RVF) &&
2731         (s->mstatus_fs != 0) && (s->sew != 0)) {
2732         unsigned int len = 8 << s->sew;
2734         vec_element_loadi(s, cpu_fpr[a->rd], a->rs2, 0);
2735         if (len < 64) {
2736             tcg_gen_ori_i64(cpu_fpr[a->rd], cpu_fpr[a->rd],
2737                             MAKE_64BIT_MASK(len, 64 - len));
2738         }
2740         mark_fs_dirty(s);
2741         return true;
2742     }
2743     return false;
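/*
 * Worked example of the NaN-boxing above: with SEW = 32 (s->sew == 2)
 * len is 32, so MAKE_64BIT_MASK(32, 32) ORs 0xffffffff00000000 into the
 * upper half of cpu_fpr[rd], leaving a properly NaN-boxed single-
 * precision value; with SEW = 64 no mask is applied.
 */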
2746 /* vfmv.s.f vd, rs1 # vd[0] = rs1 (vs2=0) */
2747 static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
2749     if (!s->vill && has_ext(s, RVF) && (s->sew != 0)) {
2750         TCGv_i64 t1;
2751         /* This instruction ignores LMUL and vector register groups. */
2752         uint32_t vlmax = s->vlen >> 3;
2754         /* if vl == 0, skip vector register write back */
2755         TCGLabel *over = gen_new_label();
2756         tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
2758         /* zero all elements */
2759         tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd), vlmax, vlmax, 0);
2761         /* NaN-box f[rs1] as necessary for SEW */
2762         t1 = tcg_temp_new_i64();
2763         if (s->sew == MO_64 && !has_ext(s, RVD)) {
2764             tcg_gen_ori_i64(t1, cpu_fpr[a->rs1], MAKE_64BIT_MASK(32, 32));
2765         } else {
2766             tcg_gen_mov_i64(t1, cpu_fpr[a->rs1]);
2767         }
2768         vec_element_storei(s, a->rd, 0, t1);
2769         tcg_temp_free_i64(t1);
2770         gen_set_label(over);
2771         return true;
2772     }
2773     return false;
2776 /* Vector Slide Instructions */
static bool slideup_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            (a->rd != a->rs2));
}

GEN_OPIVX_TRANS(vslideup_vx, slideup_check)
GEN_OPIVX_TRANS(vslide1up_vx, slideup_check)
GEN_OPIVI_TRANS(vslideup_vi, 1, vslideup_vx, slideup_check)

GEN_OPIVX_TRANS(vslidedown_vx, opivx_check)
GEN_OPIVX_TRANS(vslide1down_vx, opivx_check)
GEN_OPIVI_TRANS(vslidedown_vi, 1, vslidedown_vx, opivx_check)

/* Vector Register Gather Instruction */
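/*
 * For vrgather.vv, the destination group may not overlap either source
 * group (or v0 when masked); the check below enforces this.
 */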
static bool vrgather_vv_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs1, false) &&
            vext_check_reg(s, a->rs2, false) &&
            (a->rd != a->rs2) && (a->rd != a->rs1));
}

GEN_OPIVV_TRANS(vrgather_vv, vrgather_vv_check)

static bool vrgather_vx_check(DisasContext *s, arg_rmrr *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, true) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            (a->rd != a->rs2));
}

/* vrgather.vx vd, vs2, rs1, vm # vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[x[rs1]] */
static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a)
{
    if (!vrgather_vx_check(s, a)) {
        return false;
    }
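
    /*
     * With no mask and vl == VLMAX, every destination element receives the
     * same source element, so the gather reduces to loading one element of
     * vs2 (zero when the index is out of range) and splatting it across the
     * destination register group.
     */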
    if (a->vm && s->vl_eq_vlmax) {
        int vlmax = s->vlen / s->mlen;
        TCGv_i64 dest = tcg_temp_new_i64();

        if (a->rs1 == 0) {
            vec_element_loadi(s, dest, a->rs2, 0);
        } else {
            vec_element_loadx(s, dest, a->rs2, cpu_gpr[a->rs1], vlmax);
        }

        tcg_gen_gvec_dup_i64(s->sew, vreg_ofs(s, a->rd),
                             MAXSZ(s), MAXSZ(s), dest);
        tcg_temp_free_i64(dest);
    } else {
        static gen_helper_opivx * const fns[4] = {
            gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
            gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
        };
        return opivx_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s);
    }
    return true;
}

/* vrgather.vi vd, vs2, imm, vm # vd[i] = (imm >= VLMAX) ? 0 : vs2[imm] */
static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a)
{
    if (!vrgather_vx_check(s, a)) {
        return false;
    }
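
    /*
     * The index is a constant, so with no mask and vl == VLMAX the result
     * is either all zeroes (index out of range) or a splat of element
     * 'imm' of vs2 read directly from the register file.
     */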
    if (a->vm && s->vl_eq_vlmax) {
        if (a->rs1 >= s->vlen / s->mlen) {
            tcg_gen_gvec_dup_imm(SEW64, vreg_ofs(s, a->rd),
                                 MAXSZ(s), MAXSZ(s), 0);
        } else {
            tcg_gen_gvec_dup_mem(s->sew, vreg_ofs(s, a->rd),
                                 endian_ofs(s, a->rs2, a->rs1),
                                 MAXSZ(s), MAXSZ(s));
        }
    } else {
        static gen_helper_opivx * const fns[4] = {
            gen_helper_vrgather_vx_b, gen_helper_vrgather_vx_h,
            gen_helper_vrgather_vx_w, gen_helper_vrgather_vx_d
        };
        return opivi_trans(a->rd, a->rs1, a->rs2, a->vm, fns[s->sew], s, 1);
    }
    return true;
}

/* Vector Compress Instruction */
static bool vcompress_vm_check(DisasContext *s, arg_r *a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_overlap_group(a->rd, 1 << s->lmul, a->rs1, 1) &&
            (a->rd != a->rs2));
}
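
/*
 * vcompress.vm packs the elements of vs2 whose corresponding bit in the
 * mask register vs1 is set into contiguous elements at the start of vd.
 * The element shuffling is done entirely in the out-of-line helper; the
 * translator only packs MLEN/LMUL into the descriptor and skips the call
 * when vl == 0.
 */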
static bool trans_vcompress_vm(DisasContext *s, arg_r *a)
{
    if (vcompress_vm_check(s, a)) {
        uint32_t data = 0;
        static gen_helper_gvec_4_ptr * const fns[4] = {
            gen_helper_vcompress_vm_b, gen_helper_vcompress_vm_h,
            gen_helper_vcompress_vm_w, gen_helper_vcompress_vm_d,
        };
        TCGLabel *over = gen_new_label();
        tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

        data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
        data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
        tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0),
                           vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2),
                           cpu_env, 0, s->vlen / 8, data, fns[s->sew]);
        gen_set_label(over);
        return true;
    }
    return false;
}