/*
 * RISC-V translation routines for the RVV Standard Extension.
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "internals.h"

static bool trans_vsetvl(DisasContext *ctx, arg_vsetvl *a)
{
    TCGv s1, s2, dst;

    if (!has_ext(ctx, RVV)) {
        return false;
    }

    s2 = tcg_temp_new();
    dst = tcg_temp_new();

    /* Using x0 as the rs1 register specifier encodes an infinite AVL */
    if (a->rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_const_tl(RV_VLEN_MAX);
    } else {
        s1 = tcg_temp_new();
        gen_get_gpr(s1, a->rs1);
    }
    gen_get_gpr(s2, a->rs2);
    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(a->rd, dst);
    tcg_gen_movi_tl(cpu_pc, ctx->pc_succ_insn);
    lookup_and_goto_ptr(ctx);
    ctx->base.is_jmp = DISAS_NORETURN;

    tcg_temp_free(s1);
    tcg_temp_free(s2);
    tcg_temp_free(dst);
    return true;
}

static bool trans_vsetvli(DisasContext *ctx, arg_vsetvli *a)
{
    TCGv s1, s2, dst;

    if (!has_ext(ctx, RVV)) {
        return false;
    }

    s2 = tcg_const_tl(a->zimm);
    dst = tcg_temp_new();

    /* Using x0 as the rs1 register specifier encodes an infinite AVL */
    if (a->rs1 == 0) {
        /* As the mask is at least one bit, RV_VLEN_MAX is >= VLMAX */
        s1 = tcg_const_tl(RV_VLEN_MAX);
    } else {
        s1 = tcg_temp_new();
        gen_get_gpr(s1, a->rs1);
    }
    gen_helper_vsetvl(dst, cpu_env, s1, s2);
    gen_set_gpr(a->rd, dst);
    gen_goto_tb(ctx, 0, ctx->pc_succ_insn);
    ctx->base.is_jmp = DISAS_NORETURN;

    tcg_temp_free(s1);
    tcg_temp_free(s2);
    tcg_temp_free(dst);
    return true;
}

/* vector register offset from env */
static uint32_t vreg_ofs(DisasContext *s, int reg)
{
    return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8;
}

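/*
 * Worked example (illustrative values, not fixed by the code above): with
 * VLEN = 128 bits, s->vlen / 8 == 16, so each vector register occupies
 * 16 bytes of vreg and v2 starts at offsetof(CPURISCVState, vreg) + 32.
 */
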
/*
 * In cpu_get_tb_cpu_state(), VILL is set if RVV is not present,
 * so RVV is also checked by this function.
 */
static bool vext_check_isa_ill(DisasContext *s)
{
    return !s->vill;
}

/*
 * There are two rules checked here.
 *
 * 1. Vector register numbers are multiples of LMUL. (Section 3.2)
 *
 * 2. For all widening instructions, the destination LMUL value must also be
 *    a supported LMUL value. (Section 11.2)
 */
static bool vext_check_reg(DisasContext *s, uint32_t reg, bool widen)
{
    /*
     * The destination vector register group results are arranged as if both
     * SEW and LMUL were at twice their current settings. (Section 11.2).
     */
    int legal = widen ? 2 << s->lmul : 1 << s->lmul;

    return !((s->lmul == 0x3 && widen) || (reg % legal));
}

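/*
 * For example, at LMUL=2 (s->lmul == 1) a register group spans two
 * registers, so only even register numbers pass; a widening instruction
 * additionally needs a destination number that is a multiple of 4, and
 * widening at LMUL=8 (s->lmul == 3) is rejected outright.
 */
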
/*
 * There are two rules checked here.
 *
 * 1. The destination vector register group for a masked vector instruction can
 *    only overlap the source mask register (v0) when LMUL=1. (Section 5.3)
 *
 * 2. For widening instructions and some other instructions, such as
 *    vslideup.vx, there is no need to check whether LMUL=1.
 */
static bool vext_check_overlap_mask(DisasContext *s, uint32_t vd, bool vm,
                                    bool force)
{
    return (vm != 0 || vd != 0) || (!force && (s->lmul == 0));
}

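/*
 * For example, a masked instruction (vm == 0) with vd == v0 passes this
 * check only when force is false and LMUL=1 (s->lmul == 0); at any larger
 * LMUL the destination register group would overlap the mask register.
 */
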
/* The LMUL setting must be such that LMUL * NFIELDS <= 8. (Section 7.8) */
static bool vext_check_nf(DisasContext *s, uint32_t nf)
{
    return (1 << s->lmul) * nf <= 8;
}

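/*
 * E.g. taking NFIELDS from the formula above, LMUL=2 (s->lmul == 1)
 * allows at most 4 fields per segment, since 2 * 4 <= 8.
 */
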
/* common translation macro */
#define GEN_VEXT_TRANS(NAME, SEQ, ARGTYPE, OP, CHECK)      \
static bool trans_##NAME(DisasContext *s, arg_##ARGTYPE *a)\
{                                                          \
    if (CHECK(s, a)) {                                     \
        return OP(s, a, SEQ);                              \
    }                                                      \
    return false;                                          \
}

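/*
 * As a sketch, GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
 * expands to a trans_vlb_v() that runs ld_us_check() and, only if the
 * operands are legal, emits code through ld_us_op() with seq == 0.
 */
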
/*
 *** unit stride load and store
 */
typedef void gen_helper_ldst_us(TCGv_ptr, TCGv_ptr, TCGv,
                                TCGv_env, TCGv_i32);

static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                          gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();

    /*
     * As simd_desc supports at most 256 bytes, and in this implementation,
     * the max vector group length is 2048 bytes. So split it into two parts.
     *
     * The first part is vlen in bytes, encoded in maxsz of simd_desc.
     * The second part is lmul, encoded in data of simd_desc.
     */
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

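/*
 * Worked example of the split above (illustrative values): with
 * VLEN = 1024 bits, s->vlen / 8 == 128 bytes goes into the maxsz field,
 * while MLEN/VM/LMUL/NF travel in the data argument, packed by the
 * callers below with FIELD_DP32().
 */
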
static bool ld_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][7][4] = {
        /* masked unit stride load */
        { { gen_helper_vlb_v_b_mask,  gen_helper_vlb_v_h_mask,
            gen_helper_vlb_v_w_mask,  gen_helper_vlb_v_d_mask },
          { NULL,                     gen_helper_vlh_v_h_mask,
            gen_helper_vlh_v_w_mask,  gen_helper_vlh_v_d_mask },
          { NULL,                     NULL,
            gen_helper_vlw_v_w_mask,  gen_helper_vlw_v_d_mask },
          { gen_helper_vle_v_b_mask,  gen_helper_vle_v_h_mask,
            gen_helper_vle_v_w_mask,  gen_helper_vle_v_d_mask },
          { gen_helper_vlbu_v_b_mask, gen_helper_vlbu_v_h_mask,
            gen_helper_vlbu_v_w_mask, gen_helper_vlbu_v_d_mask },
          { NULL,                     gen_helper_vlhu_v_h_mask,
            gen_helper_vlhu_v_w_mask, gen_helper_vlhu_v_d_mask },
          { NULL,                     NULL,
            gen_helper_vlwu_v_w_mask, gen_helper_vlwu_v_d_mask } },
        /* unmasked unit stride load */
        { { gen_helper_vlb_v_b,  gen_helper_vlb_v_h,
            gen_helper_vlb_v_w,  gen_helper_vlb_v_d },
          { NULL,                gen_helper_vlh_v_h,
            gen_helper_vlh_v_w,  gen_helper_vlh_v_d },
          { NULL,                NULL,
            gen_helper_vlw_v_w,  gen_helper_vlw_v_d },
          { gen_helper_vle_v_b,  gen_helper_vle_v_h,
            gen_helper_vle_v_w,  gen_helper_vle_v_d },
          { gen_helper_vlbu_v_b, gen_helper_vlbu_v_h,
            gen_helper_vlbu_v_w, gen_helper_vlbu_v_d },
          { NULL,                gen_helper_vlhu_v_h,
            gen_helper_vlhu_v_w, gen_helper_vlhu_v_d },
          { NULL,                NULL,
            gen_helper_vlwu_v_w, gen_helper_vlwu_v_d } }
    };

    fn = fns[a->vm][seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
}

static bool ld_us_check(DisasContext *s, arg_r2nfvm * a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vlb_v, 0, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlh_v, 1, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlw_v, 2, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vle_v, 3, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlbu_v, 4, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlhu_v, 5, r2nfvm, ld_us_op, ld_us_check)
GEN_VEXT_TRANS(vlwu_v, 6, r2nfvm, ld_us_op, ld_us_check)

static bool st_us_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[2][4][4] = {
        /* masked unit stride store */
        { { gen_helper_vsb_v_b_mask,  gen_helper_vsb_v_h_mask,
            gen_helper_vsb_v_w_mask,  gen_helper_vsb_v_d_mask },
          { NULL,                     gen_helper_vsh_v_h_mask,
            gen_helper_vsh_v_w_mask,  gen_helper_vsh_v_d_mask },
          { NULL,                     NULL,
            gen_helper_vsw_v_w_mask,  gen_helper_vsw_v_d_mask },
          { gen_helper_vse_v_b_mask,  gen_helper_vse_v_h_mask,
            gen_helper_vse_v_w_mask,  gen_helper_vse_v_d_mask } },
        /* unmasked unit stride store */
        { { gen_helper_vsb_v_b,  gen_helper_vsb_v_h,
            gen_helper_vsb_v_w,  gen_helper_vsb_v_d },
          { NULL,                gen_helper_vsh_v_h,
            gen_helper_vsh_v_w,  gen_helper_vsh_v_d },
          { NULL,                NULL,
            gen_helper_vsw_v_w,  gen_helper_vsw_v_d },
          { gen_helper_vse_v_b,  gen_helper_vse_v_h,
            gen_helper_vse_v_w,  gen_helper_vse_v_d } }
    };

    fn = fns[a->vm][seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_us_trans(a->rd, a->rs1, data, fn, s);
}

static bool st_us_check(DisasContext *s, arg_r2nfvm * a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vsb_v, 0, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vsh_v, 1, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vsw_v, 2, r2nfvm, st_us_op, st_us_check)
GEN_VEXT_TRANS(vse_v, 3, r2nfvm, st_us_op, st_us_check)

/*
 *** stride load and store
 */
typedef void gen_helper_ldst_stride(TCGv_ptr, TCGv_ptr, TCGv,
                                    TCGv, TCGv_env, TCGv_i32);

static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2,
                              uint32_t data, gen_helper_ldst_stride *fn,
                              DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base, stride;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();
    stride = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    gen_get_gpr(stride, rs2);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, stride, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free(stride);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ld_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[7][4] = {
        { gen_helper_vlsb_v_b,  gen_helper_vlsb_v_h,
          gen_helper_vlsb_v_w,  gen_helper_vlsb_v_d },
        { NULL,                 gen_helper_vlsh_v_h,
          gen_helper_vlsh_v_w,  gen_helper_vlsh_v_d },
        { NULL,                 NULL,
          gen_helper_vlsw_v_w,  gen_helper_vlsw_v_d },
        { gen_helper_vlse_v_b,  gen_helper_vlse_v_h,
          gen_helper_vlse_v_w,  gen_helper_vlse_v_d },
        { gen_helper_vlsbu_v_b, gen_helper_vlsbu_v_h,
          gen_helper_vlsbu_v_w, gen_helper_vlsbu_v_d },
        { NULL,                 gen_helper_vlshu_v_h,
          gen_helper_vlshu_v_w, gen_helper_vlshu_v_d },
        { NULL,                 NULL,
          gen_helper_vlswu_v_w, gen_helper_vlswu_v_d },
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_stride_check(DisasContext *s, arg_rnfvm * a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vlsb_v, 0, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsh_v, 1, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsw_v, 2, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlse_v, 3, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlsbu_v, 4, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlshu_v, 5, rnfvm, ld_stride_op, ld_stride_check)
GEN_VEXT_TRANS(vlswu_v, 6, rnfvm, ld_stride_op, ld_stride_check)

static bool st_stride_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_stride *fn;
    static gen_helper_ldst_stride * const fns[4][4] = {
        /* masked stride store */
        { gen_helper_vssb_v_b,  gen_helper_vssb_v_h,
          gen_helper_vssb_v_w,  gen_helper_vssb_v_d },
        { NULL,                 gen_helper_vssh_v_h,
          gen_helper_vssh_v_w,  gen_helper_vssh_v_d },
        { NULL,                 NULL,
          gen_helper_vssw_v_w,  gen_helper_vssw_v_d },
        { gen_helper_vsse_v_b,  gen_helper_vsse_v_h,
          gen_helper_vsse_v_w,  gen_helper_vsse_v_d }
    };

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    return ldst_stride_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_stride_check(DisasContext *s, arg_rnfvm * a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vssb_v, 0, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vssh_v, 1, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vssw_v, 2, rnfvm, st_stride_op, st_stride_check)
GEN_VEXT_TRANS(vsse_v, 3, rnfvm, st_stride_op, st_stride_check)

/*
 *** index load and store
 */
typedef void gen_helper_ldst_index(TCGv_ptr, TCGv_ptr, TCGv,
                                   TCGv_ptr, TCGv_env, TCGv_i32);

static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                             uint32_t data, gen_helper_ldst_index *fn,
                             DisasContext *s)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ld_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[7][4] = {
        { gen_helper_vlxb_v_b,  gen_helper_vlxb_v_h,
          gen_helper_vlxb_v_w,  gen_helper_vlxb_v_d },
        { NULL,                 gen_helper_vlxh_v_h,
          gen_helper_vlxh_v_w,  gen_helper_vlxh_v_d },
        { NULL,                 NULL,
          gen_helper_vlxw_v_w,  gen_helper_vlxw_v_d },
        { gen_helper_vlxe_v_b,  gen_helper_vlxe_v_h,
          gen_helper_vlxe_v_w,  gen_helper_vlxe_v_d },
        { gen_helper_vlxbu_v_b, gen_helper_vlxbu_v_h,
          gen_helper_vlxbu_v_w, gen_helper_vlxbu_v_d },
        { NULL,                 gen_helper_vlxhu_v_h,
          gen_helper_vlxhu_v_w, gen_helper_vlxhu_v_d },
        { NULL,                 NULL,
          gen_helper_vlxwu_v_w, gen_helper_vlxwu_v_d },
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool ld_index_check(DisasContext *s, arg_rnfvm * a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_overlap_mask(s, a->rd, a->vm, false) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vlxb_v, 0, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxh_v, 1, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxw_v, 2, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxe_v, 3, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxbu_v, 4, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxhu_v, 5, rnfvm, ld_index_op, ld_index_check)
GEN_VEXT_TRANS(vlxwu_v, 6, rnfvm, ld_index_op, ld_index_check)

static bool st_index_op(DisasContext *s, arg_rnfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_index *fn;
    static gen_helper_ldst_index * const fns[4][4] = {
        { gen_helper_vsxb_v_b,  gen_helper_vsxb_v_h,
          gen_helper_vsxb_v_w,  gen_helper_vsxb_v_d },
        { NULL,                 gen_helper_vsxh_v_h,
          gen_helper_vsxh_v_w,  gen_helper_vsxh_v_d },
        { NULL,                 NULL,
          gen_helper_vsxw_v_w,  gen_helper_vsxw_v_d },
        { gen_helper_vsxe_v_b,  gen_helper_vsxe_v_h,
          gen_helper_vsxe_v_w,  gen_helper_vsxe_v_d }
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldst_index_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

static bool st_index_check(DisasContext *s, arg_rnfvm * a)
{
    return (vext_check_isa_ill(s) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            vext_check_nf(s, a->nf));
}

GEN_VEXT_TRANS(vsxb_v, 0, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxh_v, 1, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxw_v, 2, rnfvm, st_index_op, st_index_check)
GEN_VEXT_TRANS(vsxe_v, 3, rnfvm, st_index_op, st_index_check)

/*
 *** unit stride fault-only-first load
 */
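/*
 * A fault-only-first load takes a trap only if element 0 faults; a fault
 * on a later element instead truncates vl to the number of elements
 * already loaded. The translation below matches the unit-stride case,
 * with that behaviour implemented in the vl*ff helpers.
 */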
static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data,
                       gen_helper_ldst_us *fn, DisasContext *s)
{
    TCGv_ptr dest, mask;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool ldff_op(DisasContext *s, arg_r2nfvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_ldst_us *fn;
    static gen_helper_ldst_us * const fns[7][4] = {
        { gen_helper_vlbff_v_b,  gen_helper_vlbff_v_h,
          gen_helper_vlbff_v_w,  gen_helper_vlbff_v_d },
        { NULL,                  gen_helper_vlhff_v_h,
          gen_helper_vlhff_v_w,  gen_helper_vlhff_v_d },
        { NULL,                  NULL,
          gen_helper_vlwff_v_w,  gen_helper_vlwff_v_d },
        { gen_helper_vleff_v_b,  gen_helper_vleff_v_h,
          gen_helper_vleff_v_w,  gen_helper_vleff_v_d },
        { gen_helper_vlbuff_v_b, gen_helper_vlbuff_v_h,
          gen_helper_vlbuff_v_w, gen_helper_vlbuff_v_d },
        { NULL,                  gen_helper_vlhuff_v_h,
          gen_helper_vlhuff_v_w, gen_helper_vlhuff_v_d },
        { NULL,                  NULL,
          gen_helper_vlwuff_v_w, gen_helper_vlwuff_v_d }
    };

    fn = fns[seq][s->sew];
    if (fn == NULL) {
        return false;
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, NF, a->nf);
    return ldff_trans(a->rd, a->rs1, data, fn, s);
}

GEN_VEXT_TRANS(vlbff_v, 0, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlhff_v, 1, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlwff_v, 2, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vleff_v, 3, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlbuff_v, 4, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlhuff_v, 5, r2nfvm, ldff_op, ld_us_check)
GEN_VEXT_TRANS(vlwuff_v, 6, r2nfvm, ldff_op, ld_us_check)

/*
 *** vector atomic operation
 */
typedef void gen_helper_amo(TCGv_ptr, TCGv_ptr, TCGv, TCGv_ptr,
                            TCGv_env, TCGv_i32);

static bool amo_trans(uint32_t vd, uint32_t rs1, uint32_t vs2,
                      uint32_t data, gen_helper_amo *fn, DisasContext *s)
{
    TCGv_ptr dest, mask, index;
    TCGv base;
    TCGv_i32 desc;

    TCGLabel *over = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);

    dest = tcg_temp_new_ptr();
    mask = tcg_temp_new_ptr();
    index = tcg_temp_new_ptr();
    base = tcg_temp_new();
    desc = tcg_const_i32(simd_desc(0, s->vlen / 8, data));

    gen_get_gpr(base, rs1);
    tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd));
    tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2));
    tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0));

    fn(dest, mask, base, index, cpu_env, desc);

    tcg_temp_free_ptr(dest);
    tcg_temp_free_ptr(mask);
    tcg_temp_free_ptr(index);
    tcg_temp_free(base);
    tcg_temp_free_i32(desc);
    gen_set_label(over);
    return true;
}

static bool amo_op(DisasContext *s, arg_rwdvm *a, uint8_t seq)
{
    uint32_t data = 0;
    gen_helper_amo *fn;
    static gen_helper_amo *const fnsw[9] = {
        /* no atomic operation */
        gen_helper_vamoswapw_v_w,
        gen_helper_vamoaddw_v_w,
        gen_helper_vamoxorw_v_w,
        gen_helper_vamoandw_v_w,
        gen_helper_vamoorw_v_w,
        gen_helper_vamominw_v_w,
        gen_helper_vamomaxw_v_w,
        gen_helper_vamominuw_v_w,
        gen_helper_vamomaxuw_v_w
    };
#ifdef TARGET_RISCV64
    static gen_helper_amo *const fnsd[18] = {
        gen_helper_vamoswapw_v_d,
        gen_helper_vamoaddw_v_d,
        gen_helper_vamoxorw_v_d,
        gen_helper_vamoandw_v_d,
        gen_helper_vamoorw_v_d,
        gen_helper_vamominw_v_d,
        gen_helper_vamomaxw_v_d,
        gen_helper_vamominuw_v_d,
        gen_helper_vamomaxuw_v_d,
        gen_helper_vamoswapd_v_d,
        gen_helper_vamoaddd_v_d,
        gen_helper_vamoxord_v_d,
        gen_helper_vamoandd_v_d,
        gen_helper_vamoord_v_d,
        gen_helper_vamomind_v_d,
        gen_helper_vamomaxd_v_d,
        gen_helper_vamominud_v_d,
        gen_helper_vamomaxud_v_d
    };
#endif

    if (tb_cflags(s->base.tb) & CF_PARALLEL) {
        gen_helper_exit_atomic(cpu_env);
        s->base.is_jmp = DISAS_NORETURN;
        return true;
    } else {
        if (s->sew == 3) {
#ifdef TARGET_RISCV64
            fn = fnsd[seq];
#else
            /* Check done in amo_check(). */
            g_assert_not_reached();
#endif
        } else {
            fn = fnsw[seq];
        }
    }

    data = FIELD_DP32(data, VDATA, MLEN, s->mlen);
    data = FIELD_DP32(data, VDATA, VM, a->vm);
    data = FIELD_DP32(data, VDATA, LMUL, s->lmul);
    data = FIELD_DP32(data, VDATA, WD, a->wd);
    return amo_trans(a->rd, a->rs1, a->rs2, data, fn, s);
}

/*
 * There are two rules checked here.
 *
 * 1. SEW must be at least as wide as the AMO memory element size.
 *
 * 2. If SEW is greater than XLEN, an illegal instruction exception is raised.
 */
static bool amo_check(DisasContext *s, arg_rwdvm * a)
{
    return (!s->vill && has_ext(s, RVA) &&
            (!a->wd || vext_check_overlap_mask(s, a->rd, a->vm, false)) &&
            vext_check_reg(s, a->rd, false) &&
            vext_check_reg(s, a->rs2, false) &&
            ((1 << s->sew) <= sizeof(target_ulong)) &&
            ((1 << s->sew) >= 4));
}

GEN_VEXT_TRANS(vamoswapw_v, 0, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoaddw_v, 1, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoxorw_v, 2, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoandw_v, 3, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoorw_v, 4, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominw_v, 5, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxw_v, 6, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominuw_v, 7, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxuw_v, 8, rwdvm, amo_op, amo_check)
#ifdef TARGET_RISCV64
GEN_VEXT_TRANS(vamoswapd_v, 9, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoaddd_v, 10, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoxord_v, 11, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoandd_v, 12, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamoord_v, 13, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomind_v, 14, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxd_v, 15, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamominud_v, 16, rwdvm, amo_op, amo_check)
GEN_VEXT_TRANS(vamomaxud_v, 17, rwdvm, amo_op, amo_check)
#endif