2 * RISC-V Vector Extension Helpers for QEMU.
4 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
19 #include "qemu/osdep.h"
20 #include "qemu/host-utils.h"
21 #include "qemu/bitops.h"
23 #include "exec/memop.h"
24 #include "exec/exec-all.h"
25 #include "exec/helper-proto.h"
26 #include "fpu/softfloat.h"
27 #include "tcg/tcg-gvec-desc.h"
28 #include "internals.h"
31 target_ulong
HELPER(vsetvl
)(CPURISCVState
*env
, target_ulong s1
,
35 RISCVCPU
*cpu
= env_archcpu(env
);
36 uint64_t lmul
= FIELD_EX64(s2
, VTYPE
, VLMUL
);
37 uint16_t sew
= 8 << FIELD_EX64(s2
, VTYPE
, VSEW
);
38 uint8_t ediv
= FIELD_EX64(s2
, VTYPE
, VEDIV
);
39 int xlen
= riscv_cpu_xlen(env
);
40 bool vill
= (s2
>> (xlen
- 1)) & 0x1;
41 target_ulong reserved
= s2
&
42 MAKE_64BIT_MASK(R_VTYPE_RESERVED_SHIFT
,
43 xlen
- 1 - R_VTYPE_RESERVED_SHIFT
);
46 /* Fractional LMUL. */
48 cpu
->cfg
.elen
>> (8 - lmul
) < sew
) {
53 if ((sew
> cpu
->cfg
.elen
)
57 /* only set vill bit. */
65 vlmax
= vext_get_vlmax(cpu
, s2
);
/*
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing units smaller than that needs a host-endian fixup.
 * The H* macros map an element index to its position within a chunk;
 * the source text lost the little-endian branch — restored here.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define H1(x)   ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x)   ((x) ^ 3)
#define H4(x)   ((x) ^ 1)
#define H8(x)   ((x))
#else
#define H1(x)   (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x)   (x)
#define H4(x)   (x)
#define H8(x)   (x)
#endif
98 static inline uint32_t vext_nf(uint32_t desc
)
100 return FIELD_EX32(simd_data(desc
), VDATA
, NF
);
103 static inline uint32_t vext_vm(uint32_t desc
)
105 return FIELD_EX32(simd_data(desc
), VDATA
, VM
);
109 * Encode LMUL to lmul as following:
120 static inline int32_t vext_lmul(uint32_t desc
)
122 return sextract32(FIELD_EX32(simd_data(desc
), VDATA
, LMUL
), 0, 3);
126 * Get the maximum number of elements can be operated.
128 * esz: log2 of element size in bytes.
130 static inline uint32_t vext_max_elems(uint32_t desc
, uint32_t esz
)
133 * As simd_desc support at most 2048 bytes, the max vlen is 1024 bits.
134 * so vlen in bytes (vlenb) is encoded as maxsz.
136 uint32_t vlenb
= simd_maxsz(desc
);
139 int scale
= vext_lmul(desc
) - esz
;
140 return scale
< 0 ? vlenb
>> -scale
: vlenb
<< scale
;
143 static inline target_ulong
adjust_addr(CPURISCVState
*env
, target_ulong addr
)
145 return (addr
& env
->cur_pmmask
) | env
->cur_pmbase
;
149 * This function checks watchpoint before real load operation.
151 * In softmmu mode, the TLB API probe_access is enough for watchpoint check.
152 * In user mode, there is no watchpoint support now.
154 * It will trigger an exception if there is no mapping in TLB
155 * and page table walk can't fill the TLB entry. Then the guest
156 * software can return here after process the exception or never return.
158 static void probe_pages(CPURISCVState
*env
, target_ulong addr
,
159 target_ulong len
, uintptr_t ra
,
160 MMUAccessType access_type
)
162 target_ulong pagelen
= -(addr
| TARGET_PAGE_MASK
);
163 target_ulong curlen
= MIN(pagelen
, len
);
165 probe_access(env
, adjust_addr(env
, addr
), curlen
, access_type
,
166 cpu_mmu_index(env
, false), ra
);
169 curlen
= len
- curlen
;
170 probe_access(env
, adjust_addr(env
, addr
), curlen
, access_type
,
171 cpu_mmu_index(env
, false), ra
);
175 static inline void vext_set_elem_mask(void *v0
, int index
,
178 int idx
= index
/ 64;
179 int pos
= index
% 64;
180 uint64_t old
= ((uint64_t *)v0
)[idx
];
181 ((uint64_t *)v0
)[idx
] = deposit64(old
, pos
, 1, value
);
/*
 * Earlier designs (pre-0.9) had a varying number of bits
 * per mask value (MLEN). In the 0.9 design, MLEN=1.
 * Read bit `index` of the mask register stored as host 64-bit chunks.
 */
static inline int vext_elem_mask(void *v0, int index)
{
    int idx = index / 64;
    int pos = index % 64;
    return (((uint64_t *)v0)[idx] >> pos) & 1;
}
196 /* elements operations for load and store */
197 typedef void vext_ldst_elem_fn(CPURISCVState
*env
, target_ulong addr
,
198 uint32_t idx
, void *vd
, uintptr_t retaddr
);
200 #define GEN_VEXT_LD_ELEM(NAME, ETYPE, H, LDSUF) \
201 static void NAME(CPURISCVState *env, abi_ptr addr, \
202 uint32_t idx, void *vd, uintptr_t retaddr)\
204 ETYPE *cur = ((ETYPE *)vd + H(idx)); \
205 *cur = cpu_##LDSUF##_data_ra(env, addr, retaddr); \
208 GEN_VEXT_LD_ELEM(lde_b, int8_t, H1, ldsb)
209 GEN_VEXT_LD_ELEM(lde_h
, int16_t, H2
, ldsw
)
210 GEN_VEXT_LD_ELEM(lde_w
, int32_t, H4
, ldl
)
211 GEN_VEXT_LD_ELEM(lde_d
, int64_t, H8
, ldq
)
213 #define GEN_VEXT_ST_ELEM(NAME, ETYPE, H, STSUF) \
214 static void NAME(CPURISCVState *env, abi_ptr addr, \
215 uint32_t idx, void *vd, uintptr_t retaddr)\
217 ETYPE data = *((ETYPE *)vd + H(idx)); \
218 cpu_##STSUF##_data_ra(env, addr, data, retaddr); \
221 GEN_VEXT_ST_ELEM(ste_b
, int8_t, H1
, stb
)
222 GEN_VEXT_ST_ELEM(ste_h
, int16_t, H2
, stw
)
223 GEN_VEXT_ST_ELEM(ste_w
, int32_t, H4
, stl
)
224 GEN_VEXT_ST_ELEM(ste_d
, int64_t, H8
, stq
)
227 *** stride: access vector element from strided memory
230 vext_ldst_stride(void *vd
, void *v0
, target_ulong base
,
231 target_ulong stride
, CPURISCVState
*env
,
232 uint32_t desc
, uint32_t vm
,
233 vext_ldst_elem_fn
*ldst_elem
,
234 uint32_t esz
, uintptr_t ra
, MMUAccessType access_type
)
237 uint32_t nf
= vext_nf(desc
);
238 uint32_t max_elems
= vext_max_elems(desc
, esz
);
240 for (i
= env
->vstart
; i
< env
->vl
; i
++, env
->vstart
++) {
241 if (!vm
&& !vext_elem_mask(v0
, i
)) {
247 target_ulong addr
= base
+ stride
* i
+ (k
<< esz
);
248 ldst_elem(env
, adjust_addr(env
, addr
), i
+ k
* max_elems
, vd
, ra
);
255 #define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \
256 void HELPER(NAME)(void *vd, void * v0, target_ulong base, \
257 target_ulong stride, CPURISCVState *env, \
260 uint32_t vm = vext_vm(desc); \
261 vext_ldst_stride(vd, v0, base, stride, env, desc, vm, LOAD_FN, \
262 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
265 GEN_VEXT_LD_STRIDE(vlse8_v
, int8_t, lde_b
)
266 GEN_VEXT_LD_STRIDE(vlse16_v
, int16_t, lde_h
)
267 GEN_VEXT_LD_STRIDE(vlse32_v
, int32_t, lde_w
)
268 GEN_VEXT_LD_STRIDE(vlse64_v
, int64_t, lde_d
)
270 #define GEN_VEXT_ST_STRIDE(NAME, ETYPE, STORE_FN) \
271 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
272 target_ulong stride, CPURISCVState *env, \
275 uint32_t vm = vext_vm(desc); \
276 vext_ldst_stride(vd, v0, base, stride, env, desc, vm, STORE_FN, \
277 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \
280 GEN_VEXT_ST_STRIDE(vsse8_v
, int8_t, ste_b
)
281 GEN_VEXT_ST_STRIDE(vsse16_v
, int16_t, ste_h
)
282 GEN_VEXT_ST_STRIDE(vsse32_v
, int32_t, ste_w
)
283 GEN_VEXT_ST_STRIDE(vsse64_v
, int64_t, ste_d
)
286 *** unit-stride: access elements stored contiguously in memory
289 /* unmasked unit-stride load and store operation*/
291 vext_ldst_us(void *vd
, target_ulong base
, CPURISCVState
*env
, uint32_t desc
,
292 vext_ldst_elem_fn
*ldst_elem
, uint32_t esz
, uint32_t evl
,
293 uintptr_t ra
, MMUAccessType access_type
)
296 uint32_t nf
= vext_nf(desc
);
297 uint32_t max_elems
= vext_max_elems(desc
, esz
);
299 /* load bytes from guest memory */
300 for (i
= env
->vstart
; i
< evl
; i
++, env
->vstart
++) {
303 target_ulong addr
= base
+ ((i
* nf
+ k
) << esz
);
304 ldst_elem(env
, adjust_addr(env
, addr
), i
+ k
* max_elems
, vd
, ra
);
312 * masked unit-stride load and store operation will be a special case of stride,
313 * stride = NF * sizeof (MTYPE)
316 #define GEN_VEXT_LD_US(NAME, ETYPE, LOAD_FN) \
317 void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
318 CPURISCVState *env, uint32_t desc) \
320 uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
321 vext_ldst_stride(vd, v0, base, stride, env, desc, false, LOAD_FN, \
322 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
325 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
326 CPURISCVState *env, uint32_t desc) \
328 vext_ldst_us(vd, base, env, desc, LOAD_FN, \
329 ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_LOAD); \
332 GEN_VEXT_LD_US(vle8_v
, int8_t, lde_b
)
333 GEN_VEXT_LD_US(vle16_v
, int16_t, lde_h
)
334 GEN_VEXT_LD_US(vle32_v
, int32_t, lde_w
)
335 GEN_VEXT_LD_US(vle64_v
, int64_t, lde_d
)
337 #define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN) \
338 void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base, \
339 CPURISCVState *env, uint32_t desc) \
341 uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE)); \
342 vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN, \
343 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE); \
346 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
347 CPURISCVState *env, uint32_t desc) \
349 vext_ldst_us(vd, base, env, desc, STORE_FN, \
350 ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_STORE); \
353 GEN_VEXT_ST_US(vse8_v
, int8_t, ste_b
)
354 GEN_VEXT_ST_US(vse16_v
, int16_t, ste_h
)
355 GEN_VEXT_ST_US(vse32_v
, int32_t, ste_w
)
356 GEN_VEXT_ST_US(vse64_v
, int64_t, ste_d
)
359 *** unit stride mask load and store, EEW = 1
361 void HELPER(vlm_v
)(void *vd
, void *v0
, target_ulong base
,
362 CPURISCVState
*env
, uint32_t desc
)
364 /* evl = ceil(vl/8) */
365 uint8_t evl
= (env
->vl
+ 7) >> 3;
366 vext_ldst_us(vd
, base
, env
, desc
, lde_b
,
367 0, evl
, GETPC(), MMU_DATA_LOAD
);
370 void HELPER(vsm_v
)(void *vd
, void *v0
, target_ulong base
,
371 CPURISCVState
*env
, uint32_t desc
)
373 /* evl = ceil(vl/8) */
374 uint8_t evl
= (env
->vl
+ 7) >> 3;
375 vext_ldst_us(vd
, base
, env
, desc
, ste_b
,
376 0, evl
, GETPC(), MMU_DATA_STORE
);
380 *** index: access vector element from indexed memory
382 typedef target_ulong
vext_get_index_addr(target_ulong base
,
383 uint32_t idx
, void *vs2
);
385 #define GEN_VEXT_GET_INDEX_ADDR(NAME, ETYPE, H) \
386 static target_ulong NAME(target_ulong base, \
387 uint32_t idx, void *vs2) \
389 return (base + *((ETYPE *)vs2 + H(idx))); \
392 GEN_VEXT_GET_INDEX_ADDR(idx_b
, uint8_t, H1
)
393 GEN_VEXT_GET_INDEX_ADDR(idx_h
, uint16_t, H2
)
394 GEN_VEXT_GET_INDEX_ADDR(idx_w
, uint32_t, H4
)
395 GEN_VEXT_GET_INDEX_ADDR(idx_d
, uint64_t, H8
)
398 vext_ldst_index(void *vd
, void *v0
, target_ulong base
,
399 void *vs2
, CPURISCVState
*env
, uint32_t desc
,
400 vext_get_index_addr get_index_addr
,
401 vext_ldst_elem_fn
*ldst_elem
,
402 uint32_t esz
, uintptr_t ra
, MMUAccessType access_type
)
405 uint32_t nf
= vext_nf(desc
);
406 uint32_t vm
= vext_vm(desc
);
407 uint32_t max_elems
= vext_max_elems(desc
, esz
);
409 /* load bytes from guest memory */
410 for (i
= env
->vstart
; i
< env
->vl
; i
++, env
->vstart
++) {
411 if (!vm
&& !vext_elem_mask(v0
, i
)) {
417 abi_ptr addr
= get_index_addr(base
, i
, vs2
) + (k
<< esz
);
418 ldst_elem(env
, adjust_addr(env
, addr
), i
+ k
* max_elems
, vd
, ra
);
425 #define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \
426 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
427 void *vs2, CPURISCVState *env, uint32_t desc) \
429 vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
430 LOAD_FN, ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD); \
433 GEN_VEXT_LD_INDEX(vlxei8_8_v
, int8_t, idx_b
, lde_b
)
434 GEN_VEXT_LD_INDEX(vlxei8_16_v
, int16_t, idx_b
, lde_h
)
435 GEN_VEXT_LD_INDEX(vlxei8_32_v
, int32_t, idx_b
, lde_w
)
436 GEN_VEXT_LD_INDEX(vlxei8_64_v
, int64_t, idx_b
, lde_d
)
437 GEN_VEXT_LD_INDEX(vlxei16_8_v
, int8_t, idx_h
, lde_b
)
438 GEN_VEXT_LD_INDEX(vlxei16_16_v
, int16_t, idx_h
, lde_h
)
439 GEN_VEXT_LD_INDEX(vlxei16_32_v
, int32_t, idx_h
, lde_w
)
440 GEN_VEXT_LD_INDEX(vlxei16_64_v
, int64_t, idx_h
, lde_d
)
441 GEN_VEXT_LD_INDEX(vlxei32_8_v
, int8_t, idx_w
, lde_b
)
442 GEN_VEXT_LD_INDEX(vlxei32_16_v
, int16_t, idx_w
, lde_h
)
443 GEN_VEXT_LD_INDEX(vlxei32_32_v
, int32_t, idx_w
, lde_w
)
444 GEN_VEXT_LD_INDEX(vlxei32_64_v
, int64_t, idx_w
, lde_d
)
445 GEN_VEXT_LD_INDEX(vlxei64_8_v
, int8_t, idx_d
, lde_b
)
446 GEN_VEXT_LD_INDEX(vlxei64_16_v
, int16_t, idx_d
, lde_h
)
447 GEN_VEXT_LD_INDEX(vlxei64_32_v
, int32_t, idx_d
, lde_w
)
448 GEN_VEXT_LD_INDEX(vlxei64_64_v
, int64_t, idx_d
, lde_d
)
450 #define GEN_VEXT_ST_INDEX(NAME, ETYPE, INDEX_FN, STORE_FN) \
451 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
452 void *vs2, CPURISCVState *env, uint32_t desc) \
454 vext_ldst_index(vd, v0, base, vs2, env, desc, INDEX_FN, \
455 STORE_FN, ctzl(sizeof(ETYPE)), \
456 GETPC(), MMU_DATA_STORE); \
459 GEN_VEXT_ST_INDEX(vsxei8_8_v
, int8_t, idx_b
, ste_b
)
460 GEN_VEXT_ST_INDEX(vsxei8_16_v
, int16_t, idx_b
, ste_h
)
461 GEN_VEXT_ST_INDEX(vsxei8_32_v
, int32_t, idx_b
, ste_w
)
462 GEN_VEXT_ST_INDEX(vsxei8_64_v
, int64_t, idx_b
, ste_d
)
463 GEN_VEXT_ST_INDEX(vsxei16_8_v
, int8_t, idx_h
, ste_b
)
464 GEN_VEXT_ST_INDEX(vsxei16_16_v
, int16_t, idx_h
, ste_h
)
465 GEN_VEXT_ST_INDEX(vsxei16_32_v
, int32_t, idx_h
, ste_w
)
466 GEN_VEXT_ST_INDEX(vsxei16_64_v
, int64_t, idx_h
, ste_d
)
467 GEN_VEXT_ST_INDEX(vsxei32_8_v
, int8_t, idx_w
, ste_b
)
468 GEN_VEXT_ST_INDEX(vsxei32_16_v
, int16_t, idx_w
, ste_h
)
469 GEN_VEXT_ST_INDEX(vsxei32_32_v
, int32_t, idx_w
, ste_w
)
470 GEN_VEXT_ST_INDEX(vsxei32_64_v
, int64_t, idx_w
, ste_d
)
471 GEN_VEXT_ST_INDEX(vsxei64_8_v
, int8_t, idx_d
, ste_b
)
472 GEN_VEXT_ST_INDEX(vsxei64_16_v
, int16_t, idx_d
, ste_h
)
473 GEN_VEXT_ST_INDEX(vsxei64_32_v
, int32_t, idx_d
, ste_w
)
474 GEN_VEXT_ST_INDEX(vsxei64_64_v
, int64_t, idx_d
, ste_d
)
477 *** unit-stride fault-only-fisrt load instructions
480 vext_ldff(void *vd
, void *v0
, target_ulong base
,
481 CPURISCVState
*env
, uint32_t desc
,
482 vext_ldst_elem_fn
*ldst_elem
,
483 uint32_t esz
, uintptr_t ra
)
486 uint32_t i
, k
, vl
= 0;
487 uint32_t nf
= vext_nf(desc
);
488 uint32_t vm
= vext_vm(desc
);
489 uint32_t max_elems
= vext_max_elems(desc
, esz
);
490 target_ulong addr
, offset
, remain
;
492 /* probe every access*/
493 for (i
= env
->vstart
; i
< env
->vl
; i
++) {
494 if (!vm
&& !vext_elem_mask(v0
, i
)) {
497 addr
= adjust_addr(env
, base
+ i
* (nf
<< esz
));
499 probe_pages(env
, addr
, nf
<< esz
, ra
, MMU_DATA_LOAD
);
501 /* if it triggers an exception, no need to check watchpoint */
504 offset
= -(addr
| TARGET_PAGE_MASK
);
505 host
= tlb_vaddr_to_host(env
, addr
, MMU_DATA_LOAD
,
506 cpu_mmu_index(env
, false));
508 #ifdef CONFIG_USER_ONLY
509 if (page_check_range(addr
, offset
, PAGE_READ
) < 0) {
514 probe_pages(env
, addr
, offset
, ra
, MMU_DATA_LOAD
);
520 if (remain
<= offset
) {
524 addr
= adjust_addr(env
, addr
+ offset
);
529 /* load bytes from guest memory */
533 for (i
= env
->vstart
; i
< env
->vl
; i
++) {
535 if (!vm
&& !vext_elem_mask(v0
, i
)) {
539 target_ulong addr
= base
+ ((i
* nf
+ k
) << esz
);
540 ldst_elem(env
, adjust_addr(env
, addr
), i
+ k
* max_elems
, vd
, ra
);
547 #define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \
548 void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
549 CPURISCVState *env, uint32_t desc) \
551 vext_ldff(vd, v0, base, env, desc, LOAD_FN, \
552 ctzl(sizeof(ETYPE)), GETPC()); \
555 GEN_VEXT_LDFF(vle8ff_v
, int8_t, lde_b
)
556 GEN_VEXT_LDFF(vle16ff_v
, int16_t, lde_h
)
557 GEN_VEXT_LDFF(vle32ff_v
, int32_t, lde_w
)
558 GEN_VEXT_LDFF(vle64ff_v
, int64_t, lde_d
)
/* Elementary binary operations used by the arithmetic helpers below. */
#define DO_SWAP(N, M) (M)
#define DO_AND(N, M)  (N & M)
#define DO_XOR(N, M)  (N ^ M)
#define DO_OR(N, M)   (N | M)
#define DO_ADD(N, M)  (N + M)

/* Signed min/max */
#define DO_MAX(N, M)  ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M)  ((N) >= (M) ? (M) : (N))

/* Unsigned min/max */
#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M)
#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M)
575 *** load and store whole register instructions
578 vext_ldst_whole(void *vd
, target_ulong base
, CPURISCVState
*env
, uint32_t desc
,
579 vext_ldst_elem_fn
*ldst_elem
, uint32_t esz
, uintptr_t ra
,
580 MMUAccessType access_type
)
582 uint32_t i
, k
, off
, pos
;
583 uint32_t nf
= vext_nf(desc
);
584 uint32_t vlenb
= env_archcpu(env
)->cfg
.vlen
>> 3;
585 uint32_t max_elems
= vlenb
>> esz
;
587 k
= env
->vstart
/ max_elems
;
588 off
= env
->vstart
% max_elems
;
591 /* load/store rest of elements of current segment pointed by vstart */
592 for (pos
= off
; pos
< max_elems
; pos
++, env
->vstart
++) {
593 target_ulong addr
= base
+ ((pos
+ k
* max_elems
) << esz
);
594 ldst_elem(env
, adjust_addr(env
, addr
), pos
+ k
* max_elems
, vd
, ra
);
599 /* load/store elements for rest of segments */
600 for (; k
< nf
; k
++) {
601 for (i
= 0; i
< max_elems
; i
++, env
->vstart
++) {
602 target_ulong addr
= base
+ ((i
+ k
* max_elems
) << esz
);
603 ldst_elem(env
, adjust_addr(env
, addr
), i
+ k
* max_elems
, vd
, ra
);
610 #define GEN_VEXT_LD_WHOLE(NAME, ETYPE, LOAD_FN) \
611 void HELPER(NAME)(void *vd, target_ulong base, \
612 CPURISCVState *env, uint32_t desc) \
614 vext_ldst_whole(vd, base, env, desc, LOAD_FN, \
615 ctzl(sizeof(ETYPE)), GETPC(), \
619 GEN_VEXT_LD_WHOLE(vl1re8_v
, int8_t, lde_b
)
620 GEN_VEXT_LD_WHOLE(vl1re16_v
, int16_t, lde_h
)
621 GEN_VEXT_LD_WHOLE(vl1re32_v
, int32_t, lde_w
)
622 GEN_VEXT_LD_WHOLE(vl1re64_v
, int64_t, lde_d
)
623 GEN_VEXT_LD_WHOLE(vl2re8_v
, int8_t, lde_b
)
624 GEN_VEXT_LD_WHOLE(vl2re16_v
, int16_t, lde_h
)
625 GEN_VEXT_LD_WHOLE(vl2re32_v
, int32_t, lde_w
)
626 GEN_VEXT_LD_WHOLE(vl2re64_v
, int64_t, lde_d
)
627 GEN_VEXT_LD_WHOLE(vl4re8_v
, int8_t, lde_b
)
628 GEN_VEXT_LD_WHOLE(vl4re16_v
, int16_t, lde_h
)
629 GEN_VEXT_LD_WHOLE(vl4re32_v
, int32_t, lde_w
)
630 GEN_VEXT_LD_WHOLE(vl4re64_v
, int64_t, lde_d
)
631 GEN_VEXT_LD_WHOLE(vl8re8_v
, int8_t, lde_b
)
632 GEN_VEXT_LD_WHOLE(vl8re16_v
, int16_t, lde_h
)
633 GEN_VEXT_LD_WHOLE(vl8re32_v
, int32_t, lde_w
)
634 GEN_VEXT_LD_WHOLE(vl8re64_v
, int64_t, lde_d
)
636 #define GEN_VEXT_ST_WHOLE(NAME, ETYPE, STORE_FN) \
637 void HELPER(NAME)(void *vd, target_ulong base, \
638 CPURISCVState *env, uint32_t desc) \
640 vext_ldst_whole(vd, base, env, desc, STORE_FN, \
641 ctzl(sizeof(ETYPE)), GETPC(), \
645 GEN_VEXT_ST_WHOLE(vs1r_v
, int8_t, ste_b
)
646 GEN_VEXT_ST_WHOLE(vs2r_v
, int8_t, ste_b
)
647 GEN_VEXT_ST_WHOLE(vs4r_v
, int8_t, ste_b
)
648 GEN_VEXT_ST_WHOLE(vs8r_v
, int8_t, ste_b
)
/*
 *** Vector Integer Arithmetic Instructions
 */

/* expand macro args before macro */
#define RVVCALL(macro, ...)  macro(__VA_ARGS__)

/* (TD, T1, T2, TX1, TX2) */
#define OP_SSS_B int8_t, int8_t, int8_t, int8_t, int8_t
#define OP_SSS_H int16_t, int16_t, int16_t, int16_t, int16_t
#define OP_SSS_W int32_t, int32_t, int32_t, int32_t, int32_t
#define OP_SSS_D int64_t, int64_t, int64_t, int64_t, int64_t
#define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
#define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
#define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
#define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
#define OP_SUS_B int8_t, uint8_t, int8_t, uint8_t, int8_t
#define OP_SUS_H int16_t, uint16_t, int16_t, uint16_t, int16_t
#define OP_SUS_W int32_t, uint32_t, int32_t, uint32_t, int32_t
#define OP_SUS_D int64_t, uint64_t, int64_t, uint64_t, int64_t
#define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
#define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
#define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
#define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
#define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
#define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
#define WOP_SUS_B int16_t, uint8_t, int8_t, uint16_t, int16_t
#define WOP_SUS_H int32_t, uint16_t, int16_t, uint32_t, int32_t
#define WOP_SUS_W int64_t, uint32_t, int32_t, uint64_t, int64_t
#define WOP_SSU_B int16_t, int8_t, uint8_t, int16_t, uint16_t
#define WOP_SSU_H int32_t, int16_t, uint16_t, int32_t, uint32_t
#define WOP_SSU_W int64_t, int32_t, uint32_t, int64_t, uint64_t
#define NOP_SSS_B int8_t, int8_t, int16_t, int8_t, int16_t
#define NOP_SSS_H int16_t, int16_t, int32_t, int16_t, int32_t
#define NOP_SSS_W int32_t, int32_t, int64_t, int32_t, int64_t
#define NOP_UUU_B uint8_t, uint8_t, uint16_t, uint8_t, uint16_t
#define NOP_UUU_H uint16_t, uint16_t, uint32_t, uint16_t, uint32_t
#define NOP_UUU_W uint32_t, uint32_t, uint64_t, uint32_t, uint64_t
/* operation of two vector elements */
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);

/* Define do_NAME operating on one element pair via OP. */
#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)    \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i)    \
{                                                               \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                             \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                             \
    *((TD *)vd + HD(i)) = OP(s2, s1);                           \
}

#define DO_SUB(N, M) (N - M)
#define DO_RSUB(N, M) (M - N)
702 RVVCALL(OPIVV2
, vadd_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_ADD
)
703 RVVCALL(OPIVV2
, vadd_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_ADD
)
704 RVVCALL(OPIVV2
, vadd_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_ADD
)
705 RVVCALL(OPIVV2
, vadd_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_ADD
)
706 RVVCALL(OPIVV2
, vsub_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_SUB
)
707 RVVCALL(OPIVV2
, vsub_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_SUB
)
708 RVVCALL(OPIVV2
, vsub_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_SUB
)
709 RVVCALL(OPIVV2
, vsub_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_SUB
)
711 static void do_vext_vv(void *vd
, void *v0
, void *vs1
, void *vs2
,
712 CPURISCVState
*env
, uint32_t desc
,
713 uint32_t esz
, uint32_t dsz
,
716 uint32_t vm
= vext_vm(desc
);
717 uint32_t vl
= env
->vl
;
720 for (i
= env
->vstart
; i
< vl
; i
++) {
721 if (!vm
&& !vext_elem_mask(v0
, i
)) {
729 /* generate the helpers for OPIVV */
730 #define GEN_VEXT_VV(NAME, ESZ, DSZ) \
731 void HELPER(NAME)(void *vd, void *v0, void *vs1, \
732 void *vs2, CPURISCVState *env, \
735 do_vext_vv(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \
739 GEN_VEXT_VV(vadd_vv_b
, 1, 1)
740 GEN_VEXT_VV(vadd_vv_h
, 2, 2)
741 GEN_VEXT_VV(vadd_vv_w
, 4, 4)
742 GEN_VEXT_VV(vadd_vv_d
, 8, 8)
743 GEN_VEXT_VV(vsub_vv_b
, 1, 1)
744 GEN_VEXT_VV(vsub_vv_h
, 2, 2)
745 GEN_VEXT_VV(vsub_vv_w
, 4, 4)
746 GEN_VEXT_VV(vsub_vv_d
, 8, 8)
748 typedef void opivx2_fn(void *vd
, target_long s1
, void *vs2
, int i
);
751 * (T1)s1 gives the real operator type.
752 * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
754 #define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
755 static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
757 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
758 *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1); \
761 RVVCALL(OPIVX2
, vadd_vx_b
, OP_SSS_B
, H1
, H1
, DO_ADD
)
762 RVVCALL(OPIVX2
, vadd_vx_h
, OP_SSS_H
, H2
, H2
, DO_ADD
)
763 RVVCALL(OPIVX2
, vadd_vx_w
, OP_SSS_W
, H4
, H4
, DO_ADD
)
764 RVVCALL(OPIVX2
, vadd_vx_d
, OP_SSS_D
, H8
, H8
, DO_ADD
)
765 RVVCALL(OPIVX2
, vsub_vx_b
, OP_SSS_B
, H1
, H1
, DO_SUB
)
766 RVVCALL(OPIVX2
, vsub_vx_h
, OP_SSS_H
, H2
, H2
, DO_SUB
)
767 RVVCALL(OPIVX2
, vsub_vx_w
, OP_SSS_W
, H4
, H4
, DO_SUB
)
768 RVVCALL(OPIVX2
, vsub_vx_d
, OP_SSS_D
, H8
, H8
, DO_SUB
)
769 RVVCALL(OPIVX2
, vrsub_vx_b
, OP_SSS_B
, H1
, H1
, DO_RSUB
)
770 RVVCALL(OPIVX2
, vrsub_vx_h
, OP_SSS_H
, H2
, H2
, DO_RSUB
)
771 RVVCALL(OPIVX2
, vrsub_vx_w
, OP_SSS_W
, H4
, H4
, DO_RSUB
)
772 RVVCALL(OPIVX2
, vrsub_vx_d
, OP_SSS_D
, H8
, H8
, DO_RSUB
)
774 static void do_vext_vx(void *vd
, void *v0
, target_long s1
, void *vs2
,
775 CPURISCVState
*env
, uint32_t desc
,
776 uint32_t esz
, uint32_t dsz
,
779 uint32_t vm
= vext_vm(desc
);
780 uint32_t vl
= env
->vl
;
783 for (i
= env
->vstart
; i
< vl
; i
++) {
784 if (!vm
&& !vext_elem_mask(v0
, i
)) {
792 /* generate the helpers for OPIVX */
793 #define GEN_VEXT_VX(NAME, ESZ, DSZ) \
794 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
795 void *vs2, CPURISCVState *env, \
798 do_vext_vx(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \
802 GEN_VEXT_VX(vadd_vx_b
, 1, 1)
803 GEN_VEXT_VX(vadd_vx_h
, 2, 2)
804 GEN_VEXT_VX(vadd_vx_w
, 4, 4)
805 GEN_VEXT_VX(vadd_vx_d
, 8, 8)
806 GEN_VEXT_VX(vsub_vx_b
, 1, 1)
807 GEN_VEXT_VX(vsub_vx_h
, 2, 2)
808 GEN_VEXT_VX(vsub_vx_w
, 4, 4)
809 GEN_VEXT_VX(vsub_vx_d
, 8, 8)
810 GEN_VEXT_VX(vrsub_vx_b
, 1, 1)
811 GEN_VEXT_VX(vrsub_vx_h
, 2, 2)
812 GEN_VEXT_VX(vrsub_vx_w
, 4, 4)
813 GEN_VEXT_VX(vrsub_vx_d
, 8, 8)
815 void HELPER(vec_rsubs8
)(void *d
, void *a
, uint64_t b
, uint32_t desc
)
817 intptr_t oprsz
= simd_oprsz(desc
);
820 for (i
= 0; i
< oprsz
; i
+= sizeof(uint8_t)) {
821 *(uint8_t *)(d
+ i
) = (uint8_t)b
- *(uint8_t *)(a
+ i
);
825 void HELPER(vec_rsubs16
)(void *d
, void *a
, uint64_t b
, uint32_t desc
)
827 intptr_t oprsz
= simd_oprsz(desc
);
830 for (i
= 0; i
< oprsz
; i
+= sizeof(uint16_t)) {
831 *(uint16_t *)(d
+ i
) = (uint16_t)b
- *(uint16_t *)(a
+ i
);
835 void HELPER(vec_rsubs32
)(void *d
, void *a
, uint64_t b
, uint32_t desc
)
837 intptr_t oprsz
= simd_oprsz(desc
);
840 for (i
= 0; i
< oprsz
; i
+= sizeof(uint32_t)) {
841 *(uint32_t *)(d
+ i
) = (uint32_t)b
- *(uint32_t *)(a
+ i
);
845 void HELPER(vec_rsubs64
)(void *d
, void *a
, uint64_t b
, uint32_t desc
)
847 intptr_t oprsz
= simd_oprsz(desc
);
850 for (i
= 0; i
< oprsz
; i
+= sizeof(uint64_t)) {
851 *(uint64_t *)(d
+ i
) = b
- *(uint64_t *)(a
+ i
);
855 /* Vector Widening Integer Add/Subtract */
856 #define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
857 #define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
858 #define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
859 #define WOP_SSS_B int16_t, int8_t, int8_t, int16_t, int16_t
860 #define WOP_SSS_H int32_t, int16_t, int16_t, int32_t, int32_t
861 #define WOP_SSS_W int64_t, int32_t, int32_t, int64_t, int64_t
862 #define WOP_WUUU_B uint16_t, uint8_t, uint16_t, uint16_t, uint16_t
863 #define WOP_WUUU_H uint32_t, uint16_t, uint32_t, uint32_t, uint32_t
864 #define WOP_WUUU_W uint64_t, uint32_t, uint64_t, uint64_t, uint64_t
865 #define WOP_WSSS_B int16_t, int8_t, int16_t, int16_t, int16_t
866 #define WOP_WSSS_H int32_t, int16_t, int32_t, int32_t, int32_t
867 #define WOP_WSSS_W int64_t, int32_t, int64_t, int64_t, int64_t
868 RVVCALL(OPIVV2
, vwaddu_vv_b
, WOP_UUU_B
, H2
, H1
, H1
, DO_ADD
)
869 RVVCALL(OPIVV2
, vwaddu_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, DO_ADD
)
870 RVVCALL(OPIVV2
, vwaddu_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, DO_ADD
)
871 RVVCALL(OPIVV2
, vwsubu_vv_b
, WOP_UUU_B
, H2
, H1
, H1
, DO_SUB
)
872 RVVCALL(OPIVV2
, vwsubu_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, DO_SUB
)
873 RVVCALL(OPIVV2
, vwsubu_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, DO_SUB
)
874 RVVCALL(OPIVV2
, vwadd_vv_b
, WOP_SSS_B
, H2
, H1
, H1
, DO_ADD
)
875 RVVCALL(OPIVV2
, vwadd_vv_h
, WOP_SSS_H
, H4
, H2
, H2
, DO_ADD
)
876 RVVCALL(OPIVV2
, vwadd_vv_w
, WOP_SSS_W
, H8
, H4
, H4
, DO_ADD
)
877 RVVCALL(OPIVV2
, vwsub_vv_b
, WOP_SSS_B
, H2
, H1
, H1
, DO_SUB
)
878 RVVCALL(OPIVV2
, vwsub_vv_h
, WOP_SSS_H
, H4
, H2
, H2
, DO_SUB
)
879 RVVCALL(OPIVV2
, vwsub_vv_w
, WOP_SSS_W
, H8
, H4
, H4
, DO_SUB
)
880 RVVCALL(OPIVV2
, vwaddu_wv_b
, WOP_WUUU_B
, H2
, H1
, H1
, DO_ADD
)
881 RVVCALL(OPIVV2
, vwaddu_wv_h
, WOP_WUUU_H
, H4
, H2
, H2
, DO_ADD
)
882 RVVCALL(OPIVV2
, vwaddu_wv_w
, WOP_WUUU_W
, H8
, H4
, H4
, DO_ADD
)
883 RVVCALL(OPIVV2
, vwsubu_wv_b
, WOP_WUUU_B
, H2
, H1
, H1
, DO_SUB
)
884 RVVCALL(OPIVV2
, vwsubu_wv_h
, WOP_WUUU_H
, H4
, H2
, H2
, DO_SUB
)
885 RVVCALL(OPIVV2
, vwsubu_wv_w
, WOP_WUUU_W
, H8
, H4
, H4
, DO_SUB
)
886 RVVCALL(OPIVV2
, vwadd_wv_b
, WOP_WSSS_B
, H2
, H1
, H1
, DO_ADD
)
887 RVVCALL(OPIVV2
, vwadd_wv_h
, WOP_WSSS_H
, H4
, H2
, H2
, DO_ADD
)
888 RVVCALL(OPIVV2
, vwadd_wv_w
, WOP_WSSS_W
, H8
, H4
, H4
, DO_ADD
)
889 RVVCALL(OPIVV2
, vwsub_wv_b
, WOP_WSSS_B
, H2
, H1
, H1
, DO_SUB
)
890 RVVCALL(OPIVV2
, vwsub_wv_h
, WOP_WSSS_H
, H4
, H2
, H2
, DO_SUB
)
891 RVVCALL(OPIVV2
, vwsub_wv_w
, WOP_WSSS_W
, H8
, H4
, H4
, DO_SUB
)
892 GEN_VEXT_VV(vwaddu_vv_b
, 1, 2)
893 GEN_VEXT_VV(vwaddu_vv_h
, 2, 4)
894 GEN_VEXT_VV(vwaddu_vv_w
, 4, 8)
895 GEN_VEXT_VV(vwsubu_vv_b
, 1, 2)
896 GEN_VEXT_VV(vwsubu_vv_h
, 2, 4)
897 GEN_VEXT_VV(vwsubu_vv_w
, 4, 8)
898 GEN_VEXT_VV(vwadd_vv_b
, 1, 2)
899 GEN_VEXT_VV(vwadd_vv_h
, 2, 4)
900 GEN_VEXT_VV(vwadd_vv_w
, 4, 8)
901 GEN_VEXT_VV(vwsub_vv_b
, 1, 2)
902 GEN_VEXT_VV(vwsub_vv_h
, 2, 4)
903 GEN_VEXT_VV(vwsub_vv_w
, 4, 8)
904 GEN_VEXT_VV(vwaddu_wv_b
, 1, 2)
905 GEN_VEXT_VV(vwaddu_wv_h
, 2, 4)
906 GEN_VEXT_VV(vwaddu_wv_w
, 4, 8)
907 GEN_VEXT_VV(vwsubu_wv_b
, 1, 2)
908 GEN_VEXT_VV(vwsubu_wv_h
, 2, 4)
909 GEN_VEXT_VV(vwsubu_wv_w
, 4, 8)
910 GEN_VEXT_VV(vwadd_wv_b
, 1, 2)
911 GEN_VEXT_VV(vwadd_wv_h
, 2, 4)
912 GEN_VEXT_VV(vwadd_wv_w
, 4, 8)
913 GEN_VEXT_VV(vwsub_wv_b
, 1, 2)
914 GEN_VEXT_VV(vwsub_wv_h
, 2, 4)
915 GEN_VEXT_VV(vwsub_wv_w
, 4, 8)
917 RVVCALL(OPIVX2
, vwaddu_vx_b
, WOP_UUU_B
, H2
, H1
, DO_ADD
)
918 RVVCALL(OPIVX2
, vwaddu_vx_h
, WOP_UUU_H
, H4
, H2
, DO_ADD
)
919 RVVCALL(OPIVX2
, vwaddu_vx_w
, WOP_UUU_W
, H8
, H4
, DO_ADD
)
920 RVVCALL(OPIVX2
, vwsubu_vx_b
, WOP_UUU_B
, H2
, H1
, DO_SUB
)
921 RVVCALL(OPIVX2
, vwsubu_vx_h
, WOP_UUU_H
, H4
, H2
, DO_SUB
)
922 RVVCALL(OPIVX2
, vwsubu_vx_w
, WOP_UUU_W
, H8
, H4
, DO_SUB
)
923 RVVCALL(OPIVX2
, vwadd_vx_b
, WOP_SSS_B
, H2
, H1
, DO_ADD
)
924 RVVCALL(OPIVX2
, vwadd_vx_h
, WOP_SSS_H
, H4
, H2
, DO_ADD
)
925 RVVCALL(OPIVX2
, vwadd_vx_w
, WOP_SSS_W
, H8
, H4
, DO_ADD
)
926 RVVCALL(OPIVX2
, vwsub_vx_b
, WOP_SSS_B
, H2
, H1
, DO_SUB
)
927 RVVCALL(OPIVX2
, vwsub_vx_h
, WOP_SSS_H
, H4
, H2
, DO_SUB
)
928 RVVCALL(OPIVX2
, vwsub_vx_w
, WOP_SSS_W
, H8
, H4
, DO_SUB
)
929 RVVCALL(OPIVX2
, vwaddu_wx_b
, WOP_WUUU_B
, H2
, H1
, DO_ADD
)
930 RVVCALL(OPIVX2
, vwaddu_wx_h
, WOP_WUUU_H
, H4
, H2
, DO_ADD
)
931 RVVCALL(OPIVX2
, vwaddu_wx_w
, WOP_WUUU_W
, H8
, H4
, DO_ADD
)
932 RVVCALL(OPIVX2
, vwsubu_wx_b
, WOP_WUUU_B
, H2
, H1
, DO_SUB
)
933 RVVCALL(OPIVX2
, vwsubu_wx_h
, WOP_WUUU_H
, H4
, H2
, DO_SUB
)
934 RVVCALL(OPIVX2
, vwsubu_wx_w
, WOP_WUUU_W
, H8
, H4
, DO_SUB
)
935 RVVCALL(OPIVX2
, vwadd_wx_b
, WOP_WSSS_B
, H2
, H1
, DO_ADD
)
936 RVVCALL(OPIVX2
, vwadd_wx_h
, WOP_WSSS_H
, H4
, H2
, DO_ADD
)
937 RVVCALL(OPIVX2
, vwadd_wx_w
, WOP_WSSS_W
, H8
, H4
, DO_ADD
)
938 RVVCALL(OPIVX2
, vwsub_wx_b
, WOP_WSSS_B
, H2
, H1
, DO_SUB
)
939 RVVCALL(OPIVX2
, vwsub_wx_h
, WOP_WSSS_H
, H4
, H2
, DO_SUB
)
940 RVVCALL(OPIVX2
, vwsub_wx_w
, WOP_WSSS_W
, H8
, H4
, DO_SUB
)
941 GEN_VEXT_VX(vwaddu_vx_b
, 1, 2)
942 GEN_VEXT_VX(vwaddu_vx_h
, 2, 4)
943 GEN_VEXT_VX(vwaddu_vx_w
, 4, 8)
944 GEN_VEXT_VX(vwsubu_vx_b
, 1, 2)
945 GEN_VEXT_VX(vwsubu_vx_h
, 2, 4)
946 GEN_VEXT_VX(vwsubu_vx_w
, 4, 8)
947 GEN_VEXT_VX(vwadd_vx_b
, 1, 2)
948 GEN_VEXT_VX(vwadd_vx_h
, 2, 4)
949 GEN_VEXT_VX(vwadd_vx_w
, 4, 8)
950 GEN_VEXT_VX(vwsub_vx_b
, 1, 2)
951 GEN_VEXT_VX(vwsub_vx_h
, 2, 4)
952 GEN_VEXT_VX(vwsub_vx_w
, 4, 8)
953 GEN_VEXT_VX(vwaddu_wx_b
, 1, 2)
954 GEN_VEXT_VX(vwaddu_wx_h
, 2, 4)
955 GEN_VEXT_VX(vwaddu_wx_w
, 4, 8)
956 GEN_VEXT_VX(vwsubu_wx_b
, 1, 2)
957 GEN_VEXT_VX(vwsubu_wx_h
, 2, 4)
958 GEN_VEXT_VX(vwsubu_wx_w
, 4, 8)
959 GEN_VEXT_VX(vwadd_wx_b
, 1, 2)
960 GEN_VEXT_VX(vwadd_wx_h
, 2, 4)
961 GEN_VEXT_VX(vwadd_wx_w
, 4, 8)
962 GEN_VEXT_VX(vwsub_wx_b
, 1, 2)
963 GEN_VEXT_VX(vwsub_wx_h
, 2, 4)
964 GEN_VEXT_VX(vwsub_wx_w
, 4, 8)
/* Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions */
#define DO_VADC(N, M, C) (N + M + C)
#define DO_VSBC(N, M, C) (N - M - C)
970 #define GEN_VEXT_VADC_VVM(NAME, ETYPE, H, DO_OP) \
971 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
972 CPURISCVState *env, uint32_t desc) \
974 uint32_t vl = env->vl; \
977 for (i = env->vstart; i < vl; i++) { \
978 ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
979 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
980 ETYPE carry = vext_elem_mask(v0, i); \
982 *((ETYPE *)vd + H(i)) = DO_OP(s2, s1, carry); \
987 GEN_VEXT_VADC_VVM(vadc_vvm_b
, uint8_t, H1
, DO_VADC
)
988 GEN_VEXT_VADC_VVM(vadc_vvm_h
, uint16_t, H2
, DO_VADC
)
989 GEN_VEXT_VADC_VVM(vadc_vvm_w
, uint32_t, H4
, DO_VADC
)
990 GEN_VEXT_VADC_VVM(vadc_vvm_d
, uint64_t, H8
, DO_VADC
)
992 GEN_VEXT_VADC_VVM(vsbc_vvm_b
, uint8_t, H1
, DO_VSBC
)
993 GEN_VEXT_VADC_VVM(vsbc_vvm_h
, uint16_t, H2
, DO_VSBC
)
994 GEN_VEXT_VADC_VVM(vsbc_vvm_w
, uint32_t, H4
, DO_VSBC
)
995 GEN_VEXT_VADC_VVM(vsbc_vvm_d
, uint64_t, H8
, DO_VSBC
)
997 #define GEN_VEXT_VADC_VXM(NAME, ETYPE, H, DO_OP) \
998 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
999 CPURISCVState *env, uint32_t desc) \
1001 uint32_t vl = env->vl; \
1004 for (i = env->vstart; i < vl; i++) { \
1005 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
1006 ETYPE carry = vext_elem_mask(v0, i); \
1008 *((ETYPE *)vd + H(i)) = DO_OP(s2, (ETYPE)(target_long)s1, carry);\
1013 GEN_VEXT_VADC_VXM(vadc_vxm_b
, uint8_t, H1
, DO_VADC
)
1014 GEN_VEXT_VADC_VXM(vadc_vxm_h
, uint16_t, H2
, DO_VADC
)
1015 GEN_VEXT_VADC_VXM(vadc_vxm_w
, uint32_t, H4
, DO_VADC
)
1016 GEN_VEXT_VADC_VXM(vadc_vxm_d
, uint64_t, H8
, DO_VADC
)
1018 GEN_VEXT_VADC_VXM(vsbc_vxm_b
, uint8_t, H1
, DO_VSBC
)
1019 GEN_VEXT_VADC_VXM(vsbc_vxm_h
, uint16_t, H2
, DO_VSBC
)
1020 GEN_VEXT_VADC_VXM(vsbc_vxm_w
, uint32_t, H4
, DO_VSBC
)
1021 GEN_VEXT_VADC_VXM(vsbc_vxm_d
, uint64_t, H8
, DO_VSBC
)
1023 #define DO_MADC(N, M, C) (C ? (__typeof(N))(N + M + 1) <= N : \
1024 (__typeof(N))(N + M) < N)
1025 #define DO_MSBC(N, M, C) (C ? N <= M : N < M)
1027 #define GEN_VEXT_VMADC_VVM(NAME, ETYPE, H, DO_OP) \
1028 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
1029 CPURISCVState *env, uint32_t desc) \
1031 uint32_t vl = env->vl; \
1032 uint32_t vm = vext_vm(desc); \
1035 for (i = env->vstart; i < vl; i++) { \
1036 ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
1037 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
1038 ETYPE carry = !vm && vext_elem_mask(v0, i); \
1039 vext_set_elem_mask(vd, i, DO_OP(s2, s1, carry)); \
1044 GEN_VEXT_VMADC_VVM(vmadc_vvm_b
, uint8_t, H1
, DO_MADC
)
1045 GEN_VEXT_VMADC_VVM(vmadc_vvm_h
, uint16_t, H2
, DO_MADC
)
1046 GEN_VEXT_VMADC_VVM(vmadc_vvm_w
, uint32_t, H4
, DO_MADC
)
1047 GEN_VEXT_VMADC_VVM(vmadc_vvm_d
, uint64_t, H8
, DO_MADC
)
1049 GEN_VEXT_VMADC_VVM(vmsbc_vvm_b
, uint8_t, H1
, DO_MSBC
)
1050 GEN_VEXT_VMADC_VVM(vmsbc_vvm_h
, uint16_t, H2
, DO_MSBC
)
1051 GEN_VEXT_VMADC_VVM(vmsbc_vvm_w
, uint32_t, H4
, DO_MSBC
)
1052 GEN_VEXT_VMADC_VVM(vmsbc_vvm_d
, uint64_t, H8
, DO_MSBC
)
1054 #define GEN_VEXT_VMADC_VXM(NAME, ETYPE, H, DO_OP) \
1055 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
1056 void *vs2, CPURISCVState *env, uint32_t desc) \
1058 uint32_t vl = env->vl; \
1059 uint32_t vm = vext_vm(desc); \
1062 for (i = env->vstart; i < vl; i++) { \
1063 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
1064 ETYPE carry = !vm && vext_elem_mask(v0, i); \
1065 vext_set_elem_mask(vd, i, \
1066 DO_OP(s2, (ETYPE)(target_long)s1, carry)); \
1071 GEN_VEXT_VMADC_VXM(vmadc_vxm_b
, uint8_t, H1
, DO_MADC
)
1072 GEN_VEXT_VMADC_VXM(vmadc_vxm_h
, uint16_t, H2
, DO_MADC
)
1073 GEN_VEXT_VMADC_VXM(vmadc_vxm_w
, uint32_t, H4
, DO_MADC
)
1074 GEN_VEXT_VMADC_VXM(vmadc_vxm_d
, uint64_t, H8
, DO_MADC
)
1076 GEN_VEXT_VMADC_VXM(vmsbc_vxm_b
, uint8_t, H1
, DO_MSBC
)
1077 GEN_VEXT_VMADC_VXM(vmsbc_vxm_h
, uint16_t, H2
, DO_MSBC
)
1078 GEN_VEXT_VMADC_VXM(vmsbc_vxm_w
, uint32_t, H4
, DO_MSBC
)
1079 GEN_VEXT_VMADC_VXM(vmsbc_vxm_d
, uint64_t, H8
, DO_MSBC
)
1081 /* Vector Bitwise Logical Instructions */
1082 RVVCALL(OPIVV2
, vand_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_AND
)
1083 RVVCALL(OPIVV2
, vand_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_AND
)
1084 RVVCALL(OPIVV2
, vand_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_AND
)
1085 RVVCALL(OPIVV2
, vand_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_AND
)
1086 RVVCALL(OPIVV2
, vor_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_OR
)
1087 RVVCALL(OPIVV2
, vor_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_OR
)
1088 RVVCALL(OPIVV2
, vor_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_OR
)
1089 RVVCALL(OPIVV2
, vor_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_OR
)
1090 RVVCALL(OPIVV2
, vxor_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_XOR
)
1091 RVVCALL(OPIVV2
, vxor_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_XOR
)
1092 RVVCALL(OPIVV2
, vxor_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_XOR
)
1093 RVVCALL(OPIVV2
, vxor_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_XOR
)
1094 GEN_VEXT_VV(vand_vv_b
, 1, 1)
1095 GEN_VEXT_VV(vand_vv_h
, 2, 2)
1096 GEN_VEXT_VV(vand_vv_w
, 4, 4)
1097 GEN_VEXT_VV(vand_vv_d
, 8, 8)
1098 GEN_VEXT_VV(vor_vv_b
, 1, 1)
1099 GEN_VEXT_VV(vor_vv_h
, 2, 2)
1100 GEN_VEXT_VV(vor_vv_w
, 4, 4)
1101 GEN_VEXT_VV(vor_vv_d
, 8, 8)
1102 GEN_VEXT_VV(vxor_vv_b
, 1, 1)
1103 GEN_VEXT_VV(vxor_vv_h
, 2, 2)
1104 GEN_VEXT_VV(vxor_vv_w
, 4, 4)
1105 GEN_VEXT_VV(vxor_vv_d
, 8, 8)
1107 RVVCALL(OPIVX2
, vand_vx_b
, OP_SSS_B
, H1
, H1
, DO_AND
)
1108 RVVCALL(OPIVX2
, vand_vx_h
, OP_SSS_H
, H2
, H2
, DO_AND
)
1109 RVVCALL(OPIVX2
, vand_vx_w
, OP_SSS_W
, H4
, H4
, DO_AND
)
1110 RVVCALL(OPIVX2
, vand_vx_d
, OP_SSS_D
, H8
, H8
, DO_AND
)
1111 RVVCALL(OPIVX2
, vor_vx_b
, OP_SSS_B
, H1
, H1
, DO_OR
)
1112 RVVCALL(OPIVX2
, vor_vx_h
, OP_SSS_H
, H2
, H2
, DO_OR
)
1113 RVVCALL(OPIVX2
, vor_vx_w
, OP_SSS_W
, H4
, H4
, DO_OR
)
1114 RVVCALL(OPIVX2
, vor_vx_d
, OP_SSS_D
, H8
, H8
, DO_OR
)
1115 RVVCALL(OPIVX2
, vxor_vx_b
, OP_SSS_B
, H1
, H1
, DO_XOR
)
1116 RVVCALL(OPIVX2
, vxor_vx_h
, OP_SSS_H
, H2
, H2
, DO_XOR
)
1117 RVVCALL(OPIVX2
, vxor_vx_w
, OP_SSS_W
, H4
, H4
, DO_XOR
)
1118 RVVCALL(OPIVX2
, vxor_vx_d
, OP_SSS_D
, H8
, H8
, DO_XOR
)
1119 GEN_VEXT_VX(vand_vx_b
, 1, 1)
1120 GEN_VEXT_VX(vand_vx_h
, 2, 2)
1121 GEN_VEXT_VX(vand_vx_w
, 4, 4)
1122 GEN_VEXT_VX(vand_vx_d
, 8, 8)
1123 GEN_VEXT_VX(vor_vx_b
, 1, 1)
1124 GEN_VEXT_VX(vor_vx_h
, 2, 2)
1125 GEN_VEXT_VX(vor_vx_w
, 4, 4)
1126 GEN_VEXT_VX(vor_vx_d
, 8, 8)
1127 GEN_VEXT_VX(vxor_vx_b
, 1, 1)
1128 GEN_VEXT_VX(vxor_vx_h
, 2, 2)
1129 GEN_VEXT_VX(vxor_vx_w
, 4, 4)
1130 GEN_VEXT_VX(vxor_vx_d
, 8, 8)
1132 /* Vector Single-Width Bit Shift Instructions */
1133 #define DO_SLL(N, M) (N << (M))
1134 #define DO_SRL(N, M) (N >> (M))
1136 /* generate the helpers for shift instructions with two vector operators */
1137 #define GEN_VEXT_SHIFT_VV(NAME, TS1, TS2, HS1, HS2, OP, MASK) \
1138 void HELPER(NAME)(void *vd, void *v0, void *vs1, \
1139 void *vs2, CPURISCVState *env, uint32_t desc) \
1141 uint32_t vm = vext_vm(desc); \
1142 uint32_t vl = env->vl; \
1145 for (i = env->vstart; i < vl; i++) { \
1146 if (!vm && !vext_elem_mask(v0, i)) { \
1149 TS1 s1 = *((TS1 *)vs1 + HS1(i)); \
1150 TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
1151 *((TS1 *)vd + HS1(i)) = OP(s2, s1 & MASK); \
1156 GEN_VEXT_SHIFT_VV(vsll_vv_b
, uint8_t, uint8_t, H1
, H1
, DO_SLL
, 0x7)
1157 GEN_VEXT_SHIFT_VV(vsll_vv_h
, uint16_t, uint16_t, H2
, H2
, DO_SLL
, 0xf)
1158 GEN_VEXT_SHIFT_VV(vsll_vv_w
, uint32_t, uint32_t, H4
, H4
, DO_SLL
, 0x1f)
1159 GEN_VEXT_SHIFT_VV(vsll_vv_d
, uint64_t, uint64_t, H8
, H8
, DO_SLL
, 0x3f)
1161 GEN_VEXT_SHIFT_VV(vsrl_vv_b
, uint8_t, uint8_t, H1
, H1
, DO_SRL
, 0x7)
1162 GEN_VEXT_SHIFT_VV(vsrl_vv_h
, uint16_t, uint16_t, H2
, H2
, DO_SRL
, 0xf)
1163 GEN_VEXT_SHIFT_VV(vsrl_vv_w
, uint32_t, uint32_t, H4
, H4
, DO_SRL
, 0x1f)
1164 GEN_VEXT_SHIFT_VV(vsrl_vv_d
, uint64_t, uint64_t, H8
, H8
, DO_SRL
, 0x3f)
1166 GEN_VEXT_SHIFT_VV(vsra_vv_b
, uint8_t, int8_t, H1
, H1
, DO_SRL
, 0x7)
1167 GEN_VEXT_SHIFT_VV(vsra_vv_h
, uint16_t, int16_t, H2
, H2
, DO_SRL
, 0xf)
1168 GEN_VEXT_SHIFT_VV(vsra_vv_w
, uint32_t, int32_t, H4
, H4
, DO_SRL
, 0x1f)
1169 GEN_VEXT_SHIFT_VV(vsra_vv_d
, uint64_t, int64_t, H8
, H8
, DO_SRL
, 0x3f)
1171 /* generate the helpers for shift instructions with one vector and one scalar */
1172 #define GEN_VEXT_SHIFT_VX(NAME, TD, TS2, HD, HS2, OP, MASK) \
1173 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
1174 void *vs2, CPURISCVState *env, uint32_t desc) \
1176 uint32_t vm = vext_vm(desc); \
1177 uint32_t vl = env->vl; \
1180 for (i = env->vstart; i < vl; i++) { \
1181 if (!vm && !vext_elem_mask(v0, i)) { \
1184 TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
1185 *((TD *)vd + HD(i)) = OP(s2, s1 & MASK); \
1190 GEN_VEXT_SHIFT_VX(vsll_vx_b
, uint8_t, int8_t, H1
, H1
, DO_SLL
, 0x7)
1191 GEN_VEXT_SHIFT_VX(vsll_vx_h
, uint16_t, int16_t, H2
, H2
, DO_SLL
, 0xf)
1192 GEN_VEXT_SHIFT_VX(vsll_vx_w
, uint32_t, int32_t, H4
, H4
, DO_SLL
, 0x1f)
1193 GEN_VEXT_SHIFT_VX(vsll_vx_d
, uint64_t, int64_t, H8
, H8
, DO_SLL
, 0x3f)
1195 GEN_VEXT_SHIFT_VX(vsrl_vx_b
, uint8_t, uint8_t, H1
, H1
, DO_SRL
, 0x7)
1196 GEN_VEXT_SHIFT_VX(vsrl_vx_h
, uint16_t, uint16_t, H2
, H2
, DO_SRL
, 0xf)
1197 GEN_VEXT_SHIFT_VX(vsrl_vx_w
, uint32_t, uint32_t, H4
, H4
, DO_SRL
, 0x1f)
1198 GEN_VEXT_SHIFT_VX(vsrl_vx_d
, uint64_t, uint64_t, H8
, H8
, DO_SRL
, 0x3f)
1200 GEN_VEXT_SHIFT_VX(vsra_vx_b
, int8_t, int8_t, H1
, H1
, DO_SRL
, 0x7)
1201 GEN_VEXT_SHIFT_VX(vsra_vx_h
, int16_t, int16_t, H2
, H2
, DO_SRL
, 0xf)
1202 GEN_VEXT_SHIFT_VX(vsra_vx_w
, int32_t, int32_t, H4
, H4
, DO_SRL
, 0x1f)
1203 GEN_VEXT_SHIFT_VX(vsra_vx_d
, int64_t, int64_t, H8
, H8
, DO_SRL
, 0x3f)
1205 /* Vector Narrowing Integer Right Shift Instructions */
1206 GEN_VEXT_SHIFT_VV(vnsrl_wv_b
, uint8_t, uint16_t, H1
, H2
, DO_SRL
, 0xf)
1207 GEN_VEXT_SHIFT_VV(vnsrl_wv_h
, uint16_t, uint32_t, H2
, H4
, DO_SRL
, 0x1f)
1208 GEN_VEXT_SHIFT_VV(vnsrl_wv_w
, uint32_t, uint64_t, H4
, H8
, DO_SRL
, 0x3f)
1209 GEN_VEXT_SHIFT_VV(vnsra_wv_b
, uint8_t, int16_t, H1
, H2
, DO_SRL
, 0xf)
1210 GEN_VEXT_SHIFT_VV(vnsra_wv_h
, uint16_t, int32_t, H2
, H4
, DO_SRL
, 0x1f)
1211 GEN_VEXT_SHIFT_VV(vnsra_wv_w
, uint32_t, int64_t, H4
, H8
, DO_SRL
, 0x3f)
1212 GEN_VEXT_SHIFT_VX(vnsrl_wx_b
, uint8_t, uint16_t, H1
, H2
, DO_SRL
, 0xf)
1213 GEN_VEXT_SHIFT_VX(vnsrl_wx_h
, uint16_t, uint32_t, H2
, H4
, DO_SRL
, 0x1f)
1214 GEN_VEXT_SHIFT_VX(vnsrl_wx_w
, uint32_t, uint64_t, H4
, H8
, DO_SRL
, 0x3f)
1215 GEN_VEXT_SHIFT_VX(vnsra_wx_b
, int8_t, int16_t, H1
, H2
, DO_SRL
, 0xf)
1216 GEN_VEXT_SHIFT_VX(vnsra_wx_h
, int16_t, int32_t, H2
, H4
, DO_SRL
, 0x1f)
1217 GEN_VEXT_SHIFT_VX(vnsra_wx_w
, int32_t, int64_t, H4
, H8
, DO_SRL
, 0x3f)
1219 /* Vector Integer Comparison Instructions */
1220 #define DO_MSEQ(N, M) (N == M)
1221 #define DO_MSNE(N, M) (N != M)
1222 #define DO_MSLT(N, M) (N < M)
1223 #define DO_MSLE(N, M) (N <= M)
1224 #define DO_MSGT(N, M) (N > M)
1226 #define GEN_VEXT_CMP_VV(NAME, ETYPE, H, DO_OP) \
1227 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
1228 CPURISCVState *env, uint32_t desc) \
1230 uint32_t vm = vext_vm(desc); \
1231 uint32_t vl = env->vl; \
1234 for (i = env->vstart; i < vl; i++) { \
1235 ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
1236 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
1237 if (!vm && !vext_elem_mask(v0, i)) { \
1240 vext_set_elem_mask(vd, i, DO_OP(s2, s1)); \
1245 GEN_VEXT_CMP_VV(vmseq_vv_b
, uint8_t, H1
, DO_MSEQ
)
1246 GEN_VEXT_CMP_VV(vmseq_vv_h
, uint16_t, H2
, DO_MSEQ
)
1247 GEN_VEXT_CMP_VV(vmseq_vv_w
, uint32_t, H4
, DO_MSEQ
)
1248 GEN_VEXT_CMP_VV(vmseq_vv_d
, uint64_t, H8
, DO_MSEQ
)
1250 GEN_VEXT_CMP_VV(vmsne_vv_b
, uint8_t, H1
, DO_MSNE
)
1251 GEN_VEXT_CMP_VV(vmsne_vv_h
, uint16_t, H2
, DO_MSNE
)
1252 GEN_VEXT_CMP_VV(vmsne_vv_w
, uint32_t, H4
, DO_MSNE
)
1253 GEN_VEXT_CMP_VV(vmsne_vv_d
, uint64_t, H8
, DO_MSNE
)
1255 GEN_VEXT_CMP_VV(vmsltu_vv_b
, uint8_t, H1
, DO_MSLT
)
1256 GEN_VEXT_CMP_VV(vmsltu_vv_h
, uint16_t, H2
, DO_MSLT
)
1257 GEN_VEXT_CMP_VV(vmsltu_vv_w
, uint32_t, H4
, DO_MSLT
)
1258 GEN_VEXT_CMP_VV(vmsltu_vv_d
, uint64_t, H8
, DO_MSLT
)
1260 GEN_VEXT_CMP_VV(vmslt_vv_b
, int8_t, H1
, DO_MSLT
)
1261 GEN_VEXT_CMP_VV(vmslt_vv_h
, int16_t, H2
, DO_MSLT
)
1262 GEN_VEXT_CMP_VV(vmslt_vv_w
, int32_t, H4
, DO_MSLT
)
1263 GEN_VEXT_CMP_VV(vmslt_vv_d
, int64_t, H8
, DO_MSLT
)
1265 GEN_VEXT_CMP_VV(vmsleu_vv_b
, uint8_t, H1
, DO_MSLE
)
1266 GEN_VEXT_CMP_VV(vmsleu_vv_h
, uint16_t, H2
, DO_MSLE
)
1267 GEN_VEXT_CMP_VV(vmsleu_vv_w
, uint32_t, H4
, DO_MSLE
)
1268 GEN_VEXT_CMP_VV(vmsleu_vv_d
, uint64_t, H8
, DO_MSLE
)
1270 GEN_VEXT_CMP_VV(vmsle_vv_b
, int8_t, H1
, DO_MSLE
)
1271 GEN_VEXT_CMP_VV(vmsle_vv_h
, int16_t, H2
, DO_MSLE
)
1272 GEN_VEXT_CMP_VV(vmsle_vv_w
, int32_t, H4
, DO_MSLE
)
1273 GEN_VEXT_CMP_VV(vmsle_vv_d
, int64_t, H8
, DO_MSLE
)
1275 #define GEN_VEXT_CMP_VX(NAME, ETYPE, H, DO_OP) \
1276 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
1277 CPURISCVState *env, uint32_t desc) \
1279 uint32_t vm = vext_vm(desc); \
1280 uint32_t vl = env->vl; \
1283 for (i = env->vstart; i < vl; i++) { \
1284 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
1285 if (!vm && !vext_elem_mask(v0, i)) { \
1288 vext_set_elem_mask(vd, i, \
1289 DO_OP(s2, (ETYPE)(target_long)s1)); \
1294 GEN_VEXT_CMP_VX(vmseq_vx_b
, uint8_t, H1
, DO_MSEQ
)
1295 GEN_VEXT_CMP_VX(vmseq_vx_h
, uint16_t, H2
, DO_MSEQ
)
1296 GEN_VEXT_CMP_VX(vmseq_vx_w
, uint32_t, H4
, DO_MSEQ
)
1297 GEN_VEXT_CMP_VX(vmseq_vx_d
, uint64_t, H8
, DO_MSEQ
)
1299 GEN_VEXT_CMP_VX(vmsne_vx_b
, uint8_t, H1
, DO_MSNE
)
1300 GEN_VEXT_CMP_VX(vmsne_vx_h
, uint16_t, H2
, DO_MSNE
)
1301 GEN_VEXT_CMP_VX(vmsne_vx_w
, uint32_t, H4
, DO_MSNE
)
1302 GEN_VEXT_CMP_VX(vmsne_vx_d
, uint64_t, H8
, DO_MSNE
)
1304 GEN_VEXT_CMP_VX(vmsltu_vx_b
, uint8_t, H1
, DO_MSLT
)
1305 GEN_VEXT_CMP_VX(vmsltu_vx_h
, uint16_t, H2
, DO_MSLT
)
1306 GEN_VEXT_CMP_VX(vmsltu_vx_w
, uint32_t, H4
, DO_MSLT
)
1307 GEN_VEXT_CMP_VX(vmsltu_vx_d
, uint64_t, H8
, DO_MSLT
)
1309 GEN_VEXT_CMP_VX(vmslt_vx_b
, int8_t, H1
, DO_MSLT
)
1310 GEN_VEXT_CMP_VX(vmslt_vx_h
, int16_t, H2
, DO_MSLT
)
1311 GEN_VEXT_CMP_VX(vmslt_vx_w
, int32_t, H4
, DO_MSLT
)
1312 GEN_VEXT_CMP_VX(vmslt_vx_d
, int64_t, H8
, DO_MSLT
)
1314 GEN_VEXT_CMP_VX(vmsleu_vx_b
, uint8_t, H1
, DO_MSLE
)
1315 GEN_VEXT_CMP_VX(vmsleu_vx_h
, uint16_t, H2
, DO_MSLE
)
1316 GEN_VEXT_CMP_VX(vmsleu_vx_w
, uint32_t, H4
, DO_MSLE
)
1317 GEN_VEXT_CMP_VX(vmsleu_vx_d
, uint64_t, H8
, DO_MSLE
)
1319 GEN_VEXT_CMP_VX(vmsle_vx_b
, int8_t, H1
, DO_MSLE
)
1320 GEN_VEXT_CMP_VX(vmsle_vx_h
, int16_t, H2
, DO_MSLE
)
1321 GEN_VEXT_CMP_VX(vmsle_vx_w
, int32_t, H4
, DO_MSLE
)
1322 GEN_VEXT_CMP_VX(vmsle_vx_d
, int64_t, H8
, DO_MSLE
)
1324 GEN_VEXT_CMP_VX(vmsgtu_vx_b
, uint8_t, H1
, DO_MSGT
)
1325 GEN_VEXT_CMP_VX(vmsgtu_vx_h
, uint16_t, H2
, DO_MSGT
)
1326 GEN_VEXT_CMP_VX(vmsgtu_vx_w
, uint32_t, H4
, DO_MSGT
)
1327 GEN_VEXT_CMP_VX(vmsgtu_vx_d
, uint64_t, H8
, DO_MSGT
)
1329 GEN_VEXT_CMP_VX(vmsgt_vx_b
, int8_t, H1
, DO_MSGT
)
1330 GEN_VEXT_CMP_VX(vmsgt_vx_h
, int16_t, H2
, DO_MSGT
)
1331 GEN_VEXT_CMP_VX(vmsgt_vx_w
, int32_t, H4
, DO_MSGT
)
1332 GEN_VEXT_CMP_VX(vmsgt_vx_d
, int64_t, H8
, DO_MSGT
)
1334 /* Vector Integer Min/Max Instructions */
1335 RVVCALL(OPIVV2
, vminu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, DO_MIN
)
1336 RVVCALL(OPIVV2
, vminu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, DO_MIN
)
1337 RVVCALL(OPIVV2
, vminu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, DO_MIN
)
1338 RVVCALL(OPIVV2
, vminu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, DO_MIN
)
1339 RVVCALL(OPIVV2
, vmin_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_MIN
)
1340 RVVCALL(OPIVV2
, vmin_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_MIN
)
1341 RVVCALL(OPIVV2
, vmin_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_MIN
)
1342 RVVCALL(OPIVV2
, vmin_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_MIN
)
1343 RVVCALL(OPIVV2
, vmaxu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, DO_MAX
)
1344 RVVCALL(OPIVV2
, vmaxu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, DO_MAX
)
1345 RVVCALL(OPIVV2
, vmaxu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, DO_MAX
)
1346 RVVCALL(OPIVV2
, vmaxu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, DO_MAX
)
1347 RVVCALL(OPIVV2
, vmax_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_MAX
)
1348 RVVCALL(OPIVV2
, vmax_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_MAX
)
1349 RVVCALL(OPIVV2
, vmax_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_MAX
)
1350 RVVCALL(OPIVV2
, vmax_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_MAX
)
1351 GEN_VEXT_VV(vminu_vv_b
, 1, 1)
1352 GEN_VEXT_VV(vminu_vv_h
, 2, 2)
1353 GEN_VEXT_VV(vminu_vv_w
, 4, 4)
1354 GEN_VEXT_VV(vminu_vv_d
, 8, 8)
1355 GEN_VEXT_VV(vmin_vv_b
, 1, 1)
1356 GEN_VEXT_VV(vmin_vv_h
, 2, 2)
1357 GEN_VEXT_VV(vmin_vv_w
, 4, 4)
1358 GEN_VEXT_VV(vmin_vv_d
, 8, 8)
1359 GEN_VEXT_VV(vmaxu_vv_b
, 1, 1)
1360 GEN_VEXT_VV(vmaxu_vv_h
, 2, 2)
1361 GEN_VEXT_VV(vmaxu_vv_w
, 4, 4)
1362 GEN_VEXT_VV(vmaxu_vv_d
, 8, 8)
1363 GEN_VEXT_VV(vmax_vv_b
, 1, 1)
1364 GEN_VEXT_VV(vmax_vv_h
, 2, 2)
1365 GEN_VEXT_VV(vmax_vv_w
, 4, 4)
1366 GEN_VEXT_VV(vmax_vv_d
, 8, 8)
1368 RVVCALL(OPIVX2
, vminu_vx_b
, OP_UUU_B
, H1
, H1
, DO_MIN
)
1369 RVVCALL(OPIVX2
, vminu_vx_h
, OP_UUU_H
, H2
, H2
, DO_MIN
)
1370 RVVCALL(OPIVX2
, vminu_vx_w
, OP_UUU_W
, H4
, H4
, DO_MIN
)
1371 RVVCALL(OPIVX2
, vminu_vx_d
, OP_UUU_D
, H8
, H8
, DO_MIN
)
1372 RVVCALL(OPIVX2
, vmin_vx_b
, OP_SSS_B
, H1
, H1
, DO_MIN
)
1373 RVVCALL(OPIVX2
, vmin_vx_h
, OP_SSS_H
, H2
, H2
, DO_MIN
)
1374 RVVCALL(OPIVX2
, vmin_vx_w
, OP_SSS_W
, H4
, H4
, DO_MIN
)
1375 RVVCALL(OPIVX2
, vmin_vx_d
, OP_SSS_D
, H8
, H8
, DO_MIN
)
1376 RVVCALL(OPIVX2
, vmaxu_vx_b
, OP_UUU_B
, H1
, H1
, DO_MAX
)
1377 RVVCALL(OPIVX2
, vmaxu_vx_h
, OP_UUU_H
, H2
, H2
, DO_MAX
)
1378 RVVCALL(OPIVX2
, vmaxu_vx_w
, OP_UUU_W
, H4
, H4
, DO_MAX
)
1379 RVVCALL(OPIVX2
, vmaxu_vx_d
, OP_UUU_D
, H8
, H8
, DO_MAX
)
1380 RVVCALL(OPIVX2
, vmax_vx_b
, OP_SSS_B
, H1
, H1
, DO_MAX
)
1381 RVVCALL(OPIVX2
, vmax_vx_h
, OP_SSS_H
, H2
, H2
, DO_MAX
)
1382 RVVCALL(OPIVX2
, vmax_vx_w
, OP_SSS_W
, H4
, H4
, DO_MAX
)
1383 RVVCALL(OPIVX2
, vmax_vx_d
, OP_SSS_D
, H8
, H8
, DO_MAX
)
1384 GEN_VEXT_VX(vminu_vx_b
, 1, 1)
1385 GEN_VEXT_VX(vminu_vx_h
, 2, 2)
1386 GEN_VEXT_VX(vminu_vx_w
, 4, 4)
1387 GEN_VEXT_VX(vminu_vx_d
, 8, 8)
1388 GEN_VEXT_VX(vmin_vx_b
, 1, 1)
1389 GEN_VEXT_VX(vmin_vx_h
, 2, 2)
1390 GEN_VEXT_VX(vmin_vx_w
, 4, 4)
1391 GEN_VEXT_VX(vmin_vx_d
, 8, 8)
1392 GEN_VEXT_VX(vmaxu_vx_b
, 1, 1)
1393 GEN_VEXT_VX(vmaxu_vx_h
, 2, 2)
1394 GEN_VEXT_VX(vmaxu_vx_w
, 4, 4)
1395 GEN_VEXT_VX(vmaxu_vx_d
, 8, 8)
1396 GEN_VEXT_VX(vmax_vx_b
, 1, 1)
1397 GEN_VEXT_VX(vmax_vx_h
, 2, 2)
1398 GEN_VEXT_VX(vmax_vx_w
, 4, 4)
1399 GEN_VEXT_VX(vmax_vx_d
, 8, 8)
1401 /* Vector Single-Width Integer Multiply Instructions */
1402 #define DO_MUL(N, M) (N * M)
1403 RVVCALL(OPIVV2
, vmul_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_MUL
)
1404 RVVCALL(OPIVV2
, vmul_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_MUL
)
1405 RVVCALL(OPIVV2
, vmul_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_MUL
)
1406 RVVCALL(OPIVV2
, vmul_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_MUL
)
1407 GEN_VEXT_VV(vmul_vv_b
, 1, 1)
1408 GEN_VEXT_VV(vmul_vv_h
, 2, 2)
1409 GEN_VEXT_VV(vmul_vv_w
, 4, 4)
1410 GEN_VEXT_VV(vmul_vv_d
, 8, 8)
/*
 * Signed multiply-high: return the upper half of the signed product of
 * two SEW-wide operands, computed in a double-width type.
 */
static int8_t do_mulh_b(int8_t s2, int8_t s1)
{
    return (int16_t)s2 * (int16_t)s1 >> 8;
}

static int16_t do_mulh_h(int16_t s2, int16_t s1)
{
    return (int32_t)s2 * (int32_t)s1 >> 16;
}

static int32_t do_mulh_w(int32_t s2, int32_t s1)
{
    return (int64_t)s2 * (int64_t)s1 >> 32;
}
1427 static int64_t do_mulh_d(int64_t s2
, int64_t s1
)
1429 uint64_t hi_64
, lo_64
;
1431 muls64(&lo_64
, &hi_64
, s1
, s2
);
/* Unsigned multiply-high: upper half of the unsigned double-width product. */
static uint8_t do_mulhu_b(uint8_t s2, uint8_t s1)
{
    return (uint16_t)s2 * (uint16_t)s1 >> 8;
}

static uint16_t do_mulhu_h(uint16_t s2, uint16_t s1)
{
    return (uint32_t)s2 * (uint32_t)s1 >> 16;
}

static uint32_t do_mulhu_w(uint32_t s2, uint32_t s1)
{
    return (uint64_t)s2 * (uint64_t)s1 >> 32;
}
1450 static uint64_t do_mulhu_d(uint64_t s2
, uint64_t s1
)
1452 uint64_t hi_64
, lo_64
;
1454 mulu64(&lo_64
, &hi_64
, s2
, s1
);
/*
 * Signed * unsigned multiply-high (vmulhsu): s2 is signed, s1 unsigned.
 * The double-width product fits the wider type, so the shifted high half
 * of the (possibly wrapped) product has the correct bit pattern.
 */
static int8_t do_mulhsu_b(int8_t s2, uint8_t s1)
{
    return (int16_t)s2 * (uint16_t)s1 >> 8;
}

static int16_t do_mulhsu_h(int16_t s2, uint16_t s1)
{
    return (int32_t)s2 * (uint32_t)s1 >> 16;
}

static int32_t do_mulhsu_w(int32_t s2, uint32_t s1)
{
    return (int64_t)s2 * (uint64_t)s1 >> 32;
}
1474 * Let A = signed operand,
1475 * B = unsigned operand
1476 * P = mulu64(A, B), unsigned product
1478 * LET X = 2 ** 64 - A, 2's complement of A
1479 * SP = signed product
1483 * = -(2 ** 64 - A) * B
1484 * = A * B - 2 ** 64 * B
1489 * HI_P -= (A < 0 ? B : 0)
1492 static int64_t do_mulhsu_d(int64_t s2
, uint64_t s1
)
1494 uint64_t hi_64
, lo_64
;
1496 mulu64(&lo_64
, &hi_64
, s2
, s1
);
1498 hi_64
-= s2
< 0 ? s1
: 0;
1502 RVVCALL(OPIVV2
, vmulh_vv_b
, OP_SSS_B
, H1
, H1
, H1
, do_mulh_b
)
1503 RVVCALL(OPIVV2
, vmulh_vv_h
, OP_SSS_H
, H2
, H2
, H2
, do_mulh_h
)
1504 RVVCALL(OPIVV2
, vmulh_vv_w
, OP_SSS_W
, H4
, H4
, H4
, do_mulh_w
)
1505 RVVCALL(OPIVV2
, vmulh_vv_d
, OP_SSS_D
, H8
, H8
, H8
, do_mulh_d
)
1506 RVVCALL(OPIVV2
, vmulhu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, do_mulhu_b
)
1507 RVVCALL(OPIVV2
, vmulhu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, do_mulhu_h
)
1508 RVVCALL(OPIVV2
, vmulhu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, do_mulhu_w
)
1509 RVVCALL(OPIVV2
, vmulhu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, do_mulhu_d
)
1510 RVVCALL(OPIVV2
, vmulhsu_vv_b
, OP_SUS_B
, H1
, H1
, H1
, do_mulhsu_b
)
1511 RVVCALL(OPIVV2
, vmulhsu_vv_h
, OP_SUS_H
, H2
, H2
, H2
, do_mulhsu_h
)
1512 RVVCALL(OPIVV2
, vmulhsu_vv_w
, OP_SUS_W
, H4
, H4
, H4
, do_mulhsu_w
)
1513 RVVCALL(OPIVV2
, vmulhsu_vv_d
, OP_SUS_D
, H8
, H8
, H8
, do_mulhsu_d
)
1514 GEN_VEXT_VV(vmulh_vv_b
, 1, 1)
1515 GEN_VEXT_VV(vmulh_vv_h
, 2, 2)
1516 GEN_VEXT_VV(vmulh_vv_w
, 4, 4)
1517 GEN_VEXT_VV(vmulh_vv_d
, 8, 8)
1518 GEN_VEXT_VV(vmulhu_vv_b
, 1, 1)
1519 GEN_VEXT_VV(vmulhu_vv_h
, 2, 2)
1520 GEN_VEXT_VV(vmulhu_vv_w
, 4, 4)
1521 GEN_VEXT_VV(vmulhu_vv_d
, 8, 8)
1522 GEN_VEXT_VV(vmulhsu_vv_b
, 1, 1)
1523 GEN_VEXT_VV(vmulhsu_vv_h
, 2, 2)
1524 GEN_VEXT_VV(vmulhsu_vv_w
, 4, 4)
1525 GEN_VEXT_VV(vmulhsu_vv_d
, 8, 8)
1527 RVVCALL(OPIVX2
, vmul_vx_b
, OP_SSS_B
, H1
, H1
, DO_MUL
)
1528 RVVCALL(OPIVX2
, vmul_vx_h
, OP_SSS_H
, H2
, H2
, DO_MUL
)
1529 RVVCALL(OPIVX2
, vmul_vx_w
, OP_SSS_W
, H4
, H4
, DO_MUL
)
1530 RVVCALL(OPIVX2
, vmul_vx_d
, OP_SSS_D
, H8
, H8
, DO_MUL
)
1531 RVVCALL(OPIVX2
, vmulh_vx_b
, OP_SSS_B
, H1
, H1
, do_mulh_b
)
1532 RVVCALL(OPIVX2
, vmulh_vx_h
, OP_SSS_H
, H2
, H2
, do_mulh_h
)
1533 RVVCALL(OPIVX2
, vmulh_vx_w
, OP_SSS_W
, H4
, H4
, do_mulh_w
)
1534 RVVCALL(OPIVX2
, vmulh_vx_d
, OP_SSS_D
, H8
, H8
, do_mulh_d
)
1535 RVVCALL(OPIVX2
, vmulhu_vx_b
, OP_UUU_B
, H1
, H1
, do_mulhu_b
)
1536 RVVCALL(OPIVX2
, vmulhu_vx_h
, OP_UUU_H
, H2
, H2
, do_mulhu_h
)
1537 RVVCALL(OPIVX2
, vmulhu_vx_w
, OP_UUU_W
, H4
, H4
, do_mulhu_w
)
1538 RVVCALL(OPIVX2
, vmulhu_vx_d
, OP_UUU_D
, H8
, H8
, do_mulhu_d
)
1539 RVVCALL(OPIVX2
, vmulhsu_vx_b
, OP_SUS_B
, H1
, H1
, do_mulhsu_b
)
1540 RVVCALL(OPIVX2
, vmulhsu_vx_h
, OP_SUS_H
, H2
, H2
, do_mulhsu_h
)
1541 RVVCALL(OPIVX2
, vmulhsu_vx_w
, OP_SUS_W
, H4
, H4
, do_mulhsu_w
)
1542 RVVCALL(OPIVX2
, vmulhsu_vx_d
, OP_SUS_D
, H8
, H8
, do_mulhsu_d
)
1543 GEN_VEXT_VX(vmul_vx_b
, 1, 1)
1544 GEN_VEXT_VX(vmul_vx_h
, 2, 2)
1545 GEN_VEXT_VX(vmul_vx_w
, 4, 4)
1546 GEN_VEXT_VX(vmul_vx_d
, 8, 8)
1547 GEN_VEXT_VX(vmulh_vx_b
, 1, 1)
1548 GEN_VEXT_VX(vmulh_vx_h
, 2, 2)
1549 GEN_VEXT_VX(vmulh_vx_w
, 4, 4)
1550 GEN_VEXT_VX(vmulh_vx_d
, 8, 8)
1551 GEN_VEXT_VX(vmulhu_vx_b
, 1, 1)
1552 GEN_VEXT_VX(vmulhu_vx_h
, 2, 2)
1553 GEN_VEXT_VX(vmulhu_vx_w
, 4, 4)
1554 GEN_VEXT_VX(vmulhu_vx_d
, 8, 8)
1555 GEN_VEXT_VX(vmulhsu_vx_b
, 1, 1)
1556 GEN_VEXT_VX(vmulhsu_vx_h
, 2, 2)
1557 GEN_VEXT_VX(vmulhsu_vx_w
, 4, 4)
1558 GEN_VEXT_VX(vmulhsu_vx_d
, 8, 8)
/*
 * Vector Integer Divide Instructions.
 * Per the RVV spec: division by zero yields all-ones (divu/div) or the
 * dividend (remu/rem); signed overflow (MIN / -1) yields the dividend for
 * div and 0 for rem.  "N == -N" is true only for the minimum signed value.
 */
#define DO_DIVU(N, M) (unlikely(M == 0) ? (__typeof(N))(-1) : N / M)
#define DO_REMU(N, M) (unlikely(M == 0) ? N : N % M)
#define DO_DIV(N, M)  (unlikely(M == 0) ? (__typeof(N))(-1) :\
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
#define DO_REM(N, M)  (unlikely(M == 0) ? N :\
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)
1568 RVVCALL(OPIVV2
, vdivu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, DO_DIVU
)
1569 RVVCALL(OPIVV2
, vdivu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, DO_DIVU
)
1570 RVVCALL(OPIVV2
, vdivu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, DO_DIVU
)
1571 RVVCALL(OPIVV2
, vdivu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, DO_DIVU
)
1572 RVVCALL(OPIVV2
, vdiv_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_DIV
)
1573 RVVCALL(OPIVV2
, vdiv_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_DIV
)
1574 RVVCALL(OPIVV2
, vdiv_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_DIV
)
1575 RVVCALL(OPIVV2
, vdiv_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_DIV
)
1576 RVVCALL(OPIVV2
, vremu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, DO_REMU
)
1577 RVVCALL(OPIVV2
, vremu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, DO_REMU
)
1578 RVVCALL(OPIVV2
, vremu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, DO_REMU
)
1579 RVVCALL(OPIVV2
, vremu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, DO_REMU
)
1580 RVVCALL(OPIVV2
, vrem_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_REM
)
1581 RVVCALL(OPIVV2
, vrem_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_REM
)
1582 RVVCALL(OPIVV2
, vrem_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_REM
)
1583 RVVCALL(OPIVV2
, vrem_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_REM
)
1584 GEN_VEXT_VV(vdivu_vv_b
, 1, 1)
1585 GEN_VEXT_VV(vdivu_vv_h
, 2, 2)
1586 GEN_VEXT_VV(vdivu_vv_w
, 4, 4)
1587 GEN_VEXT_VV(vdivu_vv_d
, 8, 8)
1588 GEN_VEXT_VV(vdiv_vv_b
, 1, 1)
1589 GEN_VEXT_VV(vdiv_vv_h
, 2, 2)
1590 GEN_VEXT_VV(vdiv_vv_w
, 4, 4)
1591 GEN_VEXT_VV(vdiv_vv_d
, 8, 8)
1592 GEN_VEXT_VV(vremu_vv_b
, 1, 1)
1593 GEN_VEXT_VV(vremu_vv_h
, 2, 2)
1594 GEN_VEXT_VV(vremu_vv_w
, 4, 4)
1595 GEN_VEXT_VV(vremu_vv_d
, 8, 8)
1596 GEN_VEXT_VV(vrem_vv_b
, 1, 1)
1597 GEN_VEXT_VV(vrem_vv_h
, 2, 2)
1598 GEN_VEXT_VV(vrem_vv_w
, 4, 4)
1599 GEN_VEXT_VV(vrem_vv_d
, 8, 8)
1601 RVVCALL(OPIVX2
, vdivu_vx_b
, OP_UUU_B
, H1
, H1
, DO_DIVU
)
1602 RVVCALL(OPIVX2
, vdivu_vx_h
, OP_UUU_H
, H2
, H2
, DO_DIVU
)
1603 RVVCALL(OPIVX2
, vdivu_vx_w
, OP_UUU_W
, H4
, H4
, DO_DIVU
)
1604 RVVCALL(OPIVX2
, vdivu_vx_d
, OP_UUU_D
, H8
, H8
, DO_DIVU
)
1605 RVVCALL(OPIVX2
, vdiv_vx_b
, OP_SSS_B
, H1
, H1
, DO_DIV
)
1606 RVVCALL(OPIVX2
, vdiv_vx_h
, OP_SSS_H
, H2
, H2
, DO_DIV
)
1607 RVVCALL(OPIVX2
, vdiv_vx_w
, OP_SSS_W
, H4
, H4
, DO_DIV
)
1608 RVVCALL(OPIVX2
, vdiv_vx_d
, OP_SSS_D
, H8
, H8
, DO_DIV
)
1609 RVVCALL(OPIVX2
, vremu_vx_b
, OP_UUU_B
, H1
, H1
, DO_REMU
)
1610 RVVCALL(OPIVX2
, vremu_vx_h
, OP_UUU_H
, H2
, H2
, DO_REMU
)
1611 RVVCALL(OPIVX2
, vremu_vx_w
, OP_UUU_W
, H4
, H4
, DO_REMU
)
1612 RVVCALL(OPIVX2
, vremu_vx_d
, OP_UUU_D
, H8
, H8
, DO_REMU
)
1613 RVVCALL(OPIVX2
, vrem_vx_b
, OP_SSS_B
, H1
, H1
, DO_REM
)
1614 RVVCALL(OPIVX2
, vrem_vx_h
, OP_SSS_H
, H2
, H2
, DO_REM
)
1615 RVVCALL(OPIVX2
, vrem_vx_w
, OP_SSS_W
, H4
, H4
, DO_REM
)
1616 RVVCALL(OPIVX2
, vrem_vx_d
, OP_SSS_D
, H8
, H8
, DO_REM
)
1617 GEN_VEXT_VX(vdivu_vx_b
, 1, 1)
1618 GEN_VEXT_VX(vdivu_vx_h
, 2, 2)
1619 GEN_VEXT_VX(vdivu_vx_w
, 4, 4)
1620 GEN_VEXT_VX(vdivu_vx_d
, 8, 8)
1621 GEN_VEXT_VX(vdiv_vx_b
, 1, 1)
1622 GEN_VEXT_VX(vdiv_vx_h
, 2, 2)
1623 GEN_VEXT_VX(vdiv_vx_w
, 4, 4)
1624 GEN_VEXT_VX(vdiv_vx_d
, 8, 8)
1625 GEN_VEXT_VX(vremu_vx_b
, 1, 1)
1626 GEN_VEXT_VX(vremu_vx_h
, 2, 2)
1627 GEN_VEXT_VX(vremu_vx_w
, 4, 4)
1628 GEN_VEXT_VX(vremu_vx_d
, 8, 8)
1629 GEN_VEXT_VX(vrem_vx_b
, 1, 1)
1630 GEN_VEXT_VX(vrem_vx_h
, 2, 2)
1631 GEN_VEXT_VX(vrem_vx_w
, 4, 4)
1632 GEN_VEXT_VX(vrem_vx_d
, 8, 8)
1634 /* Vector Widening Integer Multiply Instructions */
1635 RVVCALL(OPIVV2
, vwmul_vv_b
, WOP_SSS_B
, H2
, H1
, H1
, DO_MUL
)
1636 RVVCALL(OPIVV2
, vwmul_vv_h
, WOP_SSS_H
, H4
, H2
, H2
, DO_MUL
)
1637 RVVCALL(OPIVV2
, vwmul_vv_w
, WOP_SSS_W
, H8
, H4
, H4
, DO_MUL
)
1638 RVVCALL(OPIVV2
, vwmulu_vv_b
, WOP_UUU_B
, H2
, H1
, H1
, DO_MUL
)
1639 RVVCALL(OPIVV2
, vwmulu_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, DO_MUL
)
1640 RVVCALL(OPIVV2
, vwmulu_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, DO_MUL
)
1641 RVVCALL(OPIVV2
, vwmulsu_vv_b
, WOP_SUS_B
, H2
, H1
, H1
, DO_MUL
)
1642 RVVCALL(OPIVV2
, vwmulsu_vv_h
, WOP_SUS_H
, H4
, H2
, H2
, DO_MUL
)
1643 RVVCALL(OPIVV2
, vwmulsu_vv_w
, WOP_SUS_W
, H8
, H4
, H4
, DO_MUL
)
1644 GEN_VEXT_VV(vwmul_vv_b
, 1, 2)
1645 GEN_VEXT_VV(vwmul_vv_h
, 2, 4)
1646 GEN_VEXT_VV(vwmul_vv_w
, 4, 8)
1647 GEN_VEXT_VV(vwmulu_vv_b
, 1, 2)
1648 GEN_VEXT_VV(vwmulu_vv_h
, 2, 4)
1649 GEN_VEXT_VV(vwmulu_vv_w
, 4, 8)
1650 GEN_VEXT_VV(vwmulsu_vv_b
, 1, 2)
1651 GEN_VEXT_VV(vwmulsu_vv_h
, 2, 4)
1652 GEN_VEXT_VV(vwmulsu_vv_w
, 4, 8)
1654 RVVCALL(OPIVX2
, vwmul_vx_b
, WOP_SSS_B
, H2
, H1
, DO_MUL
)
1655 RVVCALL(OPIVX2
, vwmul_vx_h
, WOP_SSS_H
, H4
, H2
, DO_MUL
)
1656 RVVCALL(OPIVX2
, vwmul_vx_w
, WOP_SSS_W
, H8
, H4
, DO_MUL
)
1657 RVVCALL(OPIVX2
, vwmulu_vx_b
, WOP_UUU_B
, H2
, H1
, DO_MUL
)
1658 RVVCALL(OPIVX2
, vwmulu_vx_h
, WOP_UUU_H
, H4
, H2
, DO_MUL
)
1659 RVVCALL(OPIVX2
, vwmulu_vx_w
, WOP_UUU_W
, H8
, H4
, DO_MUL
)
1660 RVVCALL(OPIVX2
, vwmulsu_vx_b
, WOP_SUS_B
, H2
, H1
, DO_MUL
)
1661 RVVCALL(OPIVX2
, vwmulsu_vx_h
, WOP_SUS_H
, H4
, H2
, DO_MUL
)
1662 RVVCALL(OPIVX2
, vwmulsu_vx_w
, WOP_SUS_W
, H8
, H4
, DO_MUL
)
1663 GEN_VEXT_VX(vwmul_vx_b
, 1, 2)
1664 GEN_VEXT_VX(vwmul_vx_h
, 2, 4)
1665 GEN_VEXT_VX(vwmul_vx_w
, 4, 8)
1666 GEN_VEXT_VX(vwmulu_vx_b
, 1, 2)
1667 GEN_VEXT_VX(vwmulu_vx_h
, 2, 4)
1668 GEN_VEXT_VX(vwmulu_vx_w
, 4, 8)
1669 GEN_VEXT_VX(vwmulsu_vx_b
, 1, 2)
1670 GEN_VEXT_VX(vwmulsu_vx_h
, 2, 4)
1671 GEN_VEXT_VX(vwmulsu_vx_w
, 4, 8)
1673 /* Vector Single-Width Integer Multiply-Add Instructions */
1674 #define OPIVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
1675 static void do_##NAME(void *vd, void *vs1, void *vs2, int i) \
1677 TX1 s1 = *((T1 *)vs1 + HS1(i)); \
1678 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
1679 TD d = *((TD *)vd + HD(i)); \
1680 *((TD *)vd + HD(i)) = OP(s2, s1, d); \
1683 #define DO_MACC(N, M, D) (M * N + D)
1684 #define DO_NMSAC(N, M, D) (-(M * N) + D)
1685 #define DO_MADD(N, M, D) (M * D + N)
1686 #define DO_NMSUB(N, M, D) (-(M * D) + N)
1687 RVVCALL(OPIVV3
, vmacc_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_MACC
)
1688 RVVCALL(OPIVV3
, vmacc_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_MACC
)
1689 RVVCALL(OPIVV3
, vmacc_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_MACC
)
1690 RVVCALL(OPIVV3
, vmacc_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_MACC
)
1691 RVVCALL(OPIVV3
, vnmsac_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_NMSAC
)
1692 RVVCALL(OPIVV3
, vnmsac_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_NMSAC
)
1693 RVVCALL(OPIVV3
, vnmsac_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_NMSAC
)
1694 RVVCALL(OPIVV3
, vnmsac_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_NMSAC
)
1695 RVVCALL(OPIVV3
, vmadd_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_MADD
)
1696 RVVCALL(OPIVV3
, vmadd_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_MADD
)
1697 RVVCALL(OPIVV3
, vmadd_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_MADD
)
1698 RVVCALL(OPIVV3
, vmadd_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_MADD
)
1699 RVVCALL(OPIVV3
, vnmsub_vv_b
, OP_SSS_B
, H1
, H1
, H1
, DO_NMSUB
)
1700 RVVCALL(OPIVV3
, vnmsub_vv_h
, OP_SSS_H
, H2
, H2
, H2
, DO_NMSUB
)
1701 RVVCALL(OPIVV3
, vnmsub_vv_w
, OP_SSS_W
, H4
, H4
, H4
, DO_NMSUB
)
1702 RVVCALL(OPIVV3
, vnmsub_vv_d
, OP_SSS_D
, H8
, H8
, H8
, DO_NMSUB
)
1703 GEN_VEXT_VV(vmacc_vv_b
, 1, 1)
1704 GEN_VEXT_VV(vmacc_vv_h
, 2, 2)
1705 GEN_VEXT_VV(vmacc_vv_w
, 4, 4)
1706 GEN_VEXT_VV(vmacc_vv_d
, 8, 8)
1707 GEN_VEXT_VV(vnmsac_vv_b
, 1, 1)
1708 GEN_VEXT_VV(vnmsac_vv_h
, 2, 2)
1709 GEN_VEXT_VV(vnmsac_vv_w
, 4, 4)
1710 GEN_VEXT_VV(vnmsac_vv_d
, 8, 8)
1711 GEN_VEXT_VV(vmadd_vv_b
, 1, 1)
1712 GEN_VEXT_VV(vmadd_vv_h
, 2, 2)
1713 GEN_VEXT_VV(vmadd_vv_w
, 4, 4)
1714 GEN_VEXT_VV(vmadd_vv_d
, 8, 8)
1715 GEN_VEXT_VV(vnmsub_vv_b
, 1, 1)
1716 GEN_VEXT_VV(vnmsub_vv_h
, 2, 2)
1717 GEN_VEXT_VV(vnmsub_vv_w
, 4, 4)
1718 GEN_VEXT_VV(vnmsub_vv_d
, 8, 8)
1720 #define OPIVX3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
1721 static void do_##NAME(void *vd, target_long s1, void *vs2, int i) \
1723 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
1724 TD d = *((TD *)vd + HD(i)); \
1725 *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d); \
1728 RVVCALL(OPIVX3
, vmacc_vx_b
, OP_SSS_B
, H1
, H1
, DO_MACC
)
1729 RVVCALL(OPIVX3
, vmacc_vx_h
, OP_SSS_H
, H2
, H2
, DO_MACC
)
1730 RVVCALL(OPIVX3
, vmacc_vx_w
, OP_SSS_W
, H4
, H4
, DO_MACC
)
1731 RVVCALL(OPIVX3
, vmacc_vx_d
, OP_SSS_D
, H8
, H8
, DO_MACC
)
1732 RVVCALL(OPIVX3
, vnmsac_vx_b
, OP_SSS_B
, H1
, H1
, DO_NMSAC
)
1733 RVVCALL(OPIVX3
, vnmsac_vx_h
, OP_SSS_H
, H2
, H2
, DO_NMSAC
)
1734 RVVCALL(OPIVX3
, vnmsac_vx_w
, OP_SSS_W
, H4
, H4
, DO_NMSAC
)
1735 RVVCALL(OPIVX3
, vnmsac_vx_d
, OP_SSS_D
, H8
, H8
, DO_NMSAC
)
1736 RVVCALL(OPIVX3
, vmadd_vx_b
, OP_SSS_B
, H1
, H1
, DO_MADD
)
1737 RVVCALL(OPIVX3
, vmadd_vx_h
, OP_SSS_H
, H2
, H2
, DO_MADD
)
1738 RVVCALL(OPIVX3
, vmadd_vx_w
, OP_SSS_W
, H4
, H4
, DO_MADD
)
1739 RVVCALL(OPIVX3
, vmadd_vx_d
, OP_SSS_D
, H8
, H8
, DO_MADD
)
1740 RVVCALL(OPIVX3
, vnmsub_vx_b
, OP_SSS_B
, H1
, H1
, DO_NMSUB
)
1741 RVVCALL(OPIVX3
, vnmsub_vx_h
, OP_SSS_H
, H2
, H2
, DO_NMSUB
)
1742 RVVCALL(OPIVX3
, vnmsub_vx_w
, OP_SSS_W
, H4
, H4
, DO_NMSUB
)
1743 RVVCALL(OPIVX3
, vnmsub_vx_d
, OP_SSS_D
, H8
, H8
, DO_NMSUB
)
1744 GEN_VEXT_VX(vmacc_vx_b
, 1, 1)
1745 GEN_VEXT_VX(vmacc_vx_h
, 2, 2)
1746 GEN_VEXT_VX(vmacc_vx_w
, 4, 4)
1747 GEN_VEXT_VX(vmacc_vx_d
, 8, 8)
1748 GEN_VEXT_VX(vnmsac_vx_b
, 1, 1)
1749 GEN_VEXT_VX(vnmsac_vx_h
, 2, 2)
1750 GEN_VEXT_VX(vnmsac_vx_w
, 4, 4)
1751 GEN_VEXT_VX(vnmsac_vx_d
, 8, 8)
1752 GEN_VEXT_VX(vmadd_vx_b
, 1, 1)
1753 GEN_VEXT_VX(vmadd_vx_h
, 2, 2)
1754 GEN_VEXT_VX(vmadd_vx_w
, 4, 4)
1755 GEN_VEXT_VX(vmadd_vx_d
, 8, 8)
1756 GEN_VEXT_VX(vnmsub_vx_b
, 1, 1)
1757 GEN_VEXT_VX(vnmsub_vx_h
, 2, 2)
1758 GEN_VEXT_VX(vnmsub_vx_w
, 4, 4)
1759 GEN_VEXT_VX(vnmsub_vx_d
, 8, 8)
1761 /* Vector Widening Integer Multiply-Add Instructions */
1762 RVVCALL(OPIVV3
, vwmaccu_vv_b
, WOP_UUU_B
, H2
, H1
, H1
, DO_MACC
)
1763 RVVCALL(OPIVV3
, vwmaccu_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, DO_MACC
)
1764 RVVCALL(OPIVV3
, vwmaccu_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, DO_MACC
)
1765 RVVCALL(OPIVV3
, vwmacc_vv_b
, WOP_SSS_B
, H2
, H1
, H1
, DO_MACC
)
1766 RVVCALL(OPIVV3
, vwmacc_vv_h
, WOP_SSS_H
, H4
, H2
, H2
, DO_MACC
)
1767 RVVCALL(OPIVV3
, vwmacc_vv_w
, WOP_SSS_W
, H8
, H4
, H4
, DO_MACC
)
1768 RVVCALL(OPIVV3
, vwmaccsu_vv_b
, WOP_SSU_B
, H2
, H1
, H1
, DO_MACC
)
1769 RVVCALL(OPIVV3
, vwmaccsu_vv_h
, WOP_SSU_H
, H4
, H2
, H2
, DO_MACC
)
1770 RVVCALL(OPIVV3
, vwmaccsu_vv_w
, WOP_SSU_W
, H8
, H4
, H4
, DO_MACC
)
1771 GEN_VEXT_VV(vwmaccu_vv_b
, 1, 2)
1772 GEN_VEXT_VV(vwmaccu_vv_h
, 2, 4)
1773 GEN_VEXT_VV(vwmaccu_vv_w
, 4, 8)
1774 GEN_VEXT_VV(vwmacc_vv_b
, 1, 2)
1775 GEN_VEXT_VV(vwmacc_vv_h
, 2, 4)
1776 GEN_VEXT_VV(vwmacc_vv_w
, 4, 8)
1777 GEN_VEXT_VV(vwmaccsu_vv_b
, 1, 2)
1778 GEN_VEXT_VV(vwmaccsu_vv_h
, 2, 4)
1779 GEN_VEXT_VV(vwmaccsu_vv_w
, 4, 8)
1781 RVVCALL(OPIVX3
, vwmaccu_vx_b
, WOP_UUU_B
, H2
, H1
, DO_MACC
)
1782 RVVCALL(OPIVX3
, vwmaccu_vx_h
, WOP_UUU_H
, H4
, H2
, DO_MACC
)
1783 RVVCALL(OPIVX3
, vwmaccu_vx_w
, WOP_UUU_W
, H8
, H4
, DO_MACC
)
1784 RVVCALL(OPIVX3
, vwmacc_vx_b
, WOP_SSS_B
, H2
, H1
, DO_MACC
)
1785 RVVCALL(OPIVX3
, vwmacc_vx_h
, WOP_SSS_H
, H4
, H2
, DO_MACC
)
1786 RVVCALL(OPIVX3
, vwmacc_vx_w
, WOP_SSS_W
, H8
, H4
, DO_MACC
)
1787 RVVCALL(OPIVX3
, vwmaccsu_vx_b
, WOP_SSU_B
, H2
, H1
, DO_MACC
)
1788 RVVCALL(OPIVX3
, vwmaccsu_vx_h
, WOP_SSU_H
, H4
, H2
, DO_MACC
)
1789 RVVCALL(OPIVX3
, vwmaccsu_vx_w
, WOP_SSU_W
, H8
, H4
, DO_MACC
)
1790 RVVCALL(OPIVX3
, vwmaccus_vx_b
, WOP_SUS_B
, H2
, H1
, DO_MACC
)
1791 RVVCALL(OPIVX3
, vwmaccus_vx_h
, WOP_SUS_H
, H4
, H2
, DO_MACC
)
1792 RVVCALL(OPIVX3
, vwmaccus_vx_w
, WOP_SUS_W
, H8
, H4
, DO_MACC
)
1793 GEN_VEXT_VX(vwmaccu_vx_b
, 1, 2)
1794 GEN_VEXT_VX(vwmaccu_vx_h
, 2, 4)
1795 GEN_VEXT_VX(vwmaccu_vx_w
, 4, 8)
1796 GEN_VEXT_VX(vwmacc_vx_b
, 1, 2)
1797 GEN_VEXT_VX(vwmacc_vx_h
, 2, 4)
1798 GEN_VEXT_VX(vwmacc_vx_w
, 4, 8)
1799 GEN_VEXT_VX(vwmaccsu_vx_b
, 1, 2)
1800 GEN_VEXT_VX(vwmaccsu_vx_h
, 2, 4)
1801 GEN_VEXT_VX(vwmaccsu_vx_w
, 4, 8)
1802 GEN_VEXT_VX(vwmaccus_vx_b
, 1, 2)
1803 GEN_VEXT_VX(vwmaccus_vx_h
, 2, 4)
1804 GEN_VEXT_VX(vwmaccus_vx_w
, 4, 8)
1806 /* Vector Integer Merge and Move Instructions */
1807 #define GEN_VEXT_VMV_VV(NAME, ETYPE, H) \
1808 void HELPER(NAME)(void *vd, void *vs1, CPURISCVState *env, \
1811 uint32_t vl = env->vl; \
1814 for (i = env->vstart; i < vl; i++) { \
1815 ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
1816 *((ETYPE *)vd + H(i)) = s1; \
1821 GEN_VEXT_VMV_VV(vmv_v_v_b
, int8_t, H1
)
1822 GEN_VEXT_VMV_VV(vmv_v_v_h
, int16_t, H2
)
1823 GEN_VEXT_VMV_VV(vmv_v_v_w
, int32_t, H4
)
1824 GEN_VEXT_VMV_VV(vmv_v_v_d
, int64_t, H8
)
1826 #define GEN_VEXT_VMV_VX(NAME, ETYPE, H) \
1827 void HELPER(NAME)(void *vd, uint64_t s1, CPURISCVState *env, \
1830 uint32_t vl = env->vl; \
1833 for (i = env->vstart; i < vl; i++) { \
1834 *((ETYPE *)vd + H(i)) = (ETYPE)s1; \
1839 GEN_VEXT_VMV_VX(vmv_v_x_b
, int8_t, H1
)
1840 GEN_VEXT_VMV_VX(vmv_v_x_h
, int16_t, H2
)
1841 GEN_VEXT_VMV_VX(vmv_v_x_w
, int32_t, H4
)
1842 GEN_VEXT_VMV_VX(vmv_v_x_d
, int64_t, H8
)
1844 #define GEN_VEXT_VMERGE_VV(NAME, ETYPE, H) \
1845 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
1846 CPURISCVState *env, uint32_t desc) \
1848 uint32_t vl = env->vl; \
1851 for (i = env->vstart; i < vl; i++) { \
1852 ETYPE *vt = (!vext_elem_mask(v0, i) ? vs2 : vs1); \
1853 *((ETYPE *)vd + H(i)) = *(vt + H(i)); \
1858 GEN_VEXT_VMERGE_VV(vmerge_vvm_b
, int8_t, H1
)
1859 GEN_VEXT_VMERGE_VV(vmerge_vvm_h
, int16_t, H2
)
1860 GEN_VEXT_VMERGE_VV(vmerge_vvm_w
, int32_t, H4
)
1861 GEN_VEXT_VMERGE_VV(vmerge_vvm_d
, int64_t, H8
)
1863 #define GEN_VEXT_VMERGE_VX(NAME, ETYPE, H) \
1864 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
1865 void *vs2, CPURISCVState *env, uint32_t desc) \
1867 uint32_t vl = env->vl; \
1870 for (i = env->vstart; i < vl; i++) { \
1871 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
1872 ETYPE d = (!vext_elem_mask(v0, i) ? s2 : \
1873 (ETYPE)(target_long)s1); \
1874 *((ETYPE *)vd + H(i)) = d; \
1879 GEN_VEXT_VMERGE_VX(vmerge_vxm_b
, int8_t, H1
)
1880 GEN_VEXT_VMERGE_VX(vmerge_vxm_h
, int16_t, H2
)
1881 GEN_VEXT_VMERGE_VX(vmerge_vxm_w
, int32_t, H4
)
1882 GEN_VEXT_VMERGE_VX(vmerge_vxm_d
, int64_t, H8
)
1885 *** Vector Fixed-Point Arithmetic Instructions
1888 /* Vector Single-Width Saturating Add and Subtract */
1891 * As fixed point instructions probably have round mode and saturation,
1892 * define common macros for fixed point here.
1894 typedef void opivv2_rm_fn(void *vd
, void *vs1
, void *vs2
, int i
,
1895 CPURISCVState
*env
, int vxrm
);
1897 #define OPIVV2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
1898 static inline void \
1899 do_##NAME(void *vd, void *vs1, void *vs2, int i, \
1900 CPURISCVState *env, int vxrm) \
1902 TX1 s1 = *((T1 *)vs1 + HS1(i)); \
1903 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
1904 *((TD *)vd + HD(i)) = OP(env, vxrm, s2, s1); \
1908 vext_vv_rm_1(void *vd
, void *v0
, void *vs1
, void *vs2
,
1910 uint32_t vl
, uint32_t vm
, int vxrm
,
1913 for (uint32_t i
= env
->vstart
; i
< vl
; i
++) {
1914 if (!vm
&& !vext_elem_mask(v0
, i
)) {
1917 fn(vd
, vs1
, vs2
, i
, env
, vxrm
);
1923 vext_vv_rm_2(void *vd
, void *v0
, void *vs1
, void *vs2
,
1925 uint32_t desc
, uint32_t esz
, uint32_t dsz
,
1928 uint32_t vm
= vext_vm(desc
);
1929 uint32_t vl
= env
->vl
;
1931 switch (env
->vxrm
) {
1933 vext_vv_rm_1(vd
, v0
, vs1
, vs2
,
1934 env
, vl
, vm
, 0, fn
);
1937 vext_vv_rm_1(vd
, v0
, vs1
, vs2
,
1938 env
, vl
, vm
, 1, fn
);
1941 vext_vv_rm_1(vd
, v0
, vs1
, vs2
,
1942 env
, vl
, vm
, 2, fn
);
1945 vext_vv_rm_1(vd
, v0
, vs1
, vs2
,
1946 env
, vl
, vm
, 3, fn
);
1951 /* generate helpers for fixed point instructions with OPIVV format */
1952 #define GEN_VEXT_VV_RM(NAME, ESZ, DSZ) \
1953 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
1954 CPURISCVState *env, uint32_t desc) \
1956 vext_vv_rm_2(vd, v0, vs1, vs2, env, desc, ESZ, DSZ, \
1960 static inline uint8_t saddu8(CPURISCVState
*env
, int vxrm
, uint8_t a
, uint8_t b
)
1962 uint8_t res
= a
+ b
;
1970 static inline uint16_t saddu16(CPURISCVState
*env
, int vxrm
, uint16_t a
,
1973 uint16_t res
= a
+ b
;
1981 static inline uint32_t saddu32(CPURISCVState
*env
, int vxrm
, uint32_t a
,
1984 uint32_t res
= a
+ b
;
1992 static inline uint64_t saddu64(CPURISCVState
*env
, int vxrm
, uint64_t a
,
1995 uint64_t res
= a
+ b
;
2003 RVVCALL(OPIVV2_RM
, vsaddu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, saddu8
)
2004 RVVCALL(OPIVV2_RM
, vsaddu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, saddu16
)
2005 RVVCALL(OPIVV2_RM
, vsaddu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, saddu32
)
2006 RVVCALL(OPIVV2_RM
, vsaddu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, saddu64
)
2007 GEN_VEXT_VV_RM(vsaddu_vv_b
, 1, 1)
2008 GEN_VEXT_VV_RM(vsaddu_vv_h
, 2, 2)
2009 GEN_VEXT_VV_RM(vsaddu_vv_w
, 4, 4)
2010 GEN_VEXT_VV_RM(vsaddu_vv_d
, 8, 8)
2012 typedef void opivx2_rm_fn(void *vd
, target_long s1
, void *vs2
, int i
,
2013 CPURISCVState
*env
, int vxrm
);
2015 #define OPIVX2_RM(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
2016 static inline void \
2017 do_##NAME(void *vd, target_long s1, void *vs2, int i, \
2018 CPURISCVState *env, int vxrm) \
2020 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
2021 *((TD *)vd + HD(i)) = OP(env, vxrm, s2, (TX1)(T1)s1); \
2025 vext_vx_rm_1(void *vd
, void *v0
, target_long s1
, void *vs2
,
2027 uint32_t vl
, uint32_t vm
, int vxrm
,
2030 for (uint32_t i
= env
->vstart
; i
< vl
; i
++) {
2031 if (!vm
&& !vext_elem_mask(v0
, i
)) {
2034 fn(vd
, s1
, vs2
, i
, env
, vxrm
);
2040 vext_vx_rm_2(void *vd
, void *v0
, target_long s1
, void *vs2
,
2042 uint32_t desc
, uint32_t esz
, uint32_t dsz
,
2045 uint32_t vm
= vext_vm(desc
);
2046 uint32_t vl
= env
->vl
;
2048 switch (env
->vxrm
) {
2050 vext_vx_rm_1(vd
, v0
, s1
, vs2
,
2051 env
, vl
, vm
, 0, fn
);
2054 vext_vx_rm_1(vd
, v0
, s1
, vs2
,
2055 env
, vl
, vm
, 1, fn
);
2058 vext_vx_rm_1(vd
, v0
, s1
, vs2
,
2059 env
, vl
, vm
, 2, fn
);
2062 vext_vx_rm_1(vd
, v0
, s1
, vs2
,
2063 env
, vl
, vm
, 3, fn
);
2068 /* generate helpers for fixed point instructions with OPIVX format */
2069 #define GEN_VEXT_VX_RM(NAME, ESZ, DSZ) \
2070 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
2071 void *vs2, CPURISCVState *env, uint32_t desc) \
2073 vext_vx_rm_2(vd, v0, s1, vs2, env, desc, ESZ, DSZ, \
2077 RVVCALL(OPIVX2_RM
, vsaddu_vx_b
, OP_UUU_B
, H1
, H1
, saddu8
)
2078 RVVCALL(OPIVX2_RM
, vsaddu_vx_h
, OP_UUU_H
, H2
, H2
, saddu16
)
2079 RVVCALL(OPIVX2_RM
, vsaddu_vx_w
, OP_UUU_W
, H4
, H4
, saddu32
)
2080 RVVCALL(OPIVX2_RM
, vsaddu_vx_d
, OP_UUU_D
, H8
, H8
, saddu64
)
2081 GEN_VEXT_VX_RM(vsaddu_vx_b
, 1, 1)
2082 GEN_VEXT_VX_RM(vsaddu_vx_h
, 2, 2)
2083 GEN_VEXT_VX_RM(vsaddu_vx_w
, 4, 4)
2084 GEN_VEXT_VX_RM(vsaddu_vx_d
, 8, 8)
2086 static inline int8_t sadd8(CPURISCVState
*env
, int vxrm
, int8_t a
, int8_t b
)
2089 if ((res
^ a
) & (res
^ b
) & INT8_MIN
) {
2090 res
= a
> 0 ? INT8_MAX
: INT8_MIN
;
2096 static inline int16_t sadd16(CPURISCVState
*env
, int vxrm
, int16_t a
, int16_t b
)
2098 int16_t res
= a
+ b
;
2099 if ((res
^ a
) & (res
^ b
) & INT16_MIN
) {
2100 res
= a
> 0 ? INT16_MAX
: INT16_MIN
;
2106 static inline int32_t sadd32(CPURISCVState
*env
, int vxrm
, int32_t a
, int32_t b
)
2108 int32_t res
= a
+ b
;
2109 if ((res
^ a
) & (res
^ b
) & INT32_MIN
) {
2110 res
= a
> 0 ? INT32_MAX
: INT32_MIN
;
2116 static inline int64_t sadd64(CPURISCVState
*env
, int vxrm
, int64_t a
, int64_t b
)
2118 int64_t res
= a
+ b
;
2119 if ((res
^ a
) & (res
^ b
) & INT64_MIN
) {
2120 res
= a
> 0 ? INT64_MAX
: INT64_MIN
;
2126 RVVCALL(OPIVV2_RM
, vsadd_vv_b
, OP_SSS_B
, H1
, H1
, H1
, sadd8
)
2127 RVVCALL(OPIVV2_RM
, vsadd_vv_h
, OP_SSS_H
, H2
, H2
, H2
, sadd16
)
2128 RVVCALL(OPIVV2_RM
, vsadd_vv_w
, OP_SSS_W
, H4
, H4
, H4
, sadd32
)
2129 RVVCALL(OPIVV2_RM
, vsadd_vv_d
, OP_SSS_D
, H8
, H8
, H8
, sadd64
)
2130 GEN_VEXT_VV_RM(vsadd_vv_b
, 1, 1)
2131 GEN_VEXT_VV_RM(vsadd_vv_h
, 2, 2)
2132 GEN_VEXT_VV_RM(vsadd_vv_w
, 4, 4)
2133 GEN_VEXT_VV_RM(vsadd_vv_d
, 8, 8)
2135 RVVCALL(OPIVX2_RM
, vsadd_vx_b
, OP_SSS_B
, H1
, H1
, sadd8
)
2136 RVVCALL(OPIVX2_RM
, vsadd_vx_h
, OP_SSS_H
, H2
, H2
, sadd16
)
2137 RVVCALL(OPIVX2_RM
, vsadd_vx_w
, OP_SSS_W
, H4
, H4
, sadd32
)
2138 RVVCALL(OPIVX2_RM
, vsadd_vx_d
, OP_SSS_D
, H8
, H8
, sadd64
)
2139 GEN_VEXT_VX_RM(vsadd_vx_b
, 1, 1)
2140 GEN_VEXT_VX_RM(vsadd_vx_h
, 2, 2)
2141 GEN_VEXT_VX_RM(vsadd_vx_w
, 4, 4)
2142 GEN_VEXT_VX_RM(vsadd_vx_d
, 8, 8)
2144 static inline uint8_t ssubu8(CPURISCVState
*env
, int vxrm
, uint8_t a
, uint8_t b
)
2146 uint8_t res
= a
- b
;
2154 static inline uint16_t ssubu16(CPURISCVState
*env
, int vxrm
, uint16_t a
,
2157 uint16_t res
= a
- b
;
2165 static inline uint32_t ssubu32(CPURISCVState
*env
, int vxrm
, uint32_t a
,
2168 uint32_t res
= a
- b
;
2176 static inline uint64_t ssubu64(CPURISCVState
*env
, int vxrm
, uint64_t a
,
2179 uint64_t res
= a
- b
;
2187 RVVCALL(OPIVV2_RM
, vssubu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, ssubu8
)
2188 RVVCALL(OPIVV2_RM
, vssubu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, ssubu16
)
2189 RVVCALL(OPIVV2_RM
, vssubu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, ssubu32
)
2190 RVVCALL(OPIVV2_RM
, vssubu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, ssubu64
)
2191 GEN_VEXT_VV_RM(vssubu_vv_b
, 1, 1)
2192 GEN_VEXT_VV_RM(vssubu_vv_h
, 2, 2)
2193 GEN_VEXT_VV_RM(vssubu_vv_w
, 4, 4)
2194 GEN_VEXT_VV_RM(vssubu_vv_d
, 8, 8)
2196 RVVCALL(OPIVX2_RM
, vssubu_vx_b
, OP_UUU_B
, H1
, H1
, ssubu8
)
2197 RVVCALL(OPIVX2_RM
, vssubu_vx_h
, OP_UUU_H
, H2
, H2
, ssubu16
)
2198 RVVCALL(OPIVX2_RM
, vssubu_vx_w
, OP_UUU_W
, H4
, H4
, ssubu32
)
2199 RVVCALL(OPIVX2_RM
, vssubu_vx_d
, OP_UUU_D
, H8
, H8
, ssubu64
)
2200 GEN_VEXT_VX_RM(vssubu_vx_b
, 1, 1)
2201 GEN_VEXT_VX_RM(vssubu_vx_h
, 2, 2)
2202 GEN_VEXT_VX_RM(vssubu_vx_w
, 4, 4)
2203 GEN_VEXT_VX_RM(vssubu_vx_d
, 8, 8)
2205 static inline int8_t ssub8(CPURISCVState
*env
, int vxrm
, int8_t a
, int8_t b
)
2208 if ((res
^ a
) & (a
^ b
) & INT8_MIN
) {
2209 res
= a
>= 0 ? INT8_MAX
: INT8_MIN
;
2215 static inline int16_t ssub16(CPURISCVState
*env
, int vxrm
, int16_t a
, int16_t b
)
2217 int16_t res
= a
- b
;
2218 if ((res
^ a
) & (a
^ b
) & INT16_MIN
) {
2219 res
= a
>= 0 ? INT16_MAX
: INT16_MIN
;
2225 static inline int32_t ssub32(CPURISCVState
*env
, int vxrm
, int32_t a
, int32_t b
)
2227 int32_t res
= a
- b
;
2228 if ((res
^ a
) & (a
^ b
) & INT32_MIN
) {
2229 res
= a
>= 0 ? INT32_MAX
: INT32_MIN
;
2235 static inline int64_t ssub64(CPURISCVState
*env
, int vxrm
, int64_t a
, int64_t b
)
2237 int64_t res
= a
- b
;
2238 if ((res
^ a
) & (a
^ b
) & INT64_MIN
) {
2239 res
= a
>= 0 ? INT64_MAX
: INT64_MIN
;
2245 RVVCALL(OPIVV2_RM
, vssub_vv_b
, OP_SSS_B
, H1
, H1
, H1
, ssub8
)
2246 RVVCALL(OPIVV2_RM
, vssub_vv_h
, OP_SSS_H
, H2
, H2
, H2
, ssub16
)
2247 RVVCALL(OPIVV2_RM
, vssub_vv_w
, OP_SSS_W
, H4
, H4
, H4
, ssub32
)
2248 RVVCALL(OPIVV2_RM
, vssub_vv_d
, OP_SSS_D
, H8
, H8
, H8
, ssub64
)
2249 GEN_VEXT_VV_RM(vssub_vv_b
, 1, 1)
2250 GEN_VEXT_VV_RM(vssub_vv_h
, 2, 2)
2251 GEN_VEXT_VV_RM(vssub_vv_w
, 4, 4)
2252 GEN_VEXT_VV_RM(vssub_vv_d
, 8, 8)
2254 RVVCALL(OPIVX2_RM
, vssub_vx_b
, OP_SSS_B
, H1
, H1
, ssub8
)
2255 RVVCALL(OPIVX2_RM
, vssub_vx_h
, OP_SSS_H
, H2
, H2
, ssub16
)
2256 RVVCALL(OPIVX2_RM
, vssub_vx_w
, OP_SSS_W
, H4
, H4
, ssub32
)
2257 RVVCALL(OPIVX2_RM
, vssub_vx_d
, OP_SSS_D
, H8
, H8
, ssub64
)
2258 GEN_VEXT_VX_RM(vssub_vx_b
, 1, 1)
2259 GEN_VEXT_VX_RM(vssub_vx_h
, 2, 2)
2260 GEN_VEXT_VX_RM(vssub_vx_w
, 4, 4)
2261 GEN_VEXT_VX_RM(vssub_vx_d
, 8, 8)
/* Vector Single-Width Averaging Add and Subtract */

/*
 * Compute the rounding increment (0 or 1) for discarding the low 'shift'
 * bits of v, under fixed-point rounding mode vxrm:
 *   0 (rnu): round-to-nearest-up, 1 (rne): round-to-nearest-even,
 *   2 (rdn): truncate, 3 (rod): round-to-odd ("jam").
 * NOTE(review): branch structure around the rne shift==1 case was
 * reconstructed from a garbled extraction; verify against upstream.
 */
static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
{
    uint8_t d = extract64(v, shift, 1);     /* bit that becomes the LSB */
    uint8_t d1;
    uint64_t D1, D2;

    if (shift == 0 || shift > 64) {
        return 0;
    }

    d1 = extract64(v, shift - 1, 1);        /* highest discarded bit */
    D1 = extract64(v, 0, shift);            /* all discarded bits */
    if (vxrm == 0) { /* round-to-nearest-up (add +0.5 LSB) */
        return d1;
    } else if (vxrm == 1) { /* round-to-nearest-even */
        if (shift > 1) {
            D2 = extract64(v, 0, shift - 1);
            return d1 & ((D2 != 0) | d);
        } else {
            return d1 & d;
        }
    } else if (vxrm == 3) { /* round-to-odd (OR bits into LSB, aka "jam") */
        return !d & (D1 != 0);
    }
    return 0; /* round-down (truncate) */
}
2291 static inline int32_t aadd32(CPURISCVState
*env
, int vxrm
, int32_t a
, int32_t b
)
2293 int64_t res
= (int64_t)a
+ b
;
2294 uint8_t round
= get_round(vxrm
, res
, 1);
2296 return (res
>> 1) + round
;
2299 static inline int64_t aadd64(CPURISCVState
*env
, int vxrm
, int64_t a
, int64_t b
)
2301 int64_t res
= a
+ b
;
2302 uint8_t round
= get_round(vxrm
, res
, 1);
2303 int64_t over
= (res
^ a
) & (res
^ b
) & INT64_MIN
;
2305 /* With signed overflow, bit 64 is inverse of bit 63. */
2306 return ((res
>> 1) ^ over
) + round
;
2309 RVVCALL(OPIVV2_RM
, vaadd_vv_b
, OP_SSS_B
, H1
, H1
, H1
, aadd32
)
2310 RVVCALL(OPIVV2_RM
, vaadd_vv_h
, OP_SSS_H
, H2
, H2
, H2
, aadd32
)
2311 RVVCALL(OPIVV2_RM
, vaadd_vv_w
, OP_SSS_W
, H4
, H4
, H4
, aadd32
)
2312 RVVCALL(OPIVV2_RM
, vaadd_vv_d
, OP_SSS_D
, H8
, H8
, H8
, aadd64
)
2313 GEN_VEXT_VV_RM(vaadd_vv_b
, 1, 1)
2314 GEN_VEXT_VV_RM(vaadd_vv_h
, 2, 2)
2315 GEN_VEXT_VV_RM(vaadd_vv_w
, 4, 4)
2316 GEN_VEXT_VV_RM(vaadd_vv_d
, 8, 8)
2318 RVVCALL(OPIVX2_RM
, vaadd_vx_b
, OP_SSS_B
, H1
, H1
, aadd32
)
2319 RVVCALL(OPIVX2_RM
, vaadd_vx_h
, OP_SSS_H
, H2
, H2
, aadd32
)
2320 RVVCALL(OPIVX2_RM
, vaadd_vx_w
, OP_SSS_W
, H4
, H4
, aadd32
)
2321 RVVCALL(OPIVX2_RM
, vaadd_vx_d
, OP_SSS_D
, H8
, H8
, aadd64
)
2322 GEN_VEXT_VX_RM(vaadd_vx_b
, 1, 1)
2323 GEN_VEXT_VX_RM(vaadd_vx_h
, 2, 2)
2324 GEN_VEXT_VX_RM(vaadd_vx_w
, 4, 4)
2325 GEN_VEXT_VX_RM(vaadd_vx_d
, 8, 8)
2327 static inline uint32_t aaddu32(CPURISCVState
*env
, int vxrm
,
2328 uint32_t a
, uint32_t b
)
2330 uint64_t res
= (uint64_t)a
+ b
;
2331 uint8_t round
= get_round(vxrm
, res
, 1);
2333 return (res
>> 1) + round
;
2336 static inline uint64_t aaddu64(CPURISCVState
*env
, int vxrm
,
2337 uint64_t a
, uint64_t b
)
2339 uint64_t res
= a
+ b
;
2340 uint8_t round
= get_round(vxrm
, res
, 1);
2341 uint64_t over
= (uint64_t)(res
< a
) << 63;
2343 return ((res
>> 1) | over
) + round
;
2346 RVVCALL(OPIVV2_RM
, vaaddu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, aaddu32
)
2347 RVVCALL(OPIVV2_RM
, vaaddu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, aaddu32
)
2348 RVVCALL(OPIVV2_RM
, vaaddu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, aaddu32
)
2349 RVVCALL(OPIVV2_RM
, vaaddu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, aaddu64
)
2350 GEN_VEXT_VV_RM(vaaddu_vv_b
, 1, 1)
2351 GEN_VEXT_VV_RM(vaaddu_vv_h
, 2, 2)
2352 GEN_VEXT_VV_RM(vaaddu_vv_w
, 4, 4)
2353 GEN_VEXT_VV_RM(vaaddu_vv_d
, 8, 8)
2355 RVVCALL(OPIVX2_RM
, vaaddu_vx_b
, OP_UUU_B
, H1
, H1
, aaddu32
)
2356 RVVCALL(OPIVX2_RM
, vaaddu_vx_h
, OP_UUU_H
, H2
, H2
, aaddu32
)
2357 RVVCALL(OPIVX2_RM
, vaaddu_vx_w
, OP_UUU_W
, H4
, H4
, aaddu32
)
2358 RVVCALL(OPIVX2_RM
, vaaddu_vx_d
, OP_UUU_D
, H8
, H8
, aaddu64
)
2359 GEN_VEXT_VX_RM(vaaddu_vx_b
, 1, 1)
2360 GEN_VEXT_VX_RM(vaaddu_vx_h
, 2, 2)
2361 GEN_VEXT_VX_RM(vaaddu_vx_w
, 4, 4)
2362 GEN_VEXT_VX_RM(vaaddu_vx_d
, 8, 8)
2364 static inline int32_t asub32(CPURISCVState
*env
, int vxrm
, int32_t a
, int32_t b
)
2366 int64_t res
= (int64_t)a
- b
;
2367 uint8_t round
= get_round(vxrm
, res
, 1);
2369 return (res
>> 1) + round
;
2372 static inline int64_t asub64(CPURISCVState
*env
, int vxrm
, int64_t a
, int64_t b
)
2374 int64_t res
= (int64_t)a
- b
;
2375 uint8_t round
= get_round(vxrm
, res
, 1);
2376 int64_t over
= (res
^ a
) & (a
^ b
) & INT64_MIN
;
2378 /* With signed overflow, bit 64 is inverse of bit 63. */
2379 return ((res
>> 1) ^ over
) + round
;
2382 RVVCALL(OPIVV2_RM
, vasub_vv_b
, OP_SSS_B
, H1
, H1
, H1
, asub32
)
2383 RVVCALL(OPIVV2_RM
, vasub_vv_h
, OP_SSS_H
, H2
, H2
, H2
, asub32
)
2384 RVVCALL(OPIVV2_RM
, vasub_vv_w
, OP_SSS_W
, H4
, H4
, H4
, asub32
)
2385 RVVCALL(OPIVV2_RM
, vasub_vv_d
, OP_SSS_D
, H8
, H8
, H8
, asub64
)
2386 GEN_VEXT_VV_RM(vasub_vv_b
, 1, 1)
2387 GEN_VEXT_VV_RM(vasub_vv_h
, 2, 2)
2388 GEN_VEXT_VV_RM(vasub_vv_w
, 4, 4)
2389 GEN_VEXT_VV_RM(vasub_vv_d
, 8, 8)
2391 RVVCALL(OPIVX2_RM
, vasub_vx_b
, OP_SSS_B
, H1
, H1
, asub32
)
2392 RVVCALL(OPIVX2_RM
, vasub_vx_h
, OP_SSS_H
, H2
, H2
, asub32
)
2393 RVVCALL(OPIVX2_RM
, vasub_vx_w
, OP_SSS_W
, H4
, H4
, asub32
)
2394 RVVCALL(OPIVX2_RM
, vasub_vx_d
, OP_SSS_D
, H8
, H8
, asub64
)
2395 GEN_VEXT_VX_RM(vasub_vx_b
, 1, 1)
2396 GEN_VEXT_VX_RM(vasub_vx_h
, 2, 2)
2397 GEN_VEXT_VX_RM(vasub_vx_w
, 4, 4)
2398 GEN_VEXT_VX_RM(vasub_vx_d
, 8, 8)
2400 static inline uint32_t asubu32(CPURISCVState
*env
, int vxrm
,
2401 uint32_t a
, uint32_t b
)
2403 int64_t res
= (int64_t)a
- b
;
2404 uint8_t round
= get_round(vxrm
, res
, 1);
2406 return (res
>> 1) + round
;
2409 static inline uint64_t asubu64(CPURISCVState
*env
, int vxrm
,
2410 uint64_t a
, uint64_t b
)
2412 uint64_t res
= (uint64_t)a
- b
;
2413 uint8_t round
= get_round(vxrm
, res
, 1);
2414 uint64_t over
= (uint64_t)(res
> a
) << 63;
2416 return ((res
>> 1) | over
) + round
;
2419 RVVCALL(OPIVV2_RM
, vasubu_vv_b
, OP_UUU_B
, H1
, H1
, H1
, asubu32
)
2420 RVVCALL(OPIVV2_RM
, vasubu_vv_h
, OP_UUU_H
, H2
, H2
, H2
, asubu32
)
2421 RVVCALL(OPIVV2_RM
, vasubu_vv_w
, OP_UUU_W
, H4
, H4
, H4
, asubu32
)
2422 RVVCALL(OPIVV2_RM
, vasubu_vv_d
, OP_UUU_D
, H8
, H8
, H8
, asubu64
)
2423 GEN_VEXT_VV_RM(vasubu_vv_b
, 1, 1)
2424 GEN_VEXT_VV_RM(vasubu_vv_h
, 2, 2)
2425 GEN_VEXT_VV_RM(vasubu_vv_w
, 4, 4)
2426 GEN_VEXT_VV_RM(vasubu_vv_d
, 8, 8)
2428 RVVCALL(OPIVX2_RM
, vasubu_vx_b
, OP_UUU_B
, H1
, H1
, asubu32
)
2429 RVVCALL(OPIVX2_RM
, vasubu_vx_h
, OP_UUU_H
, H2
, H2
, asubu32
)
2430 RVVCALL(OPIVX2_RM
, vasubu_vx_w
, OP_UUU_W
, H4
, H4
, asubu32
)
2431 RVVCALL(OPIVX2_RM
, vasubu_vx_d
, OP_UUU_D
, H8
, H8
, asubu64
)
2432 GEN_VEXT_VX_RM(vasubu_vx_b
, 1, 1)
2433 GEN_VEXT_VX_RM(vasubu_vx_h
, 2, 2)
2434 GEN_VEXT_VX_RM(vasubu_vx_w
, 4, 4)
2435 GEN_VEXT_VX_RM(vasubu_vx_d
, 8, 8)
2437 /* Vector Single-Width Fractional Multiply with Rounding and Saturation */
2438 static inline int8_t vsmul8(CPURISCVState
*env
, int vxrm
, int8_t a
, int8_t b
)
2443 res
= (int16_t)a
* (int16_t)b
;
2444 round
= get_round(vxrm
, res
, 7);
2445 res
= (res
>> 7) + round
;
2447 if (res
> INT8_MAX
) {
2450 } else if (res
< INT8_MIN
) {
2458 static int16_t vsmul16(CPURISCVState
*env
, int vxrm
, int16_t a
, int16_t b
)
2463 res
= (int32_t)a
* (int32_t)b
;
2464 round
= get_round(vxrm
, res
, 15);
2465 res
= (res
>> 15) + round
;
2467 if (res
> INT16_MAX
) {
2470 } else if (res
< INT16_MIN
) {
2478 static int32_t vsmul32(CPURISCVState
*env
, int vxrm
, int32_t a
, int32_t b
)
2483 res
= (int64_t)a
* (int64_t)b
;
2484 round
= get_round(vxrm
, res
, 31);
2485 res
= (res
>> 31) + round
;
2487 if (res
> INT32_MAX
) {
2490 } else if (res
< INT32_MIN
) {
2498 static int64_t vsmul64(CPURISCVState
*env
, int vxrm
, int64_t a
, int64_t b
)
2501 uint64_t hi_64
, lo_64
;
2504 if (a
== INT64_MIN
&& b
== INT64_MIN
) {
2509 muls64(&lo_64
, &hi_64
, a
, b
);
2510 round
= get_round(vxrm
, lo_64
, 63);
2512 * Cannot overflow, as there are always
2513 * 2 sign bits after multiply.
2515 res
= (hi_64
<< 1) | (lo_64
>> 63);
2517 if (res
== INT64_MAX
) {
2526 RVVCALL(OPIVV2_RM
, vsmul_vv_b
, OP_SSS_B
, H1
, H1
, H1
, vsmul8
)
2527 RVVCALL(OPIVV2_RM
, vsmul_vv_h
, OP_SSS_H
, H2
, H2
, H2
, vsmul16
)
2528 RVVCALL(OPIVV2_RM
, vsmul_vv_w
, OP_SSS_W
, H4
, H4
, H4
, vsmul32
)
2529 RVVCALL(OPIVV2_RM
, vsmul_vv_d
, OP_SSS_D
, H8
, H8
, H8
, vsmul64
)
2530 GEN_VEXT_VV_RM(vsmul_vv_b
, 1, 1)
2531 GEN_VEXT_VV_RM(vsmul_vv_h
, 2, 2)
2532 GEN_VEXT_VV_RM(vsmul_vv_w
, 4, 4)
2533 GEN_VEXT_VV_RM(vsmul_vv_d
, 8, 8)
2535 RVVCALL(OPIVX2_RM
, vsmul_vx_b
, OP_SSS_B
, H1
, H1
, vsmul8
)
2536 RVVCALL(OPIVX2_RM
, vsmul_vx_h
, OP_SSS_H
, H2
, H2
, vsmul16
)
2537 RVVCALL(OPIVX2_RM
, vsmul_vx_w
, OP_SSS_W
, H4
, H4
, vsmul32
)
2538 RVVCALL(OPIVX2_RM
, vsmul_vx_d
, OP_SSS_D
, H8
, H8
, vsmul64
)
2539 GEN_VEXT_VX_RM(vsmul_vx_b
, 1, 1)
2540 GEN_VEXT_VX_RM(vsmul_vx_h
, 2, 2)
2541 GEN_VEXT_VX_RM(vsmul_vx_w
, 4, 4)
2542 GEN_VEXT_VX_RM(vsmul_vx_d
, 8, 8)
2544 /* Vector Single-Width Scaling Shift Instructions */
2545 static inline uint8_t
2546 vssrl8(CPURISCVState
*env
, int vxrm
, uint8_t a
, uint8_t b
)
2548 uint8_t round
, shift
= b
& 0x7;
2551 round
= get_round(vxrm
, a
, shift
);
2552 res
= (a
>> shift
) + round
;
2555 static inline uint16_t
2556 vssrl16(CPURISCVState
*env
, int vxrm
, uint16_t a
, uint16_t b
)
2558 uint8_t round
, shift
= b
& 0xf;
2561 round
= get_round(vxrm
, a
, shift
);
2562 res
= (a
>> shift
) + round
;
2565 static inline uint32_t
2566 vssrl32(CPURISCVState
*env
, int vxrm
, uint32_t a
, uint32_t b
)
2568 uint8_t round
, shift
= b
& 0x1f;
2571 round
= get_round(vxrm
, a
, shift
);
2572 res
= (a
>> shift
) + round
;
2575 static inline uint64_t
2576 vssrl64(CPURISCVState
*env
, int vxrm
, uint64_t a
, uint64_t b
)
2578 uint8_t round
, shift
= b
& 0x3f;
2581 round
= get_round(vxrm
, a
, shift
);
2582 res
= (a
>> shift
) + round
;
2585 RVVCALL(OPIVV2_RM
, vssrl_vv_b
, OP_UUU_B
, H1
, H1
, H1
, vssrl8
)
2586 RVVCALL(OPIVV2_RM
, vssrl_vv_h
, OP_UUU_H
, H2
, H2
, H2
, vssrl16
)
2587 RVVCALL(OPIVV2_RM
, vssrl_vv_w
, OP_UUU_W
, H4
, H4
, H4
, vssrl32
)
2588 RVVCALL(OPIVV2_RM
, vssrl_vv_d
, OP_UUU_D
, H8
, H8
, H8
, vssrl64
)
2589 GEN_VEXT_VV_RM(vssrl_vv_b
, 1, 1)
2590 GEN_VEXT_VV_RM(vssrl_vv_h
, 2, 2)
2591 GEN_VEXT_VV_RM(vssrl_vv_w
, 4, 4)
2592 GEN_VEXT_VV_RM(vssrl_vv_d
, 8, 8)
2594 RVVCALL(OPIVX2_RM
, vssrl_vx_b
, OP_UUU_B
, H1
, H1
, vssrl8
)
2595 RVVCALL(OPIVX2_RM
, vssrl_vx_h
, OP_UUU_H
, H2
, H2
, vssrl16
)
2596 RVVCALL(OPIVX2_RM
, vssrl_vx_w
, OP_UUU_W
, H4
, H4
, vssrl32
)
2597 RVVCALL(OPIVX2_RM
, vssrl_vx_d
, OP_UUU_D
, H8
, H8
, vssrl64
)
2598 GEN_VEXT_VX_RM(vssrl_vx_b
, 1, 1)
2599 GEN_VEXT_VX_RM(vssrl_vx_h
, 2, 2)
2600 GEN_VEXT_VX_RM(vssrl_vx_w
, 4, 4)
2601 GEN_VEXT_VX_RM(vssrl_vx_d
, 8, 8)
2603 static inline int8_t
2604 vssra8(CPURISCVState
*env
, int vxrm
, int8_t a
, int8_t b
)
2606 uint8_t round
, shift
= b
& 0x7;
2609 round
= get_round(vxrm
, a
, shift
);
2610 res
= (a
>> shift
) + round
;
2613 static inline int16_t
2614 vssra16(CPURISCVState
*env
, int vxrm
, int16_t a
, int16_t b
)
2616 uint8_t round
, shift
= b
& 0xf;
2619 round
= get_round(vxrm
, a
, shift
);
2620 res
= (a
>> shift
) + round
;
2623 static inline int32_t
2624 vssra32(CPURISCVState
*env
, int vxrm
, int32_t a
, int32_t b
)
2626 uint8_t round
, shift
= b
& 0x1f;
2629 round
= get_round(vxrm
, a
, shift
);
2630 res
= (a
>> shift
) + round
;
2633 static inline int64_t
2634 vssra64(CPURISCVState
*env
, int vxrm
, int64_t a
, int64_t b
)
2636 uint8_t round
, shift
= b
& 0x3f;
2639 round
= get_round(vxrm
, a
, shift
);
2640 res
= (a
>> shift
) + round
;
2644 RVVCALL(OPIVV2_RM
, vssra_vv_b
, OP_SSS_B
, H1
, H1
, H1
, vssra8
)
2645 RVVCALL(OPIVV2_RM
, vssra_vv_h
, OP_SSS_H
, H2
, H2
, H2
, vssra16
)
2646 RVVCALL(OPIVV2_RM
, vssra_vv_w
, OP_SSS_W
, H4
, H4
, H4
, vssra32
)
2647 RVVCALL(OPIVV2_RM
, vssra_vv_d
, OP_SSS_D
, H8
, H8
, H8
, vssra64
)
2648 GEN_VEXT_VV_RM(vssra_vv_b
, 1, 1)
2649 GEN_VEXT_VV_RM(vssra_vv_h
, 2, 2)
2650 GEN_VEXT_VV_RM(vssra_vv_w
, 4, 4)
2651 GEN_VEXT_VV_RM(vssra_vv_d
, 8, 8)
2653 RVVCALL(OPIVX2_RM
, vssra_vx_b
, OP_SSS_B
, H1
, H1
, vssra8
)
2654 RVVCALL(OPIVX2_RM
, vssra_vx_h
, OP_SSS_H
, H2
, H2
, vssra16
)
2655 RVVCALL(OPIVX2_RM
, vssra_vx_w
, OP_SSS_W
, H4
, H4
, vssra32
)
2656 RVVCALL(OPIVX2_RM
, vssra_vx_d
, OP_SSS_D
, H8
, H8
, vssra64
)
2657 GEN_VEXT_VX_RM(vssra_vx_b
, 1, 1)
2658 GEN_VEXT_VX_RM(vssra_vx_h
, 2, 2)
2659 GEN_VEXT_VX_RM(vssra_vx_w
, 4, 4)
2660 GEN_VEXT_VX_RM(vssra_vx_d
, 8, 8)
2662 /* Vector Narrowing Fixed-Point Clip Instructions */
2663 static inline int8_t
2664 vnclip8(CPURISCVState
*env
, int vxrm
, int16_t a
, int8_t b
)
2666 uint8_t round
, shift
= b
& 0xf;
2669 round
= get_round(vxrm
, a
, shift
);
2670 res
= (a
>> shift
) + round
;
2671 if (res
> INT8_MAX
) {
2674 } else if (res
< INT8_MIN
) {
2682 static inline int16_t
2683 vnclip16(CPURISCVState
*env
, int vxrm
, int32_t a
, int16_t b
)
2685 uint8_t round
, shift
= b
& 0x1f;
2688 round
= get_round(vxrm
, a
, shift
);
2689 res
= (a
>> shift
) + round
;
2690 if (res
> INT16_MAX
) {
2693 } else if (res
< INT16_MIN
) {
2701 static inline int32_t
2702 vnclip32(CPURISCVState
*env
, int vxrm
, int64_t a
, int32_t b
)
2704 uint8_t round
, shift
= b
& 0x3f;
2707 round
= get_round(vxrm
, a
, shift
);
2708 res
= (a
>> shift
) + round
;
2709 if (res
> INT32_MAX
) {
2712 } else if (res
< INT32_MIN
) {
2720 RVVCALL(OPIVV2_RM
, vnclip_wv_b
, NOP_SSS_B
, H1
, H2
, H1
, vnclip8
)
2721 RVVCALL(OPIVV2_RM
, vnclip_wv_h
, NOP_SSS_H
, H2
, H4
, H2
, vnclip16
)
2722 RVVCALL(OPIVV2_RM
, vnclip_wv_w
, NOP_SSS_W
, H4
, H8
, H4
, vnclip32
)
2723 GEN_VEXT_VV_RM(vnclip_wv_b
, 1, 1)
2724 GEN_VEXT_VV_RM(vnclip_wv_h
, 2, 2)
2725 GEN_VEXT_VV_RM(vnclip_wv_w
, 4, 4)
2727 RVVCALL(OPIVX2_RM
, vnclip_wx_b
, NOP_SSS_B
, H1
, H2
, vnclip8
)
2728 RVVCALL(OPIVX2_RM
, vnclip_wx_h
, NOP_SSS_H
, H2
, H4
, vnclip16
)
2729 RVVCALL(OPIVX2_RM
, vnclip_wx_w
, NOP_SSS_W
, H4
, H8
, vnclip32
)
2730 GEN_VEXT_VX_RM(vnclip_wx_b
, 1, 1)
2731 GEN_VEXT_VX_RM(vnclip_wx_h
, 2, 2)
2732 GEN_VEXT_VX_RM(vnclip_wx_w
, 4, 4)
2734 static inline uint8_t
2735 vnclipu8(CPURISCVState
*env
, int vxrm
, uint16_t a
, uint8_t b
)
2737 uint8_t round
, shift
= b
& 0xf;
2740 round
= get_round(vxrm
, a
, shift
);
2741 res
= (a
>> shift
) + round
;
2742 if (res
> UINT8_MAX
) {
2750 static inline uint16_t
2751 vnclipu16(CPURISCVState
*env
, int vxrm
, uint32_t a
, uint16_t b
)
2753 uint8_t round
, shift
= b
& 0x1f;
2756 round
= get_round(vxrm
, a
, shift
);
2757 res
= (a
>> shift
) + round
;
2758 if (res
> UINT16_MAX
) {
2766 static inline uint32_t
2767 vnclipu32(CPURISCVState
*env
, int vxrm
, uint64_t a
, uint32_t b
)
2769 uint8_t round
, shift
= b
& 0x3f;
2772 round
= get_round(vxrm
, a
, shift
);
2773 res
= (a
>> shift
) + round
;
2774 if (res
> UINT32_MAX
) {
2782 RVVCALL(OPIVV2_RM
, vnclipu_wv_b
, NOP_UUU_B
, H1
, H2
, H1
, vnclipu8
)
2783 RVVCALL(OPIVV2_RM
, vnclipu_wv_h
, NOP_UUU_H
, H2
, H4
, H2
, vnclipu16
)
2784 RVVCALL(OPIVV2_RM
, vnclipu_wv_w
, NOP_UUU_W
, H4
, H8
, H4
, vnclipu32
)
2785 GEN_VEXT_VV_RM(vnclipu_wv_b
, 1, 1)
2786 GEN_VEXT_VV_RM(vnclipu_wv_h
, 2, 2)
2787 GEN_VEXT_VV_RM(vnclipu_wv_w
, 4, 4)
2789 RVVCALL(OPIVX2_RM
, vnclipu_wx_b
, NOP_UUU_B
, H1
, H2
, vnclipu8
)
2790 RVVCALL(OPIVX2_RM
, vnclipu_wx_h
, NOP_UUU_H
, H2
, H4
, vnclipu16
)
2791 RVVCALL(OPIVX2_RM
, vnclipu_wx_w
, NOP_UUU_W
, H4
, H8
, vnclipu32
)
2792 GEN_VEXT_VX_RM(vnclipu_wx_b
, 1, 1)
2793 GEN_VEXT_VX_RM(vnclipu_wx_h
, 2, 2)
2794 GEN_VEXT_VX_RM(vnclipu_wx_w
, 4, 4)
2797 *** Vector Float Point Arithmetic Instructions
2799 /* Vector Single-Width Floating-Point Add/Subtract Instructions */
2800 #define OPFVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
2801 static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \
2802 CPURISCVState *env) \
2804 TX1 s1 = *((T1 *)vs1 + HS1(i)); \
2805 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
2806 *((TD *)vd + HD(i)) = OP(s2, s1, &env->fp_status); \
2809 #define GEN_VEXT_VV_ENV(NAME, ESZ, DSZ) \
2810 void HELPER(NAME)(void *vd, void *v0, void *vs1, \
2811 void *vs2, CPURISCVState *env, \
2814 uint32_t vm = vext_vm(desc); \
2815 uint32_t vl = env->vl; \
2818 for (i = env->vstart; i < vl; i++) { \
2819 if (!vm && !vext_elem_mask(v0, i)) { \
2822 do_##NAME(vd, vs1, vs2, i, env); \
2827 RVVCALL(OPFVV2
, vfadd_vv_h
, OP_UUU_H
, H2
, H2
, H2
, float16_add
)
2828 RVVCALL(OPFVV2
, vfadd_vv_w
, OP_UUU_W
, H4
, H4
, H4
, float32_add
)
2829 RVVCALL(OPFVV2
, vfadd_vv_d
, OP_UUU_D
, H8
, H8
, H8
, float64_add
)
2830 GEN_VEXT_VV_ENV(vfadd_vv_h
, 2, 2)
2831 GEN_VEXT_VV_ENV(vfadd_vv_w
, 4, 4)
2832 GEN_VEXT_VV_ENV(vfadd_vv_d
, 8, 8)
2834 #define OPFVF2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
2835 static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
2836 CPURISCVState *env) \
2838 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
2839 *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, &env->fp_status);\
2842 #define GEN_VEXT_VF(NAME, ESZ, DSZ) \
2843 void HELPER(NAME)(void *vd, void *v0, uint64_t s1, \
2844 void *vs2, CPURISCVState *env, \
2847 uint32_t vm = vext_vm(desc); \
2848 uint32_t vl = env->vl; \
2851 for (i = env->vstart; i < vl; i++) { \
2852 if (!vm && !vext_elem_mask(v0, i)) { \
2855 do_##NAME(vd, s1, vs2, i, env); \
2860 RVVCALL(OPFVF2
, vfadd_vf_h
, OP_UUU_H
, H2
, H2
, float16_add
)
2861 RVVCALL(OPFVF2
, vfadd_vf_w
, OP_UUU_W
, H4
, H4
, float32_add
)
2862 RVVCALL(OPFVF2
, vfadd_vf_d
, OP_UUU_D
, H8
, H8
, float64_add
)
2863 GEN_VEXT_VF(vfadd_vf_h
, 2, 2)
2864 GEN_VEXT_VF(vfadd_vf_w
, 4, 4)
2865 GEN_VEXT_VF(vfadd_vf_d
, 8, 8)
2867 RVVCALL(OPFVV2
, vfsub_vv_h
, OP_UUU_H
, H2
, H2
, H2
, float16_sub
)
2868 RVVCALL(OPFVV2
, vfsub_vv_w
, OP_UUU_W
, H4
, H4
, H4
, float32_sub
)
2869 RVVCALL(OPFVV2
, vfsub_vv_d
, OP_UUU_D
, H8
, H8
, H8
, float64_sub
)
2870 GEN_VEXT_VV_ENV(vfsub_vv_h
, 2, 2)
2871 GEN_VEXT_VV_ENV(vfsub_vv_w
, 4, 4)
2872 GEN_VEXT_VV_ENV(vfsub_vv_d
, 8, 8)
2873 RVVCALL(OPFVF2
, vfsub_vf_h
, OP_UUU_H
, H2
, H2
, float16_sub
)
2874 RVVCALL(OPFVF2
, vfsub_vf_w
, OP_UUU_W
, H4
, H4
, float32_sub
)
2875 RVVCALL(OPFVF2
, vfsub_vf_d
, OP_UUU_D
, H8
, H8
, float64_sub
)
2876 GEN_VEXT_VF(vfsub_vf_h
, 2, 2)
2877 GEN_VEXT_VF(vfsub_vf_w
, 4, 4)
2878 GEN_VEXT_VF(vfsub_vf_d
, 8, 8)
2880 static uint16_t float16_rsub(uint16_t a
, uint16_t b
, float_status
*s
)
2882 return float16_sub(b
, a
, s
);
2885 static uint32_t float32_rsub(uint32_t a
, uint32_t b
, float_status
*s
)
2887 return float32_sub(b
, a
, s
);
2890 static uint64_t float64_rsub(uint64_t a
, uint64_t b
, float_status
*s
)
2892 return float64_sub(b
, a
, s
);
2895 RVVCALL(OPFVF2
, vfrsub_vf_h
, OP_UUU_H
, H2
, H2
, float16_rsub
)
2896 RVVCALL(OPFVF2
, vfrsub_vf_w
, OP_UUU_W
, H4
, H4
, float32_rsub
)
2897 RVVCALL(OPFVF2
, vfrsub_vf_d
, OP_UUU_D
, H8
, H8
, float64_rsub
)
2898 GEN_VEXT_VF(vfrsub_vf_h
, 2, 2)
2899 GEN_VEXT_VF(vfrsub_vf_w
, 4, 4)
2900 GEN_VEXT_VF(vfrsub_vf_d
, 8, 8)
2902 /* Vector Widening Floating-Point Add/Subtract Instructions */
2903 static uint32_t vfwadd16(uint16_t a
, uint16_t b
, float_status
*s
)
2905 return float32_add(float16_to_float32(a
, true, s
),
2906 float16_to_float32(b
, true, s
), s
);
2909 static uint64_t vfwadd32(uint32_t a
, uint32_t b
, float_status
*s
)
2911 return float64_add(float32_to_float64(a
, s
),
2912 float32_to_float64(b
, s
), s
);
2916 RVVCALL(OPFVV2
, vfwadd_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, vfwadd16
)
2917 RVVCALL(OPFVV2
, vfwadd_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, vfwadd32
)
2918 GEN_VEXT_VV_ENV(vfwadd_vv_h
, 2, 4)
2919 GEN_VEXT_VV_ENV(vfwadd_vv_w
, 4, 8)
2920 RVVCALL(OPFVF2
, vfwadd_vf_h
, WOP_UUU_H
, H4
, H2
, vfwadd16
)
2921 RVVCALL(OPFVF2
, vfwadd_vf_w
, WOP_UUU_W
, H8
, H4
, vfwadd32
)
2922 GEN_VEXT_VF(vfwadd_vf_h
, 2, 4)
2923 GEN_VEXT_VF(vfwadd_vf_w
, 4, 8)
2925 static uint32_t vfwsub16(uint16_t a
, uint16_t b
, float_status
*s
)
2927 return float32_sub(float16_to_float32(a
, true, s
),
2928 float16_to_float32(b
, true, s
), s
);
2931 static uint64_t vfwsub32(uint32_t a
, uint32_t b
, float_status
*s
)
2933 return float64_sub(float32_to_float64(a
, s
),
2934 float32_to_float64(b
, s
), s
);
2938 RVVCALL(OPFVV2
, vfwsub_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, vfwsub16
)
2939 RVVCALL(OPFVV2
, vfwsub_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, vfwsub32
)
2940 GEN_VEXT_VV_ENV(vfwsub_vv_h
, 2, 4)
2941 GEN_VEXT_VV_ENV(vfwsub_vv_w
, 4, 8)
2942 RVVCALL(OPFVF2
, vfwsub_vf_h
, WOP_UUU_H
, H4
, H2
, vfwsub16
)
2943 RVVCALL(OPFVF2
, vfwsub_vf_w
, WOP_UUU_W
, H8
, H4
, vfwsub32
)
2944 GEN_VEXT_VF(vfwsub_vf_h
, 2, 4)
2945 GEN_VEXT_VF(vfwsub_vf_w
, 4, 8)
2947 static uint32_t vfwaddw16(uint32_t a
, uint16_t b
, float_status
*s
)
2949 return float32_add(a
, float16_to_float32(b
, true, s
), s
);
2952 static uint64_t vfwaddw32(uint64_t a
, uint32_t b
, float_status
*s
)
2954 return float64_add(a
, float32_to_float64(b
, s
), s
);
2957 RVVCALL(OPFVV2
, vfwadd_wv_h
, WOP_WUUU_H
, H4
, H2
, H2
, vfwaddw16
)
2958 RVVCALL(OPFVV2
, vfwadd_wv_w
, WOP_WUUU_W
, H8
, H4
, H4
, vfwaddw32
)
2959 GEN_VEXT_VV_ENV(vfwadd_wv_h
, 2, 4)
2960 GEN_VEXT_VV_ENV(vfwadd_wv_w
, 4, 8)
2961 RVVCALL(OPFVF2
, vfwadd_wf_h
, WOP_WUUU_H
, H4
, H2
, vfwaddw16
)
2962 RVVCALL(OPFVF2
, vfwadd_wf_w
, WOP_WUUU_W
, H8
, H4
, vfwaddw32
)
2963 GEN_VEXT_VF(vfwadd_wf_h
, 2, 4)
2964 GEN_VEXT_VF(vfwadd_wf_w
, 4, 8)
2966 static uint32_t vfwsubw16(uint32_t a
, uint16_t b
, float_status
*s
)
2968 return float32_sub(a
, float16_to_float32(b
, true, s
), s
);
2971 static uint64_t vfwsubw32(uint64_t a
, uint32_t b
, float_status
*s
)
2973 return float64_sub(a
, float32_to_float64(b
, s
), s
);
2976 RVVCALL(OPFVV2
, vfwsub_wv_h
, WOP_WUUU_H
, H4
, H2
, H2
, vfwsubw16
)
2977 RVVCALL(OPFVV2
, vfwsub_wv_w
, WOP_WUUU_W
, H8
, H4
, H4
, vfwsubw32
)
2978 GEN_VEXT_VV_ENV(vfwsub_wv_h
, 2, 4)
2979 GEN_VEXT_VV_ENV(vfwsub_wv_w
, 4, 8)
2980 RVVCALL(OPFVF2
, vfwsub_wf_h
, WOP_WUUU_H
, H4
, H2
, vfwsubw16
)
2981 RVVCALL(OPFVF2
, vfwsub_wf_w
, WOP_WUUU_W
, H8
, H4
, vfwsubw32
)
2982 GEN_VEXT_VF(vfwsub_wf_h
, 2, 4)
2983 GEN_VEXT_VF(vfwsub_wf_w
, 4, 8)
2985 /* Vector Single-Width Floating-Point Multiply/Divide Instructions */
2986 RVVCALL(OPFVV2
, vfmul_vv_h
, OP_UUU_H
, H2
, H2
, H2
, float16_mul
)
2987 RVVCALL(OPFVV2
, vfmul_vv_w
, OP_UUU_W
, H4
, H4
, H4
, float32_mul
)
2988 RVVCALL(OPFVV2
, vfmul_vv_d
, OP_UUU_D
, H8
, H8
, H8
, float64_mul
)
2989 GEN_VEXT_VV_ENV(vfmul_vv_h
, 2, 2)
2990 GEN_VEXT_VV_ENV(vfmul_vv_w
, 4, 4)
2991 GEN_VEXT_VV_ENV(vfmul_vv_d
, 8, 8)
2992 RVVCALL(OPFVF2
, vfmul_vf_h
, OP_UUU_H
, H2
, H2
, float16_mul
)
2993 RVVCALL(OPFVF2
, vfmul_vf_w
, OP_UUU_W
, H4
, H4
, float32_mul
)
2994 RVVCALL(OPFVF2
, vfmul_vf_d
, OP_UUU_D
, H8
, H8
, float64_mul
)
2995 GEN_VEXT_VF(vfmul_vf_h
, 2, 2)
2996 GEN_VEXT_VF(vfmul_vf_w
, 4, 4)
2997 GEN_VEXT_VF(vfmul_vf_d
, 8, 8)
2999 RVVCALL(OPFVV2
, vfdiv_vv_h
, OP_UUU_H
, H2
, H2
, H2
, float16_div
)
3000 RVVCALL(OPFVV2
, vfdiv_vv_w
, OP_UUU_W
, H4
, H4
, H4
, float32_div
)
3001 RVVCALL(OPFVV2
, vfdiv_vv_d
, OP_UUU_D
, H8
, H8
, H8
, float64_div
)
3002 GEN_VEXT_VV_ENV(vfdiv_vv_h
, 2, 2)
3003 GEN_VEXT_VV_ENV(vfdiv_vv_w
, 4, 4)
3004 GEN_VEXT_VV_ENV(vfdiv_vv_d
, 8, 8)
3005 RVVCALL(OPFVF2
, vfdiv_vf_h
, OP_UUU_H
, H2
, H2
, float16_div
)
3006 RVVCALL(OPFVF2
, vfdiv_vf_w
, OP_UUU_W
, H4
, H4
, float32_div
)
3007 RVVCALL(OPFVF2
, vfdiv_vf_d
, OP_UUU_D
, H8
, H8
, float64_div
)
3008 GEN_VEXT_VF(vfdiv_vf_h
, 2, 2)
3009 GEN_VEXT_VF(vfdiv_vf_w
, 4, 4)
3010 GEN_VEXT_VF(vfdiv_vf_d
, 8, 8)
3012 static uint16_t float16_rdiv(uint16_t a
, uint16_t b
, float_status
*s
)
3014 return float16_div(b
, a
, s
);
3017 static uint32_t float32_rdiv(uint32_t a
, uint32_t b
, float_status
*s
)
3019 return float32_div(b
, a
, s
);
3022 static uint64_t float64_rdiv(uint64_t a
, uint64_t b
, float_status
*s
)
3024 return float64_div(b
, a
, s
);
3027 RVVCALL(OPFVF2
, vfrdiv_vf_h
, OP_UUU_H
, H2
, H2
, float16_rdiv
)
3028 RVVCALL(OPFVF2
, vfrdiv_vf_w
, OP_UUU_W
, H4
, H4
, float32_rdiv
)
3029 RVVCALL(OPFVF2
, vfrdiv_vf_d
, OP_UUU_D
, H8
, H8
, float64_rdiv
)
3030 GEN_VEXT_VF(vfrdiv_vf_h
, 2, 2)
3031 GEN_VEXT_VF(vfrdiv_vf_w
, 4, 4)
3032 GEN_VEXT_VF(vfrdiv_vf_d
, 8, 8)
3034 /* Vector Widening Floating-Point Multiply */
3035 static uint32_t vfwmul16(uint16_t a
, uint16_t b
, float_status
*s
)
3037 return float32_mul(float16_to_float32(a
, true, s
),
3038 float16_to_float32(b
, true, s
), s
);
3041 static uint64_t vfwmul32(uint32_t a
, uint32_t b
, float_status
*s
)
3043 return float64_mul(float32_to_float64(a
, s
),
3044 float32_to_float64(b
, s
), s
);
3047 RVVCALL(OPFVV2
, vfwmul_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, vfwmul16
)
3048 RVVCALL(OPFVV2
, vfwmul_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, vfwmul32
)
3049 GEN_VEXT_VV_ENV(vfwmul_vv_h
, 2, 4)
3050 GEN_VEXT_VV_ENV(vfwmul_vv_w
, 4, 8)
3051 RVVCALL(OPFVF2
, vfwmul_vf_h
, WOP_UUU_H
, H4
, H2
, vfwmul16
)
3052 RVVCALL(OPFVF2
, vfwmul_vf_w
, WOP_UUU_W
, H8
, H4
, vfwmul32
)
3053 GEN_VEXT_VF(vfwmul_vf_h
, 2, 4)
3054 GEN_VEXT_VF(vfwmul_vf_w
, 4, 8)
3056 /* Vector Single-Width Floating-Point Fused Multiply-Add Instructions */
3057 #define OPFVV3(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP) \
3058 static void do_##NAME(void *vd, void *vs1, void *vs2, int i, \
3059 CPURISCVState *env) \
3061 TX1 s1 = *((T1 *)vs1 + HS1(i)); \
3062 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
3063 TD d = *((TD *)vd + HD(i)); \
3064 *((TD *)vd + HD(i)) = OP(s2, s1, d, &env->fp_status); \
3067 static uint16_t fmacc16(uint16_t a
, uint16_t b
, uint16_t d
, float_status
*s
)
3069 return float16_muladd(a
, b
, d
, 0, s
);
3072 static uint32_t fmacc32(uint32_t a
, uint32_t b
, uint32_t d
, float_status
*s
)
3074 return float32_muladd(a
, b
, d
, 0, s
);
3077 static uint64_t fmacc64(uint64_t a
, uint64_t b
, uint64_t d
, float_status
*s
)
3079 return float64_muladd(a
, b
, d
, 0, s
);
3082 RVVCALL(OPFVV3
, vfmacc_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fmacc16
)
3083 RVVCALL(OPFVV3
, vfmacc_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fmacc32
)
3084 RVVCALL(OPFVV3
, vfmacc_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fmacc64
)
3085 GEN_VEXT_VV_ENV(vfmacc_vv_h
, 2, 2)
3086 GEN_VEXT_VV_ENV(vfmacc_vv_w
, 4, 4)
3087 GEN_VEXT_VV_ENV(vfmacc_vv_d
, 8, 8)
3089 #define OPFVF3(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP) \
3090 static void do_##NAME(void *vd, uint64_t s1, void *vs2, int i, \
3091 CPURISCVState *env) \
3093 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
3094 TD d = *((TD *)vd + HD(i)); \
3095 *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1, d, &env->fp_status);\
3098 RVVCALL(OPFVF3
, vfmacc_vf_h
, OP_UUU_H
, H2
, H2
, fmacc16
)
3099 RVVCALL(OPFVF3
, vfmacc_vf_w
, OP_UUU_W
, H4
, H4
, fmacc32
)
3100 RVVCALL(OPFVF3
, vfmacc_vf_d
, OP_UUU_D
, H8
, H8
, fmacc64
)
3101 GEN_VEXT_VF(vfmacc_vf_h
, 2, 2)
3102 GEN_VEXT_VF(vfmacc_vf_w
, 4, 4)
3103 GEN_VEXT_VF(vfmacc_vf_d
, 8, 8)
3105 static uint16_t fnmacc16(uint16_t a
, uint16_t b
, uint16_t d
, float_status
*s
)
3107 return float16_muladd(a
, b
, d
,
3108 float_muladd_negate_c
| float_muladd_negate_product
, s
);
3111 static uint32_t fnmacc32(uint32_t a
, uint32_t b
, uint32_t d
, float_status
*s
)
3113 return float32_muladd(a
, b
, d
,
3114 float_muladd_negate_c
| float_muladd_negate_product
, s
);
3117 static uint64_t fnmacc64(uint64_t a
, uint64_t b
, uint64_t d
, float_status
*s
)
3119 return float64_muladd(a
, b
, d
,
3120 float_muladd_negate_c
| float_muladd_negate_product
, s
);
3123 RVVCALL(OPFVV3
, vfnmacc_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fnmacc16
)
3124 RVVCALL(OPFVV3
, vfnmacc_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fnmacc32
)
3125 RVVCALL(OPFVV3
, vfnmacc_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fnmacc64
)
3126 GEN_VEXT_VV_ENV(vfnmacc_vv_h
, 2, 2)
3127 GEN_VEXT_VV_ENV(vfnmacc_vv_w
, 4, 4)
3128 GEN_VEXT_VV_ENV(vfnmacc_vv_d
, 8, 8)
3129 RVVCALL(OPFVF3
, vfnmacc_vf_h
, OP_UUU_H
, H2
, H2
, fnmacc16
)
3130 RVVCALL(OPFVF3
, vfnmacc_vf_w
, OP_UUU_W
, H4
, H4
, fnmacc32
)
3131 RVVCALL(OPFVF3
, vfnmacc_vf_d
, OP_UUU_D
, H8
, H8
, fnmacc64
)
3132 GEN_VEXT_VF(vfnmacc_vf_h
, 2, 2)
3133 GEN_VEXT_VF(vfnmacc_vf_w
, 4, 4)
3134 GEN_VEXT_VF(vfnmacc_vf_d
, 8, 8)
3136 static uint16_t fmsac16(uint16_t a
, uint16_t b
, uint16_t d
, float_status
*s
)
3138 return float16_muladd(a
, b
, d
, float_muladd_negate_c
, s
);
3141 static uint32_t fmsac32(uint32_t a
, uint32_t b
, uint32_t d
, float_status
*s
)
3143 return float32_muladd(a
, b
, d
, float_muladd_negate_c
, s
);
3146 static uint64_t fmsac64(uint64_t a
, uint64_t b
, uint64_t d
, float_status
*s
)
3148 return float64_muladd(a
, b
, d
, float_muladd_negate_c
, s
);
3151 RVVCALL(OPFVV3
, vfmsac_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fmsac16
)
3152 RVVCALL(OPFVV3
, vfmsac_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fmsac32
)
3153 RVVCALL(OPFVV3
, vfmsac_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fmsac64
)
3154 GEN_VEXT_VV_ENV(vfmsac_vv_h
, 2, 2)
3155 GEN_VEXT_VV_ENV(vfmsac_vv_w
, 4, 4)
3156 GEN_VEXT_VV_ENV(vfmsac_vv_d
, 8, 8)
3157 RVVCALL(OPFVF3
, vfmsac_vf_h
, OP_UUU_H
, H2
, H2
, fmsac16
)
3158 RVVCALL(OPFVF3
, vfmsac_vf_w
, OP_UUU_W
, H4
, H4
, fmsac32
)
3159 RVVCALL(OPFVF3
, vfmsac_vf_d
, OP_UUU_D
, H8
, H8
, fmsac64
)
3160 GEN_VEXT_VF(vfmsac_vf_h
, 2, 2)
3161 GEN_VEXT_VF(vfmsac_vf_w
, 4, 4)
3162 GEN_VEXT_VF(vfmsac_vf_d
, 8, 8)
3164 static uint16_t fnmsac16(uint16_t a
, uint16_t b
, uint16_t d
, float_status
*s
)
3166 return float16_muladd(a
, b
, d
, float_muladd_negate_product
, s
);
3169 static uint32_t fnmsac32(uint32_t a
, uint32_t b
, uint32_t d
, float_status
*s
)
3171 return float32_muladd(a
, b
, d
, float_muladd_negate_product
, s
);
3174 static uint64_t fnmsac64(uint64_t a
, uint64_t b
, uint64_t d
, float_status
*s
)
3176 return float64_muladd(a
, b
, d
, float_muladd_negate_product
, s
);
3179 RVVCALL(OPFVV3
, vfnmsac_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fnmsac16
)
3180 RVVCALL(OPFVV3
, vfnmsac_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fnmsac32
)
3181 RVVCALL(OPFVV3
, vfnmsac_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fnmsac64
)
3182 GEN_VEXT_VV_ENV(vfnmsac_vv_h
, 2, 2)
3183 GEN_VEXT_VV_ENV(vfnmsac_vv_w
, 4, 4)
3184 GEN_VEXT_VV_ENV(vfnmsac_vv_d
, 8, 8)
3185 RVVCALL(OPFVF3
, vfnmsac_vf_h
, OP_UUU_H
, H2
, H2
, fnmsac16
)
3186 RVVCALL(OPFVF3
, vfnmsac_vf_w
, OP_UUU_W
, H4
, H4
, fnmsac32
)
3187 RVVCALL(OPFVF3
, vfnmsac_vf_d
, OP_UUU_D
, H8
, H8
, fnmsac64
)
3188 GEN_VEXT_VF(vfnmsac_vf_h
, 2, 2)
3189 GEN_VEXT_VF(vfnmsac_vf_w
, 4, 4)
3190 GEN_VEXT_VF(vfnmsac_vf_d
, 8, 8)
3192 static uint16_t fmadd16(uint16_t a
, uint16_t b
, uint16_t d
, float_status
*s
)
3194 return float16_muladd(d
, b
, a
, 0, s
);
3197 static uint32_t fmadd32(uint32_t a
, uint32_t b
, uint32_t d
, float_status
*s
)
3199 return float32_muladd(d
, b
, a
, 0, s
);
3202 static uint64_t fmadd64(uint64_t a
, uint64_t b
, uint64_t d
, float_status
*s
)
3204 return float64_muladd(d
, b
, a
, 0, s
);
3207 RVVCALL(OPFVV3
, vfmadd_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fmadd16
)
3208 RVVCALL(OPFVV3
, vfmadd_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fmadd32
)
3209 RVVCALL(OPFVV3
, vfmadd_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fmadd64
)
3210 GEN_VEXT_VV_ENV(vfmadd_vv_h
, 2, 2)
3211 GEN_VEXT_VV_ENV(vfmadd_vv_w
, 4, 4)
3212 GEN_VEXT_VV_ENV(vfmadd_vv_d
, 8, 8)
3213 RVVCALL(OPFVF3
, vfmadd_vf_h
, OP_UUU_H
, H2
, H2
, fmadd16
)
3214 RVVCALL(OPFVF3
, vfmadd_vf_w
, OP_UUU_W
, H4
, H4
, fmadd32
)
3215 RVVCALL(OPFVF3
, vfmadd_vf_d
, OP_UUU_D
, H8
, H8
, fmadd64
)
3216 GEN_VEXT_VF(vfmadd_vf_h
, 2, 2)
3217 GEN_VEXT_VF(vfmadd_vf_w
, 4, 4)
3218 GEN_VEXT_VF(vfmadd_vf_d
, 8, 8)
3220 static uint16_t fnmadd16(uint16_t a
, uint16_t b
, uint16_t d
, float_status
*s
)
3222 return float16_muladd(d
, b
, a
,
3223 float_muladd_negate_c
| float_muladd_negate_product
, s
);
3226 static uint32_t fnmadd32(uint32_t a
, uint32_t b
, uint32_t d
, float_status
*s
)
3228 return float32_muladd(d
, b
, a
,
3229 float_muladd_negate_c
| float_muladd_negate_product
, s
);
3232 static uint64_t fnmadd64(uint64_t a
, uint64_t b
, uint64_t d
, float_status
*s
)
3234 return float64_muladd(d
, b
, a
,
3235 float_muladd_negate_c
| float_muladd_negate_product
, s
);
3238 RVVCALL(OPFVV3
, vfnmadd_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fnmadd16
)
3239 RVVCALL(OPFVV3
, vfnmadd_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fnmadd32
)
3240 RVVCALL(OPFVV3
, vfnmadd_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fnmadd64
)
3241 GEN_VEXT_VV_ENV(vfnmadd_vv_h
, 2, 2)
3242 GEN_VEXT_VV_ENV(vfnmadd_vv_w
, 4, 4)
3243 GEN_VEXT_VV_ENV(vfnmadd_vv_d
, 8, 8)
3244 RVVCALL(OPFVF3
, vfnmadd_vf_h
, OP_UUU_H
, H2
, H2
, fnmadd16
)
3245 RVVCALL(OPFVF3
, vfnmadd_vf_w
, OP_UUU_W
, H4
, H4
, fnmadd32
)
3246 RVVCALL(OPFVF3
, vfnmadd_vf_d
, OP_UUU_D
, H8
, H8
, fnmadd64
)
3247 GEN_VEXT_VF(vfnmadd_vf_h
, 2, 2)
3248 GEN_VEXT_VF(vfnmadd_vf_w
, 4, 4)
3249 GEN_VEXT_VF(vfnmadd_vf_d
, 8, 8)
3251 static uint16_t fmsub16(uint16_t a
, uint16_t b
, uint16_t d
, float_status
*s
)
3253 return float16_muladd(d
, b
, a
, float_muladd_negate_c
, s
);
3256 static uint32_t fmsub32(uint32_t a
, uint32_t b
, uint32_t d
, float_status
*s
)
3258 return float32_muladd(d
, b
, a
, float_muladd_negate_c
, s
);
3261 static uint64_t fmsub64(uint64_t a
, uint64_t b
, uint64_t d
, float_status
*s
)
3263 return float64_muladd(d
, b
, a
, float_muladd_negate_c
, s
);
3266 RVVCALL(OPFVV3
, vfmsub_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fmsub16
)
3267 RVVCALL(OPFVV3
, vfmsub_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fmsub32
)
3268 RVVCALL(OPFVV3
, vfmsub_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fmsub64
)
3269 GEN_VEXT_VV_ENV(vfmsub_vv_h
, 2, 2)
3270 GEN_VEXT_VV_ENV(vfmsub_vv_w
, 4, 4)
3271 GEN_VEXT_VV_ENV(vfmsub_vv_d
, 8, 8)
3272 RVVCALL(OPFVF3
, vfmsub_vf_h
, OP_UUU_H
, H2
, H2
, fmsub16
)
3273 RVVCALL(OPFVF3
, vfmsub_vf_w
, OP_UUU_W
, H4
, H4
, fmsub32
)
3274 RVVCALL(OPFVF3
, vfmsub_vf_d
, OP_UUU_D
, H8
, H8
, fmsub64
)
3275 GEN_VEXT_VF(vfmsub_vf_h
, 2, 2)
3276 GEN_VEXT_VF(vfmsub_vf_w
, 4, 4)
3277 GEN_VEXT_VF(vfmsub_vf_d
, 8, 8)
3279 static uint16_t fnmsub16(uint16_t a
, uint16_t b
, uint16_t d
, float_status
*s
)
3281 return float16_muladd(d
, b
, a
, float_muladd_negate_product
, s
);
3284 static uint32_t fnmsub32(uint32_t a
, uint32_t b
, uint32_t d
, float_status
*s
)
3286 return float32_muladd(d
, b
, a
, float_muladd_negate_product
, s
);
3289 static uint64_t fnmsub64(uint64_t a
, uint64_t b
, uint64_t d
, float_status
*s
)
3291 return float64_muladd(d
, b
, a
, float_muladd_negate_product
, s
);
3294 RVVCALL(OPFVV3
, vfnmsub_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fnmsub16
)
3295 RVVCALL(OPFVV3
, vfnmsub_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fnmsub32
)
3296 RVVCALL(OPFVV3
, vfnmsub_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fnmsub64
)
3297 GEN_VEXT_VV_ENV(vfnmsub_vv_h
, 2, 2)
3298 GEN_VEXT_VV_ENV(vfnmsub_vv_w
, 4, 4)
3299 GEN_VEXT_VV_ENV(vfnmsub_vv_d
, 8, 8)
3300 RVVCALL(OPFVF3
, vfnmsub_vf_h
, OP_UUU_H
, H2
, H2
, fnmsub16
)
3301 RVVCALL(OPFVF3
, vfnmsub_vf_w
, OP_UUU_W
, H4
, H4
, fnmsub32
)
3302 RVVCALL(OPFVF3
, vfnmsub_vf_d
, OP_UUU_D
, H8
, H8
, fnmsub64
)
3303 GEN_VEXT_VF(vfnmsub_vf_h
, 2, 2)
3304 GEN_VEXT_VF(vfnmsub_vf_w
, 4, 4)
3305 GEN_VEXT_VF(vfnmsub_vf_d
, 8, 8)
3307 /* Vector Widening Floating-Point Fused Multiply-Add Instructions */
3308 static uint32_t fwmacc16(uint16_t a
, uint16_t b
, uint32_t d
, float_status
*s
)
3310 return float32_muladd(float16_to_float32(a
, true, s
),
3311 float16_to_float32(b
, true, s
), d
, 0, s
);
3314 static uint64_t fwmacc32(uint32_t a
, uint32_t b
, uint64_t d
, float_status
*s
)
3316 return float64_muladd(float32_to_float64(a
, s
),
3317 float32_to_float64(b
, s
), d
, 0, s
);
3320 RVVCALL(OPFVV3
, vfwmacc_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, fwmacc16
)
3321 RVVCALL(OPFVV3
, vfwmacc_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, fwmacc32
)
3322 GEN_VEXT_VV_ENV(vfwmacc_vv_h
, 2, 4)
3323 GEN_VEXT_VV_ENV(vfwmacc_vv_w
, 4, 8)
3324 RVVCALL(OPFVF3
, vfwmacc_vf_h
, WOP_UUU_H
, H4
, H2
, fwmacc16
)
3325 RVVCALL(OPFVF3
, vfwmacc_vf_w
, WOP_UUU_W
, H8
, H4
, fwmacc32
)
3326 GEN_VEXT_VF(vfwmacc_vf_h
, 2, 4)
3327 GEN_VEXT_VF(vfwmacc_vf_w
, 4, 8)
3329 static uint32_t fwnmacc16(uint16_t a
, uint16_t b
, uint32_t d
, float_status
*s
)
3331 return float32_muladd(float16_to_float32(a
, true, s
),
3332 float16_to_float32(b
, true, s
), d
,
3333 float_muladd_negate_c
| float_muladd_negate_product
, s
);
3336 static uint64_t fwnmacc32(uint32_t a
, uint32_t b
, uint64_t d
, float_status
*s
)
3338 return float64_muladd(float32_to_float64(a
, s
),
3339 float32_to_float64(b
, s
), d
,
3340 float_muladd_negate_c
| float_muladd_negate_product
, s
);
3343 RVVCALL(OPFVV3
, vfwnmacc_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, fwnmacc16
)
3344 RVVCALL(OPFVV3
, vfwnmacc_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, fwnmacc32
)
3345 GEN_VEXT_VV_ENV(vfwnmacc_vv_h
, 2, 4)
3346 GEN_VEXT_VV_ENV(vfwnmacc_vv_w
, 4, 8)
3347 RVVCALL(OPFVF3
, vfwnmacc_vf_h
, WOP_UUU_H
, H4
, H2
, fwnmacc16
)
3348 RVVCALL(OPFVF3
, vfwnmacc_vf_w
, WOP_UUU_W
, H8
, H4
, fwnmacc32
)
3349 GEN_VEXT_VF(vfwnmacc_vf_h
, 2, 4)
3350 GEN_VEXT_VF(vfwnmacc_vf_w
, 4, 8)
3352 static uint32_t fwmsac16(uint16_t a
, uint16_t b
, uint32_t d
, float_status
*s
)
3354 return float32_muladd(float16_to_float32(a
, true, s
),
3355 float16_to_float32(b
, true, s
), d
,
3356 float_muladd_negate_c
, s
);
3359 static uint64_t fwmsac32(uint32_t a
, uint32_t b
, uint64_t d
, float_status
*s
)
3361 return float64_muladd(float32_to_float64(a
, s
),
3362 float32_to_float64(b
, s
), d
,
3363 float_muladd_negate_c
, s
);
3366 RVVCALL(OPFVV3
, vfwmsac_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, fwmsac16
)
3367 RVVCALL(OPFVV3
, vfwmsac_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, fwmsac32
)
3368 GEN_VEXT_VV_ENV(vfwmsac_vv_h
, 2, 4)
3369 GEN_VEXT_VV_ENV(vfwmsac_vv_w
, 4, 8)
3370 RVVCALL(OPFVF3
, vfwmsac_vf_h
, WOP_UUU_H
, H4
, H2
, fwmsac16
)
3371 RVVCALL(OPFVF3
, vfwmsac_vf_w
, WOP_UUU_W
, H8
, H4
, fwmsac32
)
3372 GEN_VEXT_VF(vfwmsac_vf_h
, 2, 4)
3373 GEN_VEXT_VF(vfwmsac_vf_w
, 4, 8)
3375 static uint32_t fwnmsac16(uint16_t a
, uint16_t b
, uint32_t d
, float_status
*s
)
3377 return float32_muladd(float16_to_float32(a
, true, s
),
3378 float16_to_float32(b
, true, s
), d
,
3379 float_muladd_negate_product
, s
);
3382 static uint64_t fwnmsac32(uint32_t a
, uint32_t b
, uint64_t d
, float_status
*s
)
3384 return float64_muladd(float32_to_float64(a
, s
),
3385 float32_to_float64(b
, s
), d
,
3386 float_muladd_negate_product
, s
);
3389 RVVCALL(OPFVV3
, vfwnmsac_vv_h
, WOP_UUU_H
, H4
, H2
, H2
, fwnmsac16
)
3390 RVVCALL(OPFVV3
, vfwnmsac_vv_w
, WOP_UUU_W
, H8
, H4
, H4
, fwnmsac32
)
3391 GEN_VEXT_VV_ENV(vfwnmsac_vv_h
, 2, 4)
3392 GEN_VEXT_VV_ENV(vfwnmsac_vv_w
, 4, 8)
3393 RVVCALL(OPFVF3
, vfwnmsac_vf_h
, WOP_UUU_H
, H4
, H2
, fwnmsac16
)
3394 RVVCALL(OPFVF3
, vfwnmsac_vf_w
, WOP_UUU_W
, H8
, H4
, fwnmsac32
)
3395 GEN_VEXT_VF(vfwnmsac_vf_h
, 2, 4)
3396 GEN_VEXT_VF(vfwnmsac_vf_w
, 4, 8)
3398 /* Vector Floating-Point Square-Root Instruction */
3400 #define OP_UU_H uint16_t, uint16_t, uint16_t
3401 #define OP_UU_W uint32_t, uint32_t, uint32_t
3402 #define OP_UU_D uint64_t, uint64_t, uint64_t
3404 #define OPFVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
3405 static void do_##NAME(void *vd, void *vs2, int i, \
3406 CPURISCVState *env) \
3408 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
3409 *((TD *)vd + HD(i)) = OP(s2, &env->fp_status); \
3412 #define GEN_VEXT_V_ENV(NAME, ESZ, DSZ) \
3413 void HELPER(NAME)(void *vd, void *v0, void *vs2, \
3414 CPURISCVState *env, uint32_t desc) \
3416 uint32_t vm = vext_vm(desc); \
3417 uint32_t vl = env->vl; \
3423 for (i = env->vstart; i < vl; i++) { \
3424 if (!vm && !vext_elem_mask(v0, i)) { \
3427 do_##NAME(vd, vs2, i, env); \
3432 RVVCALL(OPFVV1
, vfsqrt_v_h
, OP_UU_H
, H2
, H2
, float16_sqrt
)
3433 RVVCALL(OPFVV1
, vfsqrt_v_w
, OP_UU_W
, H4
, H4
, float32_sqrt
)
3434 RVVCALL(OPFVV1
, vfsqrt_v_d
, OP_UU_D
, H8
, H8
, float64_sqrt
)
3435 GEN_VEXT_V_ENV(vfsqrt_v_h
, 2, 2)
3436 GEN_VEXT_V_ENV(vfsqrt_v_w
, 4, 4)
3437 GEN_VEXT_V_ENV(vfsqrt_v_d
, 8, 8)
3440 * Vector Floating-Point Reciprocal Square-Root Estimate Instruction
3442 * Adapted from riscv-v-spec recip.c:
3443 * https://github.com/riscv/riscv-v-spec/blob/master/recip.c
3445 static uint64_t frsqrt7(uint64_t f
, int exp_size
, int frac_size
)
3447 uint64_t sign
= extract64(f
, frac_size
+ exp_size
, 1);
3448 uint64_t exp
= extract64(f
, frac_size
, exp_size
);
3449 uint64_t frac
= extract64(f
, 0, frac_size
);
3451 const uint8_t lookup_table
[] = {
3452 52, 51, 50, 48, 47, 46, 44, 43,
3453 42, 41, 40, 39, 38, 36, 35, 34,
3454 33, 32, 31, 30, 30, 29, 28, 27,
3455 26, 25, 24, 23, 23, 22, 21, 20,
3456 19, 19, 18, 17, 16, 16, 15, 14,
3457 14, 13, 12, 12, 11, 10, 10, 9,
3458 9, 8, 7, 7, 6, 6, 5, 4,
3459 4, 3, 3, 2, 2, 1, 1, 0,
3460 127, 125, 123, 121, 119, 118, 116, 114,
3461 113, 111, 109, 108, 106, 105, 103, 102,
3462 100, 99, 97, 96, 95, 93, 92, 91,
3463 90, 88, 87, 86, 85, 84, 83, 82,
3464 80, 79, 78, 77, 76, 75, 74, 73,
3465 72, 71, 70, 70, 69, 68, 67, 66,
3466 65, 64, 63, 63, 62, 61, 60, 59,
3467 59, 58, 57, 56, 56, 55, 54, 53
3469 const int precision
= 7;
3471 if (exp
== 0 && frac
!= 0) { /* subnormal */
3472 /* Normalize the subnormal. */
3473 while (extract64(frac
, frac_size
- 1, 1) == 0) {
3478 frac
= (frac
<< 1) & MAKE_64BIT_MASK(0, frac_size
);
3481 int idx
= ((exp
& 1) << (precision
- 1)) |
3482 (frac
>> (frac_size
- precision
+ 1));
3483 uint64_t out_frac
= (uint64_t)(lookup_table
[idx
]) <<
3484 (frac_size
- precision
);
3485 uint64_t out_exp
= (3 * MAKE_64BIT_MASK(0, exp_size
- 1) + ~exp
) / 2;
3488 val
= deposit64(val
, 0, frac_size
, out_frac
);
3489 val
= deposit64(val
, frac_size
, exp_size
, out_exp
);
3490 val
= deposit64(val
, frac_size
+ exp_size
, 1, sign
);
3494 static float16
frsqrt7_h(float16 f
, float_status
*s
)
3496 int exp_size
= 5, frac_size
= 10;
3497 bool sign
= float16_is_neg(f
);
3500 * frsqrt7(sNaN) = canonical NaN
3501 * frsqrt7(-inf) = canonical NaN
3502 * frsqrt7(-normal) = canonical NaN
3503 * frsqrt7(-subnormal) = canonical NaN
3505 if (float16_is_signaling_nan(f
, s
) ||
3506 (float16_is_infinity(f
) && sign
) ||
3507 (float16_is_normal(f
) && sign
) ||
3508 (float16_is_zero_or_denormal(f
) && !float16_is_zero(f
) && sign
)) {
3509 s
->float_exception_flags
|= float_flag_invalid
;
3510 return float16_default_nan(s
);
3513 /* frsqrt7(qNaN) = canonical NaN */
3514 if (float16_is_quiet_nan(f
, s
)) {
3515 return float16_default_nan(s
);
3518 /* frsqrt7(+-0) = +-inf */
3519 if (float16_is_zero(f
)) {
3520 s
->float_exception_flags
|= float_flag_divbyzero
;
3521 return float16_set_sign(float16_infinity
, sign
);
3524 /* frsqrt7(+inf) = +0 */
3525 if (float16_is_infinity(f
) && !sign
) {
3526 return float16_set_sign(float16_zero
, sign
);
3529 /* +normal, +subnormal */
3530 uint64_t val
= frsqrt7(f
, exp_size
, frac_size
);
3531 return make_float16(val
);
3534 static float32
frsqrt7_s(float32 f
, float_status
*s
)
3536 int exp_size
= 8, frac_size
= 23;
3537 bool sign
= float32_is_neg(f
);
3540 * frsqrt7(sNaN) = canonical NaN
3541 * frsqrt7(-inf) = canonical NaN
3542 * frsqrt7(-normal) = canonical NaN
3543 * frsqrt7(-subnormal) = canonical NaN
3545 if (float32_is_signaling_nan(f
, s
) ||
3546 (float32_is_infinity(f
) && sign
) ||
3547 (float32_is_normal(f
) && sign
) ||
3548 (float32_is_zero_or_denormal(f
) && !float32_is_zero(f
) && sign
)) {
3549 s
->float_exception_flags
|= float_flag_invalid
;
3550 return float32_default_nan(s
);
3553 /* frsqrt7(qNaN) = canonical NaN */
3554 if (float32_is_quiet_nan(f
, s
)) {
3555 return float32_default_nan(s
);
3558 /* frsqrt7(+-0) = +-inf */
3559 if (float32_is_zero(f
)) {
3560 s
->float_exception_flags
|= float_flag_divbyzero
;
3561 return float32_set_sign(float32_infinity
, sign
);
3564 /* frsqrt7(+inf) = +0 */
3565 if (float32_is_infinity(f
) && !sign
) {
3566 return float32_set_sign(float32_zero
, sign
);
3569 /* +normal, +subnormal */
3570 uint64_t val
= frsqrt7(f
, exp_size
, frac_size
);
3571 return make_float32(val
);
3574 static float64
frsqrt7_d(float64 f
, float_status
*s
)
3576 int exp_size
= 11, frac_size
= 52;
3577 bool sign
= float64_is_neg(f
);
3580 * frsqrt7(sNaN) = canonical NaN
3581 * frsqrt7(-inf) = canonical NaN
3582 * frsqrt7(-normal) = canonical NaN
3583 * frsqrt7(-subnormal) = canonical NaN
3585 if (float64_is_signaling_nan(f
, s
) ||
3586 (float64_is_infinity(f
) && sign
) ||
3587 (float64_is_normal(f
) && sign
) ||
3588 (float64_is_zero_or_denormal(f
) && !float64_is_zero(f
) && sign
)) {
3589 s
->float_exception_flags
|= float_flag_invalid
;
3590 return float64_default_nan(s
);
3593 /* frsqrt7(qNaN) = canonical NaN */
3594 if (float64_is_quiet_nan(f
, s
)) {
3595 return float64_default_nan(s
);
3598 /* frsqrt7(+-0) = +-inf */
3599 if (float64_is_zero(f
)) {
3600 s
->float_exception_flags
|= float_flag_divbyzero
;
3601 return float64_set_sign(float64_infinity
, sign
);
3604 /* frsqrt7(+inf) = +0 */
3605 if (float64_is_infinity(f
) && !sign
) {
3606 return float64_set_sign(float64_zero
, sign
);
3609 /* +normal, +subnormal */
3610 uint64_t val
= frsqrt7(f
, exp_size
, frac_size
);
3611 return make_float64(val
);
3614 RVVCALL(OPFVV1
, vfrsqrt7_v_h
, OP_UU_H
, H2
, H2
, frsqrt7_h
)
3615 RVVCALL(OPFVV1
, vfrsqrt7_v_w
, OP_UU_W
, H4
, H4
, frsqrt7_s
)
3616 RVVCALL(OPFVV1
, vfrsqrt7_v_d
, OP_UU_D
, H8
, H8
, frsqrt7_d
)
3617 GEN_VEXT_V_ENV(vfrsqrt7_v_h
, 2, 2)
3618 GEN_VEXT_V_ENV(vfrsqrt7_v_w
, 4, 4)
3619 GEN_VEXT_V_ENV(vfrsqrt7_v_d
, 8, 8)
3622 * Vector Floating-Point Reciprocal Estimate Instruction
3624 * Adapted from riscv-v-spec recip.c:
3625 * https://github.com/riscv/riscv-v-spec/blob/master/recip.c
3627 static uint64_t frec7(uint64_t f
, int exp_size
, int frac_size
,
3630 uint64_t sign
= extract64(f
, frac_size
+ exp_size
, 1);
3631 uint64_t exp
= extract64(f
, frac_size
, exp_size
);
3632 uint64_t frac
= extract64(f
, 0, frac_size
);
3634 const uint8_t lookup_table
[] = {
3635 127, 125, 123, 121, 119, 117, 116, 114,
3636 112, 110, 109, 107, 105, 104, 102, 100,
3637 99, 97, 96, 94, 93, 91, 90, 88,
3638 87, 85, 84, 83, 81, 80, 79, 77,
3639 76, 75, 74, 72, 71, 70, 69, 68,
3640 66, 65, 64, 63, 62, 61, 60, 59,
3641 58, 57, 56, 55, 54, 53, 52, 51,
3642 50, 49, 48, 47, 46, 45, 44, 43,
3643 42, 41, 40, 40, 39, 38, 37, 36,
3644 35, 35, 34, 33, 32, 31, 31, 30,
3645 29, 28, 28, 27, 26, 25, 25, 24,
3646 23, 23, 22, 21, 21, 20, 19, 19,
3647 18, 17, 17, 16, 15, 15, 14, 14,
3648 13, 12, 12, 11, 11, 10, 9, 9,
3649 8, 8, 7, 7, 6, 5, 5, 4,
3650 4, 3, 3, 2, 2, 1, 1, 0
3652 const int precision
= 7;
3654 if (exp
== 0 && frac
!= 0) { /* subnormal */
3655 /* Normalize the subnormal. */
3656 while (extract64(frac
, frac_size
- 1, 1) == 0) {
3661 frac
= (frac
<< 1) & MAKE_64BIT_MASK(0, frac_size
);
3663 if (exp
!= 0 && exp
!= UINT64_MAX
) {
3665 * Overflow to inf or max value of same sign,
3666 * depending on sign and rounding mode.
3668 s
->float_exception_flags
|= (float_flag_inexact
|
3669 float_flag_overflow
);
3671 if ((s
->float_rounding_mode
== float_round_to_zero
) ||
3672 ((s
->float_rounding_mode
== float_round_down
) && !sign
) ||
3673 ((s
->float_rounding_mode
== float_round_up
) && sign
)) {
3674 /* Return greatest/negative finite value. */
3675 return (sign
<< (exp_size
+ frac_size
)) |
3676 (MAKE_64BIT_MASK(frac_size
, exp_size
) - 1);
3679 return (sign
<< (exp_size
+ frac_size
)) |
3680 MAKE_64BIT_MASK(frac_size
, exp_size
);
3685 int idx
= frac
>> (frac_size
- precision
);
3686 uint64_t out_frac
= (uint64_t)(lookup_table
[idx
]) <<
3687 (frac_size
- precision
);
3688 uint64_t out_exp
= 2 * MAKE_64BIT_MASK(0, exp_size
- 1) + ~exp
;
3690 if (out_exp
== 0 || out_exp
== UINT64_MAX
) {
3692 * The result is subnormal, but don't raise the underflow exception,
3693 * because there's no additional loss of precision.
3695 out_frac
= (out_frac
>> 1) | MAKE_64BIT_MASK(frac_size
- 1, 1);
3696 if (out_exp
== UINT64_MAX
) {
3703 val
= deposit64(val
, 0, frac_size
, out_frac
);
3704 val
= deposit64(val
, frac_size
, exp_size
, out_exp
);
3705 val
= deposit64(val
, frac_size
+ exp_size
, 1, sign
);
3709 static float16
frec7_h(float16 f
, float_status
*s
)
3711 int exp_size
= 5, frac_size
= 10;
3712 bool sign
= float16_is_neg(f
);
3714 /* frec7(+-inf) = +-0 */
3715 if (float16_is_infinity(f
)) {
3716 return float16_set_sign(float16_zero
, sign
);
3719 /* frec7(+-0) = +-inf */
3720 if (float16_is_zero(f
)) {
3721 s
->float_exception_flags
|= float_flag_divbyzero
;
3722 return float16_set_sign(float16_infinity
, sign
);
3725 /* frec7(sNaN) = canonical NaN */
3726 if (float16_is_signaling_nan(f
, s
)) {
3727 s
->float_exception_flags
|= float_flag_invalid
;
3728 return float16_default_nan(s
);
3731 /* frec7(qNaN) = canonical NaN */
3732 if (float16_is_quiet_nan(f
, s
)) {
3733 return float16_default_nan(s
);
3736 /* +-normal, +-subnormal */
3737 uint64_t val
= frec7(f
, exp_size
, frac_size
, s
);
3738 return make_float16(val
);
3741 static float32
frec7_s(float32 f
, float_status
*s
)
3743 int exp_size
= 8, frac_size
= 23;
3744 bool sign
= float32_is_neg(f
);
3746 /* frec7(+-inf) = +-0 */
3747 if (float32_is_infinity(f
)) {
3748 return float32_set_sign(float32_zero
, sign
);
3751 /* frec7(+-0) = +-inf */
3752 if (float32_is_zero(f
)) {
3753 s
->float_exception_flags
|= float_flag_divbyzero
;
3754 return float32_set_sign(float32_infinity
, sign
);
3757 /* frec7(sNaN) = canonical NaN */
3758 if (float32_is_signaling_nan(f
, s
)) {
3759 s
->float_exception_flags
|= float_flag_invalid
;
3760 return float32_default_nan(s
);
3763 /* frec7(qNaN) = canonical NaN */
3764 if (float32_is_quiet_nan(f
, s
)) {
3765 return float32_default_nan(s
);
3768 /* +-normal, +-subnormal */
3769 uint64_t val
= frec7(f
, exp_size
, frac_size
, s
);
3770 return make_float32(val
);
3773 static float64
frec7_d(float64 f
, float_status
*s
)
3775 int exp_size
= 11, frac_size
= 52;
3776 bool sign
= float64_is_neg(f
);
3778 /* frec7(+-inf) = +-0 */
3779 if (float64_is_infinity(f
)) {
3780 return float64_set_sign(float64_zero
, sign
);
3783 /* frec7(+-0) = +-inf */
3784 if (float64_is_zero(f
)) {
3785 s
->float_exception_flags
|= float_flag_divbyzero
;
3786 return float64_set_sign(float64_infinity
, sign
);
3789 /* frec7(sNaN) = canonical NaN */
3790 if (float64_is_signaling_nan(f
, s
)) {
3791 s
->float_exception_flags
|= float_flag_invalid
;
3792 return float64_default_nan(s
);
3795 /* frec7(qNaN) = canonical NaN */
3796 if (float64_is_quiet_nan(f
, s
)) {
3797 return float64_default_nan(s
);
3800 /* +-normal, +-subnormal */
3801 uint64_t val
= frec7(f
, exp_size
, frac_size
, s
);
3802 return make_float64(val
);
3805 RVVCALL(OPFVV1
, vfrec7_v_h
, OP_UU_H
, H2
, H2
, frec7_h
)
3806 RVVCALL(OPFVV1
, vfrec7_v_w
, OP_UU_W
, H4
, H4
, frec7_s
)
3807 RVVCALL(OPFVV1
, vfrec7_v_d
, OP_UU_D
, H8
, H8
, frec7_d
)
3808 GEN_VEXT_V_ENV(vfrec7_v_h
, 2, 2)
3809 GEN_VEXT_V_ENV(vfrec7_v_w
, 4, 4)
3810 GEN_VEXT_V_ENV(vfrec7_v_d
, 8, 8)
3812 /* Vector Floating-Point MIN/MAX Instructions */
3813 RVVCALL(OPFVV2
, vfmin_vv_h
, OP_UUU_H
, H2
, H2
, H2
, float16_minimum_number
)
3814 RVVCALL(OPFVV2
, vfmin_vv_w
, OP_UUU_W
, H4
, H4
, H4
, float32_minimum_number
)
3815 RVVCALL(OPFVV2
, vfmin_vv_d
, OP_UUU_D
, H8
, H8
, H8
, float64_minimum_number
)
3816 GEN_VEXT_VV_ENV(vfmin_vv_h
, 2, 2)
3817 GEN_VEXT_VV_ENV(vfmin_vv_w
, 4, 4)
3818 GEN_VEXT_VV_ENV(vfmin_vv_d
, 8, 8)
3819 RVVCALL(OPFVF2
, vfmin_vf_h
, OP_UUU_H
, H2
, H2
, float16_minimum_number
)
3820 RVVCALL(OPFVF2
, vfmin_vf_w
, OP_UUU_W
, H4
, H4
, float32_minimum_number
)
3821 RVVCALL(OPFVF2
, vfmin_vf_d
, OP_UUU_D
, H8
, H8
, float64_minimum_number
)
3822 GEN_VEXT_VF(vfmin_vf_h
, 2, 2)
3823 GEN_VEXT_VF(vfmin_vf_w
, 4, 4)
3824 GEN_VEXT_VF(vfmin_vf_d
, 8, 8)
3826 RVVCALL(OPFVV2
, vfmax_vv_h
, OP_UUU_H
, H2
, H2
, H2
, float16_maximum_number
)
3827 RVVCALL(OPFVV2
, vfmax_vv_w
, OP_UUU_W
, H4
, H4
, H4
, float32_maximum_number
)
3828 RVVCALL(OPFVV2
, vfmax_vv_d
, OP_UUU_D
, H8
, H8
, H8
, float64_maximum_number
)
3829 GEN_VEXT_VV_ENV(vfmax_vv_h
, 2, 2)
3830 GEN_VEXT_VV_ENV(vfmax_vv_w
, 4, 4)
3831 GEN_VEXT_VV_ENV(vfmax_vv_d
, 8, 8)
3832 RVVCALL(OPFVF2
, vfmax_vf_h
, OP_UUU_H
, H2
, H2
, float16_maximum_number
)
3833 RVVCALL(OPFVF2
, vfmax_vf_w
, OP_UUU_W
, H4
, H4
, float32_maximum_number
)
3834 RVVCALL(OPFVF2
, vfmax_vf_d
, OP_UUU_D
, H8
, H8
, float64_maximum_number
)
3835 GEN_VEXT_VF(vfmax_vf_h
, 2, 2)
3836 GEN_VEXT_VF(vfmax_vf_w
, 4, 4)
3837 GEN_VEXT_VF(vfmax_vf_d
, 8, 8)
3839 /* Vector Floating-Point Sign-Injection Instructions */
3840 static uint16_t fsgnj16(uint16_t a
, uint16_t b
, float_status
*s
)
3842 return deposit64(b
, 0, 15, a
);
3845 static uint32_t fsgnj32(uint32_t a
, uint32_t b
, float_status
*s
)
3847 return deposit64(b
, 0, 31, a
);
3850 static uint64_t fsgnj64(uint64_t a
, uint64_t b
, float_status
*s
)
3852 return deposit64(b
, 0, 63, a
);
3855 RVVCALL(OPFVV2
, vfsgnj_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fsgnj16
)
3856 RVVCALL(OPFVV2
, vfsgnj_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fsgnj32
)
3857 RVVCALL(OPFVV2
, vfsgnj_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fsgnj64
)
3858 GEN_VEXT_VV_ENV(vfsgnj_vv_h
, 2, 2)
3859 GEN_VEXT_VV_ENV(vfsgnj_vv_w
, 4, 4)
3860 GEN_VEXT_VV_ENV(vfsgnj_vv_d
, 8, 8)
3861 RVVCALL(OPFVF2
, vfsgnj_vf_h
, OP_UUU_H
, H2
, H2
, fsgnj16
)
3862 RVVCALL(OPFVF2
, vfsgnj_vf_w
, OP_UUU_W
, H4
, H4
, fsgnj32
)
3863 RVVCALL(OPFVF2
, vfsgnj_vf_d
, OP_UUU_D
, H8
, H8
, fsgnj64
)
3864 GEN_VEXT_VF(vfsgnj_vf_h
, 2, 2)
3865 GEN_VEXT_VF(vfsgnj_vf_w
, 4, 4)
3866 GEN_VEXT_VF(vfsgnj_vf_d
, 8, 8)
3868 static uint16_t fsgnjn16(uint16_t a
, uint16_t b
, float_status
*s
)
3870 return deposit64(~b
, 0, 15, a
);
3873 static uint32_t fsgnjn32(uint32_t a
, uint32_t b
, float_status
*s
)
3875 return deposit64(~b
, 0, 31, a
);
3878 static uint64_t fsgnjn64(uint64_t a
, uint64_t b
, float_status
*s
)
3880 return deposit64(~b
, 0, 63, a
);
3883 RVVCALL(OPFVV2
, vfsgnjn_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fsgnjn16
)
3884 RVVCALL(OPFVV2
, vfsgnjn_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fsgnjn32
)
3885 RVVCALL(OPFVV2
, vfsgnjn_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fsgnjn64
)
3886 GEN_VEXT_VV_ENV(vfsgnjn_vv_h
, 2, 2)
3887 GEN_VEXT_VV_ENV(vfsgnjn_vv_w
, 4, 4)
3888 GEN_VEXT_VV_ENV(vfsgnjn_vv_d
, 8, 8)
3889 RVVCALL(OPFVF2
, vfsgnjn_vf_h
, OP_UUU_H
, H2
, H2
, fsgnjn16
)
3890 RVVCALL(OPFVF2
, vfsgnjn_vf_w
, OP_UUU_W
, H4
, H4
, fsgnjn32
)
3891 RVVCALL(OPFVF2
, vfsgnjn_vf_d
, OP_UUU_D
, H8
, H8
, fsgnjn64
)
3892 GEN_VEXT_VF(vfsgnjn_vf_h
, 2, 2)
3893 GEN_VEXT_VF(vfsgnjn_vf_w
, 4, 4)
3894 GEN_VEXT_VF(vfsgnjn_vf_d
, 8, 8)
3896 static uint16_t fsgnjx16(uint16_t a
, uint16_t b
, float_status
*s
)
3898 return deposit64(b
^ a
, 0, 15, a
);
3901 static uint32_t fsgnjx32(uint32_t a
, uint32_t b
, float_status
*s
)
3903 return deposit64(b
^ a
, 0, 31, a
);
3906 static uint64_t fsgnjx64(uint64_t a
, uint64_t b
, float_status
*s
)
3908 return deposit64(b
^ a
, 0, 63, a
);
3911 RVVCALL(OPFVV2
, vfsgnjx_vv_h
, OP_UUU_H
, H2
, H2
, H2
, fsgnjx16
)
3912 RVVCALL(OPFVV2
, vfsgnjx_vv_w
, OP_UUU_W
, H4
, H4
, H4
, fsgnjx32
)
3913 RVVCALL(OPFVV2
, vfsgnjx_vv_d
, OP_UUU_D
, H8
, H8
, H8
, fsgnjx64
)
3914 GEN_VEXT_VV_ENV(vfsgnjx_vv_h
, 2, 2)
3915 GEN_VEXT_VV_ENV(vfsgnjx_vv_w
, 4, 4)
3916 GEN_VEXT_VV_ENV(vfsgnjx_vv_d
, 8, 8)
3917 RVVCALL(OPFVF2
, vfsgnjx_vf_h
, OP_UUU_H
, H2
, H2
, fsgnjx16
)
3918 RVVCALL(OPFVF2
, vfsgnjx_vf_w
, OP_UUU_W
, H4
, H4
, fsgnjx32
)
3919 RVVCALL(OPFVF2
, vfsgnjx_vf_d
, OP_UUU_D
, H8
, H8
, fsgnjx64
)
3920 GEN_VEXT_VF(vfsgnjx_vf_h
, 2, 2)
3921 GEN_VEXT_VF(vfsgnjx_vf_w
, 4, 4)
3922 GEN_VEXT_VF(vfsgnjx_vf_d
, 8, 8)
3924 /* Vector Floating-Point Compare Instructions */
3925 #define GEN_VEXT_CMP_VV_ENV(NAME, ETYPE, H, DO_OP) \
3926 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
3927 CPURISCVState *env, uint32_t desc) \
3929 uint32_t vm = vext_vm(desc); \
3930 uint32_t vl = env->vl; \
3933 for (i = env->vstart; i < vl; i++) { \
3934 ETYPE s1 = *((ETYPE *)vs1 + H(i)); \
3935 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
3936 if (!vm && !vext_elem_mask(v0, i)) { \
3939 vext_set_elem_mask(vd, i, \
3940 DO_OP(s2, s1, &env->fp_status)); \
3945 GEN_VEXT_CMP_VV_ENV(vmfeq_vv_h
, uint16_t, H2
, float16_eq_quiet
)
3946 GEN_VEXT_CMP_VV_ENV(vmfeq_vv_w
, uint32_t, H4
, float32_eq_quiet
)
3947 GEN_VEXT_CMP_VV_ENV(vmfeq_vv_d
, uint64_t, H8
, float64_eq_quiet
)
3949 #define GEN_VEXT_CMP_VF(NAME, ETYPE, H, DO_OP) \
3950 void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
3951 CPURISCVState *env, uint32_t desc) \
3953 uint32_t vm = vext_vm(desc); \
3954 uint32_t vl = env->vl; \
3957 for (i = env->vstart; i < vl; i++) { \
3958 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
3959 if (!vm && !vext_elem_mask(v0, i)) { \
3962 vext_set_elem_mask(vd, i, \
3963 DO_OP(s2, (ETYPE)s1, &env->fp_status)); \
3968 GEN_VEXT_CMP_VF(vmfeq_vf_h
, uint16_t, H2
, float16_eq_quiet
)
3969 GEN_VEXT_CMP_VF(vmfeq_vf_w
, uint32_t, H4
, float32_eq_quiet
)
3970 GEN_VEXT_CMP_VF(vmfeq_vf_d
, uint64_t, H8
, float64_eq_quiet
)
3972 static bool vmfne16(uint16_t a
, uint16_t b
, float_status
*s
)
3974 FloatRelation compare
= float16_compare_quiet(a
, b
, s
);
3975 return compare
!= float_relation_equal
;
3978 static bool vmfne32(uint32_t a
, uint32_t b
, float_status
*s
)
3980 FloatRelation compare
= float32_compare_quiet(a
, b
, s
);
3981 return compare
!= float_relation_equal
;
3984 static bool vmfne64(uint64_t a
, uint64_t b
, float_status
*s
)
3986 FloatRelation compare
= float64_compare_quiet(a
, b
, s
);
3987 return compare
!= float_relation_equal
;
3990 GEN_VEXT_CMP_VV_ENV(vmfne_vv_h
, uint16_t, H2
, vmfne16
)
3991 GEN_VEXT_CMP_VV_ENV(vmfne_vv_w
, uint32_t, H4
, vmfne32
)
3992 GEN_VEXT_CMP_VV_ENV(vmfne_vv_d
, uint64_t, H8
, vmfne64
)
3993 GEN_VEXT_CMP_VF(vmfne_vf_h
, uint16_t, H2
, vmfne16
)
3994 GEN_VEXT_CMP_VF(vmfne_vf_w
, uint32_t, H4
, vmfne32
)
3995 GEN_VEXT_CMP_VF(vmfne_vf_d
, uint64_t, H8
, vmfne64
)
3997 GEN_VEXT_CMP_VV_ENV(vmflt_vv_h
, uint16_t, H2
, float16_lt
)
3998 GEN_VEXT_CMP_VV_ENV(vmflt_vv_w
, uint32_t, H4
, float32_lt
)
3999 GEN_VEXT_CMP_VV_ENV(vmflt_vv_d
, uint64_t, H8
, float64_lt
)
4000 GEN_VEXT_CMP_VF(vmflt_vf_h
, uint16_t, H2
, float16_lt
)
4001 GEN_VEXT_CMP_VF(vmflt_vf_w
, uint32_t, H4
, float32_lt
)
4002 GEN_VEXT_CMP_VF(vmflt_vf_d
, uint64_t, H8
, float64_lt
)
4004 GEN_VEXT_CMP_VV_ENV(vmfle_vv_h
, uint16_t, H2
, float16_le
)
4005 GEN_VEXT_CMP_VV_ENV(vmfle_vv_w
, uint32_t, H4
, float32_le
)
4006 GEN_VEXT_CMP_VV_ENV(vmfle_vv_d
, uint64_t, H8
, float64_le
)
4007 GEN_VEXT_CMP_VF(vmfle_vf_h
, uint16_t, H2
, float16_le
)
4008 GEN_VEXT_CMP_VF(vmfle_vf_w
, uint32_t, H4
, float32_le
)
4009 GEN_VEXT_CMP_VF(vmfle_vf_d
, uint64_t, H8
, float64_le
)
4011 static bool vmfgt16(uint16_t a
, uint16_t b
, float_status
*s
)
4013 FloatRelation compare
= float16_compare(a
, b
, s
);
4014 return compare
== float_relation_greater
;
4017 static bool vmfgt32(uint32_t a
, uint32_t b
, float_status
*s
)
4019 FloatRelation compare
= float32_compare(a
, b
, s
);
4020 return compare
== float_relation_greater
;
4023 static bool vmfgt64(uint64_t a
, uint64_t b
, float_status
*s
)
4025 FloatRelation compare
= float64_compare(a
, b
, s
);
4026 return compare
== float_relation_greater
;
4029 GEN_VEXT_CMP_VF(vmfgt_vf_h
, uint16_t, H2
, vmfgt16
)
4030 GEN_VEXT_CMP_VF(vmfgt_vf_w
, uint32_t, H4
, vmfgt32
)
4031 GEN_VEXT_CMP_VF(vmfgt_vf_d
, uint64_t, H8
, vmfgt64
)
4033 static bool vmfge16(uint16_t a
, uint16_t b
, float_status
*s
)
4035 FloatRelation compare
= float16_compare(a
, b
, s
);
4036 return compare
== float_relation_greater
||
4037 compare
== float_relation_equal
;
4040 static bool vmfge32(uint32_t a
, uint32_t b
, float_status
*s
)
4042 FloatRelation compare
= float32_compare(a
, b
, s
);
4043 return compare
== float_relation_greater
||
4044 compare
== float_relation_equal
;
4047 static bool vmfge64(uint64_t a
, uint64_t b
, float_status
*s
)
4049 FloatRelation compare
= float64_compare(a
, b
, s
);
4050 return compare
== float_relation_greater
||
4051 compare
== float_relation_equal
;
4054 GEN_VEXT_CMP_VF(vmfge_vf_h
, uint16_t, H2
, vmfge16
)
4055 GEN_VEXT_CMP_VF(vmfge_vf_w
, uint32_t, H4
, vmfge32
)
4056 GEN_VEXT_CMP_VF(vmfge_vf_d
, uint64_t, H8
, vmfge64
)
4058 /* Vector Floating-Point Classify Instruction */
4059 #define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP) \
4060 static void do_##NAME(void *vd, void *vs2, int i) \
4062 TX2 s2 = *((T2 *)vs2 + HS2(i)); \
4063 *((TD *)vd + HD(i)) = OP(s2); \
4066 #define GEN_VEXT_V(NAME, ESZ, DSZ) \
4067 void HELPER(NAME)(void *vd, void *v0, void *vs2, \
4068 CPURISCVState *env, uint32_t desc) \
4070 uint32_t vm = vext_vm(desc); \
4071 uint32_t vl = env->vl; \
4074 for (i = env->vstart; i < vl; i++) { \
4075 if (!vm && !vext_elem_mask(v0, i)) { \
4078 do_##NAME(vd, vs2, i); \
4083 target_ulong
fclass_h(uint64_t frs1
)
4086 bool sign
= float16_is_neg(f
);
4088 if (float16_is_infinity(f
)) {
4089 return sign
? 1 << 0 : 1 << 7;
4090 } else if (float16_is_zero(f
)) {
4091 return sign
? 1 << 3 : 1 << 4;
4092 } else if (float16_is_zero_or_denormal(f
)) {
4093 return sign
? 1 << 2 : 1 << 5;
4094 } else if (float16_is_any_nan(f
)) {
4095 float_status s
= { }; /* for snan_bit_is_one */
4096 return float16_is_quiet_nan(f
, &s
) ? 1 << 9 : 1 << 8;
4098 return sign
? 1 << 1 : 1 << 6;
4102 target_ulong
fclass_s(uint64_t frs1
)
4105 bool sign
= float32_is_neg(f
);
4107 if (float32_is_infinity(f
)) {
4108 return sign
? 1 << 0 : 1 << 7;
4109 } else if (float32_is_zero(f
)) {
4110 return sign
? 1 << 3 : 1 << 4;
4111 } else if (float32_is_zero_or_denormal(f
)) {
4112 return sign
? 1 << 2 : 1 << 5;
4113 } else if (float32_is_any_nan(f
)) {
4114 float_status s
= { }; /* for snan_bit_is_one */
4115 return float32_is_quiet_nan(f
, &s
) ? 1 << 9 : 1 << 8;
4117 return sign
? 1 << 1 : 1 << 6;
4121 target_ulong
fclass_d(uint64_t frs1
)
4124 bool sign
= float64_is_neg(f
);
4126 if (float64_is_infinity(f
)) {
4127 return sign
? 1 << 0 : 1 << 7;
4128 } else if (float64_is_zero(f
)) {
4129 return sign
? 1 << 3 : 1 << 4;
4130 } else if (float64_is_zero_or_denormal(f
)) {
4131 return sign
? 1 << 2 : 1 << 5;
4132 } else if (float64_is_any_nan(f
)) {
4133 float_status s
= { }; /* for snan_bit_is_one */
4134 return float64_is_quiet_nan(f
, &s
) ? 1 << 9 : 1 << 8;
4136 return sign
? 1 << 1 : 1 << 6;
4140 RVVCALL(OPIVV1
, vfclass_v_h
, OP_UU_H
, H2
, H2
, fclass_h
)
4141 RVVCALL(OPIVV1
, vfclass_v_w
, OP_UU_W
, H4
, H4
, fclass_s
)
4142 RVVCALL(OPIVV1
, vfclass_v_d
, OP_UU_D
, H8
, H8
, fclass_d
)
4143 GEN_VEXT_V(vfclass_v_h
, 2, 2)
4144 GEN_VEXT_V(vfclass_v_w
, 4, 4)
4145 GEN_VEXT_V(vfclass_v_d
, 8, 8)
4147 /* Vector Floating-Point Merge Instruction */
4148 #define GEN_VFMERGE_VF(NAME, ETYPE, H) \
4149 void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
4150 CPURISCVState *env, uint32_t desc) \
4152 uint32_t vm = vext_vm(desc); \
4153 uint32_t vl = env->vl; \
4156 for (i = env->vstart; i < vl; i++) { \
4157 ETYPE s2 = *((ETYPE *)vs2 + H(i)); \
4158 *((ETYPE *)vd + H(i)) \
4159 = (!vm && !vext_elem_mask(v0, i) ? s2 : s1); \
4164 GEN_VFMERGE_VF(vfmerge_vfm_h
, int16_t, H2
)
4165 GEN_VFMERGE_VF(vfmerge_vfm_w
, int32_t, H4
)
4166 GEN_VFMERGE_VF(vfmerge_vfm_d
, int64_t, H8
)
4168 /* Single-Width Floating-Point/Integer Type-Convert Instructions */
4169 /* vfcvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
4170 RVVCALL(OPFVV1
, vfcvt_xu_f_v_h
, OP_UU_H
, H2
, H2
, float16_to_uint16
)
4171 RVVCALL(OPFVV1
, vfcvt_xu_f_v_w
, OP_UU_W
, H4
, H4
, float32_to_uint32
)
4172 RVVCALL(OPFVV1
, vfcvt_xu_f_v_d
, OP_UU_D
, H8
, H8
, float64_to_uint64
)
4173 GEN_VEXT_V_ENV(vfcvt_xu_f_v_h
, 2, 2)
4174 GEN_VEXT_V_ENV(vfcvt_xu_f_v_w
, 4, 4)
4175 GEN_VEXT_V_ENV(vfcvt_xu_f_v_d
, 8, 8)
4177 /* vfcvt.x.f.v vd, vs2, vm # Convert float to signed integer. */
4178 RVVCALL(OPFVV1
, vfcvt_x_f_v_h
, OP_UU_H
, H2
, H2
, float16_to_int16
)
4179 RVVCALL(OPFVV1
, vfcvt_x_f_v_w
, OP_UU_W
, H4
, H4
, float32_to_int32
)
4180 RVVCALL(OPFVV1
, vfcvt_x_f_v_d
, OP_UU_D
, H8
, H8
, float64_to_int64
)
4181 GEN_VEXT_V_ENV(vfcvt_x_f_v_h
, 2, 2)
4182 GEN_VEXT_V_ENV(vfcvt_x_f_v_w
, 4, 4)
4183 GEN_VEXT_V_ENV(vfcvt_x_f_v_d
, 8, 8)
4185 /* vfcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to float. */
4186 RVVCALL(OPFVV1
, vfcvt_f_xu_v_h
, OP_UU_H
, H2
, H2
, uint16_to_float16
)
4187 RVVCALL(OPFVV1
, vfcvt_f_xu_v_w
, OP_UU_W
, H4
, H4
, uint32_to_float32
)
4188 RVVCALL(OPFVV1
, vfcvt_f_xu_v_d
, OP_UU_D
, H8
, H8
, uint64_to_float64
)
4189 GEN_VEXT_V_ENV(vfcvt_f_xu_v_h
, 2, 2)
4190 GEN_VEXT_V_ENV(vfcvt_f_xu_v_w
, 4, 4)
4191 GEN_VEXT_V_ENV(vfcvt_f_xu_v_d
, 8, 8)
4193 /* vfcvt.f.x.v vd, vs2, vm # Convert integer to float. */
4194 RVVCALL(OPFVV1
, vfcvt_f_x_v_h
, OP_UU_H
, H2
, H2
, int16_to_float16
)
4195 RVVCALL(OPFVV1
, vfcvt_f_x_v_w
, OP_UU_W
, H4
, H4
, int32_to_float32
)
4196 RVVCALL(OPFVV1
, vfcvt_f_x_v_d
, OP_UU_D
, H8
, H8
, int64_to_float64
)
4197 GEN_VEXT_V_ENV(vfcvt_f_x_v_h
, 2, 2)
4198 GEN_VEXT_V_ENV(vfcvt_f_x_v_w
, 4, 4)
4199 GEN_VEXT_V_ENV(vfcvt_f_x_v_d
, 8, 8)
4201 /* Widening Floating-Point/Integer Type-Convert Instructions */
4203 #define WOP_UU_B uint16_t, uint8_t, uint8_t
4204 #define WOP_UU_H uint32_t, uint16_t, uint16_t
4205 #define WOP_UU_W uint64_t, uint32_t, uint32_t
4206 /* vfwcvt.xu.f.v vd, vs2, vm # Convert float to double-width unsigned integer.*/
4207 RVVCALL(OPFVV1
, vfwcvt_xu_f_v_h
, WOP_UU_H
, H4
, H2
, float16_to_uint32
)
4208 RVVCALL(OPFVV1
, vfwcvt_xu_f_v_w
, WOP_UU_W
, H8
, H4
, float32_to_uint64
)
4209 GEN_VEXT_V_ENV(vfwcvt_xu_f_v_h
, 2, 4)
4210 GEN_VEXT_V_ENV(vfwcvt_xu_f_v_w
, 4, 8)
4212 /* vfwcvt.x.f.v vd, vs2, vm # Convert float to double-width signed integer. */
4213 RVVCALL(OPFVV1
, vfwcvt_x_f_v_h
, WOP_UU_H
, H4
, H2
, float16_to_int32
)
4214 RVVCALL(OPFVV1
, vfwcvt_x_f_v_w
, WOP_UU_W
, H8
, H4
, float32_to_int64
)
4215 GEN_VEXT_V_ENV(vfwcvt_x_f_v_h
, 2, 4)
4216 GEN_VEXT_V_ENV(vfwcvt_x_f_v_w
, 4, 8)
4218 /* vfwcvt.f.xu.v vd, vs2, vm # Convert unsigned integer to double-width float */
4219 RVVCALL(OPFVV1
, vfwcvt_f_xu_v_b
, WOP_UU_B
, H2
, H1
, uint8_to_float16
)
4220 RVVCALL(OPFVV1
, vfwcvt_f_xu_v_h
, WOP_UU_H
, H4
, H2
, uint16_to_float32
)
4221 RVVCALL(OPFVV1
, vfwcvt_f_xu_v_w
, WOP_UU_W
, H8
, H4
, uint32_to_float64
)
4222 GEN_VEXT_V_ENV(vfwcvt_f_xu_v_b
, 1, 2)
4223 GEN_VEXT_V_ENV(vfwcvt_f_xu_v_h
, 2, 4)
4224 GEN_VEXT_V_ENV(vfwcvt_f_xu_v_w
, 4, 8)
4226 /* vfwcvt.f.x.v vd, vs2, vm # Convert integer to double-width float. */
4227 RVVCALL(OPFVV1
, vfwcvt_f_x_v_b
, WOP_UU_B
, H2
, H1
, int8_to_float16
)
4228 RVVCALL(OPFVV1
, vfwcvt_f_x_v_h
, WOP_UU_H
, H4
, H2
, int16_to_float32
)
4229 RVVCALL(OPFVV1
, vfwcvt_f_x_v_w
, WOP_UU_W
, H8
, H4
, int32_to_float64
)
4230 GEN_VEXT_V_ENV(vfwcvt_f_x_v_b
, 1, 2)
4231 GEN_VEXT_V_ENV(vfwcvt_f_x_v_h
, 2, 4)
4232 GEN_VEXT_V_ENV(vfwcvt_f_x_v_w
, 4, 8)
4235 * vfwcvt.f.f.v vd, vs2, vm
4236 * Convert single-width float to double-width float.
4238 static uint32_t vfwcvtffv16(uint16_t a
, float_status
*s
)
4240 return float16_to_float32(a
, true, s
);
4243 RVVCALL(OPFVV1
, vfwcvt_f_f_v_h
, WOP_UU_H
, H4
, H2
, vfwcvtffv16
)
4244 RVVCALL(OPFVV1
, vfwcvt_f_f_v_w
, WOP_UU_W
, H8
, H4
, float32_to_float64
)
4245 GEN_VEXT_V_ENV(vfwcvt_f_f_v_h
, 2, 4)
4246 GEN_VEXT_V_ENV(vfwcvt_f_f_v_w
, 4, 8)
4248 /* Narrowing Floating-Point/Integer Type-Convert Instructions */
4250 #define NOP_UU_B uint8_t, uint16_t, uint32_t
4251 #define NOP_UU_H uint16_t, uint32_t, uint32_t
4252 #define NOP_UU_W uint32_t, uint64_t, uint64_t
4253 /* vfncvt.xu.f.v vd, vs2, vm # Convert float to unsigned integer. */
4254 RVVCALL(OPFVV1
, vfncvt_xu_f_w_b
, NOP_UU_B
, H1
, H2
, float16_to_uint8
)
4255 RVVCALL(OPFVV1
, vfncvt_xu_f_w_h
, NOP_UU_H
, H2
, H4
, float32_to_uint16
)
4256 RVVCALL(OPFVV1
, vfncvt_xu_f_w_w
, NOP_UU_W
, H4
, H8
, float64_to_uint32
)
4257 GEN_VEXT_V_ENV(vfncvt_xu_f_w_b
, 1, 1)
4258 GEN_VEXT_V_ENV(vfncvt_xu_f_w_h
, 2, 2)
4259 GEN_VEXT_V_ENV(vfncvt_xu_f_w_w
, 4, 4)
4261 /* vfncvt.x.f.v vd, vs2, vm # Convert double-width float to signed integer. */
4262 RVVCALL(OPFVV1
, vfncvt_x_f_w_b
, NOP_UU_B
, H1
, H2
, float16_to_int8
)
4263 RVVCALL(OPFVV1
, vfncvt_x_f_w_h
, NOP_UU_H
, H2
, H4
, float32_to_int16
)
4264 RVVCALL(OPFVV1
, vfncvt_x_f_w_w
, NOP_UU_W
, H4
, H8
, float64_to_int32
)
4265 GEN_VEXT_V_ENV(vfncvt_x_f_w_b
, 1, 1)
4266 GEN_VEXT_V_ENV(vfncvt_x_f_w_h
, 2, 2)
4267 GEN_VEXT_V_ENV(vfncvt_x_f_w_w
, 4, 4)
4269 /* vfncvt.f.xu.v vd, vs2, vm # Convert double-width unsigned integer to float */
4270 RVVCALL(OPFVV1
, vfncvt_f_xu_w_h
, NOP_UU_H
, H2
, H4
, uint32_to_float16
)
4271 RVVCALL(OPFVV1
, vfncvt_f_xu_w_w
, NOP_UU_W
, H4
, H8
, uint64_to_float32
)
4272 GEN_VEXT_V_ENV(vfncvt_f_xu_w_h
, 2, 2)
4273 GEN_VEXT_V_ENV(vfncvt_f_xu_w_w
, 4, 4)
4275 /* vfncvt.f.x.v vd, vs2, vm # Convert double-width integer to float. */
4276 RVVCALL(OPFVV1
, vfncvt_f_x_w_h
, NOP_UU_H
, H2
, H4
, int32_to_float16
)
4277 RVVCALL(OPFVV1
, vfncvt_f_x_w_w
, NOP_UU_W
, H4
, H8
, int64_to_float32
)
4278 GEN_VEXT_V_ENV(vfncvt_f_x_w_h
, 2, 2)
4279 GEN_VEXT_V_ENV(vfncvt_f_x_w_w
, 4, 4)
4281 /* vfncvt.f.f.v vd, vs2, vm # Convert double float to single-width float. */
4282 static uint16_t vfncvtffv16(uint32_t a
, float_status
*s
)
4284 return float32_to_float16(a
, true, s
);
4287 RVVCALL(OPFVV1
, vfncvt_f_f_w_h
, NOP_UU_H
, H2
, H4
, vfncvtffv16
)
4288 RVVCALL(OPFVV1
, vfncvt_f_f_w_w
, NOP_UU_W
, H4
, H8
, float64_to_float32
)
4289 GEN_VEXT_V_ENV(vfncvt_f_f_w_h
, 2, 2)
4290 GEN_VEXT_V_ENV(vfncvt_f_f_w_w
, 4, 4)
4293 *** Vector Reduction Operations
4295 /* Vector Single-Width Integer Reduction Instructions */
4296 #define GEN_VEXT_RED(NAME, TD, TS2, HD, HS2, OP) \
4297 void HELPER(NAME)(void *vd, void *v0, void *vs1, \
4298 void *vs2, CPURISCVState *env, uint32_t desc) \
4300 uint32_t vm = vext_vm(desc); \
4301 uint32_t vl = env->vl; \
4303 TD s1 = *((TD *)vs1 + HD(0)); \
4305 for (i = env->vstart; i < vl; i++) { \
4306 TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
4307 if (!vm && !vext_elem_mask(v0, i)) { \
4310 s1 = OP(s1, (TD)s2); \
4312 *((TD *)vd + HD(0)) = s1; \
4316 /* vd[0] = sum(vs1[0], vs2[*]) */
4317 GEN_VEXT_RED(vredsum_vs_b
, int8_t, int8_t, H1
, H1
, DO_ADD
)
4318 GEN_VEXT_RED(vredsum_vs_h
, int16_t, int16_t, H2
, H2
, DO_ADD
)
4319 GEN_VEXT_RED(vredsum_vs_w
, int32_t, int32_t, H4
, H4
, DO_ADD
)
4320 GEN_VEXT_RED(vredsum_vs_d
, int64_t, int64_t, H8
, H8
, DO_ADD
)
4322 /* vd[0] = maxu(vs1[0], vs2[*]) */
4323 GEN_VEXT_RED(vredmaxu_vs_b
, uint8_t, uint8_t, H1
, H1
, DO_MAX
)
4324 GEN_VEXT_RED(vredmaxu_vs_h
, uint16_t, uint16_t, H2
, H2
, DO_MAX
)
4325 GEN_VEXT_RED(vredmaxu_vs_w
, uint32_t, uint32_t, H4
, H4
, DO_MAX
)
4326 GEN_VEXT_RED(vredmaxu_vs_d
, uint64_t, uint64_t, H8
, H8
, DO_MAX
)
4328 /* vd[0] = max(vs1[0], vs2[*]) */
4329 GEN_VEXT_RED(vredmax_vs_b
, int8_t, int8_t, H1
, H1
, DO_MAX
)
4330 GEN_VEXT_RED(vredmax_vs_h
, int16_t, int16_t, H2
, H2
, DO_MAX
)
4331 GEN_VEXT_RED(vredmax_vs_w
, int32_t, int32_t, H4
, H4
, DO_MAX
)
4332 GEN_VEXT_RED(vredmax_vs_d
, int64_t, int64_t, H8
, H8
, DO_MAX
)
4334 /* vd[0] = minu(vs1[0], vs2[*]) */
4335 GEN_VEXT_RED(vredminu_vs_b
, uint8_t, uint8_t, H1
, H1
, DO_MIN
)
4336 GEN_VEXT_RED(vredminu_vs_h
, uint16_t, uint16_t, H2
, H2
, DO_MIN
)
4337 GEN_VEXT_RED(vredminu_vs_w
, uint32_t, uint32_t, H4
, H4
, DO_MIN
)
4338 GEN_VEXT_RED(vredminu_vs_d
, uint64_t, uint64_t, H8
, H8
, DO_MIN
)
4340 /* vd[0] = min(vs1[0], vs2[*]) */
4341 GEN_VEXT_RED(vredmin_vs_b
, int8_t, int8_t, H1
, H1
, DO_MIN
)
4342 GEN_VEXT_RED(vredmin_vs_h
, int16_t, int16_t, H2
, H2
, DO_MIN
)
4343 GEN_VEXT_RED(vredmin_vs_w
, int32_t, int32_t, H4
, H4
, DO_MIN
)
4344 GEN_VEXT_RED(vredmin_vs_d
, int64_t, int64_t, H8
, H8
, DO_MIN
)
4346 /* vd[0] = and(vs1[0], vs2[*]) */
4347 GEN_VEXT_RED(vredand_vs_b
, int8_t, int8_t, H1
, H1
, DO_AND
)
4348 GEN_VEXT_RED(vredand_vs_h
, int16_t, int16_t, H2
, H2
, DO_AND
)
4349 GEN_VEXT_RED(vredand_vs_w
, int32_t, int32_t, H4
, H4
, DO_AND
)
4350 GEN_VEXT_RED(vredand_vs_d
, int64_t, int64_t, H8
, H8
, DO_AND
)
4352 /* vd[0] = or(vs1[0], vs2[*]) */
4353 GEN_VEXT_RED(vredor_vs_b
, int8_t, int8_t, H1
, H1
, DO_OR
)
4354 GEN_VEXT_RED(vredor_vs_h
, int16_t, int16_t, H2
, H2
, DO_OR
)
4355 GEN_VEXT_RED(vredor_vs_w
, int32_t, int32_t, H4
, H4
, DO_OR
)
4356 GEN_VEXT_RED(vredor_vs_d
, int64_t, int64_t, H8
, H8
, DO_OR
)
4358 /* vd[0] = xor(vs1[0], vs2[*]) */
4359 GEN_VEXT_RED(vredxor_vs_b
, int8_t, int8_t, H1
, H1
, DO_XOR
)
4360 GEN_VEXT_RED(vredxor_vs_h
, int16_t, int16_t, H2
, H2
, DO_XOR
)
4361 GEN_VEXT_RED(vredxor_vs_w
, int32_t, int32_t, H4
, H4
, DO_XOR
)
4362 GEN_VEXT_RED(vredxor_vs_d
, int64_t, int64_t, H8
, H8
, DO_XOR
)
4364 /* Vector Widening Integer Reduction Instructions */
4365 /* signed sum reduction into double-width accumulator */
4366 GEN_VEXT_RED(vwredsum_vs_b
, int16_t, int8_t, H2
, H1
, DO_ADD
)
4367 GEN_VEXT_RED(vwredsum_vs_h
, int32_t, int16_t, H4
, H2
, DO_ADD
)
4368 GEN_VEXT_RED(vwredsum_vs_w
, int64_t, int32_t, H8
, H4
, DO_ADD
)
4370 /* Unsigned sum reduction into double-width accumulator */
4371 GEN_VEXT_RED(vwredsumu_vs_b
, uint16_t, uint8_t, H2
, H1
, DO_ADD
)
4372 GEN_VEXT_RED(vwredsumu_vs_h
, uint32_t, uint16_t, H4
, H2
, DO_ADD
)
4373 GEN_VEXT_RED(vwredsumu_vs_w
, uint64_t, uint32_t, H8
, H4
, DO_ADD
)
4375 /* Vector Single-Width Floating-Point Reduction Instructions */
4376 #define GEN_VEXT_FRED(NAME, TD, TS2, HD, HS2, OP) \
4377 void HELPER(NAME)(void *vd, void *v0, void *vs1, \
4378 void *vs2, CPURISCVState *env, \
4381 uint32_t vm = vext_vm(desc); \
4382 uint32_t vl = env->vl; \
4384 TD s1 = *((TD *)vs1 + HD(0)); \
4386 for (i = env->vstart; i < vl; i++) { \
4387 TS2 s2 = *((TS2 *)vs2 + HS2(i)); \
4388 if (!vm && !vext_elem_mask(v0, i)) { \
4391 s1 = OP(s1, (TD)s2, &env->fp_status); \
4393 *((TD *)vd + HD(0)) = s1; \
4398 GEN_VEXT_FRED(vfredsum_vs_h
, uint16_t, uint16_t, H2
, H2
, float16_add
)
4399 GEN_VEXT_FRED(vfredsum_vs_w
, uint32_t, uint32_t, H4
, H4
, float32_add
)
4400 GEN_VEXT_FRED(vfredsum_vs_d
, uint64_t, uint64_t, H8
, H8
, float64_add
)
4403 GEN_VEXT_FRED(vfredmax_vs_h
, uint16_t, uint16_t, H2
, H2
, float16_maximum_number
)
4404 GEN_VEXT_FRED(vfredmax_vs_w
, uint32_t, uint32_t, H4
, H4
, float32_maximum_number
)
4405 GEN_VEXT_FRED(vfredmax_vs_d
, uint64_t, uint64_t, H8
, H8
, float64_maximum_number
)
4408 GEN_VEXT_FRED(vfredmin_vs_h
, uint16_t, uint16_t, H2
, H2
, float16_minimum_number
)
4409 GEN_VEXT_FRED(vfredmin_vs_w
, uint32_t, uint32_t, H4
, H4
, float32_minimum_number
)
4410 GEN_VEXT_FRED(vfredmin_vs_d
, uint64_t, uint64_t, H8
, H8
, float64_minimum_number
)
4412 /* Vector Widening Floating-Point Reduction Instructions */
4413 /* Unordered reduce 2*SEW = 2*SEW + sum(promote(SEW)) */
4414 void HELPER(vfwredsum_vs_h
)(void *vd
, void *v0
, void *vs1
,
4415 void *vs2
, CPURISCVState
*env
, uint32_t desc
)
4417 uint32_t vm
= vext_vm(desc
);
4418 uint32_t vl
= env
->vl
;
4420 uint32_t s1
= *((uint32_t *)vs1
+ H4(0));
4422 for (i
= env
->vstart
; i
< vl
; i
++) {
4423 uint16_t s2
= *((uint16_t *)vs2
+ H2(i
));
4424 if (!vm
&& !vext_elem_mask(v0
, i
)) {
4427 s1
= float32_add(s1
, float16_to_float32(s2
, true, &env
->fp_status
),
4430 *((uint32_t *)vd
+ H4(0)) = s1
;
4434 void HELPER(vfwredsum_vs_w
)(void *vd
, void *v0
, void *vs1
,
4435 void *vs2
, CPURISCVState
*env
, uint32_t desc
)
4437 uint32_t vm
= vext_vm(desc
);
4438 uint32_t vl
= env
->vl
;
4440 uint64_t s1
= *((uint64_t *)vs1
);
4442 for (i
= env
->vstart
; i
< vl
; i
++) {
4443 uint32_t s2
= *((uint32_t *)vs2
+ H4(i
));
4444 if (!vm
&& !vext_elem_mask(v0
, i
)) {
4447 s1
= float64_add(s1
, float32_to_float64(s2
, &env
->fp_status
),
4450 *((uint64_t *)vd
) = s1
;
4455 *** Vector Mask Operations
4457 /* Vector Mask-Register Logical Instructions */
4458 #define GEN_VEXT_MASK_VV(NAME, OP) \
4459 void HELPER(NAME)(void *vd, void *v0, void *vs1, \
4460 void *vs2, CPURISCVState *env, \
4463 uint32_t vl = env->vl; \
4467 for (i = env->vstart; i < vl; i++) { \
4468 a = vext_elem_mask(vs1, i); \
4469 b = vext_elem_mask(vs2, i); \
4470 vext_set_elem_mask(vd, i, OP(b, a)); \
4475 #define DO_NAND(N, M) (!(N & M))
4476 #define DO_ANDNOT(N, M) (N & !M)
4477 #define DO_NOR(N, M) (!(N | M))
4478 #define DO_ORNOT(N, M) (N | !M)
4479 #define DO_XNOR(N, M) (!(N ^ M))
4481 GEN_VEXT_MASK_VV(vmand_mm
, DO_AND
)
4482 GEN_VEXT_MASK_VV(vmnand_mm
, DO_NAND
)
4483 GEN_VEXT_MASK_VV(vmandn_mm
, DO_ANDNOT
)
4484 GEN_VEXT_MASK_VV(vmxor_mm
, DO_XOR
)
4485 GEN_VEXT_MASK_VV(vmor_mm
, DO_OR
)
4486 GEN_VEXT_MASK_VV(vmnor_mm
, DO_NOR
)
4487 GEN_VEXT_MASK_VV(vmorn_mm
, DO_ORNOT
)
4488 GEN_VEXT_MASK_VV(vmxnor_mm
, DO_XNOR
)
4490 /* Vector count population in mask vcpop */
4491 target_ulong
HELPER(vcpop_m
)(void *v0
, void *vs2
, CPURISCVState
*env
,
4494 target_ulong cnt
= 0;
4495 uint32_t vm
= vext_vm(desc
);
4496 uint32_t vl
= env
->vl
;
4499 for (i
= env
->vstart
; i
< vl
; i
++) {
4500 if (vm
|| vext_elem_mask(v0
, i
)) {
4501 if (vext_elem_mask(vs2
, i
)) {
4510 /* vfirst find-first-set mask bit*/
4511 target_ulong
HELPER(vfirst_m
)(void *v0
, void *vs2
, CPURISCVState
*env
,
4514 uint32_t vm
= vext_vm(desc
);
4515 uint32_t vl
= env
->vl
;
4518 for (i
= env
->vstart
; i
< vl
; i
++) {
4519 if (vm
|| vext_elem_mask(v0
, i
)) {
4520 if (vext_elem_mask(vs2
, i
)) {
4529 enum set_mask_type
{
4535 static void vmsetm(void *vd
, void *v0
, void *vs2
, CPURISCVState
*env
,
4536 uint32_t desc
, enum set_mask_type type
)
4538 uint32_t vm
= vext_vm(desc
);
4539 uint32_t vl
= env
->vl
;
4541 bool first_mask_bit
= false;
4543 for (i
= env
->vstart
; i
< vl
; i
++) {
4544 if (!vm
&& !vext_elem_mask(v0
, i
)) {
4547 /* write a zero to all following active elements */
4548 if (first_mask_bit
) {
4549 vext_set_elem_mask(vd
, i
, 0);
4552 if (vext_elem_mask(vs2
, i
)) {
4553 first_mask_bit
= true;
4554 if (type
== BEFORE_FIRST
) {
4555 vext_set_elem_mask(vd
, i
, 0);
4557 vext_set_elem_mask(vd
, i
, 1);
4560 if (type
== ONLY_FIRST
) {
4561 vext_set_elem_mask(vd
, i
, 0);
4563 vext_set_elem_mask(vd
, i
, 1);
4570 void HELPER(vmsbf_m
)(void *vd
, void *v0
, void *vs2
, CPURISCVState
*env
,
4573 vmsetm(vd
, v0
, vs2
, env
, desc
, BEFORE_FIRST
);
4576 void HELPER(vmsif_m
)(void *vd
, void *v0
, void *vs2
, CPURISCVState
*env
,
4579 vmsetm(vd
, v0
, vs2
, env
, desc
, INCLUDE_FIRST
);
4582 void HELPER(vmsof_m
)(void *vd
, void *v0
, void *vs2
, CPURISCVState
*env
,
4585 vmsetm(vd
, v0
, vs2
, env
, desc
, ONLY_FIRST
);
4588 /* Vector Iota Instruction */
4589 #define GEN_VEXT_VIOTA_M(NAME, ETYPE, H) \
4590 void HELPER(NAME)(void *vd, void *v0, void *vs2, CPURISCVState *env, \
4593 uint32_t vm = vext_vm(desc); \
4594 uint32_t vl = env->vl; \
4598 for (i = env->vstart; i < vl; i++) { \
4599 if (!vm && !vext_elem_mask(v0, i)) { \
4602 *((ETYPE *)vd + H(i)) = sum; \
4603 if (vext_elem_mask(vs2, i)) { \
4610 GEN_VEXT_VIOTA_M(viota_m_b
, uint8_t, H1
)
4611 GEN_VEXT_VIOTA_M(viota_m_h
, uint16_t, H2
)
4612 GEN_VEXT_VIOTA_M(viota_m_w
, uint32_t, H4
)
4613 GEN_VEXT_VIOTA_M(viota_m_d
, uint64_t, H8
)
4615 /* Vector Element Index Instruction */
4616 #define GEN_VEXT_VID_V(NAME, ETYPE, H) \
4617 void HELPER(NAME)(void *vd, void *v0, CPURISCVState *env, uint32_t desc) \
4619 uint32_t vm = vext_vm(desc); \
4620 uint32_t vl = env->vl; \
4623 for (i = env->vstart; i < vl; i++) { \
4624 if (!vm && !vext_elem_mask(v0, i)) { \
4627 *((ETYPE *)vd + H(i)) = i; \
4632 GEN_VEXT_VID_V(vid_v_b
, uint8_t, H1
)
4633 GEN_VEXT_VID_V(vid_v_h
, uint16_t, H2
)
4634 GEN_VEXT_VID_V(vid_v_w
, uint32_t, H4
)
4635 GEN_VEXT_VID_V(vid_v_d
, uint64_t, H8
)
4638 *** Vector Permutation Instructions
4641 /* Vector Slide Instructions */
4642 #define GEN_VEXT_VSLIDEUP_VX(NAME, ETYPE, H) \
4643 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
4644 CPURISCVState *env, uint32_t desc) \
4646 uint32_t vm = vext_vm(desc); \
4647 uint32_t vl = env->vl; \
4648 target_ulong offset = s1, i_min, i; \
4650 i_min = MAX(env->vstart, offset); \
4651 for (i = i_min; i < vl; i++) { \
4652 if (!vm && !vext_elem_mask(v0, i)) { \
4655 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - offset)); \
4659 /* vslideup.vx vd, vs2, rs1, vm # vd[i+rs1] = vs2[i] */
4660 GEN_VEXT_VSLIDEUP_VX(vslideup_vx_b
, uint8_t, H1
)
4661 GEN_VEXT_VSLIDEUP_VX(vslideup_vx_h
, uint16_t, H2
)
4662 GEN_VEXT_VSLIDEUP_VX(vslideup_vx_w
, uint32_t, H4
)
4663 GEN_VEXT_VSLIDEUP_VX(vslideup_vx_d
, uint64_t, H8
)
4665 #define GEN_VEXT_VSLIDEDOWN_VX(NAME, ETYPE, H) \
4666 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
4667 CPURISCVState *env, uint32_t desc) \
4669 uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
4670 uint32_t vm = vext_vm(desc); \
4671 uint32_t vl = env->vl; \
4672 target_ulong i_max, i; \
4674 i_max = MAX(MIN(s1 < vlmax ? vlmax - s1 : 0, vl), env->vstart); \
4675 for (i = env->vstart; i < i_max; ++i) { \
4676 if (vm || vext_elem_mask(v0, i)) { \
4677 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + s1)); \
4681 for (i = i_max; i < vl; ++i) { \
4682 if (vm || vext_elem_mask(v0, i)) { \
4683 *((ETYPE *)vd + H(i)) = 0; \
4690 /* vslidedown.vx vd, vs2, rs1, vm # vd[i] = vs2[i+rs1] */
4691 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_b
, uint8_t, H1
)
4692 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_h
, uint16_t, H2
)
4693 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_w
, uint32_t, H4
)
4694 GEN_VEXT_VSLIDEDOWN_VX(vslidedown_vx_d
, uint64_t, H8
)
4696 #define GEN_VEXT_VSLIE1UP(ESZ, H) \
4697 static void vslide1up_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
4698 CPURISCVState *env, uint32_t desc) \
4700 typedef uint##ESZ##_t ETYPE; \
4701 uint32_t vm = vext_vm(desc); \
4702 uint32_t vl = env->vl; \
4705 for (i = env->vstart; i < vl; i++) { \
4706 if (!vm && !vext_elem_mask(v0, i)) { \
4710 *((ETYPE *)vd + H(i)) = s1; \
4712 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i - 1)); \
4718 GEN_VEXT_VSLIE1UP(8, H1
)
4719 GEN_VEXT_VSLIE1UP(16, H2
)
4720 GEN_VEXT_VSLIE1UP(32, H4
)
4721 GEN_VEXT_VSLIE1UP(64, H8
)
4723 #define GEN_VEXT_VSLIDE1UP_VX(NAME, ESZ) \
4724 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
4725 CPURISCVState *env, uint32_t desc) \
4727 vslide1up_##ESZ(vd, v0, s1, vs2, env, desc); \
4730 /* vslide1up.vx vd, vs2, rs1, vm # vd[0]=x[rs1], vd[i+1] = vs2[i] */
4731 GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_b
, 8)
4732 GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_h
, 16)
4733 GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_w
, 32)
4734 GEN_VEXT_VSLIDE1UP_VX(vslide1up_vx_d
, 64)
4736 #define GEN_VEXT_VSLIDE1DOWN(ESZ, H) \
4737 static void vslide1down_##ESZ(void *vd, void *v0, target_ulong s1, void *vs2, \
4738 CPURISCVState *env, uint32_t desc) \
4740 typedef uint##ESZ##_t ETYPE; \
4741 uint32_t vm = vext_vm(desc); \
4742 uint32_t vl = env->vl; \
4745 for (i = env->vstart; i < vl; i++) { \
4746 if (!vm && !vext_elem_mask(v0, i)) { \
4749 if (i == vl - 1) { \
4750 *((ETYPE *)vd + H(i)) = s1; \
4752 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(i + 1)); \
4758 GEN_VEXT_VSLIDE1DOWN(8, H1
)
4759 GEN_VEXT_VSLIDE1DOWN(16, H2
)
4760 GEN_VEXT_VSLIDE1DOWN(32, H4
)
4761 GEN_VEXT_VSLIDE1DOWN(64, H8
)
4763 #define GEN_VEXT_VSLIDE1DOWN_VX(NAME, ESZ) \
4764 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
4765 CPURISCVState *env, uint32_t desc) \
4767 vslide1down_##ESZ(vd, v0, s1, vs2, env, desc); \
4770 /* vslide1down.vx vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=x[rs1] */
4771 GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_b
, 8)
4772 GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_h
, 16)
4773 GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_w
, 32)
4774 GEN_VEXT_VSLIDE1DOWN_VX(vslide1down_vx_d
, 64)
4776 /* Vector Floating-Point Slide Instructions */
4777 #define GEN_VEXT_VFSLIDE1UP_VF(NAME, ESZ) \
4778 void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
4779 CPURISCVState *env, uint32_t desc) \
4781 vslide1up_##ESZ(vd, v0, s1, vs2, env, desc); \
4784 /* vfslide1up.vf vd, vs2, rs1, vm # vd[0]=f[rs1], vd[i+1] = vs2[i] */
4785 GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_h
, 16)
4786 GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_w
, 32)
4787 GEN_VEXT_VFSLIDE1UP_VF(vfslide1up_vf_d
, 64)
4789 #define GEN_VEXT_VFSLIDE1DOWN_VF(NAME, ESZ) \
4790 void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
4791 CPURISCVState *env, uint32_t desc) \
4793 vslide1down_##ESZ(vd, v0, s1, vs2, env, desc); \
4796 /* vfslide1down.vf vd, vs2, rs1, vm # vd[i] = vs2[i+1], vd[vl-1]=f[rs1] */
4797 GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_h
, 16)
4798 GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_w
, 32)
4799 GEN_VEXT_VFSLIDE1DOWN_VF(vfslide1down_vf_d
, 64)
4801 /* Vector Register Gather Instruction */
4802 #define GEN_VEXT_VRGATHER_VV(NAME, TS1, TS2, HS1, HS2) \
4803 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
4804 CPURISCVState *env, uint32_t desc) \
4806 uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(TS2))); \
4807 uint32_t vm = vext_vm(desc); \
4808 uint32_t vl = env->vl; \
4812 for (i = env->vstart; i < vl; i++) { \
4813 if (!vm && !vext_elem_mask(v0, i)) { \
4816 index = *((TS1 *)vs1 + HS1(i)); \
4817 if (index >= vlmax) { \
4818 *((TS2 *)vd + HS2(i)) = 0; \
4820 *((TS2 *)vd + HS2(i)) = *((TS2 *)vs2 + HS2(index)); \
4826 /* vd[i] = (vs1[i] >= VLMAX) ? 0 : vs2[vs1[i]]; */
4827 GEN_VEXT_VRGATHER_VV(vrgather_vv_b
, uint8_t, uint8_t, H1
, H1
)
4828 GEN_VEXT_VRGATHER_VV(vrgather_vv_h
, uint16_t, uint16_t, H2
, H2
)
4829 GEN_VEXT_VRGATHER_VV(vrgather_vv_w
, uint32_t, uint32_t, H4
, H4
)
4830 GEN_VEXT_VRGATHER_VV(vrgather_vv_d
, uint64_t, uint64_t, H8
, H8
)
4832 GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_b
, uint16_t, uint8_t, H2
, H1
)
4833 GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_h
, uint16_t, uint16_t, H2
, H2
)
4834 GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_w
, uint16_t, uint32_t, H2
, H4
)
4835 GEN_VEXT_VRGATHER_VV(vrgatherei16_vv_d
, uint16_t, uint64_t, H2
, H8
)
4837 #define GEN_VEXT_VRGATHER_VX(NAME, ETYPE, H) \
4838 void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
4839 CPURISCVState *env, uint32_t desc) \
4841 uint32_t vlmax = vext_max_elems(desc, ctzl(sizeof(ETYPE))); \
4842 uint32_t vm = vext_vm(desc); \
4843 uint32_t vl = env->vl; \
4844 uint64_t index = s1; \
4847 for (i = env->vstart; i < vl; i++) { \
4848 if (!vm && !vext_elem_mask(v0, i)) { \
4851 if (index >= vlmax) { \
4852 *((ETYPE *)vd + H(i)) = 0; \
4854 *((ETYPE *)vd + H(i)) = *((ETYPE *)vs2 + H(index)); \
4860 /* vd[i] = (x[rs1] >= VLMAX) ? 0 : vs2[rs1] */
4861 GEN_VEXT_VRGATHER_VX(vrgather_vx_b
, uint8_t, H1
)
4862 GEN_VEXT_VRGATHER_VX(vrgather_vx_h
, uint16_t, H2
)
4863 GEN_VEXT_VRGATHER_VX(vrgather_vx_w
, uint32_t, H4
)
4864 GEN_VEXT_VRGATHER_VX(vrgather_vx_d
, uint64_t, H8
)
4866 /* Vector Compress Instruction */
4867 #define GEN_VEXT_VCOMPRESS_VM(NAME, ETYPE, H) \
4868 void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
4869 CPURISCVState *env, uint32_t desc) \
4871 uint32_t vl = env->vl; \
4872 uint32_t num = 0, i; \
4874 for (i = env->vstart; i < vl; i++) { \
4875 if (!vext_elem_mask(vs1, i)) { \
4878 *((ETYPE *)vd + H(num)) = *((ETYPE *)vs2 + H(i)); \
4884 /* Compress into vd elements of vs2 where vs1 is enabled */
4885 GEN_VEXT_VCOMPRESS_VM(vcompress_vm_b
, uint8_t, H1
)
4886 GEN_VEXT_VCOMPRESS_VM(vcompress_vm_h
, uint16_t, H2
)
4887 GEN_VEXT_VCOMPRESS_VM(vcompress_vm_w
, uint32_t, H4
)
4888 GEN_VEXT_VCOMPRESS_VM(vcompress_vm_d
, uint64_t, H8
)
4890 /* Vector Whole Register Move */
4891 #define GEN_VEXT_VMV_WHOLE(NAME, LEN) \
4892 void HELPER(NAME)(void *vd, void *vs2, CPURISCVState *env, \
4896 uint32_t maxsz = simd_maxsz(desc); \
4897 uint32_t i = env->vstart; \
4899 memcpy((uint8_t *)vd + H1(i), \
4900 (uint8_t *)vs2 + H1(i), \
4901 maxsz - env->vstart); \
4906 GEN_VEXT_VMV_WHOLE(vmv1r_v
, 1)
4907 GEN_VEXT_VMV_WHOLE(vmv2r_v
, 2)
4908 GEN_VEXT_VMV_WHOLE(vmv4r_v
, 4)
4909 GEN_VEXT_VMV_WHOLE(vmv8r_v
, 8)
4911 /* Vector Integer Extension */
4912 #define GEN_VEXT_INT_EXT(NAME, ETYPE, DTYPE, HD, HS1) \
4913 void HELPER(NAME)(void *vd, void *v0, void *vs2, \
4914 CPURISCVState *env, uint32_t desc) \
4916 uint32_t vl = env->vl; \
4917 uint32_t vm = vext_vm(desc); \
4920 for (i = env->vstart; i < vl; i++) { \
4921 if (!vm && !vext_elem_mask(v0, i)) { \
4924 *((ETYPE *)vd + HD(i)) = *((DTYPE *)vs2 + HS1(i)); \
4929 GEN_VEXT_INT_EXT(vzext_vf2_h
, uint16_t, uint8_t, H2
, H1
)
4930 GEN_VEXT_INT_EXT(vzext_vf2_w
, uint32_t, uint16_t, H4
, H2
)
4931 GEN_VEXT_INT_EXT(vzext_vf2_d
, uint64_t, uint32_t, H8
, H4
)
4932 GEN_VEXT_INT_EXT(vzext_vf4_w
, uint32_t, uint8_t, H4
, H1
)
4933 GEN_VEXT_INT_EXT(vzext_vf4_d
, uint64_t, uint16_t, H8
, H2
)
4934 GEN_VEXT_INT_EXT(vzext_vf8_d
, uint64_t, uint8_t, H8
, H1
)
4936 GEN_VEXT_INT_EXT(vsext_vf2_h
, int16_t, int8_t, H2
, H1
)
4937 GEN_VEXT_INT_EXT(vsext_vf2_w
, int32_t, int16_t, H4
, H2
)
4938 GEN_VEXT_INT_EXT(vsext_vf2_d
, int64_t, int32_t, H8
, H4
)
4939 GEN_VEXT_INT_EXT(vsext_vf4_w
, int32_t, int8_t, H4
, H1
)
4940 GEN_VEXT_INT_EXT(vsext_vf4_d
, int64_t, int16_t, H8
, H2
)
4941 GEN_VEXT_INT_EXT(vsext_vf8_d
, int64_t, int8_t, H8
, H1
)