/*
 * RISC-V Vector Extension Internals
 *
 * Copyright (c) 2020 T-Head Semiconductor Co., Ltd. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
19 #ifndef TARGET_RISCV_VECTOR_INTERNALS_H
20 #define TARGET_RISCV_VECTOR_INTERNALS_H
22 #include "qemu/bitops.h"
24 #include "tcg/tcg-gvec-desc.h"
25 #include "internals.h"
/*
 * Return from the calling vector helper immediately when there is no body
 * work to do, i.e. vstart is already at or past vl.
 * NOTE(review): the macro body after the condition was lost in this copy;
 * reconstructed (clear vstart, then return) — confirm against upstream.
 */
#define VSTART_CHECK_EARLY_EXIT(env) do { \
    if (env->vstart >= env->vl) {         \
        env->vstart = 0;                  \
        return;                           \
    }                                     \
} while (0)
34 static inline uint32_t vext_nf(uint32_t desc
)
36 return FIELD_EX32(simd_data(desc
), VDATA
, NF
);
/*
 * Note that vector data is stored in host-endian 64-bit chunks,
 * so addressing units smaller than that needs a host-endian fixup.
 * On little-endian hosts the fixup is the identity; on big-endian
 * hosts the sub-64-bit index is XOR-swizzled within the chunk.
 * NOTE(review): the #if/#else structure was lost in this copy and has been
 * reconstructed — the XOR forms must only apply on big-endian hosts.
 */
#if HOST_BIG_ENDIAN
#define H1(x)   ((x) ^ 7)
#define H1_2(x) ((x) ^ 6)
#define H1_4(x) ((x) ^ 4)
#define H2(x)   ((x) ^ 3)
#define H4(x)   ((x) ^ 1)
#define H8(x)   ((x))
#else
#define H1(x)   (x)
#define H1_2(x) (x)
#define H1_4(x) (x)
#define H2(x)   (x)
#define H4(x)   (x)
#define H8(x)   (x)
#endif
60 * Encode LMUL to lmul as following:
71 static inline int32_t vext_lmul(uint32_t desc
)
73 return sextract32(FIELD_EX32(simd_data(desc
), VDATA
, LMUL
), 0, 3);
76 static inline uint32_t vext_vm(uint32_t desc
)
78 return FIELD_EX32(simd_data(desc
), VDATA
, VM
);
81 static inline uint32_t vext_vma(uint32_t desc
)
83 return FIELD_EX32(simd_data(desc
), VDATA
, VMA
);
86 static inline uint32_t vext_vta(uint32_t desc
)
88 return FIELD_EX32(simd_data(desc
), VDATA
, VTA
);
91 static inline uint32_t vext_vta_all_1s(uint32_t desc
)
93 return FIELD_EX32(simd_data(desc
), VDATA
, VTA_ALL_1S
);
/*
 * Earlier designs (pre-0.9) had a varying number of bits
 * per mask value (MLEN). In the 0.9 design, MLEN=1.
 *
 * Return mask bit 'index' (0 or 1) from the mask register image at v0,
 * which is addressed as an array of host-order uint64_t chunks.
 */
static inline int vext_elem_mask(void *v0, int index)
{
    int idx = index / 64;  /* which 64-bit chunk */
    int pos = index % 64;  /* bit position within the chunk */
    return (((uint64_t *)v0)[idx] >> pos) & 1;
}
109 * Get number of total elements, including prestart, body and tail elements.
110 * Note that when LMUL < 1, the tail includes the elements past VLMAX that
111 * are held in the same vector register.
113 static inline uint32_t vext_get_total_elems(CPURISCVState
*env
, uint32_t desc
,
116 uint32_t vlenb
= simd_maxsz(desc
);
117 uint32_t sew
= 1 << FIELD_EX64(env
->vtype
, VTYPE
, VSEW
);
118 int8_t emul
= ctzl(esz
) - ctzl(sew
) + vext_lmul(desc
) < 0 ? 0 :
119 ctzl(esz
) - ctzl(sew
) + vext_lmul(desc
);
120 return (vlenb
<< emul
) / esz
;
/*
 * Set agnostic elements to 1s: fill bytes [cnt, tot) of the register image
 * at base when is_agnostic is set (defined in vector_internals.c).
 * NOTE(review): the final parameter was lost in this copy; reconstructed
 * as 'uint32_t tot' from the call sites below — confirm against upstream.
 */
void vext_set_elems_1s(void *base, uint32_t is_agnostic, uint32_t cnt,
                       uint32_t tot);
/* expand macro args before macro */
#define RVVCALL(macro, ...) macro(__VA_ARGS__)
131 #define OP_UU_B uint8_t, uint8_t, uint8_t
132 #define OP_UU_H uint16_t, uint16_t, uint16_t
133 #define OP_UU_W uint32_t, uint32_t, uint32_t
134 #define OP_UU_D uint64_t, uint64_t, uint64_t
136 /* (TD, T1, T2, TX1, TX2) */
137 #define OP_UUU_B uint8_t, uint8_t, uint8_t, uint8_t, uint8_t
138 #define OP_UUU_H uint16_t, uint16_t, uint16_t, uint16_t, uint16_t
139 #define OP_UUU_W uint32_t, uint32_t, uint32_t, uint32_t, uint32_t
140 #define OP_UUU_D uint64_t, uint64_t, uint64_t, uint64_t, uint64_t
/* Define a per-element unary op do_NAME: vd[i] = OP(vs2[i]). */
#define OPIVV1(NAME, TD, T2, TX2, HD, HS2, OP)        \
static void do_##NAME(void *vd, void *vs2, int i)     \
{                                                     \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                   \
    *((TD *)vd + HD(i)) = OP(s2);                     \
}
/*
 * Define a unary-vector helper: apply do_NAME to every body element,
 * honouring the mask (masked-off elements set to 1s when VMA) and
 * setting tail elements to 1s when VTA.
 * NOTE(review): braces, 'uint32_t i;', 'continue;' and the trailing
 * 'env->vstart = 0;' were lost in this copy; reconstructed — confirm
 * against upstream.
 */
#define GEN_VEXT_V(NAME, ESZ)                          \
void HELPER(NAME)(void *vd, void *v0, void *vs2,       \
                  CPURISCVState *env, uint32_t desc)   \
{                                                      \
    uint32_t vm = vext_vm(desc);                       \
    uint32_t vl = env->vl;                             \
    uint32_t total_elems =                             \
        vext_get_total_elems(env, desc, ESZ);          \
    uint32_t vta = vext_vta(desc);                     \
    uint32_t vma = vext_vma(desc);                     \
    uint32_t i;                                        \
                                                       \
    VSTART_CHECK_EARLY_EXIT(env);                      \
                                                       \
    for (i = env->vstart; i < vl; i++) {               \
        if (!vm && !vext_elem_mask(v0, i)) {           \
            /* set masked-off elements to 1s */        \
            vext_set_elems_1s(vd, vma, i * ESZ,        \
                              (i + 1) * ESZ);          \
            continue;                                  \
        }                                              \
        do_##NAME(vd, vs2, i);                         \
    }                                                  \
    env->vstart = 0;                                   \
    /* set tail elements to 1s */                      \
    vext_set_elems_1s(vd, vta, vl * ESZ,               \
                      total_elems * ESZ);              \
}
/* operation of two vector elements */
typedef void opivv2_fn(void *vd, void *vs1, void *vs2, int i);
/* Define a per-element binary op do_NAME: vd[i] = OP(vs2[i], vs1[i]). */
#define OPIVV2(NAME, TD, T1, T2, TX1, TX2, HD, HS1, HS2, OP)    \
static void do_##NAME(void *vd, void *vs1, void *vs2, int i)    \
{                                                               \
    TX1 s1 = *((T1 *)vs1 + HS1(i));                             \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                             \
    *((TD *)vd + HD(i)) = OP(s2, s1);                           \
}
189 void do_vext_vv(void *vd
, void *v0
, void *vs1
, void *vs2
,
190 CPURISCVState
*env
, uint32_t desc
,
191 opivv2_fn
*fn
, uint32_t esz
);
/* generate the helpers for OPIVV */
#define GEN_VEXT_VV(NAME, ESZ)                    \
void HELPER(NAME)(void *vd, void *v0, void *vs1,  \
                  void *vs2, CPURISCVState *env,  \
                  uint32_t desc)                  \
{                                                 \
    do_vext_vv(vd, v0, vs1, vs2, env, desc,       \
               do_##NAME, ESZ);                   \
}
203 typedef void opivx2_fn(void *vd
, target_long s1
, void *vs2
, int i
);
/*
 * (T1)s1 gives the real operator type.
 * (TX1)(T1)s1 expands the operator type of widen or narrow operations.
 */
#define OPIVX2(NAME, TD, T1, T2, TX1, TX2, HD, HS2, OP)             \
static void do_##NAME(void *vd, target_long s1, void *vs2, int i)   \
{                                                                   \
    TX2 s2 = *((T2 *)vs2 + HS2(i));                                 \
    *((TD *)vd + HD(i)) = OP(s2, (TX1)(T1)s1);                      \
}
216 void do_vext_vx(void *vd
, void *v0
, target_long s1
, void *vs2
,
217 CPURISCVState
*env
, uint32_t desc
,
218 opivx2_fn fn
, uint32_t esz
);
/* generate the helpers for OPIVX */
#define GEN_VEXT_VX(NAME, ESZ)                          \
void HELPER(NAME)(void *vd, void *v0, target_ulong s1,  \
                  void *vs2, CPURISCVState *env,        \
                  uint32_t desc)                        \
{                                                       \
    do_vext_vx(vd, v0, s1, vs2, env, desc,              \
               do_##NAME, ESZ);                         \
}
230 /* Three of the widening shortening macros: */
231 /* (TD, T1, T2, TX1, TX2) */
232 #define WOP_UUU_B uint16_t, uint8_t, uint8_t, uint16_t, uint16_t
233 #define WOP_UUU_H uint32_t, uint16_t, uint16_t, uint32_t, uint32_t
234 #define WOP_UUU_W uint64_t, uint32_t, uint32_t, uint64_t, uint64_t
236 #endif /* TARGET_RISCV_VECTOR_INTERNALS_H */