/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch LSX helper functions.
 *
 * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

#define DO_ADD(a, b) (a + b)
#define DO_SUB(a, b) (a - b)

#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(CPULoongArchState *env,                            \
                  uint32_t vd, uint32_t vj, uint32_t vk)             \
{                                                                    \
    int i;                                                           \
    VReg *Vd = &(env->fpr[vd].vreg);                                 \
    VReg *Vj = &(env->fpr[vj].vreg);                                 \
    VReg *Vk = &(env->fpr[vk].vreg);                                 \
    typedef __typeof(Vd->E1(0)) TD;                                  \
                                                                     \
    for (i = 0; i < LSX_LEN/BIT; i++) {                              \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
    }                                                                \
}

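/*
 * Each DO_ODD_EVEN expansion widens by pairing the odd-indexed half-width
 * elements of Vj with the even-indexed half-width elements of Vk: every
 * destination element of width BIT is computed from two source elements of
 * width BIT/2, so the horizontal add/sub cannot overflow the destination
 * type.
 */
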
DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)

void HELPER(vhaddw_q_d)(CPULoongArchState *env,
                        uint32_t vd, uint32_t vj, uint32_t vk)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);
    VReg *Vk = &(env->fpr[vk].vreg);

    Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
}

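/*
 * The *_q_d forms have no wider element type to widen into, so they use
 * QEMU's Int128 API instead: int128_makes64() sign-extends a 64-bit value
 * to 128 bits, while int128_make64() zero-extends it, which is what
 * distinguishes the signed helpers from the unsigned *_qu_du ones below.
 */
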
DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)

void HELPER(vhsubw_q_d)(CPULoongArchState *env,
                        uint32_t vd, uint32_t vj, uint32_t vk)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);
    VReg *Vk = &(env->fpr[vk].vreg);

    Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(0)));
}

DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)

void HELPER(vhaddw_qu_du)(CPULoongArchState *env,
                          uint32_t vd, uint32_t vj, uint32_t vk)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);
    VReg *Vk = &(env->fpr[vk].vreg);

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
                          int128_make64((uint64_t)Vk->D(0)));
}

DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)

void HELPER(vhsubw_qu_du)(CPULoongArchState *env,
                          uint32_t vd, uint32_t vj, uint32_t vk)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);
    VReg *Vk = &(env->fpr[vk].vreg);

    Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
                          int128_make64((uint64_t)Vk->D(0)));
}

#define DO_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)      \
{                                                                \
    int i;                                                       \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    VReg *Vk = (VReg *)vk;                                       \
    typedef __typeof(Vd->E1(0)) TD;                              \
    for (i = 0; i < LSX_LEN/BIT; i++) {                          \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                            \
}

#define DO_ODD(NAME, BIT, E1, E2, DO_OP)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)              \
{                                                                        \
    int i;                                                               \
    VReg *Vd = (VReg *)vd;                                               \
    VReg *Vj = (VReg *)vj;                                               \
    VReg *Vk = (VReg *)vk;                                               \
    typedef __typeof(Vd->E1(0)) TD;                                      \
    for (i = 0; i < LSX_LEN/BIT; i++) {                                  \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
    }                                                                    \
}

void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
}

DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)

void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
}

DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)

void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_sub(int128_makes64(Vj->D(0)), int128_makes64(Vk->D(0)));
}

DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)

void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_sub(int128_makes64(Vj->D(1)), int128_makes64(Vk->D(1)));
}

DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)

void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
                          int128_make64((uint64_t)Vk->D(0)));
}

DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)

void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
                          int128_make64((uint64_t)Vk->D(1)));
}

DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)

void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(0)),
                          int128_make64((uint64_t)Vk->D(0)));
}

DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)

void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_sub(int128_make64((uint64_t)Vj->D(1)),
                          int128_make64((uint64_t)Vk->D(1)));
}

DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)

#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)          \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)        \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    VReg *Vk = (VReg *)vk;                                         \
    typedef __typeof(Vd->ES1(0)) TDS;                              \
    typedef __typeof(Vd->EU1(0)) TDU;                              \
    for (i = 0; i < LSX_LEN/BIT; i++) {                            \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i), (TDS)Vk->ES2(2 * i)); \
    }                                                              \
}

#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)                   \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)                \
{                                                                          \
    int i;                                                                 \
    VReg *Vd = (VReg *)vd;                                                 \
    VReg *Vj = (VReg *)vj;                                                 \
    VReg *Vk = (VReg *)vk;                                                 \
    typedef __typeof(Vd->ES1(0)) TDS;                                      \
    typedef __typeof(Vd->EU1(0)) TDU;                                      \
    for (i = 0; i < LSX_LEN/BIT; i++) {                                    \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
    }                                                                      \
}

void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(0)),
                          int128_makes64(Vk->D(0)));
}

DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)

void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    Vd->Q(0) = int128_add(int128_make64((uint64_t)Vj->D(1)),
                          int128_makes64(Vk->D(1)));
}

DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD)

#define DO_VAVG(a, b)  ((a >> 1) + (b >> 1) + (a & b & 1))
#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1))

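/*
 * DO_VAVG halves each operand before adding, so the sum cannot overflow
 * the element type; the (a & b & 1) term restores the carry lost when both
 * low bits are set, e.g. DO_VAVG(7, 5) = 3 + 2 + 1 = 6.  DO_VAVGR adds
 * ((a | b) & 1) instead, rounding the average up whenever either low bit
 * is set: DO_VAVGR(7, 4) = 3 + 2 + 1 = 6, where DO_VAVG(7, 4) yields 5.
 */
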
#define DO_3OP(NAME, BIT, E, DO_OP)                         \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));               \
    }                                                       \
}

DO_3OP(vavg_b, 8, B, DO_VAVG)
DO_3OP(vavg_h, 16, H, DO_VAVG)
DO_3OP(vavg_w, 32, W, DO_VAVG)
DO_3OP(vavg_d, 64, D, DO_VAVG)
DO_3OP(vavgr_b, 8, B, DO_VAVGR)
DO_3OP(vavgr_h, 16, H, DO_VAVGR)
DO_3OP(vavgr_w, 32, W, DO_VAVGR)
DO_3OP(vavgr_d, 64, D, DO_VAVGR)
DO_3OP(vavg_bu, 8, UB, DO_VAVG)
DO_3OP(vavg_hu, 16, UH, DO_VAVG)
DO_3OP(vavg_wu, 32, UW, DO_VAVG)
DO_3OP(vavg_du, 64, UD, DO_VAVG)
DO_3OP(vavgr_bu, 8, UB, DO_VAVGR)
DO_3OP(vavgr_hu, 16, UH, DO_VAVGR)
DO_3OP(vavgr_wu, 32, UW, DO_VAVGR)
DO_3OP(vavgr_du, 64, UD, DO_VAVGR)

#define DO_VABSD(a, b) ((a > b) ? (a - b) : (b - a))

DO_3OP(vabsd_b, 8, B, DO_VABSD)
DO_3OP(vabsd_h, 16, H, DO_VABSD)
DO_3OP(vabsd_w, 32, W, DO_VABSD)
DO_3OP(vabsd_d, 64, D, DO_VABSD)
DO_3OP(vabsd_bu, 8, UB, DO_VABSD)
DO_3OP(vabsd_hu, 16, UH, DO_VABSD)
DO_3OP(vabsd_wu, 32, UW, DO_VABSD)
DO_3OP(vabsd_du, 64, UD, DO_VABSD)

#define DO_VABS(a) ((a < 0) ? (-a) : (a))

#define DO_VADDA(NAME, BIT, E, DO_OP)                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E(i) = DO_OP(Vj->E(i)) + DO_OP(Vk->E(i));       \
    }                                                       \
}

DO_VADDA(vadda_b, 8, B, DO_VABS)
DO_VADDA(vadda_h, 16, H, DO_VABS)
DO_VADDA(vadda_w, 32, W, DO_VABS)
DO_VADDA(vadda_d, 64, D, DO_VABS)

#define DO_MIN(a, b) (a < b ? a : b)
#define DO_MAX(a, b) (a > b ? a : b)

#define VMINMAXI(NAME, BIT, E, DO_OP)                           \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    typedef __typeof(Vd->E(0)) TD;                              \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = DO_OP(Vj->E(i), (TD)imm);                    \
    }                                                           \
}

VMINMAXI(vmini_b, 8, B, DO_MIN)
VMINMAXI(vmini_h, 16, H, DO_MIN)
VMINMAXI(vmini_w, 32, W, DO_MIN)
VMINMAXI(vmini_d, 64, D, DO_MIN)
VMINMAXI(vmaxi_b, 8, B, DO_MAX)
VMINMAXI(vmaxi_h, 16, H, DO_MAX)
VMINMAXI(vmaxi_w, 32, W, DO_MAX)
VMINMAXI(vmaxi_d, 64, D, DO_MAX)
VMINMAXI(vmini_bu, 8, UB, DO_MIN)
VMINMAXI(vmini_hu, 16, UH, DO_MIN)
VMINMAXI(vmini_wu, 32, UW, DO_MIN)
VMINMAXI(vmini_du, 64, UD, DO_MIN)
VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
VMINMAXI(vmaxi_du, 64, UD, DO_MAX)

#define DO_VMUH(NAME, BIT, E1, E2, DO_OP)                   \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    typedef __typeof(Vd->E1(0)) T;                          \
                                                            \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT; \
    }                                                       \
}

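/*
 * DO_VMUH keeps only the high half of each product: both operands are
 * promoted to the double-width type T before multiplying, and the result
 * is shifted right by the element width.  The DO_OP parameter (the DO_MUH
 * token in the instantiations below) is accepted but never expanded.  The
 * 64-bit variants cannot use a wider C type, so they call muls64()/mulu64()
 * from qemu/host-utils.h to obtain the high 64 bits of the 128-bit product.
 */
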
void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t v)
{
    uint64_t l, h1, h2;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    muls64(&l, &h1, Vj->D(0), Vk->D(0));
    muls64(&l, &h2, Vj->D(1), Vk->D(1));

    Vd->D(0) = h1;
    Vd->D(1) = h2;
}

DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
DO_VMUH(vmuh_w, 32, D, W, DO_MUH)

void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t v)
{
    uint64_t l, h1, h2;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    mulu64(&l, &h1, Vj->D(0), Vk->D(0));
    mulu64(&l, &h2, Vj->D(1), Vk->D(1));

    Vd->D(0) = h1;
    Vd->D(1) = h2;
}

DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)

#define DO_MUL(a, b) (a * b)

DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)

DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL)
DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL)
DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL)

DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL)
DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL)
DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL)

DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL)
DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL)
DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL)

DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define DO_MADD(a, b, c) (a + b * c)
#define DO_MSUB(a, b, c) (a - b * c)

#define VMADDSUB(NAME, BIT, E, DO_OP)                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i), Vk->E(i));     \
    }                                                       \
}

VMADDSUB(vmadd_b, 8, B, DO_MADD)
VMADDSUB(vmadd_h, 16, H, DO_MADD)
VMADDSUB(vmadd_w, 32, W, DO_MADD)
VMADDSUB(vmadd_d, 64, D, DO_MADD)
VMADDSUB(vmsub_b, 8, B, DO_MSUB)
VMADDSUB(vmsub_h, 16, H, DO_MSUB)
VMADDSUB(vmsub_w, 32, W, DO_MSUB)
VMADDSUB(vmsub_d, 64, D, DO_MSUB)

#define VMADDWEV(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v)       \
{                                                                 \
    int i;                                                        \
    VReg *Vd = (VReg *)vd;                                        \
    VReg *Vj = (VReg *)vj;                                        \
    VReg *Vk = (VReg *)vk;                                        \
    typedef __typeof(Vd->E1(0)) TD;                               \
                                                                  \
    for (i = 0; i < LSX_LEN/BIT; i++) {                           \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                             \
}

VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL)
VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL)
VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL)
VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL)
VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL)
VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWOD(NAME, BIT, E1, E2, DO_OP)                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    typedef __typeof(Vd->E1(0)) TD;                         \
                                                            \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1),           \
                           (TD)Vk->E2(2 * i + 1));          \
    }                                                       \
}

VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL)
VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL)
VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL)
VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL)
VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL)
VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    typedef __typeof(Vd->ES1(0)) TS1;                       \
    typedef __typeof(Vd->EU1(0)) TU1;                       \
                                                            \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i),            \
                            (TS1)Vk->ES2(2 * i));           \
    }                                                       \
}

VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
    typedef __typeof(Vd->ES1(0)) TS1;                       \
    typedef __typeof(Vd->EU1(0)) TU1;                       \
                                                            \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1),        \
                            (TS1)Vk->ES2(2 * i + 1));       \
    }                                                       \
}

VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M)
#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M)
#define DO_DIV(N, M)  (unlikely(M == 0) ? 0 : \
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
#define DO_REM(N, M)  (unlikely(M == 0) ? 0 : \
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)

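/*
 * Division by zero yields 0 rather than trapping.  The signed forms also
 * guard the one remaining overflow case: N == -N only holds for 0 and for
 * the minimum value of N's type, so dividing the minimum by -1 returns N
 * (and the corresponding remainder returns 0) instead of performing an
 * undefined C division.
 */
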
#define VDIV(NAME, BIT, E, DO_OP)                        \
void HELPER(NAME)(CPULoongArchState *env,                \
                  uint32_t vd, uint32_t vj, uint32_t vk) \
{                                                        \
    int i;                                               \
    VReg *Vd = &(env->fpr[vd].vreg);                     \
    VReg *Vj = &(env->fpr[vj].vreg);                     \
    VReg *Vk = &(env->fpr[vk].vreg);                     \
    for (i = 0; i < LSX_LEN/BIT; i++) {                  \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));            \
    }                                                    \
}

VDIV(vdiv_b, 8, B, DO_DIV)
VDIV(vdiv_h, 16, H, DO_DIV)
VDIV(vdiv_w, 32, W, DO_DIV)
VDIV(vdiv_d, 64, D, DO_DIV)
VDIV(vdiv_bu, 8, UB, DO_DIVU)
VDIV(vdiv_hu, 16, UH, DO_DIVU)
VDIV(vdiv_wu, 32, UW, DO_DIVU)
VDIV(vdiv_du, 64, UD, DO_DIVU)
VDIV(vmod_b, 8, B, DO_REM)
VDIV(vmod_h, 16, H, DO_REM)
VDIV(vmod_w, 32, W, DO_REM)
VDIV(vmod_d, 64, D, DO_REM)
VDIV(vmod_bu, 8, UB, DO_REMU)
VDIV(vmod_hu, 16, UH, DO_REMU)
VDIV(vmod_wu, 32, UW, DO_REMU)
VDIV(vmod_du, 64, UD, DO_REMU)

#define VSAT_S(NAME, BIT, E)                                    \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    typedef __typeof(Vd->E(0)) TD;                              \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max :               \
                   Vj->E(i) < (TD)~max ? (TD)~max : Vj->E(i);   \
    }                                                           \
}

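/*
 * VSAT_S derives the lower clamp bound as the bitwise complement of the
 * upper one: when max is, say, 0x7f, (TD)~max is (int8_t)0x80 == -128, so
 * values are clamped into [~max, max].  VSAT_U only needs the upper bound.
 */
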
VSAT_S(vsat_b, 8, B)
VSAT_S(vsat_h, 16, H)
VSAT_S(vsat_w, 32, W)
VSAT_S(vsat_d, 64, D)

#define VSAT_U(NAME, BIT, E)                                    \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t v) \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    typedef __typeof(Vd->E(0)) TD;                              \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i);     \
    }                                                           \
}

VSAT_U(vsat_bu, 8, UB)
VSAT_U(vsat_hu, 16, UH)
VSAT_U(vsat_wu, 32, UW)
VSAT_U(vsat_du, 64, UD)

#define VEXTH(NAME, BIT, E1, E2)                                    \
void HELPER(NAME)(CPULoongArchState *env, uint32_t vd, uint32_t vj) \
{                                                                   \
    int i;                                                          \
    VReg *Vd = &(env->fpr[vd].vreg);                                \
    VReg *Vj = &(env->fpr[vj].vreg);                                \
                                                                    \
    for (i = 0; i < LSX_LEN/BIT; i++) {                             \
        Vd->E1(i) = Vj->E2(i + LSX_LEN/BIT);                        \
    }                                                               \
}

void HELPER(vexth_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    Vd->Q(0) = int128_makes64(Vj->D(1));
}

void HELPER(vexth_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    Vd->Q(0) = int128_make64((uint64_t)Vj->D(1));
}

VEXTH(vexth_h_b, 16, H, B)
VEXTH(vexth_w_h, 32, W, H)
VEXTH(vexth_d_w, 64, D, W)
VEXTH(vexth_hu_bu, 16, UH, UB)
VEXTH(vexth_wu_hu, 32, UW, UH)
VEXTH(vexth_du_wu, 64, UD, UW)

#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)

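/*
 * DO_SIGNCOV(a, b) copies the sign of a onto b: it evaluates to 0, -b or b
 * as a is zero, negative or positive, i.e. signum(a) * b.
 */
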
DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV)
DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV)
DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV)
DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV)

static uint64_t do_vmskltz_b(int64_t val)
{
    uint64_t m = 0x8080808080808080ULL;
    uint64_t c = val & m;
    c |= c << 7;
    c |= c << 14;
    c |= c << 28;
    return c >> 56;
}

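/*
 * do_vmskltz_b gathers the eight byte sign bits into the low byte of the
 * result.  After masking, the sign of byte i sits at bit 8*i + 7 and must
 * travel to bit 56 + i; the shift-and-or cascade applies every distance in
 * {0, 7, 14, ..., 49} (all subset sums of {7, 14, 28}), and the final
 * >> 56 discards everything below the collected bits.  The halfword, word
 * and doubleword variants below use the same idea with shorter cascades.
 */
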
void HELPER(vmskltz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    uint16_t temp = 0;
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    temp = do_vmskltz_b(Vj->D(0));
    temp |= (do_vmskltz_b(Vj->D(1)) << 8);
    Vd->D(0) = temp;
    Vd->D(1) = 0;
}

static uint64_t do_vmskltz_h(int64_t val)
{
    uint64_t m = 0x8000800080008000ULL;
    uint64_t c = val & m;
    c |= c << 15;
    c |= c << 30;
    return c >> 60;
}

void HELPER(vmskltz_h)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    uint16_t temp = 0;
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    temp = do_vmskltz_h(Vj->D(0));
    temp |= (do_vmskltz_h(Vj->D(1)) << 4);
    Vd->D(0) = temp;
    Vd->D(1) = 0;
}

static uint64_t do_vmskltz_w(int64_t val)
{
    uint64_t m = 0x8000000080000000ULL;
    uint64_t c = val & m;
    c |= c << 31;
    return c >> 62;
}

void HELPER(vmskltz_w)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    uint16_t temp = 0;
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    temp = do_vmskltz_w(Vj->D(0));
    temp |= (do_vmskltz_w(Vj->D(1)) << 2);
    Vd->D(0) = temp;
    Vd->D(1) = 0;
}

static uint64_t do_vmskltz_d(int64_t val)
{
    return (uint64_t)val >> 63;
}

void HELPER(vmskltz_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    uint16_t temp = 0;
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    temp = do_vmskltz_d(Vj->D(0));
    temp |= (do_vmskltz_d(Vj->D(1)) << 1);
    Vd->D(0) = temp;
    Vd->D(1) = 0;
}

void HELPER(vmskgez_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    uint16_t temp = 0;
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    temp = do_vmskltz_b(Vj->D(0));
    temp |= (do_vmskltz_b(Vj->D(1)) << 8);
    Vd->D(0) = (uint16_t)(~temp);
    Vd->D(1) = 0;
}

static uint64_t do_vmskez_b(uint64_t a)
{
    uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
    uint64_t c = ~(((a & m) + m) | a | m);
    c |= c << 7;
    c |= c << 14;
    c |= c << 28;
    return c >> 56;
}

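/*
 * do_vmskez_b flags zero bytes.  For each byte, (a & 0x7f) + 0x7f carries
 * into bit 7 iff any of the low seven bits is set; OR-ing in a itself
 * covers bit 7, and OR-ing in m fills bits 0-6.  The complement therefore
 * leaves bit 7 set exactly for zero bytes, and the same cascade as
 * do_vmskltz_b packs those flags into the low byte.  vmsknz_b below then
 * inverts the packed mask to report the non-zero bytes.
 */
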
void HELPER(vmsknz_b)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    uint16_t temp = 0;
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    temp = do_vmskez_b(Vj->D(0));
    temp |= (do_vmskez_b(Vj->D(1)) << 8);
    Vd->D(0) = (uint16_t)(~temp);
    Vd->D(1) = 0;
}

void HELPER(vnori_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < LSX_LEN/8; i++) {
        Vd->B(i) = ~(Vj->B(i) | (uint8_t)imm);
    }
}

#define VSLLWIL(NAME, BIT, E1, E2)                        \
void HELPER(NAME)(CPULoongArchState *env,                 \
                  uint32_t vd, uint32_t vj, uint32_t imm) \
{                                                         \
    int i;                                                \
    VReg temp;                                            \
    VReg *Vd = &(env->fpr[vd].vreg);                      \
    VReg *Vj = &(env->fpr[vj].vreg);                      \
    typedef __typeof(temp.E1(0)) TD;                      \
                                                          \
    temp.D(0) = 0;                                        \
    temp.D(1) = 0;                                        \
    for (i = 0; i < LSX_LEN/BIT; i++) {                   \
        temp.E1(i) = (TD)Vj->E2(i) << (imm % BIT);        \
    }                                                     \
    *Vd = temp;                                           \
}

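/*
 * VSLLWIL widens the low half of Vj's elements while shifting: each of the
 * LSX_LEN/BIT results is a half-width source element promoted to TD and
 * shifted left by imm % BIT.  Building the result in a local VReg first
 * keeps the helper correct when vd and vj name the same register.
 */
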
void HELPER(vextl_q_d)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    Vd->Q(0) = int128_makes64(Vj->D(0));
}

void HELPER(vextl_qu_du)(CPULoongArchState *env, uint32_t vd, uint32_t vj)
{
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    Vd->Q(0) = int128_make64(Vj->D(0));
}

VSLLWIL(vsllwil_h_b, 16, H, B)
VSLLWIL(vsllwil_w_h, 32, W, H)
VSLLWIL(vsllwil_d_w, 64, D, W)
VSLLWIL(vsllwil_hu_bu, 16, UH, UB)
VSLLWIL(vsllwil_wu_hu, 32, UW, UH)
VSLLWIL(vsllwil_du_wu, 64, UD, UW)

#define do_vsrlr(E, T)                                \
static T do_vsrlr_ ##E(T s1, int sh)                  \
{                                                     \
    if (sh == 0) {                                    \
        return s1;                                    \
    } else {                                          \
        return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
    }                                                 \
}

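/*
 * The "rounding" shifts add back the most significant bit shifted out,
 * i.e. (s1 >> (sh - 1)) & 1, which rounds the result to nearest instead
 * of truncating; sh == 0 is special-cased because sh - 1 would be
 * negative.  do_vsrar below is identical except that T is signed, so the
 * >> is an arithmetic shift.
 */
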
do_vsrlr(B, uint8_t)
do_vsrlr(H, uint16_t)
do_vsrlr(W, uint32_t)
do_vsrlr(D, uint64_t)

#define VSRLR(NAME, BIT, T, E)                                  \
void HELPER(NAME)(CPULoongArchState *env,                       \
                  uint32_t vd, uint32_t vj, uint32_t vk)        \
{                                                               \
    int i;                                                      \
    VReg *Vd = &(env->fpr[vd].vreg);                            \
    VReg *Vj = &(env->fpr[vj].vreg);                            \
    VReg *Vk = &(env->fpr[vk].vreg);                            \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
    }                                                           \
}

VSRLR(vsrlr_b, 8, uint8_t, B)
VSRLR(vsrlr_h, 16, uint16_t, H)
VSRLR(vsrlr_w, 32, uint32_t, W)
VSRLR(vsrlr_d, 64, uint64_t, D)

#define VSRLRI(NAME, BIT, E)                              \
void HELPER(NAME)(CPULoongArchState *env,                 \
                  uint32_t vd, uint32_t vj, uint32_t imm) \
{                                                         \
    int i;                                                \
    VReg *Vd = &(env->fpr[vd].vreg);                      \
    VReg *Vj = &(env->fpr[vj].vreg);                      \
                                                          \
    for (i = 0; i < LSX_LEN/BIT; i++) {                   \
        Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), imm);         \
    }                                                     \
}

VSRLRI(vsrlri_b, 8, B)
VSRLRI(vsrlri_h, 16, H)
VSRLRI(vsrlri_w, 32, W)
VSRLRI(vsrlri_d, 64, D)

#define do_vsrar(E, T)                                \
static T do_vsrar_ ##E(T s1, int sh)                  \
{                                                     \
    if (sh == 0) {                                    \
        return s1;                                    \
    } else {                                          \
        return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
    }                                                 \
}

do_vsrar(B, int8_t)
do_vsrar(H, int16_t)
do_vsrar(W, int32_t)
do_vsrar(D, int64_t)

#define VSRAR(NAME, BIT, T, E)                                  \
void HELPER(NAME)(CPULoongArchState *env,                       \
                  uint32_t vd, uint32_t vj, uint32_t vk)        \
{                                                               \
    int i;                                                      \
    VReg *Vd = &(env->fpr[vd].vreg);                            \
    VReg *Vj = &(env->fpr[vj].vreg);                            \
    VReg *Vk = &(env->fpr[vk].vreg);                            \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = do_vsrar_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
    }                                                           \
}

VSRAR(vsrar_b, 8, uint8_t, B)
VSRAR(vsrar_h, 16, uint16_t, H)
VSRAR(vsrar_w, 32, uint32_t, W)
VSRAR(vsrar_d, 64, uint64_t, D)

#define VSRARI(NAME, BIT, E)                              \
void HELPER(NAME)(CPULoongArchState *env,                 \
                  uint32_t vd, uint32_t vj, uint32_t imm) \
{                                                         \
    int i;                                                \
    VReg *Vd = &(env->fpr[vd].vreg);                      \
    VReg *Vj = &(env->fpr[vj].vreg);                      \
                                                          \
    for (i = 0; i < LSX_LEN/BIT; i++) {                   \
        Vd->E(i) = do_vsrar_ ## E(Vj->E(i), imm);         \
    }                                                     \
}

VSRARI(vsrari_b, 8, B)
VSRARI(vsrari_h, 16, H)
VSRARI(vsrari_w, 32, W)
VSRARI(vsrari_d, 64, D)

#define R_SHIFT(a, b) (a >> b)

#define VSRLN(NAME, BIT, T, E1, E2)                              \
void HELPER(NAME)(CPULoongArchState *env,                        \
                  uint32_t vd, uint32_t vj, uint32_t vk)         \
{                                                                \
    int i;                                                       \
    VReg *Vd = &(env->fpr[vd].vreg);                             \
    VReg *Vj = &(env->fpr[vj].vreg);                             \
    VReg *Vk = &(env->fpr[vk].vreg);                             \
                                                                 \
    for (i = 0; i < LSX_LEN/BIT; i++) {                          \
        Vd->E1(i) = R_SHIFT((T)Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
    }                                                            \
    Vd->D(1) = 0;                                                \
}

VSRLN(vsrln_b_h, 16, uint16_t, B, H)
VSRLN(vsrln_h_w, 32, uint32_t, H, W)
VSRLN(vsrln_w_d, 64, uint64_t, W, D)

#define VSRAN(NAME, BIT, T, E1, E2)                           \
void HELPER(NAME)(CPULoongArchState *env,                     \
                  uint32_t vd, uint32_t vj, uint32_t vk)      \
{                                                             \
    int i;                                                    \
    VReg *Vd = &(env->fpr[vd].vreg);                          \
    VReg *Vj = &(env->fpr[vj].vreg);                          \
    VReg *Vk = &(env->fpr[vk].vreg);                          \
                                                              \
    for (i = 0; i < LSX_LEN/BIT; i++) {                       \
        Vd->E1(i) = R_SHIFT(Vj->E2(i), ((T)Vk->E2(i)) % BIT); \
    }                                                         \
    Vd->D(1) = 0;                                             \
}

VSRAN(vsran_b_h, 16, uint16_t, B, H)
VSRAN(vsran_h_w, 32, uint32_t, H, W)
VSRAN(vsran_w_d, 64, uint64_t, W, D)

#define VSRLNI(NAME, BIT, T, E1, E2)                      \
void HELPER(NAME)(CPULoongArchState *env,                 \
                  uint32_t vd, uint32_t vj, uint32_t imm) \
{                                                         \
    int i, max;                                           \
    VReg temp;                                            \
    VReg *Vd = &(env->fpr[vd].vreg);                      \
    VReg *Vj = &(env->fpr[vj].vreg);                      \
                                                          \
    temp.D(0) = 0;                                        \
    temp.D(1) = 0;                                        \
    max = LSX_LEN/BIT;                                    \
    for (i = 0; i < max; i++) {                           \
        temp.E1(i) = R_SHIFT((T)Vj->E2(i), imm);          \
        temp.E1(i + max) = R_SHIFT((T)Vd->E2(i), imm);    \
    }                                                     \
    *Vd = temp;                                           \
}

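/*
 * The VSRLNI/VSRANI forms narrow two vectors into one: the shifted
 * elements of Vj fill the low half of the destination and the shifted
 * elements of the old Vd fill the high half.  The result is staged in a
 * local VReg so that reading Vd->E2(i) stays safe even though Vd is also
 * the destination.
 */
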
void HELPER(vsrlni_d_q)(CPULoongArchState *env,
                        uint32_t vd, uint32_t vj, uint32_t imm)
{
    VReg temp;
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    temp.D(0) = int128_getlo(int128_urshift(Vj->Q(0), imm % 128));
    temp.D(1) = int128_getlo(int128_urshift(Vd->Q(0), imm % 128));
    *Vd = temp;
}

VSRLNI(vsrlni_b_h, 16, uint16_t, B, H)
VSRLNI(vsrlni_h_w, 32, uint32_t, H, W)
VSRLNI(vsrlni_w_d, 64, uint64_t, W, D)

#define VSRANI(NAME, BIT, E1, E2)                         \
void HELPER(NAME)(CPULoongArchState *env,                 \
                  uint32_t vd, uint32_t vj, uint32_t imm) \
{                                                         \
    int i, max;                                           \
    VReg temp;                                            \
    VReg *Vd = &(env->fpr[vd].vreg);                      \
    VReg *Vj = &(env->fpr[vj].vreg);                      \
                                                          \
    temp.D(0) = 0;                                        \
    temp.D(1) = 0;                                        \
    max = LSX_LEN/BIT;                                    \
    for (i = 0; i < max; i++) {                           \
        temp.E1(i) = R_SHIFT(Vj->E2(i), imm);             \
        temp.E1(i + max) = R_SHIFT(Vd->E2(i), imm);       \
    }                                                     \
    *Vd = temp;                                           \
}

void HELPER(vsrani_d_q)(CPULoongArchState *env,
                        uint32_t vd, uint32_t vj, uint32_t imm)
{
    VReg temp;
    VReg *Vd = &(env->fpr[vd].vreg);
    VReg *Vj = &(env->fpr[vj].vreg);

    temp.D(0) = int128_getlo(int128_rshift(Vj->Q(0), imm % 128));
    temp.D(1) = int128_getlo(int128_rshift(Vd->Q(0), imm % 128));
    *Vd = temp;
}

VSRANI(vsrani_b_h, 16, B, H)
VSRANI(vsrani_h_w, 32, H, W)
VSRANI(vsrani_w_d, 64, W, D)