/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * QEMU LoongArch vector helper functions.
 *
 * Copyright (c) 2022-2023 Loongson Technology Corporation Limited
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "internals.h"
#include "tcg/tcg.h"
#include "vec.h"
#include "tcg/tcg-gvec-desc.h"

#define DO_ADD(a, b)  (a + b)
#define DO_SUB(a, b)  (a - b)

#define DO_ODD_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)       \
{                                                                    \
    int i;                                                           \
    VReg *Vd = (VReg *)vd;                                           \
    VReg *Vj = (VReg *)vj;                                           \
    VReg *Vk = (VReg *)vk;                                           \
    typedef __typeof(Vd->E1(0)) TD;                                  \
    int oprsz = simd_oprsz(desc);                                    \
                                                                     \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                        \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i)); \
    }                                                                \
}
DO_ODD_EVEN(vhaddw_h_b, 16, H, B, DO_ADD)
DO_ODD_EVEN(vhaddw_w_h, 32, W, H, DO_ADD)
DO_ODD_EVEN(vhaddw_d_w, 64, D, W, DO_ADD)

void HELPER(vhaddw_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i)));
    }
}
DO_ODD_EVEN(vhsubw_h_b, 16, H, B, DO_SUB)
DO_ODD_EVEN(vhsubw_w_h, 32, W, H, DO_SUB)
DO_ODD_EVEN(vhsubw_d_w, 64, D, W, DO_SUB)

void HELPER(vhsubw_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i)));
    }
}
DO_ODD_EVEN(vhaddw_hu_bu, 16, UH, UB, DO_ADD)
DO_ODD_EVEN(vhaddw_wu_hu, 32, UW, UH, DO_ADD)
DO_ODD_EVEN(vhaddw_du_wu, 64, UD, UW, DO_ADD)

void HELPER(vhaddw_qu_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i)));
    }
}
DO_ODD_EVEN(vhsubw_hu_bu, 16, UH, UB, DO_SUB)
DO_ODD_EVEN(vhsubw_wu_hu, 32, UW, UH, DO_SUB)
DO_ODD_EVEN(vhsubw_du_wu, 64, UD, UW, DO_SUB)

void HELPER(vhsubw_qu_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i)));
    }
}
#define DO_EVEN(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)   \
{                                                                \
    int i;                                                       \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    VReg *Vk = (VReg *)vk;                                       \
    typedef __typeof(Vd->E1(0)) TD;                              \
    int oprsz = simd_oprsz(desc);                                \
                                                                 \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                    \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                            \
}

#define DO_ODD(NAME, BIT, E1, E2, DO_OP)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)           \
{                                                                        \
    int i;                                                               \
    VReg *Vd = (VReg *)vd;                                               \
    VReg *Vj = (VReg *)vj;                                               \
    VReg *Vk = (VReg *)vk;                                               \
    typedef __typeof(Vd->E1(0)) TD;                                      \
    int oprsz = simd_oprsz(desc);                                        \
                                                                         \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                            \
        Vd->E1(i) = DO_OP((TD)Vj->E2(2 * i + 1), (TD)Vk->E2(2 * i + 1)); \
    }                                                                    \
}
void HELPER(vaddwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN(vaddwev_h_b, 16, H, B, DO_ADD)
DO_EVEN(vaddwev_w_h, 32, W, H, DO_ADD)
DO_EVEN(vaddwev_d_w, 64, D, W, DO_ADD)
void HELPER(vaddwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD(vaddwod_h_b, 16, H, B, DO_ADD)
DO_ODD(vaddwod_w_h, 32, W, H, DO_ADD)
DO_ODD(vaddwod_d_w, 64, D, W, DO_ADD)
void HELPER(vsubwev_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN(vsubwev_h_b, 16, H, B, DO_SUB)
DO_EVEN(vsubwev_w_h, 32, W, H, DO_SUB)
DO_EVEN(vsubwev_d_w, 64, D, W, DO_SUB)
void HELPER(vsubwod_q_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_makes64(Vj->D(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD(vsubwod_h_b, 16, H, B, DO_SUB)
DO_ODD(vsubwod_w_h, 32, W, H, DO_SUB)
DO_ODD(vsubwod_d_w, 64, D, W, DO_SUB)
void HELPER(vaddwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

DO_EVEN(vaddwev_h_bu, 16, UH, UB, DO_ADD)
DO_EVEN(vaddwev_w_hu, 32, UW, UH, DO_ADD)
DO_EVEN(vaddwev_d_wu, 64, UD, UW, DO_ADD)
void HELPER(vaddwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i + 1)));
    }
}

DO_ODD(vaddwod_h_bu, 16, UH, UB, DO_ADD)
DO_ODD(vaddwod_w_hu, 32, UW, UH, DO_ADD)
DO_ODD(vaddwod_d_wu, 64, UD, UW, DO_ADD)
void HELPER(vsubwev_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i)),
                              int128_make64(Vk->UD(2 * i)));
    }
}

DO_EVEN(vsubwev_h_bu, 16, UH, UB, DO_SUB)
DO_EVEN(vsubwev_w_hu, 32, UW, UH, DO_SUB)
DO_EVEN(vsubwev_d_wu, 64, UD, UW, DO_SUB)
void HELPER(vsubwod_q_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_sub(int128_make64(Vj->UD(2 * i + 1)),
                              int128_make64(Vk->UD(2 * i + 1)));
    }
}

DO_ODD(vsubwod_h_bu, 16, UH, UB, DO_SUB)
DO_ODD(vsubwod_w_hu, 32, UW, UH, DO_SUB)
DO_ODD(vsubwod_d_wu, 64, UD, UW, DO_SUB)
#define DO_EVEN_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)             \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)        \
{                                                                     \
    int i;                                                            \
    VReg *Vd = (VReg *)vd;                                            \
    VReg *Vj = (VReg *)vj;                                            \
    VReg *Vk = (VReg *)vk;                                            \
    typedef __typeof(Vd->ES1(0)) TDS;                                 \
    typedef __typeof(Vd->EU1(0)) TDU;                                 \
    int oprsz = simd_oprsz(desc);                                     \
                                                                      \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                         \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i), (TDS)Vk->ES2(2 * i)); \
    }                                                                 \
}

#define DO_ODD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                \
{                                                                             \
    int i;                                                                    \
    VReg *Vd = (VReg *)vd;                                                    \
    VReg *Vj = (VReg *)vj;                                                    \
    VReg *Vk = (VReg *)vk;                                                    \
    typedef __typeof(Vd->ES1(0)) TDS;                                         \
    typedef __typeof(Vd->EU1(0)) TDU;                                         \
    int oprsz = simd_oprsz(desc);                                             \
                                                                              \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                                 \
        Vd->ES1(i) = DO_OP((TDU)Vj->EU2(2 * i + 1), (TDS)Vk->ES2(2 * i + 1)); \
    }                                                                         \
}
void HELPER(vaddwev_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i)),
                              int128_makes64(Vk->D(2 * i)));
    }
}

DO_EVEN_U_S(vaddwev_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_EVEN_U_S(vaddwev_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_EVEN_U_S(vaddwev_d_wu_w, 64, D, UD, W, UW, DO_ADD)
void HELPER(vaddwod_q_du_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_add(int128_make64(Vj->UD(2 * i + 1)),
                              int128_makes64(Vk->D(2 * i + 1)));
    }
}

DO_ODD_U_S(vaddwod_h_bu_b, 16, H, UH, B, UB, DO_ADD)
DO_ODD_U_S(vaddwod_w_hu_h, 32, W, UW, H, UH, DO_ADD)
DO_ODD_U_S(vaddwod_d_wu_w, 64, D, UD, W, UW, DO_ADD)
#define DO_VAVG(a, b)  ((a >> 1) + (b >> 1) + (a & b & 1))
#define DO_VAVGR(a, b) ((a >> 1) + (b >> 1) + ((a | b) & 1))
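/*
 * Both averages are computed without widening: halving each operand
 * first cannot overflow, and the dropped low bits are compensated by a
 * carry term.  DO_VAVG adds (a & b & 1), giving the average rounded
 * toward negative infinity; DO_VAVGR adds ((a | b) & 1), which rounds
 * the result up whenever the exact sum is odd.
 */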
#define DO_3OP(NAME, BIT, E, DO_OP)                            \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));                  \
    }                                                          \
}

DO_3OP(vavg_b, 8, B, DO_VAVG)
DO_3OP(vavg_h, 16, H, DO_VAVG)
DO_3OP(vavg_w, 32, W, DO_VAVG)
DO_3OP(vavg_d, 64, D, DO_VAVG)
DO_3OP(vavgr_b, 8, B, DO_VAVGR)
DO_3OP(vavgr_h, 16, H, DO_VAVGR)
DO_3OP(vavgr_w, 32, W, DO_VAVGR)
DO_3OP(vavgr_d, 64, D, DO_VAVGR)
DO_3OP(vavg_bu, 8, UB, DO_VAVG)
DO_3OP(vavg_hu, 16, UH, DO_VAVG)
DO_3OP(vavg_wu, 32, UW, DO_VAVG)
DO_3OP(vavg_du, 64, UD, DO_VAVG)
DO_3OP(vavgr_bu, 8, UB, DO_VAVGR)
DO_3OP(vavgr_hu, 16, UH, DO_VAVGR)
DO_3OP(vavgr_wu, 32, UW, DO_VAVGR)
DO_3OP(vavgr_du, 64, UD, DO_VAVGR)
#define DO_VABSD(a, b) ((a > b) ? (a - b) : (b - a))

DO_3OP(vabsd_b, 8, B, DO_VABSD)
DO_3OP(vabsd_h, 16, H, DO_VABSD)
DO_3OP(vabsd_w, 32, W, DO_VABSD)
DO_3OP(vabsd_d, 64, D, DO_VABSD)
DO_3OP(vabsd_bu, 8, UB, DO_VABSD)
DO_3OP(vabsd_hu, 16, UH, DO_VABSD)
DO_3OP(vabsd_wu, 32, UW, DO_VABSD)
DO_3OP(vabsd_du, 64, UD, DO_VABSD)
#define DO_VABS(a) ((a < 0) ? (-a) : (a))

#define DO_VADDA(NAME, BIT, E)                                 \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_VABS(Vj->E(i)) + DO_VABS(Vk->E(i));      \
    }                                                          \
}

DO_VADDA(vadda_b, 8, B)
DO_VADDA(vadda_h, 16, H)
DO_VADDA(vadda_w, 32, W)
DO_VADDA(vadda_d, 64, D)
#define DO_MIN(a, b) (a < b ? a : b)
#define DO_MAX(a, b) (a > b ? a : b)

#define VMINMAXI(NAME, BIT, E, DO_OP)                              \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = DO_OP(Vj->E(i), (TD)imm);                       \
    }                                                              \
}

VMINMAXI(vmini_b, 8, B, DO_MIN)
VMINMAXI(vmini_h, 16, H, DO_MIN)
VMINMAXI(vmini_w, 32, W, DO_MIN)
VMINMAXI(vmini_d, 64, D, DO_MIN)
VMINMAXI(vmaxi_b, 8, B, DO_MAX)
VMINMAXI(vmaxi_h, 16, H, DO_MAX)
VMINMAXI(vmaxi_w, 32, W, DO_MAX)
VMINMAXI(vmaxi_d, 64, D, DO_MAX)
VMINMAXI(vmini_bu, 8, UB, DO_MIN)
VMINMAXI(vmini_hu, 16, UH, DO_MIN)
VMINMAXI(vmini_wu, 32, UW, DO_MIN)
VMINMAXI(vmini_du, 64, UD, DO_MIN)
VMINMAXI(vmaxi_bu, 8, UB, DO_MAX)
VMINMAXI(vmaxi_hu, 16, UH, DO_MAX)
VMINMAXI(vmaxi_wu, 32, UW, DO_MAX)
VMINMAXI(vmaxi_du, 64, UD, DO_MAX)
#define DO_VMUH(NAME, BIT, E1, E2, DO_OP)                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->E1(0)) T;                             \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E2(i) = ((T)Vj->E2(i)) * ((T)Vk->E2(i)) >> BIT;    \
    }                                                          \
}
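/*
 * For sub-64-bit elements the product fits in the next wider element
 * type E1, so the high half of the multiplication is simply the
 * widened product shifted right by BIT.  The 64-bit variants below
 * cannot widen further and use muls64()/mulu64(), which return the
 * 128-bit product as separate low/high 64-bit halves.
 */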
void HELPER(vmuh_d)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    uint64_t l, h;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 8; i++) {
        muls64(&l, &h, Vj->D(i), Vk->D(i));
        Vd->D(i) = h;
    }
}

DO_VMUH(vmuh_b, 8, H, B, DO_MUH)
DO_VMUH(vmuh_h, 16, W, H, DO_MUH)
DO_VMUH(vmuh_w, 32, D, W, DO_MUH)

void HELPER(vmuh_du)(void *vd, void *vj, void *vk, uint32_t desc)
{
    int i;
    uint64_t l, h;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 8; i++) {
        mulu64(&l, &h, Vj->D(i), Vk->D(i));
        Vd->D(i) = h;
    }
}

DO_VMUH(vmuh_bu, 8, UH, UB, DO_MUH)
DO_VMUH(vmuh_hu, 16, UW, UH, DO_MUH)
DO_VMUH(vmuh_wu, 32, UD, UW, DO_MUH)
#define DO_MUL(a, b) (a * b)

DO_EVEN(vmulwev_h_b, 16, H, B, DO_MUL)
DO_EVEN(vmulwev_w_h, 32, W, H, DO_MUL)
DO_EVEN(vmulwev_d_w, 64, D, W, DO_MUL)

DO_ODD(vmulwod_h_b, 16, H, B, DO_MUL)
DO_ODD(vmulwod_w_h, 32, W, H, DO_MUL)
DO_ODD(vmulwod_d_w, 64, D, W, DO_MUL)

DO_EVEN(vmulwev_h_bu, 16, UH, UB, DO_MUL)
DO_EVEN(vmulwev_w_hu, 32, UW, UH, DO_MUL)
DO_EVEN(vmulwev_d_wu, 64, UD, UW, DO_MUL)

DO_ODD(vmulwod_h_bu, 16, UH, UB, DO_MUL)
DO_ODD(vmulwod_w_hu, 32, UW, UH, DO_MUL)
DO_ODD(vmulwod_d_wu, 64, UD, UW, DO_MUL)

DO_EVEN_U_S(vmulwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_EVEN_U_S(vmulwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_EVEN_U_S(vmulwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

DO_ODD_U_S(vmulwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
DO_ODD_U_S(vmulwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
DO_ODD_U_S(vmulwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
#define DO_MADD(a, b, c)  (a + b * c)
#define DO_MSUB(a, b, c)  (a - b * c)

#define VMADDSUB(NAME, BIT, E, DO_OP)                          \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vd->E(i), Vj->E(i), Vk->E(i));        \
    }                                                          \
}

VMADDSUB(vmadd_b, 8, B, DO_MADD)
VMADDSUB(vmadd_h, 16, H, DO_MADD)
VMADDSUB(vmadd_w, 32, W, DO_MADD)
VMADDSUB(vmadd_d, 64, D, DO_MADD)
VMADDSUB(vmsub_b, 8, B, DO_MSUB)
VMADDSUB(vmsub_h, 16, H, DO_MSUB)
VMADDSUB(vmsub_w, 32, W, DO_MSUB)
VMADDSUB(vmsub_d, 64, D, DO_MSUB)
#define VMADDWEV(NAME, BIT, E1, E2, DO_OP)                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)    \
{                                                                 \
    int i;                                                        \
    VReg *Vd = (VReg *)vd;                                        \
    VReg *Vj = (VReg *)vj;                                        \
    VReg *Vk = (VReg *)vk;                                        \
    typedef __typeof(Vd->E1(0)) TD;                               \
    int oprsz = simd_oprsz(desc);                                 \
                                                                  \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                     \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i), (TD)Vk->E2(2 * i)); \
    }                                                             \
}

VMADDWEV(vmaddwev_h_b, 16, H, B, DO_MUL)
VMADDWEV(vmaddwev_w_h, 32, W, H, DO_MUL)
VMADDWEV(vmaddwev_d_w, 64, D, W, DO_MUL)
VMADDWEV(vmaddwev_h_bu, 16, UH, UB, DO_MUL)
VMADDWEV(vmaddwev_w_hu, 32, UW, UH, DO_MUL)
VMADDWEV(vmaddwev_d_wu, 64, UD, UW, DO_MUL)

#define VMADDWOD(NAME, BIT, E1, E2, DO_OP)                     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->E1(0)) TD;                            \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E1(i) += DO_OP((TD)Vj->E2(2 * i + 1),              \
                           (TD)Vk->E2(2 * i + 1));             \
    }                                                          \
}

VMADDWOD(vmaddwod_h_b, 16, H, B, DO_MUL)
VMADDWOD(vmaddwod_w_h, 32, W, H, DO_MUL)
VMADDWOD(vmaddwod_d_w, 64, D, W, DO_MUL)
VMADDWOD(vmaddwod_h_bu, 16, UH, UB, DO_MUL)
VMADDWOD(vmaddwod_w_hu, 32, UW, UH, DO_MUL)
VMADDWOD(vmaddwod_d_wu, 64, UD, UW, DO_MUL)
#define VMADDWEV_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->ES1(0)) TS1;                          \
    typedef __typeof(Vd->EU1(0)) TU1;                          \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i),               \
                            (TS1)Vk->ES2(2 * i));              \
    }                                                          \
}

VMADDWEV_U_S(vmaddwev_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWEV_U_S(vmaddwev_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWEV_U_S(vmaddwev_d_wu_w, 64, D, UD, W, UW, DO_MUL)

#define VMADDWOD_U_S(NAME, BIT, ES1, EU1, ES2, EU2, DO_OP)     \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    typedef __typeof(Vd->ES1(0)) TS1;                          \
    typedef __typeof(Vd->EU1(0)) TU1;                          \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->ES1(i) += DO_OP((TU1)Vj->EU2(2 * i + 1),           \
                            (TS1)Vk->ES2(2 * i + 1));          \
    }                                                          \
}

VMADDWOD_U_S(vmaddwod_h_bu_b, 16, H, UH, B, UB, DO_MUL)
VMADDWOD_U_S(vmaddwod_w_hu_h, 32, W, UW, H, UH, DO_MUL)
VMADDWOD_U_S(vmaddwod_d_wu_w, 64, D, UD, W, UW, DO_MUL)
#define DO_DIVU(N, M) (unlikely(M == 0) ? 0 : N / M)
#define DO_REMU(N, M) (unlikely(M == 0) ? 0 : N % M)
#define DO_DIV(N, M)  (unlikely(M == 0) ? 0 :\
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? N : N / M)
#define DO_REM(N, M)  (unlikely(M == 0) ? 0 :\
        unlikely((N == -N) && (M == (__typeof(N))(-1))) ? 0 : N % M)
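/*
 * Division by zero yields 0 instead of trapping.  For the signed
 * variants, "N == -N" is true only when N is zero or the minimum value
 * of its type, so combined with M == -1 it catches the one remaining
 * overflow case (INT_MIN / -1): DO_DIV then returns N and DO_REM
 * returns 0, matching the wrap-around quotient and exact remainder.
 */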
#define VDIV(NAME, BIT, E, DO_OP)                              \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
    int oprsz = simd_oprsz(desc);                              \
                                                               \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                  \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i));                  \
    }                                                          \
}

VDIV(vdiv_b, 8, B, DO_DIV)
VDIV(vdiv_h, 16, H, DO_DIV)
VDIV(vdiv_w, 32, W, DO_DIV)
VDIV(vdiv_d, 64, D, DO_DIV)
VDIV(vdiv_bu, 8, UB, DO_DIVU)
VDIV(vdiv_hu, 16, UH, DO_DIVU)
VDIV(vdiv_wu, 32, UW, DO_DIVU)
VDIV(vdiv_du, 64, UD, DO_DIVU)
VDIV(vmod_b, 8, B, DO_REM)
VDIV(vmod_h, 16, H, DO_REM)
VDIV(vmod_w, 32, W, DO_REM)
VDIV(vmod_d, 64, D, DO_REM)
VDIV(vmod_bu, 8, UB, DO_REMU)
VDIV(vmod_hu, 16, UH, DO_REMU)
VDIV(vmod_wu, 32, UW, DO_REMU)
VDIV(vmod_du, 64, UD, DO_REMU)
#define VSAT_S(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max :                  \
                   Vj->E(i) < (TD)~max ? (TD)~max : Vj->E(i);      \
    }                                                              \
}
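/*
 * "max" arrives as the positive saturation bound, so in two's
 * complement "~max" equals -(max + 1), the matching negative bound;
 * the clamp above therefore keeps each element within [~max, max].
 */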
VSAT_S(vsat_b, 8, B)
VSAT_S(vsat_h, 16, H)
VSAT_S(vsat_w, 32, W)
VSAT_S(vsat_d, 64, D)

#define VSAT_U(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t max, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    typedef __typeof(Vd->E(0)) TD;                                 \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = Vj->E(i) > (TD)max ? (TD)max : Vj->E(i);        \
    }                                                              \
}

VSAT_U(vsat_bu, 8, UB)
VSAT_U(vsat_hu, 16, UH)
VSAT_U(vsat_wu, 32, UW)
VSAT_U(vsat_du, 64, UD)
#define VEXTH(NAME, BIT, E1, E2)                                 \
void HELPER(NAME)(void *vd, void *vj, uint32_t desc)             \
{                                                                \
    int i, j, ofs;                                               \
    VReg *Vd = (VReg *)vd;                                       \
    VReg *Vj = (VReg *)vj;                                       \
    int oprsz = simd_oprsz(desc);                                \
                                                                 \
    ofs = LSX_LEN / BIT;                                         \
    for (i = 0; i < oprsz / 16; i++) {                           \
        for (j = 0; j < ofs; j++) {                              \
            Vd->E1(j + i * ofs) = Vj->E2(j + ofs + ofs * 2 * i); \
        }                                                        \
    }                                                            \
}

void HELPER(vexth_q_d)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_makes64(Vj->D(2 * i + 1));
    }
}

void HELPER(vexth_qu_du)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_make64(Vj->UD(2 * i + 1));
    }
}

VEXTH(vexth_h_b, 16, H, B)
VEXTH(vexth_w_h, 32, W, H)
VEXTH(vexth_d_w, 64, D, W)
VEXTH(vexth_hu_bu, 16, UH, UB)
VEXTH(vexth_wu_hu, 32, UW, UH)
VEXTH(vexth_du_wu, 64, UD, UW)
#define VEXT2XV(NAME, BIT, E1, E2)                   \
void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
{                                                    \
    int i;                                           \
    VReg temp = {};                                  \
    VReg *Vd = (VReg *)vd;                           \
    VReg *Vj = (VReg *)vj;                           \
    int oprsz = simd_oprsz(desc);                    \
                                                     \
    for (i = 0; i < oprsz / (BIT / 8); i++) {        \
        temp.E1(i) = Vj->E2(i);                      \
    }                                                \
    *Vd = temp;                                      \
}

VEXT2XV(vext2xv_h_b, 16, H, B)
VEXT2XV(vext2xv_w_b, 32, W, B)
VEXT2XV(vext2xv_d_b, 64, D, B)
VEXT2XV(vext2xv_w_h, 32, W, H)
VEXT2XV(vext2xv_d_h, 64, D, H)
VEXT2XV(vext2xv_d_w, 64, D, W)
VEXT2XV(vext2xv_hu_bu, 16, UH, UB)
VEXT2XV(vext2xv_wu_bu, 32, UW, UB)
VEXT2XV(vext2xv_du_bu, 64, UD, UB)
VEXT2XV(vext2xv_wu_hu, 32, UW, UH)
VEXT2XV(vext2xv_du_hu, 64, UD, UH)
VEXT2XV(vext2xv_du_wu, 64, UD, UW)

#define DO_SIGNCOV(a, b) (a == 0 ? 0 : a < 0 ? -b : b)

DO_3OP(vsigncov_b, 8, B, DO_SIGNCOV)
DO_3OP(vsigncov_h, 16, H, DO_SIGNCOV)
DO_3OP(vsigncov_w, 32, W, DO_SIGNCOV)
DO_3OP(vsigncov_d, 64, D, DO_SIGNCOV)
static uint64_t do_vmskltz_b(int64_t val)
{
    uint64_t m = 0x8080808080808080ULL;
    uint64_t c = val & m;
    c |= c << 7;
    c |= c << 14;
    c |= c << 28;
    return c >> 56;
}
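/*
 * SWAR reduction: "c" starts out holding the sign bit of each byte,
 * and the three shift-and-or steps fold those eight bits into the top
 * byte, so "c >> 56" yields an 8-bit mask with one "is negative" bit
 * per input byte.
 */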
void HELPER(vmskltz_b)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_b(Vj->D(2 * i));
        temp |= (do_vmskltz_b(Vj->D(2 * i + 1)) << 8);
        Vd->D(2 * i) = temp;
        Vd->D(2 * i + 1) = 0;
    }
}

static uint64_t do_vmskltz_h(int64_t val)
{
    uint64_t m = 0x8000800080008000ULL;
    uint64_t c = val & m;
    c |= c << 15;
    c |= c << 30;
    return c >> 60;
}

void HELPER(vmskltz_h)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_h(Vj->D(2 * i));
        temp |= (do_vmskltz_h(Vj->D(2 * i + 1)) << 4);
        Vd->D(2 * i) = temp;
        Vd->D(2 * i + 1) = 0;
    }
}

static uint64_t do_vmskltz_w(int64_t val)
{
    uint64_t m = 0x8000000080000000ULL;
    uint64_t c = val & m;
    c |= c << 31;
    return c >> 62;
}

void HELPER(vmskltz_w)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_w(Vj->D(2 * i));
        temp |= (do_vmskltz_w(Vj->D(2 * i + 1)) << 2);
        Vd->D(2 * i) = temp;
        Vd->D(2 * i + 1) = 0;
    }
}

static uint64_t do_vmskltz_d(int64_t val)
{
    return (uint64_t)val >> 63;
}

void HELPER(vmskltz_d)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_d(Vj->D(2 * i));
        temp |= (do_vmskltz_d(Vj->D(2 * i + 1)) << 1);
        Vd->D(2 * i) = temp;
        Vd->D(2 * i + 1) = 0;
    }
}

void HELPER(vmskgez_b)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskltz_b(Vj->D(2 * i));
        temp |= (do_vmskltz_b(Vj->D(2 * i + 1)) << 8);
        Vd->D(2 * i) = (uint16_t)(~temp);
        Vd->D(2 * i + 1) = 0;
    }
}
static uint64_t do_vmskez_b(uint64_t a)
{
    uint64_t m = 0x7f7f7f7f7f7f7f7fULL;
    uint64_t c = ~(((a & m) + m) | a | m);
    c |= c << 7;
    c |= c << 14;
    c |= c << 28;
    return c >> 56;
}
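/*
 * Classic zero-byte detection: with m = 0x7f per byte, (a & m) + m
 * carries into bit 7 of every byte whose low seven bits are non-zero,
 * and OR-ing in "a" covers bytes that are non-zero only in bit 7.
 * Complementing then leaves 0x80 exactly in the bytes of "a" that are
 * zero, and the same shift-and-or folding as in do_vmskltz_b() packs
 * those bits into an 8-bit mask (vmsknz_b below inverts it to get the
 * "non-zero" mask).
 */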
void HELPER(vmsknz_b)(void *vd, void *vj, uint32_t desc)
{
    int i;
    uint16_t temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        temp = 0;
        temp = do_vmskez_b(Vj->D(2 * i));
        temp |= (do_vmskez_b(Vj->D(2 * i + 1)) << 8);
        Vd->D(2 * i) = (uint16_t)(~temp);
        Vd->D(2 * i + 1) = 0;
    }
}

void HELPER(vnori_b)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < simd_oprsz(desc); i++) {
        Vd->B(i) = ~(Vj->B(i) | (uint8_t)imm);
    }
}
#define VSLLWIL(NAME, BIT, E1, E2)                                             \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)             \
{                                                                              \
    int i, j, ofs;                                                             \
    VReg temp = {};                                                            \
    VReg *Vd = (VReg *)vd;                                                     \
    VReg *Vj = (VReg *)vj;                                                     \
    int oprsz = simd_oprsz(desc);                                              \
    typedef __typeof(temp.E1(0)) TD;                                           \
                                                                               \
    ofs = LSX_LEN / BIT;                                                       \
    for (i = 0; i < oprsz / 16; i++) {                                         \
        for (j = 0; j < ofs; j++) {                                            \
            temp.E1(j + ofs * i) = (TD)Vj->E2(j + ofs * 2 * i) << (imm % BIT); \
        }                                                                      \
    }                                                                          \
    *Vd = temp;                                                                \
}

void HELPER(vextl_q_d)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_makes64(Vj->D(2 * i));
    }
}

void HELPER(vextl_qu_du)(void *vd, void *vj, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        Vd->Q(i) = int128_make64(Vj->UD(2 * i));
    }
}

VSLLWIL(vsllwil_h_b, 16, H, B)
VSLLWIL(vsllwil_w_h, 32, W, H)
VSLLWIL(vsllwil_d_w, 64, D, W)
VSLLWIL(vsllwil_hu_bu, 16, UH, UB)
VSLLWIL(vsllwil_wu_hu, 32, UW, UH)
VSLLWIL(vsllwil_du_wu, 64, UD, UW)
#define do_vsrlr(E, T)                                \
static T do_vsrlr_ ##E(T s1, int sh)                  \
{                                                     \
    if (sh == 0) {                                    \
        return s1;                                    \
    } else {                                          \
        return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
    }                                                 \
}
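/*
 * Rounding right shift: "(s1 >> (sh - 1)) & 0x1" is the last bit
 * shifted out, so adding it back rounds the result to nearest, with
 * ties rounded up.  The sh == 0 case is special only to avoid the
 * negative shift count in the rounding term.
 */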
do_vsrlr(B, uint8_t)
do_vsrlr(H, uint16_t)
do_vsrlr(W, uint32_t)
do_vsrlr(D, uint64_t)

#define VSRLR(NAME, BIT, T, E)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)  \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    VReg *Vk = (VReg *)vk;                                      \
    int oprsz = simd_oprsz(desc);                               \
                                                                \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                   \
        Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
    }                                                           \
}

VSRLR(vsrlr_b, 8, uint8_t, B)
VSRLR(vsrlr_h, 16, uint16_t, H)
VSRLR(vsrlr_w, 32, uint32_t, W)
VSRLR(vsrlr_d, 64, uint64_t, D)

#define VSRLRI(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = do_vsrlr_ ## E(Vj->E(i), imm);                  \
    }                                                              \
}

VSRLRI(vsrlri_b, 8, B)
VSRLRI(vsrlri_h, 16, H)
VSRLRI(vsrlri_w, 32, W)
VSRLRI(vsrlri_d, 64, D)

#define do_vsrar(E, T)                                \
static T do_vsrar_ ##E(T s1, int sh)                  \
{                                                     \
    if (sh == 0) {                                    \
        return s1;                                    \
    } else {                                          \
        return (s1 >> sh) + ((s1 >> (sh - 1)) & 0x1); \
    }                                                 \
}

do_vsrar(B, int8_t)
do_vsrar(H, int16_t)
do_vsrar(W, int32_t)
do_vsrar(D, int64_t)

#define VSRAR(NAME, BIT, T, E)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)  \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    VReg *Vk = (VReg *)vk;                                      \
    int oprsz = simd_oprsz(desc);                               \
                                                                \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                   \
        Vd->E(i) = do_vsrar_ ## E(Vj->E(i), ((T)Vk->E(i))%BIT); \
    }                                                           \
}

VSRAR(vsrar_b, 8, uint8_t, B)
VSRAR(vsrar_h, 16, uint16_t, H)
VSRAR(vsrar_w, 32, uint32_t, W)
VSRAR(vsrar_d, 64, uint64_t, D)

#define VSRARI(NAME, BIT, E)                                       \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
    int oprsz = simd_oprsz(desc);                                  \
                                                                   \
    for (i = 0; i < oprsz / (BIT / 8); i++) {                      \
        Vd->E(i) = do_vsrar_ ## E(Vj->E(i), imm);                  \
    }                                                              \
}

VSRARI(vsrari_b, 8, B)
VSRARI(vsrari_h, 16, H)
VSRARI(vsrari_w, 32, W)
VSRARI(vsrari_d, 64, D)
#define R_SHIFT(a, b) (a >> b)

#define VSRLN(NAME, BIT, E1, E2)                                           \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)             \
{                                                                          \
    int i, j, ofs;                                                         \
    VReg *Vd = (VReg *)vd;                                                 \
    VReg *Vj = (VReg *)vj;                                                 \
    VReg *Vk = (VReg *)vk;                                                 \
    int oprsz = simd_oprsz(desc);                                          \
                                                                           \
    ofs = LSX_LEN / BIT;                                                   \
    for (i = 0; i < oprsz / 16; i++) {                                     \
        for (j = 0; j < ofs; j++) {                                        \
            Vd->E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i),         \
                                              Vk->E2(j + ofs * i) % BIT);  \
        }                                                                  \
        Vd->D(2 * i + 1) = 0;                                              \
    }                                                                      \
}

VSRLN(vsrln_b_h, 16, B, UH)
VSRLN(vsrln_h_w, 32, H, UW)
VSRLN(vsrln_w_d, 64, W, UD)

#define VSRAN(NAME, BIT, E1, E2, E3)                                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)             \
{                                                                          \
    int i, j, ofs;                                                         \
    VReg *Vd = (VReg *)vd;                                                 \
    VReg *Vj = (VReg *)vj;                                                 \
    VReg *Vk = (VReg *)vk;                                                 \
    int oprsz = simd_oprsz(desc);                                          \
                                                                           \
    ofs = LSX_LEN / BIT;                                                   \
    for (i = 0; i < oprsz / 16; i++) {                                     \
        for (j = 0; j < ofs; j++) {                                        \
            Vd->E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i),         \
                                              Vk->E3(j + ofs * i) % BIT);  \
        }                                                                  \
        Vd->D(2 * i + 1) = 0;                                              \
    }                                                                      \
}

VSRAN(vsran_b_h, 16, B, H, UH)
VSRAN(vsran_h_w, 32, H, W, UW)
VSRAN(vsran_w_d, 64, W, D, UD)
#define VSRLNI(NAME, BIT, E1, E2)                                         \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)        \
{                                                                         \
    int i, j, ofs;                                                        \
    VReg temp = {};                                                       \
    VReg *Vd = (VReg *)vd;                                                \
    VReg *Vj = (VReg *)vj;                                                \
    int oprsz = simd_oprsz(desc);                                         \
                                                                          \
    ofs = LSX_LEN / BIT;                                                  \
    for (i = 0; i < oprsz / 16; i++) {                                    \
        for (j = 0; j < ofs; j++) {                                       \
            temp.E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i), imm); \
            temp.E1(j + ofs * (2 * i + 1)) = R_SHIFT(Vd->E2(j + ofs * i), \
                                                     imm);                \
        }                                                                 \
    }                                                                     \
    *Vd = temp;                                                           \
}

void HELPER(vsrlni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg temp = {};
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < 2; i++) {
        temp.D(2 * i) = int128_getlo(int128_urshift(Vj->Q(i), imm % 128));
        temp.D(2 * i + 1) = int128_getlo(int128_urshift(Vd->Q(i), imm % 128));
    }
    *Vd = temp;
}

VSRLNI(vsrlni_b_h, 16, B, UH)
VSRLNI(vsrlni_h_w, 32, H, UW)
VSRLNI(vsrlni_w_d, 64, W, UD)
#define VSRANI(NAME, BIT, E1, E2)                                         \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)        \
{                                                                         \
    int i, j, ofs;                                                        \
    VReg temp = {};                                                       \
    VReg *Vd = (VReg *)vd;                                                \
    VReg *Vj = (VReg *)vj;                                                \
    int oprsz = simd_oprsz(desc);                                         \
                                                                          \
    ofs = LSX_LEN / BIT;                                                  \
    for (i = 0; i < oprsz / 16; i++) {                                    \
        for (j = 0; j < ofs; j++) {                                       \
            temp.E1(j + ofs * 2 * i) = R_SHIFT(Vj->E2(j + ofs * i), imm); \
            temp.E1(j + ofs * (2 * i + 1)) = R_SHIFT(Vd->E2(j + ofs * i), \
                                                     imm);                \
        }                                                                 \
    }                                                                     \
    *Vd = temp;                                                           \
}

void HELPER(vsrani_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg temp = {};
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < 2; i++) {
        temp.D(2 * i) = int128_getlo(int128_rshift(Vj->Q(i), imm % 128));
        temp.D(2 * i + 1) = int128_getlo(int128_rshift(Vd->Q(i), imm % 128));
    }
    *Vd = temp;
}

VSRANI(vsrani_b_h, 16, B, H)
VSRANI(vsrani_h_w, 32, H, W)
VSRANI(vsrani_w_d, 64, W, D)
#define VSRLRN(NAME, BIT, E1, E2, E3)                                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)               \
{                                                                            \
    int i, j, ofs;                                                           \
    VReg *Vd = (VReg *)vd;                                                   \
    VReg *Vj = (VReg *)vj;                                                   \
    VReg *Vk = (VReg *)vk;                                                   \
    int oprsz = simd_oprsz(desc);                                            \
                                                                             \
    ofs = LSX_LEN / BIT;                                                     \
    for (i = 0; i < oprsz / 16; i++) {                                       \
        for (j = 0; j < ofs; j++) {                                          \
            Vd->E1(j + ofs * 2 * i) = do_vsrlr_ ##E2(Vj->E2(j + ofs * i),    \
                                                     Vk->E3(j + ofs * i) % BIT); \
        }                                                                    \
        Vd->D(2 * i + 1) = 0;                                                \
    }                                                                        \
}

VSRLRN(vsrlrn_b_h, 16, B, H, UH)
VSRLRN(vsrlrn_h_w, 32, H, W, UW)
VSRLRN(vsrlrn_w_d, 64, W, D, UD)

#define VSRARN(NAME, BIT, E1, E2, E3)                                        \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)               \
{                                                                            \
    int i, j, ofs;                                                           \
    VReg *Vd = (VReg *)vd;                                                   \
    VReg *Vj = (VReg *)vj;                                                   \
    VReg *Vk = (VReg *)vk;                                                   \
    int oprsz = simd_oprsz(desc);                                            \
                                                                             \
    ofs = LSX_LEN / BIT;                                                     \
    for (i = 0; i < oprsz / 16; i++) {                                       \
        for (j = 0; j < ofs; j++) {                                          \
            Vd->E1(j + ofs * 2 * i) = do_vsrar_ ## E2(Vj->E2(j + ofs * i),   \
                                                      Vk->E3(j + ofs * i) % BIT); \
        }                                                                    \
        Vd->D(2 * i + 1) = 0;                                                \
    }                                                                        \
}

VSRARN(vsrarn_b_h, 16, B, H, UH)
VSRARN(vsrarn_h_w, 32, H, W, UW)
VSRARN(vsrarn_w_d, 64, W, D, UD)
#define VSRLRNI(NAME, BIT, E1, E2)                                                \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                \
{                                                                                 \
    int i, j, ofs;                                                                \
    VReg temp = {};                                                               \
    VReg *Vd = (VReg *)vd;                                                        \
    VReg *Vj = (VReg *)vj;                                                        \
    int oprsz = simd_oprsz(desc);                                                 \
                                                                                  \
    ofs = LSX_LEN / BIT;                                                          \
    for (i = 0; i < oprsz / 16; i++) {                                            \
        for (j = 0; j < ofs; j++) {                                               \
            temp.E1(j + ofs * 2 * i) = do_vsrlr_ ## E2(Vj->E2(j + ofs * i), imm); \
            temp.E1(j + ofs * (2 * i + 1)) = do_vsrlr_ ## E2(Vd->E2(j + ofs * i), \
                                                             imm);                \
        }                                                                         \
    }                                                                             \
    *Vd = temp;                                                                   \
}

void HELPER(vsrlrni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg temp = {};
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    Int128 r[4];
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        if (imm == 0) {
            temp.D(2 * i) = int128_getlo(Vj->Q(i));
            temp.D(2 * i + 1) = int128_getlo(Vd->Q(i));
        } else {
            r[2 * i] = int128_and(int128_urshift(Vj->Q(i), (imm - 1)),
                                  int128_one());
            r[2 * i + 1] = int128_and(int128_urshift(Vd->Q(i), (imm - 1)),
                                      int128_one());
            temp.D(2 * i) = int128_getlo(int128_add(int128_urshift(Vj->Q(i),
                                                                   imm),
                                                    r[2 * i]));
            temp.D(2 * i + 1) = int128_getlo(int128_add(int128_urshift(Vd->Q(i),
                                                                       imm),
                                                        r[2 * i + 1]));
        }
    }
    *Vd = temp;
}

VSRLRNI(vsrlrni_b_h, 16, B, H)
VSRLRNI(vsrlrni_h_w, 32, H, W)
VSRLRNI(vsrlrni_w_d, 64, W, D)
#define VSRARNI(NAME, BIT, E1, E2)                                                \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                \
{                                                                                 \
    int i, j, ofs;                                                                \
    VReg temp = {};                                                               \
    VReg *Vd = (VReg *)vd;                                                        \
    VReg *Vj = (VReg *)vj;                                                        \
    int oprsz = simd_oprsz(desc);                                                 \
                                                                                  \
    ofs = LSX_LEN / BIT;                                                          \
    for (i = 0; i < oprsz / 16; i++) {                                            \
        for (j = 0; j < ofs; j++) {                                               \
            temp.E1(j + ofs * 2 * i) = do_vsrar_ ## E2(Vj->E2(j + ofs * i), imm); \
            temp.E1(j + ofs * (2 * i + 1)) = do_vsrar_ ## E2(Vd->E2(j + ofs * i), \
                                                             imm);                \
        }                                                                         \
    }                                                                             \
    *Vd = temp;                                                                   \
}

void HELPER(vsrarni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    VReg temp = {};
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    Int128 r[4];
    int oprsz = simd_oprsz(desc);

    for (i = 0; i < oprsz / 16; i++) {
        if (imm == 0) {
            temp.D(2 * i) = int128_getlo(Vj->Q(i));
            temp.D(2 * i + 1) = int128_getlo(Vd->Q(i));
        } else {
            r[2 * i] = int128_and(int128_rshift(Vj->Q(i), (imm - 1)),
                                  int128_one());
            r[2 * i + 1] = int128_and(int128_rshift(Vd->Q(i), (imm - 1)),
                                      int128_one());
            temp.D(2 * i) = int128_getlo(int128_add(int128_rshift(Vj->Q(i),
                                                                  imm),
                                                    r[2 * i]));
            temp.D(2 * i + 1) = int128_getlo(int128_add(int128_rshift(Vd->Q(i),
                                                                      imm),
                                                        r[2 * i + 1]));
        }
    }
    *Vd = temp;
}

VSRARNI(vsrarni_b_h, 16, B, H)
VSRARNI(vsrarni_h_w, 32, H, W)
VSRARNI(vsrarni_w_d, 64, W, D)
#define SSRLNS(NAME, T1, T2, T3)                    \
static T1 do_ssrlns_ ## NAME(T2 e2, int sa, int sh) \
{                                                   \
    T1 shft_res;                                    \
    if (sa == 0) {                                  \
        shft_res = e2;                              \
    } else {                                        \
        shft_res = (((T1)e2) >> sa);                \
    }                                               \
    T3 mask;                                        \
    mask = (1ull << sh) - 1;                        \
    if (shft_res > mask) {                          \
        return mask;                                \
    } else {                                        \
        return shft_res;                            \
    }                                               \
}

SSRLNS(B, uint16_t, int16_t, uint8_t)
SSRLNS(H, uint32_t, int32_t, uint16_t)
SSRLNS(W, uint64_t, int64_t, uint32_t)

#define VSSRLN(NAME, BIT, E1, E2, E3)                                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)              \
{                                                                           \
    int i, j, ofs;                                                          \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
    VReg *Vk = (VReg *)vk;                                                  \
    int oprsz = simd_oprsz(desc);                                           \
                                                                            \
    ofs = LSX_LEN / BIT;                                                    \
    for (i = 0; i < oprsz / 16; i++) {                                      \
        for (j = 0; j < ofs; j++) {                                         \
            Vd->E1(j + ofs * 2 * i) = do_ssrlns_ ## E1(Vj->E2(j + ofs * i), \
                                                       Vk->E3(j + ofs * i) % BIT, \
                                                       BIT / 2 - 1);        \
        }                                                                   \
        Vd->D(2 * i + 1) = 0;                                               \
    }                                                                       \
}

VSSRLN(vssrln_b_h, 16, B, H, UH)
VSSRLN(vssrln_h_w, 32, H, W, UW)
VSSRLN(vssrln_w_d, 64, W, D, UD)
#define SSRANS(E, T1, T2)                        \
static T1 do_ssrans_ ## E(T1 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = e2 >> sa;                     \
    }                                            \
    T2 mask;                                     \
    mask = (1ll << sh) - 1;                      \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else if (shft_res < -(mask + 1)) {         \
        return ~mask;                            \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRANS(B, int16_t, int8_t)
SSRANS(H, int32_t, int16_t)
SSRANS(W, int64_t, int32_t)

#define VSSRAN(NAME, BIT, E1, E2, E3)                                       \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)              \
{                                                                           \
    int i, j, ofs;                                                          \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
    VReg *Vk = (VReg *)vk;                                                  \
    int oprsz = simd_oprsz(desc);                                           \
                                                                            \
    ofs = LSX_LEN / BIT;                                                    \
    for (i = 0; i < oprsz / 16; i++) {                                      \
        for (j = 0; j < ofs; j++) {                                         \
            Vd->E1(j + ofs * 2 * i) = do_ssrans_ ## E1(Vj->E2(j + ofs * i), \
                                                       Vk->E3(j + ofs * i) % BIT, \
                                                       BIT / 2 - 1);        \
        }                                                                   \
        Vd->D(2 * i + 1) = 0;                                               \
    }                                                                       \
}

VSSRAN(vssran_b_h, 16, B, H, UH)
VSSRAN(vssran_h_w, 32, H, W, UW)
VSSRAN(vssran_w_d, 64, W, D, UD)
#define SSRLNU(E, T1, T2, T3)                    \
static T1 do_ssrlnu_ ## E(T3 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = (((T1)e2) >> sa);             \
    }                                            \
    T2 mask;                                     \
    mask = (1ull << sh) - 1;                     \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRLNU(B, uint16_t, uint8_t, int16_t)
SSRLNU(H, uint32_t, uint16_t, int32_t)
SSRLNU(W, uint64_t, uint32_t, int64_t)

#define VSSRLNU(NAME, BIT, E1, E2, E3)                                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)              \
{                                                                           \
    int i, j, ofs;                                                          \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
    VReg *Vk = (VReg *)vk;                                                  \
    int oprsz = simd_oprsz(desc);                                           \
                                                                            \
    ofs = LSX_LEN / BIT;                                                    \
    for (i = 0; i < oprsz / 16; i++) {                                      \
        for (j = 0; j < ofs; j++) {                                         \
            Vd->E1(j + ofs * 2 * i) = do_ssrlnu_ ## E1(Vj->E2(j + ofs * i), \
                                                       Vk->E3(j + ofs * i) % BIT, \
                                                       BIT / 2);            \
        }                                                                   \
        Vd->D(2 * i + 1) = 0;                                               \
    }                                                                       \
}

VSSRLNU(vssrln_bu_h, 16, B, H, UH)
VSSRLNU(vssrln_hu_w, 32, H, W, UW)
VSSRLNU(vssrln_wu_d, 64, W, D, UD)
#define SSRANU(E, T1, T2, T3)                    \
static T1 do_ssranu_ ## E(T3 e2, int sa, int sh) \
{                                                \
    T1 shft_res;                                 \
    if (sa == 0) {                               \
        shft_res = e2;                           \
    } else {                                     \
        shft_res = e2 >> sa;                     \
    }                                            \
    if (e2 < 0) {                                \
        shft_res = 0;                            \
    }                                            \
    T2 mask;                                     \
    mask = (1ull << sh) - 1;                     \
    if (shft_res > mask) {                       \
        return mask;                             \
    } else {                                     \
        return shft_res;                         \
    }                                            \
}

SSRANU(B, uint16_t, uint8_t, int16_t)
SSRANU(H, uint32_t, uint16_t, int32_t)
SSRANU(W, uint64_t, uint32_t, int64_t)

#define VSSRANU(NAME, BIT, E1, E2, E3)                                      \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)              \
{                                                                           \
    int i, j, ofs;                                                          \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
    VReg *Vk = (VReg *)vk;                                                  \
    int oprsz = simd_oprsz(desc);                                           \
                                                                            \
    ofs = LSX_LEN / BIT;                                                    \
    for (i = 0; i < oprsz / 16; i++) {                                      \
        for (j = 0; j < ofs; j++) {                                         \
            Vd->E1(j + ofs * 2 * i) = do_ssranu_ ## E1(Vj->E2(j + ofs * i), \
                                                       Vk->E3(j + ofs * i) % BIT, \
                                                       BIT / 2);            \
        }                                                                   \
        Vd->D(2 * i + 1) = 0;                                               \
    }                                                                       \
}

VSSRANU(vssran_bu_h, 16, B, H, UH)
VSSRANU(vssran_hu_w, 32, H, W, UW)
VSSRANU(vssran_wu_d, 64, W, D, UD)
#define VSSRLNI(NAME, BIT, E1, E2)                                                 \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                 \
{                                                                                  \
    int i, j, ofs;                                                                 \
    VReg temp = {};                                                                \
    VReg *Vd = (VReg *)vd;                                                         \
    VReg *Vj = (VReg *)vj;                                                         \
    int oprsz = simd_oprsz(desc);                                                  \
                                                                                   \
    ofs = LSX_LEN / BIT;                                                           \
    for (i = 0; i < oprsz / 16; i++) {                                             \
        for (j = 0; j < ofs; j++) {                                                \
            temp.E1(j + ofs * 2 * i) = do_ssrlns_ ## E1(Vj->E2(j + ofs * i),       \
                                                        imm, BIT / 2 - 1);         \
            temp.E1(j + ofs * (2 * i + 1)) = do_ssrlns_ ## E1(Vd->E2(j + ofs * i), \
                                                              imm, BIT / 2 - 1);   \
        }                                                                          \
    }                                                                              \
    *Vd = temp;                                                                    \
}

static void do_vssrlni_q(VReg *Vd, VReg *Vj,
                         uint64_t imm, int idx, Int128 mask)
{
    Int128 shft_res1, shft_res2;

    if (imm == 0) {
        shft_res1 = Vj->Q(idx);
        shft_res2 = Vd->Q(idx);
    } else {
        shft_res1 = int128_urshift(Vj->Q(idx), imm);
        shft_res2 = int128_urshift(Vd->Q(idx), imm);
    }

    if (int128_ult(mask, shft_res1)) {
        Vd->D(idx * 2) = int128_getlo(mask);
    } else {
        Vd->D(idx * 2) = int128_getlo(shft_res1);
    }

    if (int128_ult(mask, shft_res2)) {
        Vd->D(idx * 2 + 1) = int128_getlo(mask);
    } else {
        Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
    }
}

void HELPER(vssrlni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    Int128 mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());

    for (i = 0; i < oprsz / 16; i++) {
        do_vssrlni_q(Vd, Vj, imm, i, mask);
    }
}

VSSRLNI(vssrlni_b_h, 16, B, H)
VSSRLNI(vssrlni_h_w, 32, H, W)
VSSRLNI(vssrlni_w_d, 64, W, D)
#define VSSRANI(NAME, BIT, E1, E2)                                                 \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                 \
{                                                                                  \
    int i, j, ofs;                                                                 \
    VReg temp = {};                                                                \
    VReg *Vd = (VReg *)vd;                                                         \
    VReg *Vj = (VReg *)vj;                                                         \
    int oprsz = simd_oprsz(desc);                                                  \
                                                                                   \
    ofs = LSX_LEN / BIT;                                                           \
    for (i = 0; i < oprsz / 16; i++) {                                             \
        for (j = 0; j < ofs; j++) {                                                \
            temp.E1(j + ofs * 2 * i) = do_ssrans_ ## E1(Vj->E2(j + ofs * i),       \
                                                        imm, BIT / 2 - 1);         \
            temp.E1(j + ofs * (2 * i + 1)) = do_ssrans_ ## E1(Vd->E2(j + ofs * i), \
                                                              imm, BIT / 2 - 1);   \
        }                                                                          \
    }                                                                              \
    *Vd = temp;                                                                    \
}

static void do_vssrani_d_q(VReg *Vd, VReg *Vj,
                           uint64_t imm, int idx, Int128 mask, Int128 min)
{
    Int128 shft_res1, shft_res2;

    if (imm == 0) {
        shft_res1 = Vj->Q(idx);
        shft_res2 = Vd->Q(idx);
    } else {
        shft_res1 = int128_rshift(Vj->Q(idx), imm);
        shft_res2 = int128_rshift(Vd->Q(idx), imm);
    }

    if (int128_gt(shft_res1, mask)) {
        Vd->D(idx * 2) = int128_getlo(mask);
    } else if (int128_lt(shft_res1, int128_neg(min))) {
        Vd->D(idx * 2) = int128_getlo(min);
    } else {
        Vd->D(idx * 2) = int128_getlo(shft_res1);
    }

    if (int128_gt(shft_res2, mask)) {
        Vd->D(idx * 2 + 1) = int128_getlo(mask);
    } else if (int128_lt(shft_res2, int128_neg(min))) {
        Vd->D(idx * 2 + 1) = int128_getlo(min);
    } else {
        Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
    }
}

void HELPER(vssrani_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    Int128 mask, min;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    mask = int128_sub(int128_lshift(int128_one(), 63), int128_one());
    min = int128_lshift(int128_one(), 63);

    for (i = 0; i < oprsz / 16; i++) {
        do_vssrani_d_q(Vd, Vj, imm, i, mask, min);
    }
}

VSSRANI(vssrani_b_h, 16, B, H)
VSSRANI(vssrani_h_w, 32, H, W)
VSSRANI(vssrani_w_d, 64, W, D)
#define VSSRLNUI(NAME, BIT, E1, E2)                                                \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                 \
{                                                                                  \
    int i, j, ofs;                                                                 \
    VReg temp = {};                                                                \
    VReg *Vd = (VReg *)vd;                                                         \
    VReg *Vj = (VReg *)vj;                                                         \
    int oprsz = simd_oprsz(desc);                                                  \
                                                                                   \
    ofs = LSX_LEN / BIT;                                                           \
    for (i = 0; i < oprsz / 16; i++) {                                             \
        for (j = 0; j < ofs; j++) {                                                \
            temp.E1(j + ofs * 2 * i) = do_ssrlnu_ ## E1(Vj->E2(j + ofs * i),       \
                                                        imm, BIT / 2);             \
            temp.E1(j + ofs * (2 * i + 1)) = do_ssrlnu_ ## E1(Vd->E2(j + ofs * i), \
                                                              imm, BIT / 2);       \
        }                                                                          \
    }                                                                              \
    *Vd = temp;                                                                    \
}

void HELPER(vssrlni_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    Int128 mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());

    for (i = 0; i < oprsz / 16; i++) {
        do_vssrlni_q(Vd, Vj, imm, i, mask);
    }
}

VSSRLNUI(vssrlni_bu_h, 16, B, H)
VSSRLNUI(vssrlni_hu_w, 32, H, W)
VSSRLNUI(vssrlni_wu_d, 64, W, D)
#define VSSRANUI(NAME, BIT, E1, E2)                                                \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                 \
{                                                                                  \
    int i, j, ofs;                                                                 \
    VReg temp = {};                                                                \
    VReg *Vd = (VReg *)vd;                                                         \
    VReg *Vj = (VReg *)vj;                                                         \
    int oprsz = simd_oprsz(desc);                                                  \
                                                                                   \
    ofs = LSX_LEN / BIT;                                                           \
    for (i = 0; i < oprsz / 16; i++) {                                             \
        for (j = 0; j < ofs; j++) {                                                \
            temp.E1(j + ofs * 2 * i) = do_ssranu_ ## E1(Vj->E2(j + ofs * i),       \
                                                        imm, BIT / 2);             \
            temp.E1(j + ofs * (2 * i + 1)) = do_ssranu_ ## E1(Vd->E2(j + ofs * i), \
                                                              imm, BIT / 2);       \
        }                                                                          \
    }                                                                              \
    *Vd = temp;                                                                    \
}

static void do_vssrani_du_q(VReg *Vd, VReg *Vj,
                            uint64_t imm, int idx, Int128 mask)
{
    Int128 shft_res1, shft_res2;

    if (imm == 0) {
        shft_res1 = Vj->Q(idx);
        shft_res2 = Vd->Q(idx);
    } else {
        shft_res1 = int128_rshift(Vj->Q(idx), imm);
        shft_res2 = int128_rshift(Vd->Q(idx), imm);
    }

    if (int128_lt(Vj->Q(idx), int128_zero())) {
        shft_res1 = int128_zero();
    }

    if (int128_lt(Vd->Q(idx), int128_zero())) {
        shft_res2 = int128_zero();
    }
    if (int128_ult(mask, shft_res1)) {
        Vd->D(idx * 2) = int128_getlo(mask);
    } else {
        Vd->D(idx * 2) = int128_getlo(shft_res1);
    }

    if (int128_ult(mask, shft_res2)) {
        Vd->D(idx * 2 + 1) = int128_getlo(mask);
    } else {
        Vd->D(idx * 2 + 1) = int128_getlo(shft_res2);
    }
}

void HELPER(vssrani_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    int i;
    Int128 mask;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    int oprsz = simd_oprsz(desc);

    mask = int128_sub(int128_lshift(int128_one(), 64), int128_one());

    for (i = 0; i < oprsz / 16; i++) {
        do_vssrani_du_q(Vd, Vj, imm, i, mask);
    }
}

VSSRANUI(vssrani_bu_h, 16, B, H)
VSSRANUI(vssrani_hu_w, 32, H, W)
VSSRANUI(vssrani_wu_d, 64, W, D)
#define SSRLRNS(E1, E2, T1, T2, T3)                \
static T1 do_ssrlrns_ ## E1(T2 e2, int sa, int sh) \
{                                                  \
    T1 shft_res;                                   \
                                                   \
    shft_res = do_vsrlr_ ## E2(e2, sa);            \
    T1 mask;                                       \
    mask = (1ull << sh) - 1;                       \
    if (shft_res > mask) {                         \
        return mask;                               \
    } else {                                       \
        return shft_res;                           \
    }                                              \
}

SSRLRNS(B, H, uint16_t, int16_t, uint8_t)
SSRLRNS(H, W, uint32_t, int32_t, uint16_t)
SSRLRNS(W, D, uint64_t, int64_t, uint32_t)

#define VSSRLRN(NAME, BIT, T, E1, E2)                                           \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                  \
{                                                                               \
    int i;                                                                      \
    VReg *Vd = (VReg *)vd;                                                      \
    VReg *Vj = (VReg *)vj;                                                      \
    VReg *Vk = (VReg *)vk;                                                      \
                                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                       \
        Vd->E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT,            \
                                      BIT / 2 - 1);                             \
    }                                                                           \
    Vd->D(1) = 0;                                                               \
}

VSSRLRN(vssrlrn_b_h, 16, uint16_t, B, H)
VSSRLRN(vssrlrn_h_w, 32, uint32_t, H, W)
VSSRLRN(vssrlrn_w_d, 64, uint64_t, W, D)
#define SSRARNS(E1, E2, T1, T2)                    \
static T1 do_ssrarns_ ## E1(T1 e2, int sa, int sh) \
{                                                  \
    T1 shft_res;                                   \
                                                   \
    shft_res = do_vsrar_ ## E2(e2, sa);            \
    T2 mask;                                       \
    mask = (1ll << sh) - 1;                        \
    if (shft_res > mask) {                         \
        return mask;                               \
    } else if (shft_res < -(mask + 1)) {           \
        return ~mask;                              \
    } else {                                       \
        return shft_res;                           \
    }                                              \
}

SSRARNS(B, H, int16_t, int8_t)
SSRARNS(H, W, int32_t, int16_t)
SSRARNS(W, D, int64_t, int32_t)

#define VSSRARN(NAME, BIT, T, E1, E2)                                           \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                  \
{                                                                               \
    int i;                                                                      \
    VReg *Vd = (VReg *)vd;                                                      \
    VReg *Vj = (VReg *)vj;                                                      \
    VReg *Vk = (VReg *)vk;                                                      \
                                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                       \
        Vd->E1(i) = do_ssrarns_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT,            \
                                      BIT / 2 - 1);                             \
    }                                                                           \
    Vd->D(1) = 0;                                                               \
}

VSSRARN(vssrarn_b_h, 16, uint16_t, B, H)
VSSRARN(vssrarn_h_w, 32, uint32_t, H, W)
VSSRARN(vssrarn_w_d, 64, uint64_t, W, D)
#define SSRLRNU(E1, E2, T1, T2, T3)                \
static T1 do_ssrlrnu_ ## E1(T3 e2, int sa, int sh) \
{                                                  \
    T1 shft_res;                                   \
                                                   \
    shft_res = do_vsrlr_ ## E2(e2, sa);            \
    T2 mask;                                       \
    mask = (1ull << sh) - 1;                       \
    if (shft_res > mask) {                         \
        return mask;                               \
    } else {                                       \
        return shft_res;                           \
    }                                              \
}

SSRLRNU(B, H, uint16_t, uint8_t, int16_t)
SSRLRNU(H, W, uint32_t, uint16_t, int32_t)
SSRLRNU(W, D, uint64_t, uint32_t, int64_t)

#define VSSRLRNU(NAME, BIT, T, E1, E2)                                          \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                  \
{                                                                               \
    int i;                                                                      \
    VReg *Vd = (VReg *)vd;                                                      \
    VReg *Vj = (VReg *)vj;                                                      \
    VReg *Vk = (VReg *)vk;                                                      \
                                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                       \
        Vd->E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2);  \
    }                                                                           \
    Vd->D(1) = 0;                                                               \
}

VSSRLRNU(vssrlrn_bu_h, 16, uint16_t, B, H)
VSSRLRNU(vssrlrn_hu_w, 32, uint32_t, H, W)
VSSRLRNU(vssrlrn_wu_d, 64, uint64_t, W, D)
#define SSRARNU(E1, E2, T1, T2, T3)                \
static T1 do_ssrarnu_ ## E1(T3 e2, int sa, int sh) \
{                                                  \
    T1 shft_res;                                   \
                                                   \
    if (e2 < 0) {                                  \
        shft_res = 0;                              \
    } else {                                       \
        shft_res = do_vsrar_ ## E2(e2, sa);        \
    }                                              \
    T2 mask;                                       \
    mask = (1ull << sh) - 1;                       \
    if (shft_res > mask) {                         \
        return mask;                               \
    } else {                                       \
        return shft_res;                           \
    }                                              \
}

SSRARNU(B, H, uint16_t, uint8_t, int16_t)
SSRARNU(H, W, uint32_t, uint16_t, int32_t)
SSRARNU(W, D, uint64_t, uint32_t, int64_t)

#define VSSRARNU(NAME, BIT, T, E1, E2)                                          \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc)                  \
{                                                                               \
    int i;                                                                      \
    VReg *Vd = (VReg *)vd;                                                      \
    VReg *Vj = (VReg *)vj;                                                      \
    VReg *Vk = (VReg *)vk;                                                      \
                                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                       \
        Vd->E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), (T)Vk->E2(i) % BIT, BIT / 2);  \
    }                                                                           \
    Vd->D(1) = 0;                                                               \
}

VSSRARNU(vssrarn_bu_h, 16, uint16_t, B, H)
VSSRARNU(vssrarn_hu_w, 32, uint32_t, H, W)
VSSRARNU(vssrarn_wu_d, 64, uint64_t, W, D)
#define VSSRLRNI(NAME, BIT, E1, E2)                                               \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                \
{                                                                                 \
    int i;                                                                        \
    VReg temp;                                                                    \
    VReg *Vd = (VReg *)vd;                                                        \
    VReg *Vj = (VReg *)vj;                                                        \
                                                                                  \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                         \
        temp.E1(i) = do_ssrlrns_ ## E1(Vj->E2(i), imm, BIT / 2 - 1);              \
        temp.E1(i + LSX_LEN / BIT) = do_ssrlrns_ ## E1(Vd->E2(i), imm,            \
                                                       BIT / 2 - 1);              \
    }                                                                             \
    *Vd = temp;                                                                   \
}
#define VSSRLRNI_Q(NAME, sh)                                                 \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)           \
{                                                                            \
    Int128 shft_res1, shft_res2, mask, r1, r2;                               \
    VReg *Vd = (VReg *)vd;                                                   \
    VReg *Vj = (VReg *)vj;                                                   \
                                                                             \
    if (imm == 0) {                                                          \
        shft_res1 = Vj->Q(0);                                                \
        shft_res2 = Vd->Q(0);                                                \
    } else {                                                                 \
        r1 = int128_and(int128_urshift(Vj->Q(0), (imm - 1)), int128_one());  \
        r2 = int128_and(int128_urshift(Vd->Q(0), (imm - 1)), int128_one());  \
                                                                             \
        shft_res1 = (int128_add(int128_urshift(Vj->Q(0), imm), r1));         \
        shft_res2 = (int128_add(int128_urshift(Vd->Q(0), imm), r2));         \
    }                                                                        \
                                                                             \
    mask = int128_sub(int128_lshift(int128_one(), sh), int128_one());        \
                                                                             \
    if (int128_ult(mask, shft_res1)) {                                       \
        Vd->D(0) = int128_getlo(mask);                                       \
    } else {                                                                 \
        Vd->D(0) = int128_getlo(shft_res1);                                  \
    }                                                                        \
                                                                             \
    if (int128_ult(mask, shft_res2)) {                                       \
        Vd->D(1) = int128_getlo(mask);                                       \
    } else {                                                                 \
        Vd->D(1) = int128_getlo(shft_res2);                                  \
    }                                                                        \
}
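/*
 * The 128-bit narrowing variant follows the same shape as the
 * element-wise helpers: r1/r2 re-inject the last bit shifted out
 * (rounding), and the result is clamped against mask = 2^sh - 1 before
 * the two 64-bit halves are written back.  sh is 63 for the signed
 * vssrlrni_d_q and 64 for the unsigned vssrlrni_du_q instantiated
 * further below.
 */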
VSSRLRNI(vssrlrni_b_h, 16, B, H)
VSSRLRNI(vssrlrni_h_w, 32, H, W)
VSSRLRNI(vssrlrni_w_d, 64, W, D)
VSSRLRNI_Q(vssrlrni_d_q, 63)
#define VSSRARNI(NAME, BIT, E1, E2)                                               \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)                \
{                                                                                 \
    int i;                                                                        \
    VReg temp;                                                                    \
    VReg *Vd = (VReg *)vd;                                                        \
    VReg *Vj = (VReg *)vj;                                                        \
                                                                                  \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                         \
        temp.E1(i) = do_ssrarns_ ## E1(Vj->E2(i), imm, BIT / 2 - 1);              \
        temp.E1(i + LSX_LEN / BIT) = do_ssrarns_ ## E1(Vd->E2(i), imm,            \
                                                       BIT / 2 - 1);              \
    }                                                                             \
    *Vd = temp;                                                                   \
}

void HELPER(vssrarni_d_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    if (imm == 0) {
        shft_res1 = Vj->Q(0);
        shft_res2 = Vd->Q(0);
    } else {
        r1 = int128_and(int128_rshift(Vj->Q(0), (imm - 1)), int128_one());
        r2 = int128_and(int128_rshift(Vd->Q(0), (imm - 1)), int128_one());

        shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
        shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
    }

    mask1 = int128_sub(int128_lshift(int128_one(), 63), int128_one());
    mask2 = int128_lshift(int128_one(), 63);

    if (int128_gt(shft_res1, mask1)) {
        Vd->D(0) = int128_getlo(mask1);
    } else if (int128_lt(shft_res1, int128_neg(mask2))) {
        Vd->D(0) = int128_getlo(mask2);
    } else {
        Vd->D(0) = int128_getlo(shft_res1);
    }

    if (int128_gt(shft_res2, mask1)) {
        Vd->D(1) = int128_getlo(mask1);
    } else if (int128_lt(shft_res2, int128_neg(mask2))) {
        Vd->D(1) = int128_getlo(mask2);
    } else {
        Vd->D(1) = int128_getlo(shft_res2);
    }
}

VSSRARNI(vssrarni_b_h, 16, B, H)
VSSRARNI(vssrarni_h_w, 32, H, W)
VSSRARNI(vssrarni_w_d, 64, W, D)
#define VSSRLRNUI(NAME, BIT, E1, E2)                                            \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)              \
{                                                                               \
    int i;                                                                      \
    VReg temp;                                                                  \
    VReg *Vd = (VReg *)vd;                                                      \
    VReg *Vj = (VReg *)vj;                                                      \
                                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                       \
        temp.E1(i) = do_ssrlrnu_ ## E1(Vj->E2(i), imm, BIT / 2);                \
        temp.E1(i + LSX_LEN / BIT) = do_ssrlrnu_ ## E1(Vd->E2(i), imm,          \
                                                       BIT / 2);                \
    }                                                                           \
    *Vd = temp;                                                                 \
}

VSSRLRNUI(vssrlrni_bu_h, 16, B, H)
VSSRLRNUI(vssrlrni_hu_w, 32, H, W)
VSSRLRNUI(vssrlrni_wu_d, 64, W, D)
VSSRLRNI_Q(vssrlrni_du_q, 64)
#define VSSRARNUI(NAME, BIT, E1, E2)                                            \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc)              \
{                                                                               \
    int i;                                                                      \
    VReg temp;                                                                  \
    VReg *Vd = (VReg *)vd;                                                      \
    VReg *Vj = (VReg *)vj;                                                      \
                                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                                       \
        temp.E1(i) = do_ssrarnu_ ## E1(Vj->E2(i), imm, BIT / 2);                \
        temp.E1(i + LSX_LEN / BIT) = do_ssrarnu_ ## E1(Vd->E2(i), imm,          \
                                                       BIT / 2);                \
    }                                                                           \
    *Vd = temp;                                                                 \
}

void HELPER(vssrarni_du_q)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    Int128 shft_res1, shft_res2, mask1, mask2, r1, r2;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    if (imm == 0) {
        shft_res1 = Vj->Q(0);
        shft_res2 = Vd->Q(0);
    } else {
        r1 = int128_and(int128_rshift(Vj->Q(0), (imm - 1)), int128_one());
        r2 = int128_and(int128_rshift(Vd->Q(0), (imm - 1)), int128_one());

        shft_res1 = int128_add(int128_rshift(Vj->Q(0), imm), r1);
        shft_res2 = int128_add(int128_rshift(Vd->Q(0), imm), r2);
    }

    if (int128_lt(Vj->Q(0), int128_zero())) {
        shft_res1 = int128_zero();
    }
    if (int128_lt(Vd->Q(0), int128_zero())) {
        shft_res2 = int128_zero();
    }

    mask1 = int128_sub(int128_lshift(int128_one(), 64), int128_one());
    mask2 = int128_lshift(int128_one(), 64);

    if (int128_gt(shft_res1, mask1)) {
        Vd->D(0) = int128_getlo(mask1);
    } else if (int128_lt(shft_res1, int128_neg(mask2))) {
        Vd->D(0) = int128_getlo(mask2);
    } else {
        Vd->D(0) = int128_getlo(shft_res1);
    }

    if (int128_gt(shft_res2, mask1)) {
        Vd->D(1) = int128_getlo(mask1);
    } else if (int128_lt(shft_res2, int128_neg(mask2))) {
        Vd->D(1) = int128_getlo(mask2);
    } else {
        Vd->D(1) = int128_getlo(shft_res2);
    }
}

VSSRARNUI(vssrarni_bu_h, 16, B, H)
VSSRARNUI(vssrarni_hu_w, 32, H, W)
VSSRARNUI(vssrarni_wu_d, 64, W, D)
#define DO_2OP(NAME, BIT, E, DO_OP)                  \
void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
{                                                    \
    int i;                                           \
    VReg *Vd = (VReg *)vd;                           \
    VReg *Vj = (VReg *)vj;                           \
                                                     \
    for (i = 0; i < LSX_LEN / BIT; i++)              \
    {                                                \
        Vd->E(i) = DO_OP(Vj->E(i));                  \
    }                                                \
}

#define DO_CLO_B(N)  (clz32(~N & 0xff) - 24)
#define DO_CLO_H(N)  (clz32(~N & 0xffff) - 16)
#define DO_CLO_W(N)  (clz32(~N))
#define DO_CLO_D(N)  (clz64(~N))
#define DO_CLZ_B(N)  (clz32(N) - 24)
#define DO_CLZ_H(N)  (clz32(N) - 16)
#define DO_CLZ_W(N)  (clz32(N))
#define DO_CLZ_D(N)  (clz64(N))

DO_2OP(vclo_b, 8, UB, DO_CLO_B)
DO_2OP(vclo_h, 16, UH, DO_CLO_H)
DO_2OP(vclo_w, 32, UW, DO_CLO_W)
DO_2OP(vclo_d, 64, UD, DO_CLO_D)
DO_2OP(vclz_b, 8, UB, DO_CLZ_B)
DO_2OP(vclz_h, 16, UH, DO_CLZ_H)
DO_2OP(vclz_w, 32, UW, DO_CLZ_W)
DO_2OP(vclz_d, 64, UD, DO_CLZ_D)
#define VPCNT(NAME, BIT, E, FN)                      \
void HELPER(NAME)(void *vd, void *vj, uint32_t desc) \
{                                                    \
    int i;                                           \
    VReg *Vd = (VReg *)vd;                           \
    VReg *Vj = (VReg *)vj;                           \
                                                     \
    for (i = 0; i < LSX_LEN / BIT; i++)              \
    {                                                \
        Vd->E(i) = FN(Vj->E(i));                     \
    }                                                \
}

VPCNT(vpcnt_b, 8, UB, ctpop8)
VPCNT(vpcnt_h, 16, UH, ctpop16)
VPCNT(vpcnt_w, 32, UW, ctpop32)
VPCNT(vpcnt_d, 64, UD, ctpop64)
#define DO_BITCLR(a, bit) (a & ~(1ull << bit))
#define DO_BITSET(a, bit) (a | 1ull << bit)
#define DO_BITREV(a, bit) (a ^ (1ull << bit))

#define DO_BIT(NAME, BIT, E, DO_OP)                         \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t v) \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
                                                            \
    for (i = 0; i < LSX_LEN / BIT; i++) {                   \
        Vd->E(i) = DO_OP(Vj->E(i), Vk->E(i) % BIT);         \
    }                                                       \
}

DO_BIT(vbitclr_b, 8, UB, DO_BITCLR)
DO_BIT(vbitclr_h, 16, UH, DO_BITCLR)
DO_BIT(vbitclr_w, 32, UW, DO_BITCLR)
DO_BIT(vbitclr_d, 64, UD, DO_BITCLR)
DO_BIT(vbitset_b, 8, UB, DO_BITSET)
DO_BIT(vbitset_h, 16, UH, DO_BITSET)
DO_BIT(vbitset_w, 32, UW, DO_BITSET)
DO_BIT(vbitset_d, 64, UD, DO_BITSET)
DO_BIT(vbitrev_b, 8, UB, DO_BITREV)
DO_BIT(vbitrev_h, 16, UH, DO_BITREV)
DO_BIT(vbitrev_w, 32, UW, DO_BITREV)
DO_BIT(vbitrev_d, 64, UD, DO_BITREV)

#define DO_BITI(NAME, BIT, E, DO_OP)                            \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
                                                                \
    for (i = 0; i < LSX_LEN / BIT; i++) {                       \
        Vd->E(i) = DO_OP(Vj->E(i), imm);                        \
    }                                                           \
}

DO_BITI(vbitclri_b, 8, UB, DO_BITCLR)
DO_BITI(vbitclri_h, 16, UH, DO_BITCLR)
DO_BITI(vbitclri_w, 32, UW, DO_BITCLR)
DO_BITI(vbitclri_d, 64, UD, DO_BITCLR)
DO_BITI(vbitseti_b, 8, UB, DO_BITSET)
DO_BITI(vbitseti_h, 16, UH, DO_BITSET)
DO_BITI(vbitseti_w, 32, UW, DO_BITSET)
DO_BITI(vbitseti_d, 64, UD, DO_BITSET)
DO_BITI(vbitrevi_b, 8, UB, DO_BITREV)
DO_BITI(vbitrevi_h, 16, UH, DO_BITREV)
DO_BITI(vbitrevi_w, 32, UW, DO_BITREV)
DO_BITI(vbitrevi_d, 64, UD, DO_BITREV)
2274 #define VFRSTP(NAME, BIT, MASK, E) \
2275 void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
2278 VReg *Vd = (VReg *)vd; \
2279 VReg *Vj = (VReg *)vj; \
2280 VReg *Vk = (VReg *)vk; \
2282 for (i = 0; i < LSX_LEN/BIT; i++) { \
2283 if (Vj->E(i) < 0) { \
2287 m = Vk->E(0) & MASK; \
VFRSTP(vfrstp_b, 8, 0xf, B)
VFRSTP(vfrstp_h, 16, 0x7, H)
#define VFRSTPI(NAME, BIT, E)                                      \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i, m;                                                      \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    for (i = 0; i < LSX_LEN/BIT; i++) {                            \
        if (Vj->E(i) < 0) {                                        \
            break;                                                 \
        }                                                          \
    }                                                              \
    m = imm % (LSX_LEN/BIT);                                       \
    Vd->E(m) = i;                                                  \
}
VFRSTPI(vfrstpi_b, 8, B)
VFRSTPI(vfrstpi_h, 16, H)
static void vec_update_fcsr0_mask(CPULoongArchState *env,
                                  uintptr_t pc, int mask)
{
    int flags = get_float_exception_flags(&env->fp_status);

    set_float_exception_flags(0, &env->fp_status);

    flags &= ~mask;

    if (flags) {
        flags = ieee_ex_to_loongarch(flags);
        UPDATE_FP_CAUSE(env->fcsr0, flags);
    }

    if (GET_FP_ENABLES(env->fcsr0) & flags) {
        do_raise_exception(env, EXCCODE_FPE, pc);
    } else {
        UPDATE_FP_FLAGS(env->fcsr0, flags);
    }
}
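/*
 * This helper drains softfloat's accumulated exception flags,
 * translates them to the LoongArch FCSR0 layout, and records them in
 * the cause field; it then either raises EXCCODE_FPE (when a matching
 * enable bit is set) or accumulates the flags into the sticky flag
 * field.  The mask argument lets callers such as FLOGB suppress
 * selected flags before they are reported.
 */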
static void vec_update_fcsr0(CPULoongArchState *env, uintptr_t pc)
{
    vec_update_fcsr0_mask(env, pc, 0);
}
static inline void vec_clear_cause(CPULoongArchState *env)
{
    SET_FP_CAUSE(env->fcsr0, 0);
}
#define DO_3OP_F(NAME, BIT, E, FN)                          \
void HELPER(NAME)(void *vd, void *vj, void *vk,             \
                  CPULoongArchState *env, uint32_t desc)    \
{                                                           \
    int i;                                                  \
    VReg *Vd = (VReg *)vd;                                  \
    VReg *Vj = (VReg *)vj;                                  \
    VReg *Vk = (VReg *)vk;                                  \
                                                            \
    vec_clear_cause(env);                                   \
    for (i = 0; i < LSX_LEN/BIT; i++) {                     \
        Vd->E(i) = FN(Vj->E(i), Vk->E(i), &env->fp_status); \
        vec_update_fcsr0(env, GETPC());                     \
    }                                                       \
}
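/*
 * vec_update_fcsr0() is called once per element, inside the loop, so
 * a trapping exception is raised for the first faulting lane rather
 * than once after the whole vector has been processed.
 */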
DO_3OP_F(vfadd_s, 32, UW, float32_add)
DO_3OP_F(vfadd_d, 64, UD, float64_add)
DO_3OP_F(vfsub_s, 32, UW, float32_sub)
DO_3OP_F(vfsub_d, 64, UD, float64_sub)
DO_3OP_F(vfmul_s, 32, UW, float32_mul)
DO_3OP_F(vfmul_d, 64, UD, float64_mul)
DO_3OP_F(vfdiv_s, 32, UW, float32_div)
DO_3OP_F(vfdiv_d, 64, UD, float64_div)
DO_3OP_F(vfmax_s, 32, UW, float32_maxnum)
DO_3OP_F(vfmax_d, 64, UD, float64_maxnum)
DO_3OP_F(vfmin_s, 32, UW, float32_minnum)
DO_3OP_F(vfmin_d, 64, UD, float64_minnum)
DO_3OP_F(vfmaxa_s, 32, UW, float32_maxnummag)
DO_3OP_F(vfmaxa_d, 64, UD, float64_maxnummag)
DO_3OP_F(vfmina_s, 32, UW, float32_minnummag)
DO_3OP_F(vfmina_d, 64, UD, float64_minnummag)
#define DO_4OP_F(NAME, BIT, E, FN, flags)                                    \
void HELPER(NAME)(void *vd, void *vj, void *vk, void *va,                    \
                  CPULoongArchState *env, uint32_t desc)                     \
{                                                                            \
    int i;                                                                   \
    VReg *Vd = (VReg *)vd;                                                   \
    VReg *Vj = (VReg *)vj;                                                   \
    VReg *Vk = (VReg *)vk;                                                   \
    VReg *Va = (VReg *)va;                                                   \
                                                                             \
    vec_clear_cause(env);                                                    \
    for (i = 0; i < LSX_LEN/BIT; i++) {                                      \
        Vd->E(i) = FN(Vj->E(i), Vk->E(i), Va->E(i), flags, &env->fp_status); \
        vec_update_fcsr0(env, GETPC());                                      \
    }                                                                        \
}
DO_4OP_F(vfmadd_s, 32, UW, float32_muladd, 0)
DO_4OP_F(vfmadd_d, 64, UD, float64_muladd, 0)
DO_4OP_F(vfmsub_s, 32, UW, float32_muladd, float_muladd_negate_c)
DO_4OP_F(vfmsub_d, 64, UD, float64_muladd, float_muladd_negate_c)
DO_4OP_F(vfnmadd_s, 32, UW, float32_muladd, float_muladd_negate_result)
DO_4OP_F(vfnmadd_d, 64, UD, float64_muladd, float_muladd_negate_result)
DO_4OP_F(vfnmsub_s, 32, UW, float32_muladd,
         float_muladd_negate_c | float_muladd_negate_result)
DO_4OP_F(vfnmsub_d, 64, UD, float64_muladd,
         float_muladd_negate_c | float_muladd_negate_result)
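/*
 * All four fused forms share float##BIT##_muladd and differ only in
 * the negation flags: vfmsub negates the addend (a * b - c), vfnmadd
 * negates the result (-(a * b + c)), and vfnmsub negates both
 * (-(a * b - c)).
 */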
#define DO_2OP_F(NAME, BIT, E, FN)                       \
void HELPER(NAME)(void *vd, void *vj,                    \
                  CPULoongArchState *env, uint32_t desc) \
{                                                        \
    int i;                                               \
    VReg *Vd = (VReg *)vd;                               \
    VReg *Vj = (VReg *)vj;                               \
                                                         \
    vec_clear_cause(env);                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                  \
        Vd->E(i) = FN(env, Vj->E(i));                    \
    }                                                    \
}
#define FLOGB(BIT, T)                                          \
static T do_flogb_## BIT(CPULoongArchState *env, T fj)         \
{                                                              \
    T fp, fd;                                                  \
    float_status *status = &env->fp_status;                    \
    FloatRoundMode old_mode = get_float_rounding_mode(status); \
                                                               \
    set_float_rounding_mode(float_round_down, status);         \
    fp = float ## BIT ##_log2(fj, status);                     \
    fd = float ## BIT ##_round_to_int(fp, status);             \
    set_float_rounding_mode(old_mode, status);                 \
    vec_update_fcsr0_mask(env, GETPC(), float_flag_inexact);   \
    return fd;                                                 \
}

FLOGB(32, uint32_t)
FLOGB(64, uint64_t)
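/*
 * FLOGB computes floor(log2(x)): log2 is evaluated and rounded to an
 * integer with the rounding mode forced to float_round_down, then the
 * caller's mode is restored.  The inexact flag is masked out because
 * any input that is not a power of two would otherwise always raise
 * it.
 */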
#define FCLASS(NAME, BIT, E, FN)                         \
void HELPER(NAME)(void *vd, void *vj,                    \
                  CPULoongArchState *env, uint32_t desc) \
{                                                        \
    int i;                                               \
    VReg *Vd = (VReg *)vd;                               \
    VReg *Vj = (VReg *)vj;                               \
                                                         \
    for (i = 0; i < LSX_LEN/BIT; i++) {                  \
        Vd->E(i) = FN(env, Vj->E(i));                    \
    }                                                    \
}
FCLASS(vfclass_s, 32, UW, helper_fclass_s)
FCLASS(vfclass_d, 64, UD, helper_fclass_d)
#define FSQRT(BIT, T)                                  \
static T do_fsqrt_## BIT(CPULoongArchState *env, T fj) \
{                                                      \
    T fd;                                              \
                                                       \
    fd = float ## BIT ##_sqrt(fj, &env->fp_status);    \
    vec_update_fcsr0(env, GETPC());                    \
    return fd;                                         \
}

FSQRT(32, uint32_t)
FSQRT(64, uint64_t)
#define FRECIP(BIT, T)                                                  \
static T do_frecip_## BIT(CPULoongArchState *env, T fj)                 \
{                                                                       \
    T fd;                                                               \
                                                                        \
    fd = float ## BIT ##_div(float ## BIT ##_one, fj, &env->fp_status); \
    vec_update_fcsr0(env, GETPC());                                     \
    return fd;                                                          \
}

FRECIP(32, uint32_t)
FRECIP(64, uint64_t)
#define FRSQRT(BIT, T)                                                  \
static T do_frsqrt_## BIT(CPULoongArchState *env, T fj)                 \
{                                                                       \
    T fp, fd;                                                           \
                                                                        \
    fp = float ## BIT ##_sqrt(fj, &env->fp_status);                     \
    fd = float ## BIT ##_div(float ## BIT ##_one, fp, &env->fp_status); \
    vec_update_fcsr0(env, GETPC());                                     \
    return fd;                                                          \
}

FRSQRT(32, uint32_t)
FRSQRT(64, uint64_t)
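/*
 * vfrecip and vfrsqrt are implemented here as exact IEEE operations
 * (1 / x and 1 / sqrt(x)) rather than as low-precision reciprocal
 * estimates, so they raise the same exception flags as vfdiv and
 * vfsqrt would.
 */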
DO_2OP_F(vflogb_s, 32, UW, do_flogb_32)
DO_2OP_F(vflogb_d, 64, UD, do_flogb_64)
DO_2OP_F(vfsqrt_s, 32, UW, do_fsqrt_32)
DO_2OP_F(vfsqrt_d, 64, UD, do_fsqrt_64)
DO_2OP_F(vfrecip_s, 32, UW, do_frecip_32)
DO_2OP_F(vfrecip_d, 64, UD, do_frecip_64)
DO_2OP_F(vfrsqrt_s, 32, UW, do_frsqrt_32)
DO_2OP_F(vfrsqrt_d, 64, UD, do_frsqrt_64)
static uint32_t float16_cvt_float32(uint16_t h, float_status *status)
{
    return float16_to_float32(h, true, status);
}

static uint64_t float32_cvt_float64(uint32_t s, float_status *status)
{
    return float32_to_float64(s, status);
}

static uint16_t float32_cvt_float16(uint32_t s, float_status *status)
{
    return float32_to_float16(s, true, status);
}

static uint32_t float64_cvt_float32(uint64_t d, float_status *status)
{
    return float64_to_float32(d, status);
}
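/*
 * The boolean passed to float16_to_float32() and float32_to_float16()
 * selects IEEE half-precision semantics (ieee = true), as opposed to
 * the ARM alternative half-precision format that softfloat also
 * supports.
 */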
void HELPER(vfcvtl_s_h)(void *vd, void *vj,
                        CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    vec_clear_cause(env);
    for (i = 0; i < LSX_LEN/32; i++) {
        temp.UW(i) = float16_cvt_float32(Vj->UH(i), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
void HELPER(vfcvtl_d_s)(void *vd, void *vj,
                        CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    vec_clear_cause(env);
    for (i = 0; i < LSX_LEN/64; i++) {
        temp.UD(i) = float32_cvt_float64(Vj->UW(i), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
void HELPER(vfcvth_s_h)(void *vd, void *vj,
                        CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    vec_clear_cause(env);
    for (i = 0; i < LSX_LEN/32; i++) {
        temp.UW(i) = float16_cvt_float32(Vj->UH(i + 4), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
void HELPER(vfcvth_d_s)(void *vd, void *vj,
                        CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    vec_clear_cause(env);
    for (i = 0; i < LSX_LEN/64; i++) {
        temp.UD(i) = float32_cvt_float64(Vj->UW(i + 2), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
void HELPER(vfcvt_h_s)(void *vd, void *vj, void *vk,
                       CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    vec_clear_cause(env);
    for (i = 0; i < LSX_LEN/32; i++) {
        temp.UH(i + 4) = float32_cvt_float16(Vj->UW(i), &env->fp_status);
        temp.UH(i) = float32_cvt_float16(Vk->UW(i), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
void HELPER(vfcvt_s_d)(void *vd, void *vj, void *vk,
                       CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    vec_clear_cause(env);
    for (i = 0; i < LSX_LEN/64; i++) {
        temp.UW(i + 2) = float64_cvt_float32(Vj->UD(i), &env->fp_status);
        temp.UW(i) = float64_cvt_float32(Vk->UD(i), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
void HELPER(vfrint_s)(void *vd, void *vj,
                      CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    vec_clear_cause(env);
    for (i = 0; i < 4; i++) {
        Vd->W(i) = float32_round_to_int(Vj->UW(i), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
}
void HELPER(vfrint_d)(void *vd, void *vj,
                      CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    vec_clear_cause(env);
    for (i = 0; i < 2; i++) {
        Vd->D(i) = float64_round_to_int(Vj->UD(i), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
}
#define FCVT_2OP(NAME, BIT, E, MODE)                                        \
void HELPER(NAME)(void *vd, void *vj,                                       \
                  CPULoongArchState *env, uint32_t desc)                    \
{                                                                           \
    int i;                                                                  \
    VReg *Vd = (VReg *)vd;                                                  \
    VReg *Vj = (VReg *)vj;                                                  \
                                                                            \
    vec_clear_cause(env);                                                   \
    for (i = 0; i < LSX_LEN/BIT; i++) {                                     \
        FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
        set_float_rounding_mode(MODE, &env->fp_status);                     \
        Vd->E(i) = float## BIT ## _round_to_int(Vj->E(i), &env->fp_status); \
        set_float_rounding_mode(old_mode, &env->fp_status);                 \
        vec_update_fcsr0(env, GETPC());                                     \
    }                                                                       \
}
FCVT_2OP(vfrintrne_s, 32, UW, float_round_nearest_even)
FCVT_2OP(vfrintrne_d, 64, UD, float_round_nearest_even)
FCVT_2OP(vfrintrz_s, 32, UW, float_round_to_zero)
FCVT_2OP(vfrintrz_d, 64, UD, float_round_to_zero)
FCVT_2OP(vfrintrp_s, 32, UW, float_round_up)
FCVT_2OP(vfrintrp_d, 64, UD, float_round_up)
FCVT_2OP(vfrintrm_s, 32, UW, float_round_down)
FCVT_2OP(vfrintrm_d, 64, UD, float_round_down)
#define FTINT(NAME, FMT1, FMT2, T1, T2, MODE)                           \
static T2 do_ftint ## NAME(CPULoongArchState *env, T1 fj)               \
{                                                                       \
    T2 fd;                                                              \
    FloatRoundMode old_mode = get_float_rounding_mode(&env->fp_status); \
                                                                        \
    set_float_rounding_mode(MODE, &env->fp_status);                     \
    fd = do_## FMT1 ##_to_## FMT2(env, fj);                             \
    set_float_rounding_mode(old_mode, &env->fp_status);                 \
    return fd;                                                          \
}
#define DO_FTINT(FMT1, FMT2, T1, T2)                                         \
static T2 do_## FMT1 ##_to_## FMT2(CPULoongArchState *env, T1 fj)            \
{                                                                            \
    T2 fd;                                                                   \
                                                                             \
    fd = FMT1 ##_to_## FMT2(fj, &env->fp_status);                            \
    if (get_float_exception_flags(&env->fp_status) & (float_flag_invalid)) { \
        if (FMT1 ##_is_any_nan(fj)) {                                        \
            fd = 0;                                                          \
        }                                                                    \
    }                                                                        \
    vec_update_fcsr0(env, GETPC());                                          \
    return fd;                                                               \
}
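/*
 * softfloat's float-to-int conversions return the saturated
 * minimum/maximum value on an invalid operation, including for NaN
 * inputs; LoongArch defines the result of converting a NaN as 0, so
 * that case is patched up here after checking float_flag_invalid.
 */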
DO_FTINT(float32, int32, uint32_t, uint32_t)
DO_FTINT(float64, int64, uint64_t, uint64_t)
DO_FTINT(float32, uint32, uint32_t, uint32_t)
DO_FTINT(float64, uint64, uint64_t, uint64_t)
DO_FTINT(float64, int32, uint64_t, uint32_t)
DO_FTINT(float32, int64, uint32_t, uint64_t)
FTINT(rne_w_s, float32, int32, uint32_t, uint32_t, float_round_nearest_even)
FTINT(rne_l_d, float64, int64, uint64_t, uint64_t, float_round_nearest_even)
FTINT(rp_w_s, float32, int32, uint32_t, uint32_t, float_round_up)
FTINT(rp_l_d, float64, int64, uint64_t, uint64_t, float_round_up)
FTINT(rz_w_s, float32, int32, uint32_t, uint32_t, float_round_to_zero)
FTINT(rz_l_d, float64, int64, uint64_t, uint64_t, float_round_to_zero)
FTINT(rm_w_s, float32, int32, uint32_t, uint32_t, float_round_down)
FTINT(rm_l_d, float64, int64, uint64_t, uint64_t, float_round_down)
DO_2OP_F(vftintrne_w_s, 32, UW, do_ftintrne_w_s)
DO_2OP_F(vftintrne_l_d, 64, UD, do_ftintrne_l_d)
DO_2OP_F(vftintrp_w_s, 32, UW, do_ftintrp_w_s)
DO_2OP_F(vftintrp_l_d, 64, UD, do_ftintrp_l_d)
DO_2OP_F(vftintrz_w_s, 32, UW, do_ftintrz_w_s)
DO_2OP_F(vftintrz_l_d, 64, UD, do_ftintrz_l_d)
DO_2OP_F(vftintrm_w_s, 32, UW, do_ftintrm_w_s)
DO_2OP_F(vftintrm_l_d, 64, UD, do_ftintrm_l_d)
DO_2OP_F(vftint_w_s, 32, UW, do_float32_to_int32)
DO_2OP_F(vftint_l_d, 64, UD, do_float64_to_int64)
FTINT(rz_wu_s, float32, uint32, uint32_t, uint32_t, float_round_to_zero)
FTINT(rz_lu_d, float64, uint64, uint64_t, uint64_t, float_round_to_zero)
DO_2OP_F(vftintrz_wu_s, 32, UW, do_ftintrz_wu_s)
DO_2OP_F(vftintrz_lu_d, 64, UD, do_ftintrz_lu_d)
DO_2OP_F(vftint_wu_s, 32, UW, do_float32_to_uint32)
DO_2OP_F(vftint_lu_d, 64, UD, do_float64_to_uint64)
FTINT(rm_w_d, float64, int32, uint64_t, uint32_t, float_round_down)
FTINT(rp_w_d, float64, int32, uint64_t, uint32_t, float_round_up)
FTINT(rz_w_d, float64, int32, uint64_t, uint32_t, float_round_to_zero)
FTINT(rne_w_d, float64, int32, uint64_t, uint32_t, float_round_nearest_even)
#define FTINT_W_D(NAME, FN)                              \
void HELPER(NAME)(void *vd, void *vj, void *vk,          \
                  CPULoongArchState *env, uint32_t desc) \
{                                                        \
    int i;                                               \
    VReg temp;                                           \
    VReg *Vd = (VReg *)vd;                               \
    VReg *Vj = (VReg *)vj;                               \
    VReg *Vk = (VReg *)vk;                               \
                                                         \
    vec_clear_cause(env);                                \
    for (i = 0; i < 2; i++) {                            \
        temp.W(i + 2) = FN(env, Vj->UD(i));              \
        temp.W(i) = FN(env, Vk->UD(i));                  \
    }                                                    \
    *Vd = temp;                                          \
}
FTINT_W_D(vftint_w_d, do_float64_to_int32)
FTINT_W_D(vftintrm_w_d, do_ftintrm_w_d)
FTINT_W_D(vftintrp_w_d, do_ftintrp_w_d)
FTINT_W_D(vftintrz_w_d, do_ftintrz_w_d)
FTINT_W_D(vftintrne_w_d, do_ftintrne_w_d)
FTINT(rml_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
FTINT(rpl_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
FTINT(rzl_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
FTINT(rnel_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
FTINT(rmh_l_s, float32, int64, uint32_t, uint64_t, float_round_down)
FTINT(rph_l_s, float32, int64, uint32_t, uint64_t, float_round_up)
FTINT(rzh_l_s, float32, int64, uint32_t, uint64_t, float_round_to_zero)
FTINT(rneh_l_s, float32, int64, uint32_t, uint64_t, float_round_nearest_even)
#define FTINTL_L_S(NAME, FN)                             \
void HELPER(NAME)(void *vd, void *vj,                    \
                  CPULoongArchState *env, uint32_t desc) \
{                                                        \
    int i;                                               \
    VReg temp;                                           \
    VReg *Vd = (VReg *)vd;                               \
    VReg *Vj = (VReg *)vj;                               \
                                                         \
    vec_clear_cause(env);                                \
    for (i = 0; i < 2; i++) {                            \
        temp.D(i) = FN(env, Vj->UW(i));                  \
    }                                                    \
    *Vd = temp;                                          \
}
FTINTL_L_S(vftintl_l_s, do_float32_to_int64)
FTINTL_L_S(vftintrml_l_s, do_ftintrml_l_s)
FTINTL_L_S(vftintrpl_l_s, do_ftintrpl_l_s)
FTINTL_L_S(vftintrzl_l_s, do_ftintrzl_l_s)
FTINTL_L_S(vftintrnel_l_s, do_ftintrnel_l_s)
#define FTINTH_L_S(NAME, FN)                             \
void HELPER(NAME)(void *vd, void *vj,                    \
                  CPULoongArchState *env, uint32_t desc) \
{                                                        \
    int i;                                               \
    VReg temp;                                           \
    VReg *Vd = (VReg *)vd;                               \
    VReg *Vj = (VReg *)vj;                               \
                                                         \
    vec_clear_cause(env);                                \
    for (i = 0; i < 2; i++) {                            \
        temp.D(i) = FN(env, Vj->UW(i + 2));              \
    }                                                    \
    *Vd = temp;                                          \
}
FTINTH_L_S(vftinth_l_s, do_float32_to_int64)
FTINTH_L_S(vftintrmh_l_s, do_ftintrmh_l_s)
FTINTH_L_S(vftintrph_l_s, do_ftintrph_l_s)
FTINTH_L_S(vftintrzh_l_s, do_ftintrzh_l_s)
FTINTH_L_S(vftintrneh_l_s, do_ftintrneh_l_s)
#define FFINT(NAME, FMT1, FMT2, T1, T2)                    \
static T2 do_ffint_ ## NAME(CPULoongArchState *env, T1 fj) \
{                                                          \
    T2 fd;                                                 \
                                                           \
    fd = FMT1 ##_to_## FMT2(fj, &env->fp_status);          \
    vec_update_fcsr0(env, GETPC());                        \
    return fd;                                             \
}
FFINT(s_w, int32, float32, int32_t, uint32_t)
FFINT(d_l, int64, float64, int64_t, uint64_t)
FFINT(s_wu, uint32, float32, uint32_t, uint32_t)
FFINT(d_lu, uint64, float64, uint64_t, uint64_t)
DO_2OP_F(vffint_s_w, 32, W, do_ffint_s_w)
DO_2OP_F(vffint_d_l, 64, D, do_ffint_d_l)
DO_2OP_F(vffint_s_wu, 32, UW, do_ffint_s_wu)
DO_2OP_F(vffint_d_lu, 64, UD, do_ffint_d_lu)
void HELPER(vffintl_d_w)(void *vd, void *vj,
                         CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    vec_clear_cause(env);
    for (i = 0; i < 2; i++) {
        temp.D(i) = int32_to_float64(Vj->W(i), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
void HELPER(vffinth_d_w)(void *vd, void *vj,
                         CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    vec_clear_cause(env);
    for (i = 0; i < 2; i++) {
        temp.D(i) = int32_to_float64(Vj->W(i + 2), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
void HELPER(vffint_s_l)(void *vd, void *vj, void *vk,
                        CPULoongArchState *env, uint32_t desc)
{
    int i;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;

    vec_clear_cause(env);
    for (i = 0; i < 2; i++) {
        temp.W(i + 2) = int64_to_float32(Vj->D(i), &env->fp_status);
        temp.W(i) = int64_to_float32(Vk->D(i), &env->fp_status);
        vec_update_fcsr0(env, GETPC());
    }
    *Vd = temp;
}
#define VSEQ(a, b) (a == b ? -1 : 0)
#define VSLE(a, b) (a <= b ? -1 : 0)
#define VSLT(a, b) (a < b ? -1 : 0)
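/*
 * Comparisons produce an all-ones mask (-1) on success and 0 on
 * failure, the usual SIMD predicate encoding: VSLT(3, 5) yields -1
 * (0xff in a byte element), VSLT(5, 3) yields 0.
 */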
#define VCMPI(NAME, BIT, E, DO_OP)                              \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t v) \
{                                                               \
    int i;                                                      \
    VReg *Vd = (VReg *)vd;                                      \
    VReg *Vj = (VReg *)vj;                                      \
    typedef __typeof(Vd->E(0)) TD;                              \
                                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                         \
        Vd->E(i) = DO_OP(Vj->E(i), (TD)imm);                    \
    }                                                           \
}
VCMPI(vseqi_b, 8, B, VSEQ)
VCMPI(vseqi_h, 16, H, VSEQ)
VCMPI(vseqi_w, 32, W, VSEQ)
VCMPI(vseqi_d, 64, D, VSEQ)
VCMPI(vslei_b, 8, B, VSLE)
VCMPI(vslei_h, 16, H, VSLE)
VCMPI(vslei_w, 32, W, VSLE)
VCMPI(vslei_d, 64, D, VSLE)
VCMPI(vslei_bu, 8, UB, VSLE)
VCMPI(vslei_hu, 16, UH, VSLE)
VCMPI(vslei_wu, 32, UW, VSLE)
VCMPI(vslei_du, 64, UD, VSLE)
VCMPI(vslti_b, 8, B, VSLT)
VCMPI(vslti_h, 16, H, VSLT)
VCMPI(vslti_w, 32, W, VSLT)
VCMPI(vslti_d, 64, D, VSLT)
VCMPI(vslti_bu, 8, UB, VSLT)
VCMPI(vslti_hu, 16, UH, VSLT)
VCMPI(vslti_wu, 32, UW, VSLT)
VCMPI(vslti_du, 64, UD, VSLT)
static uint64_t vfcmp_common(CPULoongArchState *env,
                             FloatRelation cmp, uint32_t flags)
{
    uint64_t ret = 0;

    switch (cmp) {
    case float_relation_less:
        ret = (flags & FCMP_LT);
        break;
    case float_relation_equal:
        ret = (flags & FCMP_EQ);
        break;
    case float_relation_greater:
        ret = (flags & FCMP_GT);
        break;
    case float_relation_unordered:
        ret = (flags & FCMP_UN);
        break;
    default:
        g_assert_not_reached();
    }

    if (ret) {
        ret = -1;
    }

    return ret;
}
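/*
 * `flags` carries the FCMP_LT/FCMP_EQ/FCMP_GT/FCMP_UN bits selected
 * by the vfcmp condition encoding; the element is set to all ones
 * when the softfloat relation matches any requested condition, and to
 * 0 otherwise.
 */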
#define VFCMP(NAME, BIT, E, FN)                                          \
void HELPER(NAME)(CPULoongArchState *env,                                \
                  uint32_t vd, uint32_t vj, uint32_t vk, uint32_t flags) \
{                                                                        \
    int i;                                                               \
    VReg t;                                                              \
    VReg *Vd = &(env->fpr[vd].vreg);                                     \
    VReg *Vj = &(env->fpr[vj].vreg);                                     \
    VReg *Vk = &(env->fpr[vk].vreg);                                     \
                                                                         \
    vec_clear_cause(env);                                                \
    for (i = 0; i < LSX_LEN/BIT; i++) {                                  \
        FloatRelation cmp;                                               \
        cmp = FN(Vj->E(i), Vk->E(i), &env->fp_status);                   \
        t.E(i) = vfcmp_common(env, cmp, flags);                          \
        vec_update_fcsr0(env, GETPC());                                  \
    }                                                                    \
    *Vd = t;                                                             \
}
VFCMP(vfcmp_c_s, 32, UW, float32_compare_quiet)
VFCMP(vfcmp_s_s, 32, UW, float32_compare)
VFCMP(vfcmp_c_d, 64, UD, float64_compare_quiet)
VFCMP(vfcmp_s_d, 64, UD, float64_compare)
void HELPER(vbitseli_b)(void *vd, void *vj, uint64_t imm, uint32_t v)
{
    int i;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    for (i = 0; i < 16; i++) {
        Vd->B(i) = (~Vd->B(i) & Vj->B(i)) | (Vd->B(i) & imm);
    }
}
/* Copy from target/arm/tcg/sve_helper.c */
static inline bool do_match2(uint64_t n, uint64_t m0, uint64_t m1, int esz)
{
    uint64_t bits = 8 << esz;
    uint64_t ones = dup_const(esz, 1);
    uint64_t signs = ones << (bits - 1);
    uint64_t cmp0, cmp1;

    cmp1 = dup_const(esz, n);
    cmp0 = cmp1 ^ m0;
    cmp1 = cmp1 ^ m1;
    cmp0 = (cmp0 - ones) & ~cmp0;
    cmp1 = (cmp1 - ones) & ~cmp1;
    return (cmp0 | cmp1) & signs;
}
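/*
 * This is the classic "find a zero element" bit trick: after XORing
 * with the broadcast search value n, (x - ones) & ~x & signs is
 * nonzero iff some element of x is zero, i.e. iff some element of the
 * input equalled n.  Only existence matters here, since the result
 * only feeds the boolean vsetanyeqz/vsetallnez conditions below.
 */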
#define SETANYEQZ(NAME, MO)                                         \
void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
{                                                                   \
    VReg *Vj = &(env->fpr[vj].vreg);                                \
                                                                    \
    env->cf[cd & 0x7] = do_match2(0, Vj->D(0), Vj->D(1), MO);       \
}
SETANYEQZ(vsetanyeqz_b, MO_8)
SETANYEQZ(vsetanyeqz_h, MO_16)
SETANYEQZ(vsetanyeqz_w, MO_32)
SETANYEQZ(vsetanyeqz_d, MO_64)
#define SETALLNEZ(NAME, MO)                                         \
void HELPER(NAME)(CPULoongArchState *env, uint32_t cd, uint32_t vj) \
{                                                                   \
    VReg *Vj = &(env->fpr[vj].vreg);                                \
                                                                    \
    env->cf[cd & 0x7] = !do_match2(0, Vj->D(0), Vj->D(1), MO);      \
}
SETALLNEZ(vsetallnez_b, MO_8)
SETALLNEZ(vsetallnez_h, MO_16)
SETALLNEZ(vsetallnez_w, MO_32)
SETALLNEZ(vsetallnez_d, MO_64)
#define VPACKEV(NAME, BIT, E)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg temp;                                                 \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
                                                               \
    for (i = 0; i < LSX_LEN/BIT; i++) {                        \
        temp.E(2 * i + 1) = Vj->E(2 * i);                      \
        temp.E(2 * i) = Vk->E(2 * i);                          \
    }                                                          \
    *Vd = temp;                                                \
}
VPACKEV(vpackev_b, 16, B)
VPACKEV(vpackev_h, 32, H)
VPACKEV(vpackev_w, 64, W)
VPACKEV(vpackev_d, 128, D)
#define VPACKOD(NAME, BIT, E)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg temp;                                                 \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
                                                               \
    for (i = 0; i < LSX_LEN/BIT; i++) {                        \
        temp.E(2 * i + 1) = Vj->E(2 * i + 1);                  \
        temp.E(2 * i) = Vk->E(2 * i + 1);                      \
    }                                                          \
    *Vd = temp;                                                \
}
VPACKOD(vpackod_b, 16, B)
VPACKOD(vpackod_h, 32, H)
VPACKOD(vpackod_w, 64, W)
VPACKOD(vpackod_d, 128, D)
#define VPICKEV(NAME, BIT, E)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg temp;                                                 \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
                                                               \
    for (i = 0; i < LSX_LEN/BIT; i++) {                        \
        temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i);                \
        temp.E(i) = Vk->E(2 * i);                              \
    }                                                          \
    *Vd = temp;                                                \
}
VPICKEV(vpickev_b, 16, B)
VPICKEV(vpickev_h, 32, H)
VPICKEV(vpickev_w, 64, W)
VPICKEV(vpickev_d, 128, D)
#define VPICKOD(NAME, BIT, E)                                  \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg temp;                                                 \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
                                                               \
    for (i = 0; i < LSX_LEN/BIT; i++) {                        \
        temp.E(i + LSX_LEN/BIT) = Vj->E(2 * i + 1);            \
        temp.E(i) = Vk->E(2 * i + 1);                          \
    }                                                          \
    *Vd = temp;                                                \
}
VPICKOD(vpickod_b, 16, B)
VPICKOD(vpickod_h, 32, H)
VPICKOD(vpickod_w, 64, W)
VPICKOD(vpickod_d, 128, D)
#define VILVL(NAME, BIT, E)                                    \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg temp;                                                 \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
                                                               \
    for (i = 0; i < LSX_LEN/BIT; i++) {                        \
        temp.E(2 * i + 1) = Vj->E(i);                          \
        temp.E(2 * i) = Vk->E(i);                              \
    }                                                          \
    *Vd = temp;                                                \
}
VILVL(vilvl_b, 16, B)
VILVL(vilvl_h, 32, H)
VILVL(vilvl_w, 64, W)
VILVL(vilvl_d, 128, D)
#define VILVH(NAME, BIT, E)                                    \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i;                                                     \
    VReg temp;                                                 \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
                                                               \
    for (i = 0; i < LSX_LEN/BIT; i++) {                        \
        temp.E(2 * i + 1) = Vj->E(i + LSX_LEN/BIT);            \
        temp.E(2 * i) = Vk->E(i + LSX_LEN/BIT);                \
    }                                                          \
    *Vd = temp;                                                \
}
VILVH(vilvh_b, 16, B)
VILVH(vilvh_h, 32, H)
VILVH(vilvh_w, 64, W)
VILVH(vilvh_d, 128, D)
void HELPER(vshuf_b)(void *vd, void *vj, void *vk, void *va, uint32_t desc)
{
    int i, m;
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg *Vk = (VReg *)vk;
    VReg *Va = (VReg *)va;

    m = LSX_LEN/8;
    for (i = 0; i < m; i++) {
        uint64_t k = (uint8_t)Va->B(i) % (2 * m);
        temp.B(i) = k < m ? Vk->B(k) : Vj->B(k - m);
    }
    *Vd = temp;
}
#define VSHUF(NAME, BIT, E)                                    \
void HELPER(NAME)(void *vd, void *vj, void *vk, uint32_t desc) \
{                                                              \
    int i, m;                                                  \
    VReg temp;                                                 \
    VReg *Vd = (VReg *)vd;                                     \
    VReg *Vj = (VReg *)vj;                                     \
    VReg *Vk = (VReg *)vk;                                     \
                                                               \
    m = LSX_LEN/BIT;                                           \
    for (i = 0; i < m; i++) {                                  \
        uint64_t k = ((uint8_t)Vd->E(i)) % (2 * m);            \
        temp.E(i) = k < m ? Vk->E(k) : Vj->E(k - m);           \
    }                                                          \
    *Vd = temp;                                                \
}
VSHUF(vshuf_h, 16, H)
VSHUF(vshuf_w, 32, W)
VSHUF(vshuf_d, 64, D)
#define VSHUF4I(NAME, BIT, E)                                      \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int i;                                                         \
    VReg temp;                                                     \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    for (i = 0; i < LSX_LEN/BIT; i++) {                            \
        temp.E(i) = Vj->E(((i) & 0xfc) + (((imm) >>                \
                          (2 * ((i) & 0x03))) & 0x03));            \
    }                                                              \
    *Vd = temp;                                                    \
}
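/*
 * Within each group of four elements, destination index i keeps its
 * group base (i & 0xfc) and selects one of the four group members via
 * the 2-bit field (imm >> (2 * (i & 0x03))) & 0x03.  Worked example:
 * imm = 0xe4 (0b11100100) encodes selectors 0,1,2,3 and is the
 * identity permutation.
 */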
VSHUF4I(vshuf4i_b, 8, B)
VSHUF4I(vshuf4i_h, 16, H)
VSHUF4I(vshuf4i_w, 32, W)
void HELPER(vshuf4i_d)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;
    VReg temp;

    temp.D(0) = (imm & 2 ? Vj : Vd)->D(imm & 1);
    temp.D(1) = (imm & 8 ? Vj : Vd)->D((imm >> 2) & 1);
    *Vd = temp;
}
void HELPER(vpermi_w)(void *vd, void *vj, uint64_t imm, uint32_t desc)
{
    VReg temp;
    VReg *Vd = (VReg *)vd;
    VReg *Vj = (VReg *)vj;

    temp.W(0) = Vj->W(imm & 0x3);
    temp.W(1) = Vj->W((imm >> 2) & 0x3);
    temp.W(2) = Vd->W((imm >> 4) & 0x3);
    temp.W(3) = Vd->W((imm >> 6) & 0x3);
    *Vd = temp;
}
#define VEXTRINS(NAME, BIT, E, MASK)                               \
void HELPER(NAME)(void *vd, void *vj, uint64_t imm, uint32_t desc) \
{                                                                  \
    int ins, extr;                                                 \
    VReg *Vd = (VReg *)vd;                                         \
    VReg *Vj = (VReg *)vj;                                         \
                                                                   \
    ins = (imm >> 4) & MASK;                                       \
    extr = imm & MASK;                                             \
    Vd->E(ins) = Vj->E(extr);                                      \
}
VEXTRINS(vextrins_b, 8, B, 0xf)
VEXTRINS(vextrins_h, 16, H, 0x7)
VEXTRINS(vextrins_w, 32, W, 0x3)
VEXTRINS(vextrins_d, 64, D, 0x1)