/*
 * ARM AdvSIMD / SVE Vector Operations
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"

/* Note that vector data is stored in host-endian 64-bit chunks,
   so addressing units smaller than that need a host-endian fixup.  */
#ifdef HOST_WORDS_BIGENDIAN
#define H1(x)  ((x) ^ 7)
#define H2(x)  ((x) ^ 3)
#define H4(x)  ((x) ^ 1)
#else
#define H1(x)  (x)
#define H2(x)  (x)
#define H4(x)  (x)
#endif

#define SET_QC() env->vfp.qc[0] = 1

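/*
 * For example, take a 64-bit chunk holding the four 16-bit elements
 * e0..e3 of an architectural vector.  On a little-endian host, element
 * i sits at 16-bit host index i.  On a big-endian host the bytes of
 * the chunk are stored in the opposite order, so element i sits at
 * host index i ^ 3: H2(0) = 3, H2(1) = 2, H2(2) = 1, H2(3) = 0.
 */
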
static void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
{
    uint64_t *d = vd + opr_sz;
    uintptr_t i;

    for (i = opr_sz; i < max_sz; i += 8) {
        *d++ = 0;
    }
}

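/*
 * E.g. an AdvSIMD operation on a 16-byte Q register inside a 32-byte
 * SVE vector has opr_sz = 16 and max_sz = 32, so clear_tail zeroes the
 * two 64-bit chunks beyond the written 16 bytes, matching the
 * architectural requirement that AdvSIMD writes zero the high bits of
 * a wider SVE register.
 */
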
/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
static uint16_t inl_qrdmlah_s16(CPUARMState *env, int16_t src1,
                                int16_t src2, int16_t src3)
{
    /* Simplify:
     * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16
     * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15
     */
    int32_t ret = (int32_t)src1 * src2;
    ret = ((int32_t)src3 << 15) + ret + (1 << 14);
    ret >>= 15;
    if (ret != (int16_t)ret) {
        SET_QC();
        ret = (ret < 0 ? -0x8000 : 0x7fff);
    }
    return ret;
}

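/*
 * Worked example: src1 = src2 = INT16_MIN, src3 = 0.  The product is
 * 0x40000000; adding the rounding constant gives 0x40004000, and the
 * arithmetic shift right by 15 gives 0x8000 (32768).  That does not
 * fit in int16_t, so the result saturates to 0x7fff and QC is set;
 * this is the one input pair for which the doubled product overflows.
 */
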
uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1,
                                  uint32_t src2, uint32_t src3)
{
    uint16_t e1 = inl_qrdmlah_s16(env, src1, src2, src3);
    uint16_t e2 = inl_qrdmlah_s16(env, src1 >> 16, src2 >> 16, src3 >> 16);
    return deposit32(e1, 16, 16, e2);
}

void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm,
                              void *ve, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int16_t *d = vd;
    int16_t *n = vn;
    int16_t *m = vm;
    CPUARMState *env = ve;
    uintptr_t i;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = inl_qrdmlah_s16(env, n[i], m[i], d[i]);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Signed saturating rounding doubling multiply-subtract high half, 16-bit */
static uint16_t inl_qrdmlsh_s16(CPUARMState *env, int16_t src1,
                                int16_t src2, int16_t src3)
{
    /* Similarly, using subtraction:
     * = ((a3 << 16) - ((e1 * e2) << 1) + (1 << 15)) >> 16
     * = ((a3 << 15) - (e1 * e2) + (1 << 14)) >> 15
     */
    int32_t ret = (int32_t)src1 * src2;
    ret = ((int32_t)src3 << 15) - ret + (1 << 14);
    ret >>= 15;
    if (ret != (int16_t)ret) {
        SET_QC();
        ret = (ret < 0 ? -0x8000 : 0x7fff);
    }
    return ret;
}

uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1,
                                  uint32_t src2, uint32_t src3)
{
    uint16_t e1 = inl_qrdmlsh_s16(env, src1, src2, src3);
    uint16_t e2 = inl_qrdmlsh_s16(env, src1 >> 16, src2 >> 16, src3 >> 16);
    return deposit32(e1, 16, 16, e2);
}

void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm,
                              void *ve, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int16_t *d = vd;
    int16_t *n = vn;
    int16_t *m = vm;
    CPUARMState *env = ve;
    uintptr_t i;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = inl_qrdmlsh_s16(env, n[i], m[i], d[i]);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1,
                                  int32_t src2, int32_t src3)
{
    /* Simplify similarly to inl_qrdmlah_s16 above.  */
    int64_t ret = (int64_t)src1 * src2;
    ret = ((int64_t)src3 << 31) + ret + (1 << 30);
    ret >>= 31;
    if (ret != (int32_t)ret) {
        SET_QC();
        ret = (ret < 0 ? INT32_MIN : INT32_MAX);
    }
    return ret;
}

void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm,
                              void *ve, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int32_t *n = vn;
    int32_t *m = vm;
    CPUARMState *env = ve;
    uintptr_t i;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = helper_neon_qrdmlah_s32(env, n[i], m[i], d[i]);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Signed saturating rounding doubling multiply-subtract high half, 32-bit */
uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1,
                                  int32_t src2, int32_t src3)
{
    /* Simplify similarly to inl_qrdmlsh_s16 above.  */
    int64_t ret = (int64_t)src1 * src2;
    ret = ((int64_t)src3 << 31) - ret + (1 << 30);
    ret >>= 31;
    if (ret != (int32_t)ret) {
        SET_QC();
        ret = (ret < 0 ? INT32_MIN : INT32_MAX);
    }
    return ret;
}

void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm,
                              void *ve, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int32_t *n = vn;
    int32_t *m = vm;
    CPUARMState *env = ve;
    uintptr_t i;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = helper_neon_qrdmlsh_s32(env, n[i], m[i], d[i]);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Integer 8 and 16-bit dot-product.
 *
 * Note that for the loops herein, host endianness does not matter
 * with respect to the ordering of data within the 64-bit lanes.
 * All elements are treated equally, no matter where they are.
 */

void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int8_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] += n[i * 4 + 0] * m[i * 4 + 0]
              + n[i * 4 + 1] * m[i * 4 + 1]
              + n[i * 4 + 2] * m[i * 4 + 2]
              + n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

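/*
 * Each 32-bit lane of d accumulates the dot product of the four byte
 * pairs beneath it; e.g. with n = {1, 2, 3, 4, ...} and
 * m = {5, 6, 7, 8, ...}, lane 0 gains 1*5 + 2*6 + 3*7 + 4*8 = 70.
 */
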
void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint32_t *d = vd;
    uint8_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] += n[i * 4 + 0] * m[i * 4 + 0]
              + n[i * 4 + 1] * m[i * 4 + 1]
              + n[i * 4 + 2] * m[i * 4 + 2]
              + n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int64_t *d = vd;
    int16_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0]
              + (int64_t)n[i * 4 + 1] * m[i * 4 + 1]
              + (int64_t)n[i * 4 + 2] * m[i * 4 + 2]
              + (int64_t)n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd;
    uint16_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0]
              + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1]
              + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2]
              + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
    intptr_t index = simd_data(desc);
    int32_t *d = vd;
    int8_t *n = vn;
    int8_t *m_indexed = (int8_t *)vm + index * 4;

    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
     * Otherwise opr_sz is a multiple of 16.
     */
    segend = MIN(4, opr_sz_4);
    i = 0;
    do {
        int8_t m0 = m_indexed[i * 4 + 0];
        int8_t m1 = m_indexed[i * 4 + 1];
        int8_t m2 = m_indexed[i * 4 + 2];
        int8_t m3 = m_indexed[i * 4 + 3];

        do {
            d[i] += n[i * 4 + 0] * m0
                  + n[i * 4 + 1] * m1
                  + n[i * 4 + 2] * m2
                  + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;
    } while (i < opr_sz_4);

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
    intptr_t index = simd_data(desc);
    uint32_t *d = vd;
    uint8_t *n = vn;
    uint8_t *m_indexed = (uint8_t *)vm + index * 4;

    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
     * Otherwise opr_sz is a multiple of 16.
     */
    segend = MIN(4, opr_sz_4);
    i = 0;
    do {
        uint8_t m0 = m_indexed[i * 4 + 0];
        uint8_t m1 = m_indexed[i * 4 + 1];
        uint8_t m2 = m_indexed[i * 4 + 2];
        uint8_t m3 = m_indexed[i * 4 + 3];

        do {
            d[i] += n[i * 4 + 0] * m0
                  + n[i * 4 + 1] * m1
                  + n[i * 4 + 2] * m2
                  + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;
    } while (i < opr_sz_4);

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
    intptr_t index = simd_data(desc);
    int64_t *d = vd;
    int16_t *n = vn;
    int16_t *m_indexed = (int16_t *)vm + index * 4;

    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
     * Process the entire segment all at once, writing back the results
     * only after we've consumed all of the inputs.
     */
    for (i = 0; i < opr_sz_8; i += 2) {
        int64_t d0, d1;

        d0  = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
        d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
        d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
        d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
        d1  = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
        d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
        d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
        d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];

        d[i + 0] += d0;
        d[i + 1] += d1;
    }

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
    intptr_t index = simd_data(desc);
    uint64_t *d = vd;
    uint16_t *n = vn;
    uint16_t *m_indexed = (uint16_t *)vm + index * 4;

    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
     * Process the entire segment all at once, writing back the results
     * only after we've consumed all of the inputs.
     */
    for (i = 0; i < opr_sz_8; i += 2) {
        uint64_t d0, d1;

        d0  = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
        d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
        d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
        d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
        d1  = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
        d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
        d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
        d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];

        d[i + 0] += d0;
        d[i + 1] += d1;
    }

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < opr_sz / 2; i += 2) {
        float16 e0 = n[H2(i)];
        float16 e1 = m[H2(i + 1)] ^ neg_imag;
        float16 e2 = n[H2(i + 1)];
        float16 e3 = m[H2(i)] ^ neg_real;

        d[H2(i)] = float16_add(e0, e1, fpst);
        d[H2(i + 1)] = float16_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < opr_sz / 4; i += 2) {
        float32 e0 = n[H4(i)];
        float32 e1 = m[H4(i + 1)] ^ neg_imag;
        float32 e2 = n[H4(i + 1)];
        float32 e3 = m[H4(i)] ^ neg_real;

        d[H4(i)] = float32_add(e0, e1, fpst);
        d[H4(i + 1)] = float32_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float64 *d = vd;
    float64 *n = vn;
    float64 *m = vm;
    float_status *fpst = vfpst;
    uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1);
    uint64_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 63;
    neg_imag <<= 63;

    for (i = 0; i < opr_sz / 8; i += 2) {
        float64 e0 = n[i];
        float64 e1 = m[i + 1] ^ neg_imag;
        float64 e2 = n[i + 1];
        float64 e3 = m[i] ^ neg_real;

        d[i] = float64_add(e0, e1, fpst);
        d[i + 1] = float64_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint32_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < opr_sz / 2; i += 2) {
        float16 e2 = n[H2(i + flip)];
        float16 e1 = m[H2(i + flip)] ^ neg_real;
        float16 e4 = e2;
        float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;

        d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
        d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
                             void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
    uint32_t neg_real = flip ^ neg_imag;
    intptr_t elements = opr_sz / sizeof(float16);
    intptr_t eltspersegment = 16 / sizeof(float16);
    intptr_t i, j;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < elements; i += eltspersegment) {
        float16 mr = m[H2(i + 2 * index + 0)];
        float16 mi = m[H2(i + 2 * index + 1)];
        float16 e1 = neg_real ^ (flip ? mi : mr);
        float16 e3 = neg_imag ^ (flip ? mr : mi);

        for (j = i; j < i + eltspersegment; j += 2) {
            float16 e2 = n[H2(j + flip)];
            float16 e4 = e2;

            d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
            d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint32_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < opr_sz / 4; i += 2) {
        float32 e2 = n[H4(i + flip)];
        float32 e1 = m[H4(i + flip)] ^ neg_real;
        float32 e4 = e2;
        float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;

        d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
        d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
                             void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
    uint32_t neg_real = flip ^ neg_imag;
    intptr_t elements = opr_sz / sizeof(float32);
    intptr_t eltspersegment = 16 / sizeof(float32);
    intptr_t i, j;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < elements; i += eltspersegment) {
        float32 mr = m[H4(i + 2 * index + 0)];
        float32 mi = m[H4(i + 2 * index + 1)];
        float32 e1 = neg_real ^ (flip ? mi : mr);
        float32 e3 = neg_imag ^ (flip ? mr : mi);

        for (j = i; j < i + eltspersegment; j += 2) {
            float32 e2 = n[H4(j + flip)];
            float32 e4 = e2;

            d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
            d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float64 *d = vd;
    float64 *n = vn;
    float64 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint64_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 63;
    neg_imag <<= 63;

    for (i = 0; i < opr_sz / 8; i += 2) {
        float64 e2 = n[i + flip];
        float64 e1 = m[i + flip] ^ neg_real;
        float64 e4 = e2;
        float64 e3 = m[i + 1 - flip] ^ neg_imag;

        d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
        d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

#define DO_2OP(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)  \
{                                                                 \
    intptr_t i, oprsz = simd_oprsz(desc);                         \
    TYPE *d = vd, *n = vn;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                  \
        d[i] = FUNC(n[i], stat);                                  \
    }                                                             \
    clear_tail(d, oprsz, simd_maxsz(desc));                       \
}

DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16)
DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32)
DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64)

DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)

/* Floating-point trigonometric starting value.
 * See the ARM ARM pseudocode function FPTrigSMul.
 */
static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat)
{
    float16 result = float16_mul(op1, op1, stat);
    if (!float16_is_any_nan(result)) {
        result = float16_set_sign(result, op2 & 1);
    }
    return result;
}

static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat)
{
    float32 result = float32_mul(op1, op1, stat);
    if (!float32_is_any_nan(result)) {
        result = float32_set_sign(result, op2 & 1);
    }
    return result;
}

static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat)
{
    float64 result = float64_mul(op1, op1, stat);
    if (!float64_is_any_nan(result)) {
        result = float64_set_sign(result, op2 & 1);
    }
    return result;
}

#define DO_3OP(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        d[i] = FUNC(n[i], m[i], stat);                                     \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_3OP(gvec_fadd_h, float16_add, float16)
DO_3OP(gvec_fadd_s, float32_add, float32)
DO_3OP(gvec_fadd_d, float64_add, float64)

DO_3OP(gvec_fsub_h, float16_sub, float16)
DO_3OP(gvec_fsub_s, float32_sub, float32)
DO_3OP(gvec_fsub_d, float64_sub, float64)

DO_3OP(gvec_fmul_h, float16_mul, float16)
DO_3OP(gvec_fmul_s, float32_mul, float32)
DO_3OP(gvec_fmul_d, float64_mul, float64)

DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16)
DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32)
DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)

#ifdef TARGET_AARCH64

DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
DO_3OP(gvec_recps_s, helper_recpsf_f32, float32)
DO_3OP(gvec_recps_d, helper_recpsf_f64, float64)

DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)

#endif
#undef DO_3OP

/* For the indexed ops, SVE applies the index per 128-bit vector segment.
 * For AdvSIMD, there is of course only one such vector segment.
 */

#define DO_MUL_IDX(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{                                                                          \
    intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE);  \
    intptr_t idx = simd_data(desc);                                        \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
        TYPE mm = m[H(i + idx)];                                           \
        for (j = 0; j < segment; j++) {                                    \
            d[i + j] = TYPE##_mul(n[i + j], mm, stat);                     \
        }                                                                  \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_MUL_IDX(gvec_fmul_idx_h, float16, H2)
DO_MUL_IDX(gvec_fmul_idx_s, float32, H4)
DO_MUL_IDX(gvec_fmul_idx_d, float64, )

#undef DO_MUL_IDX

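/*
 * For example, gvec_fmul_idx_s on a 32-byte SVE vector with idx = 1
 * has segment = 4: lanes 0-3 are all multiplied by m[1], and lanes
 * 4-7 by m[5], since the index is applied within each 128-bit segment.
 */
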
#define DO_FMLA_IDX(NAME, TYPE, H)                                         \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va,                  \
                  void *stat, uint32_t desc)                               \
{                                                                          \
    intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE);  \
    TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1);                    \
    intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1);                          \
    TYPE *d = vd, *n = vn, *m = vm, *a = va;                               \
    op1_neg <<= (8 * sizeof(TYPE) - 1);                                    \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
        TYPE mm = m[H(i + idx)];                                           \
        for (j = 0; j < segment; j++) {                                    \
            d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg,                   \
                                     mm, a[i + j], 0, stat);               \
        }                                                                  \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2)
DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
DO_FMLA_IDX(gvec_fmla_idx_d, float64, )

#undef DO_FMLA_IDX

#define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX) \
void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc)   \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    TYPEN *d = vd, *n = vn; TYPEM *m = vm;                                 \
    bool q = false;                                                        \
    for (i = 0; i < oprsz / sizeof(TYPEN); i++) {                          \
        WTYPE dd = (WTYPE)n[i] OP m[i];                                    \
        if (dd < MIN) {                                                    \
            dd = MIN;                                                      \
            q = true;                                                      \
        } else if (dd > MAX) {                                             \
            dd = MAX;                                                      \
            q = true;                                                      \
        }                                                                  \
        d[i] = dd;                                                         \
    }                                                                      \
    if (q) {                                                               \
        uint32_t *qc = vq;                                                 \
        qc[0] = 1;                                                         \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX)
DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX)
DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX)

DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX)

DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX)
DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX)
DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX)

DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)

#undef DO_SAT

void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        uint64_t nn = n[i], mm = m[i], dd = nn + mm;
        if (dd < nn) {
            dd = UINT64_MAX;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        uint64_t nn = n[i], mm = m[i], dd = nn - mm;
        if (nn < mm) {
            dd = 0;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        int64_t nn = n[i], mm = m[i], dd = nn + mm;
        if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) {
            dd = (nn >> 63) ^ ~INT64_MIN;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        int64_t nn = n[i], mm = m[i], dd = nn - mm;
        if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) {
            dd = (nn >> 63) ^ ~INT64_MIN;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

/*
 * Convert float16 to float32, raising no exceptions and
 * preserving exceptional values, including SNaN.
 * This is effectively an unpack+repack operation.
 */
static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16)
{
    const int f16_bias = 15;
    const int f32_bias = 127;
    uint32_t sign = extract32(f16, 15, 1);
    uint32_t exp = extract32(f16, 10, 5);
    uint32_t frac = extract32(f16, 0, 10);

    if (exp == 0x1f) {
        /* Inf or NaN */
        exp = 0xff;
    } else if (exp == 0) {
        /* Zero or denormal.  */
        if (frac != 0) {
            if (fz16) {
                frac = 0;
            } else {
                /*
                 * Denormal; these are all normal float32.
                 * Shift the fraction so that the msb is at bit 11,
                 * then remove bit 11 as the implicit bit of the
                 * normalized float32.  Note that we still go through
                 * the shift for normal numbers below, to put the
                 * float32 fraction at the right place.
                 */
                int shift = clz32(frac) - 21;
                frac = (frac << shift) & 0x3ff;
                exp = f32_bias - f16_bias - shift + 1;
            }
        }
    } else {
        /* Normal number; adjust the bias.  */
        exp += f32_bias - f16_bias;
    }
    sign <<= 31;
    exp <<= 23;
    frac <<= 23 - 10;

    return sign | exp | frac;
}

static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
{
    /*
     * Branchless load of u32[0], u64[0], u32[1], or u64[1].
     * Load the 2nd qword iff is_q & is_2.
     * Shift to the 2nd dword iff !is_q & is_2.
     * For !is_q & !is_2, the upper bits of the result are garbage.
     */
    return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5);
}

/*
 * Note that FMLAL requires oprsz == 8 or oprsz == 16,
 * as there are not yet SVE versions that might use blocking.
 */

static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
                     uint32_t desc, bool fz16)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    int is_q = oprsz == 16;
    uint64_t n_4, m_4;

    /* Pre-load all of the f16 data, avoiding overlap issues.  */
    n_4 = load4_f16(vn, is_q, is_2);
    m_4 = load4_f16(vm, is_q, is_2);

    /* Negate all inputs for FMLSL at once.  */
    if (is_s) {
        n_4 ^= 0x8000800080008000ull;
    }

    for (i = 0; i < oprsz / 4; i++) {
        float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
        float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16);
        d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm,
                            void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc,
             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
                            void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc,
             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
                         uint32_t desc, bool fz16)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3);
    int is_q = oprsz == 16;
    uint64_t n_4;
    float32 m_1;

    /* Pre-load all of the f16 data, avoiding overlap issues.  */
    n_4 = load4_f16(vn, is_q, is_2);

    /* Negate all inputs for FMLSL at once.  */
    if (is_s) {
        n_4 ^= 0x8000800080008000ull;
    }

    m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16);

    for (i = 0; i < oprsz / 4; i++) {
        float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
        d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm,
                                void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc,
                 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
                                void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
                 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int8_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz; ++i) {
        int8_t mm = m[i];
        int8_t nn = n[i];
        int8_t res = 0;
        if (mm >= 0) {
            if (mm < 8) {
                res = nn << mm;
            }
        } else {
            res = nn >> (mm > -8 ? -mm : 7);
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        int8_t mm = m[i];   /* only 8 bits of shift are significant */
        int16_t nn = n[i];
        int16_t res = 0;
        if (mm >= 0) {
            if (mm < 16) {
                res = nn << mm;
            }
        } else {
            res = nn >> (mm > -16 ? -mm : 15);
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint8_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz; ++i) {
        int8_t mm = m[i];
        uint8_t nn = n[i];
        uint8_t res = 0;
        if (mm >= 0) {
            if (mm < 8) {
                res = nn << mm;
            }
        } else {
            if (mm > -8) {
                res = nn >> -mm;
            }
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        int8_t mm = m[i];   /* only 8 bits of shift are significant */
        uint16_t nn = n[i];
        uint16_t res = 0;
        if (mm >= 0) {
            if (mm < 16) {
                res = nn << mm;
            }
        } else {
            if (mm > -16) {
                res = nn >> -mm;
            }
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/*
 * 8x8->8 polynomial multiply.
 *
 * Polynomial multiplication is like integer multiplication except the
 * partial products are XORed, not added.
 *
 * TODO: expose this as a generic vector operation, as it is a common
 * crypto building block.
 */

void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = n[i];
        uint64_t mm = m[i];
        uint64_t rr = 0;

        for (j = 0; j < 8; ++j) {
            uint64_t mask = (nn & 0x0101010101010101ull) * 0xff;
            rr ^= mm & mask;
            mm = (mm << 1) & 0xfefefefefefefefeull;
            nn >>= 1;
        }
        d[i] = rr;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/*
 * 64x64->128 polynomial multiply.
 * Because the lanes are not accessed in strict columns,
 * this probably cannot be turned into a generic helper.
 */

void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    intptr_t hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; i += 2) {
        uint64_t nn = n[i + hi];
        uint64_t mm = m[i + hi];
        uint64_t rhi = 0;
        uint64_t rlo = 0;

        /* Bit 0 can only influence the low 64-bit result.  */
        if (nn & 1) {
            rlo = mm;
        }

        for (j = 1; j < 64; ++j) {
            uint64_t mask = -((nn >> j) & 1);
            rlo ^= (mm << j) & mask;
            rhi ^= (mm >> (64 - j)) & mask;
        }
        d[i] = rlo;
        d[i + 1] = rhi;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/*
 * 8x8->16 polynomial multiply.
 *
 * The byte inputs are expanded to (or extracted from) half-words.
 * Note that neon and sve2 get the inputs from different positions.
 * This allows 4 bytes to be processed in parallel with uint64_t.
 */

static uint64_t expand_byte_to_half(uint64_t x)
{
    return  (x & 0x000000ff)
         | ((x & 0x0000ff00) << 8)
         | ((x & 0x00ff0000) << 16)
         | ((x & 0xff000000) << 24);
}

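/*
 * E.g. expand_byte_to_half(0xaabbccdd) == 0x00aa00bb00cc00dd, i.e.
 * each byte of the low 32-bit input lands in its own 16-bit lane.
 */
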
static uint64_t pmull_h(uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
        result ^= op2 & mask;
        op1 >>= 1;
        op2 <<= 1;
    }
    return result;
}

void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    uint64_t nn = n[hi], mm = m[hi];

    d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
    nn >>= 32;
    mm >>= 32;
    d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));

    clear_tail(d, 16, simd_maxsz(desc));
}

#ifdef TARGET_AARCH64
void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int shift = simd_data(desc) * 8;
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull;
        uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull;

        d[i] = pmull_h(nn, mm);
    }
}
#endif