/*
 * ARM AdvSIMD / SVE Vector Operations
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "exec/helper-proto.h"
#include "tcg/tcg-gvec-desc.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"

/* Note that vector data is stored in host-endian 64-bit chunks,
   so addressing units smaller than that need a host-endian fixup.  */
#ifdef HOST_WORDS_BIGENDIAN
#define H1(x)  ((x) ^ 7)
#define H2(x)  ((x) ^ 3)
#define H4(x)  ((x) ^ 1)
#else
#define H1(x)  (x)
#define H2(x)  (x)
#define H4(x)  (x)
#endif

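/*
 * For example, on a big-endian host the 16-bit element with logical
 * index 0 lives in the most-significant half-word of the first 64-bit
 * chunk, so H2(0) == 3 redirects the access to the correct uint16_t
 * slot; on a little-endian host the macros are the identity mapping.
 */
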
/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
                             bool neg, bool round, uint32_t *sat)
{
    /*
     * Simplify:
     * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16
     * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15
     */
    int32_t ret = (int32_t)src1 * src2;
    if (neg) {
        ret = -ret;
    }
    ret += ((int32_t)src3 << 15) + (round << 14);
    ret >>= 15;

    if (ret != (int16_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? INT16_MIN : INT16_MAX);
    }
    return ret;
}

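/*
 * Example, with neg = false, round = true and src3 == 0 (the SQRDMULH
 * case): src1 = src2 = 0x4000 gives a product of 0x10000000; adding the
 * rounding constant 0x4000 and shifting right by 15 yields 0x2000.
 * With src1 = src2 = INT16_MIN the shifted result is 0x8000, which does
 * not survive the int16_t cast, so the value saturates to INT16_MAX and
 * *sat is set.
 */
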
uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1,
                                  uint32_t src2, uint32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, false, true, sat);
    uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
                                false, true, sat);
    return deposit32(e1, 16, 16, e2);
}

void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = do_sqrdmlah_h(n[i], m[i], d[i], false, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1,
                                  uint32_t src2, uint32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, true, true, sat);
    uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
                                true, true, sat);
    return deposit32(e1, 16, 16, e2);
}

void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = do_sqrdmlah_h(n[i], m[i], d[i], true, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(neon_sqdmulh_h)(void *vd, void *vn, void *vm,
                            void *vq, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, false, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(neon_sqrdmulh_h)(void *vd, void *vn, void *vm,
                             void *vq, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = do_sqrdmlah_h(n[i], m[i], 0, false, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
                             bool neg, bool round, uint32_t *sat)
{
    /* Simplify similarly to do_sqrdmlah_h above.  */
    int64_t ret = (int64_t)src1 * src2;
    if (neg) {
        ret = -ret;
    }
    ret += ((int64_t)src3 << 31) + (round << 30);
    ret >>= 31;

    if (ret != (int32_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? INT32_MIN : INT32_MAX);
    }
    return ret;
}

uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1,
                                  int32_t src2, int32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    return do_sqrdmlah_s(src1, src2, src3, false, true, sat);
}

void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int32_t *d = vd, *n = vn, *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = do_sqrdmlah_s(n[i], m[i], d[i], false, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1,
                                  int32_t src2, int32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    return do_sqrdmlah_s(src1, src2, src3, true, true, sat);
}

void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int32_t *d = vd, *n = vn, *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = do_sqrdmlah_s(n[i], m[i], d[i], true, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(neon_sqdmulh_s)(void *vd, void *vn, void *vm,
                            void *vq, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int32_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, false, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(neon_sqrdmulh_s)(void *vd, void *vn, void *vm,
                             void *vq, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int32_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = do_sqrdmlah_s(n[i], m[i], 0, false, true, vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Integer 8 and 16-bit dot-product.
 *
 * Note that for the loops herein, host endianness does not matter
 * with respect to the ordering of data within the 64-bit lanes.
 * All elements are treated equally, no matter where they are.
 */

void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int8_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] += n[i * 4 + 0] * m[i * 4 + 0]
              + n[i * 4 + 1] * m[i * 4 + 1]
              + n[i * 4 + 2] * m[i * 4 + 2]
              + n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

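/*
 * Each 32-bit lane accumulates the sum of four byte products taken from
 * the corresponding byte group of the sources.  E.g. with signed bytes
 * n = {1, 2, 3, 4} and m = {5, 6, 7, 8}, lane 0 gains
 * 1*5 + 2*6 + 3*7 + 4*8 = 70.
 */
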
void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint32_t *d = vd;
    uint8_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] += n[i * 4 + 0] * m[i * 4 + 0]
              + n[i * 4 + 1] * m[i * 4 + 1]
              + n[i * 4 + 2] * m[i * 4 + 2]
              + n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int64_t *d = vd;
    int16_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0]
              + (int64_t)n[i * 4 + 1] * m[i * 4 + 1]
              + (int64_t)n[i * 4 + 2] * m[i * 4 + 2]
              + (int64_t)n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd;
    uint16_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0]
              + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1]
              + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2]
              + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
    intptr_t index = simd_data(desc);
    int32_t *d = vd;
    int8_t *n = vn;
    int8_t *m_indexed = (int8_t *)vm + H4(index) * 4;

    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
     * Otherwise opr_sz is a multiple of 16.
     */
    segend = MIN(4, opr_sz_4);
    i = 0;
    do {
        int8_t m0 = m_indexed[i * 4 + 0];
        int8_t m1 = m_indexed[i * 4 + 1];
        int8_t m2 = m_indexed[i * 4 + 2];
        int8_t m3 = m_indexed[i * 4 + 3];

        do {
            d[i] += n[i * 4 + 0] * m0
                  + n[i * 4 + 1] * m1
                  + n[i * 4 + 2] * m2
                  + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;
    } while (i < opr_sz_4);

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

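/*
 * For the indexed dot product, the four bytes selected by "index" within
 * each 128-bit segment of vm (m0..m3 above) serve as the multiplier for
 * every 32-bit lane of that segment; the outer do/while reloads them
 * once per segment.
 */
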
void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
    intptr_t index = simd_data(desc);
    uint32_t *d = vd;
    uint8_t *n = vn;
    uint8_t *m_indexed = (uint8_t *)vm + H4(index) * 4;

    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
     * Otherwise opr_sz is a multiple of 16.
     */
    segend = MIN(4, opr_sz_4);
    i = 0;
    do {
        uint8_t m0 = m_indexed[i * 4 + 0];
        uint8_t m1 = m_indexed[i * 4 + 1];
        uint8_t m2 = m_indexed[i * 4 + 2];
        uint8_t m3 = m_indexed[i * 4 + 3];

        do {
            d[i] += n[i * 4 + 0] * m0
                  + n[i * 4 + 1] * m1
                  + n[i * 4 + 2] * m2
                  + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;
    } while (i < opr_sz_4);

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
    intptr_t index = simd_data(desc);
    int64_t *d = vd;
    int16_t *n = vn;
    int16_t *m_indexed = (int16_t *)vm + index * 4;

    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
     * Process the entire segment all at once, writing back the results
     * only after we've consumed all of the inputs.
     */
    for (i = 0; i < opr_sz_8; i += 2) {
        int64_t d0, d1;

        d0  = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
        d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
        d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
        d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
        d1  = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
        d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
        d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
        d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];

        d[i + 0] += d0;
        d[i + 1] += d1;
    }

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
    intptr_t index = simd_data(desc);
    uint64_t *d = vd;
    uint16_t *n = vn;
    uint16_t *m_indexed = (uint16_t *)vm + index * 4;

    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
     * Process the entire segment all at once, writing back the results
     * only after we've consumed all of the inputs.
     */
    for (i = 0; i < opr_sz_8; i += 2) {
        uint64_t d0, d1;

        d0  = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
        d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
        d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
        d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
        d1  = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
        d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
        d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
        d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];

        d[i + 0] += d0;
        d[i + 1] += d1;
    }

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd, *n = vn, *m = vm;
    float_status *fpst = vfpst;
    uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < opr_sz / 2; i += 2) {
        float16 e0 = n[H2(i)];
        float16 e1 = m[H2(i + 1)] ^ neg_imag;
        float16 e2 = n[H2(i + 1)];
        float16 e3 = m[H2(i)] ^ neg_real;

        d[H2(i)] = float16_add(e0, e1, fpst);
        d[H2(i + 1)] = float16_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

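/*
 * Depending on the single desc bit, either the imaginary element of m is
 * negated before being added to the real lane, or the real element of m
 * is negated before being added to the imaginary lane; these are the two
 * rotations (90 and 270 degrees) that FCADD provides.
 */
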
void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd, *n = vn, *m = vm;
    float_status *fpst = vfpst;
    uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < opr_sz / 4; i += 2) {
        float32 e0 = n[H4(i)];
        float32 e1 = m[H4(i + 1)] ^ neg_imag;
        float32 e2 = n[H4(i + 1)];
        float32 e3 = m[H4(i)] ^ neg_real;

        d[H4(i)] = float32_add(e0, e1, fpst);
        d[H4(i + 1)] = float32_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float64 *d = vd, *n = vn, *m = vm;
    float_status *fpst = vfpst;
    uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1);
    uint64_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 63;
    neg_imag <<= 63;

    for (i = 0; i < opr_sz / 8; i += 2) {
        float64 e0 = n[i];
        float64 e1 = m[i + 1] ^ neg_imag;
        float64 e2 = n[i + 1];
        float64 e3 = m[i] ^ neg_real;

        d[i] = float64_add(e0, e1, fpst);
        d[i + 1] = float64_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd, *n = vn, *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint32_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < opr_sz / 2; i += 2) {
        float16 e2 = n[H2(i + flip)];
        float16 e1 = m[H2(i + flip)] ^ neg_real;
        float16 e4 = e2;
        float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;

        d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
        d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

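/*
 * The two desc bits encode the FCMLA rotation: "flip" chooses whether the
 * real or imaginary element of n feeds both products, and neg_real /
 * neg_imag flip the signs of the two m operands; together they cover the
 * four rotations (0, 90, 180, 270 degrees).
 */
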
void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
                             void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd, *n = vn, *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
    uint32_t neg_real = flip ^ neg_imag;
    intptr_t elements = opr_sz / sizeof(float16);
    intptr_t eltspersegment = 16 / sizeof(float16);
    intptr_t i, j;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < elements; i += eltspersegment) {
        float16 mr = m[H2(i + 2 * index + 0)];
        float16 mi = m[H2(i + 2 * index + 1)];
        float16 e1 = neg_real ^ (flip ? mi : mr);
        float16 e3 = neg_imag ^ (flip ? mr : mi);

        for (j = i; j < i + eltspersegment; j += 2) {
            float16 e2 = n[H2(j + flip)];
            float16 e4 = e2;

            d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
            d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd, *n = vn, *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint32_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < opr_sz / 4; i += 2) {
        float32 e2 = n[H4(i + flip)];
        float32 e1 = m[H4(i + flip)] ^ neg_real;
        float32 e4 = e2;
        float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;

        d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
        d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
                             void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd, *n = vn, *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
    uint32_t neg_real = flip ^ neg_imag;
    intptr_t elements = opr_sz / sizeof(float32);
    intptr_t eltspersegment = 16 / sizeof(float32);
    intptr_t i, j;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < elements; i += eltspersegment) {
        float32 mr = m[H4(i + 2 * index + 0)];
        float32 mi = m[H4(i + 2 * index + 1)];
        float32 e1 = neg_real ^ (flip ? mi : mr);
        float32 e3 = neg_imag ^ (flip ? mr : mi);

        for (j = i; j < i + eltspersegment; j += 2) {
            float32 e2 = n[H4(j + flip)];
            float32 e4 = e2;

            d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
            d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float64 *d = vd, *n = vn, *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint64_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 63;
    neg_imag <<= 63;

    for (i = 0; i < opr_sz / 8; i += 2) {
        float64 e2 = n[i + flip];
        float64 e1 = m[i + flip] ^ neg_real;
        float64 e4 = e2;
        float64 e3 = m[i + 1 - flip] ^ neg_imag;

        d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
        d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/*
 * Floating point comparisons producing an integer result (all 1s or all 0s).
 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
 */

static uint16_t float16_ceq(float16 op1, float16 op2, float_status *stat)
{
    return -float16_eq_quiet(op1, op2, stat);
}

static uint32_t float32_ceq(float32 op1, float32 op2, float_status *stat)
{
    return -float32_eq_quiet(op1, op2, stat);
}

static uint16_t float16_cge(float16 op1, float16 op2, float_status *stat)
{
    return -float16_le(op2, op1, stat);
}

static uint32_t float32_cge(float32 op1, float32 op2, float_status *stat)
{
    return -float32_le(op2, op1, stat);
}

static uint16_t float16_cgt(float16 op1, float16 op2, float_status *stat)
{
    return -float16_lt(op2, op1, stat);
}

static uint32_t float32_cgt(float32 op1, float32 op2, float_status *stat)
{
    return -float32_lt(op2, op1, stat);
}

static uint16_t float16_acge(float16 op1, float16 op2, float_status *stat)
{
    return -float16_le(float16_abs(op2), float16_abs(op1), stat);
}

static uint32_t float32_acge(float32 op1, float32 op2, float_status *stat)
{
    return -float32_le(float32_abs(op2), float32_abs(op1), stat);
}

static uint16_t float16_acgt(float16 op1, float16 op2, float_status *stat)
{
    return -float16_lt(float16_abs(op2), float16_abs(op1), stat);
}

static uint32_t float32_acgt(float32 op1, float32 op2, float_status *stat)
{
    return -float32_lt(float32_abs(op2), float32_abs(op1), stat);
}

static int16_t vfp_tosszh(float16 x, void *fpstp)
{
    float_status *fpst = fpstp;
    if (float16_is_any_nan(x)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16_round_to_zero(x, fpst);
}

static uint16_t vfp_touszh(float16 x, void *fpstp)
{
    float_status *fpst = fpstp;
    if (float16_is_any_nan(x)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16_round_to_zero(x, fpst);
}

#define DO_2OP(NAME, FUNC, TYPE)                                   \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)   \
{                                                                  \
    intptr_t i, oprsz = simd_oprsz(desc);                          \
    TYPE *d = vd, *n = vn;                                         \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                   \
        d[i] = FUNC(n[i], stat);                                   \
    }                                                              \
    clear_tail(d, oprsz, simd_maxsz(desc));                        \
}

DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16)
DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32)
DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64)

DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)

DO_2OP(gvec_vrintx_h, float16_round_to_int, float16)
DO_2OP(gvec_vrintx_s, float32_round_to_int, float32)

DO_2OP(gvec_sitos, helper_vfp_sitos, int32_t)
DO_2OP(gvec_uitos, helper_vfp_uitos, uint32_t)
DO_2OP(gvec_tosizs, helper_vfp_tosizs, float32)
DO_2OP(gvec_touizs, helper_vfp_touizs, float32)
DO_2OP(gvec_sstoh, int16_to_float16, int16_t)
DO_2OP(gvec_ustoh, uint16_to_float16, uint16_t)
DO_2OP(gvec_tosszh, vfp_tosszh, float16)
DO_2OP(gvec_touszh, vfp_touszh, float16)

#define WRAP_CMP0_FWD(FN, CMPOP, TYPE)                          \
    static TYPE TYPE##_##FN##0(TYPE op, float_status *stat)     \
    {                                                           \
        return TYPE##_##CMPOP(op, TYPE##_zero, stat);           \
    }

#define WRAP_CMP0_REV(FN, CMPOP, TYPE)                          \
    static TYPE TYPE##_##FN##0(TYPE op, float_status *stat)     \
    {                                                           \
        return TYPE##_##CMPOP(TYPE##_zero, op, stat);           \
    }

#define DO_2OP_CMP0(FN, CMPOP, DIRN)                    \
    WRAP_CMP0_##DIRN(FN, CMPOP, float16)                \
    WRAP_CMP0_##DIRN(FN, CMPOP, float32)                \
    DO_2OP(gvec_f##FN##0_h, float16_##FN##0, float16)   \
    DO_2OP(gvec_f##FN##0_s, float32_##FN##0, float32)

DO_2OP_CMP0(cgt, cgt, FWD)
DO_2OP_CMP0(cge, cge, FWD)
DO_2OP_CMP0(ceq, ceq, FWD)
DO_2OP_CMP0(clt, cgt, REV)
DO_2OP_CMP0(cle, cge, REV)

/* Floating-point trigonometric starting value.
 * See the ARM ARM pseudocode function FPTrigSMul.
 */
static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat)
{
    float16 result = float16_mul(op1, op1, stat);
    if (!float16_is_any_nan(result)) {
        result = float16_set_sign(result, op2 & 1);
    }
    return result;
}

static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat)
{
    float32 result = float32_mul(op1, op1, stat);
    if (!float32_is_any_nan(result)) {
        result = float32_set_sign(result, op2 & 1);
    }
    return result;
}

static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat)
{
    float64 result = float64_mul(op1, op1, stat);
    if (!float64_is_any_nan(result)) {
        result = float64_set_sign(result, op2 & 1);
    }
    return result;
}

static float16 float16_abd(float16 op1, float16 op2, float_status *stat)
{
    return float16_abs(float16_sub(op1, op2, stat));
}

static float32 float32_abd(float32 op1, float32 op2, float_status *stat)
{
    return float32_abs(float32_sub(op1, op2, stat));
}

/*
 * Reciprocal step. These are the AArch32 version which uses a
 * non-fused multiply-and-subtract.
 */
static float16 float16_recps_nf(float16 op1, float16 op2, float_status *stat)
{
    op1 = float16_squash_input_denormal(op1, stat);
    op2 = float16_squash_input_denormal(op2, stat);

    if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
        (float16_is_infinity(op2) && float16_is_zero(op1))) {
        return float16_two;
    }
    return float16_sub(float16_two, float16_mul(op1, op2, stat), stat);
}

static float32 float32_recps_nf(float32 op1, float32 op2, float_status *stat)
{
    op1 = float32_squash_input_denormal(op1, stat);
    op2 = float32_squash_input_denormal(op2, stat);

    if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
        (float32_is_infinity(op2) && float32_is_zero(op1))) {
        return float32_two;
    }
    return float32_sub(float32_two, float32_mul(op1, op2, stat), stat);
}

/* Reciprocal square-root step. AArch32 non-fused semantics.  */
static float16 float16_rsqrts_nf(float16 op1, float16 op2, float_status *stat)
{
    op1 = float16_squash_input_denormal(op1, stat);
    op2 = float16_squash_input_denormal(op2, stat);

    if ((float16_is_infinity(op1) && float16_is_zero(op2)) ||
        (float16_is_infinity(op2) && float16_is_zero(op1))) {
        return float16_one_point_five;
    }
    op1 = float16_sub(float16_three, float16_mul(op1, op2, stat), stat);
    return float16_div(op1, float16_two, stat);
}

static float32 float32_rsqrts_nf(float32 op1, float32 op2, float_status *stat)
{
    op1 = float32_squash_input_denormal(op1, stat);
    op2 = float32_squash_input_denormal(op2, stat);

    if ((float32_is_infinity(op1) && float32_is_zero(op2)) ||
        (float32_is_infinity(op2) && float32_is_zero(op1))) {
        return float32_one_point_five;
    }
    op1 = float32_sub(float32_three, float32_mul(op1, op2, stat), stat);
    return float32_div(op1, float32_two, stat);
}

#define DO_3OP(NAME, FUNC, TYPE)                                            \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc)  \
{                                                                           \
    intptr_t i, oprsz = simd_oprsz(desc);                                   \
    TYPE *d = vd, *n = vn, *m = vm;                                         \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                            \
        d[i] = FUNC(n[i], m[i], stat);                                      \
    }                                                                       \
    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
}

DO_3OP(gvec_fadd_h, float16_add, float16)
DO_3OP(gvec_fadd_s, float32_add, float32)
DO_3OP(gvec_fadd_d, float64_add, float64)

DO_3OP(gvec_fsub_h, float16_sub, float16)
DO_3OP(gvec_fsub_s, float32_sub, float32)
DO_3OP(gvec_fsub_d, float64_sub, float64)

DO_3OP(gvec_fmul_h, float16_mul, float16)
DO_3OP(gvec_fmul_s, float32_mul, float32)
DO_3OP(gvec_fmul_d, float64_mul, float64)

DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16)
DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32)
DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)

DO_3OP(gvec_fabd_h, float16_abd, float16)
DO_3OP(gvec_fabd_s, float32_abd, float32)

DO_3OP(gvec_fceq_h, float16_ceq, float16)
DO_3OP(gvec_fceq_s, float32_ceq, float32)

DO_3OP(gvec_fcge_h, float16_cge, float16)
DO_3OP(gvec_fcge_s, float32_cge, float32)

DO_3OP(gvec_fcgt_h, float16_cgt, float16)
DO_3OP(gvec_fcgt_s, float32_cgt, float32)

DO_3OP(gvec_facge_h, float16_acge, float16)
DO_3OP(gvec_facge_s, float32_acge, float32)

DO_3OP(gvec_facgt_h, float16_acgt, float16)
DO_3OP(gvec_facgt_s, float32_acgt, float32)

DO_3OP(gvec_fmax_h, float16_max, float16)
DO_3OP(gvec_fmax_s, float32_max, float32)

DO_3OP(gvec_fmin_h, float16_min, float16)
DO_3OP(gvec_fmin_s, float32_min, float32)

DO_3OP(gvec_fmaxnum_h, float16_maxnum, float16)
DO_3OP(gvec_fmaxnum_s, float32_maxnum, float32)

DO_3OP(gvec_fminnum_h, float16_minnum, float16)
DO_3OP(gvec_fminnum_s, float32_minnum, float32)

DO_3OP(gvec_recps_nf_h, float16_recps_nf, float16)
DO_3OP(gvec_recps_nf_s, float32_recps_nf, float32)

DO_3OP(gvec_rsqrts_nf_h, float16_rsqrts_nf, float16)
DO_3OP(gvec_rsqrts_nf_s, float32_rsqrts_nf, float32)

#ifdef TARGET_AARCH64

DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
DO_3OP(gvec_recps_s, helper_recpsf_f32, float32)
DO_3OP(gvec_recps_d, helper_recpsf_f64, float64)

DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)

#endif

/* Non-fused multiply-add (unlike float16_muladd etc, which are fused) */
static float16 float16_muladd_nf(float16 dest, float16 op1, float16 op2,
                                 float_status *stat)
{
    return float16_add(dest, float16_mul(op1, op2, stat), stat);
}

static float32 float32_muladd_nf(float32 dest, float32 op1, float32 op2,
                                 float_status *stat)
{
    return float32_add(dest, float32_mul(op1, op2, stat), stat);
}

static float16 float16_mulsub_nf(float16 dest, float16 op1, float16 op2,
                                 float_status *stat)
{
    return float16_sub(dest, float16_mul(op1, op2, stat), stat);
}

static float32 float32_mulsub_nf(float32 dest, float32 op1, float32 op2,
                                 float_status *stat)
{
    return float32_sub(dest, float32_mul(op1, op2, stat), stat);
}

989 static float16
float16_muladd_f(float16 dest
, float16 op1
, float16 op2
,
992 return float16_muladd(op1
, op2
, dest
, 0, stat
);
995 static float32
float32_muladd_f(float32 dest
, float32 op1
, float32 op2
,
998 return float32_muladd(op1
, op2
, dest
, 0, stat
);
1001 static float16
float16_mulsub_f(float16 dest
, float16 op1
, float16 op2
,
1004 return float16_muladd(float16_chs(op1
), op2
, dest
, 0, stat
);
1007 static float32
float32_mulsub_f(float32 dest
, float32 op1
, float32 op2
,
1010 return float32_muladd(float32_chs(op1
), op2
, dest
, 0, stat
);
#define DO_MULADD(NAME, FUNC, TYPE)                                         \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc)  \
{                                                                           \
    intptr_t i, oprsz = simd_oprsz(desc);                                   \
    TYPE *d = vd, *n = vn, *m = vm;                                         \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                            \
        d[i] = FUNC(d[i], n[i], m[i], stat);                                \
    }                                                                       \
    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
}

DO_MULADD(gvec_fmla_h, float16_muladd_nf, float16)
DO_MULADD(gvec_fmla_s, float32_muladd_nf, float32)

DO_MULADD(gvec_fmls_h, float16_mulsub_nf, float16)
DO_MULADD(gvec_fmls_s, float32_mulsub_nf, float32)

DO_MULADD(gvec_vfma_h, float16_muladd_f, float16)
DO_MULADD(gvec_vfma_s, float32_muladd_f, float32)

DO_MULADD(gvec_vfms_h, float16_mulsub_f, float16)
DO_MULADD(gvec_vfms_s, float32_mulsub_f, float32)

/* For the indexed ops, SVE applies the index per 128-bit vector segment.
 * For AdvSIMD, there is of course only one such vector segment.
 */

#define DO_MUL_IDX(NAME, TYPE, H)                                           \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)              \
{                                                                           \
    intptr_t i, j, oprsz = simd_oprsz(desc);                                \
    intptr_t segment = MIN(16, oprsz) / sizeof(TYPE);                       \
    intptr_t idx = simd_data(desc);                                         \
    TYPE *d = vd, *n = vn, *m = vm;                                         \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                   \
        TYPE mm = m[H(i + idx)];                                            \
        for (j = 0; j < segment; j++) {                                     \
            d[i + j] = n[i + j] * mm;                                       \
        }                                                                   \
    }                                                                       \
    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
}

DO_MUL_IDX(gvec_mul_idx_h, uint16_t, H2)
DO_MUL_IDX(gvec_mul_idx_s, uint32_t, H4)
DO_MUL_IDX(gvec_mul_idx_d, uint64_t, )

#define DO_MLA_IDX(NAME, TYPE, OP, H)                                       \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc)    \
{                                                                           \
    intptr_t i, j, oprsz = simd_oprsz(desc);                                \
    intptr_t segment = MIN(16, oprsz) / sizeof(TYPE);                       \
    intptr_t idx = simd_data(desc);                                         \
    TYPE *d = vd, *n = vn, *m = vm, *a = va;                                \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                   \
        TYPE mm = m[H(i + idx)];                                            \
        for (j = 0; j < segment; j++) {                                     \
            d[i + j] = a[i + j] OP n[i + j] * mm;                           \
        }                                                                   \
    }                                                                       \
    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
}

DO_MLA_IDX(gvec_mla_idx_h, uint16_t, +, H2)
DO_MLA_IDX(gvec_mla_idx_s, uint32_t, +, H4)
DO_MLA_IDX(gvec_mla_idx_d, uint64_t, +, )

DO_MLA_IDX(gvec_mls_idx_h, uint16_t, -, H2)
DO_MLA_IDX(gvec_mls_idx_s, uint32_t, -, H4)
DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, )

#define DO_FMUL_IDX(NAME, ADD, TYPE, H)                                     \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc)  \
{                                                                           \
    intptr_t i, j, oprsz = simd_oprsz(desc);                                \
    intptr_t segment = MIN(16, oprsz) / sizeof(TYPE);                       \
    intptr_t idx = simd_data(desc);                                         \
    TYPE *d = vd, *n = vn, *m = vm;                                         \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                   \
        TYPE mm = m[H(i + idx)];                                            \
        for (j = 0; j < segment; j++) {                                     \
            d[i + j] = TYPE##_##ADD(d[i + j],                               \
                                    TYPE##_mul(n[i + j], mm, stat), stat);  \
        }                                                                   \
    }                                                                       \
    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
}

#define float16_nop(N, M, S) (M)
#define float32_nop(N, M, S) (M)
#define float64_nop(N, M, S) (M)

DO_FMUL_IDX(gvec_fmul_idx_h, nop, float16, H2)
DO_FMUL_IDX(gvec_fmul_idx_s, nop, float32, H4)
DO_FMUL_IDX(gvec_fmul_idx_d, nop, float64, )

/*
 * Non-fused multiply-accumulate operations, for Neon. NB that unlike
 * the fused ops below they accumulate both from and into Vd.
 */
DO_FMUL_IDX(gvec_fmla_nf_idx_h, add, float16, H2)
DO_FMUL_IDX(gvec_fmla_nf_idx_s, add, float32, H4)
DO_FMUL_IDX(gvec_fmls_nf_idx_h, sub, float16, H2)
DO_FMUL_IDX(gvec_fmls_nf_idx_s, sub, float32, H4)

#define DO_FMLA_IDX(NAME, TYPE, H)                                          \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va,                   \
                  void *stat, uint32_t desc)                                \
{                                                                           \
    intptr_t i, j, oprsz = simd_oprsz(desc);                                \
    intptr_t segment = MIN(16, oprsz) / sizeof(TYPE);                       \
    TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1);                     \
    intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1);                           \
    TYPE *d = vd, *n = vn, *m = vm, *a = va;                                \
    op1_neg <<= (8 * sizeof(TYPE) - 1);                                     \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                   \
        TYPE mm = m[H(i + idx)];                                            \
        for (j = 0; j < segment; j++) {                                     \
            d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg,                    \
                                     mm, a[i + j], 0, stat);                \
        }                                                                   \
    }                                                                       \
    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
}

DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2)
DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
DO_FMLA_IDX(gvec_fmla_idx_d, float64, )

#define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX)                     \
void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc)    \
{                                                                           \
    intptr_t i, oprsz = simd_oprsz(desc);                                   \
    TYPEN *d = vd, *n = vn; TYPEM *m = vm;                                  \
    bool q = false;                                                         \
    for (i = 0; i < oprsz / sizeof(TYPEN); i++) {                           \
        WTYPE dd = (WTYPE)n[i] OP m[i];                                     \
        if (dd < MIN) {                                                     \
            dd = MIN;                                                       \
            q = true;                                                       \
        } else if (dd > MAX) {                                              \
            dd = MAX;                                                       \
            q = true;                                                       \
        }                                                                   \
        d[i] = dd;                                                          \
    }                                                                       \
    if (q) {                                                                \
        uint32_t *qc = vq;                                                  \
        qc[0] = 1;                                                          \
    }                                                                       \
    clear_tail(d, oprsz, simd_maxsz(desc));                                 \
}

DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX)
DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX)
DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX)

DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX)

DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX)
DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX)
DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX)

DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)

void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        uint64_t nn = n[i], mm = m[i], dd = nn + mm;
        if (dd < nn) {
            dd = UINT64_MAX;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        uint64_t nn = n[i], mm = m[i], dd = nn - mm;
        if (nn < mm) {
            dd = 0;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        int64_t nn = n[i], mm = m[i], dd = nn + mm;
        if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) {
            dd = (nn >> 63) ^ ~INT64_MIN;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        int64_t nn = n[i], mm = m[i], dd = nn - mm;
        if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) {
            dd = (nn >> 63) ^ ~INT64_MIN;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

#define DO_SRA(NAME, TYPE)                              \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);               \
    int shift = simd_data(desc);                        \
    TYPE *d = vd, *n = vn;                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
        d[i] += n[i] >> shift;                          \
    }                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));             \
}

DO_SRA(gvec_ssra_b, int8_t)
DO_SRA(gvec_ssra_h, int16_t)
DO_SRA(gvec_ssra_s, int32_t)
DO_SRA(gvec_ssra_d, int64_t)

DO_SRA(gvec_usra_b, uint8_t)
DO_SRA(gvec_usra_h, uint16_t)
DO_SRA(gvec_usra_s, uint32_t)
DO_SRA(gvec_usra_d, uint64_t)

#define DO_RSHR(NAME, TYPE)                             \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);               \
    int shift = simd_data(desc);                        \
    TYPE *d = vd, *n = vn;                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
        TYPE tmp = n[i] >> (shift - 1);                 \
        d[i] = (tmp >> 1) + (tmp & 1);                  \
    }                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));             \
}

DO_RSHR(gvec_srshr_b, int8_t)
DO_RSHR(gvec_srshr_h, int16_t)
DO_RSHR(gvec_srshr_s, int32_t)
DO_RSHR(gvec_srshr_d, int64_t)

DO_RSHR(gvec_urshr_b, uint8_t)
DO_RSHR(gvec_urshr_h, uint16_t)
DO_RSHR(gvec_urshr_s, uint32_t)
DO_RSHR(gvec_urshr_d, uint64_t)

#define DO_RSRA(NAME, TYPE)                             \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);               \
    int shift = simd_data(desc);                        \
    TYPE *d = vd, *n = vn;                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
        TYPE tmp = n[i] >> (shift - 1);                 \
        d[i] += (tmp >> 1) + (tmp & 1);                 \
    }                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));             \
}

DO_RSRA(gvec_srsra_b, int8_t)
DO_RSRA(gvec_srsra_h, int16_t)
DO_RSRA(gvec_srsra_s, int32_t)
DO_RSRA(gvec_srsra_d, int64_t)

DO_RSRA(gvec_ursra_b, uint8_t)
DO_RSRA(gvec_ursra_h, uint16_t)
DO_RSRA(gvec_ursra_s, uint32_t)
DO_RSRA(gvec_ursra_d, uint64_t)

#define DO_SRI(NAME, TYPE)                              \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);               \
    int shift = simd_data(desc);                        \
    TYPE *d = vd, *n = vn;                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
        d[i] = deposit64(d[i], 0, sizeof(TYPE) * 8 - shift, n[i] >> shift); \
    }                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));             \
}

DO_SRI(gvec_sri_b, uint8_t)
DO_SRI(gvec_sri_h, uint16_t)
DO_SRI(gvec_sri_s, uint32_t)
DO_SRI(gvec_sri_d, uint64_t)

#define DO_SLI(NAME, TYPE)                              \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, oprsz = simd_oprsz(desc);               \
    int shift = simd_data(desc);                        \
    TYPE *d = vd, *n = vn;                              \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {        \
        d[i] = deposit64(d[i], shift, sizeof(TYPE) * 8 - shift, n[i]);  \
    }                                                   \
    clear_tail(d, oprsz, simd_maxsz(desc));             \
}

DO_SLI(gvec_sli_b, uint8_t)
DO_SLI(gvec_sli_h, uint16_t)
DO_SLI(gvec_sli_s, uint32_t)
DO_SLI(gvec_sli_d, uint64_t)

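/*
 * Both SRI and SLI merge the shifted source into the destination rather
 * than replacing it: e.g. for 8-bit SLI with shift == 4,
 * deposit64(d, 4, 4, n) keeps the low four bits of d and writes the low
 * four bits of n above them, i.e. d = (d & 0x0f) | (n << 4).
 */
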
/*
 * Convert float16 to float32, raising no exceptions and
 * preserving exceptional values, including SNaN.
 * This is effectively an unpack+repack operation.
 */
static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16)
{
    const int f16_bias = 15;
    const int f32_bias = 127;
    uint32_t sign = extract32(f16, 15, 1);
    uint32_t exp = extract32(f16, 10, 5);
    uint32_t frac = extract32(f16, 0, 10);

    if (exp == 0x1f) {
        /* Inf or NaN: keep the all-ones exponent.  */
        exp = 0xff;
    } else if (exp == 0) {
        /* Zero or denormal.  */
        if (frac != 0) {
            if (fz16) {
                frac = 0;
            } else {
                /*
                 * Denormal; these are all normal float32.
                 * Shift the fraction so that the msb is at bit 11,
                 * then remove bit 11 as the implicit bit of the
                 * normalized float32. Note that we still go through
                 * the shift for normal numbers below, to put the
                 * float32 fraction at the right place.
                 */
                int shift = clz32(frac) - 21;
                frac = (frac << shift) & 0x3ff;
                exp = f32_bias - f16_bias - shift + 1;
            }
        }
    } else {
        /* Normal number; adjust the bias.  */
        exp += f32_bias - f16_bias;
    }
    /* Repack the sign, exponent and fraction into float32 positions.  */
    sign <<= 31;
    exp <<= 23;
    frac <<= 23 - 10;

    return sign | exp | frac;
}

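/*
 * Example: f16 0x3c00 (1.0) has sign 0, exp 15, frac 0; the bias
 * adjustment gives exp 127 and the repack yields 0x3f800000, which is
 * 1.0 as float32.  A NaN keeps its payload bits, merely shifted up into
 * the float32 fraction field.
 */
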
static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
{
    /*
     * Branchless load of u32[0], u64[0], u32[1], or u64[1].
     * Load the 2nd qword iff is_q & is_2.
     * Shift to the 2nd dword iff !is_q & is_2.
     * For !is_q & !is_2, the upper bits of the result are garbage.
     */
    return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5);
}

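/*
 * I.e. is_q && is_2 reads the second 64-bit chunk, !is_q && is_2 shifts
 * the first chunk right by 32 to expose its upper 32 bits, and in all
 * other cases the low bits of ptr[0] are returned unshifted.
 */
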
/*
 * Note that FMLAL requires oprsz == 8 or oprsz == 16,
 * as there is not yet an SVE version that might use blocking.
 */

static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
                     uint32_t desc, bool fz16)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    int is_q = oprsz == 16;
    uint64_t n_4, m_4;

    /* Pre-load all of the f16 data, avoiding overlap issues.  */
    n_4 = load4_f16(vn, is_q, is_2);
    m_4 = load4_f16(vm, is_q, is_2);

    /* Negate all inputs for FMLSL at once.  */
    if (is_s) {
        n_4 ^= 0x8000800080008000ull;
    }

    for (i = 0; i < oprsz / 4; i++) {
        float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
        float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16);
        d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

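/*
 * FMLAL thus widens four (or two, for oprsz == 8) half-precision products
 * to single precision and accumulates them into the float32 destination;
 * is_2 selects the high half of the half-precision inputs and is_s turns
 * the operation into FMLSL by flipping the sign bit of every n element.
 */
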
void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm,
                            void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc,
             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
                            void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc,
             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
                         uint32_t desc, bool fz16)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3);
    int is_q = oprsz == 16;
    uint64_t n_4;
    float32 m_1;

    /* Pre-load all of the f16 data, avoiding overlap issues.  */
    n_4 = load4_f16(vn, is_q, is_2);

    /* Negate all inputs for FMLSL at once.  */
    if (is_s) {
        n_4 ^= 0x8000800080008000ull;
    }

    m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16);

    for (i = 0; i < oprsz / 4; i++) {
        float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
        d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm,
                                void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc,
                 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
                                void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
                 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int8_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz; ++i) {
        int8_t mm = m[i];
        int8_t nn = n[i];
        int8_t res = 0;
        if (mm >= 0) {
            if (mm < 8) {
                res = nn << mm;
            }
        } else {
            res = nn >> (mm > -8 ? -mm : 7);
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        int8_t mm = m[i];   /* only 8 bits of shift are significant */
        int16_t nn = n[i];
        int16_t res = 0;
        if (mm >= 0) {
            if (mm < 16) {
                res = nn << mm;
            }
        } else {
            res = nn >> (mm > -16 ? -mm : 15);
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint8_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz; ++i) {
        int8_t mm = m[i];
        uint8_t nn = n[i];
        uint8_t res = 0;
        if (mm >= 0) {
            if (mm < 8) {
                res = nn << mm;
            }
        } else {
            if (mm > -8) {
                res = nn >> -mm;
            }
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        int8_t mm = m[i];   /* only 8 bits of shift are significant */
        uint16_t nn = n[i];
        uint16_t res = 0;
        if (mm >= 0) {
            if (mm < 16) {
                res = nn << mm;
            }
        } else {
            if (mm > -16) {
                res = nn >> -mm;
            }
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/*
 * 8x8->8 polynomial multiply.
 *
 * Polynomial multiplication is like integer multiplication except the
 * partial products are XORed, not added.
 *
 * TODO: expose this as a generic vector operation, as it is a common
 * crypto building block.
 */
void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = n[i];
        uint64_t mm = m[i];
        uint64_t rr = 0;

        for (j = 0; j < 8; ++j) {
            uint64_t mask = (nn & 0x0101010101010101ull) * 0xff;
            rr ^= mm & mask;
            mm = (mm << 1) & 0xfefefefefefefefeull;
            nn >>= 1;
        }
        d[i] = rr;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

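/*
 * Example: the carry-less product 0x03 * 0x03 is 0x05, because the
 * partial products 0x03 and 0x06 are XORed rather than added
 * ((x + 1)^2 = x^2 + 1 over GF(2)), whereas the integer product is 9.
 */
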
/*
 * 64x64->128 polynomial multiply.
 * Because the lanes are not accessed in strict columns,
 * this probably cannot be turned into a generic helper.
 */
void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    intptr_t hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; i += 2) {
        uint64_t nn = n[i + hi];
        uint64_t mm = m[i + hi];
        uint64_t rhi = 0;
        uint64_t rlo = 0;

        /* Bit 0 can only influence the low 64-bit result.  */
        if (nn & 1) {
            rlo = mm;
        }

        for (j = 1; j < 64; ++j) {
            uint64_t mask = -((nn >> j) & 1);
            rlo ^= (mm << j) & mask;
            rhi ^= (mm >> (64 - j)) & mask;
        }
        d[i] = rlo;
        d[i + 1] = rhi;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/*
 * 8x8->16 polynomial multiply.
 *
 * The byte inputs are expanded to (or extracted from) half-words.
 * Note that neon and sve2 get the inputs from different positions.
 * This allows 4 bytes to be processed in parallel with uint64_t.
 */

static uint64_t expand_byte_to_half(uint64_t x)
{
    return  (x & 0x000000ff)
         | ((x & 0x0000ff00) << 8)
         | ((x & 0x00ff0000) << 16)
         | ((x & 0xff000000) << 24);
}

static uint64_t pmull_h(uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
        result ^= op2 & mask;
        op1 >>= 1;
        op2 <<= 1;
    }
    return result;
}

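/*
 * E.g. expand_byte_to_half(0x04030201) == 0x0004000300020001, spreading
 * each byte into its own 16-bit lane so that four 8x8->16 carry-less
 * products can be accumulated at once by pmull_h.
 */
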
void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    uint64_t nn = n[hi], mm = m[hi];

    d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
    nn >>= 32;
    mm >>= 32;
    d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));

    clear_tail(d, 16, simd_maxsz(desc));
}

#ifdef TARGET_AARCH64
void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int shift = simd_data(desc) * 8;
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull;
        uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull;

        d[i] = pmull_h(nn, mm);
    }
}
#endif

#define DO_CMP0(NAME, TYPE, OP)                         \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)    \
{                                                       \
    intptr_t i, opr_sz = simd_oprsz(desc);              \
    for (i = 0; i < opr_sz; i += sizeof(TYPE)) {        \
        TYPE nn = *(TYPE *)(vn + i);                    \
        *(TYPE *)(vd + i) = -(nn OP 0);                 \
    }                                                   \
    clear_tail(vd, opr_sz, simd_maxsz(desc));           \
}

DO_CMP0(gvec_ceq0_b, int8_t, ==)
DO_CMP0(gvec_clt0_b, int8_t, <)
DO_CMP0(gvec_cle0_b, int8_t, <=)
DO_CMP0(gvec_cgt0_b, int8_t, >)
DO_CMP0(gvec_cge0_b, int8_t, >=)

DO_CMP0(gvec_ceq0_h, int16_t, ==)
DO_CMP0(gvec_clt0_h, int16_t, <)
DO_CMP0(gvec_cle0_h, int16_t, <=)
DO_CMP0(gvec_cgt0_h, int16_t, >)
DO_CMP0(gvec_cge0_h, int16_t, >=)

#define DO_ABD(NAME, TYPE)                                      \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)  \
{                                                               \
    intptr_t i, opr_sz = simd_oprsz(desc);                      \
    TYPE *d = vd, *n = vn, *m = vm;                             \
                                                                \
    for (i = 0; i < opr_sz / sizeof(TYPE); ++i) {               \
        d[i] = n[i] < m[i] ? m[i] - n[i] : n[i] - m[i];         \
    }                                                           \
    clear_tail(d, opr_sz, simd_maxsz(desc));                    \
}

DO_ABD(gvec_sabd_b, int8_t)
DO_ABD(gvec_sabd_h, int16_t)
DO_ABD(gvec_sabd_s, int32_t)
DO_ABD(gvec_sabd_d, int64_t)

DO_ABD(gvec_uabd_b, uint8_t)
DO_ABD(gvec_uabd_h, uint16_t)
DO_ABD(gvec_uabd_s, uint32_t)
DO_ABD(gvec_uabd_d, uint64_t)

#define DO_ABA(NAME, TYPE)                                      \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)  \
{                                                               \
    intptr_t i, opr_sz = simd_oprsz(desc);                      \
    TYPE *d = vd, *n = vn, *m = vm;                             \
                                                                \
    for (i = 0; i < opr_sz / sizeof(TYPE); ++i) {               \
        d[i] += n[i] < m[i] ? m[i] - n[i] : n[i] - m[i];        \
    }                                                           \
    clear_tail(d, opr_sz, simd_maxsz(desc));                    \
}

DO_ABA(gvec_saba_b, int8_t)
DO_ABA(gvec_saba_h, int16_t)
DO_ABA(gvec_saba_s, int32_t)
DO_ABA(gvec_saba_d, int64_t)

DO_ABA(gvec_uaba_b, uint8_t)
DO_ABA(gvec_uaba_h, uint16_t)
DO_ABA(gvec_uaba_s, uint32_t)
DO_ABA(gvec_uaba_d, uint64_t)

#define DO_NEON_PAIRWISE(NAME, OP)                                      \
    void HELPER(NAME##s)(void *vd, void *vn, void *vm,                  \
                         void *stat, uint32_t oprsz)                    \
    {                                                                   \
        float_status *fpst = stat;                                      \
        float32 *d = vd, *n = vn, *m = vm;                              \
        float32 r0, r1;                                                 \
                                                                        \
        /* Read all inputs before writing outputs in case vm == vd */   \
        r0 = float32_##OP(n[H4(0)], n[H4(1)], fpst);                    \
        r1 = float32_##OP(m[H4(0)], m[H4(1)], fpst);                    \
                                                                        \
        d[H4(0)] = r0;                                                  \
        d[H4(1)] = r1;                                                  \
    }                                                                   \
                                                                        \
    void HELPER(NAME##h)(void *vd, void *vn, void *vm,                  \
                         void *stat, uint32_t oprsz)                    \
    {                                                                   \
        float_status *fpst = stat;                                      \
        float16 *d = vd, *n = vn, *m = vm;                              \
        float16 r0, r1, r2, r3;                                         \
                                                                        \
        /* Read all inputs before writing outputs in case vm == vd */   \
        r0 = float16_##OP(n[H2(0)], n[H2(1)], fpst);                    \
        r1 = float16_##OP(n[H2(2)], n[H2(3)], fpst);                    \
        r2 = float16_##OP(m[H2(0)], m[H2(1)], fpst);                    \
        r3 = float16_##OP(m[H2(2)], m[H2(3)], fpst);                    \
                                                                        \
        d[H2(0)] = r0;                                                  \
        d[H2(1)] = r1;                                                  \
        d[H2(2)] = r2;                                                  \
        d[H2(3)] = r3;                                                  \
    }

DO_NEON_PAIRWISE(neon_padd, add)
DO_NEON_PAIRWISE(neon_pmax, max)
DO_NEON_PAIRWISE(neon_pmin, min)

#undef DO_NEON_PAIRWISE

#define DO_VCVT_FIXED(NAME, FUNC, TYPE)                             \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)    \
{                                                                   \
    intptr_t i, oprsz = simd_oprsz(desc);                           \
    int shift = simd_data(desc);                                    \
    TYPE *d = vd, *n = vn;                                          \
    float_status *fpst = stat;                                      \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                    \
        d[i] = FUNC(n[i], shift, fpst);                             \
    }                                                               \
    clear_tail(d, oprsz, simd_maxsz(desc));                         \
}

DO_VCVT_FIXED(gvec_vcvt_sf, helper_vfp_sltos, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_uf, helper_vfp_ultos, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_fs, helper_vfp_tosls_round_to_zero, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_fu, helper_vfp_touls_round_to_zero, uint32_t)
DO_VCVT_FIXED(gvec_vcvt_sh, helper_vfp_shtoh, uint16_t)
DO_VCVT_FIXED(gvec_vcvt_uh, helper_vfp_uhtoh, uint16_t)
DO_VCVT_FIXED(gvec_vcvt_hs, helper_vfp_toshh_round_to_zero, uint16_t)
DO_VCVT_FIXED(gvec_vcvt_hu, helper_vfp_touhh_round_to_zero, uint16_t)

#undef DO_VCVT_FIXED

#define DO_VCVT_RMODE(NAME, FUNC, TYPE)                             \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)    \
{                                                                   \
    float_status *fpst = stat;                                      \
    intptr_t i, oprsz = simd_oprsz(desc);                           \
    uint32_t rmode = simd_data(desc);                               \
    uint32_t prev_rmode = get_float_rounding_mode(fpst);            \
    TYPE *d = vd, *n = vn;                                          \
    set_float_rounding_mode(rmode, fpst);                           \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                    \
        d[i] = FUNC(n[i], 0, fpst);                                 \
    }                                                               \
    set_float_rounding_mode(prev_rmode, fpst);                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                         \
}

DO_VCVT_RMODE(gvec_vcvt_rm_ss, helper_vfp_tosls, uint32_t)
DO_VCVT_RMODE(gvec_vcvt_rm_us, helper_vfp_touls, uint32_t)
DO_VCVT_RMODE(gvec_vcvt_rm_sh, helper_vfp_toshh, uint16_t)
DO_VCVT_RMODE(gvec_vcvt_rm_uh, helper_vfp_touhh, uint16_t)

#undef DO_VCVT_RMODE

#define DO_VRINT_RMODE(NAME, FUNC, TYPE)                            \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)    \
{                                                                   \
    float_status *fpst = stat;                                      \
    intptr_t i, oprsz = simd_oprsz(desc);                           \
    uint32_t rmode = simd_data(desc);                               \
    uint32_t prev_rmode = get_float_rounding_mode(fpst);            \
    TYPE *d = vd, *n = vn;                                          \
    set_float_rounding_mode(rmode, fpst);                           \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                    \
        d[i] = FUNC(n[i], fpst);                                    \
    }                                                               \
    set_float_rounding_mode(prev_rmode, fpst);                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                         \
}

DO_VRINT_RMODE(gvec_vrint_rm_h, helper_rinth, uint16_t)
DO_VRINT_RMODE(gvec_vrint_rm_s, helper_rints, uint32_t)

#undef DO_VRINT_RMODE

#ifdef TARGET_AARCH64
void HELPER(simd_tblx)(void *vd, void *vm, void *venv, uint32_t desc)
{
    const uint8_t *indices = vm;
    CPUARMState *env = venv;
    size_t oprsz = simd_oprsz(desc);
    uint32_t rn = extract32(desc, SIMD_DATA_SHIFT, 5);
    bool is_tbx = extract32(desc, SIMD_DATA_SHIFT + 5, 1);
    uint32_t table_len = desc >> (SIMD_DATA_SHIFT + 6);
    union {
        uint8_t b[16];
        uint64_t d[2];
    } result;

    /*
     * We must construct the final result in a temp, lest the output
     * overlaps the input table. For TBL, begin with zero; for TBX,
     * begin with the original register contents. Note that we always
     * copy 16 bytes here to avoid an extra branch; clearing the high
     * bits of the register for oprsz == 8 is handled below.
     */
    if (is_tbx) {
        memcpy(&result, vd, 16);
    } else {
        memset(&result, 0, 16);
    }

    for (size_t i = 0; i < oprsz; ++i) {
        uint32_t index = indices[H1(i)];

        if (index < table_len) {
            /*
             * Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct register element, bearing in mind
             * that the table can wrap around from V31 to V0.
             */
            const uint8_t *table = (const uint8_t *)
                aa64_vfp_qreg(env, (rn + (index >> 4)) % 32);
            result.b[H1(i)] = table[H1(index % 16)];
        }
    }

    memcpy(vd, &result, 16);
    clear_tail(vd, oprsz, simd_maxsz(desc));
}
#endif