/*
 * ARM AdvSIMD / SVE Vector Operations
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

20 #include "qemu/osdep.h"
22 #include "exec/helper-proto.h"
23 #include "tcg/tcg-gvec-desc.h"
24 #include "fpu/softfloat.h"
/* Note that vector data is stored in host-endian 64-bit chunks,
   so addressing units smaller than that need a host-endian fixup.  */
#ifdef HOST_WORDS_BIGENDIAN
#define H1(x)  ((x) ^ 7)
#define H2(x)  ((x) ^ 3)
#define H4(x)  ((x) ^ 1)
#else
#define H1(x)  (x)
#define H2(x)  (x)
#define H4(x)  (x)
#endif

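/*
 * For example, on a big-endian host the 16-bit element at logical
 * index 0 of a 64-bit chunk lives at host halfword offset 3, hence
 * H2(0) == 3; on a little-endian host the indices are used unchanged.
 */
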
/* Clear tail from @opr_sz to @max_sz.  */
static void clear_tail(void *vd, uintptr_t opr_sz, uintptr_t max_sz)
{
    uint64_t *d = vd + opr_sz;
    uintptr_t i;

    for (i = opr_sz; i < max_sz; i += 8) {
        *d++ = 0;
    }
}

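/*
 * AdvSIMD operations zero the portion of the vector register beyond
 * the bytes actually written, so every helper below finishes by
 * clearing from the operation size up to the maximum vector size.
 */
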
/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
static int16_t inl_qrdmlah_s16(int16_t src1, int16_t src2,
                               int16_t src3, uint32_t *sat)
{
    /* Simplify:
     * = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16
     * = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15
     */
    int32_t ret = (int32_t)src1 * src2;
    ret = ((int32_t)src3 << 15) + ret + (1 << 14);
    ret >>= 15;
    if (ret != (int16_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? -0x8000 : 0x7fff);
    }
    return ret;
}

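/*
 * The saturation test above exploits the narrowing cast: after the
 * final shift, any value outside [-0x8000, 0x7fff] no longer equals
 * its own int16_t truncation, so no explicit bound comparisons are
 * needed.
 */
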
uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1,
                                  uint32_t src2, uint32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    uint16_t e1 = inl_qrdmlah_s16(src1, src2, src3, sat);
    uint16_t e2 = inl_qrdmlah_s16(src1 >> 16, src2 >> 16, src3 >> 16, sat);
    return deposit32(e1, 16, 16, e2);
}

void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int16_t *d = vd;
    int16_t *n = vn;
    int16_t *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = inl_qrdmlah_s16(n[i], m[i], d[i], vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Signed saturating rounding doubling multiply-subtract high half, 16-bit */
static int16_t inl_qrdmlsh_s16(int16_t src1, int16_t src2,
                               int16_t src3, uint32_t *sat)
{
    /* Similarly, using subtraction:
     * = ((a3 << 16) - ((e1 * e2) << 1) + (1 << 15)) >> 16
     * = ((a3 << 15) - (e1 * e2) + (1 << 14)) >> 15
     */
    int32_t ret = (int32_t)src1 * src2;
    ret = ((int32_t)src3 << 15) - ret + (1 << 14);
    ret >>= 15;
    if (ret != (int16_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? -0x8000 : 0x7fff);
    }
    return ret;
}

uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1,
                                  uint32_t src2, uint32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    uint16_t e1 = inl_qrdmlsh_s16(src1, src2, src3, sat);
    uint16_t e2 = inl_qrdmlsh_s16(src1 >> 16, src2 >> 16, src3 >> 16, sat);
    return deposit32(e1, 16, 16, e2);
}

void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int16_t *d = vd;
    int16_t *n = vn;
    int16_t *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 2; ++i) {
        d[i] = inl_qrdmlsh_s16(n[i], m[i], d[i], vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
static int32_t inl_qrdmlah_s32(int32_t src1, int32_t src2,
                               int32_t src3, uint32_t *sat)
{
    /* Simplify similarly to inl_qrdmlah_s16 above.  */
    int64_t ret = (int64_t)src1 * src2;
    ret = ((int64_t)src3 << 31) + ret + (1 << 30);
    ret >>= 31;
    if (ret != (int32_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? INT32_MIN : INT32_MAX);
    }
    return ret;
}

uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1,
                                  int32_t src2, int32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    return inl_qrdmlah_s32(src1, src2, src3, sat);
}

void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int32_t *n = vn;
    int32_t *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = inl_qrdmlah_s32(n[i], m[i], d[i], vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Signed saturating rounding doubling multiply-subtract high half, 32-bit */
static int32_t inl_qrdmlsh_s32(int32_t src1, int32_t src2,
                               int32_t src3, uint32_t *sat)
{
    /* Simplify similarly to inl_qrdmlsh_s16 above.  */
    int64_t ret = (int64_t)src1 * src2;
    ret = ((int64_t)src3 << 31) - ret + (1 << 30);
    ret >>= 31;
    if (ret != (int32_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? INT32_MIN : INT32_MAX);
    }
    return ret;
}

uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1,
                                  int32_t src2, int32_t src3)
{
    uint32_t *sat = &env->vfp.qc[0];
    return inl_qrdmlsh_s32(src1, src2, src3, sat);
}

void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm,
                              void *vq, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int32_t *n = vn;
    int32_t *m = vm;
    uintptr_t i;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] = inl_qrdmlsh_s32(n[i], m[i], d[i], vq);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/* Integer 8 and 16-bit dot-product.
 *
 * Note that for the loops herein, host endianness does not matter
 * with respect to the ordering of data within the 64-bit lanes.
 * All elements are treated equally, no matter where they are.
 */

void HELPER(gvec_sdot_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int32_t *d = vd;
    int8_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] += n[i * 4 + 0] * m[i * 4 + 0]
              + n[i * 4 + 1] * m[i * 4 + 1]
              + n[i * 4 + 2] * m[i * 4 + 2]
              + n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

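/*
 * Each byte product above fits in 15 bits and there are only four of
 * them, so the sum of products cannot overflow; the accumulation into
 * the 32-bit lane wraps, as the instruction specifies.
 */
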
void HELPER(gvec_udot_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint32_t *d = vd;
    uint8_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 4; ++i) {
        d[i] += n[i * 4 + 0] * m[i * 4 + 0]
              + n[i * 4 + 1] * m[i * 4 + 1]
              + n[i * 4 + 2] * m[i * 4 + 2]
              + n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int64_t *d = vd;
    int16_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        d[i] += (int64_t)n[i * 4 + 0] * m[i * 4 + 0]
              + (int64_t)n[i * 4 + 1] * m[i * 4 + 1]
              + (int64_t)n[i * 4 + 2] * m[i * 4 + 2]
              + (int64_t)n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd;
    uint16_t *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        d[i] += (uint64_t)n[i * 4 + 0] * m[i * 4 + 0]
              + (uint64_t)n[i * 4 + 1] * m[i * 4 + 1]
              + (uint64_t)n[i * 4 + 2] * m[i * 4 + 2]
              + (uint64_t)n[i * 4 + 3] * m[i * 4 + 3];
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
    intptr_t index = simd_data(desc);
    int32_t *d = vd;
    int8_t *n = vn;
    int8_t *m_indexed = (int8_t *)vm + index * 4;

    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
     * Otherwise opr_sz is a multiple of 16.
     */
    segend = MIN(4, opr_sz_4);
    i = 0;
    do {
        int8_t m0 = m_indexed[i * 4 + 0];
        int8_t m1 = m_indexed[i * 4 + 1];
        int8_t m2 = m_indexed[i * 4 + 2];
        int8_t m3 = m_indexed[i * 4 + 3];

        do {
            d[i] += n[i * 4 + 0] * m0
                  + n[i * 4 + 1] * m1
                  + n[i * 4 + 2] * m2
                  + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;
    } while (i < opr_sz_4);

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

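/*
 * In the helper above, the outer do-loop advances one 128-bit segment
 * at a time, reloading the four bytes selected by the index, while the
 * inner do-loop accumulates the four 32-bit lanes of that segment.
 */
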
void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
    intptr_t index = simd_data(desc);
    uint32_t *d = vd;
    uint8_t *n = vn;
    uint8_t *m_indexed = (uint8_t *)vm + index * 4;

    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
     * Otherwise opr_sz is a multiple of 16.
     */
    segend = MIN(4, opr_sz_4);
    i = 0;
    do {
        uint8_t m0 = m_indexed[i * 4 + 0];
        uint8_t m1 = m_indexed[i * 4 + 1];
        uint8_t m2 = m_indexed[i * 4 + 2];
        uint8_t m3 = m_indexed[i * 4 + 3];

        do {
            d[i] += n[i * 4 + 0] * m0
                  + n[i * 4 + 1] * m1
                  + n[i * 4 + 2] * m2
                  + n[i * 4 + 3] * m3;
        } while (++i < segend);
        segend = i + 4;
    } while (i < opr_sz_4);

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
    intptr_t index = simd_data(desc);
    int64_t *d = vd;
    int16_t *n = vn;
    int16_t *m_indexed = (int16_t *)vm + index * 4;

    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
     * Process the entire segment all at once, writing back the results
     * only after we've consumed all of the inputs.
     */
    for (i = 0; i < opr_sz_8; i += 2) {
        int64_t d0, d1;

        d0  = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
        d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
        d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
        d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
        d1  = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
        d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
        d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
        d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];

        d[i + 0] += d0;
        d[i + 1] += d1;
    }

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
    intptr_t index = simd_data(desc);
    uint64_t *d = vd;
    uint16_t *n = vn;
    uint16_t *m_indexed = (uint16_t *)vm + index * 4;

    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
     * Process the entire segment all at once, writing back the results
     * only after we've consumed all of the inputs.
     */
    for (i = 0; i < opr_sz_8; i += 2) {
        uint64_t d0, d1;

        d0  = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
        d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
        d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
        d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
        d1  = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
        d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
        d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
        d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];

        d[i + 0] += d0;
        d[i + 1] += d1;
    }

    clear_tail(d, opr_sz, simd_maxsz(desc));
}

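/*
 * Floating-point complex add (FCADD): elements are processed in pairs,
 * the even lane holding the real part and the odd lane the imaginary
 * part.  The single data bit in @desc selects rotation by 90 or 270
 * degrees, i.e. whether the real or the imaginary addend taken from
 * @vm is negated.
 */
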
void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < opr_sz / 2; i += 2) {
        float16 e0 = n[H2(i)];
        float16 e1 = m[H2(i + 1)] ^ neg_imag;
        float16 e2 = n[H2(i + 1)];
        float16 e3 = m[H2(i)] ^ neg_real;

        d[H2(i)] = float16_add(e0, e1, fpst);
        d[H2(i + 1)] = float16_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcadds)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    uint32_t neg_real = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < opr_sz / 4; i += 2) {
        float32 e0 = n[H4(i)];
        float32 e1 = m[H4(i + 1)] ^ neg_imag;
        float32 e2 = n[H4(i + 1)];
        float32 e3 = m[H4(i)] ^ neg_real;

        d[H4(i)] = float32_add(e0, e1, fpst);
        d[H4(i + 1)] = float32_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcaddd)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float64 *d = vd;
    float64 *n = vn;
    float64 *m = vm;
    float_status *fpst = vfpst;
    uint64_t neg_real = extract64(desc, SIMD_DATA_SHIFT, 1);
    uint64_t neg_imag = neg_real ^ 1;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 63;
    neg_imag <<= 63;

    for (i = 0; i < opr_sz / 8; i += 2) {
        float64 e0 = n[i];
        float64 e1 = m[i + 1] ^ neg_imag;
        float64 e2 = n[i + 1];
        float64 e3 = m[i] ^ neg_real;

        d[i] = float64_add(e0, e1, fpst);
        d[i + 1] = float64_add(e2, e3, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

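/*
 * Floating-point complex multiply-add (FCMLA): two data bits in @desc
 * encode the rotation.  @flip selects whether the multiplier taken
 * from @vn is the real or the imaginary element of each pair, while
 * neg_real and neg_imag negate the @vm element feeding the real and
 * the imaginary result lane respectively.
 */
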
void HELPER(gvec_fcmlah)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint32_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < opr_sz / 2; i += 2) {
        float16 e2 = n[H2(i + flip)];
        float16 e1 = m[H2(i + flip)] ^ neg_real;
        float16 e4 = e2;
        float16 e3 = m[H2(i + 1 - flip)] ^ neg_imag;

        d[H2(i)] = float16_muladd(e2, e1, d[H2(i)], 0, fpst);
        d[H2(i + 1)] = float16_muladd(e4, e3, d[H2(i + 1)], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlah_idx)(void *vd, void *vn, void *vm,
                             void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float16 *d = vd;
    float16 *n = vn;
    float16 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
    uint32_t neg_real = flip ^ neg_imag;
    intptr_t elements = opr_sz / sizeof(float16);
    intptr_t eltspersegment = 16 / sizeof(float16);
    intptr_t i, j;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 15;
    neg_imag <<= 15;

    for (i = 0; i < elements; i += eltspersegment) {
        float16 mr = m[H2(i + 2 * index + 0)];
        float16 mi = m[H2(i + 2 * index + 1)];
        float16 e1 = neg_real ^ (flip ? mi : mr);
        float16 e3 = neg_imag ^ (flip ? mr : mi);

        for (j = i; j < i + eltspersegment; j += 2) {
            float16 e2 = n[H2(j + flip)];
            float16 e4 = e2;

            d[H2(j)] = float16_muladd(e2, e1, d[H2(j)], 0, fpst);
            d[H2(j + 1)] = float16_muladd(e4, e3, d[H2(j + 1)], 0, fpst);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

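/*
 * As with the other indexed ops, the complex pair selected by the
 * index is reloaded once per 128-bit segment, matching the
 * per-segment indexing that SVE applies.
 */
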
void HELPER(gvec_fcmlas)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint32_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < opr_sz / 4; i += 2) {
        float32 e2 = n[H4(i + flip)];
        float32 e1 = m[H4(i + flip)] ^ neg_real;
        float32 e4 = e2;
        float32 e3 = m[H4(i + 1 - flip)] ^ neg_imag;

        d[H4(i)] = float32_muladd(e2, e1, d[H4(i)], 0, fpst);
        d[H4(i + 1)] = float32_muladd(e4, e3, d[H4(i + 1)], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlas_idx)(void *vd, void *vn, void *vm,
                             void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float32 *d = vd;
    float32 *n = vn;
    float32 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint32_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    intptr_t index = extract32(desc, SIMD_DATA_SHIFT + 2, 2);
    uint32_t neg_real = flip ^ neg_imag;
    intptr_t elements = opr_sz / sizeof(float32);
    intptr_t eltspersegment = 16 / sizeof(float32);
    intptr_t i, j;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 31;
    neg_imag <<= 31;

    for (i = 0; i < elements; i += eltspersegment) {
        float32 mr = m[H4(i + 2 * index + 0)];
        float32 mi = m[H4(i + 2 * index + 1)];
        float32 e1 = neg_real ^ (flip ? mi : mr);
        float32 e3 = neg_imag ^ (flip ? mr : mi);

        for (j = i; j < i + eltspersegment; j += 2) {
            float32 e2 = n[H4(j + flip)];
            float32 e4 = e2;

            d[H4(j)] = float32_muladd(e2, e1, d[H4(j)], 0, fpst);
            d[H4(j + 1)] = float32_muladd(e4, e3, d[H4(j + 1)], 0, fpst);
        }
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_fcmlad)(void *vd, void *vn, void *vm,
                         void *vfpst, uint32_t desc)
{
    uintptr_t opr_sz = simd_oprsz(desc);
    float64 *d = vd;
    float64 *n = vn;
    float64 *m = vm;
    float_status *fpst = vfpst;
    intptr_t flip = extract32(desc, SIMD_DATA_SHIFT, 1);
    uint64_t neg_imag = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    uint64_t neg_real = flip ^ neg_imag;
    uintptr_t i;

    /* Shift boolean to the sign bit so we can xor to negate.  */
    neg_real <<= 63;
    neg_imag <<= 63;

    for (i = 0; i < opr_sz / 8; i += 2) {
        float64 e2 = n[i + flip];
        float64 e1 = m[i + flip] ^ neg_real;
        float64 e4 = e2;
        float64 e3 = m[i + 1 - flip] ^ neg_imag;

        d[i] = float64_muladd(e2, e1, d[i], 0, fpst);
        d[i + 1] = float64_muladd(e4, e3, d[i + 1], 0, fpst);
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

#define DO_2OP(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *stat, uint32_t desc)  \
{                                                                 \
    intptr_t i, oprsz = simd_oprsz(desc);                         \
    TYPE *d = vd, *n = vn;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                  \
        d[i] = FUNC(n[i], stat);                                  \
    }                                                             \
    clear_tail(d, oprsz, simd_maxsz(desc));                       \
}

DO_2OP(gvec_frecpe_h, helper_recpe_f16, float16)
DO_2OP(gvec_frecpe_s, helper_recpe_f32, float32)
DO_2OP(gvec_frecpe_d, helper_recpe_f64, float64)

DO_2OP(gvec_frsqrte_h, helper_rsqrte_f16, float16)
DO_2OP(gvec_frsqrte_s, helper_rsqrte_f32, float32)
DO_2OP(gvec_frsqrte_d, helper_rsqrte_f64, float64)

#undef DO_2OP

/* Floating-point trigonometric starting value.
 * See the ARM ARM pseudocode function FPTrigSMul.
 */

static float16 float16_ftsmul(float16 op1, uint16_t op2, float_status *stat)
{
    float16 result = float16_mul(op1, op1, stat);
    if (!float16_is_any_nan(result)) {
        result = float16_set_sign(result, op2 & 1);
    }
    return result;
}

static float32 float32_ftsmul(float32 op1, uint32_t op2, float_status *stat)
{
    float32 result = float32_mul(op1, op1, stat);
    if (!float32_is_any_nan(result)) {
        result = float32_set_sign(result, op2 & 1);
    }
    return result;
}

static float64 float64_ftsmul(float64 op1, uint64_t op2, float_status *stat)
{
    float64 result = float64_mul(op1, op1, stat);
    if (!float64_is_any_nan(result)) {
        result = float64_set_sign(result, op2 & 1);
    }
    return result;
}

static float32 float32_abd(float32 op1, float32 op2, float_status *stat)
{
    return float32_abs(float32_sub(op1, op2, stat));
}

#define DO_3OP(NAME, FUNC, TYPE) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        d[i] = FUNC(n[i], m[i], stat);                                     \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_3OP(gvec_fadd_h, float16_add, float16)
DO_3OP(gvec_fadd_s, float32_add, float32)
DO_3OP(gvec_fadd_d, float64_add, float64)

DO_3OP(gvec_fsub_h, float16_sub, float16)
DO_3OP(gvec_fsub_s, float32_sub, float32)
DO_3OP(gvec_fsub_d, float64_sub, float64)

DO_3OP(gvec_fmul_h, float16_mul, float16)
DO_3OP(gvec_fmul_s, float32_mul, float32)
DO_3OP(gvec_fmul_d, float64_mul, float64)

DO_3OP(gvec_ftsmul_h, float16_ftsmul, float16)
DO_3OP(gvec_ftsmul_s, float32_ftsmul, float32)
DO_3OP(gvec_ftsmul_d, float64_ftsmul, float64)

DO_3OP(gvec_fabd_s, float32_abd, float32)

#ifdef TARGET_AARCH64

DO_3OP(gvec_recps_h, helper_recpsf_f16, float16)
DO_3OP(gvec_recps_s, helper_recpsf_f32, float32)
DO_3OP(gvec_recps_d, helper_recpsf_f64, float64)

DO_3OP(gvec_rsqrts_h, helper_rsqrtsf_f16, float16)
DO_3OP(gvec_rsqrts_s, helper_rsqrtsf_f32, float32)
DO_3OP(gvec_rsqrts_d, helper_rsqrtsf_f64, float64)

#endif
#undef DO_3OP

/* For the indexed ops, SVE applies the index per 128-bit vector segment.
 * For AdvSIMD, there is of course only one such vector segment.
 */

#define DO_MUL_IDX(NAME, TYPE, H) \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
{                                                                          \
    intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE);  \
    intptr_t idx = simd_data(desc);                                        \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
        TYPE mm = m[H(i + idx)];                                           \
        for (j = 0; j < segment; j++) {                                    \
            d[i + j] = TYPE##_mul(n[i + j], mm, stat);                     \
        }                                                                  \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_MUL_IDX(gvec_fmul_idx_h, float16, H2)
DO_MUL_IDX(gvec_fmul_idx_s, float32, H4)
DO_MUL_IDX(gvec_fmul_idx_d, float64, )

#undef DO_MUL_IDX

#define DO_FMLA_IDX(NAME, TYPE, H)                                         \
void HELPER(NAME)(void *vd, void *vn, void *vm, void *va,                  \
                  void *stat, uint32_t desc)                               \
{                                                                          \
    intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE);  \
    TYPE op1_neg = extract32(desc, SIMD_DATA_SHIFT, 1);                    \
    intptr_t idx = desc >> (SIMD_DATA_SHIFT + 1);                          \
    TYPE *d = vd, *n = vn, *m = vm, *a = va;                               \
    op1_neg <<= (8 * sizeof(TYPE) - 1);                                    \
    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
        TYPE mm = m[H(i + idx)];                                           \
        for (j = 0; j < segment; j++) {                                    \
            d[i + j] = TYPE##_muladd(n[i + j] ^ op1_neg,                   \
                                     mm, a[i + j], 0, stat);               \
        }                                                                  \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_FMLA_IDX(gvec_fmla_idx_h, float16, H2)
DO_FMLA_IDX(gvec_fmla_idx_s, float32, H4)
DO_FMLA_IDX(gvec_fmla_idx_d, float64, )

#undef DO_FMLA_IDX

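/*
 * In DO_FMLA_IDX, bit 0 of the data field selects FMLS: shifting it to
 * the element's sign bit turns the negation of op1 into a plain xor.
 * The remaining data bits hold the index.
 */
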
#define DO_SAT(NAME, WTYPE, TYPEN, TYPEM, OP, MIN, MAX) \
void HELPER(NAME)(void *vd, void *vq, void *vn, void *vm, uint32_t desc)   \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    TYPEN *d = vd, *n = vn; TYPEM *m = vm;                                 \
    bool q = false;                                                        \
    for (i = 0; i < oprsz / sizeof(TYPEN); i++) {                          \
        WTYPE dd = (WTYPE)n[i] OP m[i];                                    \
        if (dd < MIN) {                                                    \
            dd = MIN;                                                      \
            q = true;                                                      \
        } else if (dd > MAX) {                                             \
            dd = MAX;                                                      \
            q = true;                                                      \
        }                                                                  \
        d[i] = dd;                                                         \
    }                                                                      \
    if (q) {                                                               \
        uint32_t *qc = vq;                                                 \
        qc[0] = 1;                                                         \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_SAT(gvec_uqadd_b, int, uint8_t, uint8_t, +, 0, UINT8_MAX)
DO_SAT(gvec_uqadd_h, int, uint16_t, uint16_t, +, 0, UINT16_MAX)
DO_SAT(gvec_uqadd_s, int64_t, uint32_t, uint32_t, +, 0, UINT32_MAX)

DO_SAT(gvec_sqadd_b, int, int8_t, int8_t, +, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqadd_h, int, int16_t, int16_t, +, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqadd_s, int64_t, int32_t, int32_t, +, INT32_MIN, INT32_MAX)

DO_SAT(gvec_uqsub_b, int, uint8_t, uint8_t, -, 0, UINT8_MAX)
DO_SAT(gvec_uqsub_h, int, uint16_t, uint16_t, -, 0, UINT16_MAX)
DO_SAT(gvec_uqsub_s, int64_t, uint32_t, uint32_t, -, 0, UINT32_MAX)

DO_SAT(gvec_sqsub_b, int, int8_t, int8_t, -, INT8_MIN, INT8_MAX)
DO_SAT(gvec_sqsub_h, int, int16_t, int16_t, -, INT16_MIN, INT16_MAX)
DO_SAT(gvec_sqsub_s, int64_t, int32_t, int32_t, -, INT32_MIN, INT32_MAX)

#undef DO_SAT

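/*
 * Note that WTYPE above is wide enough to hold the exact result of OP
 * on any two operands, e.g. plain int for byte and halfword adds, so
 * detecting saturation is a simple range check against MIN and MAX.
 * The 64-bit cases below have no wider type and must check the carry
 * or sign-overflow conditions directly.
 */
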
void HELPER(gvec_uqadd_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        uint64_t nn = n[i], mm = m[i], dd = nn + mm;
        if (dd < nn) {
            dd = UINT64_MAX;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_uqsub_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        uint64_t nn = n[i], mm = m[i], dd = nn - mm;
        if (nn < mm) {
            dd = 0;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_sqadd_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        int64_t nn = n[i], mm = m[i], dd = nn + mm;
        if (((dd ^ nn) & ~(nn ^ mm)) & INT64_MIN) {
            dd = (nn >> 63) ^ ~INT64_MIN;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

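/*
 * Signed overflow occurred iff the operands had the same sign and the
 * result's sign differs: (dd ^ nn) has the sign bit set when the sign
 * flipped, and ~(nn ^ mm) has it set when the operands agreed.  On
 * overflow, (nn >> 63) ^ ~INT64_MIN yields INT64_MAX for positive
 * operands and INT64_MIN for negative ones.
 */
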
void HELPER(gvec_sqsub_d)(void *vd, void *vq, void *vn,
                          void *vm, uint32_t desc)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int64_t *d = vd, *n = vn, *m = vm;
    bool q = false;

    for (i = 0; i < oprsz / 8; i++) {
        int64_t nn = n[i], mm = m[i], dd = nn - mm;
        if (((dd ^ nn) & (nn ^ mm)) & INT64_MIN) {
            dd = (nn >> 63) ^ ~INT64_MIN;
            q = true;
        }
        d[i] = dd;
    }
    if (q) {
        uint32_t *qc = vq;
        qc[0] = 1;
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

#define DO_SRA(NAME, TYPE)                                                 \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                       \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    int shift = simd_data(desc);                                           \
    TYPE *d = vd, *n = vn;                                                 \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        d[i] += n[i] >> shift;                                             \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_SRA(gvec_ssra_b, int8_t)
DO_SRA(gvec_ssra_h, int16_t)
DO_SRA(gvec_ssra_s, int32_t)
DO_SRA(gvec_ssra_d, int64_t)

DO_SRA(gvec_usra_b, uint8_t)
DO_SRA(gvec_usra_h, uint16_t)
DO_SRA(gvec_usra_s, uint32_t)
DO_SRA(gvec_usra_d, uint64_t)

#undef DO_SRA

#define DO_RSHR(NAME, TYPE)                                                \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                       \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    int shift = simd_data(desc);                                           \
    TYPE *d = vd, *n = vn;                                                 \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        TYPE tmp = n[i] >> (shift - 1);                                    \
        d[i] = (tmp >> 1) + (tmp & 1);                                     \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_RSHR(gvec_srshr_b, int8_t)
DO_RSHR(gvec_srshr_h, int16_t)
DO_RSHR(gvec_srshr_s, int32_t)
DO_RSHR(gvec_srshr_d, int64_t)

DO_RSHR(gvec_urshr_b, uint8_t)
DO_RSHR(gvec_urshr_h, uint16_t)
DO_RSHR(gvec_urshr_s, uint32_t)
DO_RSHR(gvec_urshr_d, uint64_t)

#undef DO_RSHR

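/*
 * In DO_RSHR (and DO_RSRA below), shifting first by shift - 1 keeps
 * the last bit shifted out in tmp; (tmp >> 1) + (tmp & 1) then adds it
 * back, which is equivalent to (n[i] + (1 << (shift - 1))) >> shift
 * but without the risk of that addition overflowing the element type.
 */
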
#define DO_RSRA(NAME, TYPE)                                                \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                       \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    int shift = simd_data(desc);                                           \
    TYPE *d = vd, *n = vn;                                                 \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        TYPE tmp = n[i] >> (shift - 1);                                    \
        d[i] += (tmp >> 1) + (tmp & 1);                                    \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_RSRA(gvec_srsra_b, int8_t)
DO_RSRA(gvec_srsra_h, int16_t)
DO_RSRA(gvec_srsra_s, int32_t)
DO_RSRA(gvec_srsra_d, int64_t)

DO_RSRA(gvec_ursra_b, uint8_t)
DO_RSRA(gvec_ursra_h, uint16_t)
DO_RSRA(gvec_ursra_s, uint32_t)
DO_RSRA(gvec_ursra_d, uint64_t)

#undef DO_RSRA

#define DO_SRI(NAME, TYPE)                                                 \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                       \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    int shift = simd_data(desc);                                           \
    TYPE *d = vd, *n = vn;                                                 \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        d[i] = deposit64(d[i], 0, sizeof(TYPE) * 8 - shift, n[i] >> shift); \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_SRI(gvec_sri_b, uint8_t)
DO_SRI(gvec_sri_h, uint16_t)
DO_SRI(gvec_sri_s, uint32_t)
DO_SRI(gvec_sri_d, uint64_t)

#undef DO_SRI

#define DO_SLI(NAME, TYPE)                                                 \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                       \
{                                                                          \
    intptr_t i, oprsz = simd_oprsz(desc);                                  \
    int shift = simd_data(desc);                                           \
    TYPE *d = vd, *n = vn;                                                 \
    for (i = 0; i < oprsz / sizeof(TYPE); i++) {                           \
        d[i] = deposit64(d[i], shift, sizeof(TYPE) * 8 - shift, n[i]);     \
    }                                                                      \
    clear_tail(d, oprsz, simd_maxsz(desc));                                \
}

DO_SLI(gvec_sli_b, uint8_t)
DO_SLI(gvec_sli_h, uint16_t)
DO_SLI(gvec_sli_s, uint32_t)
DO_SLI(gvec_sli_d, uint64_t)

#undef DO_SLI

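/*
 * SRI and SLI replace only the bits covered by the shifted value,
 * leaving the rest of each destination element intact, which is why
 * both are expressed as a deposit64 into the existing d[i].
 */
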
/*
 * Convert float16 to float32, raising no exceptions and
 * preserving exceptional values, including SNaN.
 * This is effectively an unpack+repack operation.
 */
static float32 float16_to_float32_by_bits(uint32_t f16, bool fz16)
{
    const int f16_bias = 15;
    const int f32_bias = 127;
    uint32_t sign = extract32(f16, 15, 1);
    uint32_t exp = extract32(f16, 10, 5);
    uint32_t frac = extract32(f16, 0, 10);

    if (exp == 0x1f) {
        /* Inf or NaN */
        exp = 0xff;
    } else if (exp == 0) {
        /* Zero or denormal.  */
        if (frac != 0) {
            if (fz16) {
                frac = 0;
            } else {
                /*
                 * Denormal; these are all normal float32.
                 * Shift the fraction so that the msb is at bit 11,
                 * then remove bit 11 as the implicit bit of the
                 * normalized float32.  Note that we still go through
                 * the shift for normal numbers below, to put the
                 * float32 fraction at the right place.
                 */
                int shift = clz32(frac) - 21;
                frac = (frac << shift) & 0x3ff;
                exp = f32_bias - f16_bias - shift + 1;
            }
        }
    } else {
        /* Normal number; adjust the bias.  */
        exp += f32_bias - f16_bias;
    }
    sign <<= 31;
    exp <<= 23;
    frac <<= 23 - 10;

    return sign | exp | frac;
}

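/*
 * Worked example of the denormal path: f16 0x0001 (2^-24) has
 * frac == 1, so shift == clz32(1) - 21 == 10; the fraction becomes 0
 * and exp == 127 - 15 - 10 + 1 == 103, which encodes exactly 2^-24
 * as a normal float32.
 */
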
static uint64_t load4_f16(uint64_t *ptr, int is_q, int is_2)
{
    /*
     * Branchless load of u32[0], u64[0], u32[1], or u64[1].
     * Load the 2nd qword iff is_q & is_2.
     * Shift to the 2nd dword iff !is_q & is_2.
     * For !is_q & !is_2, the upper bits of the result are garbage.
     */
    return ptr[is_q & is_2] >> ((is_2 & ~is_q) << 5);
}

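/*
 * In the expression above, (is_2 & ~is_q) << 5 evaluates to either 0
 * or 32, so the shift selects the low or high dword of the qword that
 * was indexed by is_q & is_2.
 */
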
/*
 * Note that FMLAL requires oprsz == 8 or oprsz == 16,
 * as there are not yet SVE versions that might use blocking.
 */

static void do_fmlal(float32 *d, void *vn, void *vm, float_status *fpst,
                     uint32_t desc, bool fz16)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    int is_q = oprsz == 16;
    uint64_t n_4, m_4;

    /* Pre-load all of the f16 data, avoiding overlap issues.  */
    n_4 = load4_f16(vn, is_q, is_2);
    m_4 = load4_f16(vm, is_q, is_2);

    /* Negate all inputs for FMLSL at once.  */
    if (is_s) {
        n_4 ^= 0x8000800080008000ull;
    }

    for (i = 0; i < oprsz / 4; i++) {
        float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
        float32 m_1 = float16_to_float32_by_bits(m_4 >> (i * 16), fz16);
        d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_fmlal_a32)(void *vd, void *vn, void *vm,
                            void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal(vd, vn, vm, &env->vfp.standard_fp_status, desc,
             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_fmlal_a64)(void *vd, void *vn, void *vm,
                            void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal(vd, vn, vm, &env->vfp.fp_status, desc,
             get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

static void do_fmlal_idx(float32 *d, void *vn, void *vm, float_status *fpst,
                         uint32_t desc, bool fz16)
{
    intptr_t i, oprsz = simd_oprsz(desc);
    int is_s = extract32(desc, SIMD_DATA_SHIFT, 1);
    int is_2 = extract32(desc, SIMD_DATA_SHIFT + 1, 1);
    int index = extract32(desc, SIMD_DATA_SHIFT + 2, 3);
    int is_q = oprsz == 16;
    uint64_t n_4;
    float32 m_1;

    /* Pre-load all of the f16 data, avoiding overlap issues.  */
    n_4 = load4_f16(vn, is_q, is_2);

    /* Negate all inputs for FMLSL at once.  */
    if (is_s) {
        n_4 ^= 0x8000800080008000ull;
    }

    m_1 = float16_to_float32_by_bits(((float16 *)vm)[H2(index)], fz16);

    for (i = 0; i < oprsz / 4; i++) {
        float32 n_1 = float16_to_float32_by_bits(n_4 >> (i * 16), fz16);
        d[H4(i)] = float32_muladd(n_1, m_1, d[H4(i)], 0, fpst);
    }
    clear_tail(d, oprsz, simd_maxsz(desc));
}

void HELPER(gvec_fmlal_idx_a32)(void *vd, void *vn, void *vm,
                                void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal_idx(vd, vn, vm, &env->vfp.standard_fp_status, desc,
                 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_fmlal_idx_a64)(void *vd, void *vn, void *vm,
                                void *venv, uint32_t desc)
{
    CPUARMState *env = venv;
    do_fmlal_idx(vd, vn, vm, &env->vfp.fp_status, desc,
                 get_flush_inputs_to_zero(&env->vfp.fp_status_f16));
}

void HELPER(gvec_sshl_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int8_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz; ++i) {
        int8_t mm = m[i];
        int8_t nn = n[i];
        int8_t res = 0;
        if (mm >= 0) {
            if (mm < 8) {
                res = nn << mm;
            }
        } else {
            res = nn >> (mm > -8 ? -mm : 7);
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_sshl_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    int16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        int8_t mm = m[i];   /* only 8 bits of shift are significant */
        int16_t nn = n[i];
        int16_t res = 0;
        if (mm >= 0) {
            if (mm < 16) {
                res = nn << mm;
            }
        } else {
            res = nn >> (mm > -16 ? -mm : 15);
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_ushl_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint8_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz; ++i) {
        int8_t mm = m[i];
        uint8_t nn = n[i];
        uint8_t res = 0;
        if (mm >= 0) {
            if (mm < 8) {
                res = nn << mm;
            }
        } else {
            if (mm > -8) {
                res = nn >> -mm;
            }
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

void HELPER(gvec_ushl_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint16_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 2; ++i) {
        int8_t mm = m[i];   /* only 8 bits of shift are significant */
        uint16_t nn = n[i];
        uint16_t res = 0;
        if (mm >= 0) {
            if (mm < 16) {
                res = nn << mm;
            }
        } else {
            if (mm > -16) {
                res = nn >> -mm;
            }
        }
        d[i] = res;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

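/*
 * For SSHL and USHL the per-element shift count is signed: a negative
 * count shifts right.  An out-of-range positive count yields 0, while
 * a large negative count yields 0 for USHL and the sign fill (0 or -1)
 * for SSHL.  The explicit range checks above also avoid C undefined
 * behavior for shifts as large as the element width.
 */
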
/*
 * 8x8->8 polynomial multiply.
 *
 * Polynomial multiplication is like integer multiplication except the
 * partial products are XORed, not added.
 *
 * TODO: expose this as a generic vector operation, as it is a common
 * crypto building block.
 */
void HELPER(gvec_pmul_b)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = n[i];
        uint64_t mm = m[i];
        uint64_t rr = 0;

        for (j = 0; j < 8; ++j) {
            uint64_t mask = (nn & 0x0101010101010101ull) * 0xff;
            rr ^= mm & mask;
            mm = (mm << 1) & 0xfefefefefefefefeull;
            nn >>= 1;
        }
        d[i] = rr;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

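/*
 * In the loop above, multiplying (nn & 0x0101...) by 0xff fans bit j
 * of every byte of nn out into a full byte-wide mask, so all eight
 * byte lanes are handled per iteration; masking with 0xfe..fe keeps
 * each byte's shifted bits from spilling into the neighbouring lane.
 */
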
/*
 * 64x64->128 polynomial multiply.
 * Because the lanes are not accessed in strict columns,
 * this probably cannot be turned into a generic helper.
 */
void HELPER(gvec_pmull_q)(void *vd, void *vn, void *vm, uint32_t desc)
{
    intptr_t i, j, opr_sz = simd_oprsz(desc);
    intptr_t hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; i += 2) {
        uint64_t nn = n[i + hi];
        uint64_t mm = m[i + hi];
        uint64_t rhi = 0;
        uint64_t rlo = 0;

        /* Bit 0 can only influence the low 64-bit result.  */
        if (nn & 1) {
            rlo = mm;
        }

        for (j = 1; j < 64; ++j) {
            uint64_t mask = -((nn >> j) & 1);
            rlo ^= (mm << j) & mask;
            rhi ^= (mm >> (64 - j)) & mask;
        }
        d[i] = rlo;
        d[i + 1] = rhi;
    }
    clear_tail(d, opr_sz, simd_maxsz(desc));
}

/*
 * 8x8->16 polynomial multiply.
 *
 * The byte inputs are expanded to (or extracted from) half-words.
 * Note that neon and sve2 get the inputs from different positions.
 * This allows 4 bytes to be processed in parallel with uint64_t.
 */

static uint64_t expand_byte_to_half(uint64_t x)
{
    return  (x & 0x000000ff)
         | ((x & 0x0000ff00) << 8)
         | ((x & 0x00ff0000) << 16)
         | ((x & 0xff000000) << 24);
}

static uint64_t pmull_h(uint64_t op1, uint64_t op2)
{
    uint64_t result = 0;
    int i;

    for (i = 0; i < 8; ++i) {
        uint64_t mask = (op1 & 0x0001000100010001ull) * 0xffff;
        result ^= op2 & mask;
        op1 >>= 1;
        op2 <<= 1;
    }
    return result;
}

void HELPER(neon_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int hi = simd_data(desc);
    uint64_t *d = vd, *n = vn, *m = vm;
    uint64_t nn = n[hi], mm = m[hi];

    d[0] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));
    nn >>= 32;
    mm >>= 32;
    d[1] = pmull_h(expand_byte_to_half(nn), expand_byte_to_half(mm));

    clear_tail(d, 16, simd_maxsz(desc));
}

#ifdef TARGET_AARCH64
void HELPER(sve2_pmull_h)(void *vd, void *vn, void *vm, uint32_t desc)
{
    int shift = simd_data(desc) * 8;
    intptr_t i, opr_sz = simd_oprsz(desc);
    uint64_t *d = vd, *n = vn, *m = vm;

    for (i = 0; i < opr_sz / 8; ++i) {
        uint64_t nn = (n[i] >> shift) & 0x00ff00ff00ff00ffull;
        uint64_t mm = (m[i] >> shift) & 0x00ff00ff00ff00ffull;

        d[i] = pmull_h(nn, mm);
    }
}
#endif

#define DO_CMP0(NAME, TYPE, OP)                                            \
void HELPER(NAME)(void *vd, void *vn, uint32_t desc)                       \
{                                                                          \
    intptr_t i, opr_sz = simd_oprsz(desc);                                 \
    for (i = 0; i < opr_sz; i += sizeof(TYPE)) {                           \
        TYPE nn = *(TYPE *)(vn + i);                                       \
        *(TYPE *)(vd + i) = -(nn OP 0);                                    \
    }                                                                      \
    clear_tail(vd, opr_sz, simd_maxsz(desc));                              \
}

DO_CMP0(gvec_ceq0_b, int8_t, ==)
DO_CMP0(gvec_clt0_b, int8_t, <)
DO_CMP0(gvec_cle0_b, int8_t, <=)
DO_CMP0(gvec_cgt0_b, int8_t, >)
DO_CMP0(gvec_cge0_b, int8_t, >=)

DO_CMP0(gvec_ceq0_h, int16_t, ==)
DO_CMP0(gvec_clt0_h, int16_t, <)
DO_CMP0(gvec_cle0_h, int16_t, <=)
DO_CMP0(gvec_cgt0_h, int16_t, >)
DO_CMP0(gvec_cge0_h, int16_t, >=)

#undef DO_CMP0

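/*
 * In DO_CMP0, the comparison yields 0 or 1; negation turns that into
 * the all-zeros or all-ones element value the instructions require.
 */
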
#define DO_ABD(NAME, TYPE)                                                 \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)             \
{                                                                          \
    intptr_t i, opr_sz = simd_oprsz(desc);                                 \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
                                                                           \
    for (i = 0; i < opr_sz / sizeof(TYPE); ++i) {                          \
        d[i] = n[i] < m[i] ? m[i] - n[i] : n[i] - m[i];                    \
    }                                                                      \
    clear_tail(d, opr_sz, simd_maxsz(desc));                               \
}

DO_ABD(gvec_sabd_b, int8_t)
DO_ABD(gvec_sabd_h, int16_t)
DO_ABD(gvec_sabd_s, int32_t)
DO_ABD(gvec_sabd_d, int64_t)

DO_ABD(gvec_uabd_b, uint8_t)
DO_ABD(gvec_uabd_h, uint16_t)
DO_ABD(gvec_uabd_s, uint32_t)
DO_ABD(gvec_uabd_d, uint64_t)

#undef DO_ABD

#define DO_ABA(NAME, TYPE)                                                 \
void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc)             \
{                                                                          \
    intptr_t i, opr_sz = simd_oprsz(desc);                                 \
    TYPE *d = vd, *n = vn, *m = vm;                                        \
                                                                           \
    for (i = 0; i < opr_sz / sizeof(TYPE); ++i) {                          \
        d[i] += n[i] < m[i] ? m[i] - n[i] : n[i] - m[i];                   \
    }                                                                      \
    clear_tail(d, opr_sz, simd_maxsz(desc));                               \
}

DO_ABA(gvec_saba_b, int8_t)
DO_ABA(gvec_saba_h, int16_t)
DO_ABA(gvec_saba_s, int32_t)
DO_ABA(gvec_saba_d, int64_t)

DO_ABA(gvec_uaba_b, uint8_t)
DO_ABA(gvec_uaba_h, uint16_t)
DO_ABA(gvec_uaba_s, uint32_t)
DO_ABA(gvec_uaba_d, uint64_t)