/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GNU GPL v2.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q
#define NEON_TYPE1(name, type) \
typedef struct \
{ \
    type v1; \
} neon_##name;

#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v2; \
    type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v4; \
    type v3; \
    type v2; \
    type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
    type v3; \
    type v4; \
} neon_##name;
#endif
NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
#undef NEON_TYPE4
#undef NEON_TYPE2
#undef NEON_TYPE1
/* Copy from a uint32_t to a vector structure type.  */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)
#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);
#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4);

#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}
/* Unary operators.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}
#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = ~0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        SET_QC();
        res = ~0;
    }
    return res;
}

uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (res < src1) {
        SET_QC();
        res = ~(uint64_t)0;
    }
    return res;
}
#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 > 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}
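/* For illustration: in the 32-bit case above, 0x7fffffff + 1 wraps to
 * 0x80000000; the operands agree in sign while the result's sign differs,
 * so QC is set and the result saturates to
 * ~(((int32_t)a >> 31) ^ SIGNBIT) = 0x7fffffff.
 */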
/* Unsigned saturating accumulate of signed value
 *
 * Op1/Rn is treated as signed
 * Op2/Rd is treated as unsigned
 *
 * Explicit casting is used to ensure the correct sign extension of
 * inputs. The result is treated as an unsigned value and saturated as such.
 *
 * We use a macro for the 8/16 bit cases which expects signed integers va,
 * vb, and vr for the interim calculation and an unsigned 32 bit result
 * value r.
 */

#define USATACC(bits, shift) \
    do { \
        va = sextract32(a, shift, bits); \
        vb = extract32(b, shift, bits); \
        vr = va + vb; \
        if (vr > UINT##bits##_MAX) { \
            SET_QC(); \
            vr = UINT##bits##_MAX; \
        } else if (vr < 0) { \
            SET_QC(); \
            vr = 0; \
        } \
        r = deposit32(r, shift, bits, vr); \
    } while (0)
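/* Worked example for one 8-bit lane: a lane value of 0xff in Op1/Rn is
 * sign-extended by sextract32() to va = -1; with vb = 0 the sum vr = -1
 * is negative, so the lane saturates to 0 and QC is set.
 */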
uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int16_t va, vb, vr;
    uint32_t r = 0;

    USATACC(8, 0);
    USATACC(8, 8);
    USATACC(8, 16);
    USATACC(8, 24);
    return r;
}

uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int32_t va, vb, vr;
    uint32_t r = 0;

    USATACC(16, 0);
    USATACC(16, 16);
    return r;
}

#undef USATACC
uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int64_t va = (int32_t)a;
    int64_t vb = (uint32_t)b;
    int64_t vr = va + vb;
    if (vr > UINT32_MAX) {
        SET_QC();
        vr = UINT32_MAX;
    } else if (vr < 0) {
        SET_QC();
        vr = 0;
    }
    return vr;
}
uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t res;

    res = a + b;
    /* We only need to look at the pattern of SIGN bits to detect
     * +ve/-ve saturation
     */
    if (~a & b & ~res & SIGNBIT64) {
        SET_QC();
        res = UINT64_MAX;
    } else if (a & ~b & res & SIGNBIT64) {
        SET_QC();
        res = 0;
    }
    return res;
}
/* Signed saturating accumulate of unsigned value
 *
 * Op1/Rn is treated as unsigned
 * Op2/Rd is treated as signed
 *
 * The result is treated as a signed value and saturated as such
 *
 * We use a macro for the 8/16 bit cases which expects signed integers va,
 * vb, and vr for the interim calculation and an unsigned 32 bit result
 * value r.
 */

#define SSATACC(bits, shift) \
    do { \
        va = extract32(a, shift, bits); \
        vb = sextract32(b, shift, bits); \
        vr = va + vb; \
        if (vr > INT##bits##_MAX) { \
            SET_QC(); \
            vr = INT##bits##_MAX; \
        } else if (vr < INT##bits##_MIN) { \
            SET_QC(); \
            vr = INT##bits##_MIN; \
        } \
        r = deposit32(r, shift, bits, vr); \
    } while (0)
uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int16_t va, vb, vr;
    uint32_t r = 0;

    SSATACC(8, 0);
    SSATACC(8, 8);
    SSATACC(8, 16);
    SSATACC(8, 24);
    return r;
}

uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int32_t va, vb, vr;
    uint32_t r = 0;

    SSATACC(16, 0);
    SSATACC(16, 16);
    return r;
}

#undef SSATACC
uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int64_t res;
    int64_t op1 = (uint32_t)a;
    int64_t op2 = (int32_t)b;

    res = op1 + op2;
    if (res > INT32_MAX) {
        SET_QC();
        res = INT32_MAX;
    } else if (res < INT32_MIN) {
        SET_QC();
        res = INT32_MIN;
    }
    return res;
}
uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t res;

    res = a + b;
    /* We only need to look at the pattern of SIGN bits to detect an overflow */
    if (((a & res)
         | (~b & res)
         | (a & ~b)) & SIGNBIT64) {
        SET_QC();
        res = INT64_MAX;
    }
    return res;
}
#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT
uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        SET_QC();
        res = 0;
    }
    return res;
}

uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    if (src1 < src2) {
        SET_QC();
        res = 0;
    } else {
        res = src1 - src2;
    }
    return res;
}
#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 < 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT
uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}
#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}
#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}
#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
NEON_VOP(cgt_s8, neon_s8, 4)
NEON_VOP(cgt_u8, neon_u8, 4)
NEON_VOP(cgt_s16, neon_s16, 2)
NEON_VOP(cgt_u16, neon_u16, 2)
NEON_VOP(cgt_s32, neon_s32, 1)
NEON_VOP(cgt_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
NEON_VOP(cge_s8, neon_s8, 4)
NEON_VOP(cge_u8, neon_u8, 4)
NEON_VOP(cge_s16, neon_s16, 2)
NEON_VOP(cge_u16, neon_u16, 2)
NEON_VOP(cge_s32, neon_s32, 1)
NEON_VOP(cge_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_VOP(min_s8, neon_s8, 4)
NEON_VOP(min_u8, neon_u8, 4)
NEON_VOP(min_s16, neon_s16, 2)
NEON_VOP(min_u16, neon_u16, 2)
NEON_VOP(min_s32, neon_s32, 1)
NEON_VOP(min_u32, neon_u32, 1)
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_VOP(max_s8, neon_s8, 4)
NEON_VOP(max_u8, neon_u8, 4)
NEON_VOP(max_s16, neon_s16, 2)
NEON_VOP(max_u16, neon_u16, 2)
NEON_VOP(max_s32, neon_s32, 1)
NEON_VOP(max_u32, neon_u32, 1)
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) \
    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
NEON_VOP(abd_s8, neon_s8, 4)
NEON_VOP(abd_u8, neon_u8, 4)
NEON_VOP(abd_s16, neon_s16, 2)
NEON_VOP(abd_u16, neon_u16, 2)
NEON_VOP(abd_s32, neon_s32, 1)
NEON_VOP(abd_u32, neon_u32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_u8, neon_u8, 4)
NEON_VOP(shl_u16, neon_u16, 2)
NEON_VOP(shl_u32, neon_u32, 1)
#undef NEON_FN

uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64 || shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_s8, neon_s8, 4)
NEON_VOP(shl_s16, neon_s16, 2)
NEON_VOP(shl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        val = 0;
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if ((tmp >= (ssize_t)sizeof(src1) * 8) \
        || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if ((shift >= 32) || (shift <= -32)) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}
/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if ((shift >= 64) || (shift <= -64)) {
        val = 0;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000LL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}
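/* The two-step sequence above (shift by -shift - 1, increment, shift by 1)
 * is equivalent to adding the rounding constant 1 << (-shift - 1) and then
 * shifting by -shift, e.g. (val + 0x80) >> 8 for shift == -8, but without
 * ever forming the sum that could overflow.
 */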
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32 || shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}
/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    if (shift >= 64 || shift < -64) {
        val = 0;
    } else if (shift == -64) {
        /* Rounding a 1-bit result just preserves that bit.  */
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN
uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            val = ~(uint64_t)0;
            SET_QC();
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = src1; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> 31; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN
uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        int8_t tmp; \
        tmp = (int8_t)src2; \
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            if (src1) { \
                SET_QC(); \
                dest = ~0; \
            } else { \
                dest = 0; \
            } \
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
            dest = 0; \
        } else if (tmp < 0) { \
            dest = src1 >> -tmp; \
        } else { \
            dest = src1 << tmp; \
            if ((dest >> tmp) != src1) { \
                SET_QC(); \
                dest = ~0; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
#undef NEON_FN
uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
    if ((int32_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u32(env, valop, shiftop);
}

uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    if ((int64_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u64(env, valop, shiftop);
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32) {
        if (val) {
            SET_QC();
            dest = ~0;
        } else {
            dest = 0;
        }
    } else if (shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = ~0;
        }
    }
    return dest;
}
/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = ~0;
        }
    } else if (shift < -64) {
        val = 0;
    } else if (shift == -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~0;
        }
    }
    return val;
}
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN
/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32) {
        if (val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        } else {
            dest = 0;
        }
    } else if (shift <= -32) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        }
    }
    return dest;
}
/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;

    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}
uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80808080u;
    a &= ~0x80808080u;
    b &= ~0x80808080u;
    return (a + b) ^ mask;
}

uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80008000u;
    a &= ~0x80008000u;
    b &= ~0x80008000u;
    return (a + b) ^ mask;
}
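/* For illustration: the mask trick above performs four (or two) lane adds
 * in a single 32-bit addition. Clearing the top bit of each lane stops a
 * carry from crossing into its neighbour; XORing the masked sign bits back
 * in restores the per-lane top bit. E.g. byte lanes 0xff + 0x01:
 * mask = 0x80, 0x7f + 0x01 = 0x80, 0x80 ^ 0x80 = 0x00, so the lane wraps
 * to zero without disturbing the lane above it.
 */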
#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN
/* Polynomial multiplication is like integer multiplication except the
   partial products are XORed, not added.  */
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
{
    uint32_t result = 0;
    uint32_t mask;

    while (op1) {
        mask = 0;
        if (op1 & 1)
            mask |= 0xff;
        if (op1 & (1 << 8))
            mask |= (0xff << 8);
        if (op1 & (1 << 16))
            mask |= (0xff << 16);
        if (op1 & (1 << 24))
            mask |= (0xff << 24);
        result ^= op2 & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2 = (op2 << 1) & 0xfefefefe;
    }
    return result;
}
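/* Illustrative example: in this carry-less arithmetic
 * 0x03 * 0x03 = (x + 1)(x + 1) = x^2 + 1 = 0x05, because the two middle
 * partial products XOR away rather than carrying as in the integer
 * product 3 * 3 = 9.
 */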
uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
{
    uint64_t result = 0;
    uint64_t mask;
    uint64_t op2ex = op2;
    op2ex = (op2ex & 0xff) |
        ((op2ex & 0xff00) << 8) |
        ((op2ex & 0xff0000) << 16) |
        ((op2ex & 0xff000000) << 24);
    while (op1) {
        mask = 0;
        if (op1 & 1) {
            mask |= 0xffff;
        }
        if (op1 & (1 << 8)) {
            mask |= (0xffffU << 16);
        }
        if (op1 & (1 << 16)) {
            mask |= (0xffffULL << 32);
        }
        if (op1 & (1 << 24)) {
            mask |= (0xffffULL << 48);
        }
        result ^= op2ex & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2ex <<= 1;
    }
    return result;
}
#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
NEON_VOP(ceq_u8, neon_u8, 4)
NEON_VOP(ceq_u16, neon_u16, 2)
NEON_VOP(ceq_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
#undef NEON_FN
/* Count Leading Sign/Zero Bits.  */
static inline int do_clz8(uint8_t x)
{
    int n;
    for (n = 8; x; n--)
        x >>= 1;
    return n;
}

static inline int do_clz16(uint16_t x)
{
    int n;
    for (n = 16; x; n--)
        x >>= 1;
    return n;
}

#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN
#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_cls_s32)(uint32_t x)
{
    int count;
    if ((int32_t)x < 0)
        x = ~x;
    for (count = 32; x; count--)
        x = x >> 1;
    return count - 1;
}
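/* Bit count: a classic SWAR population count that sums adjacent 1-, 2-
 * and 4-bit fields in parallel, leaving each byte lane holding its own
 * bit count, e.g. 0x000000ff becomes 0x00000008.
 */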
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
{
    x = (x & 0x55555555) + ((x >>  1) & 0x55555555);
    x = (x & 0x33333333) + ((x >>  2) & 0x33333333);
    x = (x & 0x0f0f0f0f) + ((x >>  4) & 0x0f0f0f0f);
    return x;
}
/* Reverse bits in each 8 bit word */
uint32_t HELPER(neon_rbit_u8)(uint32_t x)
{
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}
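/* For illustration: the nibble swap followed by the bit shuffle reverses
 * each byte independently, so neon_rbit_u8(0x00000001) == 0x00000080.
 */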
#define NEON_QDMULH16(dest, src1, src2, round) do { \
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
        SET_QC(); \
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int32_t old = tmp; \
        tmp += 1 << 15; \
        if ((int32_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT - 1; \
        } \
    } \
    dest = tmp >> 16; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16
#define NEON_QDMULH32(dest, src1, src2, round) do { \
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
        SET_QC(); \
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int64_t old = tmp; \
        tmp += (int64_t)1 << 31; \
        if ((int64_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT64 - 1; \
        } \
    } \
    dest = tmp >> 32; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32
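/* For illustration: a 16-bit lane of 0x8000 multiplied by 0x8000 gives the
 * product 0x40000000; doubling it to 0x80000000 overflows, so the macro
 * saturates to 0x7fffffff, sets QC, and the returned high half is 0x7fff.
 */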
uint32_t HELPER(neon_narrow_u8)(uint64_t x)
{
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
           | ((x >> 24) & 0xff000000u);
}

uint32_t HELPER(neon_narrow_u16)(uint64_t x)
{
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}
uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
{
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
{
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
{
    x &= 0xff80ff80ff80ff80ull;
    x += 0x0080008000800080ull;
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
{
    x &= 0xffff8000ffff8000ull;
    x += 0x0000800000008000ull;
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}
uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s & 0x8000) { \
        SET_QC(); \
    } else { \
        if (s > 0xff) { \
            d = 0xff; \
            SET_QC(); \
        } else  { \
            d = s; \
        } \
        res |= (uint32_t)d << (n / 2); \
    }

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}
uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low & 0x80000000) {
        low = 0;
        SET_QC();
    } else if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high & 0x80000000) {
        high = 0;
        SET_QC();
    } else if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}
uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}
uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
{
    int32_t low;
    int32_t high;
    low = x;
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    high = x >> 32;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}
uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
{
    if (x & 0x8000000000000000ull) {
        SET_QC();
        return 0;
    }
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}
uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}
uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return ((int64_t)x >> 63) ^ 0x7fffffff;
    }
    return x;
}
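/* For illustration: neon_narrow_sat_s32(env, 0x100000000) finds that the
 * value does not fit in 32 bits, sets QC, and returns
 * 0 ^ 0x7fffffff = 0x7fffffff; a negative out-of-range input would
 * saturate to 0x80000000 instead.
 */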
uint64_t HELPER(neon_widen_u8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint8_t)x;
    tmp = (uint8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}
uint64_t HELPER(neon_widen_s8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint16_t)(int8_t)x;
    tmp = (uint16_t)(int8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint16_t)(int8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint16_t)(int8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}
uint64_t HELPER(neon_widen_u16)(uint32_t x)
{
    uint64_t high = (uint16_t)(x >> 16);
    return ((uint16_t)x) | (high << 32);
}

uint64_t HELPER(neon_widen_s16)(uint32_t x)
{
    uint64_t high = (int16_t)(x >> 16);
    return ((uint32_t)(int16_t)x) | (high << 32);
}
uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000800080008000ull;
    a &= ~0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000000080000000ull;
    a &= ~0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a + b) ^ mask;
}
uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
    uint64_t tmp;
    uint64_t tmp2;

    tmp = a & 0x0000ffff0000ffffull;
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
    tmp2 = b & 0xffff0000ffff0000ull;
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
    return    ( tmp         & 0xffff)
            | ((tmp  >> 16) & 0xffff0000ull)
            | ((tmp2 << 16) & 0xffff00000000ull)
            | ( tmp2        & 0xffff000000000000ull);
}

uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
    uint32_t low = a + (a >> 32);
    uint32_t high = b + (b >> 32);
    return low + ((uint64_t)high << 32);
}
uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000800080008000ull;
    a |= 0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000000080000000ull;
    a |= 0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a - b) ^ mask;
}
uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}

uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}
/* We have to do the arithmetic in a larger type than
 * the input type, because for example with a signed 32 bit
 * op the absolute difference can overflow a signed 32 bit value.
 */
#define DO_ABD(dest, x, y, intype, arithtype) do {            \
    arithtype tmp_x = (intype)(x);                            \
    arithtype tmp_y = (intype)(y);                            \
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
    } while(0)
uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_ABD(result, a, b, uint8_t, uint32_t);
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
    result |= tmp << 48;
    return result;
}
uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_ABD(result, a, b, int8_t, int32_t);
    DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
    result |= tmp << 48;
    return result;
}
uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_ABD(result, a, b, uint16_t, uint32_t);
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}
uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_ABD(result, a, b, int16_t, int32_t);
    DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
    return result | (tmp << 32);
}
uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, uint32_t, uint64_t);
    return result;
}

uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, int32_t, int64_t);
    return result;
}
#undef DO_ABD
/* Widening multiply. Named type is the source type.  */
#define DO_MULL(dest, x, y, type1, type2) do { \
    type1 tmp_x = x; \
    type1 tmp_y = y; \
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
    } while(0)
uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
    result |= tmp << 48;
    return result;
}
uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
    result |= tmp << 48;
    return result;
}
uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
    return result | (tmp << 32);
}
#undef DO_MULL
uint64_t HELPER(neon_negl_u16)(uint64_t x)
{
    uint16_t tmp;
    uint64_t result;
    result = (uint16_t)-x;
    tmp = -(x >> 16);
    result |= (uint64_t)tmp << 16;
    tmp = -(x >> 32);
    result |= (uint64_t)tmp << 32;
    tmp = -(x >> 48);
    result |= (uint64_t)tmp << 48;
    return result;
}

uint64_t HELPER(neon_negl_u32)(uint64_t x)
{
    uint32_t low = -x;
    uint32_t high = -(x >> 32);
    return low | ((uint64_t)high << 32);
}
/* Saturating sign manipulation.  */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QABS8(vec.v1);
    DO_QABS8(vec.v2);
    DO_QABS8(vec.v3);
    DO_QABS8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QABS8
#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QNEG8(vec.v1);
    DO_QNEG8(vec.v2);
    DO_QNEG8(vec.v3);
    DO_QNEG8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QNEG8
#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QABS16(vec.v1);
    DO_QABS16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QABS16
#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QNEG16(vec.v1);
    DO_QNEG16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QNEG16
uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else if ((int32_t)x < 0) {
        x = -x;
    }
    return x;
}

uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else {
        x = -x;
    }
    return x;
}
uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x)
{
    if (x == SIGNBIT64) {
        SET_QC();
        x = ~SIGNBIT64;
    } else if ((int64_t)x < 0) {
        x = -x;
    }
    return x;
}

uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
{
    if (x == SIGNBIT64) {
        SET_QC();
        x = ~SIGNBIT64;
    } else {
        x = -x;
    }
    return x;
}
/* NEON Float helpers.  */
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = make_float32(a);
    float32 f1 = make_float32(b);
    return float32_val(float32_abs(float32_sub(f0, f1, fpst)));
}
/* Floating point comparisons produce an integer result.
 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
 */
uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
}

uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_le(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_lt(make_float32(b), make_float32(a), fpst);
}
uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_le(f1, f0, fpst);
}

uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_lt(f1, f0, fpst);
}
uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_le(f1, f0, fpst);
}

uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_lt(f1, f0, fpst);
}
#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
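/* For illustration: ELEM(0x1122334455667788, 2, 16) shifts right by 32 and
 * masks with 0xffff, extracting 16-bit lane 2 = 0x3344.
 */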
void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}
void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}
/* Helper function for 64 bit polynomial multiply case:
 * perform PolynomialMult(op1, op2) and return either the top or
 * bottom half of the 128 bit result.
 */
uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
{
    int bitnum;
    uint64_t res = 0;

    for (bitnum = 0; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 << bitnum;
        }
    }
    return res;
}

uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
{
    int bitnum;
    uint64_t res = 0;

    /* bit 0 of op1 can't influence the high 64 bits at all */
    for (bitnum = 1; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 >> (64 - bitnum);
        }
    }
    return res;
}
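/* For illustration: multiplying x^63 (op1 = 0x8000000000000000) by x
 * (op2 = 2) yields x^64, so neon_pmull_64_lo returns 0 and
 * neon_pmull_64_hi returns 1.
 */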