/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GNU GPL v2.
 */
#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#include "vec_internal.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

#define SET_QC() env->vfp.qc[0] = 1

#define NEON_TYPE1(name, type) \
typedef struct \
{ \
    type v1; \
} neon_##name;
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v2; \
    type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v4; \
    type v3; \
    type v2; \
    type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
    type v3; \
    type v4; \
} neon_##name;
#endif

NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)

/* Copy from a uint32_t to a vector structure type.  */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)

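/*
 * Illustrative example: with the lane layouts above, v1 is always the
 * least significant lane regardless of host endianness, so unpacking the
 * uint32_t 0x04030201 into a neon_u8 yields v1 = 0x01, v2 = 0x02,
 * v3 = 0x03, v4 = 0x04, and NEON_PACK performs the exact inverse.
 */
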
#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);

#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

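/*
 * For example, NEON_VOP(hadd_u8, neon_u8, 4) expands to
 * uint32_t HELPER(neon_hadd_u8)(uint32_t arg1, uint32_t arg2): it unpacks
 * both arguments into neon_u8 lanes, applies the currently defined
 * NEON_FN to each of the four lane pairs and repacks the result.
 * NEON_VOP_ENV is the same but takes CPUARMState so NEON_FN can set QC.
 */
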
/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4);

#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

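/*
 * Pairwise example: NEON_POP(padd_u8, neon_u8, 4) (instantiated further
 * down with NEON_FN as lane addition) produces a result whose low two
 * lanes come from adjacent pairs of arg1 and whose high two lanes come
 * from adjacent pairs of arg2.
 */
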
/* Unary operators.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = ~0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

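/*
 * Worked example for qadd_u8: a lane addition of 0xf0 + 0x20 gives 0x110,
 * which does not fit in a uint8_t, so the lane saturates to 0xff and the
 * QC flag is set.
 */
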
uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        SET_QC();
        res = ~0;
    }
    return res;
}

uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (res < src1) {
        SET_QC();
        res = ~(uint64_t)0;
    }
    return res;
}

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 > 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

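/*
 * Note on the two signed saturating adds above: overflow occurred iff the
 * operands have the same sign while the result's sign differs from the
 * first operand; the recovery expression then yields INT32_MAX/INT64_MAX
 * when that operand is non-negative and INT32_MIN/INT64_MIN otherwise.
 */
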
/* Unsigned saturating accumulate of signed value
 *
 * Op1/Rn is treated as signed
 * Op2/Rd is treated as unsigned
 *
 * Explicit casting is used to ensure the correct sign extension of
 * inputs. The result is treated as an unsigned value and saturated as such.
 *
 * We use a macro for the 8/16 bit cases which expects signed integers of va,
 * vb, and vr for interim calculation and an unsigned 32 bit result value r.
 */

#define USATACC(bits, shift) \
    do { \
        va = sextract32(a, shift, bits); \
        vb = extract32(b, shift, bits); \
        vr = va + vb; \
        if (vr > UINT##bits##_MAX) { \
            SET_QC(); \
            vr = UINT##bits##_MAX; \
        } else if (vr < 0) { \
            SET_QC(); \
            vr = 0; \
        } \
        r = deposit32(r, shift, bits, vr); \
    } while (0)

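/*
 * Illustrative 8-bit lane example: an Op1 lane of 0xff is the signed value
 * -1; added to an Op2 lane of 0x00 it gives vr = -1, which is below zero,
 * so the lane saturates to 0 and QC is set.
 */
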
uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int16_t va, vb, vr;
    uint32_t r = 0;

    USATACC(8, 0);
    USATACC(8, 8);
    USATACC(8, 16);
    USATACC(8, 24);
    return r;
}

uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int32_t va, vb, vr;
    uint32_t r = 0;

    USATACC(16, 0);
    USATACC(16, 16);
    return r;
}

#undef USATACC

uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int64_t va = (int32_t)a;
    int64_t vb = (uint32_t)b;
    int64_t vr = va + vb;
    if (vr > UINT32_MAX) {
        SET_QC();
        vr = UINT32_MAX;
    } else if (vr < 0) {
        SET_QC();
        vr = 0;
    }
    return vr;
}

uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t res;
    res = a + b;
    /* We only need to look at the pattern of SIGN bits to detect
     * +ve/-ve saturation
     */
    if (~a & b & ~res & SIGNBIT64) {
        SET_QC();
        res = UINT64_MAX;
    } else if (a & ~b & res & SIGNBIT64) {
        SET_QC();
        res = 0;
    }
    return res;
}

/* Signed saturating accumulate of unsigned value
 *
 * Op1/Rn is treated as unsigned
 * Op2/Rd is treated as signed
 *
 * The result is treated as a signed value and saturated as such
 *
 * We use a macro for the 8/16 bit cases which expects signed integers of va,
 * vb, and vr for interim calculation and an unsigned 32 bit result value r.
 */

#define SSATACC(bits, shift) \
    do { \
        va = extract32(a, shift, bits); \
        vb = sextract32(b, shift, bits); \
        vr = va + vb; \
        if (vr > INT##bits##_MAX) { \
            SET_QC(); \
            vr = INT##bits##_MAX; \
        } else if (vr < INT##bits##_MIN) { \
            SET_QC(); \
            vr = INT##bits##_MIN; \
        } \
        r = deposit32(r, shift, bits, vr); \
    } while (0)

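/*
 * Illustrative 8-bit lane example: an Op1 lane of 0xff (255, unsigned)
 * added to an Op2 lane of 0x7f (+127) gives 382, which exceeds INT8_MAX,
 * so the lane saturates to 0x7f and QC is set.
 */
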
uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int16_t va, vb, vr;
    uint32_t r = 0;

    SSATACC(8, 0);
    SSATACC(8, 8);
    SSATACC(8, 16);
    SSATACC(8, 24);
    return r;
}

uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int32_t va, vb, vr;
    uint32_t r = 0;

    SSATACC(16, 0);
    SSATACC(16, 16);
    return r;
}

#undef SSATACC

uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int64_t res;
    int64_t op1 = (uint32_t)a;
    int64_t op2 = (int32_t)b;
    res = op1 + op2;
    if (res > INT32_MAX) {
        SET_QC();
        res = INT32_MAX;
    } else if (res < INT32_MIN) {
        SET_QC();
        res = INT32_MIN;
    }
    return res;
}

uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t res;
    res = a + b;
    /* We only need to look at the pattern of SIGN bits to detect an overflow */
    if (((a & res)
         | (~b & res)
         | (a & ~b)) & SIGNBIT64) {
        SET_QC();
        res = INT64_MAX;
    }
    return res;
}

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        SET_QC();
        res = 0;
    }
    return res;
}

uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    if (src1 < src2) {
        SET_QC();
        res = 0;
    } else {
        res = src1 - src2;
    }
    return res;
}

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 < 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

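/*
 * Example for the 32-bit helpers above: hadd(7, 5) is computed as
 * (7 >> 1) + (5 >> 1) plus a +1 correction because both inputs are odd,
 * giving 6; shifting before adding avoids overflowing the 32-bit type.
 */
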
#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, NULL))
NEON_VOP(shl_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, NULL))
NEON_VOP(shl_s16, neon_s16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
NEON_VOP(rshl_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_rshl_s32)(uint32_t val, uint32_t shift)
{
    return do_sqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
}

uint64_t HELPER(neon_rshl_s64)(uint64_t val, uint64_t shift)
{
    return do_sqrshl_d(val, (int8_t)shift, true, NULL);
}

#define NEON_FN(dest, src1, src2) \
    (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, NULL))
NEON_VOP(rshl_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, NULL))
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN

uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shift)
{
    return do_uqrshl_bhs(val, (int8_t)shift, 32, true, NULL);
}

uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shift)
{
    return do_uqrshl_d(val, (int8_t)shift, true, NULL);
}

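/*
 * The do_{u,s}qrshl_bhs/_d helpers come from vec_internal.h; as used here,
 * a positive (int8_t) shift count shifts left and a negative one shifts
 * right, the boolean selects the rounding variants, and passing NULL
 * instead of a qc pointer disables saturation reporting.
 */
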
#define NEON_FN(dest, src1, src2) \
    (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
{
    return do_uqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
}

uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
{
    return do_uqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
}

#define NEON_FN(dest, src1, src2) \
    (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
{
    return do_sqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
}

uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
{
    return do_sqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
}

#define NEON_FN(dest, src1, src2) \
    (dest = do_suqrshl_bhs(src1, (int8_t)src2, 8, false, env->vfp.qc))
NEON_VOP_ENV(qshlu_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_suqrshl_bhs(src1, (int8_t)src2, 16, false, env->vfp.qc))
NEON_VOP_ENV(qshlu_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
{
    return do_suqrshl_bhs(val, (int8_t)shift, 32, false, env->vfp.qc);
}

uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
{
    return do_suqrshl_d(val, (int8_t)shift, false, env->vfp.qc);
}

#define NEON_FN(dest, src1, src2) \
    (dest = do_uqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_uqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shift)
{
    return do_uqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc);
}

uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shift)
{
    return do_uqrshl_d(val, (int8_t)shift, true, env->vfp.qc);
}

#define NEON_FN(dest, src1, src2) \
    (dest = do_sqrshl_bhs(src1, (int8_t)src2, 8, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    (dest = do_sqrshl_bhs(src1, (int8_t)src2, 16, true, env->vfp.qc))
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t val, uint32_t shift)
{
    return do_sqrshl_bhs(val, (int8_t)shift, 32, true, env->vfp.qc);
}

uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t val, uint64_t shift)
{
    return do_sqrshl_d(val, (int8_t)shift, true, env->vfp.qc);
}

uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80808080u;
    a &= ~0x80808080u;
    b &= ~0x80808080u;
    return (a + b) ^ mask;
}

uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80008000u;
    a &= ~0x80008000u;
    b &= ~0x80008000u;
    return (a + b) ^ mask;
}

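/*
 * The two helpers above perform lane-wise additions inside one 32-bit
 * value without carries crossing lane boundaries: the top bit of every
 * lane is cleared before the add, so no carry can propagate out of a
 * lane, and the correct top bits are restored by XORing with the saved
 * mask (addition without carry-out is XOR in that bit position).
 */
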
#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN

/* Count Leading Sign/Zero Bits.  */
static inline int do_clz8(uint8_t x)
{
    int n;
    for (n = 8; x; n--)
        x >>= 1;
    return n;
}

static inline int do_clz16(uint16_t x)
{
    int n;
    for (n = 16; x; n--)
        x >>= 1;
    return n;
}

#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_cls_s32)(uint32_t x)
{
    int count;
    if ((int32_t)x < 0)
        x = ~x;
    for (count = 32; x; count--)
        x = x >> 1;
    return count - 1;
}

/* Population count: each byte of the result holds the number of set bits
 * in the corresponding byte of the input.
 */
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
{
    x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
    x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
    return x;
}

/* Reverse bits in each 8 bit word */
uint32_t HELPER(neon_rbit_u8)(uint32_t x)
{
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}

#define NEON_QDMULH16(dest, src1, src2, round) do { \
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
        SET_QC(); \
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int32_t old = tmp; \
        tmp += 1 << 15; \
        if ((int32_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT - 1; \
        } \
    } \
    dest = tmp >> 16; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16

#define NEON_QDMULH32(dest, src1, src2, round) do { \
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
        SET_QC(); \
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int64_t old = tmp; \
        tmp += (int64_t)1 << 31; \
        if ((int64_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT64 - 1; \
        } \
    } \
    dest = tmp >> 32; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32

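/*
 * Worked example: qdmulh_s16(0x4000, 0x4000) is (2 * 0x4000 * 0x4000) >> 16
 * = 0x2000, while 0x8000 * 0x8000 doubled cannot be represented, so that
 * lane saturates to 0x7fff and QC is set.
 */
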
uint32_t HELPER(neon_narrow_u8)(uint64_t x)
{
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
           | ((x >> 24) & 0xff000000u);
}

uint32_t HELPER(neon_narrow_u16)(uint64_t x)
{
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}

uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
{
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
{
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
{
    x &= 0xff80ff80ff80ff80ull;
    x += 0x0080008000800080ull;
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
{
    x &= 0xffff8000ffff8000ull;
    x += 0x0000800000008000ull;
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s & 0x8000) { \
        SET_QC(); \
    } else { \
        if (s > 0xff) { \
            d = 0xff; \
            SET_QC(); \
        } else { \
            d = s; \
        } \
        res |= (uint32_t)d << (n / 2); \
    }

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low & 0x80000000) {
        low = 0;
        SET_QC();
    } else if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high & 0x80000000) {
        high = 0;
        SET_QC();
    } else if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
{
    int32_t low;
    int32_t high;
    low = x;
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    high = x >> 32;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}

uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
{
    if (x & 0x8000000000000000ull) {
        SET_QC();
        return 0;
    }
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return ((int64_t)x >> 63) ^ 0x7fffffff;
    }
    return x;
}

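/*
 * Example: neon_narrow_sat_s32 maps 0x0000000123456789 to 0x7fffffff and
 * sets QC (positive overflow), while 0xffffffff80000000 narrows to
 * 0x80000000 unchanged with QC untouched.
 */
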
uint64_t HELPER(neon_widen_u8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint8_t)x;
    tmp = (uint8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_s8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint16_t)(int8_t)x;
    tmp = (uint16_t)(int8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint16_t)(int8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint16_t)(int8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_u16)(uint32_t x)
{
    uint64_t high = (uint16_t)(x >> 16);
    return ((uint16_t)x) | (high << 32);
}

uint64_t HELPER(neon_widen_s16)(uint32_t x)
{
    uint64_t high = (int16_t)(x >> 16);
    return ((uint32_t)(int16_t)x) | (high << 32);
}

uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000800080008000ull;
    a &= ~0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000000080000000ull;
    a &= ~0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
    uint64_t tmp;
    uint64_t tmp2;

    tmp = a & 0x0000ffff0000ffffull;
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
    tmp2 = b & 0xffff0000ffff0000ull;
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
    return    ( tmp         & 0xffff)
            | ((tmp  >> 16) & 0xffff0000ull)
            | ((tmp2 << 16) & 0xffff00000000ull)
            | ( tmp2        & 0xffff000000000000ull);
}

uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
    uint32_t low = a + (a >> 32);
    uint32_t high = b + (b >> 32);
    return low + ((uint64_t)high << 32);
}

uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000800080008000ull;
    a |= 0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000000080000000ull;
    a |= 0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}

uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}

/* We have to do the arithmetic in a larger type than
 * the input type, because for example with a signed 32 bit
 * op the absolute difference can overflow a signed 32 bit value.
 */
#define DO_ABD(dest, x, y, intype, arithtype) do {            \
    arithtype tmp_x = (intype)(x);                            \
    arithtype tmp_y = (intype)(y);                            \
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
    } while(0)

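/*
 * Example: DO_ABD(dest, 10, 250, uint8_t, uint32_t) computes the absolute
 * difference in 32-bit arithmetic and yields 240; with int8_t/int32_t the
 * same bytes are 10 and -6, giving 16.
 */
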
uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint8_t, uint32_t);
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int8_t, int32_t);
    DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint16_t, uint32_t);
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int16_t, int32_t);
    DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, uint32_t, uint64_t);
    return result;
}

uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, int32_t, int64_t);
    return result;
}

/* Widening multiply. Named type is the source type.  */
#define DO_MULL(dest, x, y, type1, type2) do { \
    type1 tmp_x = x; \
    type1 tmp_y = y; \
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
    } while(0)

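/*
 * Example: DO_MULL(dest, 0xff, 0xff, uint8_t, uint16_t) widens both
 * operands before multiplying and yields 0xfe01; with int8_t as the source
 * type the operands are -1 * -1 and the 16-bit result is 1.
 */
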
uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_negl_u16)(uint64_t x)
{
    uint16_t tmp;
    uint64_t result;
    result = (uint16_t)-x;
    tmp = -(x >> 16);
    result |= (uint64_t)tmp << 16;
    tmp = -(x >> 32);
    result |= (uint64_t)tmp << 32;
    tmp = -(x >> 48);
    result |= (uint64_t)tmp << 48;
    return result;
}

uint64_t HELPER(neon_negl_u32)(uint64_t x)
{
    uint32_t low = -x;
    uint32_t high = -(x >> 32);
    return low | ((uint64_t)high << 32);
}

/* Saturating sign manipulation.  */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QABS8(vec.v1);
    DO_QABS8(vec.v2);
    DO_QABS8(vec.v3);
    DO_QABS8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}

#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QNEG8(vec.v1);
    DO_QNEG8(vec.v2);
    DO_QNEG8(vec.v3);
    DO_QNEG8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}

#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QABS16(vec.v1);
    DO_QABS16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}

#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QNEG16(vec.v1);
    DO_QNEG16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}

uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else if ((int32_t)x < 0) {
        x = -x;
    }
    return x;
}

uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else {
        x = -x;
    }
    return x;
}

uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x)
{
    if (x == SIGNBIT64) {
        SET_QC();
        x = ~SIGNBIT64;
    } else if ((int64_t)x < 0) {
        x = -x;
    }
    return x;
}

uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
{
    if (x == SIGNBIT64) {
        SET_QC();
        x = ~SIGNBIT64;
    } else {
        x = -x;
    }
    return x;
}

/* NEON Float helpers.  */

/* Floating point comparisons produce an integer result.
 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
 */
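/*
 * The leading minus sign in the helpers below turns softfloat's 0/1 result
 * into the 0/0xffffffff (or 0/0xffffffffffffffff) all-ones mask that the
 * NEON compare instructions produce.
 */
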
uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
}

uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_le(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_lt(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_le(f1, f0, fpst);
}

uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_lt(f1, f0, fpst);
}

uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_le(f1, f0, fpst);
}

uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_lt(f1, f0, fpst);
}

#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
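/*
 * ELEM(V, N, SIZE) extracts element N of width SIZE bits from V; for
 * example ELEM(0x11223344, 1, 8) is 0x33.
 */
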
void HELPER(neon_qunzip8)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_qunzip16)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_qunzip32)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_unzip8)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd = rd[0], zm = rm[0];

    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);

    rm[0] = m0;
    rd[0] = d0;
}

void HELPER(neon_unzip16)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd = rd[0], zm = rm[0];

    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);

    rm[0] = m0;
    rd[0] = d0;
}

void HELPER(neon_qzip8)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_qzip16)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_qzip32)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd0 = rd[0], zd1 = rd[1];
    uint64_t zm0 = rm[0], zm1 = rm[1];

    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);

    rm[0] = m0;
    rm[1] = m1;
    rd[0] = d0;
    rd[1] = d1;
}

void HELPER(neon_zip8)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd = rd[0], zm = rm[0];

    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);

    rm[0] = m0;
    rd[0] = d0;
}

void HELPER(neon_zip16)(void *vd, void *vm)
{
    uint64_t *rd = vd, *rm = vm;
    uint64_t zd = rd[0], zm = rm[0];

    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);

    rm[0] = m0;
    rd[0] = d0;
}