/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GNU GPL v2.
 */
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q

#define NEON_TYPE1(name, type) \
typedef struct \
{ \
    type v1; \
} neon_##name;
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v2; \
    type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v4; \
    type v3; \
    type v2; \
    type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
    type v3; \
    type v4; \
} neon_##name;
#endif

NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
#undef NEON_TYPE4
#undef NEON_TYPE2
#undef NEON_TYPE1

/* Copy from a uint32_t to a vector structure type.  */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)

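/* A minimal usage sketch (illustrative only, not one of the helpers;
 * lane numbering assumes a little-endian host):
 *
 *     neon_u8 vec;
 *     uint32_t out;
 *     NEON_UNPACK(neon_u8, vec, 0x04030201);  // vec.v1 = 1 ... vec.v4 = 4
 *     vec.v1 += 0x10;
 *     NEON_PACK(neon_u8, out, vec);           // out == 0x04030211
 *
 * The union-based copy avoids the strict-aliasing hazards a pointer
 * cast would introduce.
 */
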
#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);

#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

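/* For orientation, NEON_VOP(hadd_u8, neon_u8, 4) expands roughly to:
 *
 *     uint32_t HELPER(neon_hadd_u8)(uint32_t arg1, uint32_t arg2)
 *     {
 *         uint32_t res;
 *         neon_u8 vsrc1, vsrc2, vdest;
 *         NEON_UNPACK(neon_u8, vsrc1, arg1);
 *         NEON_UNPACK(neon_u8, vsrc2, arg2);
 *         NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);  // repeated for v2..v4
 *         NEON_PACK(neon_u8, res, vdest);
 *         return res;
 *     }
 *
 * This is a sketch, not a literal preprocessor dump: each helper body is
 * generated from whatever NEON_FN is #defined to at the expansion point.
 */
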
/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4); \

#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

/* Unary operators.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = ~0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

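/* Behaviour sketch for the unsigned saturating adds (assumed values,
 * per 8-bit lane): qadd_u8 of 0xff and 0x01 saturates to 0xff and sets
 * the sticky QC bit via SET_QC(); 0x70 + 0x0f stays in range and
 * returns 0x7f with QC untouched.
 */
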
uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        SET_QC();
        res = ~0;
    }
    return res;
}

uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (res < src1) {
        SET_QC();
        res = ~(uint64_t)0;
    }
    return res;
}

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 > 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

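/* The overflow test reads: the result changed sign relative to 'a'
 * ((res ^ a) & SIGNBIT) while 'a' and 'b' had the same sign
 * (!((a ^ b) & SIGNBIT)).  Worked example (assumed values):
 * 0x7fffffff + 1 = 0x80000000 trips both conditions, and
 * ~(((int32_t)a >> 31) ^ SIGNBIT) = ~(0 ^ 0x80000000) = 0x7fffffff,
 * the correctly-signed saturation value.
 */
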
uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        SET_QC();
        res = 0;
    }
    return res;
}

uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    if (src1 < src2) {
        SET_QC();
        res = 0;
    } else {
        res = src1 - src2;
    }
    return res;
}

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 < 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

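/* The 32-bit halving add is written as (src1 >> 1) + (src2 >> 1) plus a
 * carry fix-up because (src1 + src2) >> 1 could overflow 32 bits.
 * Example (assumed values): hadd_u32(0xffffffff, 0x00000003) computes
 * 0x7fffffff + 0x00000001 = 0x80000000, then adds 1 because both low
 * bits were set, giving 0x80000001 -- the exact floored average.
 */
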
#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
NEON_VOP(cgt_s8, neon_s8, 4)
NEON_VOP(cgt_u8, neon_u8, 4)
NEON_VOP(cgt_s16, neon_s16, 2)
NEON_VOP(cgt_u16, neon_u16, 2)
NEON_VOP(cgt_s32, neon_s32, 1)
NEON_VOP(cgt_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
NEON_VOP(cge_s8, neon_s8, 4)
NEON_VOP(cge_u8, neon_u8, 4)
NEON_VOP(cge_s16, neon_s16, 2)
NEON_VOP(cge_u16, neon_u16, 2)
NEON_VOP(cge_s32, neon_s32, 1)
NEON_VOP(cge_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_VOP(min_s8, neon_s8, 4)
NEON_VOP(min_u8, neon_u8, 4)
NEON_VOP(min_s16, neon_s16, 2)
NEON_VOP(min_u16, neon_u16, 2)
NEON_VOP(min_s32, neon_s32, 1)
NEON_VOP(min_u32, neon_u32, 1)
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_VOP(max_s8, neon_s8, 4)
NEON_VOP(max_u8, neon_u8, 4)
NEON_VOP(max_s16, neon_s16, 2)
NEON_VOP(max_u16, neon_u16, 2)
NEON_VOP(max_s32, neon_s32, 1)
NEON_VOP(max_u32, neon_u32, 1)
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
NEON_VOP(abd_s8, neon_s8, 4)
NEON_VOP(abd_u8, neon_u8, 4)
NEON_VOP(abd_s16, neon_s16, 2)
NEON_VOP(abd_u16, neon_u16, 2)
NEON_VOP(abd_s32, neon_s32, 1)
NEON_VOP(abd_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_u8, neon_u8, 4)
NEON_VOP(shl_u16, neon_u16, 2)
NEON_VOP(shl_u32, neon_u32, 1)
#undef NEON_FN

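/* Shift-by-register semantics sketch (assumed values): the count is the
 * signed low byte of src2, so shl_u8 with src2 = 0xff (-1) shifts right
 * by one, src2 = 2 shifts left by two, and any count of +/-8 or beyond
 * for an 8-bit lane yields 0.
 */
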
uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64 || shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_s8, neon_s8, 4)
NEON_VOP(shl_s16, neon_s16, 2)
NEON_VOP(shl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        val = 0;
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if ((tmp >= (ssize_t)sizeof(src1) * 8) \
        || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if ((shift >= 32) || (shift <= -32)) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if ((shift >= 64) || (shift <= -64)) {
        val = 0;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000LL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}

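/* Worked example of the INT64_MAX special case (assumed values): for
 * val = INT64_MAX and shift = -1, the rounding constant is 1 and val + 1
 * would overflow.  Pre-shifting by (-shift - 1) = 0 leaves INT64_MAX,
 * which is caught and replaced by 0x4000000000000000 -- exactly what
 * (INT64_MAX + 1) >> 1 would give in wider arithmetic.
 */
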
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32 || shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    if (shift >= 64 || shift < -64) {
        val = 0;
    } else if (shift == -64) {
        /* Rounding a 1-bit result just preserves that bit.  */
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN

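/* Saturation sketch (assumed values): qshl_u8 of 0x40 by +2 overflows
 * its lane (the shifted-back value no longer equals 0x40), so the
 * result saturates to 0xff and SET_QC() records the event; 0x40 by +1
 * fits and returns 0x80 with QC clear.
 */
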
uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = src1; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> 31; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        int8_t tmp; \
        tmp = (int8_t)src2; \
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            if (src1) { \
                SET_QC(); \
                dest = ~0; \
            } else { \
                dest = 0; \
            } \
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
            dest = 0; \
        } else if (tmp < 0) { \
            dest = src1 >> -tmp; \
        } else { \
            dest = src1 << tmp; \
            if ((dest >> tmp) != src1) { \
                SET_QC(); \
                dest = ~0; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
    if ((int32_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u32(env, valop, shiftop);
}

uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    if ((int64_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u64(env, valop, shiftop);
}

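/* qshlu (signed input, unsigned saturating result) sketch (assumed
 * values): any negative input saturates to 0 with QC set, e.g.
 * qshlu_s32(-1, n) == 0; non-negative inputs are delegated to the
 * unsigned qshl helpers above, so qshlu_s32(3, 1) == 6.
 */
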
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32) {
        if (val) {
            SET_QC();
            dest = ~0;
        } else {
            dest = 0;
        }
    } else if (shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = ~0;
        }
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = ~0;
        }
    } else if (shift < -64) {
        val = 0;
    } else if (shift == -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~0;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator.  */
uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32) {
        if (val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        } else {
            dest = 0;
        }
    } else if (shift <= -32) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        }
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values.  */
uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;

    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly.  */
            val = 0x4000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}

uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80808080u;
    a &= ~0x80808080u;
    b &= ~0x80808080u;
    return (a + b) ^ mask;
}

uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80008000u;
    a &= ~0x80008000u;
    b &= ~0x80008000u;
    return (a + b) ^ mask;
}

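/* The two helpers above use a SWAR trick: clear each lane's top bit,
 * add, then XOR the cleared top bits back in.  Clearing guarantees no
 * carry crosses a lane boundary, and the XOR restores the top bit's own
 * carry-less sum.  Example (assumed values): add_u8(0x00ff, 0x0001)
 * gives 0x0000 -- the low lane wraps without disturbing its neighbour.
 */
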
#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN

/* Polynomial multiplication is like integer multiplication except the
   partial products are XORed, not added.  */
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
{
    uint32_t result = 0;
    uint32_t mask;

    while (op1) {
        mask = 0;
        if (op1 & 1)
            mask |= 0xff;
        if (op1 & (1 << 8))
            mask |= (0xff << 8);
        if (op1 & (1 << 16))
            mask |= (0xff << 16);
        if (op1 & (1 << 24))
            mask |= (0xff << 24);
        result ^= op2 & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2 = (op2 << 1) & 0xfefefefe;
    }
    return result;
}

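/* Carry-less multiply example (assumed values, one 8-bit lane):
 * 0x03 * 0x06 as polynomials is (x + 1)(x^2 + x) = x^3 + x = 0x0a,
 * the XOR of the partial products 0x06 and 0x0c; integer multiplication
 * would instead propagate the carry and give 0x12.
 */
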
uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
{
    uint64_t result = 0;
    uint64_t mask;
    uint64_t op2ex = op2;
    op2ex = (op2ex & 0xff) |
        ((op2ex & 0xff00) << 8) |
        ((op2ex & 0xff0000) << 16) |
        ((op2ex & 0xff000000) << 24);
    while (op1) {
        mask = 0;
        if (op1 & 1) {
            mask |= 0xffff;
        }
        if (op1 & (1 << 8)) {
            mask |= (0xffffU << 16);
        }
        if (op1 & (1 << 16)) {
            mask |= (0xffffULL << 32);
        }
        if (op1 & (1 << 24)) {
            mask |= (0xffffULL << 48);
        }
        result ^= op2ex & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2ex <<= 1;
    }
    return result;
}

#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
NEON_VOP(ceq_u8, neon_u8, 4)
NEON_VOP(ceq_u16, neon_u16, 2)
NEON_VOP(ceq_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
#undef NEON_FN

/* Count Leading Sign/Zero Bits.  */
static inline int do_clz8(uint8_t x)
{
    int n;
    for (n = 8; x; n--)
        x >>= 1;
    return n;
}

static inline int do_clz16(uint16_t x)
{
    int n;
    for (n = 16; x; n--)
        x >>= 1;
    return n;
}

#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_cls_s32)(uint32_t x)
{
    int count;
    if ((int32_t)x < 0)
        x = ~x;
    for (count = 32; x; count--)
        x = x >> 1;
    return count - 1;
}

uint32_t HELPER(neon_cnt_u8)(uint32_t x)
{
    x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
    x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
    return x;
}

/* Reverse bits in each 8 bit word */
uint32_t HELPER(neon_rbit_u8)(uint32_t x)
{
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}

#define NEON_QDMULH16(dest, src1, src2, round) do { \
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
        SET_QC(); \
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int32_t old = tmp; \
        tmp += 1 << 15; \
        if ((int32_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT - 1; \
        } \
    } \
    dest = tmp >> 16; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16

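/* Q15 sketch for NEON_QDMULH16 (assumed values): qdmulh_s16 of 0x4000
 * (0.5) with itself doubles 0x10000000 to 0x20000000 and returns the
 * high half 0x2000 (0.25).  The only overflow case is -1.0 * -1.0:
 * 0x8000 * 0x8000 doubled would be +2.0, so it saturates to 0x7fff and
 * sets QC.
 */
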
#define NEON_QDMULH32(dest, src1, src2, round) do { \
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
        SET_QC(); \
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int64_t old = tmp; \
        tmp += (int64_t)1 << 31; \
        if ((int64_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT64 - 1; \
        } \
    } \
    dest = tmp >> 32; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32

uint32_t HELPER(neon_narrow_u8)(uint64_t x)
{
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
           | ((x >> 24) & 0xff000000u);
}

uint32_t HELPER(neon_narrow_u16)(uint64_t x)
{
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}

uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
{
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
{
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
{
    x &= 0xff80ff80ff80ff80ull;
    x += 0x0080008000800080ull;
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
{
    x &= 0xffff8000ffff8000ull;
    x += 0x0000800000008000ull;
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s & 0x8000) { \
        SET_QC(); \
    } else { \
        if (s > 0xff) { \
            d = 0xff; \
            SET_QC(); \
        } else  { \
            d = s; \
        } \
        res |= (uint32_t)d << (n / 2); \
    }

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else  { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;

    low = x;
    if (low & 0x80000000) {
        low = 0;
        SET_QC();
    } else if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high & 0x80000000) {
        high = 0;
        SET_QC();
    } else if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;

    low = x;
    if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
{
    int32_t low;
    int32_t high;

    low = x;
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    high = x >> 32;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}

uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
{
    if (x & 0x8000000000000000ull) {
        SET_QC();
        return 0;
    }
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return ((int64_t)x >> 63) ^ 0x7fffffff;
    }
    return x;
}

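/* Narrowing saturation sketch (assumed values): narrow_sat_s32 of
 * 0x0000000100000000 (+2^32) fails the (int64_t)x != (int32_t)x check,
 * sets QC and returns 0x7fffffff, while 0xffffffff80000000 (-2^31)
 * still fits and narrows to 0x80000000 with QC clear.
 */
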
uint64_t HELPER(neon_widen_u8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint8_t)x;
    tmp = (uint8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_s8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint16_t)(int8_t)x;
    tmp = (uint16_t)(int8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint16_t)(int8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint16_t)(int8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_u16)(uint32_t x)
{
    uint64_t high = (uint16_t)(x >> 16);
    return ((uint16_t)x) | (high << 32);
}

uint64_t HELPER(neon_widen_s16)(uint32_t x)
{
    uint64_t high = (int16_t)(x >> 16);
    return ((uint32_t)(int16_t)x) | (high << 32);
}

uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000800080008000ull;
    a &= ~0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000000080000000ull;
    a &= ~0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
    uint64_t tmp;
    uint64_t tmp2;

    tmp = a & 0x0000ffff0000ffffull;
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
    tmp2 = b & 0xffff0000ffff0000ull;
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
    return    ( tmp         & 0xffff)
            | ((tmp  >> 16) & 0xffff0000ull)
            | ((tmp2 << 16) & 0xffff00000000ull)
            | ( tmp2        & 0xffff000000000000ull);
}

uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
    uint32_t low = a + (a >> 32);
    uint32_t high = b + (b >> 32);
    return low + ((uint64_t)high << 32);
}

uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000800080008000ull;
    a |= 0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000000080000000ull;
    a |= 0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}

uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}

/* We have to do the arithmetic in a larger type than
 * the input type, because for example with a signed 32 bit
 * op the absolute difference can overflow a signed 32 bit value.
 */
#define DO_ABD(dest, x, y, intype, arithtype) do {            \
    arithtype tmp_x = (intype)(x);                            \
    arithtype tmp_y = (intype)(y);                            \
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
    } while(0)

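/* DO_ABD example (assumed values): with intype int8_t and arithtype
 * int32_t, inputs 0x80 (-128) and 0x7f (+127) are widened before the
 * subtraction, so the difference 255 is representable; an 8-bit
 * subtraction would have wrapped.
 */
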
uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint8_t, uint32_t);
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int8_t, int32_t);
    DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint16_t, uint32_t);
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int16_t, int32_t);
    DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, uint32_t, uint64_t);
    return result;
}

uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, int32_t, int64_t);
    return result;
}
#undef DO_ABD

/* Widening multiply. Named type is the source type.  */
#define DO_MULL(dest, x, y, type1, type2) do { \
    type1 tmp_x = x; \
    type1 tmp_y = y; \
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
    } while(0)

uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_negl_u16)(uint64_t x)
{
    uint16_t tmp;
    uint64_t result;
    result = (uint16_t)-x;
    tmp = -(x >> 16);
    result |= (uint64_t)tmp << 16;
    tmp = -(x >> 32);
    result |= (uint64_t)tmp << 32;
    tmp = -(x >> 48);
    result |= (uint64_t)tmp << 48;
    return result;
}

uint64_t HELPER(neon_negl_u32)(uint64_t x)
{
    uint32_t low = -x;
    uint32_t high = -(x >> 32);
    return low | ((uint64_t)high << 32);
}

/* Saturating sign manipulation.  */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QABS8(vec.v1);
    DO_QABS8(vec.v2);
    DO_QABS8(vec.v3);
    DO_QABS8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QABS8

#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QNEG8(vec.v1);
    DO_QNEG8(vec.v2);
    DO_QNEG8(vec.v3);
    DO_QNEG8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QNEG8

#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QABS16(vec.v1);
    DO_QABS16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QABS16

#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QNEG16(vec.v1);
    DO_QNEG16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QNEG16

uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else if ((int32_t)x < 0) {
        x = -x;
    }
    return x;
}

uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else {
        x = -x;
    }
    return x;
}

/* NEON Float helpers.  */
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = make_float32(a);
    float32 f1 = make_float32(b);
    return float32_val(float32_abs(float32_sub(f0, f1, fpst)));
}

/* Floating point comparisons produce an integer result.
 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
 */
uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
}

uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_le(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_lt(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_le(f1, f0, fpst);
}

uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_lt(f1, f0, fpst);
}

uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_le(f1, f0, fpst);
}

uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_lt(f1, f0, fpst);
}

#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))

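/* ELEM example (assumed values): ELEM(0x0123456789abcdef, 1, 16)
 * shifts right by 16 and masks to 16 bits, extracting 0x89ab -- the
 * second 16-bit element counting from the least significant end.
 */
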
void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}