/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GNU GPL v2.
 */
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] |= CPSR_Q
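
/* SET_QC() records cumulative saturation: the saturating (q*) helpers
 * below set the QC bit in FPSCR whenever a result had to be clamped. */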

#define NEON_TYPE1(name, type) \
typedef struct \
{ \
    type v1; \
} neon_##name;
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v2; \
    type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v4; \
    type v3; \
    type v2; \
    type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
    type v3; \
    type v4; \
} neon_##name;
#endif

NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
#undef NEON_TYPE4
#undef NEON_TYPE2
#undef NEON_TYPE1

/* Copy from a uint32_t to a vector structure type. */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t. */
#define NEON_PACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)

#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);

#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUARMState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
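
/* For example, with NEON_FN(dest, src1, src2) defined as
 * "dest = (src1 + src2) >> 1", NEON_VOP(hadd_u8, neon_u8, 4) expands
 * (roughly) to:
 *
 *   uint32_t HELPER(neon_hadd_u8)(uint32_t arg1, uint32_t arg2)
 *   {
 *       uint32_t res;
 *       neon_u8 vsrc1, vsrc2, vdest;
 *       NEON_UNPACK(neon_u8, vsrc1, arg1);
 *       NEON_UNPACK(neon_u8, vsrc2, arg2);
 *       vdest.v1 = (vsrc1.v1 + vsrc2.v1) >> 1;
 *       ...and likewise for .v2, .v3 and .v4...
 *       NEON_PACK(neon_u8, res, vdest);
 *       return res;
 *   }
 *
 * i.e. one scalar NEON_FN application per element packed into the
 * 32-bit operand.
 */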

/* Pairwise operations. */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same. */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4);

#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

/* Unary operators. */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = ~0; \
    } else { \
        dest = tmp; \
    }} while(0)
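/* The "tmp != (type)tmp" test catches overflow by checking whether the
 * full-width sum still fits in the narrow element type: for uint8_t,
 * 200 + 100 = 300 but (uint8_t)300 == 44, so the lane saturates to ~0
 * (0xff) and QC is set. */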
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

uint32_t HELPER(neon_qadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        SET_QC();
        res = ~0;
    }
    return res;
}

uint64_t HELPER(neon_qadd_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (res < src1) {
        SET_QC();
        res = ~(uint64_t)0;
    }
    return res;
}

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 > 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

uint32_t HELPER(neon_qadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
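
/* Signed overflow happened iff the operands have the same sign but the
 * result's sign differs: !((a ^ b) & SIGNBIT) && ((res ^ a) & SIGNBIT).
 * On overflow, ~(((int32_t)a >> 31) ^ SIGNBIT) yields INT32_MAX
 * (0x7fffffff) when a >= 0 and INT32_MIN (0x80000000) when a < 0. */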

uint64_t HELPER(neon_qadd_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 + src2;
    if (((res ^ src1) & SIGNBIT64) && !((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

/* Unsigned saturating accumulate of signed value
 *
 * Op1/Rn is treated as signed
 * Op2/Rd is treated as unsigned
 *
 * Explicit casting is used to ensure the correct sign extension of
 * inputs. The result is treated as an unsigned value and saturated as such.
 *
 * We use a macro for the 8/16 bit cases which expects signed integers of va,
 * vb, and vr for interim calculation and an unsigned 32 bit result value r.
 */

#define USATACC(bits, shift) \
    do { \
        va = sextract32(a, shift, bits); \
        vb = extract32(b, shift, bits); \
        vr = va + vb; \
        if (vr > UINT##bits##_MAX) { \
            SET_QC(); \
            vr = UINT##bits##_MAX; \
        } else if (vr < 0) { \
            SET_QC(); \
            vr = 0; \
        } \
        r = deposit32(r, shift, bits, vr); \
    } while (0)

uint32_t HELPER(neon_uqadd_s8)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int16_t va, vb, vr;
    uint32_t r = 0;

    USATACC(8, 0);
    USATACC(8, 8);
    USATACC(8, 16);
    USATACC(8, 24);
    return r;
}

uint32_t HELPER(neon_uqadd_s16)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int32_t va, vb, vr;
    uint64_t r = 0;

    USATACC(16, 0);
    USATACC(16, 16);
    return r;
}

#undef USATACC

uint32_t HELPER(neon_uqadd_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int64_t va = (int32_t)a;
    int64_t vb = (uint32_t)b;
    int64_t vr = va + vb;
    if (vr > UINT32_MAX) {
        SET_QC();
        vr = UINT32_MAX;
    } else if (vr < 0) {
        SET_QC();
        vr = 0;
    }
    return vr;
}

uint64_t HELPER(neon_uqadd_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t res;
    res = a + b;
    /* We only need to look at the pattern of SIGN bits to detect
     * +ve/-ve saturation
     */
    if (~a & b & ~res & SIGNBIT64) {
        SET_QC();
        res = UINT64_MAX;
    } else if (a & ~b & res & SIGNBIT64) {
        SET_QC();
        res = 0;
    }
    return res;
}
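
/* Those two sign-bit patterns are the only ones needing saturation:
 * top(a)=0, top(b)=1, top(res)=0 can only occur when the true sum
 * carried out past 2^64 (clamp to UINT64_MAX), and top(a)=1 (negative
 * signed op1), top(b)=0, top(res)=1 only when the true sum is negative
 * (clamp to 0). Every other combination leaves the wrapped sum
 * correct. */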

/* Signed saturating accumulate of unsigned value
 *
 * Op1/Rn is treated as unsigned
 * Op2/Rd is treated as signed
 *
 * The result is treated as a signed value and saturated as such
 *
 * We use a macro for the 8/16 bit cases which expects signed integers of va,
 * vb, and vr for interim calculation and an unsigned 32 bit result value r.
 */
#define SSATACC(bits, shift) \
    do { \
        va = extract32(a, shift, bits); \
        vb = sextract32(b, shift, bits); \
        vr = va + vb; \
        if (vr > INT##bits##_MAX) { \
            SET_QC(); \
            vr = INT##bits##_MAX; \
        } else if (vr < INT##bits##_MIN) { \
            SET_QC(); \
            vr = INT##bits##_MIN; \
        } \
        r = deposit32(r, shift, bits, vr); \
    } while (0)

uint32_t HELPER(neon_sqadd_u8)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int16_t va, vb, vr;
    uint32_t r = 0;

    SSATACC(8, 0);
    SSATACC(8, 8);
    SSATACC(8, 16);
    SSATACC(8, 24);
    return r;
}

uint32_t HELPER(neon_sqadd_u16)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int32_t va, vb, vr;
    uint32_t r = 0;

    SSATACC(16, 0);
    SSATACC(16, 16);

    return r;
}

#undef SSATACC

uint32_t HELPER(neon_sqadd_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    int64_t res;
    int64_t op1 = (uint32_t)a;
    int64_t op2 = (int32_t)b;
    res = op1 + op2;
    if (res > INT32_MAX) {
        SET_QC();
        res = INT32_MAX;
    } else if (res < INT32_MIN) {
        SET_QC();
        res = INT32_MIN;
    }
    return res;
}

uint64_t HELPER(neon_sqadd_u64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t res;
    res = a + b;
    /* We only need to look at the pattern of SIGN bits to detect an overflow */
    if (((a & res)
         | (~b & res)
         | (a & ~b)) & SIGNBIT64) {
        SET_QC();
        res = INT64_MAX;
    }
    return res;
}
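
/* With op1 unsigned and op2 signed the sum can only saturate upwards:
 * even the most negative op2 cannot take an unsigned op1 below
 * INT64_MIN. Case analysis on the top bits shows the wrapped result
 * exceeds INT64_MAX exactly when (a & res) | (~b & res) | (a & ~b)
 * has its sign bit set, hence the single clamp to INT64_MAX. */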

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

uint32_t HELPER(neon_qsub_u32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        SET_QC();
        res = 0;
    }
    return res;
}

uint64_t HELPER(neon_qsub_u64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    if (src1 < src2) {
        SET_QC();
        res = 0;
    } else {
        res = src1 - src2;
    }
    return res;
}

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 < 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

uint32_t HELPER(neon_qsub_s32)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        SET_QC();
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint64_t HELPER(neon_qsub_s64)(CPUARMState *env, uint64_t src1, uint64_t src2)
{
    uint64_t res;

    res = src1 - src2;
    if (((res ^ src1) & SIGNBIT64) && ((src1 ^ src2) & SIGNBIT64)) {
        SET_QC();
        res = ((int64_t)src1 >> 63) ^ ~SIGNBIT64;
    }
    return res;
}

#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}
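
/* (src1 >> 1) + (src2 >> 1) plus a carry when both discarded low bits
 * are set is the usual way to compute (src1 + src2) >> 1 without the
 * intermediate sum overflowing, e.g. the halving add of 0xffffffff and
 * 0xffffffff gives 0x7fffffff + 0x7fffffff + 1 = 0xffffffff. */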

#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}
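
/* For the rounding variant the increment folds into ((src1 | src2) & 1):
 * (a + b + 1) >> 1 exceeds (a >> 1) + (b >> 1) exactly when at least
 * one of the discarded low bits is set. */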

#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
NEON_VOP(cgt_s8, neon_s8, 4)
NEON_VOP(cgt_u8, neon_u8, 4)
NEON_VOP(cgt_s16, neon_s16, 2)
NEON_VOP(cgt_u16, neon_u16, 2)
NEON_VOP(cgt_s32, neon_s32, 1)
NEON_VOP(cgt_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
NEON_VOP(cge_s8, neon_s8, 4)
NEON_VOP(cge_u8, neon_u8, 4)
NEON_VOP(cge_s16, neon_s16, 2)
NEON_VOP(cge_u16, neon_u16, 2)
NEON_VOP(cge_s32, neon_s32, 1)
NEON_VOP(cge_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_VOP(min_s8, neon_s8, 4)
NEON_VOP(min_u8, neon_u8, 4)
NEON_VOP(min_s16, neon_s16, 2)
NEON_VOP(min_u16, neon_u16, 2)
NEON_VOP(min_s32, neon_s32, 1)
NEON_VOP(min_u32, neon_u32, 1)
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_VOP(max_s8, neon_s8, 4)
NEON_VOP(max_u8, neon_u8, 4)
NEON_VOP(max_s16, neon_s16, 2)
NEON_VOP(max_u16, neon_u16, 2)
NEON_VOP(max_s32, neon_s32, 1)
NEON_VOP(max_u32, neon_u32, 1)
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
NEON_VOP(abd_s8, neon_s8, 4)
NEON_VOP(abd_u8, neon_u8, 4)
NEON_VOP(abd_s16, neon_s16, 2)
NEON_VOP(abd_u16, neon_u16, 2)
NEON_VOP(abd_s32, neon_s32, 1)
NEON_VOP(abd_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_u8, neon_u8, 4)
NEON_VOP(shl_u16, neon_u16, 2)
NEON_VOP(shl_u32, neon_u32, 1)
#undef NEON_FN

uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64 || shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_s8, neon_s8, 4)
NEON_VOP(shl_s16, neon_s16, 2)
NEON_VOP(shl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        val = 0;
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if ((tmp >= (ssize_t)sizeof(src1) * 8) \
        || (tmp <= -(ssize_t)sizeof(src1) * 8)) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator. */
uint32_t HELPER(neon_rshl_s32)(uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if ((shift >= 32) || (shift <= -32)) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values. */
uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if ((shift >= 64) || (shift <= -64)) {
        val = 0;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly. */
            val = 0x4000000000000000LL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator. */
uint32_t HELPER(neon_rshl_u32)(uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32 || shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values. */
uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    if (shift >= 64 || shift < -64) {
        val = 0;
    } else if (shift == -64) {
        /* Rounding a 1-bit result just preserves that bit. */
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly. */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            val = ~(uint64_t)0;
            SET_QC();
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = src1; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> 31; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        int8_t tmp; \
        tmp = (int8_t)src2; \
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            if (src1) { \
                SET_QC(); \
                dest = ~0; \
            } else { \
                dest = 0; \
            } \
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
            dest = 0; \
        } else if (tmp < 0) { \
            dest = src1 >> -tmp; \
        } else { \
            dest = src1 << tmp; \
            if ((dest >> tmp) != src1) { \
                SET_QC(); \
                dest = ~0; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qshlu_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
    if ((int32_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u32(env, valop, shiftop);
}

uint64_t HELPER(neon_qshlu_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    if ((int64_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u64(env, valop, shiftop);
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator. */
uint32_t HELPER(neon_qrshl_u32)(CPUARMState *env, uint32_t val, uint32_t shiftop)
{
    uint32_t dest;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32) {
        if (val) {
            SET_QC();
            dest = ~0;
        } else {
            dest = 0;
        }
    } else if (shift < -32) {
        dest = 0;
    } else if (shift == -32) {
        dest = val >> 31;
    } else if (shift < 0) {
        uint64_t big_dest = ((uint64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = ~0;
        }
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values. */
uint64_t HELPER(neon_qrshl_u64)(CPUARMState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = ~0;
        }
    } else if (shift < -64) {
        val = 0;
    } else if (shift == -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == UINT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly. */
            val = 0x8000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~0;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
#undef NEON_FN

/* The addition of the rounding constant may overflow, so we use an
 * intermediate 64 bit accumulator. */
uint32_t HELPER(neon_qrshl_s32)(CPUARMState *env, uint32_t valop, uint32_t shiftop)
{
    int32_t dest;
    int32_t val = (int32_t)valop;
    int8_t shift = (int8_t)shiftop;
    if (shift >= 32) {
        if (val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        } else {
            dest = 0;
        }
    } else if (shift <= -32) {
        dest = 0;
    } else if (shift < 0) {
        int64_t big_dest = ((int64_t)val + (1 << (-1 - shift)));
        dest = big_dest >> -shift;
    } else {
        dest = val << shift;
        if ((dest >> shift) != val) {
            SET_QC();
            dest = (val >> 31) ^ ~SIGNBIT;
        }
    }
    return dest;
}

/* Handling addition overflow with 64 bit input values is more
 * tricky than with 32 bit values. */
uint64_t HELPER(neon_qrshl_s64)(CPUARMState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;

    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= (-shift - 1);
        if (val == INT64_MAX) {
            /* In this case, it means that the rounding constant is 1,
             * and the addition would overflow. Return the actual
             * result directly. */
            val = 0x4000000000000000ULL;
        } else {
            val++;
            val >>= 1;
        }
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}

uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80808080u;
    a &= ~0x80808080u;
    b &= ~0x80808080u;
    return (a + b) ^ mask;
}

uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80008000u;
    a &= ~0x80008000u;
    b &= ~0x80008000u;
    return (a + b) ^ mask;
}
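
/* SWAR addition: clearing the top bit of every lane before the add
 * stops carries propagating across lane boundaries; the correct top
 * bit of each lane is then the XOR of the operands' top bits and the
 * carry into that bit, which (a + b) ^ mask restores. */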

#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN

/* Polynomial multiplication is like integer multiplication except the
   partial products are XORed, not added. */
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
{
    uint32_t mask;
    uint32_t result;
    result = 0;
    while (op1) {
        mask = 0;
        if (op1 & 1)
            mask |= 0xff;
        if (op1 & (1 << 8))
            mask |= (0xff << 8);
        if (op1 & (1 << 16))
            mask |= (0xff << 16);
        if (op1 & (1 << 24))
            mask |= (0xff << 24);
        result ^= op2 & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2 = (op2 << 1) & 0xfefefefe;
    }
    return result;
}
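
/* Carry-less multiplication over GF(2) XORs shifted partial products
 * instead of adding them, so e.g. 0x03 * 0x03 = 0x05 ((x+1)^2 = x^2+1),
 * not 0x09. The loop runs one shift-and-XOR step per bit of op1 on all
 * four byte lanes in parallel; the 0x7f7f7f7f / 0xfefefefe masks keep
 * bits from leaking across lane boundaries. */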

uint64_t HELPER(neon_mull_p8)(uint32_t op1, uint32_t op2)
{
    uint64_t result = 0;
    uint64_t mask;
    uint64_t op2ex = op2;
    op2ex = (op2ex & 0xff) |
        ((op2ex & 0xff00) << 8) |
        ((op2ex & 0xff0000) << 16) |
        ((op2ex & 0xff000000) << 24);
    while (op1) {
        mask = 0;
        if (op1 & 1) {
            mask |= 0xffff;
        }
        if (op1 & (1 << 8)) {
            mask |= (0xffffU << 16);
        }
        if (op1 & (1 << 16)) {
            mask |= (0xffffULL << 32);
        }
        if (op1 & (1 << 24)) {
            mask |= (0xffffULL << 48);
        }
        result ^= op2ex & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2ex <<= 1;
    }
    return result;
}

#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
NEON_VOP(ceq_u8, neon_u8, 4)
NEON_VOP(ceq_u16, neon_u16, 2)
NEON_VOP(ceq_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
#undef NEON_FN

/* Count Leading Sign/Zero Bits. */
static inline int do_clz8(uint8_t x)
{
    int n;
    for (n = 8; x; n--)
        x >>= 1;
    return n;
}

static inline int do_clz16(uint16_t x)
{
    int n;
    for (n = 16; x; n--)
        x >>= 1;
    return n;
}

#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_cls_s32)(uint32_t x)
{
    int count;
    if ((int32_t)x < 0)
        x = ~x;
    for (count = 32; x; count--)
        x = x >> 1;
    return count - 1;
}

/* Bit count. */
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
{
    x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
    x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
    return x;
}
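
/* This is the classic parallel popcount: bit pairs are summed, then
 * nibbles, then bytes, so each byte of the result holds the number of
 * set bits in the corresponding input byte, e.g. 0xff000000 gives
 * 0x08000000. */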

/* Reverse bits in each 8 bit word */
uint32_t HELPER(neon_rbit_u8)(uint32_t x)
{
    x =  ((x & 0xf0f0f0f0) >> 4)
       | ((x & 0x0f0f0f0f) << 4);
    x =  ((x & 0x88888888) >> 3)
       | ((x & 0x44444444) >> 1)
       | ((x & 0x22222222) << 1)
       | ((x & 0x11111111) << 3);
    return x;
}
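
/* The reversal works in two steps: the nibbles of each byte are
 * swapped, then the bits within each nibble are reversed (bit 3 moves
 * down 3, bit 2 down 1, bit 1 up 1, bit 0 up 3), e.g. 0x01 -> 0x80. */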

#define NEON_QDMULH16(dest, src1, src2, round) do { \
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
        SET_QC(); \
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int32_t old = tmp; \
        tmp += 1 << 15; \
        if ((int32_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT - 1; \
        } \
    } \
    dest = tmp >> 16; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16
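
/* VQDMULH returns the high half of the doubled product 2*a*b. Doubling
 * the 32-bit product of two int16 values only overflows for
 * 0x8000 * 0x8000; "tmp ^ (tmp << 1)" spots that case by the top two
 * bits of tmp disagreeing, and the result is clamped with QC set. The
 * rounding variant (VQRDMULH) adds 1 << 15 before taking the high
 * half, again checking for overflow. */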

#define NEON_QDMULH32(dest, src1, src2, round) do { \
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
        SET_QC(); \
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int64_t old = tmp; \
        tmp += (int64_t)1 << 31; \
        if ((int64_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT64 - 1; \
        } \
    } \
    dest = tmp >> 32; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32

uint32_t HELPER(neon_narrow_u8)(uint64_t x)
{
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
           | ((x >> 24) & 0xff000000u);
}

uint32_t HELPER(neon_narrow_u16)(uint64_t x)
{
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}

uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
{
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
{
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
{
    x &= 0xff80ff80ff80ff80ull;
    x += 0x0080008000800080ull;
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
{
    x &= 0xffff8000ffff8000ull;
    x += 0x0000800000008000ull;
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_unarrow_sat8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s & 0x8000) { \
        SET_QC(); \
    } else { \
        if (s > 0xff) { \
            d = 0xff; \
            SET_QC(); \
        } else { \
            d = s; \
        } \
        res |= (uint32_t)d << (n / 2); \
    }

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_u8)(CPUARMState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_s8)(CPUARMState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_unarrow_sat16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low & 0x80000000) {
        low = 0;
        SET_QC();
    } else if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high & 0x80000000) {
        high = 0;
        SET_QC();
    } else if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_u16)(CPUARMState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_s16)(CPUARMState *env, uint64_t x)
{
    int32_t low;
    int32_t high;
    low = x;
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    high = x >> 32;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}

uint32_t HELPER(neon_unarrow_sat32)(CPUARMState *env, uint64_t x)
{
    if (x & 0x8000000000000000ull) {
        SET_QC();
        return 0;
    }
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_u32)(CPUARMState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_s32)(CPUARMState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return ((int64_t)x >> 63) ^ 0x7fffffff;
    }
    return x;
}

uint64_t HELPER(neon_widen_u8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint8_t)x;
    tmp = (uint8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_s8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint16_t)(int8_t)x;
    tmp = (uint16_t)(int8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint16_t)(int8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint16_t)(int8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_u16)(uint32_t x)
{
    uint64_t high = (uint16_t)(x >> 16);
    return ((uint16_t)x) | (high << 32);
}

uint64_t HELPER(neon_widen_s16)(uint32_t x)
{
    uint64_t high = (int16_t)(x >> 16);
    return ((uint32_t)(int16_t)x) | (high << 32);
}

uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000800080008000ull;
    a &= ~0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000000080000000ull;
    a &= ~0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
    uint64_t tmp;
    uint64_t tmp2;

    tmp = a & 0x0000ffff0000ffffull;
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
    tmp2 = b & 0xffff0000ffff0000ull;
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
    return    ( tmp         & 0xffff)
            | ((tmp  >> 16) & 0xffff0000ull)
            | ((tmp2 << 16) & 0xffff00000000ull)
            | ( tmp2        & 0xffff000000000000ull);
}

uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
    uint32_t low = a + (a >> 32);
    uint32_t high = b + (b >> 32);
    return low + ((uint64_t)high << 32);
}

uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000800080008000ull;
    a |= 0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000000080000000ull;
    a |= 0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_addl_saturate_s32)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}

uint64_t HELPER(neon_addl_saturate_s64)(CPUARMState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}

/* We have to do the arithmetic in a larger type than
 * the input type, because for example with a signed 32 bit
 * op the absolute difference can overflow a signed 32 bit value.
 */
#define DO_ABD(dest, x, y, intype, arithtype) do { \
    arithtype tmp_x = (intype)(x); \
    arithtype tmp_y = (intype)(y); \
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
    } while(0)

uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint8_t, uint32_t);
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t, uint32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t, uint32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t, uint32_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int8_t, int32_t);
    DO_ABD(tmp, a >> 8, b >> 8, int8_t, int32_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, int8_t, int32_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, int8_t, int32_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint16_t, uint32_t);
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int16_t, int32_t);
    DO_ABD(tmp, a >> 16, b >> 16, int16_t, int32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, uint32_t, uint64_t);
    return result;
}

uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, int32_t, int64_t);
    return result;
}
#undef DO_ABD

/* Widening multiply. Named type is the source type. */
#define DO_MULL(dest, x, y, type1, type2) do { \
    type1 tmp_x = x; \
    type1 tmp_y = y; \
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
    } while(0)

uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_negl_u16)(uint64_t x)
{
    uint16_t tmp;
    uint64_t result;
    result = (uint16_t)-x;
    tmp = -(x >> 16);
    result |= (uint64_t)tmp << 16;
    tmp = -(x >> 32);
    result |= (uint64_t)tmp << 32;
    tmp = -(x >> 48);
    result |= (uint64_t)tmp << 48;
    return result;
}

uint64_t HELPER(neon_negl_u32)(uint64_t x)
{
    uint32_t low = -x;
    uint32_t high = -(x >> 32);
    return low | ((uint64_t)high << 32);
}

/* Saturating sign manipulation. */
/* ??? Make these use NEON_VOP1 */
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QABS8(vec.v1);
    DO_QABS8(vec.v2);
    DO_QABS8(vec.v3);
    DO_QABS8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QABS8

#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s8)(CPUARMState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QNEG8(vec.v1);
    DO_QNEG8(vec.v2);
    DO_QNEG8(vec.v3);
    DO_QNEG8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QNEG8

#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QABS16(vec.v1);
    DO_QABS16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QABS16

#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s16)(CPUARMState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QNEG16(vec.v1);
    DO_QNEG16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QNEG16

uint32_t HELPER(neon_qabs_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else if ((int32_t)x < 0) {
        x = -x;
    }
    return x;
}

uint32_t HELPER(neon_qneg_s32)(CPUARMState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else {
        x = -x;
    }
    return x;
}

uint64_t HELPER(neon_qabs_s64)(CPUARMState *env, uint64_t x)
{
    if (x == SIGNBIT64) {
        SET_QC();
        x = ~SIGNBIT64;
    } else if ((int64_t)x < 0) {
        x = -x;
    }
    return x;
}

uint64_t HELPER(neon_qneg_s64)(CPUARMState *env, uint64_t x)
{
    if (x == SIGNBIT64) {
        SET_QC();
        x = ~SIGNBIT64;
    } else {
        x = -x;
    }
    return x;
}

/* NEON Float helpers. */
uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = make_float32(a);
    float32 f1 = make_float32(b);
    return float32_val(float32_abs(float32_sub(f0, f1, fpst)));
}

/* Floating point comparisons produce an integer result.
 * Note that EQ doesn't signal InvalidOp for QNaNs but GE and GT do.
 * Softfloat routines return 0/1, which we convert to the 0/-1 Neon requires.
 */
uint32_t HELPER(neon_ceq_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_eq_quiet(make_float32(a), make_float32(b), fpst);
}

uint32_t HELPER(neon_cge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_le(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_cgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float32_lt(make_float32(b), make_float32(a), fpst);
}

uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_le(f1, f0, fpst);
}

uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float32 f0 = float32_abs(make_float32(a));
    float32 f1 = float32_abs(make_float32(b));
    return -float32_lt(f1, f0, fpst);
}

uint64_t HELPER(neon_acge_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_le(f1, f0, fpst);
}

uint64_t HELPER(neon_acgt_f64)(uint64_t a, uint64_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float64 f0 = float64_abs(make_float64(a));
    float64 f1 = float64_abs(make_float64(b));
    return -float64_lt(f1, f0, fpst);
}

#define ELEM(V, N, SIZE) (((V) >> ((N) * (SIZE))) & ((1ull << (SIZE)) - 1))
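
/* ELEM(V, N, SIZE) extracts element N of width SIZE bits from the
 * 64-bit value V, e.g. ELEM(0x1122334455667788ull, 2, 16) == 0x3344;
 * the zip/unzip helpers below are built entirely from such
 * extract-and-reposition steps. */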

void HELPER(neon_qunzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zd0, 2, 8) << 8)
        | (ELEM(zd0, 4, 8) << 16) | (ELEM(zd0, 6, 8) << 24)
        | (ELEM(zd1, 0, 8) << 32) | (ELEM(zd1, 2, 8) << 40)
        | (ELEM(zd1, 4, 8) << 48) | (ELEM(zd1, 6, 8) << 56);
    uint64_t d1 = ELEM(zm0, 0, 8) | (ELEM(zm0, 2, 8) << 8)
        | (ELEM(zm0, 4, 8) << 16) | (ELEM(zm0, 6, 8) << 24)
        | (ELEM(zm1, 0, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zm1, 4, 8) << 48) | (ELEM(zm1, 6, 8) << 56);
    uint64_t m0 = ELEM(zd0, 1, 8) | (ELEM(zd0, 3, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zd0, 7, 8) << 24)
        | (ELEM(zd1, 1, 8) << 32) | (ELEM(zd1, 3, 8) << 40)
        | (ELEM(zd1, 5, 8) << 48) | (ELEM(zd1, 7, 8) << 56);
    uint64_t m1 = ELEM(zm0, 1, 8) | (ELEM(zm0, 3, 8) << 8)
        | (ELEM(zm0, 5, 8) << 16) | (ELEM(zm0, 7, 8) << 24)
        | (ELEM(zm1, 1, 8) << 32) | (ELEM(zm1, 3, 8) << 40)
        | (ELEM(zm1, 5, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_qunzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zd0, 2, 16) << 16)
        | (ELEM(zd1, 0, 16) << 32) | (ELEM(zd1, 2, 16) << 48);
    uint64_t d1 = ELEM(zm0, 0, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zm1, 0, 16) << 32) | (ELEM(zm1, 2, 16) << 48);
    uint64_t m0 = ELEM(zd0, 1, 16) | (ELEM(zd0, 3, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zd1, 3, 16) << 48);
    uint64_t m1 = ELEM(zm0, 1, 16) | (ELEM(zm0, 3, 16) << 16)
        | (ELEM(zm1, 1, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_qunzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zd1, 0, 32) << 32);
    uint64_t d1 = ELEM(zm0, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m0 = ELEM(zd0, 1, 32) | (ELEM(zd1, 1, 32) << 32);
    uint64_t m1 = ELEM(zm0, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_unzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zd, 2, 8) << 8)
        | (ELEM(zd, 4, 8) << 16) | (ELEM(zd, 6, 8) << 24)
        | (ELEM(zm, 0, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zm, 4, 8) << 48) | (ELEM(zm, 6, 8) << 56);
    uint64_t m0 = ELEM(zd, 1, 8) | (ELEM(zd, 3, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zd, 7, 8) << 24)
        | (ELEM(zm, 1, 8) << 32) | (ELEM(zm, 3, 8) << 40)
        | (ELEM(zm, 5, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

void HELPER(neon_unzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zd, 2, 16) << 16)
        | (ELEM(zm, 0, 16) << 32) | (ELEM(zm, 2, 16) << 48);
    uint64_t m0 = ELEM(zd, 1, 16) | (ELEM(zd, 3, 16) << 16)
        | (ELEM(zm, 1, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

void HELPER(neon_qzip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 8) | (ELEM(zm0, 0, 8) << 8)
        | (ELEM(zd0, 1, 8) << 16) | (ELEM(zm0, 1, 8) << 24)
        | (ELEM(zd0, 2, 8) << 32) | (ELEM(zm0, 2, 8) << 40)
        | (ELEM(zd0, 3, 8) << 48) | (ELEM(zm0, 3, 8) << 56);
    uint64_t d1 = ELEM(zd0, 4, 8) | (ELEM(zm0, 4, 8) << 8)
        | (ELEM(zd0, 5, 8) << 16) | (ELEM(zm0, 5, 8) << 24)
        | (ELEM(zd0, 6, 8) << 32) | (ELEM(zm0, 6, 8) << 40)
        | (ELEM(zd0, 7, 8) << 48) | (ELEM(zm0, 7, 8) << 56);
    uint64_t m0 = ELEM(zd1, 0, 8) | (ELEM(zm1, 0, 8) << 8)
        | (ELEM(zd1, 1, 8) << 16) | (ELEM(zm1, 1, 8) << 24)
        | (ELEM(zd1, 2, 8) << 32) | (ELEM(zm1, 2, 8) << 40)
        | (ELEM(zd1, 3, 8) << 48) | (ELEM(zm1, 3, 8) << 56);
    uint64_t m1 = ELEM(zd1, 4, 8) | (ELEM(zm1, 4, 8) << 8)
        | (ELEM(zd1, 5, 8) << 16) | (ELEM(zm1, 5, 8) << 24)
        | (ELEM(zd1, 6, 8) << 32) | (ELEM(zm1, 6, 8) << 40)
        | (ELEM(zd1, 7, 8) << 48) | (ELEM(zm1, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_qzip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 16) | (ELEM(zm0, 0, 16) << 16)
        | (ELEM(zd0, 1, 16) << 32) | (ELEM(zm0, 1, 16) << 48);
    uint64_t d1 = ELEM(zd0, 2, 16) | (ELEM(zm0, 2, 16) << 16)
        | (ELEM(zd0, 3, 16) << 32) | (ELEM(zm0, 3, 16) << 48);
    uint64_t m0 = ELEM(zd1, 0, 16) | (ELEM(zm1, 0, 16) << 16)
        | (ELEM(zd1, 1, 16) << 32) | (ELEM(zm1, 1, 16) << 48);
    uint64_t m1 = ELEM(zd1, 2, 16) | (ELEM(zm1, 2, 16) << 16)
        | (ELEM(zd1, 3, 16) << 32) | (ELEM(zm1, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_qzip32)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm0 = float64_val(env->vfp.regs[rm]);
    uint64_t zm1 = float64_val(env->vfp.regs[rm + 1]);
    uint64_t zd0 = float64_val(env->vfp.regs[rd]);
    uint64_t zd1 = float64_val(env->vfp.regs[rd + 1]);
    uint64_t d0 = ELEM(zd0, 0, 32) | (ELEM(zm0, 0, 32) << 32);
    uint64_t d1 = ELEM(zd0, 1, 32) | (ELEM(zm0, 1, 32) << 32);
    uint64_t m0 = ELEM(zd1, 0, 32) | (ELEM(zm1, 0, 32) << 32);
    uint64_t m1 = ELEM(zd1, 1, 32) | (ELEM(zm1, 1, 32) << 32);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rm + 1] = make_float64(m1);
    env->vfp.regs[rd] = make_float64(d0);
    env->vfp.regs[rd + 1] = make_float64(d1);
}

void HELPER(neon_zip8)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 8) | (ELEM(zm, 0, 8) << 8)
        | (ELEM(zd, 1, 8) << 16) | (ELEM(zm, 1, 8) << 24)
        | (ELEM(zd, 2, 8) << 32) | (ELEM(zm, 2, 8) << 40)
        | (ELEM(zd, 3, 8) << 48) | (ELEM(zm, 3, 8) << 56);
    uint64_t m0 = ELEM(zd, 4, 8) | (ELEM(zm, 4, 8) << 8)
        | (ELEM(zd, 5, 8) << 16) | (ELEM(zm, 5, 8) << 24)
        | (ELEM(zd, 6, 8) << 32) | (ELEM(zm, 6, 8) << 40)
        | (ELEM(zd, 7, 8) << 48) | (ELEM(zm, 7, 8) << 56);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

void HELPER(neon_zip16)(CPUARMState *env, uint32_t rd, uint32_t rm)
{
    uint64_t zm = float64_val(env->vfp.regs[rm]);
    uint64_t zd = float64_val(env->vfp.regs[rd]);
    uint64_t d0 = ELEM(zd, 0, 16) | (ELEM(zm, 0, 16) << 16)
        | (ELEM(zd, 1, 16) << 32) | (ELEM(zm, 1, 16) << 48);
    uint64_t m0 = ELEM(zd, 2, 16) | (ELEM(zm, 2, 16) << 16)
        | (ELEM(zd, 3, 16) << 32) | (ELEM(zm, 3, 16) << 48);
    env->vfp.regs[rm] = make_float64(m0);
    env->vfp.regs[rd] = make_float64(d0);
}

/* Helper function for 64 bit polynomial multiply case:
 * perform PolynomialMult(op1, op2) and return either the top or
 * bottom half of the 128 bit result.
 */
uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
{
    int bitnum;
    uint64_t res = 0;

    for (bitnum = 0; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 << bitnum;
        }
    }
    return res;
}

uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
{
    int bitnum;
    uint64_t res = 0;

    /* bit 0 of op1 can't influence the high 64 bits at all */
    for (bitnum = 1; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 >> (64 - bitnum);
        }
    }
    return res;
}
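
/* In the 128-bit carry-less product, each set bit n of op1 contributes
 * op2 << n: the _lo helper accumulates the bits that land in [0,63]
 * (op2 << n truncated to 64 bits), while the _hi helper accumulates
 * the bits shifted past bit 63 (op2 >> (64 - n)). Together they form
 * the full PolynomialMult(op1, op2) result. */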