[qemu.git] / target-arm / neon_helper.c
/*
 * ARM NEON vector operations.
 *
 * Copyright (c) 2007, 2008 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licenced under the GNU GPL v2.
 */
#include <stdlib.h>
#include <stdio.h>

#include "cpu.h"
#include "exec-all.h"
#include "helpers.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

#define SET_QC() env->vfp.xregs[ARM_VFP_FPSCR] = CPSR_Q

static float_status neon_float_status;
#define NFS &neon_float_status

/* Helper routines to perform bitwise copies between float and int.  */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}
#define NEON_TYPE1(name, type) \
typedef struct \
{ \
    type v1; \
} neon_##name;
#ifdef HOST_WORDS_BIGENDIAN
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v2; \
    type v1; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v4; \
    type v3; \
    type v2; \
    type v1; \
} neon_##name;
#else
#define NEON_TYPE2(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
} neon_##name;
#define NEON_TYPE4(name, type) \
typedef struct \
{ \
    type v1; \
    type v2; \
    type v3; \
    type v4; \
} neon_##name;
#endif

NEON_TYPE4(s8, int8_t)
NEON_TYPE4(u8, uint8_t)
NEON_TYPE2(s16, int16_t)
NEON_TYPE2(u16, uint16_t)
NEON_TYPE1(s32, int32_t)
NEON_TYPE1(u32, uint32_t)
#undef NEON_TYPE4
#undef NEON_TYPE2
#undef NEON_TYPE1

/* Copy from a uint32_t to a vector structure type.  */
#define NEON_UNPACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.i = (val); \
    dest = conv_u.v; \
    } while(0)

/* Copy from a vector structure type to a uint32_t.  */
#define NEON_PACK(vtype, dest, val) do { \
    union { \
        vtype v; \
        uint32_t i; \
    } conv_u; \
    conv_u.v = (val); \
    dest = conv_u.i; \
    } while(0)

#define NEON_DO1 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1);
#define NEON_DO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2);
#define NEON_DO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc2.v1); \
    NEON_FN(vdest.v2, vsrc1.v2, vsrc2.v2); \
    NEON_FN(vdest.v3, vsrc1.v3, vsrc2.v3); \
    NEON_FN(vdest.v4, vsrc1.v4, vsrc2.v4);

#define NEON_VOP_BODY(vtype, n) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_DO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

#define NEON_VOP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)

#define NEON_VOP_ENV(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(CPUState *env, uint32_t arg1, uint32_t arg2) \
NEON_VOP_BODY(vtype, n)
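
/* For example, NEON_VOP(hadd_s8, neon_s8, 4) below roughly expands to a
 * helper uint32_t helper_neon_hadd_s8(uint32_t arg1, uint32_t arg2) that
 * unpacks both arguments into four int8_t lanes, applies NEON_FN to each
 * pair of corresponding lanes and repacks the lanes into a uint32_t.  */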

/* Pairwise operations.  */
/* For 32-bit elements each segment only contains a single element, so
   the elementwise and pairwise operations are the same.  */
#define NEON_PDO2 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc2.v1, vsrc2.v2);
#define NEON_PDO4 \
    NEON_FN(vdest.v1, vsrc1.v1, vsrc1.v2); \
    NEON_FN(vdest.v2, vsrc1.v3, vsrc1.v4); \
    NEON_FN(vdest.v3, vsrc2.v1, vsrc2.v2); \
    NEON_FN(vdest.v4, vsrc2.v3, vsrc2.v4);
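
/* As an illustration, with NEON_PDO4 a pairwise op such as pmin_u8 (defined
 * further down) yields the byte lanes { min(a1,a2), min(a3,a4), min(b1,b2),
 * min(b3,b4) }, where a1..a4 and b1..b4 are the lanes of arg1 and arg2.  */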
#define NEON_POP(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg1, uint32_t arg2) \
{ \
    uint32_t res; \
    vtype vsrc1; \
    vtype vsrc2; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg1); \
    NEON_UNPACK(vtype, vsrc2, arg2); \
    NEON_PDO##n; \
    NEON_PACK(vtype, res, vdest); \
    return res; \
}

/* Unary operators.  */
#define NEON_VOP1(name, vtype, n) \
uint32_t HELPER(glue(neon_,name))(uint32_t arg) \
{ \
    vtype vsrc1; \
    vtype vdest; \
    NEON_UNPACK(vtype, vsrc1, arg); \
    NEON_DO##n; \
    NEON_PACK(vtype, arg, vdest); \
    return arg; \
}

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = ~0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qadd_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qadd_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 + (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 > 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qadd_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qadd_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

#define NEON_USAT(dest, src1, src2, type) do { \
    uint32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        dest = tmp; \
    }} while(0)
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint8_t)
NEON_VOP_ENV(qsub_u8, neon_u8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_USAT(dest, src1, src2, uint16_t)
NEON_VOP_ENV(qsub_u16, neon_u16, 2)
#undef NEON_FN
#undef NEON_USAT

#define NEON_SSAT(dest, src1, src2, type) do { \
    int32_t tmp = (uint32_t)src1 - (uint32_t)src2; \
    if (tmp != (type)tmp) { \
        SET_QC(); \
        if (src2 < 0) { \
            tmp = (1 << (sizeof(type) * 8 - 1)) - 1; \
        } else { \
            tmp = 1 << (sizeof(type) * 8 - 1); \
        } \
    } \
    dest = tmp; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int8_t)
NEON_VOP_ENV(qsub_s8, neon_s8, 4)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_SSAT(dest, src1, src2, int16_t)
NEON_VOP_ENV(qsub_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_SSAT

#define NEON_FN(dest, src1, src2) dest = (src1 + src2) >> 1
NEON_VOP(hadd_s8, neon_s8, 4)
NEON_VOP(hadd_u8, neon_u8, 4)
NEON_VOP(hadd_s16, neon_s16, 2)
NEON_VOP(hadd_u16, neon_u16, 2)
#undef NEON_FN
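
/* For the 32-bit halving adds below, computing (src1 + src2) >> 1 directly
 * could overflow the 32-bit intermediate, so the result is formed as
 * (src1 >> 1) + (src2 >> 1) plus a carry-in when both low bits are set,
 * e.g. 3 and 5 give 1 + 2 + 1 = 4, matching (3 + 5) >> 1.  */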
int32_t HELPER(neon_hadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_hadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if (src1 & src2 & 1)
        dest++;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 + src2 + 1) >> 1
NEON_VOP(rhadd_s8, neon_s8, 4)
NEON_VOP(rhadd_u8, neon_u8, 4)
NEON_VOP(rhadd_s16, neon_s16, 2)
NEON_VOP(rhadd_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_rhadd_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

uint32_t HELPER(neon_rhadd_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) + (src2 >> 1);
    if ((src1 | src2) & 1)
        dest++;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 - src2) >> 1
NEON_VOP(hsub_s8, neon_s8, 4)
NEON_VOP(hsub_u8, neon_u8, 4)
NEON_VOP(hsub_s16, neon_s16, 2)
NEON_VOP(hsub_u16, neon_u16, 2)
#undef NEON_FN

int32_t HELPER(neon_hsub_s32)(int32_t src1, int32_t src2)
{
    int32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

uint32_t HELPER(neon_hsub_u32)(uint32_t src1, uint32_t src2)
{
    uint32_t dest;

    dest = (src1 >> 1) - (src2 >> 1);
    if ((~src1) & src2 & 1)
        dest--;
    return dest;
}

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? ~0 : 0
NEON_VOP(cgt_s8, neon_s8, 4)
NEON_VOP(cgt_u8, neon_u8, 4)
NEON_VOP(cgt_s16, neon_s16, 2)
NEON_VOP(cgt_u16, neon_u16, 2)
NEON_VOP(cgt_s32, neon_s32, 1)
NEON_VOP(cgt_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 >= src2) ? ~0 : 0
NEON_VOP(cge_s8, neon_s8, 4)
NEON_VOP(cge_u8, neon_u8, 4)
NEON_VOP(cge_s16, neon_s16, 2)
NEON_VOP(cge_u16, neon_u16, 2)
NEON_VOP(cge_s32, neon_s32, 1)
NEON_VOP(cge_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 < src2) ? src1 : src2
NEON_VOP(min_s8, neon_s8, 4)
NEON_VOP(min_u8, neon_u8, 4)
NEON_VOP(min_s16, neon_s16, 2)
NEON_VOP(min_u16, neon_u16, 2)
NEON_VOP(min_s32, neon_s32, 1)
NEON_VOP(min_u32, neon_u32, 1)
NEON_POP(pmin_s8, neon_s8, 4)
NEON_POP(pmin_u8, neon_u8, 4)
NEON_POP(pmin_s16, neon_s16, 2)
NEON_POP(pmin_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 > src2) ? src1 : src2
NEON_VOP(max_s8, neon_s8, 4)
NEON_VOP(max_u8, neon_u8, 4)
NEON_VOP(max_s16, neon_s16, 2)
NEON_VOP(max_u16, neon_u16, 2)
NEON_VOP(max_s32, neon_s32, 1)
NEON_VOP(max_u32, neon_u32, 1)
NEON_POP(pmax_s8, neon_s8, 4)
NEON_POP(pmax_u8, neon_u8, 4)
NEON_POP(pmax_s16, neon_s16, 2)
NEON_POP(pmax_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) \
    dest = (src1 > src2) ? (src1 - src2) : (src2 - src1)
NEON_VOP(abd_s8, neon_s8, 4)
NEON_VOP(abd_u8, neon_u8, 4)
NEON_VOP(abd_s16, neon_s16, 2)
NEON_VOP(abd_u16, neon_u16, 2)
NEON_VOP(abd_s32, neon_s32, 1)
NEON_VOP(abd_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_u8, neon_u8, 4)
NEON_VOP(shl_u16, neon_u16, 2)
NEON_VOP(shl_u32, neon_u32, 1)
#undef NEON_FN

uint64_t HELPER(neon_shl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64 || shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(shl_s8, neon_s8, 4)
NEON_VOP(shl_s16, neon_s16, 2)
NEON_VOP(shl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_shl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        val = 0;
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (sizeof(src1) * 8 - 1); \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
        dest++; \
        dest >>= 1; \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_s8, neon_s8, 4)
NEON_VOP(rshl_s16, neon_s16, 2)
NEON_VOP(rshl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_rshl_s64)(uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        val = 0;
    } else if (shift < -64) {
        val >>= 63;
    } else if (shift == -64) {
        val >>= 63;
        val++;
        val >>= 1;
    } else if (shift < 0) {
        val = (val + ((int64_t)1 << (-1 - shift))) >> -shift;
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8 || \
        tmp < -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp == -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> (-tmp - 1); \
    } else if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
    }} while (0)
NEON_VOP(rshl_u8, neon_u8, 4)
NEON_VOP(rshl_u16, neon_u16, 2)
NEON_VOP(rshl_u32, neon_u32, 1)
#undef NEON_FN

uint64_t HELPER(neon_rshl_u64)(uint64_t val, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    if (shift >= 64 || shift < -64) {
        val = 0;
    } else if (shift == -64) {
        /* Rounding a 1-bit result just preserves that bit.  */
        val >>= 63;
    } else if (shift < 0) {
        val = (val + ((uint64_t)1 << (-1 - shift))) >> -shift;
    } else {
        val <<= shift;
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = ~0; \
        } else { \
            dest = 0; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = 0; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_u8, neon_u8, 4)
NEON_VOP_ENV(qshl_u16, neon_u16, 2)
NEON_VOP_ENV(qshl_u32, neon_u32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift >= 64) {
        if (val) {
            val = ~(uint64_t)0;
            SET_QC();
        }
    } else if (shift <= -64) {
        val = 0;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~(uint64_t)0;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp >= (ssize_t)sizeof(src1) * 8) { \
        if (src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } else { \
            dest = src1; \
        } \
    } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
        dest = src1 >> 31; \
    } else if (tmp < 0) { \
        dest = src1 >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = (uint32_t)(1 << (sizeof(src1) * 8 - 1)); \
            if (src1 > 0) { \
                dest--; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshl_s8, neon_s8, 4)
NEON_VOP_ENV(qshl_s16, neon_s16, 2)
NEON_VOP_ENV(qshl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;
    if (shift >= 64) {
        if (val) {
            SET_QC();
            val = (val >> 63) ^ ~SIGNBIT64;
        }
    } else if (shift <= -64) {
        val >>= 63;
    } else if (shift < 0) {
        val >>= -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = (tmp >> 63) ^ ~SIGNBIT64;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    if (src1 & (1 << (sizeof(src1) * 8 - 1))) { \
        SET_QC(); \
        dest = 0; \
    } else { \
        int8_t tmp; \
        tmp = (int8_t)src2; \
        if (tmp >= (ssize_t)sizeof(src1) * 8) { \
            if (src1) { \
                SET_QC(); \
                dest = ~0; \
            } else { \
                dest = 0; \
            } \
        } else if (tmp <= -(ssize_t)sizeof(src1) * 8) { \
            dest = 0; \
        } else if (tmp < 0) { \
            dest = src1 >> -tmp; \
        } else { \
            dest = src1 << tmp; \
            if ((dest >> tmp) != src1) { \
                SET_QC(); \
                dest = ~0; \
            } \
        } \
    }} while (0)
NEON_VOP_ENV(qshlu_s8, neon_u8, 4)
NEON_VOP_ENV(qshlu_s16, neon_u16, 2)
#undef NEON_FN

uint32_t HELPER(neon_qshlu_s32)(CPUState *env, uint32_t valop, uint32_t shiftop)
{
    if ((int32_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u32(env, valop, shiftop);
}

uint64_t HELPER(neon_qshlu_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    if ((int64_t)valop < 0) {
        SET_QC();
        return 0;
    }
    return helper_neon_qshl_u64(env, valop, shiftop);
}

/* FIXME: This is wrong.  */
#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = ~0; \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_u8, neon_u8, 4)
NEON_VOP_ENV(qrshl_u16, neon_u16, 2)
NEON_VOP_ENV(qrshl_u32, neon_u32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qrshl_u64)(CPUState *env, uint64_t val, uint64_t shiftop)
{
    int8_t shift = (int8_t)shiftop;
    if (shift < 0) {
        val = (val + (1 << (-1 - shift))) >> -shift;
    } else {
        uint64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = ~0;
        }
    }
    return val;
}

#define NEON_FN(dest, src1, src2) do { \
    int8_t tmp; \
    tmp = (int8_t)src2; \
    if (tmp < 0) { \
        dest = (src1 + (1 << (-1 - tmp))) >> -tmp; \
    } else { \
        dest = src1 << tmp; \
        if ((dest >> tmp) != src1) { \
            SET_QC(); \
            dest = src1 >> 31; \
        } \
    }} while (0)
NEON_VOP_ENV(qrshl_s8, neon_s8, 4)
NEON_VOP_ENV(qrshl_s16, neon_s16, 2)
NEON_VOP_ENV(qrshl_s32, neon_s32, 1)
#undef NEON_FN

uint64_t HELPER(neon_qrshl_s64)(CPUState *env, uint64_t valop, uint64_t shiftop)
{
    int8_t shift = (uint8_t)shiftop;
    int64_t val = valop;

    if (shift < 0) {
        val = (val + (1 << (-1 - shift))) >> -shift;
    } else {
        int64_t tmp = val;
        val <<= shift;
        if ((val >> shift) != tmp) {
            SET_QC();
            val = tmp >> 31;
        }
    }
    return val;
}

uint32_t HELPER(neon_add_u8)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80808080u;
    a &= ~0x80808080u;
    b &= ~0x80808080u;
    return (a + b) ^ mask;
}

uint32_t HELPER(neon_add_u16)(uint32_t a, uint32_t b)
{
    uint32_t mask;
    mask = (a ^ b) & 0x80008000u;
    a &= ~0x80008000u;
    b &= ~0x80008000u;
    return (a + b) ^ mask;
}

#define NEON_FN(dest, src1, src2) dest = src1 + src2
NEON_POP(padd_u8, neon_u8, 4)
NEON_POP(padd_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 - src2
NEON_VOP(sub_u8, neon_u8, 4)
NEON_VOP(sub_u16, neon_u16, 2)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = src1 * src2
NEON_VOP(mul_u8, neon_u8, 4)
NEON_VOP(mul_u16, neon_u16, 2)
#undef NEON_FN

/* Polynomial multiplication is like integer multiplication except the
   partial products are XORed, not added.  */
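/* For example, a per-byte carry-less multiply of 0x03 by 0x03 gives 0x05,
 * since (x + 1) * (x + 1) = x^2 + 1 over GF(2), whereas the integer
 * product would be 0x09.  */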
uint32_t HELPER(neon_mul_p8)(uint32_t op1, uint32_t op2)
{
    uint32_t mask;
    uint32_t result;
    result = 0;
    while (op1) {
        mask = 0;
        if (op1 & 1)
            mask |= 0xff;
        if (op1 & (1 << 8))
            mask |= (0xff << 8);
        if (op1 & (1 << 16))
            mask |= (0xff << 16);
        if (op1 & (1 << 24))
            mask |= (0xff << 24);
        result ^= op2 & mask;
        op1 = (op1 >> 1) & 0x7f7f7f7f;
        op2 = (op2 << 1) & 0xfefefefe;
    }
    return result;
}

#define NEON_FN(dest, src1, src2) dest = (src1 & src2) ? -1 : 0
NEON_VOP(tst_u8, neon_u8, 4)
NEON_VOP(tst_u16, neon_u16, 2)
NEON_VOP(tst_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src1, src2) dest = (src1 == src2) ? -1 : 0
NEON_VOP(ceq_u8, neon_u8, 4)
NEON_VOP(ceq_u16, neon_u16, 2)
NEON_VOP(ceq_u32, neon_u32, 1)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = (src < 0) ? -src : src
NEON_VOP1(abs_s8, neon_s8, 4)
NEON_VOP1(abs_s16, neon_s16, 2)
#undef NEON_FN

/* Count Leading Sign/Zero Bits.  */
static inline int do_clz8(uint8_t x)
{
    int n;
    for (n = 8; x; n--)
        x >>= 1;
    return n;
}

static inline int do_clz16(uint16_t x)
{
    int n;
    for (n = 16; x; n--)
        x >>= 1;
    return n;
}

#define NEON_FN(dest, src, dummy) dest = do_clz8(src)
NEON_VOP1(clz_u8, neon_u8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16(src)
NEON_VOP1(clz_u16, neon_u16, 2)
#undef NEON_FN
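
/* For the "leading sign bits" helpers below the sign bit itself is not
 * counted, e.g. cls_s8 of 0xf0 (binary 11110000) is 3, computed as
 * do_clz8(~0xf0) - 1 = do_clz8(0x0f) - 1 = 4 - 1.  */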
#define NEON_FN(dest, src, dummy) dest = do_clz8((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s8, neon_s8, 4)
#undef NEON_FN

#define NEON_FN(dest, src, dummy) dest = do_clz16((src < 0) ? ~src : src) - 1
NEON_VOP1(cls_s16, neon_s16, 2)
#undef NEON_FN

uint32_t HELPER(neon_cls_s32)(uint32_t x)
{
    int count;
    if ((int32_t)x < 0)
        x = ~x;
    for (count = 32; x; count--)
        x = x >> 1;
    return count - 1;
}

/* Bit count.  */
uint32_t HELPER(neon_cnt_u8)(uint32_t x)
{
    x = (x & 0x55555555) + ((x >> 1) & 0x55555555);
    x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
    x = (x & 0x0f0f0f0f) + ((x >> 4) & 0x0f0f0f0f);
    return x;
}

#define NEON_QDMULH16(dest, src1, src2, round) do { \
    uint32_t tmp = (int32_t)(int16_t) src1 * (int16_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT) { \
        SET_QC(); \
        tmp = (tmp >> 31) ^ ~SIGNBIT; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int32_t old = tmp; \
        tmp += 1 << 15; \
        if ((int32_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT - 1; \
        } \
    } \
    dest = tmp >> 16; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s16, neon_s16, 2)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH16(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s16, neon_s16, 2)
#undef NEON_FN
#undef NEON_QDMULH16

#define NEON_QDMULH32(dest, src1, src2, round) do { \
    uint64_t tmp = (int64_t)(int32_t) src1 * (int32_t) src2; \
    if ((tmp ^ (tmp << 1)) & SIGNBIT64) { \
        SET_QC(); \
        tmp = (tmp >> 63) ^ ~SIGNBIT64; \
    } else { \
        tmp <<= 1; \
    } \
    if (round) { \
        int64_t old = tmp; \
        tmp += (int64_t)1 << 31; \
        if ((int64_t)tmp < old) { \
            SET_QC(); \
            tmp = SIGNBIT64 - 1; \
        } \
    } \
    dest = tmp >> 32; \
    } while(0)
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 0)
NEON_VOP_ENV(qdmulh_s32, neon_s32, 1)
#undef NEON_FN
#define NEON_FN(dest, src1, src2) NEON_QDMULH32(dest, src1, src2, 1)
NEON_VOP_ENV(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32

uint32_t HELPER(neon_narrow_u8)(uint64_t x)
{
    return (x & 0xffu) | ((x >> 8) & 0xff00u) | ((x >> 16) & 0xff0000u)
           | ((x >> 24) & 0xff000000u);
}

uint32_t HELPER(neon_narrow_u16)(uint64_t x)
{
    return (x & 0xffffu) | ((x >> 16) & 0xffff0000u);
}

uint32_t HELPER(neon_narrow_high_u8)(uint64_t x)
{
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_high_u16)(uint64_t x)
{
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_narrow_round_high_u8)(uint64_t x)
{
    x &= 0xff80ff80ff80ff80ull;
    x += 0x0080008000800080ull;
    return ((x >> 8) & 0xff) | ((x >> 16) & 0xff00)
            | ((x >> 24) & 0xff0000) | ((x >> 32) & 0xff000000);
}

uint32_t HELPER(neon_narrow_round_high_u16)(uint64_t x)
{
    x &= 0xffff8000ffff8000ull;
    x += 0x0000800000008000ull;
    return ((x >> 16) & 0xffff) | ((x >> 32) & 0xffff0000);
}

uint32_t HELPER(neon_narrow_sat_u8)(CPUState *env, uint64_t x)
{
    uint16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s > 0xff) { \
        d = 0xff; \
        SET_QC(); \
    } else { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_s8)(CPUState *env, uint64_t x)
{
    int16_t s;
    uint8_t d;
    uint32_t res = 0;
#define SAT8(n) \
    s = x >> n; \
    if (s != (int8_t)s) { \
        d = (s >> 15) ^ 0x7f; \
        SET_QC(); \
    } else { \
        d = s; \
    } \
    res |= (uint32_t)d << (n / 2);

    SAT8(0);
    SAT8(16);
    SAT8(32);
    SAT8(48);
#undef SAT8
    return res;
}

uint32_t HELPER(neon_narrow_sat_u16)(CPUState *env, uint64_t x)
{
    uint32_t high;
    uint32_t low;
    low = x;
    if (low > 0xffff) {
        low = 0xffff;
        SET_QC();
    }
    high = x >> 32;
    if (high > 0xffff) {
        high = 0xffff;
        SET_QC();
    }
    return low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_s16)(CPUState *env, uint64_t x)
{
    int32_t low;
    int32_t high;
    low = x;
    if (low != (int16_t)low) {
        low = (low >> 31) ^ 0x7fff;
        SET_QC();
    }
    high = x >> 32;
    if (high != (int16_t)high) {
        high = (high >> 31) ^ 0x7fff;
        SET_QC();
    }
    return (uint16_t)low | (high << 16);
}

uint32_t HELPER(neon_narrow_sat_u32)(CPUState *env, uint64_t x)
{
    if (x > 0xffffffffu) {
        SET_QC();
        return 0xffffffffu;
    }
    return x;
}

uint32_t HELPER(neon_narrow_sat_s32)(CPUState *env, uint64_t x)
{
    if ((int64_t)x != (int32_t)x) {
        SET_QC();
        return (x >> 63) ^ 0x7fffffff;
    }
    return x;
}

uint64_t HELPER(neon_widen_u8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint8_t)x;
    tmp = (uint8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_s8)(uint32_t x)
{
    uint64_t tmp;
    uint64_t ret;
    ret = (uint16_t)(int8_t)x;
    tmp = (uint16_t)(int8_t)(x >> 8);
    ret |= tmp << 16;
    tmp = (uint16_t)(int8_t)(x >> 16);
    ret |= tmp << 32;
    tmp = (uint16_t)(int8_t)(x >> 24);
    ret |= tmp << 48;
    return ret;
}

uint64_t HELPER(neon_widen_u16)(uint32_t x)
{
    uint64_t high = (uint16_t)(x >> 16);
    return ((uint16_t)x) | (high << 32);
}

uint64_t HELPER(neon_widen_s16)(uint32_t x)
{
    uint64_t high = (int16_t)(x >> 16);
    return ((uint32_t)(int16_t)x) | (high << 32);
}

uint64_t HELPER(neon_addl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000800080008000ull;
    a &= ~0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_addl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ b) & 0x8000000080000000ull;
    a &= ~0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a + b) ^ mask;
}

uint64_t HELPER(neon_paddl_u16)(uint64_t a, uint64_t b)
{
    uint64_t tmp;
    uint64_t tmp2;

    tmp = a & 0x0000ffff0000ffffull;
    tmp += (a >> 16) & 0x0000ffff0000ffffull;
    tmp2 = b & 0xffff0000ffff0000ull;
    tmp2 += (b << 16) & 0xffff0000ffff0000ull;
    return    ( tmp         & 0xffff)
            | ((tmp  >> 16) & 0xffff0000ull)
            | ((tmp2 << 16) & 0xffff00000000ull)
            | ( tmp2        & 0xffff000000000000ull);
}

uint64_t HELPER(neon_paddl_u32)(uint64_t a, uint64_t b)
{
    uint32_t low = a + (a >> 32);
    uint32_t high = b + (b >> 32);
    return low + ((uint64_t)high << 32);
}

uint64_t HELPER(neon_subl_u16)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000800080008000ull;
    a |= 0x8000800080008000ull;
    b &= ~0x8000800080008000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_subl_u32)(uint64_t a, uint64_t b)
{
    uint64_t mask;
    mask = (a ^ ~b) & 0x8000000080000000ull;
    a |= 0x8000000080000000ull;
    b &= ~0x8000000080000000ull;
    return (a - b) ^ mask;
}

uint64_t HELPER(neon_addl_saturate_s32)(CPUState *env, uint64_t a, uint64_t b)
{
    uint32_t x, y;
    uint32_t low, high;

    x = a;
    y = b;
    low = x + y;
    if (((low ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        low = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    x = a >> 32;
    y = b >> 32;
    high = x + y;
    if (((high ^ x) & SIGNBIT) && !((x ^ y) & SIGNBIT)) {
        SET_QC();
        high = ((int32_t)x >> 31) ^ ~SIGNBIT;
    }
    return low | ((uint64_t)high << 32);
}

uint64_t HELPER(neon_addl_saturate_s64)(CPUState *env, uint64_t a, uint64_t b)
{
    uint64_t result;

    result = a + b;
    if (((result ^ a) & SIGNBIT64) && !((a ^ b) & SIGNBIT64)) {
        SET_QC();
        result = ((int64_t)a >> 63) ^ ~SIGNBIT64;
    }
    return result;
}

#define DO_ABD(dest, x, y, type) do { \
    type tmp_x = x; \
    type tmp_y = y; \
    dest = ((tmp_x > tmp_y) ? tmp_x - tmp_y : tmp_y - tmp_x); \
    } while(0)

uint64_t HELPER(neon_abdl_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint8_t);
    DO_ABD(tmp, a >> 8, b >> 8, uint8_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, uint8_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, uint8_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int8_t);
    DO_ABD(tmp, a >> 8, b >> 8, int8_t);
    result |= tmp << 16;
    DO_ABD(tmp, a >> 16, b >> 16, int8_t);
    result |= tmp << 32;
    DO_ABD(tmp, a >> 24, b >> 24, int8_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_abdl_u32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, uint16_t);
    DO_ABD(tmp, a >> 16, b >> 16, uint16_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_s32)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;
    DO_ABD(result, a, b, int16_t);
    DO_ABD(tmp, a >> 16, b >> 16, int16_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_abdl_u64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, uint32_t);
    return result;
}

uint64_t HELPER(neon_abdl_s64)(uint32_t a, uint32_t b)
{
    uint64_t result;
    DO_ABD(result, a, b, int32_t);
    return result;
}
#undef DO_ABD

/* Widening multiply.  Named type is the source type.  */
#define DO_MULL(dest, x, y, type1, type2) do { \
    type1 tmp_x = x; \
    type1 tmp_y = y; \
    dest = (type2)((type2)tmp_x * (type2)tmp_y); \
    } while(0)
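
/* For instance, in neon_mull_s8 below a lane value of 0xff is the signed
 * value -1, so multiplying it by 0x02 yields the 16-bit lane 0xfffe,
 * whereas neon_mull_u8 treats it as 255 and yields 0x01fe.  */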
uint64_t HELPER(neon_mull_u8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, uint8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, uint8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, uint8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_s8)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int8_t, uint16_t);
    DO_MULL(tmp, a >> 8, b >> 8, int8_t, uint16_t);
    result |= tmp << 16;
    DO_MULL(tmp, a >> 16, b >> 16, int8_t, uint16_t);
    result |= tmp << 32;
    DO_MULL(tmp, a >> 24, b >> 24, int8_t, uint16_t);
    result |= tmp << 48;
    return result;
}

uint64_t HELPER(neon_mull_u16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, uint16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, uint16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_mull_s16)(uint32_t a, uint32_t b)
{
    uint64_t tmp;
    uint64_t result;

    DO_MULL(result, a, b, int16_t, uint32_t);
    DO_MULL(tmp, a >> 16, b >> 16, int16_t, uint32_t);
    return result | (tmp << 32);
}

uint64_t HELPER(neon_negl_u16)(uint64_t x)
{
    uint16_t tmp;
    uint64_t result;
    result = (uint16_t)-x;
    tmp = -(x >> 16);
    result |= (uint64_t)tmp << 16;
    tmp = -(x >> 32);
    result |= (uint64_t)tmp << 32;
    tmp = -(x >> 48);
    result |= (uint64_t)tmp << 48;
    return result;
}

uint64_t HELPER(neon_negl_u32)(uint64_t x)
{
    uint32_t low = -x;
    uint32_t high = -(x >> 32);
    return low | ((uint64_t)high << 32);
}

/* FIXME: There should be a native op for this.  */
uint64_t HELPER(neon_negl_u64)(uint64_t x)
{
    return -x;
}

/* Saturating sign manipulation.  */
/* ??? Make these use NEON_VOP1 */
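/* Only the most negative value saturates here: for the 8-bit case both
 * qabs and qneg of 0x80 (-128) return 0x7f and set QC, since +128 is not
 * representable in an int8_t.  */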
#define DO_QABS8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s8)(CPUState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QABS8(vec.v1);
    DO_QABS8(vec.v2);
    DO_QABS8(vec.v3);
    DO_QABS8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QABS8

#define DO_QNEG8(x) do { \
    if (x == (int8_t)0x80) { \
        x = 0x7f; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s8)(CPUState *env, uint32_t x)
{
    neon_s8 vec;
    NEON_UNPACK(neon_s8, vec, x);
    DO_QNEG8(vec.v1);
    DO_QNEG8(vec.v2);
    DO_QNEG8(vec.v3);
    DO_QNEG8(vec.v4);
    NEON_PACK(neon_s8, x, vec);
    return x;
}
#undef DO_QNEG8

#define DO_QABS16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else if (x < 0) { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qabs_s16)(CPUState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QABS16(vec.v1);
    DO_QABS16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QABS16

#define DO_QNEG16(x) do { \
    if (x == (int16_t)0x8000) { \
        x = 0x7fff; \
        SET_QC(); \
    } else { \
        x = -x; \
    }} while (0)
uint32_t HELPER(neon_qneg_s16)(CPUState *env, uint32_t x)
{
    neon_s16 vec;
    NEON_UNPACK(neon_s16, vec, x);
    DO_QNEG16(vec.v1);
    DO_QNEG16(vec.v2);
    NEON_PACK(neon_s16, x, vec);
    return x;
}
#undef DO_QNEG16

uint32_t HELPER(neon_qabs_s32)(CPUState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else if ((int32_t)x < 0) {
        x = -x;
    }
    return x;
}

uint32_t HELPER(neon_qneg_s32)(CPUState *env, uint32_t x)
{
    if (x == SIGNBIT) {
        SET_QC();
        x = ~SIGNBIT;
    } else {
        x = -x;
    }
    return x;
}

/* NEON Float helpers.  */
uint32_t HELPER(neon_min_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = vfp_itos(a);
    float32 f1 = vfp_itos(b);
    return (float32_compare_quiet(f0, f1, NFS) == -1) ? a : b;
}

uint32_t HELPER(neon_max_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = vfp_itos(a);
    float32 f1 = vfp_itos(b);
    return (float32_compare_quiet(f0, f1, NFS) == 1) ? a : b;
}

uint32_t HELPER(neon_abd_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = vfp_itos(a);
    float32 f1 = vfp_itos(b);
    return vfp_stoi((float32_compare_quiet(f0, f1, NFS) == 1)
                    ? float32_sub(f0, f1, NFS)
                    : float32_sub(f1, f0, NFS));
}

uint32_t HELPER(neon_add_f32)(uint32_t a, uint32_t b)
{
    return vfp_stoi(float32_add(vfp_itos(a), vfp_itos(b), NFS));
}

uint32_t HELPER(neon_sub_f32)(uint32_t a, uint32_t b)
{
    return vfp_stoi(float32_sub(vfp_itos(a), vfp_itos(b), NFS));
}

uint32_t HELPER(neon_mul_f32)(uint32_t a, uint32_t b)
{
    return vfp_stoi(float32_mul(vfp_itos(a), vfp_itos(b), NFS));
}

/* Floating point comparisons produce an integer result.  */
#define NEON_VOP_FCMP(name, cmp) \
uint32_t HELPER(neon_##name)(uint32_t a, uint32_t b) \
{ \
    if (float32_compare_quiet(vfp_itos(a), vfp_itos(b), NFS) cmp 0) \
        return ~0; \
    else \
        return 0; \
}

NEON_VOP_FCMP(ceq_f32, ==)
NEON_VOP_FCMP(cge_f32, >=)
NEON_VOP_FCMP(cgt_f32, >)

uint32_t HELPER(neon_acge_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = float32_abs(vfp_itos(a));
    float32 f1 = float32_abs(vfp_itos(b));
    return (float32_compare_quiet(f0, f1, NFS) >= 0) ? ~0 : 0;
}

uint32_t HELPER(neon_acgt_f32)(uint32_t a, uint32_t b)
{
    float32 f0 = float32_abs(vfp_itos(a));
    float32 f1 = float32_abs(vfp_itos(b));
    return (float32_compare_quiet(f0, f1, NFS) > 0) ? ~0 : 0;
}