/*
 * AArch64 specific helpers
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "internals.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "tcg.h"
#include "fpu/softfloat.h"
#include <zlib.h> /* For crc32 */
/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}
int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}
uint64_t HELPER(rbit64)(uint64_t x)
{
    return revbit64(x);
}
/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}
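
/* Worked example: comparing 1.0 against a quiet NaN returns
 * float_relation_unordered, so the caller sees PSTATE_C | PSTATE_V,
 * i.e. NZCV = 0011 as the ARM ARM specifies for an unordered FCMP
 * result; equal inputs give 0110, less 1000 and greater 0010.
 */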
uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status)
{
    return float_rel_to_flags(float16_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status)
{
    return float_rel_to_flags(float16_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}
float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}
float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}
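
/* FMULX example: mulx(+Inf, -0.0) returns -2.0 (0xc000000000000000)
 * where an ordinary multiply would return the default NaN; 1ULL << 62
 * is the encoding of 2.0 and bit 63 carries sign(A) XOR sign(B).
 */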
uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
                          uint32_t rn, uint32_t numregs)
{
    /* Helper function for SIMD TBL and TBX. We have to do the table
     * lookup part for the 64 bits worth of indices we're passed in.
     * result is the initial results vector (either zeroes for TBL
     * or some guest values for TBX), rn the register number where
     * the table starts, and numregs the number of registers in the table.
     * We return the results of the lookups.
     */
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int index = extract64(indices, shift, 8);
        if (index < 16 * numregs) {
            /* Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct register element plus a bit offset
             * into that element, bearing in mind that the table
             * can wrap around from V31 to V0.
             */
            int elt = (rn * 2 + (index >> 3)) % 64;
            int bitidx = (index & 7) * 8;
            uint64_t *q = aa64_vfp_qreg(env, elt >> 1);
            uint64_t val = extract64(q[elt & 1], bitidx, 8);

            result = deposit64(result, shift, 8, val);
        }
    }
    return result;
}
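
/* Worked example: with numregs = 2 the virtual table is the 32 bytes of
 * V[rn]:V[rn+1]. Index byte 19 gives elt = (rn * 2 + 2) % 64 and
 * bitidx = 24, i.e. byte 3 of the low doubleword of V[rn+1]; an index
 * byte of 32 or more leaves the result byte as passed in (zero for
 * TBL, the old destination byte for TBX).
 */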
/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}
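
/* The softfloat predicates return 0 or 1, so negating them produces the
 * all-zeroes/all-ones mask Neon expects. A >= B is phrased as B <= A
 * (and A > B as B < A) so that unordered comparisons come out false.
 */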
/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */
#define float16_two make_float16(0x4000)
#define float16_three make_float16(0x4200)
#define float16_one_point_five make_float16(0x3e00)

#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

#define float64_two make_float64(0x4000000000000000ULL)
#define float64_three make_float64(0x4008000000000000ULL)
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)
uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    a = float16_chs(a);
    if ((float16_is_infinity(a) && float16_is_zero(b)) ||
        (float16_is_infinity(b) && float16_is_zero(a))) {
        return float16_two;
    }
    return float16_muladd(a, b, float16_two, 0, fpst);
}
float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}
float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}
uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    a = float16_chs(a);
    if ((float16_is_infinity(a) && float16_is_zero(b)) ||
        (float16_is_infinity(b) && float16_is_zero(a))) {
        return float16_one_point_five;
    }
    return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
}
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}
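
/* With the float*_chs() negation of 'a' above, these helpers compute
 * the ARM ARM step functions as single fused operations:
 *   FRECPS:  2 - a * b       = muladd(-a, b, 2)
 *   FRSQRTS: (3 - a * b) / 2 = muladd(-a, b, 3) with halve_result
 * The inf * 0 special cases return the exact constants (2.0 and 1.5)
 * the pseudocode mandates instead of raising Invalid.
 */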
/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (e.g. _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}
uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}
uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}
uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint16_t val16, sbit;
    int16_t exp;

    if (float16_is_any_nan(a)) {
        float16 nan = a;
        if (float16_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_silence_nan(a, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    }

    a = float16_squash_input_denormal(a, fpst);

    val16 = float16_val(a);
    sbit = 0x8000 & val16;
    exp = extract32(val16, 10, 5);

    if (exp == 0) {
        return make_float16(deposit32(sbit, 10, 5, 0x1e));
    } else {
        return make_float16(deposit32(sbit, 10, 5, ~exp));
    }
}
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_silence_nan(a, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    }

    a = float32_squash_input_denormal(a, fpst);

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}
float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_silence_nan(a, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    }

    a = float64_squash_input_denormal(a, fpst);

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}
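
/* FRECPX inverts the exponent field and zeroes the fraction: for 2.0
 * (biased exponent 0x400) the f64 result has exponent
 * ~0x400 & 0x7ff = 0x3ff, i.e. exactly 1.0. Zeros and denormals
 * (exp == 0) map to the maximum normal exponent instead.
 */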
float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}
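
/* Round-to-odd example: any float64 strictly between two float32
 * neighbours is truncated and then has its LSB forced to 1, so it can
 * never be mistaken for an exactly-converted value; this is what
 * prevents double rounding when FCVTXN results are narrowed again.
 */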
/* 64-bit versions of the CRC helpers. Note that although the operation
 * and the prototypes of crc32c() and crc32() mean that only the bottom
 * 32 bits of the accumulator and result are used, we pass and return
 * uint64_t for convenience of the generated code. Unlike the 32-bit
 * instruction set versions, val may genuinely have 64 bits of data in it.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
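
/* Usage sketch: for the X-form instructions (e.g. CRC32CX) the
 * translator passes bytes = 8 so all of val is hashed; narrower forms
 * must hand over val with the unused upper bytes already cleared, as
 * the comment above requires.
 */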
uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
{
    Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    Int128 newv = int128_make128(new_lo, new_hi);
    Int128 oldv;
    uintptr_t ra = GETPC();
    uint64_t o0, o1;
    bool success;

#ifdef CONFIG_USER_ONLY
    /* ??? Enforce alignment. */
    uint64_t *haddr = g2h(addr);

    helper_retaddr = ra;
    o0 = ldq_le_p(haddr + 0);
    o1 = ldq_le_p(haddr + 1);
    oldv = int128_make128(o0, o1);

    success = int128_eq(oldv, cmpv);
    if (success) {
        stq_le_p(haddr + 0, int128_getlo(newv));
        stq_le_p(haddr + 1, int128_gethi(newv));
    }
    helper_retaddr = 0;
#else
    int mem_idx = cpu_mmu_index(env, false);
    TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
    TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);

    o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
    o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
    oldv = int128_make128(o0, o1);

    success = int128_eq(oldv, cmpv);
    if (success) {
        helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
        helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
    }
#endif

    return !success;
}
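
/* The value returned here becomes the status register of STXP: 0 means
 * the store-exclusive succeeded, 1 that the comparison against the
 * exclusive monitor failed, hence the !success inversion here and in
 * the helpers below.
 */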
uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
                                              uint64_t new_lo, uint64_t new_hi)
{
    Int128 oldv, cmpv, newv;
    uintptr_t ra = GETPC();
    bool success;
    int mem_idx;
    TCGMemOpIdx oi;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);

    cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    newv = int128_make128(new_lo, new_hi);
    oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);

    success = int128_eq(oldv, cmpv);
    return !success;
}
uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
{
    /*
     * High and low need to be switched here because this is not actually a
     * 128bit store but two doublewords stored consecutively
     */
    Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    Int128 newv = int128_make128(new_lo, new_hi);
    Int128 oldv;
    uintptr_t ra = GETPC();
    uint64_t o0, o1;
    bool success;

#ifdef CONFIG_USER_ONLY
    /* ??? Enforce alignment. */
    uint64_t *haddr = g2h(addr);

    helper_retaddr = ra;
    o1 = ldq_be_p(haddr + 0);
    o0 = ldq_be_p(haddr + 1);
    oldv = int128_make128(o0, o1);

    success = int128_eq(oldv, cmpv);
    if (success) {
        stq_be_p(haddr + 0, int128_gethi(newv));
        stq_be_p(haddr + 1, int128_getlo(newv));
    }
    helper_retaddr = 0;
#else
    int mem_idx = cpu_mmu_index(env, false);
    TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
    TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);

    o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
    o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
    oldv = int128_make128(o0, o1);

    success = int128_eq(oldv, cmpv);
    if (success) {
        helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
        helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
    }
#endif

    return !success;
}
uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
                                              uint64_t new_lo, uint64_t new_hi)
{
    Int128 oldv, cmpv, newv;
    uintptr_t ra = GETPC();
    bool success;
    int mem_idx;
    TCGMemOpIdx oi;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);

    /*
     * High and low need to be switched here because this is not actually a
     * 128bit store but two doublewords stored consecutively
     */
    cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
    newv = int128_make128(new_hi, new_lo);
    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);

    success = int128_eq(oldv, cmpv);
    return !success;
}
/* Writes back the old data into Rs. */
void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
                              uint64_t new_lo, uint64_t new_hi)
{
    Int128 oldv, cmpv, newv;
    uintptr_t ra = GETPC();
    int mem_idx;
    TCGMemOpIdx oi;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);

    cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
    newv = int128_make128(new_lo, new_hi);
    oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);

    env->xregs[rs] = int128_getlo(oldv);
    env->xregs[rs + 1] = int128_gethi(oldv);
}
void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
                              uint64_t new_hi, uint64_t new_lo)
{
    Int128 oldv, cmpv, newv;
    uintptr_t ra = GETPC();
    int mem_idx;
    TCGMemOpIdx oi;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);

    cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
    newv = int128_make128(new_lo, new_hi);
    oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);

    env->xregs[rs + 1] = int128_getlo(oldv);
    env->xregs[rs] = int128_gethi(oldv);
}
/*
 * AdvSIMD half-precision
 */

#define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))

#define ADVSIMD_HALFOP(name) \
uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float16_ ## name(a, b, fpst); \
}

ADVSIMD_HALFOP(add)
ADVSIMD_HALFOP(sub)
ADVSIMD_HALFOP(mul)
ADVSIMD_HALFOP(div)
ADVSIMD_HALFOP(min)
ADVSIMD_HALFOP(max)
ADVSIMD_HALFOP(minnum)
ADVSIMD_HALFOP(maxnum)

#define ADVSIMD_TWOHALFOP(name) \
uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \
{ \
    float16 a1, a2, b1, b2; \
    uint32_t r1, r2; \
    float_status *fpst = fpstp; \
    a1 = extract32(two_a, 0, 16); \
    a2 = extract32(two_a, 16, 16); \
    b1 = extract32(two_b, 0, 16); \
    b2 = extract32(two_b, 16, 16); \
    r1 = float16_ ## name(a1, b1, fpst); \
    r2 = float16_ ## name(a2, b2, fpst); \
    return deposit32(r1, 16, 16, r2); \
}

ADVSIMD_TWOHALFOP(add)
ADVSIMD_TWOHALFOP(sub)
ADVSIMD_TWOHALFOP(mul)
ADVSIMD_TWOHALFOP(div)
ADVSIMD_TWOHALFOP(min)
ADVSIMD_TWOHALFOP(max)
ADVSIMD_TWOHALFOP(minnum)
ADVSIMD_TWOHALFOP(maxnum)
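
/* For instance, ADVSIMD_TWOHALFOP(add) above expands to
 * helper_advsimd_add2h(), which performs two independent float16
 * additions on the packed halves of the 32-bit operands and repacks
 * the results in the same layout.
 */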
/* Data processing - scalar floating-point and advanced SIMD */
static float16 float16_mulx(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    if ((float16_is_zero(a) && float16_is_infinity(b)) ||
        (float16_is_infinity(a) && float16_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float16((1U << 14) |
                            ((float16_val(a) ^ float16_val(b)) & (1U << 15)));
    }
    return float16_mul(a, b, fpst);
}

ADVSIMD_HALFOP(mulx)
ADVSIMD_TWOHALFOP(mulx)
/* fused multiply-accumulate */
uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c,
                                 void *fpstp)
{
    float_status *fpst = fpstp;
    return float16_muladd(a, b, c, 0, fpst);
}

uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
                                  uint32_t two_c, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 a1, a2, b1, b2, c1, c2;
    uint32_t r1, r2;

    a1 = extract32(two_a, 0, 16);
    a2 = extract32(two_a, 16, 16);
    b1 = extract32(two_b, 0, 16);
    b2 = extract32(two_b, 16, 16);
    c1 = extract32(two_c, 0, 16);
    c2 = extract32(two_c, 16, 16);
    r1 = float16_muladd(a1, b1, c1, 0, fpst);
    r2 = float16_muladd(a2, b2, c2, 0, fpst);
    return deposit32(r1, 16, 16, r2);
}
/*
 * Floating point comparisons produce an integer result. Softfloat
 * routines return float_relation types which we convert to the 0/-1
 * Neon requires.
 */

#define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0

uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare_quiet(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_equal);
}

uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater ||
                          compare == float_relation_equal);
}

uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
}

uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
    float16 f1 = float16_abs(b);
    int compare = float16_compare(f0, f1, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater ||
                          compare == float_relation_equal);
}

uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
    float16 f1 = float16_abs(b);
    int compare = float16_compare(f0, f1, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
}
/* round to integral */
uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status)
{
    return float16_round_to_int(x, fp_status);
}

uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float16 ret;

    ret = float16_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
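
/* The _exact variant backs FRINTX, which must signal Inexact whenever
 * the result differs from the input; the non-exact variant backs the
 * other FRINT* instructions, which must leave the Inexact flag
 * untouched, hence the save-and-restore of old_flags above.
 */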
/*
 * Half-precision floating point conversion functions
 *
 * There are a multitude of conversion functions with various
 * different rounding modes. This is dealt with by the calling code
 * setting the mode appropriately before calling the helper.
 */

uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;

    /* Invalid if we are passed a NaN */
    if (float16_is_any_nan(a)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16(a, fpst);
}

uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;

    /* Invalid if we are passed a NaN */
    if (float16_is_any_nan(a)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16(a, fpst);
}
/*
 * Square Root and Reciprocal square root
 */

uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
{
    float_status *s = fpstp;

    return float16_sqrt(a, s);
}