/*
 * AArch64 specific helpers
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "internals.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "tcg.h"
#include "fpu/softfloat.h"
#include <zlib.h> /* For crc32 */

/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}

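/*
 * Example of why the guards above exist: AArch64 division never traps.
 * Dividing by zero returns 0, and LLONG_MIN / -1 (whose true quotient
 * 2^63 is unrepresentable) wraps to LLONG_MIN, whereas both cases are
 * undefined behaviour in C if evaluated as num / den on the host.
 */
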
uint64_t HELPER(rbit64)(uint64_t x)
{
    return revbit64(x);
}

/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}

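/*
 * For reference, this reproduces the architected FCMP results:
 *   equal     -> N=0 Z=1 C=1 V=0
 *   less      -> N=1 Z=0 C=0 V=0
 *   greater   -> N=0 Z=0 C=1 V=0
 *   unordered -> N=0 Z=0 C=1 V=1
 */
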
uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}

float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}

float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}

uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
                          uint32_t rn, uint32_t numregs)
{
    /* Helper function for SIMD TBL and TBX. We have to do the table
     * lookup part for the 64 bits worth of indices we're passed in.
     * result is the initial results vector (either zeroes for TBL
     * or some guest values for TBX), rn the register number where
     * the table starts, and numregs the number of registers in the table.
     * We return the results of the lookups.
     */
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int index = extract64(indices, shift, 8);
        if (index < 16 * numregs) {
            /* Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct register element plus a bit offset
             * into that element, bearing in mind that the table
             * can wrap around from V31 to V0.
             */
            int elt = (rn * 2 + (index >> 3)) % 64;
            int bitidx = (index & 7) * 8;
            uint64_t *q = aa64_vfp_qreg(env, elt >> 1);
            uint64_t val = extract64(q[elt & 1], bitidx, 8);

            result = deposit64(result, shift, 8, val);
        }
    }
    return result;
}

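/*
 * Worked example of the index arithmetic above: with numregs = 2 and
 * rn = 31, byte index 17 gives elt = (62 + 2) % 64 = 0 and bitidx = 8,
 * i.e. bits [15:8] of the low doubleword of V0: the table has wrapped
 * around from V31 to V0.
 */
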
/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}

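/*
 * The softfloat predicates above return 0 or 1, so negating them yields
 * the all-zeroes or all-ones mask the vector compares must produce.
 * Note a >= b is evaluated as b <= a (and a > b as b < a), which also
 * correctly gives a false result, i.e. a zero mask, for unordered
 * (NaN) inputs.
 */
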
/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */
#define float16_two make_float16(0x4000)
#define float16_three make_float16(0x4200)
#define float16_one_point_five make_float16(0x3e00)

#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

#define float64_two make_float64(0x4000000000000000ULL)
#define float64_three make_float64(0x4008000000000000ULL)
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)

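/*
 * These are the plain IEEE encodings of the constants; in half
 * precision, for instance, 2.0 is 0 10000 0000000000 = 0x4000 and
 * 1.5 is 0 01111 1000000000 = 0x3e00.
 */
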
float16 HELPER(recpsf_f16)(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    a = float16_chs(a);
    if ((float16_is_infinity(a) && float16_is_zero(b)) ||
        (float16_is_infinity(b) && float16_is_zero(a))) {
        return float16_two;
    }
    return float16_muladd(a, b, float16_two, 0, fpst);
}

float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}

float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}

float16 HELPER(rsqrtsf_f16)(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    a = float16_chs(a);
    if ((float16_is_infinity(a) && float16_is_zero(b)) ||
        (float16_is_infinity(b) && float16_is_zero(a))) {
        return float16_one_point_five;
    }
    return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
}

float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}

float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}

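/*
 * In other words recps computes 2 - a * b and rsqrts computes
 * (3 - a * b) / 2, the step terms of the Newton-Raphson iterations for
 * refining reciprocal and reciprocal-square-root estimates (the
 * negation of a has already been done by the *_chs() calls above).
 */
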
/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}

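/*
 * One lane of the sign-extension trick above, for the byte 0x80 (-128):
 * masking gives 0x0080, the XOR clears bit 7 (0x0000), the OR sets
 * bit 15 (0x8000), the subtract gives 0x7f80 and the final XOR restores
 * bit 15, yielding 0xff80 = -128 as a 16-bit field. Setting bit 15
 * before subtracting ensures the borrow cannot propagate out of the
 * 16-bit field into its neighbour.
 */
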
uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}

uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}

uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}

/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
float16 HELPER(frecpx_f16)(float16 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint16_t val16, sbit;
    int16_t exp;

    if (float16_is_any_nan(a)) {
        float16 nan = a;
        if (float16_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float16_maybe_silence_nan(a, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    }

    val16 = float16_val(a);
    sbit = 0x8000 & val16;
    exp = extract32(val16, 10, 5);

    if (exp == 0) {
        return make_float16(deposit32(sbit, 10, 5, 0x1e));
    } else {
        return make_float16(deposit32(sbit, 10, 5, ~exp));
    }
}

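/*
 * Mechanically this inverts the exponent bits, zeroes the fraction and
 * preserves the sign. For example frecpx_f16(2.0): 2.0 is 0x4000 with
 * exponent field 0x10, so the result exponent is ~0x10 & 0x1f = 0x0f,
 * giving 0x3c00, i.e. 1.0.
 */
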
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(a, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    }

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}

float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(a, fpst);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    }

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}

float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    r = float32_maybe_silence_nan(r, &tstat);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}

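/*
 * This "round to odd" behaviour is what makes FCVTXN useful: a result
 * whose low bit has been forced to 1 can never sit exactly on a
 * rounding boundary, so a subsequent narrowing of the float32 result
 * to float16 with round-to-nearest-even cannot double-round
 * incorrectly.
 */
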
/* 64-bit versions of the CRC helpers. Note that although the operation
 * (and the prototypes of crc32c() and crc32()) mean that only the bottom
 * 32 bits of the accumulator and result are used, we pass and return
 * uint64_t for convenience of the generated code. Unlike the 32-bit
 * instruction set versions, val may genuinely have 64 bits of data in it.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}

uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}

/* Returns 0 on success; 1 otherwise. */
static uint64_t do_paired_cmpxchg64_le(CPUARMState *env, uint64_t addr,
                                       uint64_t new_lo, uint64_t new_hi,
                                       bool parallel, uintptr_t ra)
{
    Int128 oldv, cmpv, newv;
    bool success;

    cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    newv = int128_make128(new_lo, new_hi);

    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
        oldv = helper_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);
        success = int128_eq(oldv, cmpv);
#endif
    } else {
        uint64_t o0, o1;

#ifdef CONFIG_USER_ONLY
        /* ??? Enforce alignment. */
        uint64_t *haddr = g2h(addr);

        helper_retaddr = ra;
        o0 = ldq_le_p(haddr + 0);
        o1 = ldq_le_p(haddr + 1);
        oldv = int128_make128(o0, o1);

        success = int128_eq(oldv, cmpv);
        if (success) {
            stq_le_p(haddr + 0, int128_getlo(newv));
            stq_le_p(haddr + 1, int128_gethi(newv));
        }
        helper_retaddr = 0;
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi0 = make_memop_idx(MO_LEQ | MO_ALIGN_16, mem_idx);
        TCGMemOpIdx oi1 = make_memop_idx(MO_LEQ, mem_idx);

        o0 = helper_le_ldq_mmu(env, addr + 0, oi0, ra);
        o1 = helper_le_ldq_mmu(env, addr + 8, oi1, ra);
        oldv = int128_make128(o0, o1);

        success = int128_eq(oldv, cmpv);
        if (success) {
            helper_le_stq_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
            helper_le_stq_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
        }
#endif
    }

    return !success;
}

uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
{
    return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, false, GETPC());
}

uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
                                              uint64_t new_lo, uint64_t new_hi)
{
    return do_paired_cmpxchg64_le(env, addr, new_lo, new_hi, true, GETPC());
}

static uint64_t do_paired_cmpxchg64_be(CPUARMState *env, uint64_t addr,
                                       uint64_t new_lo, uint64_t new_hi,
                                       bool parallel, uintptr_t ra)
{
    Int128 oldv, cmpv, newv;
    bool success;

    /* high and low need to be switched here because this is not actually a
     * 128bit store but two doublewords stored consecutively
     */
    cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
    newv = int128_make128(new_hi, new_lo);

    if (parallel) {
#ifndef CONFIG_ATOMIC128
        cpu_loop_exit_atomic(ENV_GET_CPU(env), ra);
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
        oldv = helper_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);
        success = int128_eq(oldv, cmpv);
#endif
    } else {
        uint64_t o0, o1;

#ifdef CONFIG_USER_ONLY
        /* ??? Enforce alignment. */
        uint64_t *haddr = g2h(addr);

        helper_retaddr = ra;
        o1 = ldq_be_p(haddr + 0);
        o0 = ldq_be_p(haddr + 1);
        oldv = int128_make128(o0, o1);

        success = int128_eq(oldv, cmpv);
        if (success) {
            stq_be_p(haddr + 0, int128_gethi(newv));
            stq_be_p(haddr + 1, int128_getlo(newv));
        }
        helper_retaddr = 0;
#else
        int mem_idx = cpu_mmu_index(env, false);
        TCGMemOpIdx oi0 = make_memop_idx(MO_BEQ | MO_ALIGN_16, mem_idx);
        TCGMemOpIdx oi1 = make_memop_idx(MO_BEQ, mem_idx);

        o1 = helper_be_ldq_mmu(env, addr + 0, oi0, ra);
        o0 = helper_be_ldq_mmu(env, addr + 8, oi1, ra);
        oldv = int128_make128(o0, o1);

        success = int128_eq(oldv, cmpv);
        if (success) {
            helper_be_stq_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
            helper_be_stq_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
        }
#endif
    }

    return !success;
}

uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
{
    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, false, GETPC());
}

uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
                                              uint64_t new_lo, uint64_t new_hi)
{
    return do_paired_cmpxchg64_be(env, addr, new_lo, new_hi, true, GETPC());
}

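/*
 * These four helpers back the STXP store-exclusive-pair path in the
 * translator: the value returned (0 on success, 1 on failure) becomes
 * STXP's status register result, and GETPC() supplies the return
 * address the memory subsystem needs to unwind correctly if the guest
 * access faults.
 */
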
/*
 * AdvSIMD half-precision
 */

#define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))

#define ADVSIMD_HALFOP(name) \
float16 ADVSIMD_HELPER(name, h)(float16 a, float16 b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float16_ ## name(a, b, fpst); \
}

ADVSIMD_HALFOP(add)
ADVSIMD_HALFOP(sub)
ADVSIMD_HALFOP(mul)
ADVSIMD_HALFOP(div)
ADVSIMD_HALFOP(min)
ADVSIMD_HALFOP(max)
ADVSIMD_HALFOP(minnum)
ADVSIMD_HALFOP(maxnum)

#define ADVSIMD_TWOHALFOP(name) \
uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \
{ \
    float16 a1, a2, b1, b2; \
    uint32_t r1, r2; \
    float_status *fpst = fpstp; \
    a1 = extract32(two_a, 0, 16); \
    a2 = extract32(two_a, 16, 16); \
    b1 = extract32(two_b, 0, 16); \
    b2 = extract32(two_b, 16, 16); \
    r1 = float16_ ## name(a1, b1, fpst); \
    r2 = float16_ ## name(a2, b2, fpst); \
    return deposit32(r1, 16, 16, r2); \
}

ADVSIMD_TWOHALFOP(add)
ADVSIMD_TWOHALFOP(sub)
ADVSIMD_TWOHALFOP(mul)
ADVSIMD_TWOHALFOP(div)
ADVSIMD_TWOHALFOP(min)
ADVSIMD_TWOHALFOP(max)
ADVSIMD_TWOHALFOP(minnum)
ADVSIMD_TWOHALFOP(maxnum)

/* Data processing - scalar floating-point and advanced SIMD */
static float16 float16_mulx(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    if ((float16_is_zero(a) && float16_is_infinity(b)) ||
        (float16_is_infinity(a) && float16_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float16((1U << 14) |
                            ((float16_val(a) ^ float16_val(b)) & (1U << 15)));
    }
    return float16_mul(a, b, fpst);
}

ADVSIMD_HALFOP(mulx)
ADVSIMD_TWOHALFOP(mulx)

/* fused multiply-accumulate */
float16 HELPER(advsimd_muladdh)(float16 a, float16 b, float16 c, void *fpstp)
{
    float_status *fpst = fpstp;
    return float16_muladd(a, b, c, 0, fpst);
}

uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
                                  uint32_t two_c, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 a1, a2, b1, b2, c1, c2;
    uint32_t r1, r2;
    a1 = extract32(two_a, 0, 16);
    a2 = extract32(two_a, 16, 16);
    b1 = extract32(two_b, 0, 16);
    b2 = extract32(two_b, 16, 16);
    c1 = extract32(two_c, 0, 16);
    c2 = extract32(two_c, 16, 16);
    r1 = float16_muladd(a1, b1, c1, 0, fpst);
    r2 = float16_muladd(a2, b2, c2, 0, fpst);
    return deposit32(r1, 16, 16, r2);
}

/*
 * Floating point comparisons produce an integer result. Softfloat
 * routines return float_relation types which we convert to the 0/-1
 * Neon requires.
 */
#define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0

uint32_t HELPER(advsimd_ceq_f16)(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare_quiet(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_equal);
}

uint32_t HELPER(advsimd_cge_f16)(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater ||
                          compare == float_relation_equal);
}

uint32_t HELPER(advsimd_cgt_f16)(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
}

uint32_t HELPER(advsimd_acge_f16)(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
    float16 f1 = float16_abs(b);
    int compare = float16_compare(f0, f1, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater ||
                          compare == float_relation_equal);
}

uint32_t HELPER(advsimd_acgt_f16)(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
    float16 f1 = float16_abs(b);
    int compare = float16_compare(f0, f1, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
}

/* round to integral */
float16 HELPER(advsimd_rinth_exact)(float16 x, void *fp_status)
{
    return float16_round_to_int(x, fp_status);
}

float16 HELPER(advsimd_rinth)(float16 x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float16 ret;

    ret = float16_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}

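/*
 * Of the two helpers above, the _exact variant is for FRINTX, which
 * architecturally reports Inexact when the input is not already
 * integral; the other rounding instructions must leave the Inexact
 * flag untouched, hence the flag bookkeeping above.
 */
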
/*
 * Half-precision floating point conversion functions
 *
 * There are a multitude of conversion functions with various
 * different rounding modes. This is dealt with by the calling code
 * setting the mode appropriately before calling the helper.
 */

uint32_t HELPER(advsimd_f16tosinth)(float16 a, void *fpstp)
{
    float_status *fpst = fpstp;

    /* Invalid if we are passed a NaN */
    if (float16_is_any_nan(a)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16(a, fpst);
}

uint32_t HELPER(advsimd_f16touinth)(float16 a, void *fpstp)
{
    float_status *fpst = fpstp;

    /* Invalid if we are passed a NaN */
    if (float16_is_any_nan(a)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16(a, fpst);
}

/*
 * Square Root and Reciprocal square root
 */

float16 HELPER(sqrt_f16)(float16 a, void *fpstp)
{
    float_status *s = fpstp;

    return float16_sqrt(a, s);
}