/*
 * AArch64 specific helpers
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/bitops.h"
#include "internals.h"
#include "qemu/crc32c.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "qemu/int128.h"
#include "qemu/atomic128.h"
#include "fpu/softfloat.h"
#include <zlib.h> /* For crc32 */
/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}
int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}
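
/*
 * For illustration: dividing LLONG_MIN by -1 would overflow in C
 * (undefined behaviour), but the architecture defines the SDIV result
 * to wrap, e.g. sdiv64(INT64_MIN, -1) == INT64_MIN, hence the explicit
 * check above.
 */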
uint64_t HELPER(rbit64)(uint64_t x)
{
    return revbit64(x);
}
void HELPER(msr_i_spsel)(CPUARMState *env, uint32_t imm)
{
    update_spsel(env, imm);
}
static void daif_check(CPUARMState *env, uint32_t op,
                       uint32_t imm, uintptr_t ra)
{
    /* DAIF update to PSTATE. This is OK from EL0 only if UMA is set. */
    if (arm_current_el(env) == 0 && !(arm_sctlr(env, 0) & SCTLR_UMA)) {
        raise_exception_ra(env, EXCP_UDEF,
                           syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                               extract32(op, 3, 3), 4,
                                               imm, 0x1f, 0),
                           exception_target_el(env), ra);
    }
}
void HELPER(msr_i_daifset)(CPUARMState *env, uint32_t imm)
{
    daif_check(env, 0x1e, imm, GETPC());
    env->daif |= (imm << 6) & PSTATE_DAIF;
}
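
/*
 * For illustration: the 4-bit immediate maps onto PSTATE.{D,A,I,F},
 * which live at bits [9:6], hence the shift left by 6 above; e.g.
 * imm = 0x3 sets the I and F interrupt mask bits.
 */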
void HELPER(msr_i_daifclear)(CPUARMState *env, uint32_t imm)
{
    daif_check(env, 0x1f, imm, GETPC());
    env->daif &= ~((imm << 6) & PSTATE_DAIF);
}
/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;

    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}
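
/*
 * For reference, this is the NZCV mapping the FCMP instruction defines:
 * equal -> 0110 (ZC), less -> 1000 (N), greater -> 0010 (C),
 * unordered -> 0011 (CV).
 */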
uint64_t HELPER(vfp_cmph_a64)(uint32_t x, uint32_t y, void *fp_status)
{
    return float_rel_to_flags(float16_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpeh_a64)(uint32_t x, uint32_t y, void *fp_status)
{
    return float_rel_to_flags(float16_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}
float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}
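
/*
 * For illustration: (1U << 30) is 0x40000000, the IEEE single-precision
 * encoding of 2.0, so FMULX(+0.0, -Inf) returns -2.0 rather than the
 * default NaN a plain multiply would produce.
 */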
float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}
/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}
/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */

uint32_t HELPER(recpsf_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    a = float16_chs(a);
    if ((float16_is_infinity(a) && float16_is_zero(b)) ||
        (float16_is_infinity(b) && float16_is_zero(a))) {
        return float16_two;
    }
    return float16_muladd(a, b, float16_two, 0, fpst);
}
float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}
float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}
uint32_t HELPER(rsqrtsf_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    a = float16_chs(a);
    if ((float16_is_infinity(a) && float16_is_zero(b)) ||
        (float16_is_infinity(b) && float16_is_zero(a))) {
        return float16_one_point_five;
    }
    return float16_muladd(a, b, float16_three, float_muladd_halve_result, fpst);
}
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_squash_input_denormal(a, fpst);
    b = float32_squash_input_denormal(b, fpst);

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_squash_input_denormal(a, fpst);
    b = float64_squash_input_denormal(b, fpst);

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}
/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}
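
/*
 * Worked example of the sign-extension trick for one byte field:
 * take x = 0xff (-1). x ^ 0x80 = 0x7f, | 0x8000 = 0x807f; subtracting
 * 0x80 gives 0x7fff, and the final ^ 0x8000 yields 0xffff, i.e. -1
 * sign-extended to 16 bits. ORing in the wide sign bit first means
 * the subtraction can never borrow into the neighbouring field.
 */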
uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}
uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}
uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
uint32_t HELPER(frecpx_f16)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint16_t val16, sbit;
    int16_t exp;

    if (float16_is_any_nan(a)) {
        float16 nan = a;
        if (float16_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            if (!fpst->default_nan_mode) {
                nan = float16_silence_nan(a, fpst);
            }
        }
        if (fpst->default_nan_mode) {
            nan = float16_default_nan(fpst);
        }
        return nan;
    }

    a = float16_squash_input_denormal(a, fpst);

    val16 = float16_val(a);
    sbit = 0x8000 & val16;
    exp = extract32(val16, 10, 5);

    if (exp == 0) {
        return make_float16(deposit32(sbit, 10, 5, 0x1e));
    } else {
        return make_float16(deposit32(sbit, 10, 5, ~exp));
    }
}
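
/*
 * For illustration: the result keeps the sign, zeroes the fraction and
 * bitwise-inverts the exponent field, e.g. a half-precision input with
 * exp = 0x10 produces exp = 0x0f, while a zero or denormal input
 * (exp = 0) gets the near-maximum exponent 0x1e instead.
 */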
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            if (!fpst->default_nan_mode) {
                nan = float32_silence_nan(a, fpst);
            }
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan(fpst);
        }
        return nan;
    }

    a = float32_squash_input_denormal(a, fpst);

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}
float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a, fpst)) {
            float_raise(float_flag_invalid, fpst);
            if (!fpst->default_nan_mode) {
                nan = float64_silence_nan(a, fpst);
            }
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan(fpst);
        }
        return nan;
    }

    a = float64_squash_input_denormal(a, fpst);

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}
float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}
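
/*
 * For illustration: converting the double 1.0 + 2^-40 truncates to 1.0
 * (0x3f800000) and raises Inexact, so the LSB is forced and the result
 * becomes 0x3f800001. This "round to odd" keeps a later narrowing of
 * the result from rounding twice in the same direction.
 */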
/* 64-bit versions of the CRC helpers. Note that although the operation
 * and the prototypes of crc32c() and crc32() mean that only the bottom
 * 32 bits of the accumulator and result are used, we pass and return
 * uint64_t for convenience of the generated code. Unlike the 32-bit
 * instruction set versions, val may genuinely have 64 bits of data in it.
 * The upper bytes of val (above the number specified by 'bytes') must have
 * been zeroed out by the caller.
 */
uint64_t HELPER(crc32_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* zlib crc32 converts the accumulator and output to one's complement. */
    return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
}
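
/*
 * For illustration: CRC32X maps onto crc32_64(Wn, Xm, 8); the two XORs
 * undo zlib's internal pre- and post-inversion, since the architectural
 * accumulator is the raw (non-inverted) CRC value.
 */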
uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
{
    uint8_t buf[8];

    stq_le_p(buf, val);

    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
uint64_t HELPER(paired_cmpxchg64_le)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
{
    Int128 cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    Int128 newv = int128_make128(new_lo, new_hi);
    Int128 oldv;
    uintptr_t ra = GETPC();
    uint64_t o0, o1;
    bool success;
    int mem_idx = cpu_mmu_index(env, false);
    MemOpIdx oi0 = make_memop_idx(MO_LEUQ | MO_ALIGN_16, mem_idx);
    MemOpIdx oi1 = make_memop_idx(MO_LEUQ, mem_idx);

    o0 = cpu_ldq_le_mmu(env, addr + 0, oi0, ra);
    o1 = cpu_ldq_le_mmu(env, addr + 8, oi1, ra);
    oldv = int128_make128(o0, o1);

    success = int128_eq(oldv, cmpv);
    if (success) {
        cpu_stq_le_mmu(env, addr + 0, int128_getlo(newv), oi1, ra);
        cpu_stq_le_mmu(env, addr + 8, int128_gethi(newv), oi1, ra);
    }

    return !success;
}
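
/*
 * Note the first access uses MO_ALIGN_16 so that the required 16-byte
 * alignment of the whole pair is checked up front; once it passes, the
 * second doubleword is naturally aligned and needs no alignment check.
 */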
uint64_t HELPER(paired_cmpxchg64_le_parallel)(CPUARMState *env, uint64_t addr,
                                              uint64_t new_lo, uint64_t new_hi)
{
    Int128 oldv, cmpv, newv;
    uintptr_t ra = GETPC();
    bool success;
    int mem_idx;
    MemOpIdx oi;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);

    cmpv = int128_make128(env->exclusive_val, env->exclusive_high);
    newv = int128_make128(new_lo, new_hi);
    oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);

    success = int128_eq(oldv, cmpv);
    return !success;
}
uint64_t HELPER(paired_cmpxchg64_be)(CPUARMState *env, uint64_t addr,
                                     uint64_t new_lo, uint64_t new_hi)
{
    /*
     * High and low need to be switched here because this is not actually a
     * 128bit store but two doublewords stored consecutively
     */
    Int128 cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
    Int128 newv = int128_make128(new_hi, new_lo);
    Int128 oldv;
    uintptr_t ra = GETPC();
    uint64_t o0, o1;
    bool success;
    int mem_idx = cpu_mmu_index(env, false);
    MemOpIdx oi0 = make_memop_idx(MO_BEUQ | MO_ALIGN_16, mem_idx);
    MemOpIdx oi1 = make_memop_idx(MO_BEUQ, mem_idx);

    o1 = cpu_ldq_be_mmu(env, addr + 0, oi0, ra);
    o0 = cpu_ldq_be_mmu(env, addr + 8, oi1, ra);
    oldv = int128_make128(o0, o1);

    success = int128_eq(oldv, cmpv);
    if (success) {
        cpu_stq_be_mmu(env, addr + 0, int128_gethi(newv), oi1, ra);
        cpu_stq_be_mmu(env, addr + 8, int128_getlo(newv), oi1, ra);
    }

    return !success;
}
uint64_t HELPER(paired_cmpxchg64_be_parallel)(CPUARMState *env, uint64_t addr,
                                              uint64_t new_lo, uint64_t new_hi)
{
    Int128 oldv, cmpv, newv;
    uintptr_t ra = GETPC();
    bool success;
    int mem_idx;
    MemOpIdx oi;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx);

    /*
     * High and low need to be switched here because this is not actually a
     * 128bit store but two doublewords stored consecutively
     */
    cmpv = int128_make128(env->exclusive_high, env->exclusive_val);
    newv = int128_make128(new_hi, new_lo);
    oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);

    success = int128_eq(oldv, cmpv);
    return !success;
}
/* Writes back the old data into Rs. */
void HELPER(casp_le_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
                              uint64_t new_lo, uint64_t new_hi)
{
    Int128 oldv, cmpv, newv;
    uintptr_t ra = GETPC();
    int mem_idx;
    MemOpIdx oi;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_LE | MO_128 | MO_ALIGN, mem_idx);

    cmpv = int128_make128(env->xregs[rs], env->xregs[rs + 1]);
    newv = int128_make128(new_lo, new_hi);
    oldv = cpu_atomic_cmpxchgo_le_mmu(env, addr, cmpv, newv, oi, ra);

    env->xregs[rs] = int128_getlo(oldv);
    env->xregs[rs + 1] = int128_gethi(oldv);
}
void HELPER(casp_be_parallel)(CPUARMState *env, uint32_t rs, uint64_t addr,
                              uint64_t new_hi, uint64_t new_lo)
{
    Int128 oldv, cmpv, newv;
    uintptr_t ra = GETPC();
    int mem_idx;
    MemOpIdx oi;

    assert(HAVE_CMPXCHG128);

    mem_idx = cpu_mmu_index(env, false);
    oi = make_memop_idx(MO_BE | MO_128 | MO_ALIGN, mem_idx);

    cmpv = int128_make128(env->xregs[rs + 1], env->xregs[rs]);
    newv = int128_make128(new_lo, new_hi);
    oldv = cpu_atomic_cmpxchgo_be_mmu(env, addr, cmpv, newv, oi, ra);

    env->xregs[rs + 1] = int128_getlo(oldv);
    env->xregs[rs] = int128_gethi(oldv);
}
/*
 * AdvSIMD half-precision
 */

#define ADVSIMD_HELPER(name, suffix) HELPER(glue(glue(advsimd_, name), suffix))

#define ADVSIMD_HALFOP(name) \
uint32_t ADVSIMD_HELPER(name, h)(uint32_t a, uint32_t b, void *fpstp) \
{ \
    float_status *fpst = fpstp; \
    return float16_ ## name(a, b, fpst); \
}

ADVSIMD_HALFOP(add)
ADVSIMD_HALFOP(sub)
ADVSIMD_HALFOP(mul)
ADVSIMD_HALFOP(div)
ADVSIMD_HALFOP(min)
ADVSIMD_HALFOP(max)
ADVSIMD_HALFOP(minnum)
ADVSIMD_HALFOP(maxnum)

#define ADVSIMD_TWOHALFOP(name) \
uint32_t ADVSIMD_HELPER(name, 2h)(uint32_t two_a, uint32_t two_b, void *fpstp) \
{ \
    float16 a1, a2, b1, b2; \
    uint32_t r1, r2; \
    float_status *fpst = fpstp; \
    a1 = extract32(two_a, 0, 16); \
    a2 = extract32(two_a, 16, 16); \
    b1 = extract32(two_b, 0, 16); \
    b2 = extract32(two_b, 16, 16); \
    r1 = float16_ ## name(a1, b1, fpst); \
    r2 = float16_ ## name(a2, b2, fpst); \
    return deposit32(r1, 16, 16, r2); \
}
ADVSIMD_TWOHALFOP(add)
ADVSIMD_TWOHALFOP(sub)
ADVSIMD_TWOHALFOP(mul)
ADVSIMD_TWOHALFOP(div)
ADVSIMD_TWOHALFOP(min)
ADVSIMD_TWOHALFOP(max)
ADVSIMD_TWOHALFOP(minnum)
ADVSIMD_TWOHALFOP(maxnum)
/* Data processing - scalar floating-point and advanced SIMD */
static float16 float16_mulx(float16 a, float16 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float16_squash_input_denormal(a, fpst);
    b = float16_squash_input_denormal(b, fpst);

    if ((float16_is_zero(a) && float16_is_infinity(b)) ||
        (float16_is_infinity(a) && float16_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float16((1U << 14) |
                            ((float16_val(a) ^ float16_val(b)) & (1U << 15)));
    }
    return float16_mul(a, b, fpst);
}

ADVSIMD_HALFOP(mulx)
ADVSIMD_TWOHALFOP(mulx)
/* fused multiply-accumulate */
uint32_t HELPER(advsimd_muladdh)(uint32_t a, uint32_t b, uint32_t c,
                                 void *fpstp)
{
    float_status *fpst = fpstp;
    return float16_muladd(a, b, c, 0, fpst);
}
uint32_t HELPER(advsimd_muladd2h)(uint32_t two_a, uint32_t two_b,
                                  uint32_t two_c, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 a1, a2, b1, b2, c1, c2;
    uint32_t r1, r2;

    a1 = extract32(two_a, 0, 16);
    a2 = extract32(two_a, 16, 16);
    b1 = extract32(two_b, 0, 16);
    b2 = extract32(two_b, 16, 16);
    c1 = extract32(two_c, 0, 16);
    c2 = extract32(two_c, 16, 16);
    r1 = float16_muladd(a1, b1, c1, 0, fpst);
    r2 = float16_muladd(a2, b2, c2, 0, fpst);
    return deposit32(r1, 16, 16, r2);
}
/*
 * Floating point comparisons produce an integer result. Softfloat
 * routines return float_relation types which we convert to the 0/-1
 * Neon requires.
 */

#define ADVSIMD_CMPRES(test) (test) ? 0xffff : 0
uint32_t HELPER(advsimd_ceq_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare_quiet(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_equal);
}

uint32_t HELPER(advsimd_cge_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater ||
                          compare == float_relation_equal);
}

uint32_t HELPER(advsimd_cgt_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    int compare = float16_compare(a, b, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
}

uint32_t HELPER(advsimd_acge_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
    float16 f1 = float16_abs(b);
    int compare = float16_compare(f0, f1, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater ||
                          compare == float_relation_equal);
}

uint32_t HELPER(advsimd_acgt_f16)(uint32_t a, uint32_t b, void *fpstp)
{
    float_status *fpst = fpstp;
    float16 f0 = float16_abs(a);
    float16 f1 = float16_abs(b);
    int compare = float16_compare(f0, f1, fpst);
    return ADVSIMD_CMPRES(compare == float_relation_greater);
}
/* round to integral */
uint32_t HELPER(advsimd_rinth_exact)(uint32_t x, void *fp_status)
{
    return float16_round_to_int(x, fp_status);
}
uint32_t HELPER(advsimd_rinth)(uint32_t x, void *fp_status)
{
    int old_flags = get_float_exception_flags(fp_status), new_flags;
    float16 ret;

    ret = float16_round_to_int(x, fp_status);

    /* Suppress any inexact exceptions the conversion produced */
    if (!(old_flags & float_flag_inexact)) {
        new_flags = get_float_exception_flags(fp_status);
        set_float_exception_flags(new_flags & ~float_flag_inexact, fp_status);
    }

    return ret;
}
/*
 * Half-precision floating point conversion functions
 *
 * There are a multitude of conversion functions with various
 * different rounding modes. This is dealt with by the calling code
 * setting the mode appropriately before calling the helper.
 */
uint32_t HELPER(advsimd_f16tosinth)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;

    /* Invalid if we are passed a NaN */
    if (float16_is_any_nan(a)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_int16(a, fpst);
}
uint32_t HELPER(advsimd_f16touinth)(uint32_t a, void *fpstp)
{
    float_status *fpst = fpstp;

    /* Invalid if we are passed a NaN */
    if (float16_is_any_nan(a)) {
        float_raise(float_flag_invalid, fpst);
        return 0;
    }
    return float16_to_uint16(a, fpst);
}
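
/*
 * For illustration: a NaN input therefore converts to 0 with the
 * Invalid Operation flag set, as the architecture requires, rather
 * than whatever value the softfloat conversion would otherwise pick.
 */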
static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}
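
/*
 * For illustration: an AArch64 SPSR with M[3:0] = 0b0101 (EL1h) has
 * M[1] clear and extract32(spsr, 2, 2) = 1, so it requests a return
 * to EL1; M[3:0] = 0b0001 (EL0 with SP_EL0 bit set) is rejected as
 * an illegal return.
 */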
static void cpsr_write_from_spsr_elx(CPUARMState *env,
                                     uint32_t val)
{
    uint32_t mask;

    /* Save SPSR_ELx.SS into PSTATE. */
    env->pstate = (env->pstate & ~PSTATE_SS) | (val & PSTATE_SS);
    val &= ~PSTATE_SS;

    /* Move DIT to the correct location for CPSR */
    if (val & PSTATE_DIT) {
        val &= ~PSTATE_DIT;
        val |= CPSR_DIT;
    }

    mask = aarch32_cpsr_valid_mask(env->features,
                                   &env_archcpu(env)->isar);
    cpsr_write(env, val, mask, CPSRWriteRaw);
}
void HELPER(exception_return)(CPUARMState *env, uint64_t new_pc)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    arm_clear_exclusive(env);

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el || (new_el == 2 && !arm_is_el2_enabled(env))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        goto illegal_return;
    }

    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    if (!return_to_aa64) {
        env->aarch64 = false;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write_from_spsr_elx(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = new_pc & ~0x1;
        } else {
            env->regs[15] = new_pc & ~0x3;
        }
        helper_rebuild_hflags_a32(env, new_el);
        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch32 EL%d PC 0x%" PRIx32 "\n",
                      cur_el, new_el, env->regs[15]);
    } else {
        int tbii;

        env->aarch64 = true;
        spsr &= aarch64_pstate_valid_mask(&env_archcpu(env)->isar);
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        helper_rebuild_hflags_a64(env, new_el);

        /*
         * Apply TBI to the exception return address. We had to delay this
         * until after we selected the new EL, so that we could select the
         * correct TBI+TBID bits. This is made easier by waiting until after
         * the hflags rebuild, since we can pull the composite TBII field
         * from there.
         */
        tbii = EX_TBFLAG_A64(env->hflags, TBII);
        if ((tbii >> extract64(new_pc, 55, 1)) & 1) {
            /* TBI is enabled. */
            int core_mmu_idx = cpu_mmu_index(env, false);
            if (regime_has_2_ranges(core_to_aa64_mmu_idx(core_mmu_idx))) {
                new_pc = sextract64(new_pc, 0, 56);
            } else {
                new_pc = extract64(new_pc, 0, 56);
            }
        }
        env->pc = new_pc;

        qemu_log_mask(CPU_LOG_INT, "Exception return from AArch64 EL%d to "
                      "AArch64 EL%d PC 0x%" PRIx64 "\n",
                      cur_el, new_el, env->pc);
    }

    /*
     * Note that cur_el can never be 0.  If new_el is 0, then
     * el0_a64 is return_to_aa64, else el0_a64 is ignored.
     */
    aarch64_sve_change_el(env, cur_el, new_el, return_to_aa64);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(env_archcpu(env));
    qemu_mutex_unlock_iothread();

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = new_pc;
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
    helper_rebuild_hflags_a64(env, cur_el);
    qemu_log_mask(LOG_GUEST_ERROR, "Illegal exception return at EL%d: "
                  "resuming execution at 0x%" PRIx64 "\n", cur_el, env->pc);
}
/*
 * Square Root and Reciprocal square root
 */

uint32_t HELPER(sqrt_f16)(uint32_t a, void *fpstp)
{
    float_status *s = fpstp;

    return float16_sqrt(a, s);
}
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
    /*
     * Implement DC ZVA, which zeroes a fixed-length block of memory.
     * Note that we do not implement the (architecturally mandated)
     * alignment fault for attempts to use this on Device memory
     * (which matches the usual QEMU behaviour of not implementing either
     * alignment faults or any memory attribute handling).
     */
    int blocklen = 4 << env_archcpu(env)->dcz_blocksize;
    uint64_t vaddr = vaddr_in & ~(blocklen - 1);
    int mmu_idx = cpu_mmu_index(env, false);
    void *mem;

    /*
     * Trapless lookup. In addition to actual invalid page, may
     * return NULL for I/O, watchpoints, clean pages, etc.
     */
    mem = tlb_vaddr_to_host(env, vaddr, MMU_DATA_STORE, mmu_idx);

#ifndef CONFIG_USER_ONLY
    if (unlikely(!mem)) {
        uintptr_t ra = GETPC();

        /*
         * Trap if accessing an invalid page. DC_ZVA requires that we supply
         * the original pointer for an invalid page. But watchpoints require
         * that we probe the actual space. So do both.
         */
        (void) probe_write(env, vaddr_in, 1, mmu_idx, ra);
        mem = probe_write(env, vaddr, blocklen, mmu_idx, ra);

        if (unlikely(!mem)) {
            /*
             * The only remaining reason for mem == NULL is I/O.
             * Just do a series of byte writes as the architecture demands.
             */
            for (int i = 0; i < blocklen; i++) {
                cpu_stb_mmuidx_ra(env, vaddr + i, 0, mmu_idx, ra);
            }
            return;
        }
    }
#endif

    memset(mem, 0, blocklen);
}
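
/*
 * For illustration: dcz_blocksize holds log2 of the block size in
 * words, so e.g. a value of 4 gives 4 << 4 = 64 bytes, a common
 * DC ZVA block size as advertised through DCZID_EL0.
 */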