/*
 * AArch64 specific helpers
 *
 * Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
#include "internals.h"

/* C2.4.7  Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
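/* (AArch64 defines division by zero to return 0, and SDIV of LLONG_MIN
 * by -1 to wrap back to LLONG_MIN, rather than trapping.)
 */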
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}

int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}

uint64_t HELPER(clz64)(uint64_t x)
{
    return clz64(x);
}

uint64_t HELPER(cls64)(uint64_t x)
{
    return clrsb64(x);
}

uint32_t HELPER(cls32)(uint32_t x)
{
    return clrsb32(x);
}

uint32_t HELPER(clz32)(uint32_t x)
{
    return clz32(x);
}
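
/* Reverse the bits of a 64 bit word; for example
 * rbit64(0x0000000000000001ULL) == 0x8000000000000000ULL.
 */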
uint64_t HELPER(rbit64)(uint64_t x)
{
    /* assign the correct byte position */
    x = bswap64(x);

    /* assign the correct nibble position */
    x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);

    /* assign the correct bit position */
    x = ((x & 0x8888888888888888ULL) >> 3)
      | ((x & 0x4444444444444444ULL) >> 1)
      | ((x & 0x2222222222222222ULL) << 1)
      | ((x & 0x1111111111111111ULL) << 3);

    return x;
}

/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM PSTATE flags.
 */
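/* (Per the ARM ARM: equal sets Z and C, less-than sets N, greater-than
 * sets C, and unordered sets C and V.)
 */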
static inline uint32_t float_rel_to_flags(int res)
{
    uint32_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}

uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}

float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}
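
/* FMULX only differs from an ordinary multiply in the 0 * infinity case:
 * returning +/-2.0 there instead of the default NaN suits the
 * reciprocal-estimate sequences (FRECPE/FRECPS, FRSQRTE/FRSQRTS) it is
 * typically used with.
 */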

float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}

uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
                          uint32_t rn, uint32_t numregs)
{
    /* Helper function for SIMD TBL and TBX. We have to do the table
     * lookup part for the 64 bits worth of indices we're passed in.
     * result is the initial results vector (either zeroes for TBL
     * or some guest values for TBX), rn the register number where
     * the table starts, and numregs the number of registers in the table.
     * We return the results of the lookups.
     */
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int index = extract64(indices, shift, 8);
        if (index < 16 * numregs) {
            /* Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct vfp.regs[] element plus a bit offset
             * into that element, bearing in mind that the table
             * can wrap around from V31 to V0.
             */
            int elt = (rn * 2 + (index >> 3)) % 64;
            int bitidx = (index & 7) * 8;
            uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8);
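
            /* For example, with rn == 30 and numregs == 4, index 20 is
             * byte 4 of V31: elt == (30 * 2 + 2) % 64 == 62 (the low
             * doubleword of V31) and bitidx == (20 & 7) * 8 == 32.
             */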

            result = deposit64(result, shift, 8, val);
        }
    }
    return result;
}

/* Helper function for 64 bit polynomial multiply case:
 * perform PolynomialMult(op1, op2) and return either the top or
 * bottom half of the 128 bit result.
 */
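/* For example, PolynomialMult(0b11, 0b110) == 0b1010: the partial
 * products are combined with XOR rather than a carrying addition.
 */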
uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int bitnum;

    for (bitnum = 0; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 << bitnum;
        }
    }

    return res;
}

uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int bitnum;

    /* bit 0 of op1 can't influence the high 64 bits at all */
    for (bitnum = 1; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 >> (64 - bitnum);
        }
    }

    return res;
}

/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}

/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */
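/* (Architecturally, FRECPS returns 2.0 - op1 * op2 and FRSQRTS returns
 * (3.0 - op1 * op2) / 2.0, per the ARM ARM pseudocode.)
 */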

#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

#define float64_two make_float64(0x4000000000000000ULL)
#define float64_three make_float64(0x4008000000000000ULL)
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)

float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}

float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}

float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three,
                          float_muladd_halve_result, fpst);
}

float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three,
                          float_muladd_halve_result, fpst);
}

/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
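    /* Worked example for one lane: -128 (0xff80) plus 127 (0x007f) sums
     * the low 15 bits to 0x7fff and XORs in the sign difference 0x8000,
     * giving 0xffff == -1 with no carry into the neighbouring lane.
     */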
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);

    return res ^ signres;
}

uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}

uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}

uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}

/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
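/* (FRECPX keeps the sign, complements the exponent field and zeroes the
 * fraction, i.e. it returns roughly the reciprocal rounded to a power of
 * two; it is typically used to pre-scale values so a later reciprocal
 * estimate stays in range.)
 */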
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    }

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}

float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    }

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}

float32 HELPER(fcvtx_f64_to_f32)(float64 a, CPUARMState *env)
{
    /* Von Neumann rounding is implemented by using round-to-zero
     * and then setting the LSB of the result if Inexact was raised.
     */
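    /* (This "round to odd" result keeps enough information that a
     * subsequent narrowing, e.g. to half precision, does not suffer
     * from double rounding.)
     */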
    float32 r;
    float_status *fpst = &env->vfp.fp_status;
    float_status tstat = *fpst;
    int exflags;

    set_float_rounding_mode(float_round_to_zero, &tstat);
    set_float_exception_flags(0, &tstat);
    r = float64_to_float32(a, &tstat);
    r = float32_maybe_silence_nan(r);
    exflags = get_float_exception_flags(&tstat);
    if (exflags & float_flag_inexact) {
        r = make_float32(float32_val(r) | 1);
    }
    exflags |= get_float_exception_flags(fpst);
    set_float_exception_flags(exflags, fpst);
    return r;
}

/* Handle a CPU exception.  */
void aarch64_cpu_do_interrupt(CPUState *cs)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    target_ulong addr = env->cp15.c12_vbar;
    int i;
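
    /* Vector table layout: current EL with SP_EL0 uses offset 0x0, current
     * EL with SP_ELx 0x200, lower EL using AArch64 0x400 and lower EL using
     * AArch32 0x600; within each block the synchronous, IRQ, FIQ and SError
     * entries are spaced 0x80 apart.
     */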
    if (arm_current_pl(env) == 0) {
        if (is_a64(env)) {
            addr += 0x400;
        } else {
            addr += 0x600;
        }
    } else if (pstate_read(env) & PSTATE_SP) {
        addr += 0x200;
    }

    arm_log_exception(cs->exception_index);
    qemu_log_mask(CPU_LOG_INT, "...from EL%d\n", arm_current_pl(env));
    if (qemu_loglevel_mask(CPU_LOG_INT)
        && !excp_is_internal(cs->exception_index)) {
        qemu_log_mask(CPU_LOG_INT, "...with ESR 0x%" PRIx32 "\n",
                      env->exception.syndrome);
    }

    env->cp15.esr_el1 = env->exception.syndrome;
    env->cp15.far_el1 = env->exception.vaddress;

    switch (cs->exception_index) {
    case EXCP_PREFETCH_ABORT:
    case EXCP_DATA_ABORT:
        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
                      env->cp15.far_el1);
        break;
    default:
        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
    }

    if (is_a64(env)) {
        env->banked_spsr[0] = pstate_read(env);
        env->sp_el[arm_current_pl(env)] = env->xregs[31];
        env->xregs[31] = env->sp_el[1];
        env->elr_el1 = env->pc;
    } else {
        env->banked_spsr[0] = cpsr_read(env);
        if (!env->thumb) {
            env->cp15.esr_el1 |= 1 << 25;
        }
        env->elr_el1 = env->regs[15];

        for (i = 0; i < 15; i++) {
            env->xregs[i] = env->regs[i];
        }

        env->condexec_bits = 0;
    }

    pstate_write(env, PSTATE_DAIF | PSTATE_MODE_EL1h);
    env->aarch64 = 1;
    env->pc = addr;

    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
}