/*
 *  AArch64 specific helpers
 *
 *  Copyright (c) 2013 Alexander Graf <agraf@suse.de>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/gdbstub.h"
#include "helper.h"
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
/* C2.4.7 Multiply and divide */
/* special cases for 0 and LLONG_MIN are mandated by the standard */
uint64_t HELPER(udiv64)(uint64_t num, uint64_t den)
{
    if (den == 0) {
        return 0;
    }
    return num / den;
}
int64_t HELPER(sdiv64)(int64_t num, int64_t den)
{
    if (den == 0) {
        return 0;
    }
    if (num == LLONG_MIN && den == -1) {
        return LLONG_MIN;
    }
    return num / den;
}
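/* Note that in C the quotient LLONG_MIN / -1 is undefined behaviour
 * (the true result 2^63 is unrepresentable in 64 bits), so the explicit
 * check above is required as well as being the architected SDIV result.
 */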
uint64_t HELPER(clz64)(uint64_t x)
{
    return clz64(x);
}

uint64_t HELPER(cls64)(uint64_t x)
{
    return clrsb64(x);
}

uint32_t HELPER(cls32)(uint32_t x)
{
    return clrsb32(x);
}

uint32_t HELPER(clz32)(uint32_t x)
{
    return clz32(x);
}
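/* cls ("count leading sign bits") returns the number of bits following
 * the sign bit that match it, which is exactly what the host-utils
 * clrsb ("count leading redundant sign bits") primitives compute.
 */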
uint64_t HELPER(rbit64)(uint64_t x)
{
    /* assign the correct byte position */
    x = bswap64(x);

    /* assign the correct nibble position */
    x = ((x & 0xf0f0f0f0f0f0f0f0ULL) >> 4)
      | ((x & 0x0f0f0f0f0f0f0f0fULL) << 4);

    /* assign the correct bit position */
    x = ((x & 0x8888888888888888ULL) >> 3)
      | ((x & 0x4444444444444444ULL) >> 1)
      | ((x & 0x2222222222222222ULL) << 1)
      | ((x & 0x1111111111111111ULL) << 3);

    return x;
}
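/* The three swaps above compose into a full 64-bit reversal: bswap64
 * reverses the bytes, the next step swaps the two nibbles within each
 * byte, and the last step reverses the four bits within each nibble.
 * E.g. x = 1 (bit 0 set) maps to 1ULL << 63.
 */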
/* Convert a softfloat float_relation_ (as returned by
 * the float*_compare functions) to the correct ARM
 * NZCV flag state.
 */
static inline uint32_t float_rel_to_flags(int res)
{
    uint64_t flags;
    switch (res) {
    case float_relation_equal:
        flags = PSTATE_Z | PSTATE_C;
        break;
    case float_relation_less:
        flags = PSTATE_N;
        break;
    case float_relation_greater:
        flags = PSTATE_C;
        break;
    case float_relation_unordered:
    default:
        flags = PSTATE_C | PSTATE_V;
        break;
    }
    return flags;
}
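/* This matches the architected FCMP flag results:
 *   equal     -> NZCV = 0110
 *   less      -> NZCV = 1000
 *   greater   -> NZCV = 0010
 *   unordered -> NZCV = 0011
 */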
uint64_t HELPER(vfp_cmps_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpes_a64)(float32 x, float32 y, void *fp_status)
{
    return float_rel_to_flags(float32_compare(x, y, fp_status));
}

uint64_t HELPER(vfp_cmpd_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare_quiet(x, y, fp_status));
}

uint64_t HELPER(vfp_cmped_a64)(float64 x, float64 y, void *fp_status)
{
    return float_rel_to_flags(float64_compare(x, y, fp_status));
}
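/* The cmpe* variants ("compare with exception") use the signaling
 * compare functions, which raise Invalid for any NaN operand; the
 * cmp* variants use the quiet compares, which only raise Invalid
 * for signaling NaNs.
 */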
float32 HELPER(vfp_mulxs)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float32_is_zero(a) && float32_is_infinity(b)) ||
        (float32_is_infinity(a) && float32_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float32((1U << 30) |
                            ((float32_val(a) ^ float32_val(b)) & (1U << 31)));
    }
    return float32_mul(a, b, fpst);
}
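/* FPMULX only differs from an ordinary multiply in the 0 * infinity
 * case handled above: it returns +/-2.0 rather than the default NaN.
 * (Presumably this is what makes it usable in reciprocal estimate
 * sequences, where 2.0 is the desired bound for that case.)
 */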
float64 HELPER(vfp_mulxd)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    if ((float64_is_zero(a) && float64_is_infinity(b)) ||
        (float64_is_infinity(a) && float64_is_zero(b))) {
        /* 2.0 with the sign bit set to sign(A) XOR sign(B) */
        return make_float64((1ULL << 62) |
                            ((float64_val(a) ^ float64_val(b)) & (1ULL << 63)));
    }
    return float64_mul(a, b, fpst);
}
uint64_t HELPER(simd_tbl)(CPUARMState *env, uint64_t result, uint64_t indices,
                          uint32_t rn, uint32_t numregs)
{
    /* Helper function for SIMD TBL and TBX. We have to do the table
     * lookup part for the 64 bits worth of indices we're passed in.
     * result is the initial results vector (either zeroes for TBL
     * or some guest values for TBX), rn the register number where
     * the table starts, and numregs the number of registers in the table.
     * We return the results of the lookups.
     */
    int shift;

    for (shift = 0; shift < 64; shift += 8) {
        int index = extract64(indices, shift, 8);
        if (index < 16 * numregs) {
            /* Convert index (a byte offset into the virtual table
             * which is a series of 128-bit vectors concatenated)
             * into the correct vfp.regs[] element plus a bit offset
             * into that element, bearing in mind that the table
             * can wrap around from V31 to V0.
             */
            int elt = (rn * 2 + (index >> 3)) % 64;
            int bitidx = (index & 7) * 8;
            uint64_t val = extract64(env->vfp.regs[elt], bitidx, 8);

            result = deposit64(result, shift, 8, val);
        }
    }
    return result;
}
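/* Out-of-range indices fall through without touching the result, which
 * gives the architected behaviour for both instructions: TBL is passed
 * zeroes as the initial result (out-of-range selects 0), while TBX is
 * passed the old destination bytes (out-of-range leaves them unchanged).
 */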
/* Helper function for 64 bit polynomial multiply case:
 * perform PolynomialMult(op1, op2) and return either the top or
 * bottom half of the 128 bit result.
 */
uint64_t HELPER(neon_pmull_64_lo)(uint64_t op1, uint64_t op2)
{
    int bitnum;
    uint64_t res = 0;

    for (bitnum = 0; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 << bitnum;
        }
    }
    return res;
}
uint64_t HELPER(neon_pmull_64_hi)(uint64_t op1, uint64_t op2)
{
    int bitnum;
    uint64_t res = 0;

    /* bit 0 of op1 can't influence the high 64 bits at all */
    for (bitnum = 1; bitnum < 64; bitnum++) {
        if (op1 & (1ULL << bitnum)) {
            res ^= op2 >> (64 - bitnum);
        }
    }
    return res;
}
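/* Carry-less multiplication XORs shifted copies of op2 rather than
 * adding them, i.e. it multiplies polynomials over GF(2). For example,
 * 0b11 * 0b11 = 0b101, not 0b1001, because the two middle partial
 * products cancel under XOR.
 */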
/* 64bit/double versions of the neon float compare functions */
uint64_t HELPER(neon_ceq_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_eq_quiet(a, b, fpst);
}

uint64_t HELPER(neon_cge_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_le(b, a, fpst);
}

uint64_t HELPER(neon_cgt_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;
    return -float64_lt(b, a, fpst);
}
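/* The softfloat predicates return 0 or 1; negating turns that into the
 * all-zeroes or all-ones mask that the SIMD compare instructions
 * produce. cge/cgt are implemented via le/lt with swapped operands.
 */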
/* Reciprocal step and sqrt step. Note that unlike the A32/T32
 * versions, these do a fully fused multiply-add or
 * multiply-add-and-halve.
 */
#define float32_two make_float32(0x40000000)
#define float32_three make_float32(0x40400000)
#define float32_one_point_five make_float32(0x3fc00000)

#define float64_two make_float64(0x4000000000000000ULL)
#define float64_three make_float64(0x4008000000000000ULL)
#define float64_one_point_five make_float64(0x3FF8000000000000ULL)
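/* FRECPS computes 2 - a * b and FRSQRTS computes (3 - a * b) / 2; the
 * negation of a is folded in below with float*_chs and the rest is a
 * single fused multiply-add (with float_muladd_halve_result providing
 * the halving for the sqrt step).
 */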
float32 HELPER(recpsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_two;
    }
    return float32_muladd(a, b, float32_two, 0, fpst);
}
float64 HELPER(recpsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_two;
    }
    return float64_muladd(a, b, float64_two, 0, fpst);
}
float32 HELPER(rsqrtsf_f32)(float32 a, float32 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float32_chs(a);
    if ((float32_is_infinity(a) && float32_is_zero(b)) ||
        (float32_is_infinity(b) && float32_is_zero(a))) {
        return float32_one_point_five;
    }
    return float32_muladd(a, b, float32_three, float_muladd_halve_result, fpst);
}
float64 HELPER(rsqrtsf_f64)(float64 a, float64 b, void *fpstp)
{
    float_status *fpst = fpstp;

    a = float64_chs(a);
    if ((float64_is_infinity(a) && float64_is_zero(b)) ||
        (float64_is_infinity(b) && float64_is_zero(a))) {
        return float64_one_point_five;
    }
    return float64_muladd(a, b, float64_three, float_muladd_halve_result, fpst);
}
/* Pairwise long add: add pairs of adjacent elements into
 * double-width elements in the result (eg _s8 is an 8x8->16 op)
 */
uint64_t HELPER(neon_addlp_s8)(uint64_t a)
{
    uint64_t nsignmask = 0x0080008000800080ULL;
    uint64_t wsignmask = 0x8000800080008000ULL;
    uint64_t elementmask = 0x00ff00ff00ff00ffULL;
    uint64_t tmp1, tmp2;
    uint64_t res, signres;

    /* Extract odd elements, sign extend each to a 16 bit field */
    tmp1 = a & elementmask;
    tmp1 ^= nsignmask;
    tmp1 |= wsignmask;
    tmp1 = (tmp1 - nsignmask) ^ wsignmask;
    /* Ditto for the even elements */
    tmp2 = (a >> 8) & elementmask;
    tmp2 ^= nsignmask;
    tmp2 |= wsignmask;
    tmp2 = (tmp2 - nsignmask) ^ wsignmask;

    /* calculate the result by summing bits 0..14, 16..22, etc,
     * and then adjusting the sign bits 15, 23, etc manually.
     * This ensures the addition can't overflow the 16 bit field.
     */
    signres = (tmp1 ^ tmp2) & wsignmask;
    res = (tmp1 & ~wsignmask) + (tmp2 & ~wsignmask);
    res ^= signres;

    return res;
}
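/* For illustration: a naive add of the sign-extended lanes could carry
 * across lane boundaries (-1 + -1 = 0xffff + 0xffff needs 17 bits).
 * Summing with bit 15 masked out and restoring the sign bit by XOR
 * keeps each 16-bit lane self-contained.
 */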
uint64_t HELPER(neon_addlp_u8)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x00ff00ff00ff00ffULL;
    tmp += (a >> 8) & 0x00ff00ff00ff00ffULL;
    return tmp;
}
uint64_t HELPER(neon_addlp_s16)(uint64_t a)
{
    int32_t reslo, reshi;

    reslo = (int32_t)(int16_t)a + (int32_t)(int16_t)(a >> 16);
    reshi = (int32_t)(int16_t)(a >> 32) + (int32_t)(int16_t)(a >> 48);

    return (uint32_t)reslo | (((uint64_t)reshi) << 32);
}
uint64_t HELPER(neon_addlp_u16)(uint64_t a)
{
    uint64_t tmp;

    tmp = a & 0x0000ffff0000ffffULL;
    tmp += (a >> 16) & 0x0000ffff0000ffffULL;
    return tmp;
}
/* Floating-point reciprocal exponent - see FPRecpX in ARM ARM */
float32 HELPER(frecpx_f32)(float32 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint32_t val32, sbit;
    int32_t exp;

    if (float32_is_any_nan(a)) {
        float32 nan = a;
        if (float32_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float32_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float32_default_nan;
        }
        return nan;
    }

    val32 = float32_val(a);
    sbit = 0x80000000ULL & val32;
    exp = extract32(val32, 23, 8);

    if (exp == 0) {
        return make_float32(sbit | (0xfe << 23));
    } else {
        return make_float32(sbit | (~exp & 0xff) << 23);
    }
}
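/* FRECPX keeps the sign, zeroes the fraction and inverts the exponent
 * bits (pinning the zero/denormal case to the largest normal exponent).
 * Per the FPRecpX pseudocode this provides a coarse 1/x scaling value
 * for use by software reciprocal estimation routines.
 */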
float64 HELPER(frecpx_f64)(float64 a, void *fpstp)
{
    float_status *fpst = fpstp;
    uint64_t val64, sbit;
    int64_t exp;

    if (float64_is_any_nan(a)) {
        float64 nan = a;
        if (float64_is_signaling_nan(a)) {
            float_raise(float_flag_invalid, fpst);
            nan = float64_maybe_silence_nan(a);
        }
        if (fpst->default_nan_mode) {
            nan = float64_default_nan;
        }
        return nan;
    }

    val64 = float64_val(a);
    sbit = 0x8000000000000000ULL & val64;
    exp = extract64(float64_val(a), 52, 11);

    if (exp == 0) {
        return make_float64(sbit | (0x7feULL << 52));
    } else {
        return make_float64(sbit | (~exp & 0x7ffULL) << 52);
    }
}