/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */
/* Include the generated VFP decoder */
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
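
/*
 * Worked example (illustrative only, not part of the original source):
 * for size == MO_32 and imm8 == 0x70, the sign bit is 0, bit 6 is set
 * so the exponent term is 0x3e00, and the low mantissa bits 0x30 shift
 * up to 0x180; (0x3e00 | 0x180) << 16 == 0x3f800000, which is 1.0f --
 * exactly what VFPExpandImm() produces for that encoding.
 */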
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
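
/*
 * Illustrative note (an assumption based on the reconstruction above):
 * the f16<->f32 conversion insns below use this via
 * vfp_f16_offset(a->vm, a->t), so on a little-endian host the top ==
 * true case addresses vfp_reg_offset(false, reg) + 2, i.e. the high
 * halfword of the single-precision register.
 */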
/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
            gen_exception_insn(s, 4, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount.
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
            }
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}
/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}
static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;
    bool vmin = a->op;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return true;
}
/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
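
/*
 * Illustrative note: index 0 corresponds to the 0b00 encoding used by
 * VRINTA/VCVTA (round to nearest, ties away from zero), 0b01 to
 * VRINTN/VCVTN (ties to even), 0b10 to VRINTP/VCVTP (towards +Inf)
 * and 0b11 to VRINTM/VCVTM (towards -Inf).
 */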
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        neon_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        neon_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_op, rm);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        neon_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        neon_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_single, rm);
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}
static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = neon_load_reg(a->vn, pass);
    switch (a->size) {
    case 0:
        if (offset) {
            tcg_gen_shri_i32(tmp, tmp, offset);
        }
        if (a->u) {
            gen_uxtb(tmp);
        } else {
            gen_sxtb(tmp);
        }
        break;
    case 1:
        if (a->u) {
            if (offset) {
                tcg_gen_shri_i32(tmp, tmp, 16);
            } else {
                gen_uxth(tmp);
            }
        } else {
            if (offset) {
                tcg_gen_sari_i32(tmp, tmp, 16);
            } else {
                gen_sxth(tmp);
            }
        }
        break;
    case 2:
        break;
    }
    store_reg(s, a->rt, tmp);

    return true;
}
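
/*
 * Worked example for the index arithmetic above (illustrative only):
 * a byte scalar (size == 0) at index 5 gives offset = 5, so pass =
 * bit 2 = 1 and offset = (5 & 3) * 8 = 8, i.e. byte 1 of the second
 * 32-bit element of the D register. The same computation is used by
 * the general-purpose-to-scalar direction below.
 */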
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp, tmp2;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    switch (a->size) {
    case 0:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
        tcg_temp_free_i32(tmp2);
        break;
    case 1:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
        tcg_temp_free_i32(tmp2);
        break;
    case 2:
        break;
    }
    neon_store_reg(a->vn, pass, tmp);

    return true;
}
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Writes to R15 are UNPREDICTABLE; we choose to undef.
         */
        if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register
     */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        neon_store_reg32(tmp, a->vd);
    } else {
        neon_load_reg32(tmp, a->vd);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
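
/*
 * Worked example (illustrative only, field names per the reconstruction
 * above): VLDR s0, [r1, #-8] is encoded with imm == 2 and the U (add)
 * bit clear, so offset becomes -(2 << 2) == -8 before being added to
 * the base register.
 */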
static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
        neon_store_reg64(tmp, a->vd);
    } else {
        neon_load_reg64(tmp, a->vd);
        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            neon_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg32(tmp, a->vd + i);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
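
/*
 * Worked example (illustrative only): a pre-decrement VLDMDB with
 * writeback and n == 4 starts with addr at Rn - 16, the loop advances
 * it by 4 per register back to Rn, and the final -offset * n
 * adjustment returns it to Rn - 16 before it is written back.
 */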
static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
            neon_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg64(tmp, a->vd + i);
            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
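
/*
 * For a concrete example of this callback shape, see gen_VMLA_sp()
 * further down: trans_VMLA_sp() registers it with reads_vd == true,
 * so the callback receives the old value of vd and accumulates into it.
 */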
/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}
/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
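
/*
 * Worked example (illustrative only): vfp_advance_sreg(7, 1) yields
 * ((7 + 1) & 7) | (7 & ~7) == 0, wrapping from s7 back to s0 within
 * the first bank, while vfp_advance_sreg(14, 2) yields 8, wrapping
 * from s14 to s8 within the s8..s15 bank. vfp_advance_dreg() behaves
 * the same way over the 4-register D banks.
 */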
/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg32(f0, vn);
    neon_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        neon_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            neon_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
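
/*
 * Illustrative sketch of the looping behaviour (assuming vec_len and
 * vec_stride cache the FPSCR LEN-1 and STRIDE-1 fields): with
 * veclen == 3, vec_stride == 0, vd == s8, vn == s16 and vm == s24,
 * the loop above emits four operations covering s8..s11, s16..s19 and
 * s24..s27, each register number wrapping within its 8-register bank.
 */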
static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg64(f0, vn);
    neon_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        neon_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            neon_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    neon_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                neon_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        neon_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    neon_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                neon_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        neon_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}
static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}
static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}
static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    neon_load_reg32(vn, a->vn);
    neon_load_reg32(vm, a->vm);
    if (a->o2) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negs(vn, vn);
    }
    neon_load_reg32(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    neon_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}
*s
, arg_VFM_sp
*a
)
1780 * VFNMA : fd = muladd(-fd, fn, fm)
1781 * VFNMS : fd = muladd(-fd, -fn, fm)
1782 * VFMA : fd = muladd( fd, fn, fm)
1783 * VFMS : fd = muladd( fd, -fn, fm)
1785 * These are fused multiply-add, and must be done as one floating
1786 * point operation with no rounding between the multiplication and
1787 * addition steps. NB that doing the negations here as separate
1788 * steps is correct : an input NaN should come out with its sign
1789 * bit flipped if it is a negated-input.
1792 TCGv_i64 vn
, vm
, vd
;
1795 * Present in VFPv4 only.
1796 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
1797 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
1799 if (!arm_dc_feature(s
, ARM_FEATURE_VFP4
) ||
1800 (s
->vec_len
!= 0 || s
->vec_stride
!= 0)) {
1804 /* UNDEF accesses to D16-D31 if they don't exist. */
1805 if (!dc_isar_feature(aa32_fp_d32
, s
) && ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
1809 if (!vfp_access_check(s
)) {
1813 vn
= tcg_temp_new_i64();
1814 vm
= tcg_temp_new_i64();
1815 vd
= tcg_temp_new_i64();
1817 neon_load_reg64(vn
, a
->vn
);
1818 neon_load_reg64(vm
, a
->vm
);
1821 gen_helper_vfp_negd(vn
, vn
);
1823 neon_load_reg64(vd
, a
->vd
);
1826 gen_helper_vfp_negd(vd
, vd
);
1828 fpst
= get_fpstatus_ptr(0);
1829 gen_helper_vfp_muladdd(vd
, vn
, vm
, vd
, fpst
);
1830 neon_store_reg64(vd
, a
->vd
);
1832 tcg_temp_free_ptr(fpst
);
1833 tcg_temp_free_i64(vn
);
1834 tcg_temp_free_i64(vm
);
1835 tcg_temp_free_i64(vd
);
static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}
static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}
static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
{
    return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
}

static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
{
    return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
}

static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
}

static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
}

static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
}

static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
}
static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
{
    return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
{
    return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
}
static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    neon_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        neon_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}
static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        neon_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}
static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}
static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    neon_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}
static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints_exact(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}
static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    neon_load_reg32(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    neon_load_reg64(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}