/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */
/* Include the generated VFP decoder */
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"
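
/*
 * Worked example for vfp_expand_imm() below (an illustration added
 * here, not part of the original commentary): with size == MO_32,
 * imm8 == 0x70 expands to 0x3f800000, i.e. 1.0f.
 */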
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount.
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
            }
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}
/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
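
/*
 * Note (added for clarity): the trans_* functions below call one of
 * these checks before emitting any code that touches the FP register
 * file, and still return true when the check fails: in that case the
 * check itself has already emitted the exception code, so the insn
 * counts as handled.
 */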

static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}
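
/*
 * Note (added for clarity): the minnum/maxnum helpers used below
 * implement the IEEE 754-2008 minNum/maxNum operations, which return
 * the numeric operand when exactly one input is a quiet NaN.
 */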
static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;
    bool vmin = a->op;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return true;
}

/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
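
/*
 * For example (illustration): VRINTA/VRINTN/VRINTP/VRINTM encode
 * rm = 0/1/2/3, i.e. tie-away, tie-even, towards +Inf and towards
 * -Inf respectively.
 */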
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        neon_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        neon_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_op, rm);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        neon_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        neon_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_single, rm);
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}

static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;
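    /*
     * Worked example (illustration): a byte scalar (size == 0) with
     * index == 5 gives offset == 5 initially, so pass == 1 and
     * offset == 8, i.e. byte 1 of the second 32-bit half of the
     * D register.
     */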

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = neon_load_reg(a->vn, pass);
    switch (a->size) {
    case 0:
        if (offset) {
            tcg_gen_shri_i32(tmp, tmp, offset);
        }
        if (a->u) {
            gen_uxtb(tmp);
        } else {
            gen_sxtb(tmp);
        }
        break;
    case 1:
        if (a->u) {
            if (offset) {
                tcg_gen_shri_i32(tmp, tmp, 16);
            } else {
                gen_uxth(tmp);
            }
        } else {
            if (offset) {
                tcg_gen_sari_i32(tmp, tmp, 16);
            } else {
                gen_sxth(tmp);
            }
        }
        break;
    case 2:
        break;
    }
    store_reg(s, a->rt, tmp);

    return true;
}

static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp, tmp2;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    switch (a->size) {
    case 0:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
        tcg_temp_free_i32(tmp2);
        break;
    case 1:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
        tcg_temp_free_i32(tmp2);
        break;
    case 2:
        break;
    }
    neon_store_reg(a->vn, pass, tmp);

    return true;
}

static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Accesses to R15 are UNPREDICTABLE; we choose to undef.
         * (FPSCR -> r15 is a special case which writes to the PSR flags.)
         */
        if (a->rt == 15 && (!a->l || a->reg != ARM_VFP_FPSCR)) {
            return false;
        }

        if (a->reg != ARM_VFP_FPSCR) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            if (s->current_el == 1) {
                TCGv_i32 tcg_reg, tcg_rt;

                gen_set_condexec(s);
                gen_set_pc_im(s, s->pc_curr);
                tcg_reg = tcg_const_i32(a->reg);
                tcg_rt = tcg_const_i32(a->rt);
                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
                tcg_temp_free_i32(tcg_reg);
                tcg_temp_free_i32(tcg_rt);
            }
            /* fall through */
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored.  */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register
     */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        neon_store_reg32(tmp, a->vd);
    } else {
        neon_load_reg32(tmp, a->vd);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
        neon_store_reg64(tmp, a->vd);
    } else {
        neon_load_reg64(tmp, a->vd);
        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            neon_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg32(tmp, a->vd + i);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}

static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
            neon_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg64(tmp, a->vd + i);
            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
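            /*
             * Odd imm: this is the deprecated FLDMX/FSTMX form, which
             * transfers an extra trailing word (background note, not
             * from the original comments).
             */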
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}

/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
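
/*
 * Note (added for clarity): gen_VMLA_sp() further down is a typical
 * VFPGen3OpSPFn callback, and tcg_gen_mov_i32() is usable directly as
 * a VFPGen2OpSPFn, as trans_VMOV_reg_sp() does.
 */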

/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
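
/*
 * Worked example (illustration): vfp_advance_sreg(15, 1) is
 * ((15 + 1) & 0x7) | (15 & ~0x7) == 8, so s15 advances to s8,
 * wrapping within its s8..s15 bank.
 */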

/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg32(f0, vn);
    neon_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        neon_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            neon_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg64(f0, vn);
    neon_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        neon_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            neon_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    neon_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                neon_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        neon_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}

static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    neon_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                neon_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        neon_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}

static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}
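
/*
 * Note (added for clarity): the multiply-accumulate callbacks here
 * read the old vd as the accumulator input, which is why the trans_*
 * functions pass reads_vd == true to do_vfp_3op_{sp,dp}().
 */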

static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}

static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}

static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    neon_load_reg32(vn, a->vn);
    neon_load_reg32(vm, a->vm);
    if (a->o2) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negs(vn, vn);
    }
    neon_load_reg32(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    neon_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}

static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i64 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i64();

    neon_load_reg64(vn, a->vn);
    neon_load_reg64(vm, a->vm);
    if (a->o2) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negd(vn, vn);
    }
    neon_load_reg64(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negd(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
    neon_store_reg64(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(vn);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i64(vd);

    return true;
}

static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}

static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}

static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
{
    return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
}

static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
{
    return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
}

static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
}

static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
}

static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
}

static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
}

static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
{
    return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
{
    return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
}

static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    neon_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        neon_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        neon_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}

static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    neon_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    /* The T bit tells us if we want the low or high 16 bits of Vd */
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    /* The T bit tells us if we want the low or high 16 bits of Vd */
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}

static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints_exact(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}

static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}
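
/*
 * Note (added for clarity): VJCVT below is the v8.3 "JavaScript"
 * conversion: double to 32-bit signed integer, rounding towards zero,
 * with an out-of-range result taken modulo 2^32; flag handling is
 * done inside the vjcvt helper.
 */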
static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}

static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
= tcg_temp_new_i32();
2576 neon_load_reg32(vd
, a
->vd
);
2578 fpst
= get_fpstatus_ptr(false);
2579 shift
= tcg_const_i32(frac_bits
);
2581 /* Switch on op:U:sx bits */
2584 gen_helper_vfp_shtos(vd
, vd
, shift
, fpst
);
2587 gen_helper_vfp_sltos(vd
, vd
, shift
, fpst
);
2590 gen_helper_vfp_uhtos(vd
, vd
, shift
, fpst
);
2593 gen_helper_vfp_ultos(vd
, vd
, shift
, fpst
);
2596 gen_helper_vfp_toshs_round_to_zero(vd
, vd
, shift
, fpst
);
2599 gen_helper_vfp_tosls_round_to_zero(vd
, vd
, shift
, fpst
);
2602 gen_helper_vfp_touhs_round_to_zero(vd
, vd
, shift
, fpst
);
2605 gen_helper_vfp_touls_round_to_zero(vd
, vd
, shift
, fpst
);
2608 g_assert_not_reached();
2611 neon_store_reg32(vd
, a
->vd
);
2612 tcg_temp_free_i32(vd
);
2613 tcg_temp_free_i32(shift
);
2614 tcg_temp_free_ptr(fpst
);

static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    neon_load_reg64(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}