/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */
/* Include the generated VFP decoder */
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
              (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
              extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
              (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
              (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
              (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
              (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
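/*
 * Worked example: imm8 == 0x70 (sign 0, exponent field 0b111, mantissa
 * 0b0000) expands to 0x3f800000 for MO_32 and 0x3ff0000000000000 for
 * MO_64, both of which encode 1.0.
 */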
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
            gen_exception_insn(s, 4, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount.
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
            }
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}
/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}
static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;
    bool vmin = a->op;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return true;
}
/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
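/*
 * For example, an insn rm field of 0b10 indexes entry 2 of the table and
 * so selects FPROUNDING_POSINF (round towards plus infinity).
 */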
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        neon_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        neon_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_op, rm);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        neon_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        neon_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_single, rm);
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}
static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = neon_load_reg(a->vn, pass);
    switch (a->size) {
    case 0:
        if (offset) {
            tcg_gen_shri_i32(tmp, tmp, offset);
        }
        if (a->u) {
            gen_uxtb(tmp);
        } else {
            gen_sxtb(tmp);
        }
        break;
    case 1:
        if (a->u) {
            if (offset) {
                tcg_gen_shri_i32(tmp, tmp, 16);
            } else {
                gen_uxth(tmp);
            }
        } else {
            if (offset) {
                tcg_gen_sari_i32(tmp, tmp, 16);
            } else {
                gen_sxth(tmp);
            }
        }
        break;
    case 2:
        break;
    }
    store_reg(s, a->rt, tmp);

    return true;
}
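/*
 * Worked example for the index/pass/offset arithmetic above: a byte
 * scalar (size 0) at index 5 gives an initial offset of 5, so pass
 * becomes 1 (the second 32-bit word of the D register) and offset
 * becomes 8, ie bits [15:8] of that word.
 */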
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp, tmp2;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    switch (a->size) {
    case 0:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
        tcg_temp_free_i32(tmp2);
        break;
    case 1:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
        tcg_temp_free_i32(tmp2);
        break;
    case 2:
        break;
    }
    neon_store_reg(a->vn, pass, tmp);

    return true;
}
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Writes to R15 are UNPREDICTABLE; we choose to undef.
         */
        if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored.  */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register
     */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        neon_store_reg32(tmp, a->vd);
    } else {
        neon_load_reg32(tmp, a->vd);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
        neon_store_reg64(tmp, a->vd);
    } else {
        neon_load_reg64(tmp, a->vd);
        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            neon_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg32(tmp, a->vd + i);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
            neon_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg64(tmp, a->vd + i);
            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
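/*
 * Note for the writeback arithmetic above: an odd imm marks the
 * FLDMX/FSTMX form, whose post-increment writeback adds 4 bytes on top
 * of the n * 8 bytes of register data, so the base register moves by
 * imm words in total either way.
 */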
/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
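/*
 * For example, vfp_advance_sreg(7, 2) is 1 (s7 wraps to s1 within the
 * s0..s7 bank), and vfp_advance_dreg(18, 3) is 17 (d18 wraps to d17
 * within the d16..d19 bank).
 */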
/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg32(f0, vn);
    neon_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        neon_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            neon_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
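/*
 * As an illustration of the looping above: with FPSCR.LEN giving
 * vec_len == 3 and vec_stride == 0, a single-precision op whose vd is
 * s8 (a vector bank) uses delta_d == 1 and iterates over s8, s9, s10
 * and s11.
 */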
static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg64(f0, vn);
    neon_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }
        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        neon_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            neon_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    neon_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                neon_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        neon_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    neon_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                neon_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        neon_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * when B is a NaN.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * when B is a NaN.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}
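/*
 * Concretely: if fd is a quiet NaN (and default-NaN mode is off), the
 * negation must flip its sign bit before the addition propagates it,
 * whereas the "simplified" (fn * fm) - fd would propagate fd unchanged.
 */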
static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}
static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}
static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}
static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
{
    /*
     * VFNMA : fd = muladd(-fd, -fn, fm)
     * VFNMS : fd = muladd(-fd,  fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    neon_load_reg32(vn, a->vn);
    neon_load_reg32(vm, a->vm);
    if (a->o2) {
        /* VFMS, VFNMA: negate the multiplicand */
        gen_helper_vfp_negs(vn, vn);
    }
    neon_load_reg32(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS: negate the addend */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    neon_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}
static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
{
    /*
     * VFNMA : fd = muladd(-fd, -fn, fm)
     * VFNMS : fd = muladd(-fd,  fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i64 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i64();

    neon_load_reg64(vn, a->vn);
    neon_load_reg64(vm, a->vm);
    if (a->o2) {
        /* VFMS, VFNMA: negate the multiplicand */
        gen_helper_vfp_negd(vn, vn);
    }
    neon_load_reg64(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS: negate the addend */
        gen_helper_vfp_negd(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
    neon_store_reg64(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(vn);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i64(vd);

    return true;
}
static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}
static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}
static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
{
    return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
}

static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
{
    return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
}

static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
}

static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
}

static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
}

static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
}
static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
{
    return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
{
    return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
}
static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    neon_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        neon_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}
static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        neon_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}
static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}
static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    neon_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}
static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints_exact(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}
static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}
static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    neon_load_reg32(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
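/*
 * For example, opc == 1 (signed 32-bit fixed-point to float) with
 * imm == 28 gives frac_bits == 4, ie a value with 4 fractional bits.
 */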
static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    neon_load_reg64(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpdp, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}