/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */

/* Include the generated VFP decoder */
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
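/*
 * Worked example (illustrative comment, not in the original source):
 * for MO_32, imm8 = 0x70 expands as
 *   (bit 7 = 0          -> 0x0000) |
 *   (bit 6 = 1          -> 0x3e00) |
 *   (bits [5:0] = 0x30  -> 0x30 << 3 = 0x0180)
 * giving 0x3f80, which shifted into the top halfword is 0x3f800000,
 * i.e. the single-precision value 1.0.
 */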
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
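/*
 * Illustrative note (added comment, not in the original source): on a
 * little-endian host vfp_f16_offset(reg, true) is the base offset + 2,
 * because the "top" 16 bits of the 32-bit S register live at the higher
 * address; on a big-endian host the +2 adjustment applies to the bottom
 * half instead.
 */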
/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount (and cause
             * this to be the last insn in the TB).
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                s->base.is_jmp = DISAS_UPDATE_EXIT;
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}
/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
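/*
 * Usage sketch (descriptive comment added here, not in the original
 * source): a typical trans_* function below performs all of its static
 * UNDEF checks first and only then calls vfp_access_check(); if the
 * check returns false, the trans function still returns true, because
 * the insn has been handled by the exception code the check emitted.
 */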
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}
/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
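/*
 * Example (comment added for clarity, not in the original source): the
 * VRINTA/VRINTN/VRINTP/VRINTM family encodes RM = 0b00..0b11, so
 * fp_decode_rm[0] = FPROUNDING_TIEAWAY corresponds to VRINTA and
 * fp_decode_rm[3] = FPROUNDING_NEGINF corresponds to VRINTM.
 */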
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        neon_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        neon_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_op, rm);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    if (dp && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        neon_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        neon_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_single, rm);
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}
static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;
    int pass;
    uint32_t offset;

    /* SIZE == 2 is a VFP instruction; otherwise NEON. */
    if (a->size == 2
        ? !dc_isar_feature(aa32_fpsp_v2, s)
        : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = neon_load_reg(a->vn, pass);
    switch (a->size) {
    case 0:
        if (offset) {
            tcg_gen_shri_i32(tmp, tmp, offset);
        }
        if (a->u) {
            gen_uxtb(tmp);
        } else {
            gen_sxtb(tmp);
        }
        break;
    case 1:
        if (a->u) {
            if (offset) {
                tcg_gen_shri_i32(tmp, tmp, 16);
            } else {
                gen_uxth(tmp);
            }
        } else {
            if (offset) {
                tcg_gen_sari_i32(tmp, tmp, 16);
            } else {
                gen_sxth(tmp);
            }
        }
        break;
    case 2:
        break;
    }
    store_reg(s, a->rt, tmp);

    return true;
}
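/*
 * Worked example (illustrative, not in the original source): for a byte
 * scalar (size == 0) with index 5, offset starts as 5, so pass = 1 and
 * the in-word offset becomes 1 * 8 = 8: the scalar is byte 1 of the
 * second 32-bit word of the D register, i.e. bits [15:8] of pass 1.
 */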
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp, tmp2;
    int pass;
    uint32_t offset;

    /* SIZE == 2 is a VFP instruction; otherwise NEON. */
    if (a->size == 2
        ? !dc_isar_feature(aa32_fpsp_v2, s)
        : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    switch (a->size) {
    case 0:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
        tcg_temp_free_i32(tmp2);
        break;
    case 1:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
        tcg_temp_free_i32(tmp2);
        break;
    case 2:
        break;
    }
    neon_store_reg(a->vn, pass, tmp);

    return true;
}
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Accesses to R15 are UNPREDICTABLE; we choose to undef.
         * (FPSCR -> r15 is a special case which writes to the PSR flags.)
         */
        if (a->rt == 15 && (!a->l || a->reg != ARM_VFP_FPSCR)) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            if (s->current_el == 1) {
                TCGv_i32 tcg_reg, tcg_rt;

                gen_set_condexec(s);
                gen_set_pc_im(s, s->pc_curr);
                tcg_reg = tcg_const_i32(a->reg);
                tcg_rt = tcg_const_i32(a->rt);
                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
                tcg_temp_free_i32(tcg_reg);
                tcg_temp_free_i32(tcg_rt);
            }
            /* fall through */
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register. Note that this does not require support
     * for double precision arithmetic.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        neon_store_reg32(tmp, a->vd);
    } else {
        neon_load_reg32(tmp, a->vd);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* Note that this does not require support for double arithmetic. */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
        neon_store_reg64(tmp, a->vd);
    } else {
        neon_load_reg64(tmp, a->vd);
        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
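/*
 * Note (added comment, not in the original source): the immediate field
 * counts words, so e.g. "vldr d0, [r1, #8]" is encoded with imm = 2 and
 * the byte offset is reconstructed above as 2 << 2 = 8.
 */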
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            neon_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg32(tmp, a->vd + i);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    /* Note that this does not require support for double arithmetic. */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
            neon_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg64(tmp, a->vd + i);
            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
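/*
 * Worked example (illustrative, not in the original source):
 * vfp_advance_sreg(6, 4) = ((6 + 4) & 0x7) | (6 & ~0x7) = 2, i.e.
 * stepping past s7 wraps back to the start of the s0..s7 bank rather
 * than escaping into the next bank.
 */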
/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg32(f0, vn);
    neon_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        neon_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            neon_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg64(f0, vn);
    neon_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        neon_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            neon_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    neon_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                neon_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        neon_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    neon_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                neon_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        neon_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}
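/*
 * Illustrative note (added comment, not in the original source): if fd
 * is a NaN, rewriting -fd + (fn * fm) as (fn * fm) - fd would present
 * the operands to the NaN-propagation rules in the opposite order and
 * would skip the sign flip that negating fd applies to the NaN, so the
 * sign of the result (and which NaN is chosen when both inputs are
 * NaNs) could differ from the architected behaviour.
 */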
static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}
static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}
static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}
static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
                         a->vd, a->vn, a->vm, false);
}
static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }
    /*
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    neon_load_reg32(vn, a->vn);
    neon_load_reg32(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negs(vn, vn);
    }
    neon_load_reg32(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    neon_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}

static bool trans_VFMA_sp(DisasContext *s, arg_VFMA_sp *a)
{
    return do_vfm_sp(s, a, false, false);
}

static bool trans_VFMS_sp(DisasContext *s, arg_VFMS_sp *a)
{
    return do_vfm_sp(s, a, true, false);
}

static bool trans_VFNMA_sp(DisasContext *s, arg_VFNMA_sp *a)
{
    return do_vfm_sp(s, a, false, true);
}

static bool trans_VFNMS_sp(DisasContext *s, arg_VFNMS_sp *a)
{
    return do_vfm_sp(s, a, true, true);
}
static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i64 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }
    /*
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i64();

    neon_load_reg64(vn, a->vn);
    neon_load_reg64(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negd(vn, vn);
    }
    neon_load_reg64(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negd(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
    neon_store_reg64(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(vn);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i64(vd);

    return true;
}

static bool trans_VFMA_dp(DisasContext *s, arg_VFMA_dp *a)
{
    return do_vfm_dp(s, a, false, false);
}

static bool trans_VFMS_dp(DisasContext *s, arg_VFMS_dp *a)
{
    return do_vfm_dp(s, a, true, false);
}

static bool trans_VFNMA_dp(DisasContext *s, arg_VFNMA_dp *a)
{
    return do_vfm_dp(s, a, false, true);
}

static bool trans_VFNMS_dp(DisasContext *s, arg_VFNMS_dp *a)
{
    return do_vfm_dp(s, a, true, true);
}
static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            /* vector */
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}
static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            /* vector */
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}
static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
{
    return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
}

static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
{
    return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
}

static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
}

static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
}

static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
}

static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
}

static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
{
    return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
{
    return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
}
static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    neon_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        neon_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}
static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        neon_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}
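/*
 * Note (added comment, not in the original source): the E bit selects
 * the "raise Invalid Operation on any NaN input" variant (VCMPE, via
 * the gen_helper_vfp_cmpe* helpers), whereas plain VCMP signals only
 * for signalling NaNs.
 */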
static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    neon_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}

static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints_exact(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}
static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    neon_load_reg32(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
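/*
 * Worked example (illustrative, not in the original source): for the
 * 32-bit signed variant (opc = 1) with 8 fraction bits, the insn
 * encodes imm = 24, and frac_bits = 32 - 24 = 8 is what gets passed to
 * the helper as the fixed-point shift.
 */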
static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    neon_load_reg64(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}
/*
 * Decode VLLDM and VLSTM are nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }
    /* If not secure, UNDEF. */
    if (!s->v8m_secure) {
        return false;
    }
    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        return true;
    }

    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }
    tcg_temp_free_i32(fptr);

    /* End the TB, because we have updated FP control bits */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}