/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/exec-all.h"
#include "exec/gen-icount.h"
#include "translate.h"
#include "translate-a32.h"

/* Include the generated VFP decoder */
#include "decode-vfp.c.inc"
#include "decode-vfp-uncond.c.inc"
static inline void vfp_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void vfp_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
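
/*
 * Worked example (illustrative only, not part of the translator): for
 * size == MO_32, imm8 = 0x70 has sign bit 0, imm8[6] = 1 and
 * imm8[5:0] = 0x30, so imm = 0x3e00 | (0x30 << 3) = 0x3f80; after the
 * << 16 the result is 0x3f800000, ie single-precision 1.0, matching the
 * VFPExpandImm() pseudocode in the Arm ARM.
 */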
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
/*
 * Generate code for M-profile lazy FP state preservation if needed;
 * this corresponds to the pseudocode PreserveFPState() function.
 */
static void gen_preserve_fp_state(DisasContext *s)
{
    if (s->v7m_lspact) {
        /*
         * Lazy state saving affects external memory and also the NVIC,
         * so we must mark it as an IO operation for icount (and cause
         * this to be the last insn in the TB).
         */
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            s->base.is_jmp = DISAS_UPDATE_EXIT;
            gen_io_start();
        }
        gen_helper_v7m_preserve_fp_state(cpu_env);
        /*
         * If the preserve_fp_state helper doesn't throw an exception
         * then it will clear LSPACT; we don't need to repeat this for
         * any further FP insns in this TB.
         */
        s->v7m_lspact = false;
    }
}
/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /*
             * M-profile mostly catches the "FPU disabled" case early, in
             * disas_m_nocp(), but a few insns (eg LCTP, WLSTP, DLSTP)
             * which do coprocessor-checks are outside the large ranges of
             * the encoding space handled by the patterns in m-nocp.decode,
             * and for them we may need to raise NOCP here.
             */
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
        } else {
            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        gen_preserve_fp_state(s);

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA,
             * the FPSCR, and VPR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            if (dc_isar_feature(aa32_mve, s)) {
                TCGv_i32 z32 = tcg_const_i32(0);
                store_cpu_field(z32, v7m.vpr);
            }

            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}
/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    int sz = a->sz;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 3) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        vfp_load_reg64(frn, rn);
        vfp_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        vfp_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        vfp_load_reg32(frn, rn);
        vfp_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        /* For fp16 the top half is always zeroes */
        if (sz == 1) {
            tcg_gen_andi_i32(dest, dest, 0xffff);
        }
        vfp_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}
/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
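
/*
 * For example (illustrative, not used by the code): an insn with
 * rm == 2 selects fp_decode_rm[2] == FPROUNDING_POSINF, ie round
 * towards plus infinity, matching the FPDecodeRM() pseudocode table.
 */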
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        vfp_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        vfp_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_op, rm);
        if (sz == 1) {
            gen_helper_rinth(tcg_res, tcg_op, fpst);
        } else {
            gen_helper_rints(tcg_res, tcg_op, fpst);
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        vfp_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        vfp_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_single, rm);
        if (sz == 1) {
            if (is_signed) {
                gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
            }
        } else {
            if (is_signed) {
                gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
            }
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}
static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
    store_reg(s, a->rt, tmp);

    return true;
}
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    write_neon_element32(tmp, a->vn, a->index, a->size);
    tcg_temp_free_i32(tmp);

    return true;
}
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;
static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}
static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    tcg_gen_or_i32(fpca, fpca, aspen);
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
    tcg_temp_free_i32(aspen);
    tcg_temp_free_i32(fpca);
}
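
/*
 * A sketch of why the sequence above computes fpInactive (this note is
 * explanatory only): after the andi/xori pair, 'aspen' is non-zero exactly
 * when FPCCR_NS.ASPEN == 0, and after the andi, 'fpca' is non-zero exactly
 * when CONTROL.FPCA == 1. Their OR is therefore zero only in the "ASPEN set
 * and FPCA clear" case, ie when fpInactive is true, so comparing against 0
 * with the inverted condition yields the branch behaviour described above.
 */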
static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
        lab_end = gen_new_label();
        /* fpInactive case: write is a NOP, so branch to end */
        gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
        /* !fpInactive: PreserveFPState(), and reads same as FPCXT_S */
        gen_preserve_fp_state(s);
        /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque);
        sfpa = tcg_temp_new_i32();
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = loadfn(s, opaque);
        store_cpu_field(tmp, v7m.vpr);
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}
static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /* !fpInactive: Reads the same as FPCXT_S, but side effects differ */
        gen_preserve_fp_state(s);
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}
static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
{
    arg_VMSR_VMRS *a = opaque;

    if (a->rt == 15) {
        /* Set the 4 flag bits in the CPSR */
        gen_set_nzcv(value);
        tcg_temp_free_i32(value);
    } else {
        store_reg(s, a->rt, value);
    }
}

static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
{
    arg_VMSR_VMRS *a = opaque;

    return load_reg(s, a->rt);
}

static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    /*
     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
     * FPSCR -> r15 is a special case which writes to the PSR flags;
     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
     * we only care about the top 4 bits of FPSCR there.
     */
    if (a->rt == 15) {
        if (a->l && a->reg == ARM_VFP_FPSCR) {
            a->reg = QEMU_VFP_FPSCR_NZCV;
        } else {
            return false;
        }
    }

    if (a->l) {
        /* VMRS, move FP system register to gp register */
        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
    } else {
        /* VMSR, move gp register to FP system register */
        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
    }
}
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return gen_M_VMSR_VMRS(s, a);
    }

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
        case ARM_VFP_FPSID:
            if (s->current_el == 1) {
                TCGv_i32 tcg_reg, tcg_rt;

                gen_set_condexec(s);
                gen_set_pc_im(s, s->pc_curr);
                tcg_reg = tcg_const_i32(a->reg);
                tcg_rt = tcg_const_i32(a->rt);
                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
                tcg_temp_free_i32(tcg_reg);
                tcg_temp_free_i32(tcg_rt);
            }
            /* fall through */
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}
static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                    MO_UL | MO_ALIGN | s->be_data);
    tcg_temp_free_i32(value);

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}

static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = tcg_temp_new_i32();

    if (!a->a) {
        offset = -offset;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                    MO_UL | MO_ALIGN | s->be_data);

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}
static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
}

static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
}
static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (a->rt == 15) {
        /* UNPREDICTABLE; we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        store_reg(s, a->rt, tmp);
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register.  Note that this does not require support
     * for double precision arithmetic.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
    offset = a->imm << 1;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* Note that this does not require support for double arithmetic.  */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
        vfp_store_reg64(tmp, a->vd);
    } else {
        vfp_load_reg64(tmp, a->vd);
        gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    s->eci_handled = true;

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
            vfp_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg32(tmp, a->vd + i);
            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    clear_eci_state(s);
    return true;
}
static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    /* Note that this does not require support for double arithmetic.  */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
        return false;
    }

    s->eci_handled = true;

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
            vfp_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg64(tmp, a->vd + i);
            gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    clear_eci_state(s);
    return true;
}
/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);

/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
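
/*
 * Worked example (illustrative only): vfp_advance_sreg(6, 2) yields
 * ((6 + 2) & 7) | (6 & ~7) == 0, ie s6 advanced by 2 wraps back to s0
 * within the first bank; vfp_advance_dreg(18, 1) yields
 * ((18 + 1) & 3) | (18 & ~3) == 3 | 16 == 19, ie d18 advances to d19
 * and stays within the d16..d19 bank.
 */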
/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        vfp_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            vfp_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_3op_sp(), except:
     *  - it uses the FPST_FPCR_F16
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    if (reads_vd) {
        vfp_load_reg32(fd, vd);
    }
    fn(fd, f0, f1, fpst);
    vfp_store_reg32(fd, vd);

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg64(f0, vn);
    vfp_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }
        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        vfp_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            vfp_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    /* Note that the caller must check the aa32_fpsp_v2 feature. */

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    vfp_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                vfp_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        vfp_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}
static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_2op_sp(), except:
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0;

    /* Note that the caller must check the aa32_fp16_arith feature */

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    vfp_load_reg32(f0, vm);
    fn(f0, f0);
    vfp_store_reg32(f0, vd);
    tcg_temp_free_i32(f0);

    return true;
}
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* Note that the caller must check the aa32_fpdp_v2 feature. */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    vfp_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                vfp_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        vfp_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}
static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(tmp, tmp);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(vd, vd);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(tmp, tmp);
    gen_helper_vfp_negh(vd, vd);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}
static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}
static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_mulh(vd, vn, vm, fpst);
    gen_helper_vfp_negh(vd, vd);
}

static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}
static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}
static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
                         a->vd, a->vn, a->vm, false);
}
static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only, and only with the FP16 extension.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_fp16_arith, s) ||
        !dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    vfp_load_reg32(vn, a->vn);
    vfp_load_reg32(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negh(vn, vn);
    }
    vfp_load_reg32(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negh(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
    vfp_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}
static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }
    /*
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    vfp_load_reg32(vn, a->vn);
    vfp_load_reg32(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negs(vn, vn);
    }
    vfp_load_reg32(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    vfp_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}

static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i64 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }
    /*
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i64();

    vfp_load_reg64(vn, a->vn);
    vfp_load_reg64(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negd(vn, vn);
    }
    vfp_load_reg64(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negd(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
    vfp_store_reg64(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(vn);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i64(vd);

    return true;
}

#define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD)           \
    static bool trans_##INSN##_##PREC(DisasContext *s,          \
                                      arg_##INSN##_##PREC *a)   \
    {                                                           \
        return do_vfm_##PREC(s, a, NEGN, NEGD);                 \
    }

#define MAKE_VFM_TRANS_FNS(PREC) \
    MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
    MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
    MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
    MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)

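/*
 * As an illustration, MAKE_ONE_VFM_TRANS_FN(VFMA, hp, false, false)
 * expands to:
 *
 *     static bool trans_VFMA_hp(DisasContext *s, arg_VFMA_hp *a)
 *     {
 *         return do_vfm_hp(s, a, false, false);
 *     }
 */
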
MAKE_VFM_TRANS_FNS(hp)
MAKE_VFM_TRANS_FNS(sp)
MAKE_VFM_TRANS_FNS(dp)

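/*
 * VMOV (immediate) materialises the value described by the imm8
 * encoding; e.g. imm8 = 0x70 expands to 1.0 in each precision
 * (0x3c00 as f16, 0x3f800000 as f32, 0x3ff0000000000000 as f64).
 */
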
static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    TCGv_i32 fd;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
    vfp_store_reg32(fd, a->vd);
    tcg_temp_free_i32(fd);
    return true;
}

static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

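    /*
     * veclen != 0 here means a VFP short-vector operation: the same
     * immediate is written to a sequence of registers, stepping by
     * delta_d each time round the loop below, while a destination in
     * the scalar bank degenerates to a single store.
     */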
    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}

static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}

#define DO_VFP_2OP(INSN, PREC, FN, CHECK)                       \
    static bool trans_##INSN##_##PREC(DisasContext *s,          \
                                      arg_##INSN##_##PREC *a)   \
    {                                                           \
        if (!dc_isar_feature(CHECK, s)) {                       \
            return false;                                       \
        }                                                       \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);          \
    }

#define DO_VFP_VMOV(INSN, PREC, FN)                             \
    static bool trans_##INSN##_##PREC(DisasContext *s,          \
                                      arg_##INSN##_##PREC *a)   \
    {                                                           \
        if (!dc_isar_feature(aa32_fp##PREC##_v2, s) &&          \
            !dc_isar_feature(aa32_mve, s)) {                    \
            return false;                                       \
        }                                                       \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);          \
    }

DO_VFP_VMOV(VMOV_reg, sp, tcg_gen_mov_i32)
DO_VFP_VMOV(VMOV_reg, dp, tcg_gen_mov_i64)

DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd, aa32_fpdp_v2)

DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh, aa32_fp16_arith)
DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs, aa32_fpsp_v2)
DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)

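/*
 * The sqrt helpers take cpu_env rather than a float_status pointer,
 * so small wrappers adapt them to the (vd, vm) shape that the
 * do_vfp_2op_* callbacks expect.
 */
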
static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrth(vd, vm, cpu_env);
}

static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp, aa32_fpsp_v2)
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp, aa32_fpdp_v2)

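/*
 * VCMP/VCMPE leave their result in the FP status flags (later copied
 * to the ARM NZCV flags by VMRS); the E variant (cmpe helpers) also
 * raises Invalid Operation for quiet NaN operands, where the plain
 * compare stays silent.
 */
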
static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    vfp_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        vfp_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpeh(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmph(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    vfp_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        vfp_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        vfp_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}

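/*
 * For the conversions to and from f16, get_ahp_flag() reads
 * FPSCR.AHP so the helpers can choose between IEEE half-precision
 * and the Arm alternative format, and the T bit selects which
 * 16-bit half of the 32-bit source or destination register is used.
 */
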
static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_bfcvt(tmp, tmp, fpst);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

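/*
 * The VRINT family differ only in rounding behaviour: VRINTR uses
 * the rounding mode currently in FPSCR, VRINTZ temporarily forces
 * round-to-zero around the rint call, and VRINTX uses the "exact"
 * helpers, which also raise Inexact when the result differs from
 * the input.
 */
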
static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rinth(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}

static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

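/*
 * For VCVT between single and double precision the suffix names the
 * source size: VCVT_sp widens an f32 in Vm to an f64 in Vd, and
 * VCVT_dp narrows in the other direction.
 */
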
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}

static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    if (a->s) {
        /* i32 -> f16 */
        gen_helper_vfp_sitoh(vm, vm, fpst);
    } else {
        /* u32 -> f16 */
        gen_helper_vfp_uitoh(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}

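/*
 * For the fixed-point conversions, opc packs the op:U:sx bits: sx
 * selects a 16-bit or 32-bit fixed-point format (hence the frac_bits
 * calculation), U selects unsigned, and op gives the direction.
 * As the helper names indicate, float-from-fixed rounds to nearest
 * while fixed-from-float rounds towards zero.
 */
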
static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    vfp_load_reg64(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosih(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_touih(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

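/*
 * VINS and VMOVX move f16 values between the two halves of a
 * single-precision register: tcg_gen_deposit_i32(rd, rd, rm, 16, 16)
 * copies bits [15:0] of rm into bits [31:16] of rd, and the shift
 * in VMOVX brings the top half down to the bottom.
 */
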
static bool trans_VINS(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rd, rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Insert low half of Vm into high half of Vd */
    rm = tcg_temp_new_i32();
    rd = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    vfp_load_reg32(rd, a->vd);
    tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
    vfp_store_reg32(rd, a->vd);
    tcg_temp_free_i32(rm);
    tcg_temp_free_i32(rd);
    return true;
}

static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Set Vd to high half of Vm */
    rm = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    tcg_gen_shri_i32(rm, rm, 16);
    vfp_store_reg32(rm, a->vd);
    tcg_temp_free_i32(rm);
    return true;
}