/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/exec-all.h"
#include "exec/gen-icount.h"
#include "translate.h"
#include "translate-a32.h"

/* Include the generated VFP decoder */
#include "decode-vfp.c.inc"
#include "decode-vfp-uncond.c.inc"
static inline void vfp_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void vfp_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
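
/*
 * Worked example (illustrative only): imm8 = 0x70 encodes +1.0 at every
 * size. For MO_16, bit 7 gives 0, bit 6 gives 0x3000 and the fraction
 * bits give 0x30 << 6 = 0xc00, so the result is 0x3c00, which is 1.0 in
 * half precision; the MO_32 and MO_64 cases expand the same imm8 to
 * 0x3f800000 and 0x3ff0000000000000 respectively.
 */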
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
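
/*
 * For example, on a little-endian host the low half of a single register
 * lives at vfp_reg_offset(false, reg) and the top half two bytes above
 * it; on a big-endian host the two halves are the other way around,
 * which is why the adjustment is conditional on HOST_WORDS_BIGENDIAN.
 */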
/*
 * Generate code for M-profile lazy FP state preservation if needed;
 * this corresponds to the pseudocode PreserveFPState() function.
 */
static void gen_preserve_fp_state(DisasContext *s)
{
    if (s->v7m_lspact) {
        /*
         * Lazy state saving affects external memory and also the NVIC,
         * so we must mark it as an IO operation for icount (and cause
         * this to be the last insn in the TB).
         */
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            s->base.is_jmp = DISAS_UPDATE_EXIT;
            gen_io_start();
        }
        gen_helper_v7m_preserve_fp_state(cpu_env);
        /*
         * If the preserve_fp_state helper doesn't throw an exception
         * then it will clear LSPACT; we don't need to repeat this for
         * any further FP insns in this TB.
         */
        s->v7m_lspact = false;
    }
}
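
/*
 * As a sketch of how this fits together: s->v7m_lspact is derived from
 * the TB flags when FPCCR.LSPACT is known to be set, i.e. when hardware
 * has deferred saving the FP state of an earlier context, so the first
 * FP insn we translate must let the helper write that state out before
 * touching any FP register.
 */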
/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        /* M-profile handled this earlier, in disas_m_nocp() */
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false),
                           s->fp_excp_el);
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        gen_preserve_fp_state(s);

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}
/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
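
/*
 * A minimal sketch of the calling pattern used by the trans_* functions
 * in this file: do the ISA feature checks first, then call
 * vfp_access_check(), and only emit TCG ops if it returns true. Note
 * that "return true" after a failed access check means the insn was
 * decoded and the exception code was emitted, not that it executed:
 *
 *     if (!dc_isar_feature(aa32_fpsp_v2, s)) {
 *         return false;
 *     }
 *     if (!vfp_access_check(s)) {
 *         return true;
 *     }
 *     ... emit code ...
 */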
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    int sz = a->sz;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 3) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        vfp_load_reg64(frn, rn);
        vfp_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        vfp_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        vfp_load_reg32(frn, rn);
        vfp_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        /* For fp16 the top half is always zeroes */
        if (sz == 1) {
            tcg_gen_andi_i32(dest, dest, 0xffff);
        }
        vfp_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
        tcg_temp_free_i32(zero);
    }

    return true;
}
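
/*
 * For example, VSELGE.F32 s0, s1, s2 takes the a->cc == 2 path above:
 * it computes N ^ V and selects s1 when N == V (the "ge" condition
 * holds) and s2 otherwise, without any branch in the generated code.
 */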
/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
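
/*
 * For example, the VRINTA encoding has rm == 0, which this table maps
 * to FPROUNDING_TIEAWAY; rm == 3 (VRINTM) maps to FPROUNDING_NEGINF.
 */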
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        vfp_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        vfp_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_op, rm);
        if (sz == 1) {
            gen_helper_rinth(tcg_res, tcg_op, fpst);
        } else {
            gen_helper_rints(tcg_res, tcg_op, fpst);
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        vfp_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        vfp_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_single, rm);
        if (sz == 1) {
            if (is_signed) {
                gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
            }
        } else {
            if (is_signed) {
                gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
            }
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}
static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
    store_reg(s, a->rt, tmp);

    return true;
}
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    write_neon_element32(tmp, a->vn, a->index, a->size);
    tcg_temp_free_i32(tmp);

    return true;
}
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;
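
/*
 * A minimal sketch of the intended calling pattern (the real callers
 * are gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read() below):
 *
 *     switch (fp_sysreg_checks(s, regno)) {
 *     case FPSysRegCheckFailed:
 *         return false;
 *     case FPSysRegCheckDone:
 *         return true;
 *     case FPSysRegCheckContinue:
 *         break;
 *     }
 */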
static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}
static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    tcg_gen_or_i32(fpca, fpca, aspen);
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
    tcg_temp_free_i32(aspen);
    tcg_temp_free_i32(fpca);
}
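
/*
 * For example, gen_M_fp_sysreg_write() below passes TCG_COND_NE so that
 * the write is skipped (branching to lab_end) when the FP state is
 * inactive, while gen_M_fp_sysreg_read() passes TCG_COND_EQ to branch
 * to the "active" code path instead.
 */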
static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
        lab_end = gen_new_label();
        /* fpInactive case: write is a NOP, so branch to end */
        gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
        /* !fpInactive: PreserveFPState(), and reads same as FPCXT_S */
        gen_preserve_fp_state(s);
        /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque);
        sfpa = tcg_temp_new_i32();
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = loadfn(s, opaque);
        store_cpu_field(tmp, v7m.vpr);
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}
static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /* !fpInactive: Reads the same as FPCXT_S, but side effects differ */
        gen_preserve_fp_state(s);
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}
static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
{
    arg_VMSR_VMRS *a = opaque;

    if (a->rt == 15) {
        /* Set the 4 flag bits in the CPSR */
        gen_set_nzcv(value);
        tcg_temp_free_i32(value);
    } else {
        store_reg(s, a->rt, value);
    }
}

static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
{
    arg_VMSR_VMRS *a = opaque;

    return load_reg(s, a->rt);
}
static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    /*
     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
     * FPSCR -> r15 is a special case which writes to the PSR flags;
     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
     * we only care about the top 4 bits of FPSCR there.
     */
    if (a->rt == 15) {
        if (a->l && a->reg == ARM_VFP_FPSCR) {
            a->reg = QEMU_VFP_FPSCR_NZCV;
        } else {
            return false;
        }
    }

    if (a->l) {
        /* VMRS, move FP system register to gp register */
        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
    } else {
        /* VMSR, move gp register to FP system register */
        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
    }
}
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return gen_M_VMSR_VMRS(s, a);
    }

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
        case ARM_VFP_FPSID:
            if (s->current_el == 1) {
                TCGv_i32 tcg_reg, tcg_rt;

                gen_set_condexec(s);
                gen_set_pc_im(s, s->pc_curr);
                tcg_reg = tcg_const_i32(a->reg);
                tcg_rt = tcg_const_i32(a->rt);
                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
                tcg_temp_free_i32(tcg_reg);
                tcg_temp_free_i32(tcg_rt);
            }
            /* fall through */
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}
static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                    MO_UL | MO_ALIGN | s->be_data);
    tcg_temp_free_i32(value);

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}

static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = tcg_temp_new_i32();

    if (!a->a) {
        offset = -offset;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                    MO_UL | MO_ALIGN | s->be_data);

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}
static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
}

static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
}
static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (a->rt == 15) {
        /* UNPREDICTABLE; we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        store_reg(s, a->rt, tmp);
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register. Note that this does not require support
     * for double precision arithmetic.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
    offset = a->imm << 1;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* Note that this does not require support for double arithmetic. */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
        vfp_store_reg64(tmp, a->vd);
    } else {
        vfp_load_reg64(tmp, a->vd);
        gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
            vfp_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg32(tmp, a->vd + i);
            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    /* Note that this does not require support for double arithmetic. */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
            vfp_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg64(tmp, a->vd + i);
            gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
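
/*
 * For example, trans_VMLA_sp() below passes reads_vd = true to
 * do_vfp_3op_sp() because VMLA accumulates into vd, whereas plain VMUL
 * passes false since its callback only ever writes vd.
 */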
/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
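
/*
 * Worked example (illustrative only): vfp_advance_sreg(7, 1) yields 0,
 * wrapping within the first bank s0..s7, while vfp_advance_sreg(13, 2)
 * yields 15, staying within the bank s8..s15. The D-register variant
 * wraps within 4-register banks in the same way.
 */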
/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        vfp_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            vfp_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_3op_sp(), except:
     *  - it uses the FPST_FPCR_F16
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    if (reads_vd) {
        vfp_load_reg32(fd, vd);
    }
    fn(fd, f0, f1, fpst);
    vfp_store_reg32(fd, vd);

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg64(f0, vn);
    vfp_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }
        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        vfp_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            vfp_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    /* Note that the caller must check the aa32_fpsp_v2 feature. */

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    vfp_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                vfp_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        vfp_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}
static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_2op_sp(), except:
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0;

    /* Note that the caller must check the aa32_fp16_arith feature */

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    vfp_load_reg32(f0, vm);
    fn(f0, f0);
    vfp_store_reg32(f0, vd);
    tcg_temp_free_i32(f0);

    return true;
}
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* Note that the caller must check the aa32_fpdp_v2 feature. */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    vfp_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                vfp_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        vfp_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}
static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(tmp, tmp);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(vd, vd);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(tmp, tmp);
    gen_helper_vfp_negh(vd, vd);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}
static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_mulh(vd, vn, vm, fpst);
    gen_helper_vfp_negh(vd, vd);
}

static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}
static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}
static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
                         a->vd, a->vn, a->vm, false);
}
static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct: an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only, and only with the FP16 extension.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_fp16_arith, s) ||
        !dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    vfp_load_reg32(vn, a->vn);
    vfp_load_reg32(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negh(vn, vn);
    }
    vfp_load_reg32(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negh(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
    vfp_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}

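/*
 * In terms of the neg_n/neg_d arguments, the four encodings are
 * VFMA (false, false), VFMS (true, false), VFNMA (false, true) and
 * VFNMS (true, true); MAKE_VFM_TRANS_FNS below instantiates exactly
 * these combinations for each precision.
 */
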
static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct: an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }
    /*
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    vfp_load_reg32(vn, a->vn);
    vfp_load_reg32(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negs(vn, vn);
    }
    vfp_load_reg32(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    vfp_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}

static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct: an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i64 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }
    /*
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i64();

    vfp_load_reg64(vn, a->vn);
    vfp_load_reg64(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negd(vn, vn);
    }
    vfp_load_reg64(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negd(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
    vfp_store_reg64(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(vn);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i64(vd);

    return true;
}

#define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD)           \
    static bool trans_##INSN##_##PREC(DisasContext *s,          \
                                      arg_##INSN##_##PREC *a)   \
    {                                                           \
        return do_vfm_##PREC(s, a, NEGN, NEGD);                 \
    }

#define MAKE_VFM_TRANS_FNS(PREC) \
    MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
    MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
    MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
    MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)

MAKE_VFM_TRANS_FNS(hp)
MAKE_VFM_TRANS_FNS(sp)
MAKE_VFM_TRANS_FNS(dp)

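/*
 * For example, MAKE_VFM_TRANS_FNS(hp) expands (among others) to:
 *
 *   static bool trans_VFMA_hp(DisasContext *s, arg_VFMA_hp *a)
 *   {
 *       return do_vfm_hp(s, a, false, false);
 *   }
 */
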
static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    TCGv_i32 fd;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
    vfp_store_reg32(fd, a->vd);
    tcg_temp_free_i32(fd);
    return true;
}

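/*
 * a->imm is the 8-bit VFP-encoded immediate; vfp_expand_imm() widens
 * it to the requested float format.  For instance imm8 == 0x70 expands
 * to 0x3c00 at MO_16, which is 1.0 in IEEE half precision.
 */
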
static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}

static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}

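/*
 * Note the precision-dependent step for the deprecated short-vector
 * forms: singles advance by s->vec_stride + 1 registers per element,
 * doubles by (s->vec_stride >> 1) + 1, and vfp_advance_{s,d}reg()
 * wraps the index within its register bank.
 */
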
#define DO_VFP_2OP(INSN, PREC, FN, CHECK)                       \
    static bool trans_##INSN##_##PREC(DisasContext *s,          \
                                      arg_##INSN##_##PREC *a)   \
    {                                                           \
        if (!dc_isar_feature(CHECK, s)) {                       \
            return false;                                       \
        }                                                       \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);          \
    }

#define DO_VFP_VMOV(INSN, PREC, FN)                             \
    static bool trans_##INSN##_##PREC(DisasContext *s,          \
                                      arg_##INSN##_##PREC *a)   \
    {                                                           \
        if (!dc_isar_feature(aa32_fp##PREC##_v2, s) &&          \
            !dc_isar_feature(aa32_mve, s)) {                    \
            return false;                                       \
        }                                                       \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);          \
    }

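/*
 * DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2), for example,
 * expands to a trans_VABS_sp() gated on the single-precision VFPv2
 * feature.  DO_VFP_VMOV differs only in its check: the register-move
 * forms are also available when MVE is implemented, even without VFP.
 */
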
DO_VFP_VMOV(VMOV_reg, sp, tcg_gen_mov_i32)
DO_VFP_VMOV(VMOV_reg, dp, tcg_gen_mov_i64)

DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd, aa32_fpdp_v2)

DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh, aa32_fp16_arith)
DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs, aa32_fpsp_v2)
DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)

static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrth(vd, vm, cpu_env);
}

static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp, aa32_fpsp_v2)
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp, aa32_fpdp_v2)

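/*
 * Unlike most of the 2-op generators, the sqrt helpers take cpu_env
 * rather than an explicit float_status pointer: the helper fetches the
 * FP status it needs from the CPU state itself.
 */
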
static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    vfp_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        vfp_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpeh(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmph(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

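/*
 * The e bit selects the "raise Invalid Operation on any NaN" compare
 * (VCMPE, the cmpe* helpers) over the quiet compare (VCMP, which only
 * traps signaling NaNs); the helpers deposit their result in the
 * FPSCR.NZCV flags via cpu_env.
 */
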
static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    vfp_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        vfp_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        vfp_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}

static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_bfcvt(tmp, tmp, fpst);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

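/*
 * bfloat16 shares the binary32 exponent layout (it is essentially the
 * top 16 bits of an f32), so gen_helper_bfcvt only has to narrow the
 * mantissa; the result is then stored to the chosen half of Vd exactly
 * like the f16 conversions above and below.
 */
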
static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rinth(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

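/*
 * gen_helper_set_rmode() installs the requested rounding mode in the
 * float_status and returns the previous mode in the same TCG value, so
 * the second call after the rint restores whatever mode the FPSCR had:
 * VRINTZ always truncates regardless of the current rounding setting.
 */
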
static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}

static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

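/*
 * The _exact rint helpers differ from the plain ones only in that they
 * signal the Inexact exception when the result is not numerically
 * equal to the input, matching the VRINTX vs VRINTR/VRINTZ distinction.
 */
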
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}

static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    if (a->s) {
        /* i32 -> f16 */
        gen_helper_vfp_sitoh(vm, vm, fpst);
    } else {
        /* u32 -> f16 */
        gen_helper_vfp_uitoh(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}

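/*
 * VJCVT is the AArch32 form of the v8.3 "JavaScript convert": a
 * double-to-signed-32 conversion that always rounds toward zero and,
 * for out-of-range inputs, produces the low 32 bits of the true
 * integer result rather than saturating, matching ECMAScript's
 * ToInt32() semantics (NaN converts to 0).
 */
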
static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

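/*
 * The opc field packs the op:U:sx bits, giving eight fixed-point
 * variants: cases 0-3 convert fixed-point to float (signed or
 * unsigned, 16- or 32-bit container, rounding to nearest) and cases
 * 4-7 convert float to fixed-point with round-to-zero.  frac_bits
 * rescales the immediate into the number of fraction bits for the
 * container size selected by the sx bit.
 */
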
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    vfp_load_reg64(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosih(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_touih(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VINS(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rd, rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Insert low half of Vm into high half of Vd */
    rm = tcg_temp_new_i32();
    rd = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    vfp_load_reg32(rd, a->vd);
    tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
    vfp_store_reg32(rd, a->vd);
    tcg_temp_free_i32(rm);
    tcg_temp_free_i32(rd);
    return true;
}

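/*
 * tcg_gen_deposit_i32(rd, rd, rm, 16, 16) copies bits [15:0] of rm
 * into bits [31:16] of rd, i.e. the low f16 of Vm lands in the high
 * f16 slot of Vd while Vd's low half is preserved.
 */
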
static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Set Vd to high half of Vm */
    rm = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    tcg_gen_shri_i32(rm, rm, 16);
    vfp_store_reg32(rm, a->vd);
    tcg_temp_free_i32(rm);