/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/exec-all.h"
#include "exec/gen-icount.h"
#include "translate.h"
#include "translate-a32.h"
/* Include the generated VFP decoder */
#include "decode-vfp.c.inc"
#include "decode-vfp-uncond.c.inc"
static inline void vfp_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void vfp_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
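
/*
 * Worked example (illustrative, not from the original source): for a
 * single-precision VMOV immediate with imm8 == 0x70, the MO_32 arm above
 * computes 0x3e00 | (0x30 << 3) == 0x3f80, shifted left by 16 to give
 * 0x3f800000, which is the IEEE-754 single-precision encoding of 1.0.
 */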
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
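
/*
 * Illustrative example (added commentary): vfp_f16_offset(2, true) yields
 * the byte offset of the upper fp16 half of s2, and the #ifdef above means
 * the same logical half is addressed regardless of host endianness.
 */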
/*
 * Generate code for M-profile lazy FP state preservation if needed;
 * this corresponds to the pseudocode PreserveFPState() function.
 */
static void gen_preserve_fp_state(DisasContext *s)
{
    if (s->v7m_lspact) {
        /*
         * Lazy state saving affects external memory and also the NVIC,
         * so we must mark it as an IO operation for icount (and cause
         * this to be the last insn in the TB).
         */
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            s->base.is_jmp = DISAS_UPDATE_EXIT;
            gen_io_start();
        }
        gen_helper_v7m_preserve_fp_state(cpu_env);
        /*
         * If the preserve_fp_state helper doesn't throw an exception
         * then it will clear LSPACT; we don't need to repeat this for
         * any further FP insns in this TB.
         */
        s->v7m_lspact = false;
    }
}
/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        /* M-profile handled this earlier, in disas_m_nocp() */
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false),
                           s->fp_excp_el);
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        gen_preserve_fp_state(s);

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}
/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
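
/*
 * Added commentary on the calling convention: the trans_* functions below
 * use the idiom
 *     if (!vfp_access_check(s)) {
 *         return true;
 *     }
 * returning true on a failed check because the insn was still successfully
 * decoded; the access-check code has already emitted the exception, so the
 * caller must not fall back to unallocated_encoding().
 */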
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    int sz = a->sz;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 3) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        vfp_load_reg64(frn, rn);
        vfp_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        vfp_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        vfp_load_reg32(frn, rn);
        vfp_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        /* For fp16 the top half is always zeroes */
        if (sz == 1) {
            tcg_gen_andi_i32(dest, dest, 0xffff);
        }
        vfp_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}
/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
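
/*
 * Illustrative example (added commentary): an AArch32 rm field of 0b01
 * indexes FPROUNDING_TIEEVEN (round to nearest, ties to even), while 0b10
 * selects FPROUNDING_POSINF (round towards plus infinity).
 */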
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        vfp_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        vfp_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_op, rm);
        if (sz == 1) {
            gen_helper_rinth(tcg_res, tcg_op, fpst);
        } else {
            gen_helper_rints(tcg_res, tcg_op, fpst);
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        vfp_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        vfp_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_single, rm);
        if (sz == 1) {
            if (is_signed) {
                gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
            }
        } else {
            if (is_signed) {
                gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
            }
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}
static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
    store_reg(s, a->rt, tmp);

    return true;
}
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    write_neon_element32(tmp, a->vn, a->index, a->size);
    tcg_temp_free_i32(tmp);

    return true;
}
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;
static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}
static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     *  if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     *  if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    tcg_gen_or_i32(fpca, fpca, aspen);
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
    tcg_temp_free_i32(aspen);
    tcg_temp_free_i32(fpca);
}
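
/*
 * Added commentary on the sequence above: after the andi/xori, 'aspen' is
 * zero exactly when FPCCR_NS.ASPEN == 1, and the masked 'fpca' is non-zero
 * exactly when CONTROL.FPCA == 1. Their OR is therefore zero iff fpInactive,
 * so comparing it against 0 with the inverted condition implements
 * "branch if fpInactive <cond> true".
 */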
static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque);
        /*
         * TODO: when we implement MVE, write the QC bit.
         * For non-MVE, QC is RES0.
         */
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
        lab_end = gen_new_label();
        /* fpInactive case: write is a NOP, so branch to end */
        gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
        /* !fpInactive: PreserveFPState(), and reads same as FPCXT_S */
        gen_preserve_fp_state(s);
        /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque);
        sfpa = tcg_temp_new_i32();
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = loadfn(s, opaque);
        store_cpu_field(tmp, v7m.vpr);
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}
static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        /*
         * TODO: MVE has a QC bit, which we probably won't store
         * in the xregs[] field. For non-MVE, where QC is RES0,
         * we can just fall through to the FPSCR_NZCV case.
         */
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /* !fpInactive: Reads the same as FPCXT_S, but side effects differ */
        gen_preserve_fp_state(s);
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}
static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
{
    arg_VMSR_VMRS *a = opaque;

    if (a->rt == 15) {
        /* Set the 4 flag bits in the CPSR */
        gen_set_nzcv(value);
        tcg_temp_free_i32(value);
    } else {
        store_reg(s, a->rt, value);
    }
}
static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
{
    arg_VMSR_VMRS *a = opaque;

    return load_reg(s, a->rt);
}
static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    /*
     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
     * FPSCR -> r15 is a special case which writes to the PSR flags;
     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
     * we only care about the top 4 bits of FPSCR there.
     */
    if (a->rt == 15) {
        if (a->l && a->reg == ARM_VFP_FPSCR) {
            a->reg = QEMU_VFP_FPSCR_NZCV;
        } else {
            return false;
        }
    }

    if (a->l) {
        /* VMRS, move FP system register to gp register */
        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
    } else {
        /* VMSR, move gp register to FP system register */
        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
    }
}
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return gen_M_VMSR_VMRS(s, a);
    }

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
        case ARM_VFP_FPSID:
            if (s->current_el == 1) {
                TCGv_i32 tcg_reg, tcg_rt;

                gen_set_condexec(s);
                gen_set_pc_im(s, s->pc_curr);
                tcg_reg = tcg_const_i32(a->reg);
                tcg_rt = tcg_const_i32(a->rt);
                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
                tcg_temp_free_i32(tcg_reg);
                tcg_temp_free_i32(tcg_rt);
            }
            /* fall through */
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored.  */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}
static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                    MO_UL | MO_ALIGN | s->be_data);
    tcg_temp_free_i32(value);

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}
static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = tcg_temp_new_i32();

    if (!a->a) {
        offset = -offset;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                    MO_UL | MO_ALIGN | s->be_data);

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}
static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
}
static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
}
static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (a->rt == 15) {
        /* UNPREDICTABLE; we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        store_reg(s, a->rt, tmp);
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register.  Note that this does not require support
     * for double precision arithmetic.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
    offset = a->imm << 1;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* Note that this does not require support for double arithmetic.  */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
        vfp_store_reg64(tmp, a->vd);
    } else {
        vfp_load_reg64(tmp, a->vd);
        gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
            vfp_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg32(tmp, a->vd + i);
            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    /* Note that this does not require support for double arithmetic.  */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
            vfp_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg64(tmp, a->vd + i);
            gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
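
/*
 * Illustrative note (added commentary): helpers whose prototypes already
 * match these typedefs, such as gen_helper_vfp_adds(), can be passed to
 * do_vfp_3op_sp() directly, as trans_VADD_sp() does below; insns that need
 * extra steps (e.g. VMLA's multiply-then-add) wrap them in a small
 * gen_* function of the same shape instead.
 */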
/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
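
/*
 * Worked example (illustrative): vfp_advance_sreg(7, 1) == 0, wrapping
 * around within the s0..s7 bank, while vfp_advance_sreg(9, 1) == 10,
 * staying within the s8..s15 bank. This wrap-within-bank behaviour is
 * what the short-vector loops below rely on.
 */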
/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        vfp_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            vfp_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_3op_sp(), except:
     *  - it uses the FPST_FPCR_F16
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    if (reads_vd) {
        vfp_load_reg32(fd, vd);
    }
    fn(fd, f0, f1, fpst);
    vfp_store_reg32(fd, vd);

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg64(f0, vn);
    vfp_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }
        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        vfp_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            vfp_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    /* Note that the caller must check the aa32_fpsp_v2 feature.  */

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    vfp_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                vfp_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        vfp_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}
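
/*
 * Added commentary: the "single source one-many" arm above handles the
 * mixed scalar/vector case (delta_m == 0), where the source register is
 * in a scalar bank. Since every iteration would compute the same result,
 * the loop simply stores the one computed value to each remaining
 * destination register in the bank instead of re-running fn().
 */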
static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_2op_sp(), except:
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0;

    /* Note that the caller must check the aa32_fp16_arith feature */

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    vfp_load_reg32(f0, vm);
    fn(f0, f0);
    vfp_store_reg32(f0, vd);
    tcg_temp_free_i32(f0);

    return true;
}
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* Note that the caller must check the aa32_fpdp_v2 feature.  */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    vfp_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                vfp_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        vfp_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}
static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(tmp, tmp);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}
static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(vd, vd);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(tmp, tmp);
    gen_helper_vfp_negh(vd, vd);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}
static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}
static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_mulh(vd, vn, vm, fpst);
    gen_helper_vfp_negh(vd, vd);
}

static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}
static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}
static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}
static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
                         a->vd, a->vn, a->vm, false);
}
static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps.  NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only, and only with the FP16 extension.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_fp16_arith, s) ||
        !dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    vfp_load_reg32(vn, a->vn);
    vfp_load_reg32(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negh(vn, vn);
    }
    vfp_load_reg32(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negh(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
    vfp_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}
2569 static bool do_vfm_sp(DisasContext
*s
, arg_VFMA_sp
*a
, bool neg_n
, bool neg_d
)
2572 * VFNMA : fd = muladd(-fd, fn, fm)
2573 * VFNMS : fd = muladd(-fd, -fn, fm)
2574 * VFMA : fd = muladd( fd, fn, fm)
2575 * VFMS : fd = muladd( fd, -fn, fm)
2577 * These are fused multiply-add, and must be done as one floating
2578 * point operation with no rounding between the multiplication and
2579 * addition steps. NB that doing the negations here as separate
2580 * steps is correct : an input NaN should come out with its sign
2581 * bit flipped if it is a negated-input.
2584 TCGv_i32 vn
, vm
, vd
;
2587 * Present in VFPv4 only.
2588 * Note that we can't rely on the SIMDFMAC check alone, because
2589 * in a Neon-no-VFP core that ID register field will be non-zero.
2591 if (!dc_isar_feature(aa32_simdfmac
, s
) ||
2592 !dc_isar_feature(aa32_fpsp_v2
, s
)) {
2596 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
2597 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
2599 if (s
->vec_len
!= 0 || s
->vec_stride
!= 0) {
2603 if (!vfp_access_check(s
)) {
2607 vn
= tcg_temp_new_i32();
2608 vm
= tcg_temp_new_i32();
2609 vd
= tcg_temp_new_i32();
2611 vfp_load_reg32(vn
, a
->vn
);
2612 vfp_load_reg32(vm
, a
->vm
);
2615 gen_helper_vfp_negs(vn
, vn
);
2617 vfp_load_reg32(vd
, a
->vd
);
2620 gen_helper_vfp_negs(vd
, vd
);
2622 fpst
= fpstatus_ptr(FPST_FPCR
);
2623 gen_helper_vfp_muladds(vd
, vn
, vm
, vd
, fpst
);
2624 vfp_store_reg32(vd
, a
->vd
);
2626 tcg_temp_free_ptr(fpst
);
2627 tcg_temp_free_i32(vn
);
2628 tcg_temp_free_i32(vm
);
2629 tcg_temp_free_i32(vd
);
2634 static bool do_vfm_dp(DisasContext
*s
, arg_VFMA_dp
*a
, bool neg_n
, bool neg_d
)
2637 * VFNMA : fd = muladd(-fd, fn, fm)
2638 * VFNMS : fd = muladd(-fd, -fn, fm)
2639 * VFMA : fd = muladd( fd, fn, fm)
2640 * VFMS : fd = muladd( fd, -fn, fm)
2642 * These are fused multiply-add, and must be done as one floating
2643 * point operation with no rounding between the multiplication and
2644 * addition steps. NB that doing the negations here as separate
2645 * steps is correct : an input NaN should come out with its sign
2646 * bit flipped if it is a negated-input.
2649 TCGv_i64 vn
, vm
, vd
;
2652 * Present in VFPv4 only.
2653 * Note that we can't rely on the SIMDFMAC check alone, because
2654 * in a Neon-no-VFP core that ID register field will be non-zero.
2656 if (!dc_isar_feature(aa32_simdfmac
, s
) ||
2657 !dc_isar_feature(aa32_fpdp_v2
, s
)) {
2661 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
2662 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
2664 if (s
->vec_len
!= 0 || s
->vec_stride
!= 0) {
2668 /* UNDEF accesses to D16-D31 if they don't exist. */
2669 if (!dc_isar_feature(aa32_simd_r32
, s
) &&
2670 ((a
->vd
| a
->vn
| a
->vm
) & 0x10)) {
2674 if (!vfp_access_check(s
)) {
2678 vn
= tcg_temp_new_i64();
2679 vm
= tcg_temp_new_i64();
2680 vd
= tcg_temp_new_i64();
2682 vfp_load_reg64(vn
, a
->vn
);
2683 vfp_load_reg64(vm
, a
->vm
);
2686 gen_helper_vfp_negd(vn
, vn
);
2688 vfp_load_reg64(vd
, a
->vd
);
2691 gen_helper_vfp_negd(vd
, vd
);
2693 fpst
= fpstatus_ptr(FPST_FPCR
);
2694 gen_helper_vfp_muladdd(vd
, vn
, vm
, vd
, fpst
);
2695 vfp_store_reg64(vd
, a
->vd
);
2697 tcg_temp_free_ptr(fpst
);
2698 tcg_temp_free_i64(vn
);
2699 tcg_temp_free_i64(vm
);
2700 tcg_temp_free_i64(vd
);
2705 #define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD) \
2706 static bool trans_##INSN##_##PREC(DisasContext *s, \
2707 arg_##INSN##_##PREC *a) \
2709 return do_vfm_##PREC(s, a, NEGN, NEGD); \
2712 #define MAKE_VFM_TRANS_FNS(PREC) \
2713 MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
2714 MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
2715 MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
2716 MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)
2718 MAKE_VFM_TRANS_FNS(hp
)
2719 MAKE_VFM_TRANS_FNS(sp
)
2720 MAKE_VFM_TRANS_FNS(dp
)
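
/*
 * For illustration, MAKE_ONE_VFM_TRANS_FN(VFMA, hp, false, false)
 * above expands to:
 *
 *     static bool trans_VFMA_hp(DisasContext *s, arg_VFMA_hp *a)
 *     {
 *         return do_vfm_hp(s, a, false, false);
 *     }
 *
 * so each MAKE_VFM_TRANS_FNS(PREC) line emits the four trans functions
 * for VFMA/VFMS/VFNMA/VFNMS at that precision.
 */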
static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    TCGv_i32 fd;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
    vfp_store_reg32(fd, a->vd);
    tcg_temp_free_i32(fd);
    return true;
}

static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}

static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}
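
/*
 * As a worked example of vfp_expand_imm(): imm8 = 0x70 has sign 0,
 * imm8[6] = 1 and imm8[5:0] = 0b110000, which expands to 0x3c00 for
 * MO_16, 0x3f800000 for MO_32 and 0x3ff0000000000000 for MO_64,
 * i.e. the encoding of 1.0 at each precision.
 */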
#define DO_VFP_2OP(INSN, PREC, FN, CHECK)                      \
    static bool trans_##INSN##_##PREC(DisasContext *s,         \
                                      arg_##INSN##_##PREC *a)  \
    {                                                          \
        if (!dc_isar_feature(CHECK, s)) {                      \
            return false;                                      \
        }                                                      \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);         \
    }

#define DO_VFP_VMOV(INSN, PREC, FN)                            \
    static bool trans_##INSN##_##PREC(DisasContext *s,         \
                                      arg_##INSN##_##PREC *a)  \
    {                                                          \
        if (!dc_isar_feature(aa32_fp##PREC##_v2, s) &&         \
            !dc_isar_feature(aa32_mve, s)) {                   \
            return false;                                      \
        }                                                      \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);         \
    }

DO_VFP_VMOV(VMOV_reg, sp, tcg_gen_mov_i32)
DO_VFP_VMOV(VMOV_reg, dp, tcg_gen_mov_i64)

DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd, aa32_fpdp_v2)

DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh, aa32_fp16_arith)
DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs, aa32_fpsp_v2)
DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)
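
/*
 * VABS and VNEG are pure sign-bit operations, which is why the
 * helpers used above (unlike the arithmetic ones) take no
 * float_status pointer: they never round and never raise
 * floating-point exceptions.
 */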
static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrth(vd, vm, cpu_env);
}

static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp, aa32_fpsp_v2)
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp, aa32_fpdp_v2)
static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    vfp_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        vfp_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpeh(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmph(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}
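
/*
 * The compare helpers write their result to the FPSCR NZCV flags
 * rather than to a register. The 'E' variants (vfp_cmpeh etc.)
 * raise Invalid Operation for any NaN input; the plain compares
 * signal only on a signalling NaN.
 */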
static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    vfp_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        vfp_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        vfp_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}
static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}
static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_bf16, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_bfcvt(tmp, tmp, fpst);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
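
/*
 * bfloat16 is the top 16 bits of an IEEE binary32 value, so
 * gen_helper_bfcvt narrows the f32 source to bf16 and the result is
 * stored into one 16-bit half of Vd, selected by the T bit exactly
 * as for the f16 conversions below.
 */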
static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rinth(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}
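
/*
 * gen_helper_set_rmode() installs the requested rounding mode in the
 * float_status and returns the previous mode in its destination, so
 * the second call above restores whatever mode was in force before
 * the forced round-to-zero.
 */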
static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}
static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}
static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    if (a->s) {
        /* i32 -> f16 */
        gen_helper_vfp_sitoh(vm, vm, fpst);
    } else {
        /* u32 -> f16 */
        gen_helper_vfp_uitoh(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}
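
/*
 * VJCVT is the "Javascript conversion": f64 to signed 32-bit integer,
 * always rounding towards zero, where an out-of-range input yields
 * the low 32 bits of the true integer result (modulo 2^32) rather
 * than a saturated value.
 */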
static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
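
/*
 * For the fixed-point conversions the immediate field encodes
 * size - fracbits, so frac_bits recovers the number of fraction
 * bits; the sx bit (bit 0 of opc) selects between a 16-bit and a
 * 32-bit fixed-point operand size.
 */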
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    vfp_load_reg64(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosih(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_touih(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VINS(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rd, rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Insert low half of Vm into high half of Vd */
    rm = tcg_temp_new_i32();
    rd = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    vfp_load_reg32(rd, a->vd);
    tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
    vfp_store_reg32(rd, a->vd);
    tcg_temp_free_i32(rm);
    tcg_temp_free_i32(rd);
    return true;
}
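
/*
 * tcg_gen_deposit_i32(rd, rd, rm, 16, 16) above copies bits [15:0]
 * of rm into bits [31:16] of rd, leaving the low half of rd (and
 * hence the low f16 lane of Vd) untouched.
 */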
static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Set Vd to high half of Vm */
    rm = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    tcg_gen_shri_i32(rm, rm, 16);
    vfp_store_reg32(rm, a->vd);
    tcg_temp_free_i32(rm);
    return true;
}