/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */
/* Include the generated VFP decoder */
#include "decode-vfp.c.inc"
#include "decode-vfp-uncond.c.inc"
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
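
/*
 * Worked example (added commentary, not part of the original source):
 * the AArch32 encoding of VMOV.F32 <Sd>, #1.0 has imm8 = 0x70. In the
 * MO_32 case above, imm8[7] = 0 contributes 0, imm8[6] = 1 contributes
 * 0x3e00, and imm8[5:0] = 0x30 shifted left by 3 contributes 0x180,
 * giving 0x3f80; shifted into the top half this is 0x3f800000, i.e. 1.0f.
 */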

/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}

/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        /* M-profile handled this earlier, in disas_m_nocp() */
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, false),
                           s->fp_excp_el);
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount (and cause
             * this to be the last insn in the TB).
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                s->base.is_jmp = DISAS_UPDATE_EXIT;
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}

/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}

static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    int sz = a->sz;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 3) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        vfp_load_reg64(frn, rn);
        vfp_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        vfp_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        vfp_load_reg32(frn, rn);
        vfp_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        /* For fp16 the top half is always zeroes */
        if (sz == 1) {
            tcg_gen_andi_i32(dest, dest, 0xffff);
        }
        vfp_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}
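
/*
 * Illustration (added commentary): VSEL only encodes the four conditions
 * handled above. For example, VSELGE.F32 s0, s1, s2 computes
 * s0 = (N == V) ? s1 : s2 via the movcond on (VF ^ NF) >= 0; the four
 * inverse conditions can be obtained by swapping the source operands.
 */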

/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};

static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
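    /*
     * Added commentary (not in the original): gen_helper_set_rmode()
     * installs the new rounding mode in the fp status and returns the
     * previous mode through its destination, so tcg_rmode now holds the
     * old mode and the second set_rmode call below restores it.
     */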

    if (sz == 3) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        vfp_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        vfp_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_op, rm);
        if (sz == 1) {
            gen_helper_rinth(tcg_res, tcg_op, fpst);
        } else {
            gen_helper_rints(tcg_res, tcg_op, fpst);
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        vfp_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        vfp_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_single, rm);
        if (sz == 1) {
            if (is_signed) {
                gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
            }
        } else {
            if (is_signed) {
                gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
            }
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);
    return true;
}
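
/*
 * Worked example (added commentary): VCVTA.S32.F64 s0, d1 decodes with
 * a->rm = 0, so rounding = fp_decode_rm[0] = FPROUNDING_TIEAWAY, and
 * is_signed = true, so the sz == 3 path above calls gen_helper_vfp_tosld()
 * with a zero shift and truncates the result into the destination S reg.
 */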

static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;

    /* SIZE == MO_32 is a VFP instruction; otherwise NEON.  */
    if (a->size == MO_32
        ? !dc_isar_feature(aa32_fpsp_v2, s)
        : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
    store_reg(s, a->rt, tmp);

    return true;
}
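
/*
 * Illustration (added commentary): VMOV.S8 r0, d2[3] reads byte lane 3
 * of d2 and sign-extends it into r0; the a->u bit selects the unsigned
 * variant, hence MO_SIGN is OR-ed into the element size only when !a->u.
 */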

static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp;

    /* SIZE == MO_32 is a VFP instruction; otherwise NEON.  */
    if (a->size == MO_32
        ? !dc_isar_feature(aa32_fpsp_v2, s)
        : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    write_neon_element32(tmp, a->vn, a->index, a->size);
    tcg_temp_free_i32(tmp);

    return true;
}

static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
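
/*
 * Illustration (added commentary): VDUP.8 d0, r0 broadcasts the low byte
 * of r0 into all 8 byte lanes of d0 (vec_size = 8 bytes); with a->q set,
 * e.g. VDUP.32 q1, r2, all four 32-bit lanes of q1 get r2 (vec_size = 16).
 */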

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Accesses to R15 are UNPREDICTABLE; we choose to undef.
         * (FPSCR -> r15 is a special case which writes to the PSR flags.)
         */
        if (a->rt == 15 && (!a->l || a->reg != ARM_VFP_FPSCR)) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            if (s->current_el == 1) {
                TCGv_i32 tcg_reg, tcg_rt;

                gen_set_condexec(s);
                gen_set_pc_im(s, s->pc_curr);
                tcg_reg = tcg_const_i32(a->reg);
                tcg_rt = tcg_const_i32(a->rt);
                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
                tcg_temp_free_i32(tcg_reg);
                tcg_temp_free_i32(tcg_rt);
            }
            /* fall through */
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored.  */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (a->rt == 15) {
        /* UNPREDICTABLE; we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        store_reg(s, a->rt, tmp);
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR.  */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register. Note that this does not require support
     * for double precision arithmetic.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
    offset = a->imm << 1;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st16(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* Note that this does not require support for double arithmetic.  */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
        vfp_store_reg64(tmp, a->vd);
    } else {
        vfp_load_reg64(tmp, a->vd);
        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            vfp_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg32(tmp, a->vd + i);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}

static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    /* Note that this does not require support for double arithmetic.  */
    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }

    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For thumb, use of PC is UNPREDICTABLE.  */
    addr = add_reg_for_lit(s, a->rn, 0);
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
            vfp_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            vfp_load_reg64(tmp, a->vd + i);
            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}

/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);

/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}
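
/*
 * Worked example (added commentary): with delta = 3,
 * vfp_advance_sreg(6, 3) = ((6 + 3) & 7) | (6 & ~7) = 1, i.e. the
 * sequence wraps around within the s0..s7 bank instead of reaching s9.
 */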

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
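
/*
 * Similarly (illustrative): vfp_advance_dreg(17, 3) =
 * ((17 + 3) & 3) | (17 & ~3) = 0 | 16 = 16, wrapping within d16..d19.
 */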

/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        vfp_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            vfp_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
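
/*
 * Worked example (added commentary): with FPSCR.LEN = 4 (veclen = 3)
 * and a stride of 1, "FADDS s8, s16, s24" performs four additions,
 * s8..s11 = s16..s19 + s24..s27; the bank-wrapping advance helpers
 * above implement the short-vector register stepping.
 */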

static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_3op_sp(), except:
     *  - it uses the FPST_FPCR_F16
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = fpstatus_ptr(FPST_FPCR_F16);

    vfp_load_reg32(f0, vn);
    vfp_load_reg32(f1, vm);

    if (reads_vd) {
        vfp_load_reg32(fd, vd);
    }
    fn(fd, f0, f1, fpst);
    vfp_store_reg32(fd, vd);

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = fpstatus_ptr(FPST_FPCR);

    vfp_load_reg64(f0, vn);
    vfp_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            vfp_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        vfp_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            vfp_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}

static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    vfp_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                vfp_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        vfp_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}

static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    /*
     * Do a half-precision operation. Functionally this is
     * the same as do_vfp_2op_sp(), except:
     *  - it doesn't need the VFP vector handling (fp16 is a
     *    v8 feature, and in v8 VFP vectors don't exist)
     *  - it does the aa32_fp16_arith feature test
     */
    TCGv_i32 f0;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    f0 = tcg_temp_new_i32();
    vfp_load_reg32(f0, vm);
    fn(f0, f0);
    vfp_store_reg32(f0, vd);
    tcg_temp_free_i32(f0);

    return true;
}

static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    vfp_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                vfp_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        vfp_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}

static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(tmp, tmp);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * when the inputs are zeroes or NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(vd, vd);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * when the inputs are zeroes or NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * when the inputs are zeroes or NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_mulh(tmp, vn, vm, fpst);
    gen_helper_vfp_negh(tmp, tmp);
    gen_helper_vfp_negh(vd, vd);
    gen_helper_vfp_addh(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}

static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_mulh(vd, vn, vm, fpst);
    gen_helper_vfp_negh(vd, vd);
}

static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
                         a->vd, a->vn, a->vm, false);
}

static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
{
    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }
    return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
                         a->vd, a->vn, a->vm, false);
}

static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only, and only with the FP16 extension.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_fp16_arith, s) ||
        !dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    vfp_load_reg32(vn, a->vn);
    vfp_load_reg32(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negh(vn, vn);
    }
    vfp_load_reg32(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negh(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
    vfp_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}

static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }
    /*
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    vfp_load_reg32(vn, a->vn);
    vfp_load_reg32(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negs(vn, vn);
    }
    vfp_load_reg32(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    vfp_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}

static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
{
    /*
     * VFNMA : fd = muladd(-fd, fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd, fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i64 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * Note that we can't rely on the SIMDFMAC check alone, because
     * in a Neon-no-VFP core that ID register field will be non-zero.
     */
    if (!dc_isar_feature(aa32_simdfmac, s) ||
        !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }
    /*
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i64();

    vfp_load_reg64(vn, a->vn);
    vfp_load_reg64(vm, a->vm);
    if (neg_n) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negd(vn, vn);
    }
    vfp_load_reg64(vd, a->vd);
    if (neg_d) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negd(vd, vd);
    }
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
    vfp_store_reg64(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(vn);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i64(vd);

    return true;
}

#define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD)           \
    static bool trans_##INSN##_##PREC(DisasContext *s,          \
                                      arg_##INSN##_##PREC *a)   \
    {                                                           \
        return do_vfm_##PREC(s, a, NEGN, NEGD);                 \
    }

#define MAKE_VFM_TRANS_FNS(PREC) \
    MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
    MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
    MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
    MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)

MAKE_VFM_TRANS_FNS(hp)
MAKE_VFM_TRANS_FNS(sp)
MAKE_VFM_TRANS_FNS(dp)
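
/*
 * For illustration (added commentary), MAKE_ONE_VFM_TRANS_FN(VFMA, sp,
 * false, false) expands to:
 *
 *   static bool trans_VFMA_sp(DisasContext *s, arg_VFMA_sp *a)
 *   {
 *       return do_vfm_sp(s, a, false, false);
 *   }
 */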

static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    TCGv_i32 fd;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
    vfp_store_reg32(fd, a->vd);
    tcg_temp_free_i32(fd);
    return true;
}
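
/*
 * Worked example (added commentary): VMOV.F16 s0, #1.0 encodes
 * imm8 = 0x70, and vfp_expand_imm(MO_16, 0x70) = 0x3000 | (0x30 << 6)
 * = 0x3c00, the IEEE half-precision bit pattern for 1.0.
 */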

static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        vfp_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}

static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is.  */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        vfp_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}

#define DO_VFP_2OP(INSN, PREC, FN)                              \
    static bool trans_##INSN##_##PREC(DisasContext *s,          \
                                      arg_##INSN##_##PREC *a)   \
    {                                                           \
        return do_vfp_2op_##PREC(s, FN, a->vd, a->vm);          \
    }

DO_VFP_2OP(VMOV_reg, sp, tcg_gen_mov_i32)
DO_VFP_2OP(VMOV_reg, dp, tcg_gen_mov_i64)

DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh)
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss)
DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd)

DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh)
DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs)
DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd)

static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrth(vd, vm, cpu_env);
}

static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp)
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp)
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp)

static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    vfp_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        vfp_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpeh(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmph(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    vfp_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        vfp_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        vfp_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}
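
/*
 * Added commentary: the E bit selects the compare that raises Invalid
 * Operation for any NaN input (VCMPE -> gen_helper_vfp_cmpe*), while
 * plain VCMP only signals on signaling NaNs; both leave the comparison
 * result in the FPSCR flags.
 */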

static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
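
/*
 * Added commentary: get_ahp_flag() extracts FPSCR.AHP; when it is set
 * the conversion helpers use the Alternative half-precision format,
 * which trades the IEEE infinity/NaN encodings for extra exponent range.
 */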

static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rinth(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}

static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

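/*
 * The "_exact" rint helpers used for VRINTX let the Inexact exception
 * propagate when the rounded result differs from the input; the plain
 * rint helpers used for VRINTR and VRINTZ suppress it, matching the
 * architectural difference between the insns.
 */
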
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    /* Widen from single to double precision */
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    vfp_load_reg64(vm, a->vm);
    /* Narrow from double to single precision */
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}

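/*
 * Unlike most helpers in this file, fcvtds and fcvtsd take cpu_env
 * and locate the fp status within the CPU state themselves, rather
 * than being passed an explicit fpstatus pointer.
 */
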
static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    if (a->s) {
        /* i32 -> f16 */
        gen_helper_vfp_sitoh(vm, vm, fpst);
    } else {
        /* u32 -> f16 */
        gen_helper_vfp_uitoh(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

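/*
 * For the f16 and f32 flavours the integer source and the FP result
 * are both 32 bits, so a single i32 temporary is reused as input and
 * output; the f64 flavour below needs a separate i64 destination.
 */
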
static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}

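/*
 * VJCVT converts double to a 32-bit signed integer with
 * round-towards-zero and JavaScript ToInt32 out-of-range semantics:
 * the result is the low 32 bits of the infinitely precise integer.
 * Architecturally it also sets Z if the conversion was exact
 * (clearing N, C and V), which is why the helper takes cpu_env rather
 * than just an fp status pointer.
 */
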
static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

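/*
 * Worked example of the frac_bits computation above: opc bit 0 is the
 * sx field, selecting a 32-bit (1) or 16-bit (0) integer type. For a
 * 16-bit type with an immediate field that decodes to 12, frac_bits
 * is 16 - 12 == 4, i.e. the integer is treated as having 4 fraction
 * bits and the value is scaled by 2^-4 on conversion to float.
 */
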
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    vfp_load_reg64(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

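/*
 * Note that for the fixed-point conversions Vd is architecturally
 * both source and destination (the insn operates on a register in
 * place), which is why all three trans functions above load from
 * a->vd and there is no Vm operand.
 */
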
static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosih(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_touih(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

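/*
 * In the three trans functions above, a->rz distinguishes VCVT, which
 * always converts with round-towards-zero (the "tosiz"/"touiz"
 * helpers), from VCVTR, which rounds according to the rounding mode
 * currently selected in the FPSCR.
 */
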
/*
 * The decodes for VLLDM and VLSTM are nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    /* If there is no FPU, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        return true;
    }

    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }
    tcg_temp_free_i32(fptr);

    /* End the TB, because we have updated FP control bits */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

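/*
 * Note that the real work for VLLDM and VLSTM happens at run time in
 * the v7m_vlldm/v7m_vlstm helpers, which perform the architectural
 * checks and may themselves raise exceptions; at translate time we
 * only fetch the base register and force the TB to end, since the
 * helpers may change FP context state that is cached at translation.
 */
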
static bool trans_NOCP(DisasContext *s, arg_nocp *a)
{
    /*
     * Handle M-profile early check for disabled coprocessor:
     * all we need to do here is emit the NOCP exception if
     * the coprocessor is disabled. Otherwise we return false
     * and the real VFP/etc decode will handle the insn.
     */
    assert(arm_dc_feature(s, ARM_FEATURE_M));

    if (a->cp == 11) {
        a->cp = 10; /* FP insns are reported as CP10 */
    }

    if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
        (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
        /* In v8.1M cp 8, 9, 14 and 15 are also governed by the cp10 enable */
        a->cp = 10;
    }

    if (a->cp != 10) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), default_exception_el(s));
        return true;
    }

    if (s->fp_excp_el != 0) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    return false;
}

static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
{
    /* This range needs a coprocessor check for v8.1M and later only */
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    return trans_NOCP(s, a);
}

static bool trans_VINS(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rd, rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Insert low half of Vm into high half of Vd */
    rm = tcg_temp_new_i32();
    rd = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    vfp_load_reg32(rd, a->vd);
    tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
    vfp_store_reg32(rd, a->vd);
    tcg_temp_free_i32(rm);
    tcg_temp_free_i32(rd);
    return true;
}

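/*
 * The deposit above writes bits [15:0] of Vm into bits [31:16] of Vd
 * while leaving the low half of Vd intact: e.g. Vd = 0x1111aaaa with
 * Vm = 0x2222bbbb yields Vd = 0xbbbbaaaa.
 */
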
static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Set Vd to high half of Vm */
    rm = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    tcg_gen_shri_i32(rm, rm, 16);
    vfp_store_reg32(rm, a->vd);
    tcg_temp_free_i32(rm);
    return true;
}

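/*
 * VMOVX is the companion to VINS: it shifts the top 16 bits of Vm
 * down into the bottom of Vd (zero-extended), giving software a way
 * to get at the upper half of a single-precision register, which is
 * not otherwise addressable as an fp16 operand.
 */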