2 * ARM translation: AArch32 VFP instructions
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 * Copyright (c) 2019 Linaro, Ltd.
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2.1 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
24 * This file is intended to be included from translate.c; it uses
25 * some macros and definitions provided by that file.
26 * It might be possible to convert it to a standalone .c file eventually.
29 /* Include the generated VFP decoder */
30 #include "decode-vfp.c.inc"
31 #include "decode-vfp-uncond.c.inc"
34 * The imm8 encodes the sign bit, enough bits to represent an exponent in
35 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
36 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
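 * As a worked example (illustrative, not normative): imm8 == 0x70 with
 * size == MO_32 gives (0x3e00 | (0x30 << 3)) == 0x3f80, which, shifted
 * into the high half of the word, is the float32 bit pattern
 * 0x3f800000, i.e. 1.0f.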
38 uint64_t vfp_expand_imm(int size, uint8_t imm8)
44 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
45 (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
46 extract32(imm8, 0, 6);
50 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
51 (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
52 (extract32(imm8, 0, 6) << 3);
56 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
57 (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
58 (extract32(imm8, 0, 6) << 6);
61 g_assert_not_reached();
67 * Return the offset of a 16-bit half of the specified VFP single-precision
68 * register. If top is true, returns the top 16 bits; otherwise the bottom 16 bits.
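 * Note that on big-endian hosts the two 16-bit halves live at swapped
 * offsets within the 32-bit S register slot, which is what the
 * HOST_WORDS_BIGENDIAN special case below accounts for.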
71 static inline long vfp_f16_offset(unsigned reg, bool top)
73 long offs = vfp_reg_offset(false, reg);
74 #ifdef HOST_WORDS_BIGENDIAN
87 * Check that VFP access is enabled. If it is, do the necessary
88 * M-profile lazy-FP handling and then return true.
89 * If not, emit code to generate an appropriate exception and return false.
91 * The ignore_vfp_enabled argument specifies that we should ignore
92 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
93 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
95 static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
98 /* M-profile handled this earlier, in disas_m_nocp() */
99 assert(!arm_dc_feature(s, ARM_FEATURE_M));
100 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
101 syn_fp_access_trap(1, 0xe, false),
106 if (!s->vfp_enabled && !ignore_vfp_enabled) {
107 assert(!arm_dc_feature(s, ARM_FEATURE_M));
108 unallocated_encoding(s);
112 if (arm_dc_feature(s, ARM_FEATURE_M)) {
113 /* Handle M-profile lazy FP state mechanics */
115 /* Trigger lazy-state preservation if necessary */
118 * Lazy state saving affects external memory and also the NVIC,
119 * so we must mark it as an IO operation for icount (and cause
120 * this to be the last insn in the TB).
122 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
123 s->base.is_jmp = DISAS_UPDATE_EXIT;
126 gen_helper_v7m_preserve_fp_state(cpu_env);
128 * If the preserve_fp_state helper doesn't throw an exception
129 * then it will clear LSPACT; we don't need to repeat this for
130 * any further FP insns in this TB.
132 s->v7m_lspact = false;
135 /* Update ownership of FP context: set FPCCR.S to match current state */
136 if (s->v8m_fpccr_s_wrong) {
139 tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
141 tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
143 tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
145 store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
146 /* Don't need to do this for any further FP insns in this TB */
147 s->v8m_fpccr_s_wrong = false;
150 if (s->v7m_new_fp_ctxt_needed) {
152 * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA and the FPSCR.
155 TCGv_i32 control, fpscr;
156 uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
158 fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
159 gen_helper_vfp_set_fpscr(cpu_env, fpscr);
160 tcg_temp_free_i32(fpscr);
162 * We don't need to arrange to end the TB, because the only
163 * parts of FPSCR which we cache in the TB flags are the VECLEN
164 * and VECSTRIDE, and those don't exist for M-profile.
168 bits |= R_V7M_CONTROL_SFPA_MASK;
170 control = load_cpu_field(v7m.control[M_REG_S]);
171 tcg_gen_ori_i32(control, control, bits);
172 store_cpu_field(control, v7m.control[M_REG_S]);
173 /* Don't need to do this for any further FP insns in this TB */
174 s->v7m_new_fp_ctxt_needed = false;
182 * The most usual kind of VFP access check, for everything except
183 * FMXR/FMRX to the always-available special registers.
185 static bool vfp_access_check(DisasContext *s)
187 return full_vfp_access_check(s, false);
190 static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
195 if (!dc_isar_feature(aa32_vsel, s)) {
199 if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
203 if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
207 /* UNDEF accesses to D16-D31 if they don't exist */
208 if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
209 ((a->vm | a->vn | a->vd) & 0x10)) {
217 if (!vfp_access_check(s)) {
222 TCGv_i64 frn, frm, dest;
223 TCGv_i64 tmp, zero, zf, nf, vf;
225 zero = tcg_const_i64(0);
227 frn = tcg_temp_new_i64();
228 frm = tcg_temp_new_i64();
229 dest = tcg_temp_new_i64();
231 zf = tcg_temp_new_i64();
232 nf = tcg_temp_new_i64();
233 vf = tcg_temp_new_i64();
235 tcg_gen_extu_i32_i64(zf, cpu_ZF);
236 tcg_gen_ext_i32_i64(nf, cpu_NF);
237 tcg_gen_ext_i32_i64(vf, cpu_VF);
239 vfp_load_reg64(frn, rn);
240 vfp_load_reg64(frm, rm);
243 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
247 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
250 case 2: /* ge: N == V -> N ^ V == 0 */
251 tmp = tcg_temp_new_i64();
252 tcg_gen_xor_i64(tmp, vf, nf);
253 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
255 tcg_temp_free_i64(tmp);
257 case 3: /* gt: !Z && N == V */
258 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
260 tmp = tcg_temp_new_i64();
261 tcg_gen_xor_i64(tmp, vf, nf);
262 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
264 tcg_temp_free_i64(tmp);
267 vfp_store_reg64(dest, rd);
268 tcg_temp_free_i64(frn);
269 tcg_temp_free_i64(frm);
270 tcg_temp_free_i64(dest);
272 tcg_temp_free_i64(zf);
273 tcg_temp_free_i64(nf);
274 tcg_temp_free_i64(vf);
276 tcg_temp_free_i64(zero);
278 TCGv_i32 frn, frm, dest;
281 zero = tcg_const_i32(0);
283 frn = tcg_temp_new_i32();
284 frm = tcg_temp_new_i32();
285 dest = tcg_temp_new_i32();
286 vfp_load_reg32(frn, rn);
287 vfp_load_reg32(frm, rm);
290 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
294 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
297 case 2: /* ge: N == V -> N ^ V == 0 */
298 tmp = tcg_temp_new_i32();
299 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
300 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
302 tcg_temp_free_i32(tmp);
304 case 3: /* gt: !Z && N == V */
305 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
307 tmp = tcg_temp_new_i32();
308 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
309 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
311 tcg_temp_free_i32(tmp);
314 /* For fp16 the top half is always zeroes */
316 tcg_gen_andi_i32(dest, dest, 0xffff);
318 vfp_store_reg32(dest, rd);
319 tcg_temp_free_i32(frn);
320 tcg_temp_free_i32(frm);
321 tcg_temp_free_i32(dest);
323 tcg_temp_free_i32(zero);
330 * Table for converting the most common AArch32 encoding of
331 * rounding mode to arm_fprounding order (which matches the
332 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
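 * Concretely, rm 0b00 selects TIEAWAY, 0b01 TIEEVEN, 0b10 POSINF and
 * 0b11 NEGINF (per the FPDecodeRM() pseudocode).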
334 static const uint8_t fp_decode_rm[] = {
341 static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
347 int rounding = fp_decode_rm[a->rm];
349 if (!dc_isar_feature(aa32_vrint, s)) {
353 if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
357 if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
361 /* UNDEF accesses to D16-D31 if they don't exist */
362 if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
363 ((a->vm | a->vd) & 0x10)) {
370 if (!vfp_access_check(s)) {
375 fpst = fpstatus_ptr(FPST_FPCR_F16);
377 fpst = fpstatus_ptr(FPST_FPCR);
380 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
381 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
386 tcg_op = tcg_temp_new_i64();
387 tcg_res = tcg_temp_new_i64();
388 vfp_load_reg64(tcg_op, rm);
389 gen_helper_rintd(tcg_res, tcg_op, fpst);
390 vfp_store_reg64(tcg_res, rd);
391 tcg_temp_free_i64(tcg_op);
392 tcg_temp_free_i64(tcg_res);
396 tcg_op = tcg_temp_new_i32();
397 tcg_res = tcg_temp_new_i32();
398 vfp_load_reg32(tcg_op, rm);
400 gen_helper_rinth(tcg_res, tcg_op, fpst);
402 gen_helper_rints(tcg_res, tcg_op, fpst);
404 vfp_store_reg32(tcg_res, rd);
405 tcg_temp_free_i32(tcg_op);
406 tcg_temp_free_i32(tcg_res);
409 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
410 tcg_temp_free_i32(tcg_rmode);
412 tcg_temp_free_ptr(fpst);
416 static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
421 TCGv_i32 tcg_rmode, tcg_shift;
422 int rounding = fp_decode_rm[a->rm];
423 bool is_signed = a->op;
425 if (!dc_isar_feature(aa32_vcvt_dr, s)) {
429 if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
433 if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
437 /* UNDEF accesses to D16-D31 if they don't exist */
438 if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
445 if (!vfp_access_check(s)) {
450 fpst = fpstatus_ptr(FPST_FPCR_F16);
452 fpst = fpstatus_ptr(FPST_FPCR);
455 tcg_shift = tcg_const_i32(0);
457 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
458 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
461 TCGv_i64 tcg_double, tcg_res;
463 tcg_double = tcg_temp_new_i64();
464 tcg_res = tcg_temp_new_i64();
465 tcg_tmp = tcg_temp_new_i32();
466 vfp_load_reg64(tcg_double, rm);
468 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
470 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
472 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
473 vfp_store_reg32(tcg_tmp, rd);
474 tcg_temp_free_i32(tcg_tmp);
475 tcg_temp_free_i64(tcg_res);
476 tcg_temp_free_i64(tcg_double);
478 TCGv_i32 tcg_single, tcg_res;
479 tcg_single = tcg_temp_new_i32();
480 tcg_res = tcg_temp_new_i32();
481 vfp_load_reg32(tcg_single, rm);
484 gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
486 gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
490 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
492 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
495 vfp_store_reg32(tcg_res, rd);
496 tcg_temp_free_i32(tcg_res);
497 tcg_temp_free_i32(tcg_single);
500 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
501 tcg_temp_free_i32(tcg_rmode);
503 tcg_temp_free_i32(tcg_shift);
505 tcg_temp_free_ptr(fpst);
510 static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
512 /* VMOV scalar to general purpose register */
515 /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
517 ? !dc_isar_feature(aa32_fpsp_v2, s)
518 : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
522 /* UNDEF accesses to D16-D31 if they don't exist */
523 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
527 if (!vfp_access_check(s)) {
531 tmp = tcg_temp_new_i32();
532 read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
533 store_reg(s, a->rt, tmp);
538 static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
540 /* VMOV general purpose register to scalar */
543 /* SIZE == MO_32 is a VFP instruction; otherwise NEON. */
545 ? !dc_isar_feature(aa32_fpsp_v2, s)
546 : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
550 /* UNDEF accesses to D16-D31 if they don't exist */
551 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
555 if (!vfp_access_check(s)) {
559 tmp = load_reg(s, a->rt);
560 write_neon_element32(tmp, a->vn, a->index, a->size);
561 tcg_temp_free_i32(tmp);
566 static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
568 /* VDUP (general purpose register) */
572 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
576 /* UNDEF accesses to D16-D31 if they don't exist */
577 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
585 if (a->q && (a->vn & 1)) {
589 vec_size = a->q ? 16 : 8;
598 if (!vfp_access_check(s)) {
602 tmp = load_reg(s, a->rt);
603 tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
604 vec_size, vec_size, tmp);
605 tcg_temp_free_i32(tmp);
611 * M-profile provides two different sets of instructions that can
612 * access floating point system registers: VMSR/VMRS (which move
613 * to/from a general purpose register) and VLDR/VSTR sysreg (which
614 * move directly to/from memory). In some cases there are also side
615 * effects which must happen after any write to memory (which could
616 * cause an exception). So we implement the common logic for the
617 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
618 * which take pointers to callback functions which will perform the
619 * actual "read/write general purpose register" and "read/write
620 * memory" operations.
624 * Emit code to store the sysreg to its final destination; frees the
625 * TCG temp 'value' it is passed.
627 typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
629 * Emit code to load the value to be copied to the sysreg; returns
630 * a new TCG temporary
632 typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
634 /* Common decode/access checks for fp sysreg read/write */
635 typedef enum FPSysRegCheckResult {
636 FPSysRegCheckFailed, /* caller should return false */
637 FPSysRegCheckDone, /* caller should return true */
638 FPSysRegCheckContinue, /* caller should continue generating code */
639 } FPSysRegCheckResult;
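/*
 * A sketch of the expected caller pattern (mirrored by the read and
 * write functions below):
 *
 *     switch (fp_sysreg_checks(s, regno)) {
 *     case FPSysRegCheckFailed:   return false;
 *     case FPSysRegCheckDone:     return true;
 *     case FPSysRegCheckContinue: break;
 *     }
 */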
641 static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
643 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
644 return FPSysRegCheckFailed;
649 case QEMU_VFP_FPSCR_NZCV:
651 case ARM_VFP_FPSCR_NZCVQC:
652 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
657 return FPSysRegCheckFailed;
660 if (!vfp_access_check(s)) {
661 return FPSysRegCheckDone;
664 return FPSysRegCheckContinue;
667 static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
669 fp_sysreg_loadfn *loadfn,
672 /* Do a write to an M-profile floating point system register */
675 switch (fp_sysreg_checks(s, regno)) {
676 case FPSysRegCheckFailed:
678 case FPSysRegCheckDone:
680 case FPSysRegCheckContinue:
686 tmp = loadfn(s, opaque);
687 gen_helper_vfp_set_fpscr(cpu_env, tmp);
688 tcg_temp_free_i32(tmp);
691 case ARM_VFP_FPSCR_NZCVQC:
694 tmp = loadfn(s, opaque);
696 * TODO: when we implement MVE, write the QC bit.
697 * For non-MVE, QC is RES0.
699 tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
700 fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
701 tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
702 tcg_gen_or_i32(fpscr, fpscr, tmp);
703 store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
704 tcg_temp_free_i32(tmp);
708 g_assert_not_reached();
713 static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
714 fp_sysreg_storefn *storefn,
717 /* Do a read from an M-profile floating point system register */
720 switch (fp_sysreg_checks(s, regno)) {
721 case FPSysRegCheckFailed:
723 case FPSysRegCheckDone:
725 case FPSysRegCheckContinue:
731 tmp = tcg_temp_new_i32();
732 gen_helper_vfp_get_fpscr(tmp, cpu_env);
733 storefn(s, opaque, tmp);
735 case ARM_VFP_FPSCR_NZCVQC:
737 * TODO: MVE has a QC bit, which we probably won't store
738 * in the xregs[] field. For non-MVE, where QC is RES0,
739 * we can just fall through to the FPSCR_NZCV case.
741 case QEMU_VFP_FPSCR_NZCV:
743 * Read just NZCV; this is a special case to avoid the
744 * helper call for the "VMRS to CPSR.NZCV" insn.
746 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
747 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
748 storefn(s, opaque, tmp);
751 g_assert_not_reached();
756 static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
758 arg_VMSR_VMRS *a = opaque;
761 /* Set the 4 flag bits in the CPSR */
763 tcg_temp_free_i32(value);
765 store_reg(s, a->rt, value);
769 static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
771 arg_VMSR_VMRS *a = opaque;
773 return load_reg(s, a->rt);
776 static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
779 * Accesses to R15 are UNPREDICTABLE; we choose to undef.
780 * FPSCR -> r15 is a special case which writes to the PSR flags;
781 * set a->reg to a special value to tell gen_M_fp_sysreg_read()
782 * we only care about the top 4 bits of FPSCR there.
785 if (a->l && a->reg == ARM_VFP_FPSCR) {
786 a->reg = QEMU_VFP_FPSCR_NZCV;
793 /* VMRS, move FP system register to gp register */
794 return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
796 /* VMSR, move gp register to FP system register */
797 return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
801 static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
804 bool ignore_vfp_enabled = false;
806 if (arm_dc_feature(s, ARM_FEATURE_M)) {
807 return gen_M_VMSR_VMRS(s, a);
810 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
817 * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
818 * all ID registers to privileged access only.
820 if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
823 ignore_vfp_enabled = true;
827 if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
830 ignore_vfp_enabled = true;
833 if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
836 ignore_vfp_enabled = true;
844 ignore_vfp_enabled = true;
847 case ARM_VFP_FPINST2:
848 /* Not present in VFPv3 */
849 if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
857 if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
862 /* VMRS, move VFP special register to gp register */
868 if (s->current_el == 1) {
869 TCGv_i32 tcg_reg, tcg_rt;
872 gen_set_pc_im(s, s->pc_curr);
873 tcg_reg = tcg_const_i32(a->reg);
874 tcg_rt = tcg_const_i32(a->rt);
875 gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
876 tcg_temp_free_i32(tcg_reg);
877 tcg_temp_free_i32(tcg_rt);
882 case ARM_VFP_FPINST2:
883 tmp = load_cpu_field(vfp.xregs[a->reg]);
887 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
888 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
890 tmp = tcg_temp_new_i32();
891 gen_helper_vfp_get_fpscr(tmp, cpu_env);
895 g_assert_not_reached();
899 /* Set the 4 flag bits in the CPSR. */
901 tcg_temp_free_i32(tmp);
903 store_reg(s, a->rt, tmp);
906 /* VMSR, move gp register to VFP special register */
912 /* Writes are ignored. */
915 tmp = load_reg(s, a->rt);
916 gen_helper_vfp_set_fpscr(cpu_env, tmp);
917 tcg_temp_free_i32(tmp);
922 * TODO: VFP subarchitecture support.
923 * For now, keep the EN bit only
925 tmp = load_reg(s, a->rt);
926 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
927 store_cpu_field(tmp, vfp.xregs[a->reg]);
931 case ARM_VFP_FPINST2:
932 tmp = load_reg(s, a->rt);
933 store_cpu_field(tmp, vfp.xregs[a->reg]);
936 g_assert_not_reached();
943 static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
945 arg_vldr_sysreg *a = opaque;
946 uint32_t offset = a->imm;
953 addr = load_reg(s, a->rn);
955 tcg_gen_addi_i32(addr, addr, offset);
958 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
959 gen_helper_v8m_stackcheck(cpu_env, addr);
962 gen_aa32_st_i32(s, value, addr, get_mem_index(s),
963 MO_UL | MO_ALIGN | s->be_data);
964 tcg_temp_free_i32(value);
969 tcg_gen_addi_i32(addr, addr, offset);
971 store_reg(s, a->rn, addr);
973 tcg_temp_free_i32(addr);
977 static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
979 arg_vldr_sysreg *a = opaque;
980 uint32_t offset = a->imm;
982 TCGv_i32 value = tcg_temp_new_i32();
988 addr = load_reg(s, a->rn);
990 tcg_gen_addi_i32(addr, addr, offset);
993 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
994 gen_helper_v8m_stackcheck(cpu_env, addr);
997 gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
998 MO_UL | MO_ALIGN | s->be_data);
1003 tcg_gen_addi_i32(addr, addr, offset);
1005 store_reg(s, a->rn, addr);
1007 tcg_temp_free_i32(addr);
1012 static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
1014 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
1020 return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
1023 static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
1025 if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
1031 return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
1034 static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
1038 if (!dc_isar_feature(aa32_fp16_arith, s)) {
1043 /* UNPREDICTABLE; we choose to UNDEF */
1047 if (!vfp_access_check(s)) {
1052 /* VFP to general purpose register */
1053 tmp = tcg_temp_new_i32();
1054 vfp_load_reg32(tmp, a->vn);
1055 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1056 store_reg(s, a->rt, tmp);
1058 /* general purpose register to VFP */
1059 tmp = load_reg(s, a->rt);
1060 tcg_gen_andi_i32(tmp, tmp, 0xffff);
1061 vfp_store_reg32(tmp, a->vn);
1062 tcg_temp_free_i32(tmp);
1068 static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
1072 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1076 if (!vfp_access_check(s)) {
1081 /* VFP to general purpose register */
1082 tmp = tcg_temp_new_i32();
1083 vfp_load_reg32(tmp, a->vn);
1085 /* Set the 4 flag bits in the CPSR. */
1087 tcg_temp_free_i32(tmp);
1089 store_reg(s, a->rt, tmp);
1092 /* general purpose register to VFP */
1093 tmp = load_reg(s, a->rt);
1094 vfp_store_reg32(tmp, a->vn);
1095 tcg_temp_free_i32(tmp);
1101 static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
1105 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1110 * VMOV between two general-purpose registers and two single precision
1111 * floating point registers
1113 if (!vfp_access_check(s)) {
1118 /* fpreg to gpreg */
1119 tmp = tcg_temp_new_i32();
1120 vfp_load_reg32(tmp, a->vm);
1121 store_reg(s, a->rt, tmp);
1122 tmp = tcg_temp_new_i32();
1123 vfp_load_reg32(tmp, a->vm + 1);
1124 store_reg(s, a->rt2, tmp);
1126 /* gpreg to fpreg */
1127 tmp = load_reg(s, a->rt);
1128 vfp_store_reg32(tmp, a->vm);
1129 tcg_temp_free_i32(tmp);
1130 tmp = load_reg(s, a->rt2);
1131 vfp_store_reg32(tmp, a->vm + 1);
1132 tcg_temp_free_i32(tmp);
1138 static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
1143 * VMOV between two general-purpose registers and one double precision
1144 * floating point register. Note that this does not require support
1145 * for double precision arithmetic.
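 * The D register is accessed as two 32-bit halves: slot vm * 2 holds
 * the low word (moved to/from rt) and vm * 2 + 1 the high word (rt2).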
1147 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1151 /* UNDEF accesses to D16-D31 if they don't exist */
1152 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
1156 if (!vfp_access_check(s)) {
1161 /* fpreg to gpreg */
1162 tmp = tcg_temp_new_i32();
1163 vfp_load_reg32(tmp, a->vm * 2);
1164 store_reg(s, a->rt, tmp);
1165 tmp = tcg_temp_new_i32();
1166 vfp_load_reg32(tmp, a->vm * 2 + 1);
1167 store_reg(s, a->rt2, tmp);
1169 /* gpreg to fpreg */
1170 tmp = load_reg(s, a->rt);
1171 vfp_store_reg32(tmp, a->vm * 2);
1172 tcg_temp_free_i32(tmp);
1173 tmp = load_reg(s, a->rt2);
1174 vfp_store_reg32(tmp, a->vm * 2 + 1);
1175 tcg_temp_free_i32(tmp);
1181 static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
1186 if (!dc_isar_feature(aa32_fp16_arith, s)) {
1190 if (!vfp_access_check(s)) {
1194 /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
1195 offset = a->imm << 1;
1200 /* For thumb, use of PC is UNPREDICTABLE. */
1201 addr = add_reg_for_lit(s, a->rn, offset);
1202 tmp = tcg_temp_new_i32();
1204 gen_aa32_ld16u(s, tmp, addr, get_mem_index(s));
1205 vfp_store_reg32(tmp, a->vd);
1207 vfp_load_reg32(tmp, a->vd);
1208 gen_aa32_st16(s, tmp, addr, get_mem_index(s));
1210 tcg_temp_free_i32(tmp);
1211 tcg_temp_free_i32(addr);
1216 static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
1221 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1225 if (!vfp_access_check(s)) {
1229 offset = a->imm << 2;
1234 /* For thumb, use of PC is UNPREDICTABLE. */
1235 addr = add_reg_for_lit(s, a->rn, offset);
1236 tmp = tcg_temp_new_i32();
1238 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1239 vfp_store_reg32(tmp, a->vd);
1241 vfp_load_reg32(tmp, a->vd);
1242 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1244 tcg_temp_free_i32(tmp);
1245 tcg_temp_free_i32(addr);
1250 static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
1256 /* Note that this does not require support for double arithmetic. */
1257 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1261 /* UNDEF accesses to D16-D31 if they don't exist */
1262 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
1266 if (!vfp_access_check(s)) {
1270 offset = a->imm << 2;
1275 /* For thumb, use of PC is UNPREDICTABLE. */
1276 addr = add_reg_for_lit(s, a->rn, offset);
1277 tmp = tcg_temp_new_i64();
1279 gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
1280 vfp_store_reg64(tmp, a->vd);
1282 vfp_load_reg64(tmp, a->vd);
1283 gen_aa32_st64(s, tmp, addr, get_mem_index(s));
1285 tcg_temp_free_i64(tmp);
1286 tcg_temp_free_i32(addr);
1291 static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
1297 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1303 if (n == 0 || (a->vd + n) > 32) {
1305 * UNPREDICTABLE cases for bad immediates: we choose to
1306 * UNDEF to avoid generating huge numbers of TCG ops
1310 if (a->rn == 15 && a->w) {
1311 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
1315 if (!vfp_access_check(s)) {
1319 /* For thumb, use of PC is UNPREDICTABLE. */
1320 addr = add_reg_for_lit(s, a->rn, 0);
1323 tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
1326 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
1328 * Here 'addr' is the lowest address we will store to,
1329 * and is either the old SP (if post-increment) or
1330 * the new SP (if pre-decrement). For post-increment
1331 * where the old value is below the limit and the new
1332 * value is above, it is UNKNOWN whether the limit check
1333 * triggers; we choose to trigger.
1335 gen_helper_v8m_stackcheck(cpu_env, addr);
1339 tmp = tcg_temp_new_i32();
1340 for (i = 0; i < n; i++) {
1343 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1344 vfp_store_reg32(tmp, a->vd + i);
1347 vfp_load_reg32(tmp, a->vd + i);
1348 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1350 tcg_gen_addi_i32(addr, addr, offset);
1352 tcg_temp_free_i32(tmp);
1356 offset = -offset * n;
1357 tcg_gen_addi_i32(addr, addr, offset);
1359 store_reg(s, a->rn, addr);
1361 tcg_temp_free_i32(addr);
1367 static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
1374 /* Note that this does not require support for double arithmetic. */
1375 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1381 if (n == 0 || (a->vd + n) > 32 || n > 16) {
1383 * UNPREDICTABLE cases for bad immediates: we choose to
1384 * UNDEF to avoid generating huge numbers of TCG ops
1388 if (a->rn == 15 && a->w) {
1389 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
1393 /* UNDEF accesses to D16-D31 if they don't exist */
1394 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
1398 if (!vfp_access_check(s)) {
1402 /* For thumb, use of PC is UNPREDICTABLE. */
1403 addr = add_reg_for_lit(s, a->rn, 0);
1406 tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
1409 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
1411 * Here 'addr' is the lowest address we will store to,
1412 * and is either the old SP (if post-increment) or
1413 * the new SP (if pre-decrement). For post-increment
1414 * where the old value is below the limit and the new
1415 * value is above, it is UNKNOWN whether the limit check
1416 * triggers; we choose to trigger.
1418 gen_helper_v8m_stackcheck(cpu_env, addr);
1422 tmp = tcg_temp_new_i64();
1423 for (i = 0; i < n; i++) {
1426 gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
1427 vfp_store_reg64(tmp, a->vd + i);
1430 vfp_load_reg64(tmp, a->vd + i);
1431 gen_aa32_st64(s, tmp, addr, get_mem_index(s));
1433 tcg_gen_addi_i32(addr, addr, offset);
1435 tcg_temp_free_i64(tmp);
1439 offset = -offset * n;
1440 } else if (a->imm & 1) {
1447 tcg_gen_addi_i32(addr, addr, offset);
1449 store_reg(s, a->rn, addr);
1451 tcg_temp_free_i32(addr);
1458 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
1459 * The callback should emit code to write a value to vd. If
1460 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
1461 * will contain the old value of the relevant VFP register;
1462 * otherwise it must be written to only.
1464 typedef void VFPGen3OpSPFn(TCGv_i32 vd,
1465 TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
1466 typedef void VFPGen3OpDPFn(TCGv_i64 vd,
1467 TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
1470 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
1471 * The callback should emit code to write a value to vd (which
1472 * should be written to only).
1474 typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
1475 typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
1478 * Return true if the specified S reg is in a scalar bank
1479 * (ie if it is s0..s7)
1481 static inline bool vfp_sreg_is_scalar(int reg)
1483 return (reg & 0x18) == 0;
1487 * Return true if the specified D reg is in a scalar bank
1488 * (ie if it is d0..d3 or d16..d19)
1490 static inline bool vfp_dreg_is_scalar(int reg)
1492 return (reg & 0xc) == 0;
1496 * Advance the S reg number forwards by delta within its bank
1497 * (ie increment the low 3 bits but leave the rest the same)
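 * (worked example: vfp_advance_sreg(14, 2) == ((16 & 0x7) | (14 & ~0x7))
 * == 8, i.e. the iteration wraps from s14 back to s8 within the
 * s8..s15 bank)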
1499 static inline int vfp_advance_sreg(int reg, int delta)
1501 return ((reg + delta) & 0x7) | (reg & ~0x7);
1505 * Advance the D reg number forwards by delta within its bank
1506 * (ie increment the low 2 bits but leave the rest the same)
1508 static inline int vfp_advance_dreg(int reg, int delta)
1510 return ((reg + delta) & 0x3) | (reg & ~0x3);
1514 * Perform a 3-operand VFP data processing instruction. fn is the
1515 * callback to do the actual operation; this function deals with the
1516 * code to handle looping around for VFP vector processing.
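 * As an illustrative example: for a four-iteration vector operation with
 * stride 1 and a destination in the s8..s15 bank, the loop below writes
 * s8, s9, s10 and s11, wrapping within the bank, while a source operand
 * in s0..s7 keeps a delta of 0 and so stays fixed (the "mixed
 * scalar/vector" case).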
1518 static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
1519 int vd, int vn, int vm, bool reads_vd)
1521 uint32_t delta_m = 0;
1522 uint32_t delta_d = 0;
1523 int veclen = s->vec_len;
1524 TCGv_i32 f0, f1, fd;
1527 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1531 if (!dc_isar_feature(aa32_fpshvec, s) &&
1532 (veclen != 0 || s->vec_stride != 0)) {
1536 if (!vfp_access_check(s)) {
1541 /* Figure out what type of vector operation this is. */
1542 if (vfp_sreg_is_scalar(vd)) {
1546 delta_d = s->vec_stride + 1;
1548 if (vfp_sreg_is_scalar(vm)) {
1549 /* mixed scalar/vector */
1558 f0 = tcg_temp_new_i32();
1559 f1 = tcg_temp_new_i32();
1560 fd = tcg_temp_new_i32();
1561 fpst = fpstatus_ptr(FPST_FPCR);
1563 vfp_load_reg32(f0, vn);
1564 vfp_load_reg32(f1, vm);
1568 vfp_load_reg32(fd, vd);
1570 fn(fd, f0, f1, fpst);
1571 vfp_store_reg32(fd, vd);
1577 /* Set up the operands for the next iteration */
1579 vd = vfp_advance_sreg(vd, delta_d);
1580 vn = vfp_advance_sreg(vn, delta_d);
1581 vfp_load_reg32(f0, vn);
1583 vm = vfp_advance_sreg(vm, delta_m);
1584 vfp_load_reg32(f1, vm);
1588 tcg_temp_free_i32(f0);
1589 tcg_temp_free_i32(f1);
1590 tcg_temp_free_i32(fd);
1591 tcg_temp_free_ptr(fpst);
1596 static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
1597 int vd, int vn, int vm, bool reads_vd)
1600 * Do a half-precision operation. Functionally this is
1601 * the same as do_vfp_3op_sp(), except:
1602 * - it uses the FPST_FPCR_F16
1603 * - it doesn't need the VFP vector handling (fp16 is a
1604 * v8 feature, and in v8 VFP vectors don't exist)
1605 * - it does the aa32_fp16_arith feature test
1607 TCGv_i32 f0, f1, fd;
1610 if (!dc_isar_feature(aa32_fp16_arith, s)) {
1614 if (s->vec_len != 0 || s->vec_stride != 0) {
1618 if (!vfp_access_check(s)) {
1622 f0 = tcg_temp_new_i32();
1623 f1 = tcg_temp_new_i32();
1624 fd = tcg_temp_new_i32();
1625 fpst = fpstatus_ptr(FPST_FPCR_F16);
1627 vfp_load_reg32(f0, vn);
1628 vfp_load_reg32(f1, vm);
1631 vfp_load_reg32(fd, vd);
1633 fn(fd, f0, f1, fpst);
1634 vfp_store_reg32(fd, vd);
1636 tcg_temp_free_i32(f0);
1637 tcg_temp_free_i32(f1);
1638 tcg_temp_free_i32(fd);
1639 tcg_temp_free_ptr(fpst);
1644 static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
1645 int vd, int vn, int vm, bool reads_vd)
1647 uint32_t delta_m = 0;
1648 uint32_t delta_d = 0;
1649 int veclen = s->vec_len;
1650 TCGv_i64 f0, f1, fd;
1653 if (!dc_isar_feature(aa32_fpdp_v2, s)) {
1657 /* UNDEF accesses to D16-D31 if they don't exist */
1658 if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
1662 if (!dc_isar_feature(aa32_fpshvec, s) &&
1663 (veclen != 0 || s->vec_stride != 0)) {
1667 if (!vfp_access_check(s)) {
1672 /* Figure out what type of vector operation this is. */
1673 if (vfp_dreg_is_scalar(vd)) {
1677 delta_d = (s->vec_stride >> 1) + 1;
1679 if (vfp_dreg_is_scalar(vm)) {
1680 /* mixed scalar/vector */
1689 f0 = tcg_temp_new_i64();
1690 f1 = tcg_temp_new_i64();
1691 fd = tcg_temp_new_i64();
1692 fpst = fpstatus_ptr(FPST_FPCR);
1694 vfp_load_reg64(f0, vn);
1695 vfp_load_reg64(f1, vm);
1699 vfp_load_reg64(fd, vd);
1701 fn(fd, f0, f1, fpst);
1702 vfp_store_reg64(fd, vd);
1707 /* Set up the operands for the next iteration */
1709 vd = vfp_advance_dreg(vd, delta_d);
1710 vn = vfp_advance_dreg(vn, delta_d);
1711 vfp_load_reg64(f0, vn);
1713 vm = vfp_advance_dreg(vm, delta_m);
1714 vfp_load_reg64(f1, vm);
1718 tcg_temp_free_i64(f0);
1719 tcg_temp_free_i64(f1);
1720 tcg_temp_free_i64(fd);
1721 tcg_temp_free_ptr(fpst);
1726 static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
1728 uint32_t delta_m = 0;
1729 uint32_t delta_d = 0;
1730 int veclen = s->vec_len;
1733 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
1737 if (!dc_isar_feature(aa32_fpshvec, s) &&
1738 (veclen != 0 || s->vec_stride != 0)) {
1742 if (!vfp_access_check(s)) {
1747 /* Figure out what type of vector operation this is. */
1748 if (vfp_sreg_is_scalar(vd)) {
1752 delta_d = s->vec_stride + 1;
1754 if (vfp_sreg_is_scalar(vm)) {
1755 /* mixed scalar/vector */
1764 f0 = tcg_temp_new_i32();
1765 fd = tcg_temp_new_i32();
1767 vfp_load_reg32(f0, vm);
1771 vfp_store_reg32(fd, vd);
1778 /* single source one-many */
1780 vd = vfp_advance_sreg(vd, delta_d);
1781 vfp_store_reg32(fd, vd);
1786 /* Set up the operands for the next iteration */
1788 vd = vfp_advance_sreg(vd, delta_d);
1789 vm = vfp_advance_sreg(vm, delta_m);
1790 vfp_load_reg32(f0, vm);
1793 tcg_temp_free_i32(f0);
1794 tcg_temp_free_i32(fd);
1799 static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
1802 * Do a half-precision operation. Functionally this is
1803 * the same as do_vfp_2op_sp(), except:
1804 * - it doesn't need the VFP vector handling (fp16 is a
1805 * v8 feature, and in v8 VFP vectors don't exist)
1806 * - it does the aa32_fp16_arith feature test
1810 if (!dc_isar_feature(aa32_fp16_arith, s)) {
1814 if (s->vec_len != 0 || s->vec_stride != 0) {
1818 if (!vfp_access_check(s)) {
1822 f0 = tcg_temp_new_i32();
1823 vfp_load_reg32(f0, vm);
1825 vfp_store_reg32(f0, vd);
1826 tcg_temp_free_i32(f0);
1831 static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
1833 uint32_t delta_m = 0;
1834 uint32_t delta_d = 0;
1835 int veclen = s->vec_len;
1838 if (!dc_isar_feature(aa32_fpdp_v2, s)) {
1842 /* UNDEF accesses to D16-D31 if they don't exist */
1843 if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
1847 if (!dc_isar_feature(aa32_fpshvec, s) &&
1848 (veclen != 0 || s->vec_stride != 0)) {
1852 if (!vfp_access_check(s)) {
1857 /* Figure out what type of vector operation this is. */
1858 if (vfp_dreg_is_scalar(vd)) {
1862 delta_d = (s->vec_stride >> 1) + 1;
1864 if (vfp_dreg_is_scalar(vm)) {
1865 /* mixed scalar/vector */
1874 f0 = tcg_temp_new_i64();
1875 fd = tcg_temp_new_i64();
1877 vfp_load_reg64(f0, vm);
1881 vfp_store_reg64(fd, vd);
1888 /* single source one-many */
1890 vd = vfp_advance_dreg(vd, delta_d);
1891 vfp_store_reg64(fd, vd);
1896 /* Set up the operands for the next iteration */
1898 vd = vfp_advance_dreg(vd, delta_d);
1899 vm = vfp_advance_dreg(vm, delta_m);
1900 vfp_load_reg64(f0, vm);
1903 tcg_temp_free_i64(f0);
1904 tcg_temp_free_i64(fd);
1909 static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1911 /* Note that order of inputs to the add matters for NaNs */
1912 TCGv_i32 tmp = tcg_temp_new_i32();
1914 gen_helper_vfp_mulh(tmp, vn, vm, fpst);
1915 gen_helper_vfp_addh(vd, vd, tmp, fpst);
1916 tcg_temp_free_i32(tmp);
1919 static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
1921 return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
1924 static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1926 /* Note that order of inputs to the add matters for NaNs */
1927 TCGv_i32 tmp = tcg_temp_new_i32();
1929 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1930 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1931 tcg_temp_free_i32(tmp);
1934 static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
1936 return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
1939 static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1941 /* Note that order of inputs to the add matters for NaNs */
1942 TCGv_i64 tmp = tcg_temp_new_i64();
1944 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1945 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1946 tcg_temp_free_i64(tmp);
1949 static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
1951 return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
1954 static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1957 * VMLS: vd = vd + -(vn * vm)
1958 * Note that order of inputs to the add matters for NaNs.
1960 TCGv_i32 tmp = tcg_temp_new_i32();
1962 gen_helper_vfp_mulh(tmp, vn, vm, fpst);
1963 gen_helper_vfp_negh(tmp, tmp);
1964 gen_helper_vfp_addh(vd, vd, tmp, fpst);
1965 tcg_temp_free_i32(tmp);
1968 static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
1970 return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
1973 static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1976 * VMLS: vd = vd + -(vn * vm)
1977 * Note that order of inputs to the add matters for NaNs.
1979 TCGv_i32 tmp = tcg_temp_new_i32();
1981 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1982 gen_helper_vfp_negs(tmp, tmp);
1983 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1984 tcg_temp_free_i32(tmp);
1987 static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
1989 return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
1992 static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1995 * VMLS: vd = vd + -(vn * vm)
1996 * Note that order of inputs to the add matters for NaNs.
1998 TCGv_i64 tmp = tcg_temp_new_i64();
2000 gen_helper_vfp_muld(tmp, vn, vm, fpst);
2001 gen_helper_vfp_negd(tmp, tmp);
2002 gen_helper_vfp_addd(vd, vd, tmp, fpst);
2003 tcg_temp_free_i64(tmp);
2006 static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
2008 return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
2011 static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
2014 * VNMLS: -fd + (fn * fm)
2015 * Note that it isn't valid to replace (-A + B) with (B - A) or similar
2016 * plausible looking simplifications because this will give wrong results when B is zero or a NaN.
2019 TCGv_i32 tmp = tcg_temp_new_i32();
2021 gen_helper_vfp_mulh(tmp, vn, vm, fpst);
2022 gen_helper_vfp_negh(vd, vd);
2023 gen_helper_vfp_addh(vd, vd, tmp, fpst);
2024 tcg_temp_free_i32(tmp);
2027 static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
2029 return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
2032 static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
2035 * VNMLS: -fd + (fn * fm)
2036 * Note that it isn't valid to replace (-A + B) with (B - A) or similar
2037 * plausible looking simplifications because this will give wrong results when B is zero or a NaN.
2040 TCGv_i32 tmp = tcg_temp_new_i32();
2042 gen_helper_vfp_muls(tmp, vn, vm, fpst);
2043 gen_helper_vfp_negs(vd, vd);
2044 gen_helper_vfp_adds(vd, vd, tmp, fpst);
2045 tcg_temp_free_i32(tmp);
2048 static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
2050 return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
2053 static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
2056 * VNMLS: -fd + (fn * fm)
2057 * Note that it isn't valid to replace (-A + B) with (B - A) or similar
2058 * plausible looking simplifications because this will give wrong results when B is zero or a NaN.
2061 TCGv_i64 tmp = tcg_temp_new_i64();
2063 gen_helper_vfp_muld(tmp, vn, vm, fpst);
2064 gen_helper_vfp_negd(vd, vd);
2065 gen_helper_vfp_addd(vd, vd, tmp, fpst);
2066 tcg_temp_free_i64(tmp);
2069 static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
2071 return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
2074 static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
2076 /* VNMLA: -fd + -(fn * fm) */
2077 TCGv_i32 tmp = tcg_temp_new_i32();
2079 gen_helper_vfp_mulh(tmp, vn, vm, fpst);
2080 gen_helper_vfp_negh(tmp, tmp);
2081 gen_helper_vfp_negh(vd, vd);
2082 gen_helper_vfp_addh(vd, vd, tmp, fpst);
2083 tcg_temp_free_i32(tmp);
2086 static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
2088 return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
2091 static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
2093 /* VNMLA: -fd + -(fn * fm) */
2094 TCGv_i32 tmp = tcg_temp_new_i32();
2096 gen_helper_vfp_muls(tmp, vn, vm, fpst);
2097 gen_helper_vfp_negs(tmp, tmp);
2098 gen_helper_vfp_negs(vd, vd);
2099 gen_helper_vfp_adds(vd, vd, tmp, fpst);
2100 tcg_temp_free_i32(tmp);
2103 static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
2105 return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
2108 static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
2110 /* VNMLA: -fd + -(fn * fm) */
2111 TCGv_i64 tmp = tcg_temp_new_i64();
2113 gen_helper_vfp_muld(tmp, vn, vm, fpst);
2114 gen_helper_vfp_negd(tmp, tmp);
2115 gen_helper_vfp_negd(vd, vd);
2116 gen_helper_vfp_addd(vd, vd, tmp, fpst);
2117 tcg_temp_free_i64(tmp);
2120 static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
2122 return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
2125 static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
2127 return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
2130 static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
2132 return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
2135 static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
2137 return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
2140 static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
2142 /* VNMUL: -(fn * fm) */
2143 gen_helper_vfp_mulh(vd, vn, vm, fpst);
2144 gen_helper_vfp_negh(vd, vd);
2147 static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
2149 return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
2152 static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
2154 /* VNMUL: -(fn * fm) */
2155 gen_helper_vfp_muls(vd, vn, vm, fpst);
2156 gen_helper_vfp_negs(vd, vd);
2159 static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
2161 return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
2164 static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
2166 /* VNMUL: -(fn * fm) */
2167 gen_helper_vfp_muld(vd, vn, vm, fpst);
2168 gen_helper_vfp_negd(vd, vd);
2171 static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
2173 return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
2176 static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
2178 return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
2181 static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
2183 return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
2186 static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
2188 return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
2191 static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
2193 return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
2196 static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
2198 return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
2201 static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
2203 return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
2206 static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
2208 return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
2211 static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
2213 return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
2216 static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
2218 return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
2221 static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
2223 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2226 return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
2227 a->vd, a->vn, a->vm, false);
2230 static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
2232 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2235 return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
2236 a->vd, a->vn, a->vm, false);
2239 static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
2241 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2244 return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
2245 a->vd, a->vn, a->vm, false);
2248 static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
2250 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2253 return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
2254 a->vd, a->vn, a->vm, false);
2257 static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
2259 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2262 return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
2263 a->vd, a->vn, a->vm, false);
2266 static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
2268 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
2271 return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
2272 a->vd, a->vn, a->vm, false);
2275 static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
2278 * VFNMA : fd = muladd(-fd, fn, fm)
2279 * VFNMS : fd = muladd(-fd, -fn, fm)
2280 * VFMA : fd = muladd( fd, fn, fm)
2281 * VFMS : fd = muladd( fd, -fn, fm)
2283 * These are fused multiply-add, and must be done as one floating
2284 * point operation with no rounding between the multiplication and
2285 * addition steps. NB that doing the negations here as separate
2286 * steps is correct: an input NaN should come out with its sign
2287 * bit flipped if it is a negated-input.
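 * (The muladd helpers used below are expected to perform the whole fused
 * operation with a single rounding; only the negations are distinct steps.)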
2290 TCGv_i32 vn, vm, vd;
2293 * Present in VFPv4 only, and only with the FP16 extension.
2294 * Note that we can't rely on the SIMDFMAC check alone, because
2295 * in a Neon-no-VFP core that ID register field will be non-zero.
2297 if (!dc_isar_feature(aa32_fp16_arith, s) ||
2298 !dc_isar_feature(aa32_simdfmac, s) ||
2299 !dc_isar_feature(aa32_fpsp_v2, s)) {
2303 if (s->vec_len != 0 || s->vec_stride != 0) {
2307 if (!vfp_access_check(s)) {
2311 vn = tcg_temp_new_i32();
2312 vm = tcg_temp_new_i32();
2313 vd = tcg_temp_new_i32();
2315 vfp_load_reg32(vn, a->vn);
2316 vfp_load_reg32(vm, a->vm);
2319 gen_helper_vfp_negh(vn, vn);
2321 vfp_load_reg32(vd, a->vd);
2324 gen_helper_vfp_negh(vd, vd);
2326 fpst = fpstatus_ptr(FPST_FPCR_F16);
2327 gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
2328 vfp_store_reg32(vd, a->vd);
2330 tcg_temp_free_ptr(fpst);
2331 tcg_temp_free_i32(vn);
2332 tcg_temp_free_i32(vm);
2333 tcg_temp_free_i32(vd);
2338 static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
2341 * VFNMA : fd = muladd(-fd, fn, fm)
2342 * VFNMS : fd = muladd(-fd, -fn, fm)
2343 * VFMA : fd = muladd( fd, fn, fm)
2344 * VFMS : fd = muladd( fd, -fn, fm)
2346 * These are fused multiply-add, and must be done as one floating
2347 * point operation with no rounding between the multiplication and
2348 * addition steps. NB that doing the negations here as separate
2349 * steps is correct: an input NaN should come out with its sign
2350 * bit flipped if it is a negated-input.
2353 TCGv_i32 vn, vm, vd;
2356 * Present in VFPv4 only.
2357 * Note that we can't rely on the SIMDFMAC check alone, because
2358 * in a Neon-no-VFP core that ID register field will be non-zero.
2360 if (!dc_isar_feature(aa32_simdfmac, s) ||
2361 !dc_isar_feature(aa32_fpsp_v2, s)) {
2365 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
2366 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
2368 if (s->vec_len != 0 || s->vec_stride != 0) {
2372 if (!vfp_access_check(s)) {
2376 vn = tcg_temp_new_i32();
2377 vm = tcg_temp_new_i32();
2378 vd = tcg_temp_new_i32();
2380 vfp_load_reg32(vn, a->vn);
2381 vfp_load_reg32(vm, a->vm);
2384 gen_helper_vfp_negs(vn, vn);
2386 vfp_load_reg32(vd, a->vd);
2389 gen_helper_vfp_negs(vd, vd);
2391 fpst = fpstatus_ptr(FPST_FPCR);
2392 gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
2393 vfp_store_reg32(vd, a->vd);
2395 tcg_temp_free_ptr(fpst);
2396 tcg_temp_free_i32(vn);
2397 tcg_temp_free_i32(vm);
2398 tcg_temp_free_i32(vd);
2403 static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
2406 * VFNMA : fd = muladd(-fd, fn, fm)
2407 * VFNMS : fd = muladd(-fd, -fn, fm)
2408 * VFMA : fd = muladd( fd, fn, fm)
2409 * VFMS : fd = muladd( fd, -fn, fm)
2411 * These are fused multiply-add, and must be done as one floating
2412 * point operation with no rounding between the multiplication and
2413 * addition steps. NB that doing the negations here as separate
2414 * steps is correct: an input NaN should come out with its sign
2415 * bit flipped if it is a negated-input.
2418 TCGv_i64 vn, vm, vd;
2421 * Present in VFPv4 only.
2422 * Note that we can't rely on the SIMDFMAC check alone, because
2423 * in a Neon-no-VFP core that ID register field will be non-zero.
2425 if (!dc_isar_feature(aa32_simdfmac, s) ||
2426 !dc_isar_feature(aa32_fpdp_v2, s)) {
2430 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
2431 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
2433 if (s->vec_len != 0 || s->vec_stride != 0) {
2437 /* UNDEF accesses to D16-D31 if they don't exist. */
2438 if (!dc_isar_feature(aa32_simd_r32, s) &&
2439 ((a->vd | a->vn | a->vm) & 0x10)) {
2443 if (!vfp_access_check(s)) {
2447 vn = tcg_temp_new_i64();
2448 vm = tcg_temp_new_i64();
2449 vd = tcg_temp_new_i64();
2451 vfp_load_reg64(vn, a->vn);
2452 vfp_load_reg64(vm, a->vm);
2455 gen_helper_vfp_negd(vn, vn);
2457 vfp_load_reg64(vd, a->vd);
2460 gen_helper_vfp_negd(vd, vd);
2462 fpst = fpstatus_ptr(FPST_FPCR);
2463 gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
2464 vfp_store_reg64(vd, a->vd);
2466 tcg_temp_free_ptr(fpst);
2467 tcg_temp_free_i64(vn);
2468 tcg_temp_free_i64(vm);
2469 tcg_temp_free_i64(vd);
2474 #define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD) \
2475 static bool trans_##INSN##_##PREC(DisasContext *s, \
2476 arg_##INSN##_##PREC *a) \
2478 return do_vfm_##PREC(s, a, NEGN, NEGD); \
2481 #define MAKE_VFM_TRANS_FNS(PREC) \
2482 MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
2483 MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
2484 MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
2485 MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)
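/*
 * For example, MAKE_VFM_TRANS_FNS(sp) expands to the four trans functions
 * trans_VFMA_sp(), trans_VFMS_sp(), trans_VFNMA_sp() and trans_VFNMS_sp(),
 * each forwarding to do_vfm_sp() with the appropriate negation flags.
 */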
2487 MAKE_VFM_TRANS_FNS(hp)
2488 MAKE_VFM_TRANS_FNS(sp)
2489 MAKE_VFM_TRANS_FNS(dp)
2491 static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
2495 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2499 if (s->vec_len != 0 || s->vec_stride != 0) {
2503 if (!vfp_access_check(s)) {
2507 fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
2508 vfp_store_reg32(fd, a->vd);
2509 tcg_temp_free_i32(fd);
2513 static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
2515 uint32_t delta_d = 0;
2516 int veclen = s->vec_len;
2522 if (!dc_isar_feature(aa32_fpsp_v3, s)) {
2526 if (!dc_isar_feature(aa32_fpshvec, s) &&
2527 (veclen != 0 || s->vec_stride != 0)) {
2531 if (!vfp_access_check(s)) {
2536 /* Figure out what type of vector operation this is. */
2537 if (vfp_sreg_is_scalar(vd)) {
2541 delta_d = s->vec_stride + 1;
2545 fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
2548 vfp_store_reg32(fd, vd);
2554 /* Set up the operands for the next iteration */
2556 vd = vfp_advance_sreg(vd, delta_d);
2559 tcg_temp_free_i32(fd);
2563 static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
2565 uint32_t delta_d = 0;
2566 int veclen = s->vec_len;
2572 if (!dc_isar_feature(aa32_fpdp_v3, s)) {
2576 /* UNDEF accesses to D16-D31 if they don't exist. */
2577 if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
2581 if (!dc_isar_feature(aa32_fpshvec, s) &&
2582 (veclen != 0 || s->vec_stride != 0)) {
2586 if (!vfp_access_check(s)) {
2591 /* Figure out what type of vector operation this is. */
2592 if (vfp_dreg_is_scalar(vd)) {
2596 delta_d = (s->vec_stride >> 1) + 1;
2600 fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
2603 vfp_store_reg64(fd, vd);
2609 /* Set up the operands for the next iteration */
2611 vd = vfp_advance_dreg(vd, delta_d);
2614 tcg_temp_free_i64(fd);
2618 #define DO_VFP_2OP(INSN, PREC, FN) \
2619 static bool trans_##INSN##_##PREC(DisasContext *s, \
2620 arg_##INSN##_##PREC *a) \
2622 return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
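/*
 * For example, DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss) defines
 * trans_VABS_sp(), which simply forwards to do_vfp_2op_sp() with that
 * helper as the callback.
 */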
2625 DO_VFP_2OP(VMOV_reg, sp, tcg_gen_mov_i32)
2626 DO_VFP_2OP(VMOV_reg, dp, tcg_gen_mov_i64)
2628 DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh)
2629 DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss)
2630 DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd)
2632 DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh)
2633 DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs)
2634 DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd)
2636 static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
2638 gen_helper_vfp_sqrth(vd, vm, cpu_env);
2641 static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
2643 gen_helper_vfp_sqrts(vd, vm, cpu_env);
2646 static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
2648 gen_helper_vfp_sqrtd(vd, vm, cpu_env);
2651 DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp)
2652 DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp)
2653 DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp)
2655 static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
2659 if (!dc_isar_feature(aa32_fp16_arith, s)) {
2663 /* Vm/M bits must be zero for the Z variant */
2664 if (a->z && a->vm != 0) {
2668 if (!vfp_access_check(s)) {
2672 vd = tcg_temp_new_i32();
2673 vm = tcg_temp_new_i32();
2675 vfp_load_reg32(vd, a->vd);
2677 tcg_gen_movi_i32(vm, 0);
2679 vfp_load_reg32(vm, a->vm);
2683 gen_helper_vfp_cmpeh(vd, vm, cpu_env);
2685 gen_helper_vfp_cmph(vd, vm, cpu_env);
2688 tcg_temp_free_i32(vd);
2689 tcg_temp_free_i32(vm);
2694 static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
2698 if (!dc_isar_feature(aa32_fpsp_v2, s)) {
2702 /* Vm/M bits must be zero for the Z variant */
2703 if (a->z && a->vm != 0) {
2707 if (!vfp_access_check(s)) {
2711 vd = tcg_temp_new_i32();
2712 vm = tcg_temp_new_i32();
2714 vfp_load_reg32(vd, a->vd);
2716 tcg_gen_movi_i32(vm, 0);
2718 vfp_load_reg32(vm, a->vm);
2722 gen_helper_vfp_cmpes(vd, vm, cpu_env);
2724 gen_helper_vfp_cmps(vd, vm, cpu_env);
2727 tcg_temp_free_i32(vd);
2728 tcg_temp_free_i32(vm);
2733 static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
2737 if (!dc_isar_feature(aa32_fpdp_v2, s)) {
2741 /* Vm/M bits must be zero for the Z variant */
2742 if (a->z && a->vm != 0) {
2746 /* UNDEF accesses to D16-D31 if they don't exist. */
2747 if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
2751 if (!vfp_access_check(s)) {
2755 vd = tcg_temp_new_i64();
2756 vm = tcg_temp_new_i64();
2758 vfp_load_reg64(vd, a->vd);
2760 tcg_gen_movi_i64(vm, 0);
2762 vfp_load_reg64(vm, a->vm);
2766 gen_helper_vfp_cmped(vd, vm, cpu_env);
2768 gen_helper_vfp_cmpd(vd, vm, cpu_env);
2771 tcg_temp_free_i64(vd);
2772 tcg_temp_free_i64(vm);
2777 static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
2783 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2787 if (!vfp_access_check(s)) {
2791 fpst = fpstatus_ptr(FPST_FPCR);
2792 ahp_mode = get_ahp_flag();
2793 tmp = tcg_temp_new_i32();
2794 /* The T bit tells us if we want the low or high 16 bits of Vm */
2795 tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2796 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
2797 vfp_store_reg32(tmp, a->vd);
2798 tcg_temp_free_i32(ahp_mode);
2799 tcg_temp_free_ptr(fpst);
2800 tcg_temp_free_i32(tmp);
static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    vfp_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    /* The T bit tells us if we want the low or high 16 bits of Vd */
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    /* The T bit tells us if we want the low or high 16 bits of Vd */
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

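/*
 * VRINT round-to-integral family: VRINTR rounds using the rounding
 * mode currently in FPSCR, VRINTZ always rounds towards zero, and
 * VRINTX rounds like VRINTR but also signals Inexact when the result
 * differs from the input.
 */
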
static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

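/*
 * For VRINTZ we must force round-to-zero regardless of the rounding
 * mode currently in FPSCR. The set_rmode helper installs its argument
 * as the new rounding mode and returns the previous one, so passing
 * the same temp for both arguments means a second identical call
 * restores the saved mode:
 *
 *   tcg_rmode = tcg_const_i32(float_round_to_zero);
 *   gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);   (install RZ)
 *   ... the rint operation ...
 *   gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);   (restore)
 */
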
static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rinth(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}

static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    gen_helper_rinth_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    vfp_load_reg32(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rints_exact(tmp, tmp, fpst);
    vfp_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    vfp_load_reg64(tmp, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    vfp_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}

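/*
 * VCVT integer -> float: the decoded 's' field selects a signed vs
 * unsigned 32-bit source; rounding uses the FPSCR rounding mode.
 */
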
static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR_F16);
    if (a->s) {
        /* i32 -> f16 */
        gen_helper_vfp_sitoh(vm, vm, fpst);
    } else {
        /* u32 -> f16 */
        gen_helper_vfp_uitoh(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}

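/*
 * VJCVT: convert double to 32-bit signed integer with the JavaScript
 * semantics: round towards zero, and on overflow take the low 32 bits
 * of the true integer result rather than saturating. The helper also
 * sets the FPSCR NZCV flags (Z set if the conversion was exact); see
 * FPToFixedJS() in the v8 ARM ARM.
 */
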
static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}

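/*
 * VCVT between floating point and fixed point: the fixed-point value
 * lives in the same VFP register as the float operand/result. The imm
 * field encodes size - fbits, so the number of fraction bits below is
 * recovered as frac_bits = size - imm: e.g. the 32-bit form (low bit
 * of opc set) with imm == 24 operates on a fixed-point number with
 * 32 - 24 == 8 fraction bits.
 */
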
static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    vfp_load_reg64(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

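/*
 * VCVT float -> int: the rz flag distinguishes VCVT, which always
 * rounds towards zero, from VCVTR, which uses the FPSCR rounding mode.
 */
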
static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosih(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_touih(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

/*
 * The decode for VLLDM and VLSTM is nonstandard because:
 *  * if there is no FPU then these insns must NOP in
 *    Secure state and UNDEF in Nonsecure state
 *  * if there is an FPU then these insns do not have
 *    the usual behaviour that vfp_access_check() provides of
 *    being controlled by CPACR/NSACR enable bits or the
 *    lazy-stacking logic.
 */
static bool trans_VLLDM_VLSTM(DisasContext *s, arg_VLLDM_VLSTM *a)
{
    TCGv_i32 fptr;

    if (!arm_dc_feature(s, ARM_FEATURE_M) ||
        !arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }
    /*
     * If not secure, UNDEF. We must emit code for this
     * rather than returning false so that this takes
     * precedence over the m-nocp.decode NOCP fallback.
     */
    if (!s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }
    /* If no fpu, NOP. */
    if (!dc_isar_feature(aa32_vfp, s)) {
        return true;
    }

    fptr = load_reg(s, a->rn);
    if (a->l) {
        gen_helper_v7m_vlldm(cpu_env, fptr);
    } else {
        gen_helper_v7m_vlstm(cpu_env, fptr);
    }
    tcg_temp_free_i32(fptr);

    /* End the TB, because we have updated FP control bits */
    s->base.is_jmp = DISAS_UPDATE_EXIT;
    return true;
}

static bool trans_VSCCLRM(DisasContext *s, arg_VSCCLRM *a)
{
    int btmreg, topreg;
    TCGv_i64 zero;
    TCGv_i32 aspen, sfpa;

    if (!dc_isar_feature(aa32_m_sec_state, s)) {
        /* Before v8.1M, fall through in decode to NOCP check */
        return false;
    }

    /* Explicitly UNDEF because this takes precedence over NOCP */
    if (!arm_dc_feature(s, ARM_FEATURE_M_MAIN) || !s->v8m_secure) {
        unallocated_encoding(s);
        return true;
    }

    if (!dc_isar_feature(aa32_vfp_simd, s)) {
        /* NOP if we have neither FP nor MVE */
        return true;
    }

    /*
     * If FPCCR.ASPEN != 0 && CONTROL_S.SFPA == 0 then there is no
     * active floating point context so we must NOP (without doing
     * any lazy state preservation or the NOCP check).
     */
    aspen = load_cpu_field(v7m.fpccr[M_REG_S]);
    sfpa = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(sfpa, sfpa, R_V7M_CONTROL_SFPA_MASK);
    tcg_gen_or_i32(sfpa, sfpa, aspen);
    arm_gen_condlabel(s);
    tcg_gen_brcondi_i32(TCG_COND_EQ, sfpa, 0, s->condlabel);

    if (s->fp_excp_el != 0) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    topreg = a->vd + a->imm - 1;
    btmreg = a->vd;

    /* Convert to Sreg numbers if the insn specified the registers in Dregs */
    if (a->size == 3) {
        topreg = topreg * 2 + 1;
        btmreg *= 2;
    }

    if (topreg > 63 || (topreg > 31 && !(topreg & 1))) {
        /* UNPREDICTABLE: we choose to undef */
        unallocated_encoding(s);
        return true;
    }

    /* Silently ignore requests to clear D16-D31 if they don't exist */
    if (topreg > 31 && !dc_isar_feature(aa32_simd_r32, s)) {
        topreg = 31;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Zero the Sregs from btmreg to topreg inclusive. */
    zero = tcg_const_i64(0);
    if (btmreg & 1) {
        write_neon_element64(zero, btmreg >> 1, 1, MO_32);
        btmreg++;
    }
    for (; btmreg + 1 <= topreg; btmreg += 2) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_64);
    }
    if (btmreg == topreg) {
        write_neon_element64(zero, btmreg >> 1, 0, MO_32);
        btmreg++;
    }
    assert(btmreg == topreg + 1);
    /* TODO: when MVE is implemented, zero VPR here */
    return true;
}

static bool trans_NOCP(DisasContext *s, arg_nocp *a)
{
    /*
     * Handle M-profile early check for disabled coprocessor:
     * all we need to do here is emit the NOCP exception if
     * the coprocessor is disabled. Otherwise we return false
     * and the real VFP/etc decode will handle the insn.
     */
    assert(arm_dc_feature(s, ARM_FEATURE_M));

    if (a->cp == 11) {
        a->cp = 10;
    }
    if (arm_dc_feature(s, ARM_FEATURE_V8_1M) &&
        (a->cp == 8 || a->cp == 9 || a->cp == 14 || a->cp == 15)) {
        /* in v8.1M cp 8, 9, 14, 15 also are governed by the cp10 enable */
        a->cp = 10;
    }

    if (a->cp != 10) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), default_exception_el(s));
        return true;
    }

    if (s->fp_excp_el != 0) {
        gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                           syn_uncategorized(), s->fp_excp_el);
        return true;
    }

    return false;
}

static bool trans_NOCP_8_1(DisasContext *s, arg_nocp *a)
{
    /* This range needs a coprocessor check for v8.1M and later only */
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    return trans_NOCP(s, a);
}

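/*
 * VINS and VMOVX manipulate the 16-bit halves of a single-precision
 * register: they exist so that half-precision values can be moved
 * around without conversion (part of the v8.2 FP16 extension).
 */
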
static bool trans_VINS(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rd, rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Insert low half of Vm into high half of Vd */
    rm = tcg_temp_new_i32();
    rd = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    vfp_load_reg32(rd, a->vd);
    tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
    vfp_store_reg32(rd, a->vd);
    tcg_temp_free_i32(rm);
    tcg_temp_free_i32(rd);
    return true;
}

static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Set Vd to high half of Vm */
    rm = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    tcg_gen_shri_i32(rm, rm, 16);
    vfp_store_reg32(rm, a->vd);
    tcg_temp_free_i32(rm);
    return true;
}