qemu/ar7.git: target/arm/translate-vfp.inc.c (blob at commit "target/arm: Remove helper_double_saturate")
1 /*
2 * ARM translation: AArch32 VFP instructions
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 * Copyright (c) 2019 Linaro, Ltd.
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
24 * This file is intended to be included from translate.c; it uses
25 * some macros and definitions provided by that file.
26 * It might be possible to convert it to a standalone .c file eventually.
29 /* Include the generated VFP decoder */
30 #include "decode-vfp.inc.c"
31 #include "decode-vfp-uncond.inc.c"
34 * The imm8 encodes the sign bit, enough bits to represent an exponent in
35 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
36 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
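/*
 * Worked example (illustrative): imm8 == 0x70 has sign 0, bit 6 set and
 * low bits 0b110000, so it expands to 0x3f800000 for MO_32 and to
 * 0x3ff0000000000000 for MO_64, i.e. the value 1.0.
 */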
38 uint64_t vfp_expand_imm(int size, uint8_t imm8)
40 uint64_t imm;
42 switch (size) {
43 case MO_64:
44 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
45 (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
46 extract32(imm8, 0, 6);
47 imm <<= 48;
48 break;
49 case MO_32:
50 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
51 (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
52 (extract32(imm8, 0, 6) << 3);
53 imm <<= 16;
54 break;
55 case MO_16:
56 imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
57 (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
58 (extract32(imm8, 0, 6) << 6);
59 break;
60 default:
61 g_assert_not_reached();
63 return imm;
67 * Return the offset of a 16-bit half of the specified VFP single-precision
68 * register. If top is true, returns the top 16 bits; otherwise the bottom
69 * 16 bits.
71 static inline long vfp_f16_offset(unsigned reg, bool top)
73 long offs = vfp_reg_offset(false, reg);
74 #ifdef HOST_WORDS_BIGENDIAN
75 if (!top) {
76 offs += 2;
78 #else
79 if (top) {
80 offs += 2;
82 #endif
83 return offs;
87 * Check that VFP access is enabled. If it is, do the necessary
88 * M-profile lazy-FP handling and then return true.
89 * If not, emit code to generate an appropriate exception and
90 * return false.
91 * The ignore_vfp_enabled argument specifies that we should ignore
92 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
93 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
95 static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
97 if (s->fp_excp_el) {
98 if (arm_dc_feature(s, ARM_FEATURE_M)) {
99 gen_exception_insn(s, s->pc_curr, EXCP_NOCP, syn_uncategorized(),
100 s->fp_excp_el);
101 } else {
102 gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
103 syn_fp_access_trap(1, 0xe, false),
104 s->fp_excp_el);
106 return false;
109 if (!s->vfp_enabled && !ignore_vfp_enabled) {
110 assert(!arm_dc_feature(s, ARM_FEATURE_M));
111 unallocated_encoding(s);
112 return false;
115 if (arm_dc_feature(s, ARM_FEATURE_M)) {
116 /* Handle M-profile lazy FP state mechanics */
118 /* Trigger lazy-state preservation if necessary */
119 if (s->v7m_lspact) {
121 * Lazy state saving affects external memory and also the NVIC,
122 * so we must mark it as an IO operation for icount.
124 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
125 gen_io_start();
127 gen_helper_v7m_preserve_fp_state(cpu_env);
128 if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
129 gen_io_end();
132 * If the preserve_fp_state helper doesn't throw an exception
133 * then it will clear LSPACT; we don't need to repeat this for
134 * any further FP insns in this TB.
136 s->v7m_lspact = false;
139 /* Update ownership of FP context: set FPCCR.S to match current state */
140 if (s->v8m_fpccr_s_wrong) {
141 TCGv_i32 tmp;
143 tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
144 if (s->v8m_secure) {
145 tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
146 } else {
147 tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
149 store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
150 /* Don't need to do this for any further FP insns in this TB */
151 s->v8m_fpccr_s_wrong = false;
154 if (s->v7m_new_fp_ctxt_needed) {
156 * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
157 * and the FPSCR.
159 TCGv_i32 control, fpscr;
160 uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
162 fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
163 gen_helper_vfp_set_fpscr(cpu_env, fpscr);
164 tcg_temp_free_i32(fpscr);
166 * We don't need to arrange to end the TB, because the only
167 * parts of FPSCR which we cache in the TB flags are the VECLEN
168 * and VECSTRIDE, and those don't exist for M-profile.
171 if (s->v8m_secure) {
172 bits |= R_V7M_CONTROL_SFPA_MASK;
174 control = load_cpu_field(v7m.control[M_REG_S]);
175 tcg_gen_ori_i32(control, control, bits);
176 store_cpu_field(control, v7m.control[M_REG_S]);
177 /* Don't need to do this for any further FP insns in this TB */
178 s->v7m_new_fp_ctxt_needed = false;
182 return true;
186 * The most usual kind of VFP access check, for everything except
187 * FMXR/FMRX to the always-available special registers.
189 static bool vfp_access_check(DisasContext *s)
191 return full_vfp_access_check(s, false);
194 static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
196 uint32_t rd, rn, rm;
197 bool dp = a->dp;
199 if (!dc_isar_feature(aa32_vsel, s)) {
200 return false;
203 /* UNDEF accesses to D16-D31 if they don't exist */
204 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
205 ((a->vm | a->vn | a->vd) & 0x10)) {
206 return false;
209 if (dp && !dc_isar_feature(aa32_fpdp, s)) {
210 return false;
213 rd = a->vd;
214 rn = a->vn;
215 rm = a->vm;
217 if (!vfp_access_check(s)) {
218 return true;
221 if (dp) {
222 TCGv_i64 frn, frm, dest;
223 TCGv_i64 tmp, zero, zf, nf, vf;
225 zero = tcg_const_i64(0);
227 frn = tcg_temp_new_i64();
228 frm = tcg_temp_new_i64();
229 dest = tcg_temp_new_i64();
231 zf = tcg_temp_new_i64();
232 nf = tcg_temp_new_i64();
233 vf = tcg_temp_new_i64();
235 tcg_gen_extu_i32_i64(zf, cpu_ZF);
236 tcg_gen_ext_i32_i64(nf, cpu_NF);
237 tcg_gen_ext_i32_i64(vf, cpu_VF);
239 neon_load_reg64(frn, rn);
240 neon_load_reg64(frm, rm);
241 switch (a->cc) {
242 case 0: /* eq: Z */
243 tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
244 frn, frm);
245 break;
246 case 1: /* vs: V */
247 tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
248 frn, frm);
249 break;
250 case 2: /* ge: N == V -> N ^ V == 0 */
251 tmp = tcg_temp_new_i64();
252 tcg_gen_xor_i64(tmp, vf, nf);
253 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
254 frn, frm);
255 tcg_temp_free_i64(tmp);
256 break;
257 case 3: /* gt: !Z && N == V */
258 tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
259 frn, frm);
260 tmp = tcg_temp_new_i64();
261 tcg_gen_xor_i64(tmp, vf, nf);
262 tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
263 dest, frm);
264 tcg_temp_free_i64(tmp);
265 break;
267 neon_store_reg64(dest, rd);
268 tcg_temp_free_i64(frn);
269 tcg_temp_free_i64(frm);
270 tcg_temp_free_i64(dest);
272 tcg_temp_free_i64(zf);
273 tcg_temp_free_i64(nf);
274 tcg_temp_free_i64(vf);
276 tcg_temp_free_i64(zero);
277 } else {
278 TCGv_i32 frn, frm, dest;
279 TCGv_i32 tmp, zero;
281 zero = tcg_const_i32(0);
283 frn = tcg_temp_new_i32();
284 frm = tcg_temp_new_i32();
285 dest = tcg_temp_new_i32();
286 neon_load_reg32(frn, rn);
287 neon_load_reg32(frm, rm);
288 switch (a->cc) {
289 case 0: /* eq: Z */
290 tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
291 frn, frm);
292 break;
293 case 1: /* vs: V */
294 tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
295 frn, frm);
296 break;
297 case 2: /* ge: N == V -> N ^ V == 0 */
298 tmp = tcg_temp_new_i32();
299 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
300 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
301 frn, frm);
302 tcg_temp_free_i32(tmp);
303 break;
304 case 3: /* gt: !Z && N == V */
305 tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
306 frn, frm);
307 tmp = tcg_temp_new_i32();
308 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
309 tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
310 dest, frm);
311 tcg_temp_free_i32(tmp);
312 break;
314 neon_store_reg32(dest, rd);
315 tcg_temp_free_i32(frn);
316 tcg_temp_free_i32(frm);
317 tcg_temp_free_i32(dest);
319 tcg_temp_free_i32(zero);
322 return true;
325 static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
327 uint32_t rd, rn, rm;
328 bool dp = a->dp;
329 bool vmin = a->op;
330 TCGv_ptr fpst;
332 if (!dc_isar_feature(aa32_vminmaxnm, s)) {
333 return false;
336 /* UNDEF accesses to D16-D31 if they don't exist */
337 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
338 ((a->vm | a->vn | a->vd) & 0x10)) {
339 return false;
342 if (dp && !dc_isar_feature(aa32_fpdp, s)) {
343 return false;
346 rd = a->vd;
347 rn = a->vn;
348 rm = a->vm;
350 if (!vfp_access_check(s)) {
351 return true;
354 fpst = get_fpstatus_ptr(0);
356 if (dp) {
357 TCGv_i64 frn, frm, dest;
359 frn = tcg_temp_new_i64();
360 frm = tcg_temp_new_i64();
361 dest = tcg_temp_new_i64();
363 neon_load_reg64(frn, rn);
364 neon_load_reg64(frm, rm);
365 if (vmin) {
366 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
367 } else {
368 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
370 neon_store_reg64(dest, rd);
371 tcg_temp_free_i64(frn);
372 tcg_temp_free_i64(frm);
373 tcg_temp_free_i64(dest);
374 } else {
375 TCGv_i32 frn, frm, dest;
377 frn = tcg_temp_new_i32();
378 frm = tcg_temp_new_i32();
379 dest = tcg_temp_new_i32();
381 neon_load_reg32(frn, rn);
382 neon_load_reg32(frm, rm);
383 if (vmin) {
384 gen_helper_vfp_minnums(dest, frn, frm, fpst);
385 } else {
386 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
388 neon_store_reg32(dest, rd);
389 tcg_temp_free_i32(frn);
390 tcg_temp_free_i32(frm);
391 tcg_temp_free_i32(dest);
394 tcg_temp_free_ptr(fpst);
395 return true;
399 * Table for converting the most common AArch32 encoding of
400 * rounding mode to arm_fprounding order (which matches the
401 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
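/*
 * Illustrative mapping, per FPDecodeRM(): indexes 0..3 correspond to the
 * A/N/P/M instruction variants, i.e. round to nearest with ties away,
 * ties to even, towards +infinity and towards -infinity respectively.
 */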
403 static const uint8_t fp_decode_rm[] = {
404 FPROUNDING_TIEAWAY,
405 FPROUNDING_TIEEVEN,
406 FPROUNDING_POSINF,
407 FPROUNDING_NEGINF,
410 static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
412 uint32_t rd, rm;
413 bool dp = a->dp;
414 TCGv_ptr fpst;
415 TCGv_i32 tcg_rmode;
416 int rounding = fp_decode_rm[a->rm];
418 if (!dc_isar_feature(aa32_vrint, s)) {
419 return false;
422 /* UNDEF accesses to D16-D31 if they don't exist */
423 if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
424 ((a->vm | a->vd) & 0x10)) {
425 return false;
428 if (dp && !dc_isar_feature(aa32_fpdp, s)) {
429 return false;
432 rd = a->vd;
433 rm = a->vm;
435 if (!vfp_access_check(s)) {
436 return true;
439 fpst = get_fpstatus_ptr(0);
441 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
442 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
444 if (dp) {
445 TCGv_i64 tcg_op;
446 TCGv_i64 tcg_res;
447 tcg_op = tcg_temp_new_i64();
448 tcg_res = tcg_temp_new_i64();
449 neon_load_reg64(tcg_op, rm);
450 gen_helper_rintd(tcg_res, tcg_op, fpst);
451 neon_store_reg64(tcg_res, rd);
452 tcg_temp_free_i64(tcg_op);
453 tcg_temp_free_i64(tcg_res);
454 } else {
455 TCGv_i32 tcg_op;
456 TCGv_i32 tcg_res;
457 tcg_op = tcg_temp_new_i32();
458 tcg_res = tcg_temp_new_i32();
459 neon_load_reg32(tcg_op, rm);
460 gen_helper_rints(tcg_res, tcg_op, fpst);
461 neon_store_reg32(tcg_res, rd);
462 tcg_temp_free_i32(tcg_op);
463 tcg_temp_free_i32(tcg_res);
466 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
467 tcg_temp_free_i32(tcg_rmode);
469 tcg_temp_free_ptr(fpst);
470 return true;
473 static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
475 uint32_t rd, rm;
476 bool dp = a->dp;
477 TCGv_ptr fpst;
478 TCGv_i32 tcg_rmode, tcg_shift;
479 int rounding = fp_decode_rm[a->rm];
480 bool is_signed = a->op;
482 if (!dc_isar_feature(aa32_vcvt_dr, s)) {
483 return false;
486 /* UNDEF accesses to D16-D31 if they don't exist */
487 if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
488 return false;
491 if (dp && !dc_isar_feature(aa32_fpdp, s)) {
492 return false;
495 rd = a->vd;
496 rm = a->vm;
498 if (!vfp_access_check(s)) {
499 return true;
502 fpst = get_fpstatus_ptr(0);
504 tcg_shift = tcg_const_i32(0);
506 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
507 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
509 if (dp) {
510 TCGv_i64 tcg_double, tcg_res;
511 TCGv_i32 tcg_tmp;
512 tcg_double = tcg_temp_new_i64();
513 tcg_res = tcg_temp_new_i64();
514 tcg_tmp = tcg_temp_new_i32();
515 neon_load_reg64(tcg_double, rm);
516 if (is_signed) {
517 gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
518 } else {
519 gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
521 tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
522 neon_store_reg32(tcg_tmp, rd);
523 tcg_temp_free_i32(tcg_tmp);
524 tcg_temp_free_i64(tcg_res);
525 tcg_temp_free_i64(tcg_double);
526 } else {
527 TCGv_i32 tcg_single, tcg_res;
528 tcg_single = tcg_temp_new_i32();
529 tcg_res = tcg_temp_new_i32();
530 neon_load_reg32(tcg_single, rm);
531 if (is_signed) {
532 gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
533 } else {
534 gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
536 neon_store_reg32(tcg_res, rd);
537 tcg_temp_free_i32(tcg_res);
538 tcg_temp_free_i32(tcg_single);
541 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
542 tcg_temp_free_i32(tcg_rmode);
544 tcg_temp_free_i32(tcg_shift);
546 tcg_temp_free_ptr(fpst);
548 return true;
551 static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
553 /* VMOV scalar to general purpose register */
554 TCGv_i32 tmp;
555 int pass;
556 uint32_t offset;
558 /* UNDEF accesses to D16-D31 if they don't exist */
559 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
560 return false;
563 offset = a->index << a->size;
564 pass = extract32(offset, 2, 1);
565 offset = extract32(offset, 0, 2) * 8;
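/*
 * Illustration of the element addressing above: a byte access (size 0)
 * with index 5 gives offset 5, so pass = 1 (the high 32-bit word of the
 * D register) and offset = 8 (bits [15:8] of that word), i.e. byte 5 of Dn.
 */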
567 if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
568 return false;
571 if (!vfp_access_check(s)) {
572 return true;
575 tmp = neon_load_reg(a->vn, pass);
576 switch (a->size) {
577 case 0:
578 if (offset) {
579 tcg_gen_shri_i32(tmp, tmp, offset);
581 if (a->u) {
582 gen_uxtb(tmp);
583 } else {
584 gen_sxtb(tmp);
586 break;
587 case 1:
588 if (a->u) {
589 if (offset) {
590 tcg_gen_shri_i32(tmp, tmp, 16);
591 } else {
592 gen_uxth(tmp);
594 } else {
595 if (offset) {
596 tcg_gen_sari_i32(tmp, tmp, 16);
597 } else {
598 gen_sxth(tmp);
601 break;
602 case 2:
603 break;
605 store_reg(s, a->rt, tmp);
607 return true;
610 static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
612 /* VMOV general purpose register to scalar */
613 TCGv_i32 tmp, tmp2;
614 int pass;
615 uint32_t offset;
617 /* UNDEF accesses to D16-D31 if they don't exist */
618 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
619 return false;
622 offset = a->index << a->size;
623 pass = extract32(offset, 2, 1);
624 offset = extract32(offset, 0, 2) * 8;
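/*
 * Same element addressing as in trans_VMOV_to_gp above: 'pass' selects the
 * 32-bit half of the D register and 'offset' the bit position of the
 * scalar element within it.
 */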
626 if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
627 return false;
630 if (!vfp_access_check(s)) {
631 return true;
634 tmp = load_reg(s, a->rt);
635 switch (a->size) {
636 case 0:
637 tmp2 = neon_load_reg(a->vn, pass);
638 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
639 tcg_temp_free_i32(tmp2);
640 break;
641 case 1:
642 tmp2 = neon_load_reg(a->vn, pass);
643 tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
644 tcg_temp_free_i32(tmp2);
645 break;
646 case 2:
647 break;
649 neon_store_reg(a->vn, pass, tmp);
651 return true;
654 static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
656 /* VDUP (general purpose register) */
657 TCGv_i32 tmp;
658 int size, vec_size;
660 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
661 return false;
664 /* UNDEF accesses to D16-D31 if they don't exist */
665 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
666 return false;
669 if (a->b && a->e) {
670 return false;
673 if (a->q && (a->vn & 1)) {
674 return false;
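/*
 * Decode the element size and destination width: b set selects 8-bit
 * elements, e set selects 16-bit, neither means 32-bit; q selects a
 * 128-bit (Q) rather than 64-bit (D) destination for the duplication.
 */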
677 vec_size = a->q ? 16 : 8;
678 if (a->b) {
679 size = 0;
680 } else if (a->e) {
681 size = 1;
682 } else {
683 size = 2;
686 if (!vfp_access_check(s)) {
687 return true;
690 tmp = load_reg(s, a->rt);
691 tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
692 vec_size, vec_size, tmp);
693 tcg_temp_free_i32(tmp);
695 return true;
698 static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
700 TCGv_i32 tmp;
701 bool ignore_vfp_enabled = false;
703 if (arm_dc_feature(s, ARM_FEATURE_M)) {
705 * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
706 * Writes to R15 are UNPREDICTABLE; we choose to undef.
708 if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
709 return false;
713 switch (a->reg) {
714 case ARM_VFP_FPSID:
716 * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
717 * all ID registers to privileged access only.
719 if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
720 return false;
722 ignore_vfp_enabled = true;
723 break;
724 case ARM_VFP_MVFR0:
725 case ARM_VFP_MVFR1:
726 if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
727 return false;
729 ignore_vfp_enabled = true;
730 break;
731 case ARM_VFP_MVFR2:
732 if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
733 return false;
735 ignore_vfp_enabled = true;
736 break;
737 case ARM_VFP_FPSCR:
738 break;
739 case ARM_VFP_FPEXC:
740 if (IS_USER(s)) {
741 return false;
743 ignore_vfp_enabled = true;
744 break;
745 case ARM_VFP_FPINST:
746 case ARM_VFP_FPINST2:
747 /* Not present in VFPv3 */
748 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
749 return false;
751 break;
752 default:
753 return false;
756 if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
757 return true;
760 if (a->l) {
761 /* VMRS, move VFP special register to gp register */
762 switch (a->reg) {
763 case ARM_VFP_FPSID:
764 case ARM_VFP_FPEXC:
765 case ARM_VFP_FPINST:
766 case ARM_VFP_FPINST2:
767 case ARM_VFP_MVFR0:
768 case ARM_VFP_MVFR1:
769 case ARM_VFP_MVFR2:
770 tmp = load_cpu_field(vfp.xregs[a->reg]);
771 break;
772 case ARM_VFP_FPSCR:
773 if (a->rt == 15) {
774 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
775 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
776 } else {
777 tmp = tcg_temp_new_i32();
778 gen_helper_vfp_get_fpscr(tmp, cpu_env);
780 break;
781 default:
782 g_assert_not_reached();
785 if (a->rt == 15) {
786 /* Set the 4 flag bits in the CPSR. */
787 gen_set_nzcv(tmp);
788 tcg_temp_free_i32(tmp);
789 } else {
790 store_reg(s, a->rt, tmp);
792 } else {
793 /* VMSR, move gp register to VFP special register */
794 switch (a->reg) {
795 case ARM_VFP_FPSID:
796 case ARM_VFP_MVFR0:
797 case ARM_VFP_MVFR1:
798 case ARM_VFP_MVFR2:
799 /* Writes are ignored. */
800 break;
801 case ARM_VFP_FPSCR:
802 tmp = load_reg(s, a->rt);
803 gen_helper_vfp_set_fpscr(cpu_env, tmp);
804 tcg_temp_free_i32(tmp);
805 gen_lookup_tb(s);
806 break;
807 case ARM_VFP_FPEXC:
809 * TODO: VFP subarchitecture support.
810 * For now, keep the EN bit only
812 tmp = load_reg(s, a->rt);
813 tcg_gen_andi_i32(tmp, tmp, 1 << 30);
814 store_cpu_field(tmp, vfp.xregs[a->reg]);
815 gen_lookup_tb(s);
816 break;
817 case ARM_VFP_FPINST:
818 case ARM_VFP_FPINST2:
819 tmp = load_reg(s, a->rt);
820 store_cpu_field(tmp, vfp.xregs[a->reg]);
821 break;
822 default:
823 g_assert_not_reached();
827 return true;
830 static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
832 TCGv_i32 tmp;
834 if (!vfp_access_check(s)) {
835 return true;
838 if (a->l) {
839 /* VFP to general purpose register */
840 tmp = tcg_temp_new_i32();
841 neon_load_reg32(tmp, a->vn);
842 if (a->rt == 15) {
843 /* Set the 4 flag bits in the CPSR. */
844 gen_set_nzcv(tmp);
845 tcg_temp_free_i32(tmp);
846 } else {
847 store_reg(s, a->rt, tmp);
849 } else {
850 /* general purpose register to VFP */
851 tmp = load_reg(s, a->rt);
852 neon_store_reg32(tmp, a->vn);
853 tcg_temp_free_i32(tmp);
856 return true;
859 static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
861 TCGv_i32 tmp;
864 * VMOV between two general-purpose registers and two single precision
865 * floating point registers
867 if (!vfp_access_check(s)) {
868 return true;
871 if (a->op) {
872 /* fpreg to gpreg */
873 tmp = tcg_temp_new_i32();
874 neon_load_reg32(tmp, a->vm);
875 store_reg(s, a->rt, tmp);
876 tmp = tcg_temp_new_i32();
877 neon_load_reg32(tmp, a->vm + 1);
878 store_reg(s, a->rt2, tmp);
879 } else {
880 /* gpreg to fpreg */
881 tmp = load_reg(s, a->rt);
882 neon_store_reg32(tmp, a->vm);
883 tmp = load_reg(s, a->rt2);
884 neon_store_reg32(tmp, a->vm + 1);
887 return true;
890 static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
892 TCGv_i32 tmp;
895 * VMOV between two general-purpose registers and one double precision
896 * floating point register
899 /* UNDEF accesses to D16-D31 if they don't exist */
900 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
901 return false;
904 if (!vfp_access_check(s)) {
905 return true;
908 if (a->op) {
909 /* fpreg to gpreg */
910 tmp = tcg_temp_new_i32();
911 neon_load_reg32(tmp, a->vm * 2);
912 store_reg(s, a->rt, tmp);
913 tmp = tcg_temp_new_i32();
914 neon_load_reg32(tmp, a->vm * 2 + 1);
915 store_reg(s, a->rt2, tmp);
916 } else {
917 /* gpreg to fpreg */
918 tmp = load_reg(s, a->rt);
919 neon_store_reg32(tmp, a->vm * 2);
920 tcg_temp_free_i32(tmp);
921 tmp = load_reg(s, a->rt2);
922 neon_store_reg32(tmp, a->vm * 2 + 1);
923 tcg_temp_free_i32(tmp);
926 return true;
929 static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
931 uint32_t offset;
932 TCGv_i32 addr, tmp;
934 if (!vfp_access_check(s)) {
935 return true;
938 offset = a->imm << 2;
939 if (!a->u) {
940 offset = -offset;
943 /* For thumb, use of PC is UNPREDICTABLE. */
944 addr = add_reg_for_lit(s, a->rn, offset);
945 tmp = tcg_temp_new_i32();
946 if (a->l) {
947 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
948 neon_store_reg32(tmp, a->vd);
949 } else {
950 neon_load_reg32(tmp, a->vd);
951 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
953 tcg_temp_free_i32(tmp);
954 tcg_temp_free_i32(addr);
956 return true;
959 static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
961 uint32_t offset;
962 TCGv_i32 addr;
963 TCGv_i64 tmp;
965 /* UNDEF accesses to D16-D31 if they don't exist */
966 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
967 return false;
970 if (!vfp_access_check(s)) {
971 return true;
974 offset = a->imm << 2;
975 if (!a->u) {
976 offset = -offset;
979 /* For thumb, use of PC is UNPREDICTABLE. */
980 addr = add_reg_for_lit(s, a->rn, offset);
981 tmp = tcg_temp_new_i64();
982 if (a->l) {
983 gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
984 neon_store_reg64(tmp, a->vd);
985 } else {
986 neon_load_reg64(tmp, a->vd);
987 gen_aa32_st64(s, tmp, addr, get_mem_index(s));
989 tcg_temp_free_i64(tmp);
990 tcg_temp_free_i32(addr);
992 return true;
995 static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
997 uint32_t offset;
998 TCGv_i32 addr, tmp;
999 int i, n;
1001 n = a->imm;
1003 if (n == 0 || (a->vd + n) > 32) {
1005 * UNPREDICTABLE cases for bad immediates: we choose to
1006 * UNDEF to avoid generating huge numbers of TCG ops
1008 return false;
1010 if (a->rn == 15 && a->w) {
1011 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
1012 return false;
1015 if (!vfp_access_check(s)) {
1016 return true;
1019 /* For thumb, use of PC is UNPREDICTABLE. */
1020 addr = add_reg_for_lit(s, a->rn, 0);
1021 if (a->p) {
1022 /* pre-decrement */
1023 tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
1026 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
1028 * Here 'addr' is the lowest address we will store to,
1029 * and is either the old SP (if post-increment) or
1030 * the new SP (if pre-decrement). For post-increment
1031 * where the old value is below the limit and the new
1032 * value is above, it is UNKNOWN whether the limit check
1033 * triggers; we choose to trigger.
1035 gen_helper_v8m_stackcheck(cpu_env, addr);
1038 offset = 4;
1039 tmp = tcg_temp_new_i32();
1040 for (i = 0; i < n; i++) {
1041 if (a->l) {
1042 /* load */
1043 gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
1044 neon_store_reg32(tmp, a->vd + i);
1045 } else {
1046 /* store */
1047 neon_load_reg32(tmp, a->vd + i);
1048 gen_aa32_st32(s, tmp, addr, get_mem_index(s));
1050 tcg_gen_addi_i32(addr, addr, offset);
1052 tcg_temp_free_i32(tmp);
1053 if (a->w) {
1054 /* writeback */
1055 if (a->p) {
1056 offset = -offset * n;
1057 tcg_gen_addi_i32(addr, addr, offset);
1059 store_reg(s, a->rn, addr);
1060 } else {
1061 tcg_temp_free_i32(addr);
1064 return true;
1067 static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
1069 uint32_t offset;
1070 TCGv_i32 addr;
1071 TCGv_i64 tmp;
1072 int i, n;
1074 n = a->imm >> 1;
1076 if (n == 0 || (a->vd + n) > 32 || n > 16) {
1078 * UNPREDICTABLE cases for bad immediates: we choose to
1079 * UNDEF to avoid generating huge numbers of TCG ops
1081 return false;
1083 if (a->rn == 15 && a->w) {
1084 /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
1085 return false;
1088 /* UNDEF accesses to D16-D31 if they don't exist */
1089 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
1090 return false;
1093 if (!vfp_access_check(s)) {
1094 return true;
1097 /* For thumb, use of PC is UNPREDICTABLE. */
1098 addr = add_reg_for_lit(s, a->rn, 0);
1099 if (a->p) {
1100 /* pre-decrement */
1101 tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
1104 if (s->v8m_stackcheck && a->rn == 13 && a->w) {
1106 * Here 'addr' is the lowest address we will store to,
1107 * and is either the old SP (if post-increment) or
1108 * the new SP (if pre-decrement). For post-increment
1109 * where the old value is below the limit and the new
1110 * value is above, it is UNKNOWN whether the limit check
1111 * triggers; we choose to trigger.
1113 gen_helper_v8m_stackcheck(cpu_env, addr);
1116 offset = 8;
1117 tmp = tcg_temp_new_i64();
1118 for (i = 0; i < n; i++) {
1119 if (a->l) {
1120 /* load */
1121 gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
1122 neon_store_reg64(tmp, a->vd + i);
1123 } else {
1124 /* store */
1125 neon_load_reg64(tmp, a->vd + i);
1126 gen_aa32_st64(s, tmp, addr, get_mem_index(s));
1128 tcg_gen_addi_i32(addr, addr, offset);
1130 tcg_temp_free_i64(tmp);
1131 if (a->w) {
1132 /* writeback */
1133 if (a->p) {
1134 offset = -offset * n;
1135 } else if (a->imm & 1) {
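/*
 * An odd imm is the FLDMX/FSTMX form, whose writeback amount is
 * 8 * n + 4 bytes; the loop above has already advanced addr by 8 * n,
 * so only the remaining 4 bytes are added here.
 */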
1136 offset = 4;
1137 } else {
1138 offset = 0;
1141 if (offset != 0) {
1142 tcg_gen_addi_i32(addr, addr, offset);
1144 store_reg(s, a->rn, addr);
1145 } else {
1146 tcg_temp_free_i32(addr);
1149 return true;
1153 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
1154 * The callback should emit code to write a value to vd. If
1155 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
1156 * will contain the old value of the relevant VFP register;
1157 * otherwise it must be written to only.
1159 typedef void VFPGen3OpSPFn(TCGv_i32 vd,
1160 TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
1161 typedef void VFPGen3OpDPFn(TCGv_i64 vd,
1162 TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
1165 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
1166 * The callback should emit code to write a value to vd (which
1167 * should be written to only).
1169 typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
1170 typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
1173 * Return true if the specified S reg is in a scalar bank
1174 * (ie if it is s0..s7)
1176 static inline bool vfp_sreg_is_scalar(int reg)
1178 return (reg & 0x18) == 0;
1182 * Return true if the specified D reg is in a scalar bank
1183 * (ie if it is d0..d3 or d16..d19)
1185 static inline bool vfp_dreg_is_scalar(int reg)
1187 return (reg & 0xc) == 0;
1191 * Advance the S reg number forwards by delta within its bank
1192 * (ie increment the low 3 bits but leave the rest the same)
1194 static inline int vfp_advance_sreg(int reg, int delta)
1196 return ((reg + delta) & 0x7) | (reg & ~0x7);
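/*
 * Worked example (illustrative): vfp_advance_sreg(14, 3) returns 9,
 * i.e. s14 advanced by 3 wraps around within the s8..s15 bank.
 */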
1200 * Advance the D reg number forwards by delta within its bank
1201 * (ie increment the low 2 bits but leave the rest the same)
1203 static inline int vfp_advance_dreg(int reg, int delta)
1205 return ((reg + delta) & 0x3) | (reg & ~0x3);
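/*
 * Worked example (illustrative): vfp_advance_dreg(6, 2) returns 4,
 * i.e. d6 advanced by 2 wraps around within the d4..d7 bank.
 */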
1209 * Perform a 3-operand VFP data processing instruction. fn is the
1210 * callback to do the actual operation; this function deals with the
1211 * code to handle looping around for VFP vector processing.
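/*
 * Illustration: with FPSCR.LEN set for a vector length of 4 and the
 * default stride, an operation whose Sd is s8 iterates over s8..s11,
 * advancing Sn and Sm within their own banks in the same way; an Sm in
 * the scalar bank s0..s7 is instead reused unchanged for every iteration.
 */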
1213 static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
1214 int vd, int vn, int vm, bool reads_vd)
1216 uint32_t delta_m = 0;
1217 uint32_t delta_d = 0;
1218 int veclen = s->vec_len;
1219 TCGv_i32 f0, f1, fd;
1220 TCGv_ptr fpst;
1222 if (!dc_isar_feature(aa32_fpshvec, s) &&
1223 (veclen != 0 || s->vec_stride != 0)) {
1224 return false;
1227 if (!vfp_access_check(s)) {
1228 return true;
1231 if (veclen > 0) {
1232 /* Figure out what type of vector operation this is. */
1233 if (vfp_sreg_is_scalar(vd)) {
1234 /* scalar */
1235 veclen = 0;
1236 } else {
1237 delta_d = s->vec_stride + 1;
1239 if (vfp_sreg_is_scalar(vm)) {
1240 /* mixed scalar/vector */
1241 delta_m = 0;
1242 } else {
1243 /* vector */
1244 delta_m = delta_d;
1249 f0 = tcg_temp_new_i32();
1250 f1 = tcg_temp_new_i32();
1251 fd = tcg_temp_new_i32();
1252 fpst = get_fpstatus_ptr(0);
1254 neon_load_reg32(f0, vn);
1255 neon_load_reg32(f1, vm);
1257 for (;;) {
1258 if (reads_vd) {
1259 neon_load_reg32(fd, vd);
1261 fn(fd, f0, f1, fpst);
1262 neon_store_reg32(fd, vd);
1264 if (veclen == 0) {
1265 break;
1268 /* Set up the operands for the next iteration */
1269 veclen--;
1270 vd = vfp_advance_sreg(vd, delta_d);
1271 vn = vfp_advance_sreg(vn, delta_d);
1272 neon_load_reg32(f0, vn);
1273 if (delta_m) {
1274 vm = vfp_advance_sreg(vm, delta_m);
1275 neon_load_reg32(f1, vm);
1279 tcg_temp_free_i32(f0);
1280 tcg_temp_free_i32(f1);
1281 tcg_temp_free_i32(fd);
1282 tcg_temp_free_ptr(fpst);
1284 return true;
1287 static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
1288 int vd, int vn, int vm, bool reads_vd)
1290 uint32_t delta_m = 0;
1291 uint32_t delta_d = 0;
1292 int veclen = s->vec_len;
1293 TCGv_i64 f0, f1, fd;
1294 TCGv_ptr fpst;
1296 /* UNDEF accesses to D16-D31 if they don't exist */
1297 if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
1298 return false;
1301 if (!dc_isar_feature(aa32_fpdp, s)) {
1302 return false;
1305 if (!dc_isar_feature(aa32_fpshvec, s) &&
1306 (veclen != 0 || s->vec_stride != 0)) {
1307 return false;
1310 if (!vfp_access_check(s)) {
1311 return true;
1314 if (veclen > 0) {
1315 /* Figure out what type of vector operation this is. */
1316 if (vfp_dreg_is_scalar(vd)) {
1317 /* scalar */
1318 veclen = 0;
1319 } else {
1320 delta_d = (s->vec_stride >> 1) + 1;
1322 if (vfp_dreg_is_scalar(vm)) {
1323 /* mixed scalar/vector */
1324 delta_m = 0;
1325 } else {
1326 /* vector */
1327 delta_m = delta_d;
1332 f0 = tcg_temp_new_i64();
1333 f1 = tcg_temp_new_i64();
1334 fd = tcg_temp_new_i64();
1335 fpst = get_fpstatus_ptr(0);
1337 neon_load_reg64(f0, vn);
1338 neon_load_reg64(f1, vm);
1340 for (;;) {
1341 if (reads_vd) {
1342 neon_load_reg64(fd, vd);
1344 fn(fd, f0, f1, fpst);
1345 neon_store_reg64(fd, vd);
1347 if (veclen == 0) {
1348 break;
1350 /* Set up the operands for the next iteration */
1351 veclen--;
1352 vd = vfp_advance_dreg(vd, delta_d);
1353 vn = vfp_advance_dreg(vn, delta_d);
1354 neon_load_reg64(f0, vn);
1355 if (delta_m) {
1356 vm = vfp_advance_dreg(vm, delta_m);
1357 neon_load_reg64(f1, vm);
1361 tcg_temp_free_i64(f0);
1362 tcg_temp_free_i64(f1);
1363 tcg_temp_free_i64(fd);
1364 tcg_temp_free_ptr(fpst);
1366 return true;
1369 static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
1371 uint32_t delta_m = 0;
1372 uint32_t delta_d = 0;
1373 int veclen = s->vec_len;
1374 TCGv_i32 f0, fd;
1376 if (!dc_isar_feature(aa32_fpshvec, s) &&
1377 (veclen != 0 || s->vec_stride != 0)) {
1378 return false;
1381 if (!vfp_access_check(s)) {
1382 return true;
1385 if (veclen > 0) {
1386 /* Figure out what type of vector operation this is. */
1387 if (vfp_sreg_is_scalar(vd)) {
1388 /* scalar */
1389 veclen = 0;
1390 } else {
1391 delta_d = s->vec_stride + 1;
1393 if (vfp_sreg_is_scalar(vm)) {
1394 /* mixed scalar/vector */
1395 delta_m = 0;
1396 } else {
1397 /* vector */
1398 delta_m = delta_d;
1403 f0 = tcg_temp_new_i32();
1404 fd = tcg_temp_new_i32();
1406 neon_load_reg32(f0, vm);
1408 for (;;) {
1409 fn(fd, f0);
1410 neon_store_reg32(fd, vd);
1412 if (veclen == 0) {
1413 break;
1416 if (delta_m == 0) {
1417 /* single source one-many */
1418 while (veclen--) {
1419 vd = vfp_advance_sreg(vd, delta_d);
1420 neon_store_reg32(fd, vd);
1422 break;
1425 /* Set up the operands for the next iteration */
1426 veclen--;
1427 vd = vfp_advance_sreg(vd, delta_d);
1428 vm = vfp_advance_sreg(vm, delta_m);
1429 neon_load_reg32(f0, vm);
1432 tcg_temp_free_i32(f0);
1433 tcg_temp_free_i32(fd);
1435 return true;
1438 static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
1440 uint32_t delta_m = 0;
1441 uint32_t delta_d = 0;
1442 int veclen = s->vec_len;
1443 TCGv_i64 f0, fd;
1445 /* UNDEF accesses to D16-D31 if they don't exist */
1446 if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
1447 return false;
1450 if (!dc_isar_feature(aa32_fpdp, s)) {
1451 return false;
1454 if (!dc_isar_feature(aa32_fpshvec, s) &&
1455 (veclen != 0 || s->vec_stride != 0)) {
1456 return false;
1459 if (!vfp_access_check(s)) {
1460 return true;
1463 if (veclen > 0) {
1464 /* Figure out what type of vector operation this is. */
1465 if (vfp_dreg_is_scalar(vd)) {
1466 /* scalar */
1467 veclen = 0;
1468 } else {
1469 delta_d = (s->vec_stride >> 1) + 1;
1471 if (vfp_dreg_is_scalar(vm)) {
1472 /* mixed scalar/vector */
1473 delta_m = 0;
1474 } else {
1475 /* vector */
1476 delta_m = delta_d;
1481 f0 = tcg_temp_new_i64();
1482 fd = tcg_temp_new_i64();
1484 neon_load_reg64(f0, vm);
1486 for (;;) {
1487 fn(fd, f0);
1488 neon_store_reg64(fd, vd);
1490 if (veclen == 0) {
1491 break;
1494 if (delta_m == 0) {
1495 /* single source one-many */
1496 while (veclen--) {
1497 vd = vfp_advance_dreg(vd, delta_d);
1498 neon_store_reg64(fd, vd);
1500 break;
1503 /* Set up the operands for the next iteration */
1504 veclen--;
1505 vd = vfp_advance_dreg(vd, delta_d);
1506 vm = vfp_advance_dreg(vm, delta_m);
1507 neon_load_reg64(f0, vm);
1510 tcg_temp_free_i64(f0);
1511 tcg_temp_free_i64(fd);
1513 return true;
1516 static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1518 /* VMLA: vd = vd + (vn * vm). Note that order of inputs to the add matters for NaNs */
1519 TCGv_i32 tmp = tcg_temp_new_i32();
1521 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1522 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1523 tcg_temp_free_i32(tmp);
1526 static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
1528 return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
1531 static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1533 /* VMLA: vd = vd + (vn * vm). Note that order of inputs to the add matters for NaNs */
1534 TCGv_i64 tmp = tcg_temp_new_i64();
1536 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1537 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1538 tcg_temp_free_i64(tmp);
1541 static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
1543 return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
1546 static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1549 * VMLS: vd = vd + -(vn * vm)
1550 * Note that order of inputs to the add matters for NaNs.
1552 TCGv_i32 tmp = tcg_temp_new_i32();
1554 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1555 gen_helper_vfp_negs(tmp, tmp);
1556 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1557 tcg_temp_free_i32(tmp);
1560 static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
1562 return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
1565 static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1568 * VMLS: vd = vd + -(vn * vm)
1569 * Note that order of inputs to the add matters for NaNs.
1571 TCGv_i64 tmp = tcg_temp_new_i64();
1573 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1574 gen_helper_vfp_negd(tmp, tmp);
1575 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1576 tcg_temp_free_i64(tmp);
1579 static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
1581 return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
1584 static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1587 * VNMLS: -fd + (fn * fm)
1588 * Note that it isn't valid to replace (-A + B) with (B - A) or similar
1589 * plausible looking simplifications because this will give wrong results
1590 * for NaNs.
1592 TCGv_i32 tmp = tcg_temp_new_i32();
1594 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1595 gen_helper_vfp_negs(vd, vd);
1596 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1597 tcg_temp_free_i32(tmp);
1600 static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
1602 return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
1605 static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1608 * VNMLS: -fd + (fn * fm)
1609 * Note that it isn't valid to replace (-A + B) with (B - A) or similar
1610 * plausible looking simplifications because this will give wrong results
1611 * for NaNs.
1613 TCGv_i64 tmp = tcg_temp_new_i64();
1615 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1616 gen_helper_vfp_negd(vd, vd);
1617 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1618 tcg_temp_free_i64(tmp);
1621 static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
1623 return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
1626 static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1628 /* VNMLA: -fd + -(fn * fm) */
1629 TCGv_i32 tmp = tcg_temp_new_i32();
1631 gen_helper_vfp_muls(tmp, vn, vm, fpst);
1632 gen_helper_vfp_negs(tmp, tmp);
1633 gen_helper_vfp_negs(vd, vd);
1634 gen_helper_vfp_adds(vd, vd, tmp, fpst);
1635 tcg_temp_free_i32(tmp);
1638 static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
1640 return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
1643 static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1645 /* VNMLA: -fd + -(fn * fm) */
1646 TCGv_i64 tmp = tcg_temp_new_i64();
1648 gen_helper_vfp_muld(tmp, vn, vm, fpst);
1649 gen_helper_vfp_negd(tmp, tmp);
1650 gen_helper_vfp_negd(vd, vd);
1651 gen_helper_vfp_addd(vd, vd, tmp, fpst);
1652 tcg_temp_free_i64(tmp);
1655 static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
1657 return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
1660 static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
1662 return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
1665 static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
1667 return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
1670 static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
1672 /* VNMUL: -(fn * fm) */
1673 gen_helper_vfp_muls(vd, vn, vm, fpst);
1674 gen_helper_vfp_negs(vd, vd);
1677 static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
1679 return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
1682 static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
1684 /* VNMUL: -(fn * fm) */
1685 gen_helper_vfp_muld(vd, vn, vm, fpst);
1686 gen_helper_vfp_negd(vd, vd);
1689 static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
1691 return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
1694 static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
1696 return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
1699 static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
1701 return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
1704 static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
1706 return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
1709 static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
1711 return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
1714 static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
1716 return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
1719 static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
1721 return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
1724 static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
1727 * VFNMA : fd = muladd(-fd, fn, fm)
1728 * VFNMS : fd = muladd(-fd, -fn, fm)
1729 * VFMA : fd = muladd( fd, fn, fm)
1730 * VFMS : fd = muladd( fd, -fn, fm)
1732 * These are fused multiply-add, and must be done as one floating
1733 * point operation with no rounding between the multiplication and
1734 * addition steps. NB that doing the negations here as separate
1735 * steps is correct: an input NaN should come out with its sign
1736 * bit flipped if it is a negated input.
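/*
 * For instance, VFMA below becomes a single call to the fused
 * multiply-add helper, computing vd = vn * vm + vd with one rounding
 * step, rather than a separate multiply followed by an add.
 */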
1738 TCGv_ptr fpst;
1739 TCGv_i32 vn, vm, vd;
1742 * Present in VFPv4 only.
1743 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
1744 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
1746 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
1747 (s->vec_len != 0 || s->vec_stride != 0)) {
1748 return false;
1751 if (!vfp_access_check(s)) {
1752 return true;
1755 vn = tcg_temp_new_i32();
1756 vm = tcg_temp_new_i32();
1757 vd = tcg_temp_new_i32();
1759 neon_load_reg32(vn, a->vn);
1760 neon_load_reg32(vm, a->vm);
1761 if (a->o2) {
1762 /* VFNMS, VFMS */
1763 gen_helper_vfp_negs(vn, vn);
1765 neon_load_reg32(vd, a->vd);
1766 if (a->o1 & 1) {
1767 /* VFNMA, VFNMS */
1768 gen_helper_vfp_negs(vd, vd);
1770 fpst = get_fpstatus_ptr(0);
1771 gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
1772 neon_store_reg32(vd, a->vd);
1774 tcg_temp_free_ptr(fpst);
1775 tcg_temp_free_i32(vn);
1776 tcg_temp_free_i32(vm);
1777 tcg_temp_free_i32(vd);
1779 return true;
1782 static bool trans_VFM_dp(DisasContext *s, arg_VFM_dp *a)
1785 * VFNMA : fd = muladd(-fd, fn, fm)
1786 * VFNMS : fd = muladd(-fd, -fn, fm)
1787 * VFMA : fd = muladd( fd, fn, fm)
1788 * VFMS : fd = muladd( fd, -fn, fm)
1790 * These are fused multiply-add, and must be done as one floating
1791 * point operation with no rounding between the multiplication and
1792 * addition steps. NB that doing the negations here as separate
1793 * steps is correct: an input NaN should come out with its sign
1794 * bit flipped if it is a negated input.
1796 TCGv_ptr fpst;
1797 TCGv_i64 vn, vm, vd;
1800 * Present in VFPv4 only.
1801 * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
1802 * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
1804 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
1805 (s->vec_len != 0 || s->vec_stride != 0)) {
1806 return false;
1809 /* UNDEF accesses to D16-D31 if they don't exist. */
1810 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) {
1811 return false;
1814 if (!dc_isar_feature(aa32_fpdp, s)) {
1815 return false;
1818 if (!vfp_access_check(s)) {
1819 return true;
1822 vn = tcg_temp_new_i64();
1823 vm = tcg_temp_new_i64();
1824 vd = tcg_temp_new_i64();
1826 neon_load_reg64(vn, a->vn);
1827 neon_load_reg64(vm, a->vm);
1828 if (a->o2) {
1829 /* VFNMS, VFMS */
1830 gen_helper_vfp_negd(vn, vn);
1832 neon_load_reg64(vd, a->vd);
1833 if (a->o1 & 1) {
1834 /* VFNMA, VFNMS */
1835 gen_helper_vfp_negd(vd, vd);
1837 fpst = get_fpstatus_ptr(0);
1838 gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
1839 neon_store_reg64(vd, a->vd);
1841 tcg_temp_free_ptr(fpst);
1842 tcg_temp_free_i64(vn);
1843 tcg_temp_free_i64(vm);
1844 tcg_temp_free_i64(vd);
1846 return true;
1849 static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
1851 uint32_t delta_d = 0;
1852 int veclen = s->vec_len;
1853 TCGv_i32 fd;
1854 uint32_t vd;
1856 vd = a->vd;
1858 if (!dc_isar_feature(aa32_fpshvec, s) &&
1859 (veclen != 0 || s->vec_stride != 0)) {
1860 return false;
1863 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
1864 return false;
1867 if (!vfp_access_check(s)) {
1868 return true;
1871 if (veclen > 0) {
1872 /* Figure out what type of vector operation this is. */
1873 if (vfp_sreg_is_scalar(vd)) {
1874 /* scalar */
1875 veclen = 0;
1876 } else {
1877 delta_d = s->vec_stride + 1;
1881 fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
1883 for (;;) {
1884 neon_store_reg32(fd, vd);
1886 if (veclen == 0) {
1887 break;
1890 /* Set up the operands for the next iteration */
1891 veclen--;
1892 vd = vfp_advance_sreg(vd, delta_d);
1895 tcg_temp_free_i32(fd);
1896 return true;
1899 static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
1901 uint32_t delta_d = 0;
1902 int veclen = s->vec_len;
1903 TCGv_i64 fd;
1904 uint32_t vd;
1906 vd = a->vd;
1908 /* UNDEF accesses to D16-D31 if they don't exist. */
1909 if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
1910 return false;
1913 if (!dc_isar_feature(aa32_fpdp, s)) {
1914 return false;
1917 if (!dc_isar_feature(aa32_fpshvec, s) &&
1918 (veclen != 0 || s->vec_stride != 0)) {
1919 return false;
1922 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
1923 return false;
1926 if (!vfp_access_check(s)) {
1927 return true;
1930 if (veclen > 0) {
1931 /* Figure out what type of vector operation this is. */
1932 if (vfp_dreg_is_scalar(vd)) {
1933 /* scalar */
1934 veclen = 0;
1935 } else {
1936 delta_d = (s->vec_stride >> 1) + 1;
1940 fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
1942 for (;;) {
1943 neon_store_reg64(fd, vd);
1945 if (veclen == 0) {
1946 break;
1949 /* Set up the operands for the next iteration */
1950 veclen--;
1951 vd = vfp_advance_dreg(vd, delta_d);
1954 tcg_temp_free_i64(fd);
1955 return true;
1958 static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
1960 return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
1963 static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
1965 return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
1968 static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
1970 return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
1973 static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
1975 return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
1978 static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
1980 return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
1983 static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
1985 return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
1988 static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
1990 gen_helper_vfp_sqrts(vd, vm, cpu_env);
1993 static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
1995 return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
1998 static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
2000 gen_helper_vfp_sqrtd(vd, vm, cpu_env);
2003 static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
2005 return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
2008 static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
2010 TCGv_i32 vd, vm;
2012 /* Vm/M bits must be zero for the Z variant */
2013 if (a->z && a->vm != 0) {
2014 return false;
2017 if (!vfp_access_check(s)) {
2018 return true;
2021 vd = tcg_temp_new_i32();
2022 vm = tcg_temp_new_i32();
2024 neon_load_reg32(vd, a->vd);
2025 if (a->z) {
2026 tcg_gen_movi_i32(vm, 0);
2027 } else {
2028 neon_load_reg32(vm, a->vm);
2031 if (a->e) {
2032 gen_helper_vfp_cmpes(vd, vm, cpu_env);
2033 } else {
2034 gen_helper_vfp_cmps(vd, vm, cpu_env);
2037 tcg_temp_free_i32(vd);
2038 tcg_temp_free_i32(vm);
2040 return true;
2043 static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
2045 TCGv_i64 vd, vm;
2047 /* Vm/M bits must be zero for the Z variant */
2048 if (a->z && a->vm != 0) {
2049 return false;
2052 /* UNDEF accesses to D16-D31 if they don't exist. */
2053 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2054 return false;
2057 if (!dc_isar_feature(aa32_fpdp, s)) {
2058 return false;
2061 if (!vfp_access_check(s)) {
2062 return true;
2065 vd = tcg_temp_new_i64();
2066 vm = tcg_temp_new_i64();
2068 neon_load_reg64(vd, a->vd);
2069 if (a->z) {
2070 tcg_gen_movi_i64(vm, 0);
2071 } else {
2072 neon_load_reg64(vm, a->vm);
2075 if (a->e) {
2076 gen_helper_vfp_cmped(vd, vm, cpu_env);
2077 } else {
2078 gen_helper_vfp_cmpd(vd, vm, cpu_env);
2081 tcg_temp_free_i64(vd);
2082 tcg_temp_free_i64(vm);
2084 return true;
2087 static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
2089 TCGv_ptr fpst;
2090 TCGv_i32 ahp_mode;
2091 TCGv_i32 tmp;
2093 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2094 return false;
2097 if (!vfp_access_check(s)) {
2098 return true;
2101 fpst = get_fpstatus_ptr(false);
2102 ahp_mode = get_ahp_flag();
2103 tmp = tcg_temp_new_i32();
2104 /* The T bit tells us if we want the low or high 16 bits of Vm */
2105 tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2106 gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
2107 neon_store_reg32(tmp, a->vd);
2108 tcg_temp_free_i32(ahp_mode);
2109 tcg_temp_free_ptr(fpst);
2110 tcg_temp_free_i32(tmp);
2111 return true;
2114 static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
2116 TCGv_ptr fpst;
2117 TCGv_i32 ahp_mode;
2118 TCGv_i32 tmp;
2119 TCGv_i64 vd;
2121 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
2122 return false;
2125 /* UNDEF accesses to D16-D31 if they don't exist. */
2126 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2127 return false;
2130 if (!dc_isar_feature(aa32_fpdp, s)) {
2131 return false;
2134 if (!vfp_access_check(s)) {
2135 return true;
2138 fpst = get_fpstatus_ptr(false);
2139 ahp_mode = get_ahp_flag();
2140 tmp = tcg_temp_new_i32();
2141 /* The T bit tells us if we want the low or high 16 bits of Vm */
2142 tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
2143 vd = tcg_temp_new_i64();
2144 gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
2145 neon_store_reg64(vd, a->vd);
2146 tcg_temp_free_i32(ahp_mode);
2147 tcg_temp_free_ptr(fpst);
2148 tcg_temp_free_i32(tmp);
2149 tcg_temp_free_i64(vd);
2150 return true;
2153 static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
2155 TCGv_ptr fpst;
2156 TCGv_i32 ahp_mode;
2157 TCGv_i32 tmp;
2159 if (!dc_isar_feature(aa32_fp16_spconv, s)) {
2160 return false;
2163 if (!vfp_access_check(s)) {
2164 return true;
2167 fpst = get_fpstatus_ptr(false);
2168 ahp_mode = get_ahp_flag();
2169 tmp = tcg_temp_new_i32();
2171 neon_load_reg32(tmp, a->vm);
2172 gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
2173 tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2174 tcg_temp_free_i32(ahp_mode);
2175 tcg_temp_free_ptr(fpst);
2176 tcg_temp_free_i32(tmp);
2177 return true;
2180 static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
2182 TCGv_ptr fpst;
2183 TCGv_i32 ahp_mode;
2184 TCGv_i32 tmp;
2185 TCGv_i64 vm;
2187 if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
2188 return false;
2191 /* UNDEF accesses to D16-D31 if they don't exist. */
2192 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2193 return false;
2196 if (!dc_isar_feature(aa32_fpdp, s)) {
2197 return false;
2200 if (!vfp_access_check(s)) {
2201 return true;
2204 fpst = get_fpstatus_ptr(false);
2205 ahp_mode = get_ahp_flag();
2206 tmp = tcg_temp_new_i32();
2207 vm = tcg_temp_new_i64();
2209 neon_load_reg64(vm, a->vm);
2210 gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
2211 tcg_temp_free_i64(vm);
2212 tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
2213 tcg_temp_free_i32(ahp_mode);
2214 tcg_temp_free_ptr(fpst);
2215 tcg_temp_free_i32(tmp);
2216 return true;
2219 static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
2221 TCGv_ptr fpst;
2222 TCGv_i32 tmp;
2224 if (!dc_isar_feature(aa32_vrint, s)) {
2225 return false;
2228 if (!vfp_access_check(s)) {
2229 return true;
2232 tmp = tcg_temp_new_i32();
2233 neon_load_reg32(tmp, a->vm);
2234 fpst = get_fpstatus_ptr(false);
2235 gen_helper_rints(tmp, tmp, fpst);
2236 neon_store_reg32(tmp, a->vd);
2237 tcg_temp_free_ptr(fpst);
2238 tcg_temp_free_i32(tmp);
2239 return true;
2242 static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
2244 TCGv_ptr fpst;
2245 TCGv_i64 tmp;
2247 if (!dc_isar_feature(aa32_vrint, s)) {
2248 return false;
2251 /* UNDEF accesses to D16-D31 if they don't exist. */
2252 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2253 return false;
2256 if (!dc_isar_feature(aa32_fpdp, s)) {
2257 return false;
2260 if (!vfp_access_check(s)) {
2261 return true;
2264 tmp = tcg_temp_new_i64();
2265 neon_load_reg64(tmp, a->vm);
2266 fpst = get_fpstatus_ptr(false);
2267 gen_helper_rintd(tmp, tmp, fpst);
2268 neon_store_reg64(tmp, a->vd);
2269 tcg_temp_free_ptr(fpst);
2270 tcg_temp_free_i64(tmp);
2271 return true;
2274 static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
2276 TCGv_ptr fpst;
2277 TCGv_i32 tmp;
2278 TCGv_i32 tcg_rmode;
2280 if (!dc_isar_feature(aa32_vrint, s)) {
2281 return false;
2284 if (!vfp_access_check(s)) {
2285 return true;
2288 tmp = tcg_temp_new_i32();
2289 neon_load_reg32(tmp, a->vm);
2290 fpst = get_fpstatus_ptr(false);
2291 tcg_rmode = tcg_const_i32(float_round_to_zero);
2292 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2293 gen_helper_rints(tmp, tmp, fpst);
2294 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2295 neon_store_reg32(tmp, a->vd);
2296 tcg_temp_free_ptr(fpst);
2297 tcg_temp_free_i32(tcg_rmode);
2298 tcg_temp_free_i32(tmp);
2299 return true;
2302 static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
2304 TCGv_ptr fpst;
2305 TCGv_i64 tmp;
2306 TCGv_i32 tcg_rmode;
2308 if (!dc_isar_feature(aa32_vrint, s)) {
2309 return false;
2312 /* UNDEF accesses to D16-D31 if they don't exist. */
2313 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2314 return false;
2317 if (!dc_isar_feature(aa32_fpdp, s)) {
2318 return false;
2321 if (!vfp_access_check(s)) {
2322 return true;
2325 tmp = tcg_temp_new_i64();
2326 neon_load_reg64(tmp, a->vm);
2327 fpst = get_fpstatus_ptr(false);
2328 tcg_rmode = tcg_const_i32(float_round_to_zero);
2329 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2330 gen_helper_rintd(tmp, tmp, fpst);
2331 gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
2332 neon_store_reg64(tmp, a->vd);
2333 tcg_temp_free_ptr(fpst);
2334 tcg_temp_free_i64(tmp);
2335 tcg_temp_free_i32(tcg_rmode);
2336 return true;
2339 static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
2341 TCGv_ptr fpst;
2342 TCGv_i32 tmp;
2344 if (!dc_isar_feature(aa32_vrint, s)) {
2345 return false;
2348 if (!vfp_access_check(s)) {
2349 return true;
2352 tmp = tcg_temp_new_i32();
2353 neon_load_reg32(tmp, a->vm);
2354 fpst = get_fpstatus_ptr(false);
2355 gen_helper_rints_exact(tmp, tmp, fpst);
2356 neon_store_reg32(tmp, a->vd);
2357 tcg_temp_free_ptr(fpst);
2358 tcg_temp_free_i32(tmp);
2359 return true;
2362 static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
2364 TCGv_ptr fpst;
2365 TCGv_i64 tmp;
2367 if (!dc_isar_feature(aa32_vrint, s)) {
2368 return false;
2371 /* UNDEF accesses to D16-D31 if they don't exist. */
2372 if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
2373 return false;
2376 if (!dc_isar_feature(aa32_fpdp, s)) {
2377 return false;
2380 if (!vfp_access_check(s)) {
2381 return true;
2384 tmp = tcg_temp_new_i64();
2385 neon_load_reg64(tmp, a->vm);
2386 fpst = get_fpstatus_ptr(false);
2387 gen_helper_rintd_exact(tmp, tmp, fpst);
2388 neon_store_reg64(tmp, a->vd);
2389 tcg_temp_free_ptr(fpst);
2390 tcg_temp_free_i64(tmp);
2391 return true;
2394 static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
2396 TCGv_i64 vd;
2397 TCGv_i32 vm;
2399 /* UNDEF accesses to D16-D31 if they don't exist. */
2400 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2401 return false;
2404 if (!dc_isar_feature(aa32_fpdp, s)) {
2405 return false;
2408 if (!vfp_access_check(s)) {
2409 return true;
2412 vm = tcg_temp_new_i32();
2413 vd = tcg_temp_new_i64();
2414 neon_load_reg32(vm, a->vm);
2415 gen_helper_vfp_fcvtds(vd, vm, cpu_env);
2416 neon_store_reg64(vd, a->vd);
2417 tcg_temp_free_i32(vm);
2418 tcg_temp_free_i64(vd);
2419 return true;
2422 static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
2424 TCGv_i64 vm;
2425 TCGv_i32 vd;
2427 /* UNDEF accesses to D16-D31 if they don't exist. */
2428 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2429 return false;
2432 if (!dc_isar_feature(aa32_fpdp, s)) {
2433 return false;
2436 if (!vfp_access_check(s)) {
2437 return true;
2440 vd = tcg_temp_new_i32();
2441 vm = tcg_temp_new_i64();
2442 neon_load_reg64(vm, a->vm);
2443 gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
2444 neon_store_reg32(vd, a->vd);
2445 tcg_temp_free_i32(vd);
2446 tcg_temp_free_i64(vm);
2447 return true;
2450 static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
2452 TCGv_i32 vm;
2453 TCGv_ptr fpst;
2455 if (!vfp_access_check(s)) {
2456 return true;
2459 vm = tcg_temp_new_i32();
2460 neon_load_reg32(vm, a->vm);
2461 fpst = get_fpstatus_ptr(false);
2462 if (a->s) {
2463 /* i32 -> f32 */
2464 gen_helper_vfp_sitos(vm, vm, fpst);
2465 } else {
2466 /* u32 -> f32 */
2467 gen_helper_vfp_uitos(vm, vm, fpst);
2469 neon_store_reg32(vm, a->vd);
2470 tcg_temp_free_i32(vm);
2471 tcg_temp_free_ptr(fpst);
2472 return true;
2475 static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
2477 TCGv_i32 vm;
2478 TCGv_i64 vd;
2479 TCGv_ptr fpst;
2481 /* UNDEF accesses to D16-D31 if they don't exist. */
2482 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2483 return false;
2486 if (!dc_isar_feature(aa32_fpdp, s)) {
2487 return false;
2490 if (!vfp_access_check(s)) {
2491 return true;
2494 vm = tcg_temp_new_i32();
2495 vd = tcg_temp_new_i64();
2496 neon_load_reg32(vm, a->vm);
2497 fpst = get_fpstatus_ptr(false);
2498 if (a->s) {
2499 /* i32 -> f64 */
2500 gen_helper_vfp_sitod(vd, vm, fpst);
2501 } else {
2502 /* u32 -> f64 */
2503 gen_helper_vfp_uitod(vd, vm, fpst);
2505 neon_store_reg64(vd, a->vd);
2506 tcg_temp_free_i32(vm);
2507 tcg_temp_free_i64(vd);
2508 tcg_temp_free_ptr(fpst);
2509 return true;
2512 static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
2514 TCGv_i32 vd;
2515 TCGv_i64 vm;
2517 if (!dc_isar_feature(aa32_jscvt, s)) {
2518 return false;
2521 /* UNDEF accesses to D16-D31 if they don't exist. */
2522 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2523 return false;
2526 if (!dc_isar_feature(aa32_fpdp, s)) {
2527 return false;
2530 if (!vfp_access_check(s)) {
2531 return true;
2534 vm = tcg_temp_new_i64();
2535 vd = tcg_temp_new_i32();
2536 neon_load_reg64(vm, a->vm);
2537 gen_helper_vjcvt(vd, vm, cpu_env);
2538 neon_store_reg32(vd, a->vd);
2539 tcg_temp_free_i64(vm);
2540 tcg_temp_free_i32(vd);
2541 return true;
2544 static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
2546 TCGv_i32 vd, shift;
2547 TCGv_ptr fpst;
2548 int frac_bits;
2550 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
2551 return false;
2554 if (!vfp_access_check(s)) {
2555 return true;
2558 frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
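/*
 * The encoded immediate holds (size - fracbits), so this recovers the
 * number of fraction bits to pass to the helpers as the fixed-point
 * shift amount; bit 0 of opc (sx) selects a 16-bit (0) or 32-bit (1) size.
 */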
2560 vd = tcg_temp_new_i32();
2561 neon_load_reg32(vd, a->vd);
2563 fpst = get_fpstatus_ptr(false);
2564 shift = tcg_const_i32(frac_bits);
2566 /* Switch on op:U:sx bits */
2567 switch (a->opc) {
2568 case 0:
2569 gen_helper_vfp_shtos(vd, vd, shift, fpst);
2570 break;
2571 case 1:
2572 gen_helper_vfp_sltos(vd, vd, shift, fpst);
2573 break;
2574 case 2:
2575 gen_helper_vfp_uhtos(vd, vd, shift, fpst);
2576 break;
2577 case 3:
2578 gen_helper_vfp_ultos(vd, vd, shift, fpst);
2579 break;
2580 case 4:
2581 gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
2582 break;
2583 case 5:
2584 gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
2585 break;
2586 case 6:
2587 gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
2588 break;
2589 case 7:
2590 gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
2591 break;
2592 default:
2593 g_assert_not_reached();
2596 neon_store_reg32(vd, a->vd);
2597 tcg_temp_free_i32(vd);
2598 tcg_temp_free_i32(shift);
2599 tcg_temp_free_ptr(fpst);
2600 return true;
2603 static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
2605 TCGv_i64 vd;
2606 TCGv_i32 shift;
2607 TCGv_ptr fpst;
2608 int frac_bits;
2610 if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
2611 return false;
2614 /* UNDEF accesses to D16-D31 if they don't exist. */
2615 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
2616 return false;
2619 if (!dc_isar_feature(aa32_fpdp, s)) {
2620 return false;
2623 if (!vfp_access_check(s)) {
2624 return true;
2627 frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);
2629 vd = tcg_temp_new_i64();
2630 neon_load_reg64(vd, a->vd);
2632 fpst = get_fpstatus_ptr(false);
2633 shift = tcg_const_i32(frac_bits);
2635 /* Switch on op:U:sx bits */
2636 switch (a->opc) {
2637 case 0:
2638 gen_helper_vfp_shtod(vd, vd, shift, fpst);
2639 break;
2640 case 1:
2641 gen_helper_vfp_sltod(vd, vd, shift, fpst);
2642 break;
2643 case 2:
2644 gen_helper_vfp_uhtod(vd, vd, shift, fpst);
2645 break;
2646 case 3:
2647 gen_helper_vfp_ultod(vd, vd, shift, fpst);
2648 break;
2649 case 4:
2650 gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
2651 break;
2652 case 5:
2653 gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
2654 break;
2655 case 6:
2656 gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
2657 break;
2658 case 7:
2659 gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
2660 break;
2661 default:
2662 g_assert_not_reached();
2665 neon_store_reg64(vd, a->vd);
2666 tcg_temp_free_i64(vd);
2667 tcg_temp_free_i32(shift);
2668 tcg_temp_free_ptr(fpst);
2669 return true;
2672 static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
2674 TCGv_i32 vm;
2675 TCGv_ptr fpst;
2677 if (!vfp_access_check(s)) {
2678 return true;
2681 fpst = get_fpstatus_ptr(false);
2682 vm = tcg_temp_new_i32();
2683 neon_load_reg32(vm, a->vm);
2685 if (a->s) {
2686 if (a->rz) {
2687 gen_helper_vfp_tosizs(vm, vm, fpst);
2688 } else {
2689 gen_helper_vfp_tosis(vm, vm, fpst);
2691 } else {
2692 if (a->rz) {
2693 gen_helper_vfp_touizs(vm, vm, fpst);
2694 } else {
2695 gen_helper_vfp_touis(vm, vm, fpst);
2698 neon_store_reg32(vm, a->vd);
2699 tcg_temp_free_i32(vm);
2700 tcg_temp_free_ptr(fpst);
2701 return true;
2704 static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
2706 TCGv_i32 vd;
2707 TCGv_i64 vm;
2708 TCGv_ptr fpst;
2710 /* UNDEF accesses to D16-D31 if they don't exist. */
2711 if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
2712 return false;
2715 if (!dc_isar_feature(aa32_fpdp, s)) {
2716 return false;
2719 if (!vfp_access_check(s)) {
2720 return true;
2723 fpst = get_fpstatus_ptr(false);
2724 vm = tcg_temp_new_i64();
2725 vd = tcg_temp_new_i32();
2726 neon_load_reg64(vm, a->vm);
2728 if (a->s) {
2729 if (a->rz) {
2730 gen_helper_vfp_tosizd(vd, vm, fpst);
2731 } else {
2732 gen_helper_vfp_tosid(vd, vm, fpst);
2734 } else {
2735 if (a->rz) {
2736 gen_helper_vfp_touizd(vd, vm, fpst);
2737 } else {
2738 gen_helper_vfp_touid(vd, vm, fpst);
2741 neon_store_reg32(vd, a->vd);
2742 tcg_temp_free_i32(vd);
2743 tcg_temp_free_i64(vm);
2744 tcg_temp_free_ptr(fpst);
2745 return true;