target/arm: Use vfp_expand_imm() for AArch32 VFP VMOV_imm
[qemu/ar7.git] / target/arm/translate-vfp.inc.c
/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */

/* Include the generated VFP decoder */
#include "decode-vfp.inc.c"
#include "decode-vfp-uncond.inc.c"
/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
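
/*
 * Worked example: imm8 == 0x70 has sign bit 0, imm8<6> set and the
 * remaining fraction bits clear, so it expands to 0x3f800000 for MO_32
 * and 0x3ff0000000000000 for MO_64, i.e. the value 1.0.
 */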
/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
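
/*
 * For example, on a little-endian host the top half of the value in s0
 * is at vfp_reg_offset(false, 0) + 2; on a big-endian host the two
 * halves swap, which is what the #ifdef above accounts for.
 */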
/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
                               s->fp_excp_el);
        } else {
            gen_exception_insn(s, 4, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
                           default_exception_el(s));
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        if (s->v7m_lspact) {
            /*
             * Lazy state saving affects external memory and also the NVIC,
             * so we must mark it as an IO operation for icount.
             */
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_start();
            }
            gen_helper_v7m_preserve_fp_state(cpu_env);
            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
                gen_io_end();
            }
            /*
             * If the preserve_fp_state helper doesn't throw an exception
             * then it will clear LSPACT; we don't need to repeat this for
             * any further FP insns in this TB.
             */
            s->v7m_lspact = false;
        }

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
             * and the FPSCR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}

/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}
static bool trans_VMINMAXNM(DisasContext *s, arg_VMINMAXNM *a)
{
    uint32_t rd, rn, rm;
    bool dp = a->dp;
    bool vmin = a->op;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_vminmaxnm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    if (dp) {
        TCGv_i64 frn, frm, dest;

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        neon_load_reg64(frn, rn);
        neon_load_reg64(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnumd(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
        }
        neon_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);
    } else {
        TCGv_i32 frn, frm, dest;

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();

        neon_load_reg32(frn, rn);
        neon_load_reg32(frm, rm);
        if (vmin) {
            gen_helper_vfp_minnums(dest, frn, frm, fpst);
        } else {
            gen_helper_vfp_maxnums(dest, frn, frm, fpst);
        }
        neon_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);
    }

    tcg_temp_free_ptr(fpst);
    return true;
}
/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
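
/*
 * The index into this table is the instruction's RM field, so entry 0
 * (FPROUNDING_TIEAWAY) corresponds to the 'A' forms (e.g. VRINTA),
 * entry 1 to the 'N' forms, entry 2 to 'P' and entry 3 to 'M'.
 */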
static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }
    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        neon_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        neon_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_op, rm);
        gen_helper_rints(tcg_res, tcg_op, fpst);
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    bool dp = a->dp;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (dp && !dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }
    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(0);

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        neon_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        neon_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        neon_load_reg32(tcg_single, rm);
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        neon_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}
static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = neon_load_reg(a->vn, pass);
    switch (a->size) {
    case 0:
        if (offset) {
            tcg_gen_shri_i32(tmp, tmp, offset);
        }
        if (a->u) {
            gen_uxtb(tmp);
        } else {
            gen_sxtb(tmp);
        }
        break;
    case 1:
        if (a->u) {
            if (offset) {
                tcg_gen_shri_i32(tmp, tmp, 16);
            } else {
                gen_uxth(tmp);
            }
        } else {
            if (offset) {
                tcg_gen_sari_i32(tmp, tmp, 16);
            } else {
                gen_sxth(tmp);
            }
        }
        break;
    case 2:
        break;
    }
    store_reg(s, a->rt, tmp);

    return true;
}
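
/*
 * Worked example: "VMOV.U8 r0, d2[5]" has index 5 and size 0, so
 * offset = 5, pass = 1 and the in-word offset becomes 8 bits: the byte
 * is taken from the upper 32-bit half of d2, shifted right by 8 and
 * then zero-extended.
 */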
static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp, tmp2;
    int pass;
    uint32_t offset;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    offset = a->index << a->size;
    pass = extract32(offset, 2, 1);
    offset = extract32(offset, 0, 2) * 8;

    if (a->size != 2 && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    switch (a->size) {
    case 0:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
        tcg_temp_free_i32(tmp2);
        break;
    case 1:
        tmp2 = neon_load_reg(a->vn, pass);
        tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
        tcg_temp_free_i32(tmp2);
        break;
    case 2:
        break;
    }
    neon_store_reg(a->vn, pass, tmp);

    return true;
}
static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_reg_offset(a->vn, 0),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}
static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Writes to R15 are UNPREDICTABLE; we choose to undef.
         */
        if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}
static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm);
        /* neon_store_reg32() does not free its input, so free it here */
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register
     */

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        neon_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
        neon_store_reg32(tmp, a->vd);
    } else {
        neon_load_reg32(tmp, a->vd);
        gen_aa32_st32(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    tcg_gen_addi_i32(addr, addr, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
        neon_store_reg64(tmp, a->vd);
    } else {
        neon_load_reg64(tmp, a->vd);
        gen_aa32_st64(s, tmp, addr, get_mem_index(s));
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;
    int i, n;

    n = a->imm;

    if (n == 0 || (a->vd + n) > 32) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 4;
    tmp = tcg_temp_new_i32();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld32u(s, tmp, addr, get_mem_index(s));
            neon_store_reg32(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg32(tmp, a->vd + i);
            gen_aa32_st32(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i32(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;
    int i, n;

    n = a->imm >> 1;

    if (n == 0 || (a->vd + n) > 32 || n > 16) {
        /*
         * UNPREDICTABLE cases for bad immediates: we choose to
         * UNDEF to avoid generating huge numbers of TCG ops
         */
        return false;
    }
    if (a->rn == 15 && a->w) {
        /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd + n) > 16) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (s->thumb && a->rn == 15) {
        /* This is actually UNPREDICTABLE */
        addr = tcg_temp_new_i32();
        tcg_gen_movi_i32(addr, s->pc & ~2);
    } else {
        addr = load_reg(s, a->rn);
    }
    if (a->p) {
        /* pre-decrement */
        tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        /*
         * Here 'addr' is the lowest address we will store to,
         * and is either the old SP (if post-increment) or
         * the new SP (if pre-decrement). For post-increment
         * where the old value is below the limit and the new
         * value is above, it is UNKNOWN whether the limit check
         * triggers; we choose to trigger.
         */
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    offset = 8;
    tmp = tcg_temp_new_i64();
    for (i = 0; i < n; i++) {
        if (a->l) {
            /* load */
            gen_aa32_ld64(s, tmp, addr, get_mem_index(s));
            neon_store_reg64(tmp, a->vd + i);
        } else {
            /* store */
            neon_load_reg64(tmp, a->vd + i);
            gen_aa32_st64(s, tmp, addr, get_mem_index(s));
        }
        tcg_gen_addi_i32(addr, addr, offset);
    }
    tcg_temp_free_i64(tmp);
    if (a->w) {
        /* writeback */
        if (a->p) {
            offset = -offset * n;
        } else if (a->imm & 1) {
            offset = 4;
        } else {
            offset = 0;
        }

        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }

    return true;
}
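
/*
 * Note on the (a->imm & 1) test in the writeback path above: an odd
 * imm8 is the deprecated FLDMX/FSTMX encoding, which transfers the
 * same n doublewords but writes back one extra word of address, hence
 * the additional offset of 4.
 */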
/*
 * Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
 * The callback should emit code to write a value to vd. If
 * do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
 * will contain the old value of the relevant VFP register;
 * otherwise it must be written to only.
 */
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
                           TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
                           TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);

/*
 * Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
 * The callback should emit code to write a value to vd (which
 * should be written to only).
 */
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);

/*
 * Return true if the specified S reg is in a scalar bank
 * (ie if it is s0..s7)
 */
static inline bool vfp_sreg_is_scalar(int reg)
{
    return (reg & 0x18) == 0;
}

/*
 * Return true if the specified D reg is in a scalar bank
 * (ie if it is d0..d3 or d16..d19)
 */
static inline bool vfp_dreg_is_scalar(int reg)
{
    return (reg & 0xc) == 0;
}

/*
 * Advance the S reg number forwards by delta within its bank
 * (ie increment the low 3 bits but leave the rest the same)
 */
static inline int vfp_advance_sreg(int reg, int delta)
{
    return ((reg + delta) & 0x7) | (reg & ~0x7);
}

/*
 * Advance the D reg number forwards by delta within its bank
 * (ie increment the low 2 bits but leave the rest the same)
 */
static inline int vfp_advance_dreg(int reg, int delta)
{
    return ((reg + delta) & 0x3) | (reg & ~0x3);
}
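
/*
 * Worked example: vfp_advance_sreg(7, 1) wraps within the bank and
 * returns 0 (s7 -> s0), while vfp_advance_sreg(8, 1) returns 9
 * (s8 -> s9, still inside the s8..s15 bank).
 */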
/*
 * Perform a 3-operand VFP data processing instruction. fn is the
 * callback to do the actual operation; this function deals with the
 * code to handle looping around for VFP vector processing.
 */
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, f1, fd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    f1 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg32(f0, vn);
    neon_load_reg32(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg32(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vn = vfp_advance_sreg(vn, delta_d);
        neon_load_reg32(f0, vn);
        if (delta_m) {
            vm = vfp_advance_sreg(vm, delta_m);
            neon_load_reg32(f1, vm);
        }
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(f1);
    tcg_temp_free_i32(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
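
/*
 * Short-vector example (a sketch of the FPSCR.LEN/STRIDE semantics the
 * loop above implements): with veclen == 3 and vec_stride == 0, an
 * operation whose vd is outside the scalar bank, e.g. vd = s8,
 * vn = s16, vm = s24, is repeated four times over s8..s11, s16..s19
 * and s24..s27; had vm been in s0..s7 it would have been held fixed as
 * a scalar operand (delta_m == 0) while vd and vn advance.
 */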
static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
                          int vd, int vn, int vm, bool reads_vd)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, f1, fd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vn | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    f1 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();
    fpst = get_fpstatus_ptr(0);

    neon_load_reg64(f0, vn);
    neon_load_reg64(f1, vm);

    for (;;) {
        if (reads_vd) {
            neon_load_reg64(fd, vd);
        }
        fn(fd, f0, f1, fpst);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }
        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vn = vfp_advance_dreg(vn, delta_d);
        neon_load_reg64(f0, vn);
        if (delta_m) {
            vm = vfp_advance_dreg(vm, delta_m);
            neon_load_reg64(f1, vm);
        }
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(f1);
    tcg_temp_free_i64(fd);
    tcg_temp_free_ptr(fpst);

    return true;
}
static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 f0, fd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;

            if (vfp_sreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i32();
    fd = tcg_temp_new_i32();

    neon_load_reg32(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_sreg(vd, delta_d);
                neon_store_reg32(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
        vm = vfp_advance_sreg(vm, delta_m);
        neon_load_reg32(f0, vm);
    }

    tcg_temp_free_i32(f0);
    tcg_temp_free_i32(fd);

    return true;
}
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
{
    uint32_t delta_m = 0;
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 f0, fd;

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((vd | vm) & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;

            if (vfp_dreg_is_scalar(vm)) {
                /* mixed scalar/vector */
                delta_m = 0;
            } else {
                /* vector */
                delta_m = delta_d;
            }
        }
    }

    f0 = tcg_temp_new_i64();
    fd = tcg_temp_new_i64();

    neon_load_reg64(f0, vm);

    for (;;) {
        fn(fd, f0);
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        if (delta_m == 0) {
            /* single source one-many */
            while (veclen--) {
                vd = vfp_advance_dreg(vd, delta_d);
                neon_store_reg64(fd, vd);
            }
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
        vm = vfp_advance_dreg(vm, delta_m);
        neon_load_reg64(f0, vm);
    }

    tcg_temp_free_i64(f0);
    tcg_temp_free_i64(fd);

    return true;
}
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* Note that order of inputs to the add matters for NaNs */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_sp *a)
{
    return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VMLS: vd = vd + -(vn * vm)
     * Note that order of inputs to the add matters for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_sp *a)
{
    return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /*
     * VNMLS: -fd + (fn * fm)
     * Note that it isn't valid to replace (-A + B) with (B - A) or similar
     * plausible looking simplifications because this will give wrong results
     * for NaNs.
     */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
}
static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i32 tmp = tcg_temp_new_i32();

    gen_helper_vfp_muls(tmp, vn, vm, fpst);
    gen_helper_vfp_negs(tmp, tmp);
    gen_helper_vfp_negs(vd, vd);
    gen_helper_vfp_adds(vd, vd, tmp, fpst);
    tcg_temp_free_i32(tmp);
}

static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
}

static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMLA: -fd + -(fn * fm) */
    TCGv_i64 tmp = tcg_temp_new_i64();

    gen_helper_vfp_muld(tmp, vn, vm, fpst);
    gen_helper_vfp_negd(tmp, tmp);
    gen_helper_vfp_negd(vd, vd);
    gen_helper_vfp_addd(vd, vd, tmp, fpst);
    tcg_temp_free_i64(tmp);
}

static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
}
static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
}

static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muls(vd, vn, vm, fpst);
    gen_helper_vfp_negs(vd, vd);
}

static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
}

static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
{
    /* VNMUL: -(fn * fm) */
    gen_helper_vfp_muld(vd, vn, vm, fpst);
    gen_helper_vfp_negd(vd, vd);
}

static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_sp *a)
{
    return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
}

static bool trans_VADD_dp(DisasContext *s, arg_VADD_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
}

static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
}

static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_sp *a)
{
    return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
}
static bool trans_VFM_sp(DisasContext *s, arg_VFM_sp *a)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i32 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i32();

    neon_load_reg32(vn, a->vn);
    neon_load_reg32(vm, a->vm);
    if (a->o2) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negs(vn, vn);
    }
    neon_load_reg32(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negs(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
    neon_store_reg32(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(vn);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i32(vd);

    return true;
}
static bool trans_VFM_dp(DisasContext *s, arg_VFM_sp *a)
{
    /*
     * VFNMA : fd = muladd(-fd,  fn, fm)
     * VFNMS : fd = muladd(-fd, -fn, fm)
     * VFMA  : fd = muladd( fd,  fn, fm)
     * VFMS  : fd = muladd( fd, -fn, fm)
     *
     * These are fused multiply-add, and must be done as one floating
     * point operation with no rounding between the multiplication and
     * addition steps. NB that doing the negations here as separate
     * steps is correct : an input NaN should come out with its sign
     * bit flipped if it is a negated-input.
     */
    TCGv_ptr fpst;
    TCGv_i64 vn, vm, vd;

    /*
     * Present in VFPv4 only.
     * In v7A, UNPREDICTABLE with non-zero vector length/stride; from
     * v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
     */
    if (!arm_dc_feature(s, ARM_FEATURE_VFP4) ||
        (s->vec_len != 0 || s->vec_stride != 0)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vn = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i64();

    neon_load_reg64(vn, a->vn);
    neon_load_reg64(vm, a->vm);
    if (a->o2) {
        /* VFNMS, VFMS */
        gen_helper_vfp_negd(vn, vn);
    }
    neon_load_reg64(vd, a->vd);
    if (a->o1 & 1) {
        /* VFNMA, VFNMS */
        gen_helper_vfp_negd(vd, vd);
    }
    fpst = get_fpstatus_ptr(0);
    gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
    neon_store_reg64(vd, a->vd);

    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(vn);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i64(vd);

    return true;
}
static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i32 fd;
    uint32_t vd;

    vd = a->vd;

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_sreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = s->vec_stride + 1;
        }
    }

    fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));

    for (;;) {
        neon_store_reg32(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_sreg(vd, delta_d);
    }

    tcg_temp_free_i32(fd);
    return true;
}
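
/*
 * For example, "VMOV.F32 s0, #1.0" is encoded with imm8 == 0x70, which
 * vfp_expand_imm(MO_32, 0x70) expands to 0x3f800000 (see the worked
 * example next to vfp_expand_imm() above).
 */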
static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
{
    uint32_t delta_d = 0;
    int veclen = s->vec_len;
    TCGv_i64 fd;
    uint32_t vd;

    vd = a->vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (vd & 0x10)) {
        return false;
    }

    if (!dc_isar_feature(aa32_fpshvec, s) &&
        (veclen != 0 || s->vec_stride != 0)) {
        return false;
    }

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (veclen > 0) {
        /* Figure out what type of vector operation this is. */
        if (vfp_dreg_is_scalar(vd)) {
            /* scalar */
            veclen = 0;
        } else {
            delta_d = (s->vec_stride >> 1) + 1;
        }
    }

    fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));

    for (;;) {
        neon_store_reg64(fd, vd);

        if (veclen == 0) {
            break;
        }

        /* Set up the operands for the next iteration */
        veclen--;
        vd = vfp_advance_dreg(vd, delta_d);
    }

    tcg_temp_free_i64(fd);
    return true;
}
static bool trans_VMOV_reg_sp(DisasContext *s, arg_VMOV_reg_sp *a)
{
    return do_vfp_2op_sp(s, tcg_gen_mov_i32, a->vd, a->vm);
}

static bool trans_VMOV_reg_dp(DisasContext *s, arg_VMOV_reg_dp *a)
{
    return do_vfp_2op_dp(s, tcg_gen_mov_i64, a->vd, a->vm);
}

static bool trans_VABS_sp(DisasContext *s, arg_VABS_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_abss, a->vd, a->vm);
}

static bool trans_VABS_dp(DisasContext *s, arg_VABS_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_absd, a->vd, a->vm);
}

static bool trans_VNEG_sp(DisasContext *s, arg_VNEG_sp *a)
{
    return do_vfp_2op_sp(s, gen_helper_vfp_negs, a->vd, a->vm);
}

static bool trans_VNEG_dp(DisasContext *s, arg_VNEG_dp *a)
{
    return do_vfp_2op_dp(s, gen_helper_vfp_negd, a->vd, a->vm);
}

static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
{
    gen_helper_vfp_sqrts(vd, vm, cpu_env);
}

static bool trans_VSQRT_sp(DisasContext *s, arg_VSQRT_sp *a)
{
    return do_vfp_2op_sp(s, gen_VSQRT_sp, a->vd, a->vm);
}

static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
{
    gen_helper_vfp_sqrtd(vd, vm, cpu_env);
}

static bool trans_VSQRT_dp(DisasContext *s, arg_VSQRT_dp *a)
{
    return do_vfp_2op_dp(s, gen_VSQRT_dp, a->vd, a->vm);
}
static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
{
    TCGv_i32 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i32();

    neon_load_reg32(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i32(vm, 0);
    } else {
        neon_load_reg32(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmpes(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmps(vd, vm, cpu_env);
    }

    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(vm);

    return true;
}

static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
{
    TCGv_i64 vd, vm;

    /* Vm/M bits must be zero for the Z variant */
    if (a->z && a->vm != 0) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i64();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vd, a->vd);
    if (a->z) {
        tcg_gen_movi_i64(vm, 0);
    } else {
        neon_load_reg64(vm, a->vm);
    }

    if (a->e) {
        gen_helper_vfp_cmped(vd, vm, cpu_env);
    } else {
        gen_helper_vfp_cmpd(vd, vm, cpu_env);
    }

    tcg_temp_free_i64(vd);
    tcg_temp_free_i64(vm);

    return true;
}
static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vd;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    /* The T bit tells us if we want the low or high 16 bits of Vm */
    tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
    vd = tcg_temp_new_i64();
    gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_spconv, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();

    neon_load_reg32(tmp, a->vm);
    gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
{
    TCGv_ptr fpst;
    TCGv_i32 ahp_mode;
    TCGv_i32 tmp;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    ahp_mode = get_ahp_flag();
    tmp = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();

    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
    tcg_temp_free_i64(vm);
    tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
    tcg_temp_free_i32(ahp_mode);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}
static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}

static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rints(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tcg_rmode);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;
    TCGv_i32 tcg_rmode;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    tcg_rmode = tcg_const_i32(float_round_to_zero);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    gen_helper_rintd(tmp, tmp, fpst);
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(tcg_rmode);
    return true;
}

static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
{
    TCGv_ptr fpst;
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    neon_load_reg32(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rints_exact(tmp, tmp, fpst);
    neon_store_reg32(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i32(tmp);
    return true;
}

static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
{
    TCGv_ptr fpst;
    TCGv_i64 tmp;

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i64();
    neon_load_reg64(tmp, a->vm);
    fpst = get_fpstatus_ptr(false);
    gen_helper_rintd_exact(tmp, tmp, fpst);
    neon_store_reg64(tmp, a->vd);
    tcg_temp_free_ptr(fpst);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
{
    TCGv_i64 vd;
    TCGv_i32 vm;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    gen_helper_vfp_fcvtds(vd, vm, cpu_env);
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    return true;
}

static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
{
    TCGv_i64 vm;
    TCGv_i32 vd;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vd = tcg_temp_new_i32();
    vm = tcg_temp_new_i64();
    neon_load_reg64(vm, a->vm);
    gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    return true;
}
static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f32 */
        gen_helper_vfp_sitos(vm, vm, fpst);
    } else {
        /* u32 -> f32 */
        gen_helper_vfp_uitos(vm, vm, fpst);
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    neon_load_reg32(vm, a->vm);
    fpst = get_fpstatus_ptr(false);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    neon_load_reg32(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    neon_load_reg64(vd, a->vd);

    fpst = get_fpstatus_ptr(false);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    neon_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i32();
    neon_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    neon_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_fp_d32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = get_fpstatus_ptr(false);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    neon_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    neon_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}