target/arm: Add missing TCG temp free in do_2shift_env_64()
[qemu/ar7.git] / target/arm/translate-neon.inc.c
1 /*
2 * ARM translation: AArch32 Neon instructions
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005-2007 CodeSourcery
6 * Copyright (c) 2007 OpenedHand, Ltd.
7 * Copyright (c) 2020 Linaro, Ltd.
8 *
9 * This library is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU Lesser General Public
11 * License as published by the Free Software Foundation; either
12 * version 2 of the License, or (at your option) any later version.
13 *
14 * This library is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * Lesser General Public License for more details.
18 *
19 * You should have received a copy of the GNU Lesser General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 */
22
23 /*
24 * This file is intended to be included from translate.c; it uses
25 * some macros and definitions provided by that file.
26 * It might be possible to convert it to a standalone .c file eventually.
27 */
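/*
 * Note (added): the small inline helpers below are referenced by the
 * generated decoders as decode-time field converters; in particular the
 * rsub_* functions turn an encoded immediate x into the shift count
 * (size - x) used by the shift-by-immediate patterns.
 */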
29 static inline int plus1(DisasContext *s, int x)
31 return x + 1;
34 static inline int rsub_64(DisasContext *s, int x)
36 return 64 - x;
39 static inline int rsub_32(DisasContext *s, int x)
41 return 32 - x;
43 static inline int rsub_16(DisasContext *s, int x)
45 return 16 - x;
47 static inline int rsub_8(DisasContext *s, int x)
49 return 8 - x;
52 /* Include the generated Neon decoder */
53 #include "decode-neon-dp.inc.c"
54 #include "decode-neon-ls.inc.c"
55 #include "decode-neon-shared.inc.c"
57 static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
59 int opr_sz;
60 TCGv_ptr fpst;
61 gen_helper_gvec_3_ptr *fn_gvec_ptr;
63 if (!dc_isar_feature(aa32_vcma, s)
64 || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
65 return false;
68 /* UNDEF accesses to D16-D31 if they don't exist. */
69 if (!dc_isar_feature(aa32_simd_r32, s) &&
70 ((a->vd | a->vn | a->vm) & 0x10)) {
71 return false;
74 if ((a->vn | a->vm | a->vd) & a->q) {
75 return false;
78 if (!vfp_access_check(s)) {
79 return true;
82 opr_sz = (1 + a->q) * 8;
83 fpst = get_fpstatus_ptr(1);
84 fn_gvec_ptr = a->size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
85 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
86 vfp_reg_offset(1, a->vn),
87 vfp_reg_offset(1, a->vm),
88 fpst, opr_sz, opr_sz, a->rot,
89 fn_gvec_ptr);
90 tcg_temp_free_ptr(fpst);
91 return true;
94 static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
96 int opr_sz;
97 TCGv_ptr fpst;
98 gen_helper_gvec_3_ptr *fn_gvec_ptr;
100 if (!dc_isar_feature(aa32_vcma, s)
101 || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
102 return false;
105 /* UNDEF accesses to D16-D31 if they don't exist. */
106 if (!dc_isar_feature(aa32_simd_r32, s) &&
107 ((a->vd | a->vn | a->vm) & 0x10)) {
108 return false;
111 if ((a->vn | a->vm | a->vd) & a->q) {
112 return false;
115 if (!vfp_access_check(s)) {
116 return true;
119 opr_sz = (1 + a->q) * 8;
120 fpst = get_fpstatus_ptr(1);
121 fn_gvec_ptr = a->size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
122 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
123 vfp_reg_offset(1, a->vn),
124 vfp_reg_offset(1, a->vm),
125 fpst, opr_sz, opr_sz, a->rot,
126 fn_gvec_ptr);
127 tcg_temp_free_ptr(fpst);
128 return true;
131 static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
133 int opr_sz;
134 gen_helper_gvec_3 *fn_gvec;
136 if (!dc_isar_feature(aa32_dp, s)) {
137 return false;
140 /* UNDEF accesses to D16-D31 if they don't exist. */
141 if (!dc_isar_feature(aa32_simd_r32, s) &&
142 ((a->vd | a->vn | a->vm) & 0x10)) {
143 return false;
146 if ((a->vn | a->vm | a->vd) & a->q) {
147 return false;
150 if (!vfp_access_check(s)) {
151 return true;
154 opr_sz = (1 + a->q) * 8;
155 fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
156 tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
157 vfp_reg_offset(1, a->vn),
158 vfp_reg_offset(1, a->vm),
159 opr_sz, opr_sz, 0, fn_gvec);
160 return true;
163 static bool trans_VFML(DisasContext *s, arg_VFML *a)
165 int opr_sz;
167 if (!dc_isar_feature(aa32_fhm, s)) {
168 return false;
171 /* UNDEF accesses to D16-D31 if they don't exist. */
172 if (!dc_isar_feature(aa32_simd_r32, s) &&
173 (a->vd & 0x10)) {
174 return false;
177 if (a->vd & a->q) {
178 return false;
181 if (!vfp_access_check(s)) {
182 return true;
185 opr_sz = (1 + a->q) * 8;
186 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
187 vfp_reg_offset(a->q, a->vn),
188 vfp_reg_offset(a->q, a->vm),
189 cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
190 gen_helper_gvec_fmlal_a32);
191 return true;
194 static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
196 gen_helper_gvec_3_ptr *fn_gvec_ptr;
197 int opr_sz;
198 TCGv_ptr fpst;
200 if (!dc_isar_feature(aa32_vcma, s)) {
201 return false;
203 if (a->size == 0 && !dc_isar_feature(aa32_fp16_arith, s)) {
204 return false;
207 /* UNDEF accesses to D16-D31 if they don't exist. */
208 if (!dc_isar_feature(aa32_simd_r32, s) &&
209 ((a->vd | a->vn | a->vm) & 0x10)) {
210 return false;
213 if ((a->vd | a->vn) & a->q) {
214 return false;
217 if (!vfp_access_check(s)) {
218 return true;
221 fn_gvec_ptr = (a->size ? gen_helper_gvec_fcmlas_idx
222 : gen_helper_gvec_fcmlah_idx);
223 opr_sz = (1 + a->q) * 8;
224 fpst = get_fpstatus_ptr(1);
225 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
226 vfp_reg_offset(1, a->vn),
227 vfp_reg_offset(1, a->vm),
228 fpst, opr_sz, opr_sz,
229 (a->index << 2) | a->rot, fn_gvec_ptr);
230 tcg_temp_free_ptr(fpst);
231 return true;
234 static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
236 gen_helper_gvec_3 *fn_gvec;
237 int opr_sz;
238 TCGv_ptr fpst;
240 if (!dc_isar_feature(aa32_dp, s)) {
241 return false;
244 /* UNDEF accesses to D16-D31 if they don't exist. */
245 if (!dc_isar_feature(aa32_simd_r32, s) &&
246 ((a->vd | a->vn) & 0x10)) {
247 return false;
250 if ((a->vd | a->vn) & a->q) {
251 return false;
254 if (!vfp_access_check(s)) {
255 return true;
258 fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
259 opr_sz = (1 + a->q) * 8;
260 fpst = get_fpstatus_ptr(1);
261 tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
262 vfp_reg_offset(1, a->vn),
263 vfp_reg_offset(1, a->rm),
264 opr_sz, opr_sz, a->index, fn_gvec);
265 tcg_temp_free_ptr(fpst);
266 return true;
269 static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
271 int opr_sz;
273 if (!dc_isar_feature(aa32_fhm, s)) {
274 return false;
277 /* UNDEF accesses to D16-D31 if they don't exist. */
278 if (!dc_isar_feature(aa32_simd_r32, s) &&
279 ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
280 return false;
283 if (a->vd & a->q) {
284 return false;
287 if (!vfp_access_check(s)) {
288 return true;
291 opr_sz = (1 + a->q) * 8;
292 tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
293 vfp_reg_offset(a->q, a->vn),
294 vfp_reg_offset(a->q, a->rm),
295 cpu_env, opr_sz, opr_sz,
296 (a->index << 2) | a->s, /* is_2 == 0 */
297 gen_helper_gvec_fmlal_idx_a32);
298 return true;
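/*
 * Note (added): the table below describes the "load/store multiple
 * structures" forms, indexed by the itype field of the instruction:
 * how many registers are transferred, the interleave factor, and the
 * spacing between the registers used.
 */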
301 static struct {
302 int nregs;
303 int interleave;
304 int spacing;
305 } const neon_ls_element_type[11] = {
306 {1, 4, 1},
307 {1, 4, 2},
308 {4, 1, 1},
309 {2, 2, 2},
310 {1, 3, 1},
311 {1, 3, 2},
312 {3, 1, 1},
313 {1, 1, 1},
314 {1, 2, 1},
315 {1, 2, 2},
316 {2, 1, 1}
317 };
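/*
 * Note (added): base register writeback for the element/structure
 * load/stores: rm == 15 means no writeback; rm == 13 means increment
 * the base by the total transfer size (stride); any other rm adds the
 * value of register rm.
 */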
319 static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
320 int stride)
322 if (rm != 15) {
323 TCGv_i32 base;
325 base = load_reg(s, rn);
326 if (rm == 13) {
327 tcg_gen_addi_i32(base, base, stride);
328 } else {
329 TCGv_i32 index;
330 index = load_reg(s, rm);
331 tcg_gen_add_i32(base, base, index);
332 tcg_temp_free_i32(index);
334 store_reg(s, rn, base);
338 static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
340 /* Neon load/store multiple structures */
341 int nregs, interleave, spacing, reg, n;
342 MemOp endian = s->be_data;
343 int mmu_idx = get_mem_index(s);
344 int size = a->size;
345 TCGv_i64 tmp64;
346 TCGv_i32 addr, tmp;
348 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
349 return false;
352 /* UNDEF accesses to D16-D31 if they don't exist */
353 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
354 return false;
356 if (a->itype > 10) {
357 return false;
359 /* Catch UNDEF cases for bad values of align field */
360 switch (a->itype & 0xc) {
361 case 4:
362 if (a->align >= 2) {
363 return false;
365 break;
366 case 8:
367 if (a->align == 3) {
368 return false;
370 break;
371 default:
372 break;
374 nregs = neon_ls_element_type[a->itype].nregs;
375 interleave = neon_ls_element_type[a->itype].interleave;
376 spacing = neon_ls_element_type[a->itype].spacing;
377 if (size == 3 && (interleave | spacing) != 1) {
378 return false;
381 if (!vfp_access_check(s)) {
382 return true;
385 /* For our purposes, bytes are always little-endian. */
386 if (size == 0) {
387 endian = MO_LE;
390 * Consecutive little-endian elements from a single register
391 * can be promoted to a larger little-endian operation.
393 if (interleave == 1 && endian == MO_LE) {
394 size = 3;
396 tmp64 = tcg_temp_new_i64();
397 addr = tcg_temp_new_i32();
398 tmp = tcg_const_i32(1 << size);
399 load_reg_var(s, addr, a->rn);
400 for (reg = 0; reg < nregs; reg++) {
401 for (n = 0; n < 8 >> size; n++) {
402 int xs;
403 for (xs = 0; xs < interleave; xs++) {
404 int tt = a->vd + reg + spacing * xs;
406 if (a->l) {
407 gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
408 neon_store_element64(tt, n, size, tmp64);
409 } else {
410 neon_load_element64(tmp64, tt, n, size);
411 gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
413 tcg_gen_add_i32(addr, addr, tmp);
417 tcg_temp_free_i32(addr);
418 tcg_temp_free_i32(tmp);
419 tcg_temp_free_i64(tmp64);
421 gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
422 return true;
425 static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
427 /* Neon load single structure to all lanes */
428 int reg, stride, vec_size;
429 int vd = a->vd;
430 int size = a->size;
431 int nregs = a->n + 1;
432 TCGv_i32 addr, tmp;
434 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
435 return false;
438 /* UNDEF accesses to D16-D31 if they don't exist */
439 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
440 return false;
443 if (size == 3) {
444 if (nregs != 4 || a->a == 0) {
445 return false;
447 /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
448 size = 2;
450 if (nregs == 1 && a->a == 1 && size == 0) {
451 return false;
453 if (nregs == 3 && a->a == 1) {
454 return false;
457 if (!vfp_access_check(s)) {
458 return true;
462 * VLD1 to all lanes: T bit indicates how many Dregs to write.
463 * VLD2/3/4 to all lanes: T bit indicates register stride.
465 stride = a->t ? 2 : 1;
466 vec_size = nregs == 1 ? stride * 8 : 8;
468 tmp = tcg_temp_new_i32();
469 addr = tcg_temp_new_i32();
470 load_reg_var(s, addr, a->rn);
471 for (reg = 0; reg < nregs; reg++) {
472 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
473 s->be_data | size);
474 if ((vd & 1) && vec_size == 16) {
476 * We cannot write 16 bytes at once because the
477 * destination is unaligned.
479 tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
480 8, 8, tmp);
481 tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
482 neon_reg_offset(vd, 0), 8, 8);
483 } else {
484 tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
485 vec_size, vec_size, tmp);
487 tcg_gen_addi_i32(addr, addr, 1 << size);
488 vd += stride;
490 tcg_temp_free_i32(tmp);
491 tcg_temp_free_i32(addr);
493 gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);
495 return true;
498 static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
500 /* Neon load/store single structure to one lane */
501 int reg;
502 int nregs = a->n + 1;
503 int vd = a->vd;
504 TCGv_i32 addr, tmp;
506 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
507 return false;
510 /* UNDEF accesses to D16-D31 if they don't exist */
511 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
512 return false;
515 /* Catch the UNDEF cases. This is unavoidably a bit messy. */
516 switch (nregs) {
517 case 1:
518 if (((a->align & (1 << a->size)) != 0) ||
519 (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
520 return false;
522 break;
523 case 3:
524 if ((a->align & 1) != 0) {
525 return false;
527 /* fall through */
528 case 2:
529 if (a->size == 2 && (a->align & 2) != 0) {
530 return false;
532 break;
533 case 4:
534 if ((a->size == 2) && ((a->align & 3) == 3)) {
535 return false;
537 break;
538 default:
539 abort();
541 if ((vd + a->stride * (nregs - 1)) > 31) {
543 * Attempts to write off the end of the register file are
544 * UNPREDICTABLE; we choose to UNDEF because otherwise we would
545 * access off the end of the array that holds the register data.
547 return false;
550 if (!vfp_access_check(s)) {
551 return true;
554 tmp = tcg_temp_new_i32();
555 addr = tcg_temp_new_i32();
556 load_reg_var(s, addr, a->rn);
558 * TODO: if we implemented alignment exceptions, we should check
559 * addr against the alignment encoded in a->align here.
561 for (reg = 0; reg < nregs; reg++) {
562 if (a->l) {
563 gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
564 s->be_data | a->size);
565 neon_store_element(vd, a->reg_idx, a->size, tmp);
566 } else { /* Store */
567 neon_load_element(tmp, vd, a->reg_idx, a->size);
568 gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
569 s->be_data | a->size);
571 vd += a->stride;
572 tcg_gen_addi_i32(addr, addr, 1 << a->size);
574 tcg_temp_free_i32(addr);
575 tcg_temp_free_i32(tmp);
577 gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);
579 return true;
582 static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
584 int vec_size = a->q ? 16 : 8;
585 int rd_ofs = neon_reg_offset(a->vd, 0);
586 int rn_ofs = neon_reg_offset(a->vn, 0);
587 int rm_ofs = neon_reg_offset(a->vm, 0);
589 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
590 return false;
593 /* UNDEF accesses to D16-D31 if they don't exist. */
594 if (!dc_isar_feature(aa32_simd_r32, s) &&
595 ((a->vd | a->vn | a->vm) & 0x10)) {
596 return false;
599 if ((a->vn | a->vm | a->vd) & a->q) {
600 return false;
603 if (!vfp_access_check(s)) {
604 return true;
607 fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
608 return true;
611 #define DO_3SAME(INSN, FUNC) \
612 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
614 return do_3same(s, a, FUNC); \
617 DO_3SAME(VADD, tcg_gen_gvec_add)
618 DO_3SAME(VSUB, tcg_gen_gvec_sub)
619 DO_3SAME(VAND, tcg_gen_gvec_and)
620 DO_3SAME(VBIC, tcg_gen_gvec_andc)
621 DO_3SAME(VORR, tcg_gen_gvec_or)
622 DO_3SAME(VORN, tcg_gen_gvec_orc)
623 DO_3SAME(VEOR, tcg_gen_gvec_xor)
624 DO_3SAME(VSHL_S, gen_gvec_sshl)
625 DO_3SAME(VSHL_U, gen_gvec_ushl)
626 DO_3SAME(VQADD_S, gen_gvec_sqadd_qc)
627 DO_3SAME(VQADD_U, gen_gvec_uqadd_qc)
628 DO_3SAME(VQSUB_S, gen_gvec_sqsub_qc)
629 DO_3SAME(VQSUB_U, gen_gvec_uqsub_qc)
631 /* These insns are all gvec_bitsel but with the inputs in various orders. */
632 #define DO_3SAME_BITSEL(INSN, O1, O2, O3) \
633 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
634 uint32_t rn_ofs, uint32_t rm_ofs, \
635 uint32_t oprsz, uint32_t maxsz) \
637 tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz); \
639 DO_3SAME(INSN, gen_##INSN##_3s)
641 DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
642 DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
643 DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
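/*
 * Note (added): tcg_gen_gvec_bitsel(vece, d, sel, t, f) computes
 * d = (t & sel) | (f & ~sel), so the operand orders above give:
 *   VBSL: Vd = (Vn & Vd) | (Vm & ~Vd)
 *   VBIT: Vd = (Vn & Vm) | (Vd & ~Vm)
 *   VBIF: Vd = (Vd & Vm) | (Vn & ~Vm)
 */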
645 #define DO_3SAME_NO_SZ_3(INSN, FUNC) \
646 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
648 if (a->size == 3) { \
649 return false; \
651 return do_3same(s, a, FUNC); \
654 DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
655 DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
656 DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
657 DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
658 DO_3SAME_NO_SZ_3(VMUL, tcg_gen_gvec_mul)
659 DO_3SAME_NO_SZ_3(VMLA, gen_gvec_mla)
660 DO_3SAME_NO_SZ_3(VMLS, gen_gvec_mls)
661 DO_3SAME_NO_SZ_3(VTST, gen_gvec_cmtst)
662 DO_3SAME_NO_SZ_3(VABD_S, gen_gvec_sabd)
663 DO_3SAME_NO_SZ_3(VABA_S, gen_gvec_saba)
664 DO_3SAME_NO_SZ_3(VABD_U, gen_gvec_uabd)
665 DO_3SAME_NO_SZ_3(VABA_U, gen_gvec_uaba)
667 #define DO_3SAME_CMP(INSN, COND) \
668 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
669 uint32_t rn_ofs, uint32_t rm_ofs, \
670 uint32_t oprsz, uint32_t maxsz) \
672 tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
674 DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)
676 DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
677 DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
678 DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
679 DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
680 DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
682 #define WRAP_OOL_FN(WRAPNAME, FUNC) \
683 static void WRAPNAME(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, \
684 uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz) \
686 tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, 0, FUNC); \
689 WRAP_OOL_FN(gen_VMUL_p_3s, gen_helper_gvec_pmul_b)
691 static bool trans_VMUL_p_3s(DisasContext *s, arg_3same *a)
693 if (a->size != 0) {
694 return false;
696 return do_3same(s, a, gen_VMUL_p_3s);
699 #define DO_VQRDMLAH(INSN, FUNC) \
700 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
702 if (!dc_isar_feature(aa32_rdm, s)) { \
703 return false; \
705 if (a->size != 1 && a->size != 2) { \
706 return false; \
708 return do_3same(s, a, FUNC); \
711 DO_VQRDMLAH(VQRDMLAH, gen_gvec_sqrdmlah_qc)
712 DO_VQRDMLAH(VQRDMLSH, gen_gvec_sqrdmlsh_qc)
714 #define DO_SHA1(NAME, FUNC) \
715 WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
716 static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
718 if (!dc_isar_feature(aa32_sha1, s)) { \
719 return false; \
721 return do_3same(s, a, gen_##NAME##_3s); \
724 DO_SHA1(SHA1C, gen_helper_crypto_sha1c)
725 DO_SHA1(SHA1P, gen_helper_crypto_sha1p)
726 DO_SHA1(SHA1M, gen_helper_crypto_sha1m)
727 DO_SHA1(SHA1SU0, gen_helper_crypto_sha1su0)
729 #define DO_SHA2(NAME, FUNC) \
730 WRAP_OOL_FN(gen_##NAME##_3s, FUNC) \
731 static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a) \
733 if (!dc_isar_feature(aa32_sha2, s)) { \
734 return false; \
736 return do_3same(s, a, gen_##NAME##_3s); \
739 DO_SHA2(SHA256H, gen_helper_crypto_sha256h)
740 DO_SHA2(SHA256H2, gen_helper_crypto_sha256h2)
741 DO_SHA2(SHA256SU1, gen_helper_crypto_sha256su1)
743 #define DO_3SAME_64(INSN, FUNC) \
744 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
745 uint32_t rn_ofs, uint32_t rm_ofs, \
746 uint32_t oprsz, uint32_t maxsz) \
748 static const GVecGen3 op = { .fni8 = FUNC }; \
749 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &op); \
751 DO_3SAME(INSN, gen_##INSN##_3s)
753 #define DO_3SAME_64_ENV(INSN, FUNC) \
754 static void gen_##INSN##_elt(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m) \
756 FUNC(d, cpu_env, n, m); \
758 DO_3SAME_64(INSN, gen_##INSN##_elt)
760 DO_3SAME_64(VRSHL_S64, gen_helper_neon_rshl_s64)
761 DO_3SAME_64(VRSHL_U64, gen_helper_neon_rshl_u64)
762 DO_3SAME_64_ENV(VQSHL_S64, gen_helper_neon_qshl_s64)
763 DO_3SAME_64_ENV(VQSHL_U64, gen_helper_neon_qshl_u64)
764 DO_3SAME_64_ENV(VQRSHL_S64, gen_helper_neon_qrshl_s64)
765 DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
767 #define DO_3SAME_32(INSN, FUNC) \
768 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
769 uint32_t rn_ofs, uint32_t rm_ofs, \
770 uint32_t oprsz, uint32_t maxsz) \
772 static const GVecGen3 ops[4] = { \
773 { .fni4 = gen_helper_neon_##FUNC##8 }, \
774 { .fni4 = gen_helper_neon_##FUNC##16 }, \
775 { .fni4 = gen_helper_neon_##FUNC##32 }, \
776 { 0 }, \
777 }; \
778 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
780 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
782 if (a->size > 2) { \
783 return false; \
785 return do_3same(s, a, gen_##INSN##_3s); \
789 * Some helper functions need to be passed the cpu_env. In order
790 * to use those with the gvec APIs like tcg_gen_gvec_3() we need
791 * to create wrapper functions whose prototype is a NeonGenTwoOpFn()
792 * and which call a NeonGenTwoOpEnvFn().
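* For example, DO_3SAME_32_ENV(VQSHL_S, qshl_s) below uses
* WRAP_ENV_FN(gen_VQSHL_S_tramp8, gen_helper_neon_qshl_s8) to define
* gen_VQSHL_S_tramp8(d, n, m), which simply calls
* gen_helper_neon_qshl_s8(d, cpu_env, n, m).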
794 #define WRAP_ENV_FN(WRAPNAME, FUNC) \
795 static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m) \
797 FUNC(d, cpu_env, n, m); \
800 #define DO_3SAME_32_ENV(INSN, FUNC) \
801 WRAP_ENV_FN(gen_##INSN##_tramp8, gen_helper_neon_##FUNC##8); \
802 WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##16); \
803 WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##32); \
804 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
805 uint32_t rn_ofs, uint32_t rm_ofs, \
806 uint32_t oprsz, uint32_t maxsz) \
808 static const GVecGen3 ops[4] = { \
809 { .fni4 = gen_##INSN##_tramp8 }, \
810 { .fni4 = gen_##INSN##_tramp16 }, \
811 { .fni4 = gen_##INSN##_tramp32 }, \
812 { 0 }, \
813 }; \
814 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
816 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
818 if (a->size > 2) { \
819 return false; \
821 return do_3same(s, a, gen_##INSN##_3s); \
824 DO_3SAME_32(VHADD_S, hadd_s)
825 DO_3SAME_32(VHADD_U, hadd_u)
826 DO_3SAME_32(VHSUB_S, hsub_s)
827 DO_3SAME_32(VHSUB_U, hsub_u)
828 DO_3SAME_32(VRHADD_S, rhadd_s)
829 DO_3SAME_32(VRHADD_U, rhadd_u)
830 DO_3SAME_32(VRSHL_S, rshl_s)
831 DO_3SAME_32(VRSHL_U, rshl_u)
833 DO_3SAME_32_ENV(VQSHL_S, qshl_s)
834 DO_3SAME_32_ENV(VQSHL_U, qshl_u)
835 DO_3SAME_32_ENV(VQRSHL_S, qrshl_s)
836 DO_3SAME_32_ENV(VQRSHL_U, qrshl_u)
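/*
 * Note (added): for the pairwise operations below, the low half of the
 * result is built from adjacent pairs of elements of Vn and the high
 * half from adjacent pairs of elements of Vm.
 */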
838 static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn)
840 /* Operations handled pairwise 32 bits at a time */
841 TCGv_i32 tmp, tmp2, tmp3;
843 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
844 return false;
847 /* UNDEF accesses to D16-D31 if they don't exist. */
848 if (!dc_isar_feature(aa32_simd_r32, s) &&
849 ((a->vd | a->vn | a->vm) & 0x10)) {
850 return false;
853 if (a->size == 3) {
854 return false;
857 if (!vfp_access_check(s)) {
858 return true;
861 assert(a->q == 0); /* enforced by decode patterns */
864 * Note that we have to be careful not to clobber the source operands
865 * in the "vm == vd" case by storing the result of the first pass too
866 * early. Since Q is 0 there are always just two passes, so instead
867 * of a complicated loop over each pass we just unroll.
869 tmp = neon_load_reg(a->vn, 0);
870 tmp2 = neon_load_reg(a->vn, 1);
871 fn(tmp, tmp, tmp2);
872 tcg_temp_free_i32(tmp2);
874 tmp3 = neon_load_reg(a->vm, 0);
875 tmp2 = neon_load_reg(a->vm, 1);
876 fn(tmp3, tmp3, tmp2);
877 tcg_temp_free_i32(tmp2);
879 neon_store_reg(a->vd, 0, tmp);
880 neon_store_reg(a->vd, 1, tmp3);
881 return true;
884 #define DO_3SAME_PAIR(INSN, func) \
885 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
887 static NeonGenTwoOpFn * const fns[] = { \
888 gen_helper_neon_##func##8, \
889 gen_helper_neon_##func##16, \
890 gen_helper_neon_##func##32, \
891 }; \
892 if (a->size > 2) { \
893 return false; \
895 return do_3same_pair(s, a, fns[a->size]); \
898 /* 32-bit pairwise ops end up the same as the elementwise versions. */
899 #define gen_helper_neon_pmax_s32 tcg_gen_smax_i32
900 #define gen_helper_neon_pmax_u32 tcg_gen_umax_i32
901 #define gen_helper_neon_pmin_s32 tcg_gen_smin_i32
902 #define gen_helper_neon_pmin_u32 tcg_gen_umin_i32
903 #define gen_helper_neon_padd_u32 tcg_gen_add_i32
905 DO_3SAME_PAIR(VPMAX_S, pmax_s)
906 DO_3SAME_PAIR(VPMIN_S, pmin_s)
907 DO_3SAME_PAIR(VPMAX_U, pmax_u)
908 DO_3SAME_PAIR(VPMIN_U, pmin_u)
909 DO_3SAME_PAIR(VPADD, padd_u)
911 #define DO_3SAME_VQDMULH(INSN, FUNC) \
912 WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##_s16); \
913 WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##_s32); \
914 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
915 uint32_t rn_ofs, uint32_t rm_ofs, \
916 uint32_t oprsz, uint32_t maxsz) \
918 static const GVecGen3 ops[2] = { \
919 { .fni4 = gen_##INSN##_tramp16 }, \
920 { .fni4 = gen_##INSN##_tramp32 }, \
921 }; \
922 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece - 1]); \
924 static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a) \
926 if (a->size != 1 && a->size != 2) { \
927 return false; \
929 return do_3same(s, a, gen_##INSN##_3s); \
932 DO_3SAME_VQDMULH(VQDMULH, qdmulh)
933 DO_3SAME_VQDMULH(VQRDMULH, qrdmulh)
935 static bool do_3same_fp(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn,
936 bool reads_vd)
939 * FP operations handled elementwise 32 bits at a time.
940 * If reads_vd is true then the old value of Vd will be
941 * loaded before calling the callback function. This is
942 * used for multiply-accumulate type operations.
944 TCGv_i32 tmp, tmp2;
945 int pass;
947 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
948 return false;
951 /* UNDEF accesses to D16-D31 if they don't exist. */
952 if (!dc_isar_feature(aa32_simd_r32, s) &&
953 ((a->vd | a->vn | a->vm) & 0x10)) {
954 return false;
957 if ((a->vn | a->vm | a->vd) & a->q) {
958 return false;
961 if (!vfp_access_check(s)) {
962 return true;
965 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
966 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
967 tmp = neon_load_reg(a->vn, pass);
968 tmp2 = neon_load_reg(a->vm, pass);
969 if (reads_vd) {
970 TCGv_i32 tmp_rd = neon_load_reg(a->vd, pass);
971 fn(tmp_rd, tmp, tmp2, fpstatus);
972 neon_store_reg(a->vd, pass, tmp_rd);
973 tcg_temp_free_i32(tmp);
974 } else {
975 fn(tmp, tmp, tmp2, fpstatus);
976 neon_store_reg(a->vd, pass, tmp);
978 tcg_temp_free_i32(tmp2);
980 tcg_temp_free_ptr(fpstatus);
981 return true;
985 * For all the functions using this macro, size == 1 means fp16,
986 * which is an architecture extension we don't implement yet.
988 #define DO_3S_FP_GVEC(INSN,FUNC) \
989 static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs, \
990 uint32_t rn_ofs, uint32_t rm_ofs, \
991 uint32_t oprsz, uint32_t maxsz) \
993 TCGv_ptr fpst = get_fpstatus_ptr(1); \
994 tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst, \
995 oprsz, maxsz, 0, FUNC); \
996 tcg_temp_free_ptr(fpst); \
998 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
1000 if (a->size != 0) { \
1001 /* TODO fp16 support */ \
1002 return false; \
1004 return do_3same(s, a, gen_##INSN##_3s); \
1008 DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s)
1009 DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s)
1010 DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s)
1011 DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s)
1014 * For all the functions using this macro, size == 1 means fp16,
1015 * which is an architecture extension we don't implement yet.
1017 #define DO_3S_FP(INSN,FUNC,READS_VD) \
1018 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
1020 if (a->size != 0) { \
1021 /* TODO fp16 support */ \
1022 return false; \
1024 return do_3same_fp(s, a, FUNC, READS_VD); \
1027 DO_3S_FP(VCEQ, gen_helper_neon_ceq_f32, false)
1028 DO_3S_FP(VCGE, gen_helper_neon_cge_f32, false)
1029 DO_3S_FP(VCGT, gen_helper_neon_cgt_f32, false)
1030 DO_3S_FP(VACGE, gen_helper_neon_acge_f32, false)
1031 DO_3S_FP(VACGT, gen_helper_neon_acgt_f32, false)
1032 DO_3S_FP(VMAX, gen_helper_vfp_maxs, false)
1033 DO_3S_FP(VMIN, gen_helper_vfp_mins, false)
1035 static void gen_VMLA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1036 TCGv_ptr fpstatus)
1038 gen_helper_vfp_muls(vn, vn, vm, fpstatus);
1039 gen_helper_vfp_adds(vd, vd, vn, fpstatus);
1042 static void gen_VMLS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1043 TCGv_ptr fpstatus)
1045 gen_helper_vfp_muls(vn, vn, vm, fpstatus);
1046 gen_helper_vfp_subs(vd, vd, vn, fpstatus);
1049 DO_3S_FP(VMLA, gen_VMLA_fp_3s, true)
1050 DO_3S_FP(VMLS, gen_VMLS_fp_3s, true)
1052 static bool trans_VMAXNM_fp_3s(DisasContext *s, arg_3same *a)
1054 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
1055 return false;
1058 if (a->size != 0) {
1059 /* TODO fp16 support */
1060 return false;
1063 return do_3same_fp(s, a, gen_helper_vfp_maxnums, false);
1066 static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
1068 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
1069 return false;
1072 if (a->size != 0) {
1073 /* TODO fp16 support */
1074 return false;
1077 return do_3same_fp(s, a, gen_helper_vfp_minnums, false);
1080 WRAP_ENV_FN(gen_VRECPS_tramp, gen_helper_recps_f32)
1082 static void gen_VRECPS_fp_3s(unsigned vece, uint32_t rd_ofs,
1083 uint32_t rn_ofs, uint32_t rm_ofs,
1084 uint32_t oprsz, uint32_t maxsz)
1086 static const GVecGen3 ops = { .fni4 = gen_VRECPS_tramp };
1087 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
1090 static bool trans_VRECPS_fp_3s(DisasContext *s, arg_3same *a)
1092 if (a->size != 0) {
1093 /* TODO fp16 support */
1094 return false;
1097 return do_3same(s, a, gen_VRECPS_fp_3s);
1100 WRAP_ENV_FN(gen_VRSQRTS_tramp, gen_helper_rsqrts_f32)
1102 static void gen_VRSQRTS_fp_3s(unsigned vece, uint32_t rd_ofs,
1103 uint32_t rn_ofs, uint32_t rm_ofs,
1104 uint32_t oprsz, uint32_t maxsz)
1106 static const GVecGen3 ops = { .fni4 = gen_VRSQRTS_tramp };
1107 tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
1110 static bool trans_VRSQRTS_fp_3s(DisasContext *s, arg_3same *a)
1112 if (a->size != 0) {
1113 /* TODO fp16 support */
1114 return false;
1117 return do_3same(s, a, gen_VRSQRTS_fp_3s);
1120 static void gen_VFMA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1121 TCGv_ptr fpstatus)
1123 gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
1126 static bool trans_VFMA_fp_3s(DisasContext *s, arg_3same *a)
1128 if (!dc_isar_feature(aa32_simdfmac, s)) {
1129 return false;
1132 if (a->size != 0) {
1133 /* TODO fp16 support */
1134 return false;
1137 return do_3same_fp(s, a, gen_VFMA_fp_3s, true);
1140 static void gen_VFMS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
1141 TCGv_ptr fpstatus)
1143 gen_helper_vfp_negs(vn, vn);
1144 gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
1147 static bool trans_VFMS_fp_3s(DisasContext *s, arg_3same *a)
1149 if (!dc_isar_feature(aa32_simdfmac, s)) {
1150 return false;
1153 if (a->size != 0) {
1154 /* TODO fp16 support */
1155 return false;
1158 return do_3same_fp(s, a, gen_VFMS_fp_3s, true);
1161 static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
1163 /* FP operations handled pairwise 32 bits at a time */
1164 TCGv_i32 tmp, tmp2, tmp3;
1165 TCGv_ptr fpstatus;
1167 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1168 return false;
1171 /* UNDEF accesses to D16-D31 if they don't exist. */
1172 if (!dc_isar_feature(aa32_simd_r32, s) &&
1173 ((a->vd | a->vn | a->vm) & 0x10)) {
1174 return false;
1177 if (!vfp_access_check(s)) {
1178 return true;
1181 assert(a->q == 0); /* enforced by decode patterns */
1184 * Note that we have to be careful not to clobber the source operands
1185 * in the "vm == vd" case by storing the result of the first pass too
1186 * early. Since Q is 0 there are always just two passes, so instead
1187 * of a complicated loop over each pass we just unroll.
1189 fpstatus = get_fpstatus_ptr(1);
1190 tmp = neon_load_reg(a->vn, 0);
1191 tmp2 = neon_load_reg(a->vn, 1);
1192 fn(tmp, tmp, tmp2, fpstatus);
1193 tcg_temp_free_i32(tmp2);
1195 tmp3 = neon_load_reg(a->vm, 0);
1196 tmp2 = neon_load_reg(a->vm, 1);
1197 fn(tmp3, tmp3, tmp2, fpstatus);
1198 tcg_temp_free_i32(tmp2);
1199 tcg_temp_free_ptr(fpstatus);
1201 neon_store_reg(a->vd, 0, tmp);
1202 neon_store_reg(a->vd, 1, tmp3);
1203 return true;
1207 * For all the functions using this macro, size == 1 means fp16,
1208 * which is an architecture extension we don't implement yet.
1210 #define DO_3S_FP_PAIR(INSN,FUNC) \
1211 static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a) \
1213 if (a->size != 0) { \
1214 /* TODO fp16 support */ \
1215 return false; \
1217 return do_3same_fp_pair(s, a, FUNC); \
1220 DO_3S_FP_PAIR(VPADD, gen_helper_vfp_adds)
1221 DO_3S_FP_PAIR(VPMAX, gen_helper_vfp_maxs)
1222 DO_3S_FP_PAIR(VPMIN, gen_helper_vfp_mins)
1224 static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
1226 /* Handle a 2-reg-shift insn which can be vectorized. */
1227 int vec_size = a->q ? 16 : 8;
1228 int rd_ofs = neon_reg_offset(a->vd, 0);
1229 int rm_ofs = neon_reg_offset(a->vm, 0);
1231 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1232 return false;
1235 /* UNDEF accesses to D16-D31 if they don't exist. */
1236 if (!dc_isar_feature(aa32_simd_r32, s) &&
1237 ((a->vd | a->vm) & 0x10)) {
1238 return false;
1241 if ((a->vm | a->vd) & a->q) {
1242 return false;
1245 if (!vfp_access_check(s)) {
1246 return true;
1249 fn(a->size, rd_ofs, rm_ofs, a->shift, vec_size, vec_size);
1250 return true;
1253 #define DO_2SH(INSN, FUNC) \
1254 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1256 return do_vector_2sh(s, a, FUNC); \
1259 DO_2SH(VSHL, tcg_gen_gvec_shli)
1260 DO_2SH(VSLI, gen_gvec_sli)
1261 DO_2SH(VSRI, gen_gvec_sri)
1262 DO_2SH(VSRA_S, gen_gvec_ssra)
1263 DO_2SH(VSRA_U, gen_gvec_usra)
1264 DO_2SH(VRSHR_S, gen_gvec_srshr)
1265 DO_2SH(VRSHR_U, gen_gvec_urshr)
1266 DO_2SH(VRSRA_S, gen_gvec_srsra)
1267 DO_2SH(VRSRA_U, gen_gvec_ursra)
1269 static bool trans_VSHR_S_2sh(DisasContext *s, arg_2reg_shift *a)
1271 /* Signed shift out of range results in all-sign-bits */
1272 a->shift = MIN(a->shift, (8 << a->size) - 1);
1273 return do_vector_2sh(s, a, tcg_gen_gvec_sari);
1276 static void gen_zero_rd_2sh(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
1277 int64_t shift, uint32_t oprsz, uint32_t maxsz)
1279 tcg_gen_gvec_dup_imm(vece, rd_ofs, oprsz, maxsz, 0);
1282 static bool trans_VSHR_U_2sh(DisasContext *s, arg_2reg_shift *a)
1284 /* Shift out of range is architecturally valid and results in zero. */
1285 if (a->shift >= (8 << a->size)) {
1286 return do_vector_2sh(s, a, gen_zero_rd_2sh);
1287 } else {
1288 return do_vector_2sh(s, a, tcg_gen_gvec_shri);
1292 static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
1293 NeonGenTwo64OpEnvFn *fn)
1296 * 2-reg-and-shift operations, size == 3 case, where the
1297 * function needs to be passed cpu_env.
1299 TCGv_i64 constimm;
1300 int pass;
1302 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1303 return false;
1306 /* UNDEF accesses to D16-D31 if they don't exist. */
1307 if (!dc_isar_feature(aa32_simd_r32, s) &&
1308 ((a->vd | a->vm) & 0x10)) {
1309 return false;
1312 if ((a->vm | a->vd) & a->q) {
1313 return false;
1316 if (!vfp_access_check(s)) {
1317 return true;
1321 * To avoid excessive duplication of ops we implement shift
1322 * by immediate using the variable shift operations.
1324 constimm = tcg_const_i64(dup_const(a->size, a->shift));
1326 for (pass = 0; pass < a->q + 1; pass++) {
1327 TCGv_i64 tmp = tcg_temp_new_i64();
1329 neon_load_reg64(tmp, a->vm + pass);
1330 fn(tmp, cpu_env, tmp, constimm);
1331 neon_store_reg64(tmp, a->vd + pass);
1332 tcg_temp_free_i64(tmp);
1334 tcg_temp_free_i64(constimm);
1335 return true;
1338 static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
1339 NeonGenTwoOpEnvFn *fn)
1342 * 2-reg-and-shift operations, size < 3 case, where the
1343 * helper needs to be passed cpu_env.
1345 TCGv_i32 constimm;
1346 int pass;
1348 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1349 return false;
1352 /* UNDEF accesses to D16-D31 if they don't exist. */
1353 if (!dc_isar_feature(aa32_simd_r32, s) &&
1354 ((a->vd | a->vm) & 0x10)) {
1355 return false;
1358 if ((a->vm | a->vd) & a->q) {
1359 return false;
1362 if (!vfp_access_check(s)) {
1363 return true;
1367 * To avoid excessive duplication of ops we implement shift
1368 * by immediate using the variable shift operations.
1370 constimm = tcg_const_i32(dup_const(a->size, a->shift));
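/*
 * e.g. a->size == 0 (byte elements) with a->shift == 2 makes constimm
 * 0x02020202: the same shift count replicated into every lane, as the
 * variable-shift helpers expect.
 */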
1372 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
1373 TCGv_i32 tmp = neon_load_reg(a->vm, pass);
1374 fn(tmp, cpu_env, tmp, constimm);
1375 neon_store_reg(a->vd, pass, tmp);
1377 tcg_temp_free_i32(constimm);
1378 return true;
1381 #define DO_2SHIFT_ENV(INSN, FUNC) \
1382 static bool trans_##INSN##_64_2sh(DisasContext *s, arg_2reg_shift *a) \
1384 return do_2shift_env_64(s, a, gen_helper_neon_##FUNC##64); \
1386 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1388 static NeonGenTwoOpEnvFn * const fns[] = { \
1389 gen_helper_neon_##FUNC##8, \
1390 gen_helper_neon_##FUNC##16, \
1391 gen_helper_neon_##FUNC##32, \
1392 }; \
1393 assert(a->size < ARRAY_SIZE(fns)); \
1394 return do_2shift_env_32(s, a, fns[a->size]); \
1397 DO_2SHIFT_ENV(VQSHLU, qshlu_s)
1398 DO_2SHIFT_ENV(VQSHL_U, qshl_u)
1399 DO_2SHIFT_ENV(VQSHL_S, qshl_s)
1401 static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
1402 NeonGenTwo64OpFn *shiftfn,
1403 NeonGenNarrowEnvFn *narrowfn)
1405 /* 2-reg-and-shift narrowing-shift operations, size == 3 case */
1406 TCGv_i64 constimm, rm1, rm2;
1407 TCGv_i32 rd;
1409 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1410 return false;
1413 /* UNDEF accesses to D16-D31 if they don't exist. */
1414 if (!dc_isar_feature(aa32_simd_r32, s) &&
1415 ((a->vd | a->vm) & 0x10)) {
1416 return false;
1419 if (a->vm & 1) {
1420 return false;
1423 if (!vfp_access_check(s)) {
1424 return true;
1428 * This is always a right shift, and the shiftfn is always a
1429 * left-shift helper, which thus needs the negated shift count.
1431 constimm = tcg_const_i64(-a->shift);
1432 rm1 = tcg_temp_new_i64();
1433 rm2 = tcg_temp_new_i64();
1435 /* Load both inputs first to avoid potential overwrite if rm == rd */
1436 neon_load_reg64(rm1, a->vm);
1437 neon_load_reg64(rm2, a->vm + 1);
1439 shiftfn(rm1, rm1, constimm);
1440 rd = tcg_temp_new_i32();
1441 narrowfn(rd, cpu_env, rm1);
1442 neon_store_reg(a->vd, 0, rd);
1444 shiftfn(rm2, rm2, constimm);
1445 rd = tcg_temp_new_i32();
1446 narrowfn(rd, cpu_env, rm2);
1447 neon_store_reg(a->vd, 1, rd);
1449 tcg_temp_free_i64(rm1);
1450 tcg_temp_free_i64(rm2);
1451 tcg_temp_free_i64(constimm);
1453 return true;
1456 static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
1457 NeonGenTwoOpFn *shiftfn,
1458 NeonGenNarrowEnvFn *narrowfn)
1460 /* 2-reg-and-shift narrowing-shift operations, size < 3 case */
1461 TCGv_i32 constimm, rm1, rm2, rm3, rm4;
1462 TCGv_i64 rtmp;
1463 uint32_t imm;
1465 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1466 return false;
1469 /* UNDEF accesses to D16-D31 if they don't exist. */
1470 if (!dc_isar_feature(aa32_simd_r32, s) &&
1471 ((a->vd | a->vm) & 0x10)) {
1472 return false;
1475 if (a->vm & 1) {
1476 return false;
1479 if (!vfp_access_check(s)) {
1480 return true;
1484 * This is always a right shift, and the shiftfn is always a
1485 * left-shift helper, which thus needs the negated shift count
1486 * duplicated into each lane of the immediate value.
1488 if (a->size == 1) {
1489 imm = (uint16_t)(-a->shift);
1490 imm |= imm << 16;
1491 } else {
1492 /* size == 2 */
1493 imm = -a->shift;
1495 constimm = tcg_const_i32(imm);
1497 /* Load all inputs first to avoid potential overwrite */
1498 rm1 = neon_load_reg(a->vm, 0);
1499 rm2 = neon_load_reg(a->vm, 1);
1500 rm3 = neon_load_reg(a->vm + 1, 0);
1501 rm4 = neon_load_reg(a->vm + 1, 1);
1502 rtmp = tcg_temp_new_i64();
1504 shiftfn(rm1, rm1, constimm);
1505 shiftfn(rm2, rm2, constimm);
1507 tcg_gen_concat_i32_i64(rtmp, rm1, rm2);
1508 tcg_temp_free_i32(rm2);
1510 narrowfn(rm1, cpu_env, rtmp);
1511 neon_store_reg(a->vd, 0, rm1);
1513 shiftfn(rm3, rm3, constimm);
1514 shiftfn(rm4, rm4, constimm);
1515 tcg_temp_free_i32(constimm);
1517 tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
1518 tcg_temp_free_i32(rm4);
1520 narrowfn(rm3, cpu_env, rtmp);
1521 tcg_temp_free_i64(rtmp);
1522 neon_store_reg(a->vd, 1, rm3);
1523 return true;
1526 #define DO_2SN_64(INSN, FUNC, NARROWFUNC) \
1527 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1529 return do_2shift_narrow_64(s, a, FUNC, NARROWFUNC); \
1531 #define DO_2SN_32(INSN, FUNC, NARROWFUNC) \
1532 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1534 return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC); \
1537 static void gen_neon_narrow_u32(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1539 tcg_gen_extrl_i64_i32(dest, src);
1542 static void gen_neon_narrow_u16(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1544 gen_helper_neon_narrow_u16(dest, src);
1547 static void gen_neon_narrow_u8(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
1549 gen_helper_neon_narrow_u8(dest, src);
1552 DO_2SN_64(VSHRN_64, gen_ushl_i64, gen_neon_narrow_u32)
1553 DO_2SN_32(VSHRN_32, gen_ushl_i32, gen_neon_narrow_u16)
1554 DO_2SN_32(VSHRN_16, gen_helper_neon_shl_u16, gen_neon_narrow_u8)
1556 DO_2SN_64(VRSHRN_64, gen_helper_neon_rshl_u64, gen_neon_narrow_u32)
1557 DO_2SN_32(VRSHRN_32, gen_helper_neon_rshl_u32, gen_neon_narrow_u16)
1558 DO_2SN_32(VRSHRN_16, gen_helper_neon_rshl_u16, gen_neon_narrow_u8)
1560 DO_2SN_64(VQSHRUN_64, gen_sshl_i64, gen_helper_neon_unarrow_sat32)
1561 DO_2SN_32(VQSHRUN_32, gen_sshl_i32, gen_helper_neon_unarrow_sat16)
1562 DO_2SN_32(VQSHRUN_16, gen_helper_neon_shl_s16, gen_helper_neon_unarrow_sat8)
1564 DO_2SN_64(VQRSHRUN_64, gen_helper_neon_rshl_s64, gen_helper_neon_unarrow_sat32)
1565 DO_2SN_32(VQRSHRUN_32, gen_helper_neon_rshl_s32, gen_helper_neon_unarrow_sat16)
1566 DO_2SN_32(VQRSHRUN_16, gen_helper_neon_rshl_s16, gen_helper_neon_unarrow_sat8)
1567 DO_2SN_64(VQSHRN_S64, gen_sshl_i64, gen_helper_neon_narrow_sat_s32)
1568 DO_2SN_32(VQSHRN_S32, gen_sshl_i32, gen_helper_neon_narrow_sat_s16)
1569 DO_2SN_32(VQSHRN_S16, gen_helper_neon_shl_s16, gen_helper_neon_narrow_sat_s8)
1571 DO_2SN_64(VQRSHRN_S64, gen_helper_neon_rshl_s64, gen_helper_neon_narrow_sat_s32)
1572 DO_2SN_32(VQRSHRN_S32, gen_helper_neon_rshl_s32, gen_helper_neon_narrow_sat_s16)
1573 DO_2SN_32(VQRSHRN_S16, gen_helper_neon_rshl_s16, gen_helper_neon_narrow_sat_s8)
1575 DO_2SN_64(VQSHRN_U64, gen_ushl_i64, gen_helper_neon_narrow_sat_u32)
1576 DO_2SN_32(VQSHRN_U32, gen_ushl_i32, gen_helper_neon_narrow_sat_u16)
1577 DO_2SN_32(VQSHRN_U16, gen_helper_neon_shl_u16, gen_helper_neon_narrow_sat_u8)
1579 DO_2SN_64(VQRSHRN_U64, gen_helper_neon_rshl_u64, gen_helper_neon_narrow_sat_u32)
1580 DO_2SN_32(VQRSHRN_U32, gen_helper_neon_rshl_u32, gen_helper_neon_narrow_sat_u16)
1581 DO_2SN_32(VQRSHRN_U16, gen_helper_neon_rshl_u16, gen_helper_neon_narrow_sat_u8)
1583 static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
1584 NeonGenWidenFn *widenfn, bool u)
1586 TCGv_i64 tmp;
1587 TCGv_i32 rm0, rm1;
1588 uint64_t widen_mask = 0;
1590 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1591 return false;
1594 /* UNDEF accesses to D16-D31 if they don't exist. */
1595 if (!dc_isar_feature(aa32_simd_r32, s) &&
1596 ((a->vd | a->vm) & 0x10)) {
1597 return false;
1600 if (a->vd & 1) {
1601 return false;
1604 if (!vfp_access_check(s)) {
1605 return true;
1609 * This is a widen-and-shift operation. The shift is always less
1610 * than the width of the source type, so after widening the input
1611 * vector we can simply shift the whole 64-bit widened register,
1612 * and then clear the potential overflow bits resulting from left
1613 * bits of the narrow input appearing as right bits of the left
1614 * neighbour narrow input. Calculate a mask of bits to clear.
1616 if ((a->shift != 0) && (a->size < 2 || u)) {
1617 int esize = 8 << a->size;
1618 widen_mask = MAKE_64BIT_MASK(0, esize);
1619 widen_mask >>= esize - a->shift;
1620 widen_mask = dup_const(a->size + 1, widen_mask);
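/*
 * e.g. size == 0, shift == 3: esize == 8, so widen_mask becomes
 * 0xff >> 5 == 0x07, replicated into each 16-bit lane to give
 * 0x0007000700070007; these are the bits cleared after the shift.
 */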
1623 rm0 = neon_load_reg(a->vm, 0);
1624 rm1 = neon_load_reg(a->vm, 1);
1625 tmp = tcg_temp_new_i64();
1627 widenfn(tmp, rm0);
1628 tcg_temp_free_i32(rm0);
1629 if (a->shift != 0) {
1630 tcg_gen_shli_i64(tmp, tmp, a->shift);
1631 tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
1633 neon_store_reg64(tmp, a->vd);
1635 widenfn(tmp, rm1);
1636 tcg_temp_free_i32(rm1);
1637 if (a->shift != 0) {
1638 tcg_gen_shli_i64(tmp, tmp, a->shift);
1639 tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
1641 neon_store_reg64(tmp, a->vd + 1);
1642 tcg_temp_free_i64(tmp);
1643 return true;
1646 static bool trans_VSHLL_S_2sh(DisasContext *s, arg_2reg_shift *a)
1648 static NeonGenWidenFn * const widenfn[] = {
1649 gen_helper_neon_widen_s8,
1650 gen_helper_neon_widen_s16,
1651 tcg_gen_ext_i32_i64,
1653 return do_vshll_2sh(s, a, widenfn[a->size], false);
1656 static bool trans_VSHLL_U_2sh(DisasContext *s, arg_2reg_shift *a)
1658 static NeonGenWidenFn * const widenfn[] = {
1659 gen_helper_neon_widen_u8,
1660 gen_helper_neon_widen_u16,
1661 tcg_gen_extu_i32_i64,
1663 return do_vshll_2sh(s, a, widenfn[a->size], true);
1666 static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
1667 NeonGenTwoSingleOPFn *fn)
1669 /* FP operations in 2-reg-and-shift group */
1670 TCGv_i32 tmp, shiftv;
1671 TCGv_ptr fpstatus;
1672 int pass;
1674 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1675 return false;
1678 /* UNDEF accesses to D16-D31 if they don't exist. */
1679 if (!dc_isar_feature(aa32_simd_r32, s) &&
1680 ((a->vd | a->vm) & 0x10)) {
1681 return false;
1684 if ((a->vm | a->vd) & a->q) {
1685 return false;
1688 if (!vfp_access_check(s)) {
1689 return true;
1692 fpstatus = get_fpstatus_ptr(1);
1693 shiftv = tcg_const_i32(a->shift);
1694 for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
1695 tmp = neon_load_reg(a->vm, pass);
1696 fn(tmp, tmp, shiftv, fpstatus);
1697 neon_store_reg(a->vd, pass, tmp);
1699 tcg_temp_free_ptr(fpstatus);
1700 tcg_temp_free_i32(shiftv);
1701 return true;
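/*
 * Note (added): the VCVT fixed-point conversions below pass a->shift
 * (the number of fraction bits) to the VFP helper as its constant
 * shift operand.
 */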
1704 #define DO_FP_2SH(INSN, FUNC) \
1705 static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a) \
1707 return do_fp_2sh(s, a, FUNC); \
1710 DO_FP_2SH(VCVT_SF, gen_helper_vfp_sltos)
1711 DO_FP_2SH(VCVT_UF, gen_helper_vfp_ultos)
1712 DO_FP_2SH(VCVT_FS, gen_helper_vfp_tosls_round_to_zero)
1713 DO_FP_2SH(VCVT_FU, gen_helper_vfp_touls_round_to_zero)
1715 static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
1718 * Expand the encoded constant.
1719 * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
1720 * We choose to not special-case this and will behave as if a
1721 * valid constant encoding of 0 had been given.
1722 * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
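* For example imm=0xab, cmode=12, op=0 expands to (0xab << 8) | 0xff == 0xabff
* and is returned replicated to 0x0000abff0000abff; op=1 would bitwise-invert
* the expanded value before replication.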
1724 switch (cmode) {
1725 case 0: case 1:
1726 /* no-op */
1727 break;
1728 case 2: case 3:
1729 imm <<= 8;
1730 break;
1731 case 4: case 5:
1732 imm <<= 16;
1733 break;
1734 case 6: case 7:
1735 imm <<= 24;
1736 break;
1737 case 8: case 9:
1738 imm |= imm << 16;
1739 break;
1740 case 10: case 11:
1741 imm = (imm << 8) | (imm << 24);
1742 break;
1743 case 12:
1744 imm = (imm << 8) | 0xff;
1745 break;
1746 case 13:
1747 imm = (imm << 16) | 0xffff;
1748 break;
1749 case 14:
1750 if (op) {
1752 * This is the only case where the top and bottom 32 bits
1753 * of the encoded constant differ.
1755 uint64_t imm64 = 0;
1756 int n;
1758 for (n = 0; n < 8; n++) {
1759 if (imm & (1 << n)) {
1760 imm64 |= (0xffULL << (n * 8));
1763 return imm64;
1765 imm |= (imm << 8) | (imm << 16) | (imm << 24);
1766 break;
1767 case 15:
1768 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
1769 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
1770 break;
1772 if (op) {
1773 imm = ~imm;
1775 return dup_const(MO_32, imm);
1778 static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
1779 GVecGen2iFn *fn)
1781 uint64_t imm;
1782 int reg_ofs, vec_size;
1784 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1785 return false;
1788 /* UNDEF accesses to D16-D31 if they don't exist. */
1789 if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
1790 return false;
1793 if (a->vd & a->q) {
1794 return false;
1797 if (!vfp_access_check(s)) {
1798 return true;
1801 reg_ofs = neon_reg_offset(a->vd, 0);
1802 vec_size = a->q ? 16 : 8;
1803 imm = asimd_imm_const(a->imm, a->cmode, a->op);
1805 fn(MO_64, reg_ofs, reg_ofs, imm, vec_size, vec_size);
1806 return true;
1809 static void gen_VMOV_1r(unsigned vece, uint32_t dofs, uint32_t aofs,
1810 int64_t c, uint32_t oprsz, uint32_t maxsz)
1812 tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
1815 static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
1817 /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
1818 GVecGen2iFn *fn;
1820 if ((a->cmode & 1) && a->cmode < 12) {
1821 /* for op=1, the imm will be inverted, so BIC becomes AND. */
1822 fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
1823 } else {
1824 /* There is one unallocated cmode/op combination in this space */
1825 if (a->cmode == 15 && a->op == 1) {
1826 return false;
1828 fn = gen_VMOV_1r;
1830 return do_1reg_imm(s, a, fn);
1833 static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
1834 NeonGenWidenFn *widenfn,
1835 NeonGenTwo64OpFn *opfn,
1836 bool src1_wide)
1838 /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VADDW/VSUBW) */
1839 TCGv_i64 rn0_64, rn1_64, rm_64;
1840 TCGv_i32 rm;
1842 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1843 return false;
1846 /* UNDEF accesses to D16-D31 if they don't exist. */
1847 if (!dc_isar_feature(aa32_simd_r32, s) &&
1848 ((a->vd | a->vn | a->vm) & 0x10)) {
1849 return false;
1852 if (!widenfn || !opfn) {
1853 /* size == 3 case, which is an entirely different insn group */
1854 return false;
1857 if ((a->vd & 1) || (src1_wide && (a->vn & 1))) {
1858 return false;
1861 if (!vfp_access_check(s)) {
1862 return true;
1865 rn0_64 = tcg_temp_new_i64();
1866 rn1_64 = tcg_temp_new_i64();
1867 rm_64 = tcg_temp_new_i64();
1869 if (src1_wide) {
1870 neon_load_reg64(rn0_64, a->vn);
1871 } else {
1872 TCGv_i32 tmp = neon_load_reg(a->vn, 0);
1873 widenfn(rn0_64, tmp);
1874 tcg_temp_free_i32(tmp);
1876 rm = neon_load_reg(a->vm, 0);
1878 widenfn(rm_64, rm);
1879 tcg_temp_free_i32(rm);
1880 opfn(rn0_64, rn0_64, rm_64);
1883 * Load second pass inputs before storing the first pass result, to
1884 * avoid incorrect results if a narrow input overlaps with the result.
1886 if (src1_wide) {
1887 neon_load_reg64(rn1_64, a->vn + 1);
1888 } else {
1889 TCGv_i32 tmp = neon_load_reg(a->vn, 1);
1890 widenfn(rn1_64, tmp);
1891 tcg_temp_free_i32(tmp);
1893 rm = neon_load_reg(a->vm, 1);
1895 neon_store_reg64(rn0_64, a->vd);
1897 widenfn(rm_64, rm);
1898 tcg_temp_free_i32(rm);
1899 opfn(rn1_64, rn1_64, rm_64);
1900 neon_store_reg64(rn1_64, a->vd + 1);
1902 tcg_temp_free_i64(rn0_64);
1903 tcg_temp_free_i64(rn1_64);
1904 tcg_temp_free_i64(rm_64);
1906 return true;
1909 #define DO_PREWIDEN(INSN, S, EXT, OP, SRC1WIDE) \
1910 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1912 static NeonGenWidenFn * const widenfn[] = { \
1913 gen_helper_neon_widen_##S##8, \
1914 gen_helper_neon_widen_##S##16, \
1915 tcg_gen_##EXT##_i32_i64, \
1916 NULL, \
1917 }; \
1918 static NeonGenTwo64OpFn * const addfn[] = { \
1919 gen_helper_neon_##OP##l_u16, \
1920 gen_helper_neon_##OP##l_u32, \
1921 tcg_gen_##OP##_i64, \
1922 NULL, \
1923 }; \
1924 return do_prewiden_3d(s, a, widenfn[a->size], \
1925 addfn[a->size], SRC1WIDE); \
1928 DO_PREWIDEN(VADDL_S, s, ext, add, false)
1929 DO_PREWIDEN(VADDL_U, u, extu, add, false)
1930 DO_PREWIDEN(VSUBL_S, s, ext, sub, false)
1931 DO_PREWIDEN(VSUBL_U, u, extu, sub, false)
1932 DO_PREWIDEN(VADDW_S, s, ext, add, true)
1933 DO_PREWIDEN(VADDW_U, u, extu, add, true)
1934 DO_PREWIDEN(VSUBW_S, s, ext, sub, true)
1935 DO_PREWIDEN(VSUBW_U, u, extu, sub, true)
1937 static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
1938 NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
1940 /* 3-regs different lengths, narrowing (VADDHN/VSUBHN/VRADDHN/VRSUBHN) */
1941 TCGv_i64 rn_64, rm_64;
1942 TCGv_i32 rd0, rd1;
1944 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
1945 return false;
1948 /* UNDEF accesses to D16-D31 if they don't exist. */
1949 if (!dc_isar_feature(aa32_simd_r32, s) &&
1950 ((a->vd | a->vn | a->vm) & 0x10)) {
1951 return false;
1954 if (!opfn || !narrowfn) {
1955 /* size == 3 case, which is an entirely different insn group */
1956 return false;
1959 if ((a->vn | a->vm) & 1) {
1960 return false;
1963 if (!vfp_access_check(s)) {
1964 return true;
1967 rn_64 = tcg_temp_new_i64();
1968 rm_64 = tcg_temp_new_i64();
1969 rd0 = tcg_temp_new_i32();
1970 rd1 = tcg_temp_new_i32();
1972 neon_load_reg64(rn_64, a->vn);
1973 neon_load_reg64(rm_64, a->vm);
1975 opfn(rn_64, rn_64, rm_64);
1977 narrowfn(rd0, rn_64);
1979 neon_load_reg64(rn_64, a->vn + 1);
1980 neon_load_reg64(rm_64, a->vm + 1);
1982 opfn(rn_64, rn_64, rm_64);
1984 narrowfn(rd1, rn_64);
1986 neon_store_reg(a->vd, 0, rd0);
1987 neon_store_reg(a->vd, 1, rd1);
1989 tcg_temp_free_i64(rn_64);
1990 tcg_temp_free_i64(rm_64);
1992 return true;
1995 #define DO_NARROW_3D(INSN, OP, NARROWTYPE, EXTOP) \
1996 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
1998 static NeonGenTwo64OpFn * const addfn[] = { \
1999 gen_helper_neon_##OP##l_u16, \
2000 gen_helper_neon_##OP##l_u32, \
2001 tcg_gen_##OP##_i64, \
2002 NULL, \
2003 }; \
2004 static NeonGenNarrowFn * const narrowfn[] = { \
2005 gen_helper_neon_##NARROWTYPE##_high_u8, \
2006 gen_helper_neon_##NARROWTYPE##_high_u16, \
2007 EXTOP, \
2008 NULL, \
2009 }; \
2010 return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]); \
2013 static void gen_narrow_round_high_u32(TCGv_i32 rd, TCGv_i64 rn)
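/* Bias by 1 << 31 so that taking the high 32 bits rounds to nearest. */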
2015 tcg_gen_addi_i64(rn, rn, 1u << 31);
2016 tcg_gen_extrh_i64_i32(rd, rn);
2019 DO_NARROW_3D(VADDHN, add, narrow, tcg_gen_extrh_i64_i32)
2020 DO_NARROW_3D(VSUBHN, sub, narrow, tcg_gen_extrh_i64_i32)
2021 DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32)
2022 DO_NARROW_3D(VRSUBHN, sub, narrow_round, gen_narrow_round_high_u32)
2024 static bool do_long_3d(DisasContext *s, arg_3diff *a,
2025 NeonGenTwoOpWidenFn *opfn,
2026 NeonGenTwo64OpFn *accfn)
2029 * 3-regs different lengths, long operations.
2030 * These perform an operation on two inputs that returns a double-width
2031 * result, and then possibly perform an accumulation operation of
2032 * that result into the double-width destination.
2034 TCGv_i64 rd0, rd1, tmp;
2035 TCGv_i32 rn, rm;
2037 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2038 return false;
2041 /* UNDEF accesses to D16-D31 if they don't exist. */
2042 if (!dc_isar_feature(aa32_simd_r32, s) &&
2043 ((a->vd | a->vn | a->vm) & 0x10)) {
2044 return false;
2047 if (!opfn) {
2048 /* size == 3 case, which is an entirely different insn group */
2049 return false;
2052 if (a->vd & 1) {
2053 return false;
2056 if (!vfp_access_check(s)) {
2057 return true;
2060 rd0 = tcg_temp_new_i64();
2061 rd1 = tcg_temp_new_i64();
2063 rn = neon_load_reg(a->vn, 0);
2064 rm = neon_load_reg(a->vm, 0);
2065 opfn(rd0, rn, rm);
2066 tcg_temp_free_i32(rn);
2067 tcg_temp_free_i32(rm);
2069 rn = neon_load_reg(a->vn, 1);
2070 rm = neon_load_reg(a->vm, 1);
2071 opfn(rd1, rn, rm);
2072 tcg_temp_free_i32(rn);
2073 tcg_temp_free_i32(rm);
2075 /* Don't store results until after all loads: they might overlap */
2076 if (accfn) {
2077 tmp = tcg_temp_new_i64();
2078 neon_load_reg64(tmp, a->vd);
2079 accfn(tmp, tmp, rd0);
2080 neon_store_reg64(tmp, a->vd);
2081 neon_load_reg64(tmp, a->vd + 1);
2082 accfn(tmp, tmp, rd1);
2083 neon_store_reg64(tmp, a->vd + 1);
2084 tcg_temp_free_i64(tmp);
2085 } else {
2086 neon_store_reg64(rd0, a->vd);
2087 neon_store_reg64(rd1, a->vd + 1);
2090 tcg_temp_free_i64(rd0);
2091 tcg_temp_free_i64(rd1);
2093 return true;
2096 static bool trans_VABDL_S_3d(DisasContext *s, arg_3diff *a)
2098 static NeonGenTwoOpWidenFn * const opfn[] = {
2099 gen_helper_neon_abdl_s16,
2100 gen_helper_neon_abdl_s32,
2101 gen_helper_neon_abdl_s64,
2102 NULL,
2105 return do_long_3d(s, a, opfn[a->size], NULL);
2108 static bool trans_VABDL_U_3d(DisasContext *s, arg_3diff *a)
2110 static NeonGenTwoOpWidenFn * const opfn[] = {
2111 gen_helper_neon_abdl_u16,
2112 gen_helper_neon_abdl_u32,
2113 gen_helper_neon_abdl_u64,
2114 NULL,
2117 return do_long_3d(s, a, opfn[a->size], NULL);
2120 static bool trans_VABAL_S_3d(DisasContext *s, arg_3diff *a)
2122 static NeonGenTwoOpWidenFn * const opfn[] = {
2123 gen_helper_neon_abdl_s16,
2124 gen_helper_neon_abdl_s32,
2125 gen_helper_neon_abdl_s64,
2126 NULL,
2128 static NeonGenTwo64OpFn * const addfn[] = {
2129 gen_helper_neon_addl_u16,
2130 gen_helper_neon_addl_u32,
2131 tcg_gen_add_i64,
2132 NULL,
2135 return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
2138 static bool trans_VABAL_U_3d(DisasContext *s, arg_3diff *a)
2140 static NeonGenTwoOpWidenFn * const opfn[] = {
2141 gen_helper_neon_abdl_u16,
2142 gen_helper_neon_abdl_u32,
2143 gen_helper_neon_abdl_u64,
2144 NULL,
2146 static NeonGenTwo64OpFn * const addfn[] = {
2147 gen_helper_neon_addl_u16,
2148 gen_helper_neon_addl_u32,
2149 tcg_gen_add_i64,
2150 NULL,
2153 return do_long_3d(s, a, opfn[a->size], addfn[a->size]);
2156 static void gen_mull_s32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2158 TCGv_i32 lo = tcg_temp_new_i32();
2159 TCGv_i32 hi = tcg_temp_new_i32();
2161 tcg_gen_muls2_i32(lo, hi, rn, rm);
2162 tcg_gen_concat_i32_i64(rd, lo, hi);
2164 tcg_temp_free_i32(lo);
2165 tcg_temp_free_i32(hi);
2168 static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2170 TCGv_i32 lo = tcg_temp_new_i32();
2171 TCGv_i32 hi = tcg_temp_new_i32();
2173 tcg_gen_mulu2_i32(lo, hi, rn, rm);
2174 tcg_gen_concat_i32_i64(rd, lo, hi);
2176 tcg_temp_free_i32(lo);
2177 tcg_temp_free_i32(hi);
2180 static bool trans_VMULL_S_3d(DisasContext *s, arg_3diff *a)
2182 static NeonGenTwoOpWidenFn * const opfn[] = {
2183 gen_helper_neon_mull_s8,
2184 gen_helper_neon_mull_s16,
2185 gen_mull_s32,
2186 NULL,
2189 return do_long_3d(s, a, opfn[a->size], NULL);
2192 static bool trans_VMULL_U_3d(DisasContext *s, arg_3diff *a)
2194 static NeonGenTwoOpWidenFn * const opfn[] = {
2195 gen_helper_neon_mull_u8,
2196 gen_helper_neon_mull_u16,
2197 gen_mull_u32,
2198 NULL,
2201 return do_long_3d(s, a, opfn[a->size], NULL);
2204 #define DO_VMLAL(INSN,MULL,ACC) \
2205 static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a) \
2207 static NeonGenTwoOpWidenFn * const opfn[] = { \
2208 gen_helper_neon_##MULL##8, \
2209 gen_helper_neon_##MULL##16, \
2210 gen_##MULL##32, \
2211 NULL, \
2212 }; \
2213 static NeonGenTwo64OpFn * const accfn[] = { \
2214 gen_helper_neon_##ACC##l_u16, \
2215 gen_helper_neon_##ACC##l_u32, \
2216 tcg_gen_##ACC##_i64, \
2217 NULL, \
2218 }; \
2219 return do_long_3d(s, a, opfn[a->size], accfn[a->size]); \
2222 DO_VMLAL(VMLAL_S,mull_s,add)
2223 DO_VMLAL(VMLAL_U,mull_u,add)
2224 DO_VMLAL(VMLSL_S,mull_s,sub)
2225 DO_VMLAL(VMLSL_U,mull_u,sub)
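/*
 * Note (added): for VQDMULL the doubling (and the saturation) come
 * from the saturating add of the widened product to itself.
 */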
2227 static void gen_VQDMULL_16(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2229 gen_helper_neon_mull_s16(rd, rn, rm);
2230 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rd, rd);
2233 static void gen_VQDMULL_32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
2235 gen_mull_s32(rd, rn, rm);
2236 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rd, rd);
2239 static bool trans_VQDMULL_3d(DisasContext *s, arg_3diff *a)
2241 static NeonGenTwoOpWidenFn * const opfn[] = {
2242 NULL,
2243 gen_VQDMULL_16,
2244 gen_VQDMULL_32,
2245 NULL,
2248 return do_long_3d(s, a, opfn[a->size], NULL);
2251 static void gen_VQDMLAL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2253 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
2256 static void gen_VQDMLAL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2258 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
2261 static bool trans_VQDMLAL_3d(DisasContext *s, arg_3diff *a)
2263 static NeonGenTwoOpWidenFn * const opfn[] = {
2264 NULL,
2265 gen_VQDMULL_16,
2266 gen_VQDMULL_32,
2267 NULL,
2269 static NeonGenTwo64OpFn * const accfn[] = {
2270 NULL,
2271 gen_VQDMLAL_acc_16,
2272 gen_VQDMLAL_acc_32,
2273 NULL,
2276 return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
2279 static void gen_VQDMLSL_acc_16(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2281 gen_helper_neon_negl_u32(rm, rm);
2282 gen_helper_neon_addl_saturate_s32(rd, cpu_env, rn, rm);
2285 static void gen_VQDMLSL_acc_32(TCGv_i64 rd, TCGv_i64 rn, TCGv_i64 rm)
2287 tcg_gen_neg_i64(rm, rm);
2288 gen_helper_neon_addl_saturate_s64(rd, cpu_env, rn, rm);
2291 static bool trans_VQDMLSL_3d(DisasContext *s, arg_3diff *a)
2293 static NeonGenTwoOpWidenFn * const opfn[] = {
2294 NULL,
2295 gen_VQDMULL_16,
2296 gen_VQDMULL_32,
2297 NULL,
2299 static NeonGenTwo64OpFn * const accfn[] = {
2300 NULL,
2301 gen_VQDMLSL_acc_16,
2302 gen_VQDMLSL_acc_32,
2303 NULL,
2306 return do_long_3d(s, a, opfn[a->size], accfn[a->size]);
2309 static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
2311 gen_helper_gvec_3 *fn_gvec;
2313 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
2314 return false;
2317 /* UNDEF accesses to D16-D31 if they don't exist. */
2318 if (!dc_isar_feature(aa32_simd_r32, s) &&
2319 ((a->vd | a->vn | a->vm) & 0x10)) {
2320 return false;
2323 if (a->vd & 1) {
2324 return false;
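/*
 * size 0 is the 8-bit polynomial multiply (VMULL.P8); size 2 is
 * VMULL.P64 and additionally requires the PMULL extension; other
 * sizes are reserved.
 */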
2327 switch (a->size) {
2328 case 0:
2329 fn_gvec = gen_helper_neon_pmull_h;
2330 break;
2331 case 2:
2332 if (!dc_isar_feature(aa32_pmull, s)) {
2333 return false;
2335 fn_gvec = gen_helper_gvec_pmull_q;
2336 break;
2337 default:
2338 return false;
2341 if (!vfp_access_check(s)) {
2342 return true;
2345 tcg_gen_gvec_3_ool(neon_reg_offset(a->vd, 0),
2346 neon_reg_offset(a->vn, 0),
2347 neon_reg_offset(a->vm, 0),
2348 16, 16, 0, fn_gvec);
2349 return true;