/*
 * ARM translation: AArch32 Neon instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2020 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/*
 * This file is intended to be included from translate.c; it uses
 * some macros and definitions provided by that file.
 * It might be possible to convert it to a standalone .c file eventually.
 */
static inline int plus1(DisasContext *s, int x)
{
    return x + 1;
}

static inline int rsub_64(DisasContext *s, int x)
{
    return 64 - x;
}

static inline int rsub_32(DisasContext *s, int x)
{
    return 32 - x;
}
static inline int rsub_16(DisasContext *s, int x)
{
    return 16 - x;
}
static inline int rsub_8(DisasContext *s, int x)
{
    return 8 - x;
}
/* Include the generated Neon decoder */
#include "decode-neon-dp.inc.c"
#include "decode-neon-ls.inc.c"
#include "decode-neon-shared.inc.c"
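/*
 * Note (informative): the files included above are generated by
 * scripts/decodetree.py from the corresponding .decode pattern files.
 * For each matched pattern the generated decoder fills in an arg_<fmt>
 * struct and calls the trans_<INSN>() function defined below; returning
 * false from a trans function means "not handled", which the caller
 * treats as an UNDEF.
 */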
static bool trans_VCMLA(DisasContext *s, arg_VCMLA *a)
{
    int opr_sz;
    TCGv_ptr fpst;
    gen_helper_gvec_3_ptr *fn_gvec_ptr;

    if (!dc_isar_feature(aa32_vcma, s)
        || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    fpst = get_fpstatus_ptr(1);
    fn_gvec_ptr = a->size ? gen_helper_gvec_fcmlas : gen_helper_gvec_fcmlah;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(1, a->vn),
                       vfp_reg_offset(1, a->vm),
                       fpst, opr_sz, opr_sz, a->rot,
                       fn_gvec_ptr);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VCADD(DisasContext *s, arg_VCADD *a)
{
    int opr_sz;
    TCGv_ptr fpst;
    gen_helper_gvec_3_ptr *fn_gvec_ptr;

    if (!dc_isar_feature(aa32_vcma, s)
        || (!a->size && !dc_isar_feature(aa32_fp16_arith, s))) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    fpst = get_fpstatus_ptr(1);
    fn_gvec_ptr = a->size ? gen_helper_gvec_fcadds : gen_helper_gvec_fcaddh;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(1, a->vn),
                       vfp_reg_offset(1, a->vm),
                       fpst, opr_sz, opr_sz, a->rot,
                       fn_gvec_ptr);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VDOT(DisasContext *s, arg_VDOT *a)
{
    int opr_sz;
    gen_helper_gvec_3 *fn_gvec;

    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    fn_gvec = a->u ? gen_helper_gvec_udot_b : gen_helper_gvec_sdot_b;
    tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(1, a->vn),
                       vfp_reg_offset(1, a->vm),
                       opr_sz, opr_sz, 0, fn_gvec);
    return true;
}
static bool trans_VFML(DisasContext *s, arg_VFML *a)
{
    int opr_sz;

    if (!dc_isar_feature(aa32_fhm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        (a->vd & 0x10)) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(a->q, a->vn),
                       vfp_reg_offset(a->q, a->vm),
                       cpu_env, opr_sz, opr_sz, a->s, /* is_2 == 0 */
                       gen_helper_gvec_fmlal_a32);
    return true;
}
static bool trans_VCMLA_scalar(DisasContext *s, arg_VCMLA_scalar *a)
{
    gen_helper_gvec_3_ptr *fn_gvec_ptr;
    int opr_sz;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_vcma, s)) {
        return false;
    }
    if (a->size == 0 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vd | a->vn) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fn_gvec_ptr = (a->size ? gen_helper_gvec_fcmlas_idx
                   : gen_helper_gvec_fcmlah_idx);
    opr_sz = (1 + a->q) * 8;
    fpst = get_fpstatus_ptr(1);
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(1, a->vn),
                       vfp_reg_offset(1, a->vm),
                       fpst, opr_sz, opr_sz,
                       (a->index << 2) | a->rot, fn_gvec_ptr);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VDOT_scalar(DisasContext *s, arg_VDOT_scalar *a)
{
    gen_helper_gvec_3 *fn_gvec;
    int opr_sz;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_dp, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn) & 0x10)) {
        return false;
    }

    if ((a->vd | a->vn) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fn_gvec = a->u ? gen_helper_gvec_udot_idx_b : gen_helper_gvec_sdot_idx_b;
    opr_sz = (1 + a->q) * 8;
    fpst = get_fpstatus_ptr(1);
    tcg_gen_gvec_3_ool(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(1, a->vn),
                       vfp_reg_offset(1, a->rm),
                       opr_sz, opr_sz, a->index, fn_gvec);
    tcg_temp_free_ptr(fpst);
    return true;
}
static bool trans_VFML_scalar(DisasContext *s, arg_VFML_scalar *a)
{
    int opr_sz;

    if (!dc_isar_feature(aa32_fhm, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd & 0x10) || (a->q && (a->vn & 0x10)))) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    opr_sz = (1 + a->q) * 8;
    tcg_gen_gvec_3_ptr(vfp_reg_offset(1, a->vd),
                       vfp_reg_offset(a->q, a->vn),
                       vfp_reg_offset(a->q, a->rm),
                       cpu_env, opr_sz, opr_sz,
                       (a->index << 2) | a->s, /* is_2 == 0 */
                       gen_helper_gvec_fmlal_idx_a32);
    return true;
}
static struct {
    int nregs;
    int interleave;
    int spacing;
} const neon_ls_element_type[11] = {
    {1, 4, 1},
    {1, 4, 2},
    {4, 1, 1},
    {2, 2, 2},
    {1, 3, 1},
    {1, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {1, 2, 1},
    {1, 2, 2},
    {2, 1, 1},
};
static void gen_neon_ldst_base_update(DisasContext *s, int rm, int rn,
                                      int stride)
{
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
}
static bool trans_VLDST_multiple(DisasContext *s, arg_VLDST_multiple *a)
{
    /* Neon load/store multiple structures */
    int nregs, interleave, spacing, reg, n;
    MemOp endian = s->be_data;
    int mmu_idx = get_mem_index(s);
    int size = a->size;
    TCGv_i64 tmp64;
    TCGv_i32 addr, tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }
    if (a->itype > 10) {
        return false;
    }
    /* Catch UNDEF cases for bad values of align field */
    switch (a->itype & 0xc) {
    case 4:
        if (a->align >= 2) {
            return false;
        }
        break;
    case 8:
        if (a->align == 3) {
            return false;
        }
        break;
    default:
        break;
    }
    nregs = neon_ls_element_type[a->itype].nregs;
    interleave = neon_ls_element_type[a->itype].interleave;
    spacing = neon_ls_element_type[a->itype].spacing;
    if (size == 3 && (interleave | spacing) != 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* For our purposes, bytes are always little-endian. */
    if (size == 0) {
        endian = MO_LE;
    }
    /*
     * Consecutive little-endian elements from a single register
     * can be promoted to a larger little-endian operation.
     */
    if (interleave == 1 && endian == MO_LE) {
        size = 3;
    }
    tmp64 = tcg_temp_new_i64();
    addr = tcg_temp_new_i32();
    tmp = tcg_const_i32(1 << size);
    load_reg_var(s, addr, a->rn);
    for (reg = 0; reg < nregs; reg++) {
        for (n = 0; n < 8 >> size; n++) {
            int xs;
            for (xs = 0; xs < interleave; xs++) {
                int tt = a->vd + reg + spacing * xs;

                if (a->l) {
                    gen_aa32_ld_i64(s, tmp64, addr, mmu_idx, endian | size);
                    neon_store_element64(tt, n, size, tmp64);
                } else {
                    neon_load_element64(tmp64, tt, n, size);
                    gen_aa32_st_i64(s, tmp64, addr, mmu_idx, endian | size);
                }
                tcg_gen_add_i32(addr, addr, tmp);
            }
        }
    }
    tcg_temp_free_i32(addr);
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i64(tmp64);

    gen_neon_ldst_base_update(s, a->rm, a->rn, nregs * interleave * 8);
    return true;
}
static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a)
{
    /* Neon load single structure to all lanes */
    int reg, stride, vec_size;
    int vd = a->vd;
    int size = a->size;
    int nregs = a->n + 1;
    TCGv_i32 addr, tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (size == 3) {
        if (nregs != 4 || a->a == 0) {
            return false;
        }
        /* For VLD4 size == 3 a == 1 means 32 bits at 16 byte alignment */
        size = 2;
    }
    if (nregs == 1 && a->a == 1 && size == 0) {
        return false;
    }
    if (nregs == 3 && a->a == 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * VLD1 to all lanes: T bit indicates how many Dregs to write.
     * VLD2/3/4 to all lanes: T bit indicates register stride.
     */
    stride = a->t ? 2 : 1;
    vec_size = nregs == 1 ? stride * 8 : 8;

    tmp = tcg_temp_new_i32();
    addr = tcg_temp_new_i32();
    load_reg_var(s, addr, a->rn);
    for (reg = 0; reg < nregs; reg++) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                        s->be_data | size);
        if ((vd & 1) && vec_size == 16) {
            /*
             * We cannot write 16 bytes at once because the
             * destination is unaligned.
             */
            tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
                                 8, 8, tmp);
            tcg_gen_gvec_mov(0, neon_reg_offset(vd + 1, 0),
                             neon_reg_offset(vd, 0), 8, 8);
        } else {
            tcg_gen_gvec_dup_i32(size, neon_reg_offset(vd, 0),
                                 vec_size, vec_size, tmp);
        }
        tcg_gen_addi_i32(addr, addr, 1 << size);
        vd += stride;
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << size) * nregs);

    return true;
}
static bool trans_VLDST_single(DisasContext *s, arg_VLDST_single *a)
{
    /* Neon load/store single structure to one lane */
    int reg;
    int nregs = a->n + 1;
    int vd = a->vd;
    TCGv_i32 addr, tmp;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    /* Catch the UNDEF cases. This is unavoidably a bit messy. */
    switch (nregs) {
    case 1:
        if (((a->align & (1 << a->size)) != 0) ||
            (a->size == 2 && ((a->align & 3) == 1 || (a->align & 3) == 2))) {
            return false;
        }
        break;
    case 3:
        if ((a->align & 1) != 0) {
            return false;
        }
        /* fall through */
    case 2:
        if (a->size == 2 && (a->align & 2) != 0) {
            return false;
        }
        break;
    case 4:
        if ((a->size == 2) && ((a->align & 3) == 3)) {
            return false;
        }
        break;
    default:
        abort();
    }
    if ((vd + a->stride * (nregs - 1)) > 31) {
        /*
         * Attempts to write off the end of the register file are
         * UNPREDICTABLE; we choose to UNDEF because otherwise we would
         * access off the end of the array that holds the register data.
         */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    addr = tcg_temp_new_i32();
    load_reg_var(s, addr, a->rn);
    /*
     * TODO: if we implemented alignment exceptions, we should check
     * addr against the alignment encoded in a->align here.
     */
    for (reg = 0; reg < nregs; reg++) {
        if (a->l) {
            gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s),
                            s->be_data | a->size);
            neon_store_element(vd, a->reg_idx, a->size, tmp);
        } else { /* Store */
            neon_load_element(tmp, vd, a->reg_idx, a->size);
            gen_aa32_st_i32(s, tmp, addr, get_mem_index(s),
                            s->be_data | a->size);
        }
        vd += a->stride;
        tcg_gen_addi_i32(addr, addr, 1 << a->size);
    }
    tcg_temp_free_i32(addr);
    tcg_temp_free_i32(tmp);

    gen_neon_ldst_base_update(s, a->rm, a->rn, (1 << a->size) * nregs);

    return true;
}
static bool do_3same(DisasContext *s, arg_3same *a, GVecGen3Fn fn)
{
    int vec_size = a->q ? 16 : 8;
    int rd_ofs = neon_reg_offset(a->vd, 0);
    int rn_ofs = neon_reg_offset(a->vn, 0);
    int rm_ofs = neon_reg_offset(a->vm, 0);

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fn(a->size, rd_ofs, rn_ofs, rm_ofs, vec_size, vec_size);
    return true;
}

#define DO_3SAME(INSN, FUNC)                                            \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        return do_3same(s, a, FUNC);                                    \
    }
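/*
 * Illustration (informative): DO_3SAME(VADD, tcg_gen_gvec_add) below
 * expands to
 *
 *   static bool trans_VADD_3s(DisasContext *s, arg_3same *a)
 *   {
 *       return do_3same(s, a, tcg_gen_gvec_add);
 *   }
 *
 * i.e. the entire insn is emitted via one generic gvec expander.
 */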
DO_3SAME(VADD, tcg_gen_gvec_add)
DO_3SAME(VSUB, tcg_gen_gvec_sub)
DO_3SAME(VAND, tcg_gen_gvec_and)
DO_3SAME(VBIC, tcg_gen_gvec_andc)
DO_3SAME(VORR, tcg_gen_gvec_or)
DO_3SAME(VORN, tcg_gen_gvec_orc)
DO_3SAME(VEOR, tcg_gen_gvec_xor)
DO_3SAME(VSHL_S, gen_gvec_sshl)
DO_3SAME(VSHL_U, gen_gvec_ushl)
DO_3SAME(VQADD_S, gen_gvec_sqadd_qc)
DO_3SAME(VQADD_U, gen_gvec_uqadd_qc)
DO_3SAME(VQSUB_S, gen_gvec_sqsub_qc)
DO_3SAME(VQSUB_U, gen_gvec_uqsub_qc)
/* These insns are all gvec_bitsel but with the inputs in various orders. */
#define DO_3SAME_BITSEL(INSN, O1, O2, O3)                               \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        tcg_gen_gvec_bitsel(vece, rd_ofs, O1, O2, O3, oprsz, maxsz);    \
    }                                                                   \
    DO_3SAME(INSN, gen_##INSN##_3s)

DO_3SAME_BITSEL(VBSL, rd_ofs, rn_ofs, rm_ofs)
DO_3SAME_BITSEL(VBIT, rm_ofs, rn_ofs, rd_ofs)
DO_3SAME_BITSEL(VBIF, rm_ofs, rd_ofs, rn_ofs)
#define DO_3SAME_NO_SZ_3(INSN, FUNC)                                    \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (a->size == 3) {                                             \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, FUNC);                                    \
    }

DO_3SAME_NO_SZ_3(VMAX_S, tcg_gen_gvec_smax)
DO_3SAME_NO_SZ_3(VMAX_U, tcg_gen_gvec_umax)
DO_3SAME_NO_SZ_3(VMIN_S, tcg_gen_gvec_smin)
DO_3SAME_NO_SZ_3(VMIN_U, tcg_gen_gvec_umin)
DO_3SAME_NO_SZ_3(VMUL, tcg_gen_gvec_mul)
DO_3SAME_NO_SZ_3(VMLA, gen_gvec_mla)
DO_3SAME_NO_SZ_3(VMLS, gen_gvec_mls)
DO_3SAME_NO_SZ_3(VTST, gen_gvec_cmtst)
DO_3SAME_NO_SZ_3(VABD_S, gen_gvec_sabd)
DO_3SAME_NO_SZ_3(VABA_S, gen_gvec_saba)
DO_3SAME_NO_SZ_3(VABD_U, gen_gvec_uabd)
DO_3SAME_NO_SZ_3(VABA_U, gen_gvec_uaba)
#define DO_3SAME_CMP(INSN, COND)                                        \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        tcg_gen_gvec_cmp(COND, vece, rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz); \
    }                                                                   \
    DO_3SAME_NO_SZ_3(INSN, gen_##INSN##_3s)

DO_3SAME_CMP(VCGT_S, TCG_COND_GT)
DO_3SAME_CMP(VCGT_U, TCG_COND_GTU)
DO_3SAME_CMP(VCGE_S, TCG_COND_GE)
DO_3SAME_CMP(VCGE_U, TCG_COND_GEU)
DO_3SAME_CMP(VCEQ, TCG_COND_EQ)
#define WRAP_OOL_FN(WRAPNAME, FUNC)                                        \
    static void WRAPNAME(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs, \
                         uint32_t rm_ofs, uint32_t oprsz, uint32_t maxsz)  \
    {                                                                      \
        tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, 0, FUNC); \
    }

WRAP_OOL_FN(gen_VMUL_p_3s, gen_helper_gvec_pmul_b)

static bool trans_VMUL_p_3s(DisasContext *s, arg_3same *a)
{
    if (a->size != 0) {
        return false;
    }
    return do_3same(s, a, gen_VMUL_p_3s);
}
#define DO_VQRDMLAH(INSN, FUNC)                                         \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (!dc_isar_feature(aa32_rdm, s)) {                            \
            return false;                                               \
        }                                                               \
        if (a->size != 1 && a->size != 2) {                             \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, FUNC);                                    \
    }

DO_VQRDMLAH(VQRDMLAH, gen_gvec_sqrdmlah_qc)
DO_VQRDMLAH(VQRDMLSH, gen_gvec_sqrdmlsh_qc)

#define DO_SHA1(NAME, FUNC)                                             \
    WRAP_OOL_FN(gen_##NAME##_3s, FUNC)                                  \
    static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (!dc_isar_feature(aa32_sha1, s)) {                           \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, gen_##NAME##_3s);                         \
    }

DO_SHA1(SHA1C, gen_helper_crypto_sha1c)
DO_SHA1(SHA1P, gen_helper_crypto_sha1p)
DO_SHA1(SHA1M, gen_helper_crypto_sha1m)
DO_SHA1(SHA1SU0, gen_helper_crypto_sha1su0)

#define DO_SHA2(NAME, FUNC)                                             \
    WRAP_OOL_FN(gen_##NAME##_3s, FUNC)                                  \
    static bool trans_##NAME##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (!dc_isar_feature(aa32_sha2, s)) {                           \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, gen_##NAME##_3s);                         \
    }

DO_SHA2(SHA256H, gen_helper_crypto_sha256h)
DO_SHA2(SHA256H2, gen_helper_crypto_sha256h2)
DO_SHA2(SHA256SU1, gen_helper_crypto_sha256su1)
#define DO_3SAME_64(INSN, FUNC)                                         \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        static const GVecGen3 op = { .fni8 = FUNC };                    \
        tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &op);      \
    }                                                                   \
    DO_3SAME(INSN, gen_##INSN##_3s)

#define DO_3SAME_64_ENV(INSN, FUNC)                                     \
    static void gen_##INSN##_elt(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m)    \
    {                                                                   \
        FUNC(d, cpu_env, n, m);                                         \
    }                                                                   \
    DO_3SAME_64(INSN, gen_##INSN##_elt)

DO_3SAME_64(VRSHL_S64, gen_helper_neon_rshl_s64)
DO_3SAME_64(VRSHL_U64, gen_helper_neon_rshl_u64)
DO_3SAME_64_ENV(VQSHL_S64, gen_helper_neon_qshl_s64)
DO_3SAME_64_ENV(VQSHL_U64, gen_helper_neon_qshl_u64)
DO_3SAME_64_ENV(VQRSHL_S64, gen_helper_neon_qrshl_s64)
DO_3SAME_64_ENV(VQRSHL_U64, gen_helper_neon_qrshl_u64)
#define DO_3SAME_32(INSN, FUNC)                                         \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        static const GVecGen3 ops[4] = {                                \
            { .fni4 = gen_helper_neon_##FUNC##8 },                      \
            { .fni4 = gen_helper_neon_##FUNC##16 },                     \
            { .fni4 = gen_helper_neon_##FUNC##32 },                     \
            { 0 },                                                      \
        };                                                              \
        tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
    }                                                                   \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (a->size > 2) {                                              \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, gen_##INSN##_3s);                         \
    }
/*
 * Some helper functions need to be passed the cpu_env. In order
 * to use those with the gvec APIs like tcg_gen_gvec_3() we need
 * to create wrapper functions whose prototype is a NeonGenTwoOpFn()
 * and which call a NeonGenTwoOpEnvFn().
 */
#define WRAP_ENV_FN(WRAPNAME, FUNC)                                     \
    static void WRAPNAME(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m)            \
    {                                                                   \
        FUNC(d, cpu_env, n, m);                                         \
    }
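/*
 * Illustration (informative): WRAP_ENV_FN(gen_foo, gen_helper_neon_qshl_s8)
 * would define
 *
 *   static void gen_foo(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m)
 *   {
 *       gen_helper_neon_qshl_s8(d, cpu_env, n, m);
 *   }
 *
 * which has exactly the NeonGenTwoOpFn shape the gvec tables expect.
 */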
#define DO_3SAME_32_ENV(INSN, FUNC)                                     \
    WRAP_ENV_FN(gen_##INSN##_tramp8, gen_helper_neon_##FUNC##8);        \
    WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##16);      \
    WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##32);      \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        static const GVecGen3 ops[4] = {                                \
            { .fni4 = gen_##INSN##_tramp8 },                            \
            { .fni4 = gen_##INSN##_tramp16 },                           \
            { .fni4 = gen_##INSN##_tramp32 },                           \
            { 0 },                                                      \
        };                                                              \
        tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece]); \
    }                                                                   \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (a->size > 2) {                                              \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, gen_##INSN##_3s);                         \
    }

DO_3SAME_32(VHADD_S, hadd_s)
DO_3SAME_32(VHADD_U, hadd_u)
DO_3SAME_32(VHSUB_S, hsub_s)
DO_3SAME_32(VHSUB_U, hsub_u)
DO_3SAME_32(VRHADD_S, rhadd_s)
DO_3SAME_32(VRHADD_U, rhadd_u)
DO_3SAME_32(VRSHL_S, rshl_s)
DO_3SAME_32(VRSHL_U, rshl_u)

DO_3SAME_32_ENV(VQSHL_S, qshl_s)
DO_3SAME_32_ENV(VQSHL_U, qshl_u)
DO_3SAME_32_ENV(VQRSHL_S, qrshl_s)
DO_3SAME_32_ENV(VQRSHL_U, qrshl_u)
static bool do_3same_pair(DisasContext *s, arg_3same *a, NeonGenTwoOpFn *fn)
{
    /* Operations handled pairwise 32 bits at a time */
    TCGv_i32 tmp, tmp2, tmp3;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (a->size == 3) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    assert(a->q == 0); /* enforced by decode patterns */

    /*
     * Note that we have to be careful not to clobber the source operands
     * in the "vm == vd" case by storing the result of the first pass too
     * early. Since Q is 0 there are always just two passes, so instead
     * of a complicated loop over each pass we just unroll.
     */
    tmp = neon_load_reg(a->vn, 0);
    tmp2 = neon_load_reg(a->vn, 1);
    fn(tmp, tmp, tmp2);
    tcg_temp_free_i32(tmp2);

    tmp3 = neon_load_reg(a->vm, 0);
    tmp2 = neon_load_reg(a->vm, 1);
    fn(tmp3, tmp3, tmp2);
    tcg_temp_free_i32(tmp2);

    neon_store_reg(a->vd, 0, tmp);
    neon_store_reg(a->vd, 1, tmp3);

    return true;
}
#define DO_3SAME_PAIR(INSN, func)                                       \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        static NeonGenTwoOpFn * const fns[] = {                         \
            gen_helper_neon_##func##8,                                  \
            gen_helper_neon_##func##16,                                 \
            gen_helper_neon_##func##32,                                 \
        };                                                              \
        if (a->size > 2) {                                              \
            return false;                                               \
        }                                                               \
        return do_3same_pair(s, a, fns[a->size]);                       \
    }

/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_helper_neon_pmax_s32  tcg_gen_smax_i32
#define gen_helper_neon_pmax_u32  tcg_gen_umax_i32
#define gen_helper_neon_pmin_s32  tcg_gen_smin_i32
#define gen_helper_neon_pmin_u32  tcg_gen_umin_i32
#define gen_helper_neon_padd_u32  tcg_gen_add_i32
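/*
 * Worked example (informative): for a 32-bit VPADD the two unrolled
 * passes in do_3same_pair() compute d[0] = n[0] + n[1] and
 * d[1] = m[0] + m[1], which is just tcg_gen_add_i32 applied to adjacent
 * 32-bit elements; hence the #define aliases above rather than
 * dedicated pairwise helpers for the 32-bit case.
 */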
DO_3SAME_PAIR(VPMAX_S, pmax_s)
DO_3SAME_PAIR(VPMIN_S, pmin_s)
DO_3SAME_PAIR(VPMAX_U, pmax_u)
DO_3SAME_PAIR(VPMIN_U, pmin_u)
DO_3SAME_PAIR(VPADD, padd_u)
#define DO_3SAME_VQDMULH(INSN, FUNC)                                    \
    WRAP_ENV_FN(gen_##INSN##_tramp16, gen_helper_neon_##FUNC##_s16);    \
    WRAP_ENV_FN(gen_##INSN##_tramp32, gen_helper_neon_##FUNC##_s32);    \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        static const GVecGen3 ops[2] = {                                \
            { .fni4 = gen_##INSN##_tramp16 },                           \
            { .fni4 = gen_##INSN##_tramp32 },                           \
        };                                                              \
        tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops[vece - 1]); \
    }                                                                   \
    static bool trans_##INSN##_3s(DisasContext *s, arg_3same *a)        \
    {                                                                   \
        if (a->size != 1 && a->size != 2) {                             \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, gen_##INSN##_3s);                         \
    }

DO_3SAME_VQDMULH(VQDMULH, qdmulh)
DO_3SAME_VQDMULH(VQRDMULH, qrdmulh)
static bool do_3same_fp(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn,
                        bool reads_vd)
{
    /*
     * FP operations handled elementwise 32 bits at a time.
     * If reads_vd is true then the old value of Vd will be
     * loaded before calling the callback function. This is
     * used for multiply-accumulate type operations.
     */
    TCGv_i32 tmp, tmp2;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vn | a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    TCGv_ptr fpstatus = get_fpstatus_ptr(1);
    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
        tmp = neon_load_reg(a->vn, pass);
        tmp2 = neon_load_reg(a->vm, pass);
        if (reads_vd) {
            TCGv_i32 tmp_rd = neon_load_reg(a->vd, pass);
            fn(tmp_rd, tmp, tmp2, fpstatus);
            neon_store_reg(a->vd, pass, tmp_rd);
            tcg_temp_free_i32(tmp);
        } else {
            fn(tmp, tmp, tmp2, fpstatus);
            neon_store_reg(a->vd, pass, tmp);
        }
        tcg_temp_free_i32(tmp2);
    }
    tcg_temp_free_ptr(fpstatus);
    return true;
}
/*
 * For all the functions using this macro, size == 1 means fp16,
 * which is an architecture extension we don't implement yet.
 */
#define DO_3S_FP_GVEC(INSN,FUNC)                                        \
    static void gen_##INSN##_3s(unsigned vece, uint32_t rd_ofs,         \
                                uint32_t rn_ofs, uint32_t rm_ofs,       \
                                uint32_t oprsz, uint32_t maxsz)         \
    {                                                                   \
        TCGv_ptr fpst = get_fpstatus_ptr(1);                            \
        tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, fpst,                \
                           oprsz, maxsz, 0, FUNC);                      \
        tcg_temp_free_ptr(fpst);                                        \
    }                                                                   \
    static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a)     \
    {                                                                   \
        if (a->size != 0) {                                             \
            /* TODO fp16 support */                                     \
            return false;                                               \
        }                                                               \
        return do_3same(s, a, gen_##INSN##_3s);                         \
    }

DO_3S_FP_GVEC(VADD, gen_helper_gvec_fadd_s)
DO_3S_FP_GVEC(VSUB, gen_helper_gvec_fsub_s)
DO_3S_FP_GVEC(VABD, gen_helper_gvec_fabd_s)
DO_3S_FP_GVEC(VMUL, gen_helper_gvec_fmul_s)
/*
 * For all the functions using this macro, size == 1 means fp16,
 * which is an architecture extension we don't implement yet.
 */
#define DO_3S_FP(INSN,FUNC,READS_VD)                                    \
    static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a)     \
    {                                                                   \
        if (a->size != 0) {                                             \
            /* TODO fp16 support */                                     \
            return false;                                               \
        }                                                               \
        return do_3same_fp(s, a, FUNC, READS_VD);                       \
    }

DO_3S_FP(VCEQ, gen_helper_neon_ceq_f32, false)
DO_3S_FP(VCGE, gen_helper_neon_cge_f32, false)
DO_3S_FP(VCGT, gen_helper_neon_cgt_f32, false)
DO_3S_FP(VACGE, gen_helper_neon_acge_f32, false)
DO_3S_FP(VACGT, gen_helper_neon_acgt_f32, false)
DO_3S_FP(VMAX, gen_helper_vfp_maxs, false)
DO_3S_FP(VMIN, gen_helper_vfp_mins, false)
static void gen_VMLA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
                           TCGv_ptr fpstatus)
{
    gen_helper_vfp_muls(vn, vn, vm, fpstatus);
    gen_helper_vfp_adds(vd, vd, vn, fpstatus);
}

static void gen_VMLS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
                           TCGv_ptr fpstatus)
{
    gen_helper_vfp_muls(vn, vn, vm, fpstatus);
    gen_helper_vfp_subs(vd, vd, vn, fpstatus);
}

DO_3S_FP(VMLA, gen_VMLA_fp_3s, true)
DO_3S_FP(VMLS, gen_VMLS_fp_3s, true)
static bool trans_VMAXNM_fp_3s(DisasContext *s, arg_3same *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->size != 0) {
        /* TODO fp16 support */
        return false;
    }

    return do_3same_fp(s, a, gen_helper_vfp_maxnums, false);
}

static bool trans_VMINNM_fp_3s(DisasContext *s, arg_3same *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
        return false;
    }

    if (a->size != 0) {
        /* TODO fp16 support */
        return false;
    }

    return do_3same_fp(s, a, gen_helper_vfp_minnums, false);
}
WRAP_ENV_FN(gen_VRECPS_tramp, gen_helper_recps_f32)

static void gen_VRECPS_fp_3s(unsigned vece, uint32_t rd_ofs,
                             uint32_t rn_ofs, uint32_t rm_ofs,
                             uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 ops = { .fni4 = gen_VRECPS_tramp };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
}

static bool trans_VRECPS_fp_3s(DisasContext *s, arg_3same *a)
{
    if (a->size != 0) {
        /* TODO fp16 support */
        return false;
    }

    return do_3same(s, a, gen_VRECPS_fp_3s);
}

WRAP_ENV_FN(gen_VRSQRTS_tramp, gen_helper_rsqrts_f32)

static void gen_VRSQRTS_fp_3s(unsigned vece, uint32_t rd_ofs,
                              uint32_t rn_ofs, uint32_t rm_ofs,
                              uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 ops = { .fni4 = gen_VRSQRTS_tramp };
    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, oprsz, maxsz, &ops);
}

static bool trans_VRSQRTS_fp_3s(DisasContext *s, arg_3same *a)
{
    if (a->size != 0) {
        /* TODO fp16 support */
        return false;
    }

    return do_3same(s, a, gen_VRSQRTS_fp_3s);
}
static void gen_VFMA_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
                           TCGv_ptr fpstatus)
{
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
}

static bool trans_VFMA_fp_3s(DisasContext *s, arg_3same *a)
{
    if (!dc_isar_feature(aa32_simdfmac, s)) {
        return false;
    }

    if (a->size != 0) {
        /* TODO fp16 support */
        return false;
    }

    return do_3same_fp(s, a, gen_VFMA_fp_3s, true);
}

static void gen_VFMS_fp_3s(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm,
                           TCGv_ptr fpstatus)
{
    gen_helper_vfp_negs(vn, vn);
    gen_helper_vfp_muladds(vd, vn, vm, vd, fpstatus);
}

static bool trans_VFMS_fp_3s(DisasContext *s, arg_3same *a)
{
    if (!dc_isar_feature(aa32_simdfmac, s)) {
        return false;
    }

    if (a->size != 0) {
        /* TODO fp16 support */
        return false;
    }

    return do_3same_fp(s, a, gen_VFMS_fp_3s, true);
}
static bool do_3same_fp_pair(DisasContext *s, arg_3same *a, VFPGen3OpSPFn *fn)
{
    /* FP operations handled pairwise 32 bits at a time */
    TCGv_i32 tmp, tmp2, tmp3;
    TCGv_ptr fpstatus;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    assert(a->q == 0); /* enforced by decode patterns */

    /*
     * Note that we have to be careful not to clobber the source operands
     * in the "vm == vd" case by storing the result of the first pass too
     * early. Since Q is 0 there are always just two passes, so instead
     * of a complicated loop over each pass we just unroll.
     */
    fpstatus = get_fpstatus_ptr(1);
    tmp = neon_load_reg(a->vn, 0);
    tmp2 = neon_load_reg(a->vn, 1);
    fn(tmp, tmp, tmp2, fpstatus);
    tcg_temp_free_i32(tmp2);

    tmp3 = neon_load_reg(a->vm, 0);
    tmp2 = neon_load_reg(a->vm, 1);
    fn(tmp3, tmp3, tmp2, fpstatus);
    tcg_temp_free_i32(tmp2);
    tcg_temp_free_ptr(fpstatus);

    neon_store_reg(a->vd, 0, tmp);
    neon_store_reg(a->vd, 1, tmp3);

    return true;
}
/*
 * For all the functions using this macro, size == 1 means fp16,
 * which is an architecture extension we don't implement yet.
 */
#define DO_3S_FP_PAIR(INSN,FUNC)                                        \
    static bool trans_##INSN##_fp_3s(DisasContext *s, arg_3same *a)     \
    {                                                                   \
        if (a->size != 0) {                                             \
            /* TODO fp16 support */                                     \
            return false;                                               \
        }                                                               \
        return do_3same_fp_pair(s, a, FUNC);                            \
    }

DO_3S_FP_PAIR(VPADD, gen_helper_vfp_adds)
DO_3S_FP_PAIR(VPMAX, gen_helper_vfp_maxs)
DO_3S_FP_PAIR(VPMIN, gen_helper_vfp_mins)
static bool do_vector_2sh(DisasContext *s, arg_2reg_shift *a, GVecGen2iFn *fn)
{
    /* Handle a 2-reg-shift insn which can be vectorized. */
    int vec_size = a->q ? 16 : 8;
    int rd_ofs = neon_reg_offset(a->vd, 0);
    int rm_ofs = neon_reg_offset(a->vm, 0);

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fn(a->size, rd_ofs, rm_ofs, a->shift, vec_size, vec_size);
    return true;
}

#define DO_2SH(INSN, FUNC)                                              \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        return do_vector_2sh(s, a, FUNC);                               \
    }

DO_2SH(VSHL, tcg_gen_gvec_shli)
DO_2SH(VSLI, gen_gvec_sli)
DO_2SH(VSRI, gen_gvec_sri)
DO_2SH(VSRA_S, gen_gvec_ssra)
DO_2SH(VSRA_U, gen_gvec_usra)
DO_2SH(VRSHR_S, gen_gvec_srshr)
DO_2SH(VRSHR_U, gen_gvec_urshr)
DO_2SH(VRSRA_S, gen_gvec_srsra)
DO_2SH(VRSRA_U, gen_gvec_ursra)
static bool trans_VSHR_S_2sh(DisasContext *s, arg_2reg_shift *a)
{
    /* Signed shift out of range results in all-sign-bits */
    a->shift = MIN(a->shift, (8 << a->size) - 1);
    return do_vector_2sh(s, a, tcg_gen_gvec_sari);
}

static void gen_zero_rd_2sh(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                            int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    tcg_gen_gvec_dup_imm(vece, rd_ofs, oprsz, maxsz, 0);
}

static bool trans_VSHR_U_2sh(DisasContext *s, arg_2reg_shift *a)
{
    /* Shift out of range is architecturally valid and results in zero. */
    if (a->shift >= (8 << a->size)) {
        return do_vector_2sh(s, a, gen_zero_rd_2sh);
    } else {
        return do_vector_2sh(s, a, tcg_gen_gvec_shri);
    }
}
static bool do_2shift_env_64(DisasContext *s, arg_2reg_shift *a,
                             NeonGenTwo64OpEnvFn *fn)
{
    /*
     * 2-reg-and-shift operations, size == 3 case, where the
     * function needs to be passed cpu_env.
     */
    TCGv_i64 constimm;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * To avoid excessive duplication of ops we implement shift
     * by immediate using the variable shift operations.
     */
    constimm = tcg_const_i64(dup_const(a->size, a->shift));

    for (pass = 0; pass < a->q + 1; pass++) {
        TCGv_i64 tmp = tcg_temp_new_i64();

        neon_load_reg64(tmp, a->vm + pass);
        fn(tmp, cpu_env, tmp, constimm);
        neon_store_reg64(tmp, a->vd + pass);
        tcg_temp_free_i64(tmp);
    }
    tcg_temp_free_i64(constimm);

    return true;
}
static bool do_2shift_env_32(DisasContext *s, arg_2reg_shift *a,
                             NeonGenTwoOpEnvFn *fn)
{
    /*
     * 2-reg-and-shift operations, size < 3 case, where the
     * helper needs to be passed cpu_env.
     */
    TCGv_i32 constimm;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * To avoid excessive duplication of ops we implement shift
     * by immediate using the variable shift operations.
     */
    constimm = tcg_const_i32(dup_const(a->size, a->shift));

    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
        TCGv_i32 tmp = neon_load_reg(a->vm, pass);
        fn(tmp, cpu_env, tmp, constimm);
        neon_store_reg(a->vd, pass, tmp);
    }
    tcg_temp_free_i32(constimm);

    return true;
}
#define DO_2SHIFT_ENV(INSN, FUNC)                                       \
    static bool trans_##INSN##_64_2sh(DisasContext *s, arg_2reg_shift *a) \
    {                                                                   \
        return do_2shift_env_64(s, a, gen_helper_neon_##FUNC##64);      \
    }                                                                   \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        static NeonGenTwoOpEnvFn * const fns[] = {                      \
            gen_helper_neon_##FUNC##8,                                  \
            gen_helper_neon_##FUNC##16,                                 \
            gen_helper_neon_##FUNC##32,                                 \
        };                                                              \
        assert(a->size < ARRAY_SIZE(fns));                              \
        return do_2shift_env_32(s, a, fns[a->size]);                    \
    }

DO_2SHIFT_ENV(VQSHLU, qshlu_s)
DO_2SHIFT_ENV(VQSHL_U, qshl_u)
DO_2SHIFT_ENV(VQSHL_S, qshl_s)
static bool do_2shift_narrow_64(DisasContext *s, arg_2reg_shift *a,
                                NeonGenTwo64OpFn *shiftfn,
                                NeonGenNarrowEnvFn *narrowfn)
{
    /* 2-reg-and-shift narrowing-shift operations, size == 3 case */
    TCGv_i64 constimm, rm1, rm2;
    TCGv_i32 rd;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vm & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * This is always a right shift, and the shiftfn is always a
     * left-shift helper, which thus needs the negated shift count.
     */
    constimm = tcg_const_i64(-a->shift);
    rm1 = tcg_temp_new_i64();
    rm2 = tcg_temp_new_i64();

    /* Load both inputs first to avoid potential overwrite if rm == rd */
    neon_load_reg64(rm1, a->vm);
    neon_load_reg64(rm2, a->vm + 1);

    shiftfn(rm1, rm1, constimm);
    rd = tcg_temp_new_i32();
    narrowfn(rd, cpu_env, rm1);
    neon_store_reg(a->vd, 0, rd);

    shiftfn(rm2, rm2, constimm);
    rd = tcg_temp_new_i32();
    narrowfn(rd, cpu_env, rm2);
    neon_store_reg(a->vd, 1, rd);

    tcg_temp_free_i64(rm1);
    tcg_temp_free_i64(rm2);
    tcg_temp_free_i64(constimm);

    return true;
}
static bool do_2shift_narrow_32(DisasContext *s, arg_2reg_shift *a,
                                NeonGenTwoOpFn *shiftfn,
                                NeonGenNarrowEnvFn *narrowfn)
{
    /* 2-reg-and-shift narrowing-shift operations, size < 3 case */
    TCGv_i32 constimm, rm1, rm2, rm3, rm4;
    TCGv_i64 rtmp;
    uint32_t imm;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vm & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * This is always a right shift, and the shiftfn is always a
     * left-shift helper, which thus needs the negated shift count
     * duplicated into each lane of the immediate value.
     */
    if (a->size == 1) {
        imm = (uint16_t)(-a->shift);
        imm |= imm << 16;
    } else {
        /* size == 2 */
        imm = -a->shift;
    }
    constimm = tcg_const_i32(imm);

    /* Load all inputs first to avoid potential overwrite */
    rm1 = neon_load_reg(a->vm, 0);
    rm2 = neon_load_reg(a->vm, 1);
    rm3 = neon_load_reg(a->vm + 1, 0);
    rm4 = neon_load_reg(a->vm + 1, 1);
    rtmp = tcg_temp_new_i64();

    shiftfn(rm1, rm1, constimm);
    shiftfn(rm2, rm2, constimm);

    tcg_gen_concat_i32_i64(rtmp, rm1, rm2);
    tcg_temp_free_i32(rm2);

    narrowfn(rm1, cpu_env, rtmp);
    neon_store_reg(a->vd, 0, rm1);

    shiftfn(rm3, rm3, constimm);
    shiftfn(rm4, rm4, constimm);
    tcg_temp_free_i32(constimm);

    tcg_gen_concat_i32_i64(rtmp, rm3, rm4);
    tcg_temp_free_i32(rm4);

    narrowfn(rm3, cpu_env, rtmp);
    tcg_temp_free_i64(rtmp);
    neon_store_reg(a->vd, 1, rm3);

    return true;
}
#define DO_2SN_64(INSN, FUNC, NARROWFUNC)                               \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        return do_2shift_narrow_64(s, a, FUNC, NARROWFUNC);             \
    }
#define DO_2SN_32(INSN, FUNC, NARROWFUNC)                               \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        return do_2shift_narrow_32(s, a, FUNC, NARROWFUNC);             \
    }

static void gen_neon_narrow_u32(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
{
    tcg_gen_extrl_i64_i32(dest, src);
}

static void gen_neon_narrow_u16(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
{
    gen_helper_neon_narrow_u16(dest, src);
}

static void gen_neon_narrow_u8(TCGv_i32 dest, TCGv_ptr env, TCGv_i64 src)
{
    gen_helper_neon_narrow_u8(dest, src);
}
DO_2SN_64(VSHRN_64, gen_ushl_i64, gen_neon_narrow_u32)
DO_2SN_32(VSHRN_32, gen_ushl_i32, gen_neon_narrow_u16)
DO_2SN_32(VSHRN_16, gen_helper_neon_shl_u16, gen_neon_narrow_u8)

DO_2SN_64(VRSHRN_64, gen_helper_neon_rshl_u64, gen_neon_narrow_u32)
DO_2SN_32(VRSHRN_32, gen_helper_neon_rshl_u32, gen_neon_narrow_u16)
DO_2SN_32(VRSHRN_16, gen_helper_neon_rshl_u16, gen_neon_narrow_u8)

DO_2SN_64(VQSHRUN_64, gen_sshl_i64, gen_helper_neon_unarrow_sat32)
DO_2SN_32(VQSHRUN_32, gen_sshl_i32, gen_helper_neon_unarrow_sat16)
DO_2SN_32(VQSHRUN_16, gen_helper_neon_shl_s16, gen_helper_neon_unarrow_sat8)

DO_2SN_64(VQRSHRUN_64, gen_helper_neon_rshl_s64, gen_helper_neon_unarrow_sat32)
DO_2SN_32(VQRSHRUN_32, gen_helper_neon_rshl_s32, gen_helper_neon_unarrow_sat16)
DO_2SN_32(VQRSHRUN_16, gen_helper_neon_rshl_s16, gen_helper_neon_unarrow_sat8)
DO_2SN_64(VQSHRN_S64, gen_sshl_i64, gen_helper_neon_narrow_sat_s32)
DO_2SN_32(VQSHRN_S32, gen_sshl_i32, gen_helper_neon_narrow_sat_s16)
DO_2SN_32(VQSHRN_S16, gen_helper_neon_shl_s16, gen_helper_neon_narrow_sat_s8)

DO_2SN_64(VQRSHRN_S64, gen_helper_neon_rshl_s64, gen_helper_neon_narrow_sat_s32)
DO_2SN_32(VQRSHRN_S32, gen_helper_neon_rshl_s32, gen_helper_neon_narrow_sat_s16)
DO_2SN_32(VQRSHRN_S16, gen_helper_neon_rshl_s16, gen_helper_neon_narrow_sat_s8)

DO_2SN_64(VQSHRN_U64, gen_ushl_i64, gen_helper_neon_narrow_sat_u32)
DO_2SN_32(VQSHRN_U32, gen_ushl_i32, gen_helper_neon_narrow_sat_u16)
DO_2SN_32(VQSHRN_U16, gen_helper_neon_shl_u16, gen_helper_neon_narrow_sat_u8)

DO_2SN_64(VQRSHRN_U64, gen_helper_neon_rshl_u64, gen_helper_neon_narrow_sat_u32)
DO_2SN_32(VQRSHRN_U32, gen_helper_neon_rshl_u32, gen_helper_neon_narrow_sat_u16)
DO_2SN_32(VQRSHRN_U16, gen_helper_neon_rshl_u16, gen_helper_neon_narrow_sat_u8)
static bool do_vshll_2sh(DisasContext *s, arg_2reg_shift *a,
                         NeonGenWidenFn *widenfn, bool u)
{
    TCGv_i64 tmp;
    TCGv_i32 rm0, rm1;
    uint64_t widen_mask = 0;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /*
     * This is a widen-and-shift operation. The shift is always less
     * than the width of the source type, so after widening the input
     * vector we can simply shift the whole 64-bit widened register,
     * and then clear the potential overflow bits resulting from left
     * bits of the narrow input appearing as right bits of the left
     * neighbour narrow input. Calculate a mask of bits to clear.
     */
    if ((a->shift != 0) && (a->size < 2 || u)) {
        int esize = 8 << a->size;
        widen_mask = MAKE_64BIT_MASK(0, esize);
        widen_mask >>= esize - a->shift;
        widen_mask = dup_const(a->size + 1, widen_mask);
    }

    rm0 = neon_load_reg(a->vm, 0);
    rm1 = neon_load_reg(a->vm, 1);
    tmp = tcg_temp_new_i64();

    widenfn(tmp, rm0);
    tcg_temp_free_i32(rm0);
    if (a->shift != 0) {
        tcg_gen_shli_i64(tmp, tmp, a->shift);
        tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
    }
    neon_store_reg64(tmp, a->vd);

    widenfn(tmp, rm1);
    tcg_temp_free_i32(rm1);
    if (a->shift != 0) {
        tcg_gen_shli_i64(tmp, tmp, a->shift);
        tcg_gen_andi_i64(tmp, tmp, ~widen_mask);
    }
    neon_store_reg64(tmp, a->vd + 1);
    tcg_temp_free_i64(tmp);
    return true;
}
static bool trans_VSHLL_S_2sh(DisasContext *s, arg_2reg_shift *a)
{
    static NeonGenWidenFn * const widenfn[] = {
        gen_helper_neon_widen_s8,
        gen_helper_neon_widen_s16,
        tcg_gen_ext_i32_i64,
    };
    return do_vshll_2sh(s, a, widenfn[a->size], false);
}

static bool trans_VSHLL_U_2sh(DisasContext *s, arg_2reg_shift *a)
{
    static NeonGenWidenFn * const widenfn[] = {
        gen_helper_neon_widen_u8,
        gen_helper_neon_widen_u16,
        tcg_gen_extu_i32_i64,
    };
    return do_vshll_2sh(s, a, widenfn[a->size], true);
}
static bool do_fp_2sh(DisasContext *s, arg_2reg_shift *a,
                      NeonGenTwoSingleOPFn *fn)
{
    /* FP operations in 2-reg-and-shift group */
    TCGv_i32 tmp, shiftv;
    TCGv_ptr fpstatus;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vm) & 0x10)) {
        return false;
    }

    if ((a->vm | a->vd) & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpstatus = get_fpstatus_ptr(1);
    shiftv = tcg_const_i32(a->shift);
    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
        tmp = neon_load_reg(a->vm, pass);
        fn(tmp, tmp, shiftv, fpstatus);
        neon_store_reg(a->vd, pass, tmp);
    }
    tcg_temp_free_ptr(fpstatus);
    tcg_temp_free_i32(shiftv);
    return true;
}
#define DO_FP_2SH(INSN, FUNC)                                           \
    static bool trans_##INSN##_2sh(DisasContext *s, arg_2reg_shift *a)  \
    {                                                                   \
        return do_fp_2sh(s, a, FUNC);                                   \
    }

DO_FP_2SH(VCVT_SF, gen_helper_vfp_sltos)
DO_FP_2SH(VCVT_UF, gen_helper_vfp_ultos)
DO_FP_2SH(VCVT_FS, gen_helper_vfp_tosls_round_to_zero)
DO_FP_2SH(VCVT_FU, gen_helper_vfp_touls_round_to_zero)
static uint64_t asimd_imm_const(uint32_t imm, int cmode, int op)
{
    /*
     * Expand the encoded constant.
     * Note that cmode = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
     * We choose to not special-case this and will behave as if a
     * valid constant encoding of 0 had been given.
     * cmode = 15 op = 1 must UNDEF; we assume decode has handled that.
     */
    switch (cmode) {
    case 0: case 1:
        /* no shift */
        break;
    case 2: case 3:
        imm <<= 8;
        break;
    case 4: case 5:
        imm <<= 16;
        break;
    case 6: case 7:
        imm <<= 24;
        break;
    case 8: case 9:
        imm |= imm << 16;
        break;
    case 10: case 11:
        imm = (imm << 8) | (imm << 24);
        break;
    case 12:
        imm = (imm << 8) | 0xff;
        break;
    case 13:
        imm = (imm << 16) | 0xffff;
        break;
    case 14:
        if (op) {
            /*
             * This is the only case where the top and bottom 32 bits
             * of the encoded constant differ.
             */
            uint64_t imm64 = 0;
            int n;

            for (n = 0; n < 8; n++) {
                if (imm & (1 << n)) {
                    imm64 |= (0xffULL << (n * 8));
                }
            }
            return imm64;
        }
        imm |= (imm << 8) | (imm << 16) | (imm << 24);
        break;
    case 15:
        imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
            | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
        break;
    }
    if (op) {
        imm = ~imm;
    }
    return dup_const(MO_32, imm);
}
static bool do_1reg_imm(DisasContext *s, arg_1reg_imm *a,
                        GVecGen2iFn *fn)
{
    uint64_t imm;
    int reg_ofs, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (a->vd & a->q) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    reg_ofs = neon_reg_offset(a->vd, 0);
    vec_size = a->q ? 16 : 8;
    imm = asimd_imm_const(a->imm, a->cmode, a->op);

    fn(MO_64, reg_ofs, reg_ofs, imm, vec_size, vec_size);
    return true;
}

static void gen_VMOV_1r(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, c);
}

static bool trans_Vimm_1r(DisasContext *s, arg_1reg_imm *a)
{
    /* Handle decode of cmode/op here between VORR/VBIC/VMOV */
    GVecGen2iFn *fn;

    if ((a->cmode & 1) && a->cmode < 12) {
        /* for op=1, the imm will be inverted, so BIC becomes AND. */
        fn = a->op ? tcg_gen_gvec_andi : tcg_gen_gvec_ori;
    } else {
        /* There is one unallocated cmode/op combination in this space */
        if (a->cmode == 15 && a->op == 1) {
            return false;
        }
        fn = gen_VMOV_1r;
    }
    return do_1reg_imm(s, a, fn);
}
static bool do_prewiden_3d(DisasContext *s, arg_3diff *a,
                           NeonGenWidenFn *widenfn,
                           NeonGenTwo64OpFn *opfn,
                           bool src1_wide)
{
    /* 3-regs different lengths, prewidening case (VADDL/VSUBL/VADDW/VSUBW) */
    TCGv_i64 rn0_64, rn1_64, rm_64;
    TCGv_i32 rm;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!widenfn || !opfn) {
        /* size == 3 case, which is an entirely different insn group */
        return false;
    }

    if ((a->vd & 1) || (src1_wide && (a->vn & 1))) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rn0_64 = tcg_temp_new_i64();
    rn1_64 = tcg_temp_new_i64();
    rm_64 = tcg_temp_new_i64();

    if (src1_wide) {
        neon_load_reg64(rn0_64, a->vn);
    } else {
        TCGv_i32 tmp = neon_load_reg(a->vn, 0);
        widenfn(rn0_64, tmp);
        tcg_temp_free_i32(tmp);
    }
    rm = neon_load_reg(a->vm, 0);

    widenfn(rm_64, rm);
    tcg_temp_free_i32(rm);
    opfn(rn0_64, rn0_64, rm_64);

    /*
     * Load second pass inputs before storing the first pass result, to
     * avoid incorrect results if a narrow input overlaps with the result.
     */
    if (src1_wide) {
        neon_load_reg64(rn1_64, a->vn + 1);
    } else {
        TCGv_i32 tmp = neon_load_reg(a->vn, 1);
        widenfn(rn1_64, tmp);
        tcg_temp_free_i32(tmp);
    }
    rm = neon_load_reg(a->vm, 1);

    neon_store_reg64(rn0_64, a->vd);

    widenfn(rm_64, rm);
    tcg_temp_free_i32(rm);
    opfn(rn1_64, rn1_64, rm_64);
    neon_store_reg64(rn1_64, a->vd + 1);

    tcg_temp_free_i64(rn0_64);
    tcg_temp_free_i64(rn1_64);
    tcg_temp_free_i64(rm_64);
    return true;
}
#define DO_PREWIDEN(INSN, S, EXT, OP, SRC1WIDE)                         \
    static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a)        \
    {                                                                   \
        static NeonGenWidenFn * const widenfn[] = {                     \
            gen_helper_neon_widen_##S##8,                               \
            gen_helper_neon_widen_##S##16,                              \
            tcg_gen_##EXT##_i32_i64,                                    \
            NULL,                                                       \
        };                                                              \
        static NeonGenTwo64OpFn * const addfn[] = {                     \
            gen_helper_neon_##OP##l_u16,                                \
            gen_helper_neon_##OP##l_u32,                                \
            tcg_gen_##OP##_i64,                                         \
            NULL,                                                       \
        };                                                              \
        return do_prewiden_3d(s, a, widenfn[a->size],                   \
                              addfn[a->size], SRC1WIDE);                \
    }

DO_PREWIDEN(VADDL_S, s, ext, add, false)
DO_PREWIDEN(VADDL_U, u, extu, add, false)
DO_PREWIDEN(VSUBL_S, s, ext, sub, false)
DO_PREWIDEN(VSUBL_U, u, extu, sub, false)
DO_PREWIDEN(VADDW_S, s, ext, add, true)
DO_PREWIDEN(VADDW_U, u, extu, add, true)
DO_PREWIDEN(VSUBW_S, s, ext, sub, true)
DO_PREWIDEN(VSUBW_U, u, extu, sub, true)
static bool do_narrow_3d(DisasContext *s, arg_3diff *a,
                         NeonGenTwo64OpFn *opfn, NeonGenNarrowFn *narrowfn)
{
    /* 3-regs different lengths, narrowing (VADDHN/VSUBHN/VRADDHN/VRSUBHN) */
    TCGv_i64 rn_64, rm_64;
    TCGv_i32 rd0, rd1;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn || !narrowfn) {
        /* size == 3 case, which is an entirely different insn group */
        return false;
    }

    if ((a->vn | a->vm) & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rn_64 = tcg_temp_new_i64();
    rm_64 = tcg_temp_new_i64();
    rd0 = tcg_temp_new_i32();
    rd1 = tcg_temp_new_i32();

    neon_load_reg64(rn_64, a->vn);
    neon_load_reg64(rm_64, a->vm);

    opfn(rn_64, rn_64, rm_64);

    narrowfn(rd0, rn_64);

    neon_load_reg64(rn_64, a->vn + 1);
    neon_load_reg64(rm_64, a->vm + 1);

    opfn(rn_64, rn_64, rm_64);

    narrowfn(rd1, rn_64);

    neon_store_reg(a->vd, 0, rd0);
    neon_store_reg(a->vd, 1, rd1);

    tcg_temp_free_i64(rn_64);
    tcg_temp_free_i64(rm_64);

    return true;
}
#define DO_NARROW_3D(INSN, OP, NARROWTYPE, EXTOP)                       \
    static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a)        \
    {                                                                   \
        static NeonGenTwo64OpFn * const addfn[] = {                     \
            gen_helper_neon_##OP##l_u16,                                \
            gen_helper_neon_##OP##l_u32,                                \
            tcg_gen_##OP##_i64,                                         \
            NULL,                                                       \
        };                                                              \
        static NeonGenNarrowFn * const narrowfn[] = {                   \
            gen_helper_neon_##NARROWTYPE##_high_u8,                     \
            gen_helper_neon_##NARROWTYPE##_high_u16,                    \
            EXTOP,                                                      \
            NULL,                                                       \
        };                                                              \
        return do_narrow_3d(s, a, addfn[a->size], narrowfn[a->size]);   \
    }

static void gen_narrow_round_high_u32(TCGv_i32 rd, TCGv_i64 rn)
{
    tcg_gen_addi_i64(rn, rn, 1u << 31);
    tcg_gen_extrh_i64_i32(rd, rn);
}

DO_NARROW_3D(VADDHN, add, narrow, tcg_gen_extrh_i64_i32)
DO_NARROW_3D(VSUBHN, sub, narrow, tcg_gen_extrh_i64_i32)
DO_NARROW_3D(VRADDHN, add, narrow_round, gen_narrow_round_high_u32)
DO_NARROW_3D(VRSUBHN, sub, narrow_round, gen_narrow_round_high_u32)
static bool do_long_3d(DisasContext *s, arg_3diff *a,
                       NeonGenTwoOpWidenFn *opfn,
                       NeonGenTwo64OpFn *accfn)
{
    /*
     * 3-regs different lengths, long operations.
     * These perform an operation on two inputs that returns a double-width
     * result, and then possibly perform an accumulation operation of
     * that result into the double-width destination.
     */
    TCGv_i64 rd0, rd1, tmp;
    TCGv_i32 rn, rm;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn) {
        /* size == 3 case, which is an entirely different insn group */
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    rd0 = tcg_temp_new_i64();
    rd1 = tcg_temp_new_i64();

    rn = neon_load_reg(a->vn, 0);
    rm = neon_load_reg(a->vm, 0);
    opfn(rd0, rn, rm);
    tcg_temp_free_i32(rn);
    tcg_temp_free_i32(rm);

    rn = neon_load_reg(a->vn, 1);
    rm = neon_load_reg(a->vm, 1);
    opfn(rd1, rn, rm);
    tcg_temp_free_i32(rn);
    tcg_temp_free_i32(rm);

    /* Don't store results until after all loads: they might overlap */
    if (accfn) {
        tmp = tcg_temp_new_i64();
        neon_load_reg64(tmp, a->vd);
        accfn(tmp, tmp, rd0);
        neon_store_reg64(tmp, a->vd);
        neon_load_reg64(tmp, a->vd + 1);
        accfn(tmp, tmp, rd1);
        neon_store_reg64(tmp, a->vd + 1);
        tcg_temp_free_i64(tmp);
    } else {
        neon_store_reg64(rd0, a->vd);
        neon_store_reg64(rd1, a->vd + 1);
    }

    tcg_temp_free_i64(rd0);
    tcg_temp_free_i64(rd1);

    return true;
}
static bool trans_VABDL_S_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_abdl_s16,
        gen_helper_neon_abdl_s32,
        gen_helper_neon_abdl_s64,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}

static bool trans_VABDL_U_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_abdl_u16,
        gen_helper_neon_abdl_u32,
        gen_helper_neon_abdl_u64,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}
*s
, arg_3diff
*a
)
2122 static NeonGenTwoOpWidenFn
* const opfn
[] = {
2123 gen_helper_neon_abdl_s16
,
2124 gen_helper_neon_abdl_s32
,
2125 gen_helper_neon_abdl_s64
,
2128 static NeonGenTwo64OpFn
* const addfn
[] = {
2129 gen_helper_neon_addl_u16
,
2130 gen_helper_neon_addl_u32
,
2135 return do_long_3d(s
, a
, opfn
[a
->size
], addfn
[a
->size
]);
2138 static bool trans_VABAL_U_3d(DisasContext
*s
, arg_3diff
*a
)
2140 static NeonGenTwoOpWidenFn
* const opfn
[] = {
2141 gen_helper_neon_abdl_u16
,
2142 gen_helper_neon_abdl_u32
,
2143 gen_helper_neon_abdl_u64
,
2146 static NeonGenTwo64OpFn
* const addfn
[] = {
2147 gen_helper_neon_addl_u16
,
2148 gen_helper_neon_addl_u32
,
2153 return do_long_3d(s
, a
, opfn
[a
->size
], addfn
[a
->size
]);
static void gen_mull_s32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();

    tcg_gen_muls2_i32(lo, hi, rn, rm);
    tcg_gen_concat_i32_i64(rd, lo, hi);

    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);
}

static void gen_mull_u32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
{
    TCGv_i32 lo = tcg_temp_new_i32();
    TCGv_i32 hi = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(lo, hi, rn, rm);
    tcg_gen_concat_i32_i64(rd, lo, hi);

    tcg_temp_free_i32(lo);
    tcg_temp_free_i32(hi);
}
static bool trans_VMULL_S_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_mull_s8,
        gen_helper_neon_mull_s16,
        gen_mull_s32,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}

static bool trans_VMULL_U_3d(DisasContext *s, arg_3diff *a)
{
    static NeonGenTwoOpWidenFn * const opfn[] = {
        gen_helper_neon_mull_u8,
        gen_helper_neon_mull_u16,
        gen_mull_u32,
        NULL,
    };

    return do_long_3d(s, a, opfn[a->size], NULL);
}
#define DO_VMLAL(INSN,MULL,ACC)                                         \
    static bool trans_##INSN##_3d(DisasContext *s, arg_3diff *a)        \
    {                                                                   \
        static NeonGenTwoOpWidenFn * const opfn[] = {                   \
            gen_helper_neon_##MULL##8,                                  \
            gen_helper_neon_##MULL##16,                                 \
            gen_##MULL##32,                                             \
            NULL,                                                       \
        };                                                              \
        static NeonGenTwo64OpFn * const accfn[] = {                     \
            gen_helper_neon_##ACC##l_u16,                               \
            gen_helper_neon_##ACC##l_u32,                               \
            tcg_gen_##ACC##_i64,                                        \
            NULL,                                                       \
        };                                                              \
        return do_long_3d(s, a, opfn[a->size], accfn[a->size]);         \
    }

DO_VMLAL(VMLAL_S,mull_s,add)
DO_VMLAL(VMLAL_U,mull_u,add)
DO_VMLAL(VMLSL_S,mull_s,sub)
DO_VMLAL(VMLSL_U,mull_u,sub)
static void gen_VQDMULL_16(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
{
    gen_helper_neon_mull_s16(rd, rn, rm);
    gen_helper_neon_addl_saturate_s32(rd, cpu_env, rd, rd);
}

static void gen_VQDMULL_32(TCGv_i64 rd, TCGv_i32 rn, TCGv_i32 rm)
{
    gen_mull_s32(rd, rn, rm);
    gen_helper_neon_addl_saturate_s64(rd, cpu_env, rd, rd);
}
*s
, arg_3diff
*a
)
2241 static NeonGenTwoOpWidenFn
* const opfn
[] = {
2248 return do_long_3d(s
, a
, opfn
[a
->size
], NULL
);
2251 static void gen_VQDMLAL_acc_16(TCGv_i64 rd
, TCGv_i64 rn
, TCGv_i64 rm
)
2253 gen_helper_neon_addl_saturate_s32(rd
, cpu_env
, rn
, rm
);
2256 static void gen_VQDMLAL_acc_32(TCGv_i64 rd
, TCGv_i64 rn
, TCGv_i64 rm
)
2258 gen_helper_neon_addl_saturate_s64(rd
, cpu_env
, rn
, rm
);
2261 static bool trans_VQDMLAL_3d(DisasContext
*s
, arg_3diff
*a
)
2263 static NeonGenTwoOpWidenFn
* const opfn
[] = {
2269 static NeonGenTwo64OpFn
* const accfn
[] = {
2276 return do_long_3d(s
, a
, opfn
[a
->size
], accfn
[a
->size
]);
2279 static void gen_VQDMLSL_acc_16(TCGv_i64 rd
, TCGv_i64 rn
, TCGv_i64 rm
)
2281 gen_helper_neon_negl_u32(rm
, rm
);
2282 gen_helper_neon_addl_saturate_s32(rd
, cpu_env
, rn
, rm
);
2285 static void gen_VQDMLSL_acc_32(TCGv_i64 rd
, TCGv_i64 rn
, TCGv_i64 rm
)
2287 tcg_gen_neg_i64(rm
, rm
);
2288 gen_helper_neon_addl_saturate_s64(rd
, cpu_env
, rn
, rm
);
2291 static bool trans_VQDMLSL_3d(DisasContext
*s
, arg_3diff
*a
)
2293 static NeonGenTwoOpWidenFn
* const opfn
[] = {
2299 static NeonGenTwo64OpFn
* const accfn
[] = {
2306 return do_long_3d(s
, a
, opfn
[a
->size
], accfn
[a
->size
]);
static bool trans_VMULL_P_3d(DisasContext *s, arg_3diff *a)
{
    gen_helper_gvec_3 *fn_gvec;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (a->vd & 1) {
        return false;
    }

    switch (a->size) {
    case 0:
        fn_gvec = gen_helper_neon_pmull_h;
        break;
    case 2:
        if (!dc_isar_feature(aa32_pmull, s)) {
            return false;
        }
        fn_gvec = gen_helper_gvec_pmull_q;
        break;
    default:
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tcg_gen_gvec_3_ool(neon_reg_offset(a->vd, 0),
                       neon_reg_offset(a->vn, 0),
                       neon_reg_offset(a->vm, 0),
                       16, 16, 0, fn_gvec);
    return true;
}
static void gen_neon_dup_low16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ext16u_i32(var, var);
    tcg_gen_shli_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}

static void gen_neon_dup_high16(TCGv_i32 var)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(var, var, 0xffff0000);
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    tcg_temp_free_i32(tmp);
}
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
static bool do_2scalar(DisasContext *s, arg_2scalar *a,
                       NeonGenTwoOpFn *opfn, NeonGenTwoOpFn *accfn)
{
    /*
     * Two registers and a scalar: perform an operation between
     * the input elements and the scalar, and then possibly
     * perform an accumulation operation of that result into the
     * destination.
     */
    TCGv_i32 scalar;
    int pass;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vd | a->vn | a->vm) & 0x10)) {
        return false;
    }

    if (!opfn) {
        /* Bad size (including size == 3, which is a different insn group) */
        return false;
    }

    if (a->q && ((a->vd | a->vn) & 1)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    scalar = neon_get_scalar(a->size, a->vm);

    for (pass = 0; pass < (a->q ? 4 : 2); pass++) {
        TCGv_i32 tmp = neon_load_reg(a->vn, pass);
        opfn(tmp, tmp, scalar);
        if (accfn) {
            TCGv_i32 rd = neon_load_reg(a->vd, pass);
            accfn(tmp, rd, tmp);
            tcg_temp_free_i32(rd);
        }
        neon_store_reg(a->vd, pass, tmp);
    }
    tcg_temp_free_i32(scalar);
    return true;
}
static bool trans_VMUL_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_helper_neon_mul_u16,
        tcg_gen_mul_i32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], NULL);
}

static bool trans_VMLA_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_helper_neon_mul_u16,
        tcg_gen_mul_i32,
        NULL,
    };
    static NeonGenTwoOpFn * const accfn[] = {
        NULL,
        gen_helper_neon_add_u16,
        tcg_gen_add_i32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
}

static bool trans_VMLS_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_helper_neon_mul_u16,
        tcg_gen_mul_i32,
        NULL,
    };
    static NeonGenTwoOpFn * const accfn[] = {
        NULL,
        gen_helper_neon_sub_u16,
        tcg_gen_sub_i32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
}
/*
 * Rather than have a float-specific version of do_2scalar just for
 * three insns, we wrap a NeonGenTwoSingleOpFn to turn it into
 * a NeonGenTwoOpFn.
 */
#define WRAP_FP_FN(WRAPNAME, FUNC)                                      \
    static void WRAPNAME(TCGv_i32 rd, TCGv_i32 rn, TCGv_i32 rm)         \
    {                                                                   \
        TCGv_ptr fpstatus = get_fpstatus_ptr(1);                        \
        FUNC(rd, rn, rm, fpstatus);                                     \
        tcg_temp_free_ptr(fpstatus);                                    \
    }

WRAP_FP_FN(gen_VMUL_F_mul, gen_helper_vfp_muls)
WRAP_FP_FN(gen_VMUL_F_add, gen_helper_vfp_adds)
WRAP_FP_FN(gen_VMUL_F_sub, gen_helper_vfp_subs)
static bool trans_VMUL_F_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        NULL, /* TODO: fp16 support */
        gen_VMUL_F_mul,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], NULL);
}

static bool trans_VMLA_F_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        NULL, /* TODO: fp16 support */
        gen_VMUL_F_mul,
        NULL,
    };
    static NeonGenTwoOpFn * const accfn[] = {
        NULL,
        NULL, /* TODO: fp16 support */
        gen_VMUL_F_add,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
}

static bool trans_VMLS_F_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        NULL, /* TODO: fp16 support */
        gen_VMUL_F_mul,
        NULL,
    };
    static NeonGenTwoOpFn * const accfn[] = {
        NULL,
        NULL, /* TODO: fp16 support */
        gen_VMUL_F_sub,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], accfn[a->size]);
}
WRAP_ENV_FN(gen_VQDMULH_16, gen_helper_neon_qdmulh_s16)
WRAP_ENV_FN(gen_VQDMULH_32, gen_helper_neon_qdmulh_s32)
WRAP_ENV_FN(gen_VQRDMULH_16, gen_helper_neon_qrdmulh_s16)
WRAP_ENV_FN(gen_VQRDMULH_32, gen_helper_neon_qrdmulh_s32)

static bool trans_VQDMULH_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_VQDMULH_16,
        gen_VQDMULH_32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], NULL);
}

static bool trans_VQRDMULH_2sc(DisasContext *s, arg_2scalar *a)
{
    static NeonGenTwoOpFn * const opfn[] = {
        NULL,
        gen_VQRDMULH_16,
        gen_VQRDMULH_32,
        NULL,
    };

    return do_2scalar(s, a, opfn[a->size], NULL);
}