/*
 * AArch64 SVE translation
 *
 * Copyright (c) 2018 Linaro, Ltd
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "trace-tcg.h"
#include "translate-a64.h"
#include "fpu/softfloat.h"
typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t,
                         TCGv_i64, uint32_t, uint32_t);

typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_i32);
typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_ptr, TCGv_i32);

typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr,
                                         TCGv_ptr, TCGv_i64, TCGv_i32);
/*
 * Helpers for extracting complex instruction fields.
 */

/* See e.g. ASR (immediate, predicated).
 * Returns -1 for unallocated encoding; diagnose later.
 */
static int tszimm_esz(DisasContext *s, int x)
{
    x >>= 3;  /* discard imm3 */
    return 31 - clz32(x);
}

static int tszimm_shr(DisasContext *s, int x)
{
    return (16 << tszimm_esz(s, x)) - x;
}

/* See e.g. LSL (immediate, predicated). */
static int tszimm_shl(DisasContext *s, int x)
{
    return x - (8 << tszimm_esz(s, x));
}

static inline int plus1(DisasContext *s, int x)
{
    return x + 1;
}

/* The SH bit is in bit 8.  Extract the low 8 and shift. */
static inline int expand_imm_sh8s(DisasContext *s, int x)
{
    return (int8_t)x << (x & 0x100 ? 8 : 0);
}

static inline int expand_imm_sh8u(DisasContext *s, int x)
{
    return (uint8_t)x << (x & 0x100 ? 8 : 0);
}

/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
 * with unsigned data.  C.f. SVE Memory Contiguous Load Group.
 */
static inline int msz_dtype(DisasContext *s, int msz)
{
    static const uint8_t dtype[4] = { 0, 5, 10, 15 };
    return dtype[msz];
}
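/*
 * Worked example of the tsz:imm3 encoding above (explanatory note, not
 * from the upstream source): for halfword elements the composite field
 * x = tsz:imm3 lies in [16, 31], so x >> 3 is 2 or 3 and tszimm_esz
 * returns 1 (MO_16).  tszimm_shr then yields (16 << 1) - x = 32 - x,
 * a right-shift amount of 1..16, while tszimm_shl yields x - 16,
 * a left-shift amount of 0..15.
 */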
/*
 * Include the generated decoder.
 */

#include "decode-sve.c.inc"

/*
 * Implement all of the translator functions referenced by the decoder.
 */

/* Return the offset into CPUARMState of the predicate vector register Pn.
 * Note for this purpose, FFR is P16.
 */
static inline int pred_full_reg_offset(DisasContext *s, int regno)
{
    return offsetof(CPUARMState, vfp.pregs[regno]);
}

/* Return the byte size of the whole predicate register, VL / 64. */
static inline int pred_full_reg_size(DisasContext *s)
{
    return s->sve_len >> 3;
}

/* Round up the size of a register to a size allowed by
 * the tcg vector infrastructure.  Any operation which uses this
 * size may assume that the bits above pred_full_reg_size are zero,
 * and must leave them the same way.
 *
 * Note that this is not needed for the vector registers as they
 * are always properly sized for tcg vectors.
 */
static int size_for_gvec(int size)
{
    if (size <= 8) {
        return 8;
    } else {
        return QEMU_ALIGN_UP(size, 16);
    }
}

static int pred_gvec_reg_size(DisasContext *s)
{
    return size_for_gvec(pred_full_reg_size(s));
}
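/*
 * Sizing example (explanatory note, not from the upstream source): with
 * a 256-bit vector length, s->sve_len is 32 bytes, so pred_full_reg_size()
 * is 32 >> 3 = 4 bytes, and pred_gvec_reg_size() rounds that up to the
 * 8-byte minimum that the tcg gvec infrastructure accepts.
 */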
/* Invoke an out-of-line helper on 2 Zregs. */
static void gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
                            int rd, int rn, int data)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vsz, vsz, data, fn);
}

/* Invoke an out-of-line helper on 3 Zregs. */
static void gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int rm, int data)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vsz, vsz, data, fn);
}

/* Invoke an out-of-line helper on 4 Zregs. */
static void gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int ra, int data)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       vec_full_reg_offset(s, ra),
                       vsz, vsz, data, fn);
}

/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
static void gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int pg, int data)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       pred_full_reg_offset(s, pg),
                       vsz, vsz, data, fn);
}

/* Invoke an out-of-line helper on 3 Zregs and a predicate. */
static void gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int pg, int data)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                       vec_full_reg_offset(s, rn),
                       vec_full_reg_offset(s, rm),
                       pred_full_reg_offset(s, pg),
                       vsz, vsz, data, fn);
}
/* Invoke a vector expander on two Zregs. */
static void gen_gvec_fn_zz(DisasContext *s, GVecGen2Fn *gvec_fn,
                           int esz, int rd, int rn)
{
    unsigned vsz = vec_full_reg_size(s);
    gvec_fn(esz, vec_full_reg_offset(s, rd),
            vec_full_reg_offset(s, rn), vsz, vsz);
}

/* Invoke a vector expander on three Zregs. */
static void gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int esz, int rd, int rn, int rm)
{
    unsigned vsz = vec_full_reg_size(s);
    gvec_fn(esz, vec_full_reg_offset(s, rd),
            vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm), vsz, vsz);
}

/* Invoke a vector expander on four Zregs. */
static void gen_gvec_fn_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
                             int esz, int rd, int rn, int rm, int ra)
{
    unsigned vsz = vec_full_reg_size(s);
    gvec_fn(esz, vec_full_reg_offset(s, rd),
            vec_full_reg_offset(s, rn),
            vec_full_reg_offset(s, rm),
            vec_full_reg_offset(s, ra), vsz, vsz);
}
/* Invoke a vector move on two Zregs. */
static bool do_mov_z(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        gen_gvec_fn_zz(s, tcg_gen_gvec_mov, MO_8, rd, rn);
    }
    return true;
}

/* Initialize a Zreg with replications of a 64-bit immediate. */
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}
/* Invoke a vector expander on three Pregs. */
static void gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int rd, int rn, int rm)
{
    unsigned psz = pred_gvec_reg_size(s);
    gvec_fn(MO_64, pred_full_reg_offset(s, rd),
            pred_full_reg_offset(s, rn),
            pred_full_reg_offset(s, rm), psz, psz);
}

/* Invoke a vector move on two Pregs. */
static bool do_mov_p(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
                         pred_full_reg_offset(s, rn), psz, psz);
    }
    return true;
}
/* Set the cpu flags as per a return from an SVE helper. */
static void do_pred_flags(TCGv_i32 t)
{
    tcg_gen_mov_i32(cpu_NF, t);
    tcg_gen_andi_i32(cpu_ZF, t, 2);
    tcg_gen_andi_i32(cpu_CF, t, 1);
    tcg_gen_movi_i32(cpu_VF, 0);
}

/* Subroutines computing the ARM PredTest pseudofunction. */
static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_helper_sve_predtest1(t, d, g);
    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
{
    TCGv_ptr dptr = tcg_temp_new_ptr();
    TCGv_ptr gptr = tcg_temp_new_ptr();
    TCGv_i32 t;

    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
    tcg_gen_addi_ptr(gptr, cpu_env, gofs);
    t = tcg_const_i32(words);

    gen_helper_sve_predtest(t, dptr, gptr, t);
    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(gptr);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

/* For each element size, the bits within a predicate word that are active. */
const uint64_t pred_esz_masks[4] = {
    0xffffffffffffffffull, 0x5555555555555555ull,
    0x1111111111111111ull, 0x0101010101010101ull
};
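/*
 * Explanatory note (not from the upstream source): a predicate has one
 * bit per byte of vector, but only the bit for the lowest byte of each
 * element is significant.  Hence the masks above: every bit for MO_8,
 * every second bit (0x55...) for MO_16, every fourth (0x11...) for
 * MO_32, and every eighth (0x01...) for MO_64.
 */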
/*
 *** SVE Logical - Unpredicated Group
 */

static bool do_zzz_fn(DisasContext *s, arg_rrr_esz *a, GVecGen3Fn *gvec_fn)
{
    if (sve_access_check(s)) {
        gen_gvec_fn_zzz(s, gvec_fn, a->esz, a->rd, a->rn, a->rm);
    }
    return true;
}

static bool trans_AND_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_and);
}

static bool trans_ORR_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_or);
}

static bool trans_EOR_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_xor);
}

static bool trans_BIC_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_andc);
}
static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_8, 0xff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 8 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}
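/*
 * Explanatory note (not from the upstream source): the above performs a
 * per-byte rotate right within a 64-bit value.  A plain 64-bit shift
 * would let bits leak between byte lanes, so the result is assembled
 * from two shifted copies: the right-shifted copy keeps the low
 * (8 - sh) bits of each lane (selected by mask) and the left-shifted
 * copy supplies the sh bits rotated around (selected by ~mask).
 */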
static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_16, 0xffff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 16 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
{
    tcg_gen_xor_i32(d, n, m);
    tcg_gen_rotri_i32(d, d, sh);
}

static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_rotri_i64(d, d, sh);
}

static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                        TCGv_vec m, int64_t sh)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_rotri_vec(vece, d, d, sh);
}

void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, int64_t shift,
                  uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3i ops[4] = {
        { .fni8 = gen_xar8_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fni8 = gen_xar16_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_xar_i32,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_xar_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_gvec_xar_d,
          .opt_opc = vecop,
          .vece = MO_64 }
    };
    int esize = 8 << vece;

    /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
    tcg_debug_assert(shift >= 0);
    tcg_debug_assert(shift <= esize);
    shift &= esize - 1;

    if (shift == 0) {
        /* xar with no rotate devolves to xor. */
        tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
    } else {
        tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
                        shift, &ops[vece]);
    }
}
static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
{
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
                     vec_full_reg_offset(s, a->rn),
                     vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
    }
    return true;
}
static bool do_sve2_zzzz_fn(DisasContext *s, arg_rrrr_esz *a, GVecGen4Fn *fn)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_fn_zzzz(s, fn, a->esz, a->rd, a->rn, a->rm, a->ra);
    }
    return true;
}

static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_xor_i64(d, d, k);
}

static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_xor_vec(vece, d, d, k);
}

static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor3_i64,
        .fniv = gen_eor3_vec,
        .fno = gen_helper_sve2_eor3,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

static bool trans_EOR3(DisasContext *s, arg_rrrr_esz *a)
{
    return do_sve2_zzzz_fn(s, a, gen_eor3);
}
static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(d, m, k);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_andc_vec(vece, d, m, k);
    tcg_gen_xor_vec(vece, d, d, n);
}

static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bcax_i64,
        .fniv = gen_bcax_vec,
        .fno = gen_helper_sve2_bcax,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

static bool trans_BCAX(DisasContext *s, arg_rrrr_esz *a)
{
    return do_sve2_zzzz_fn(s, a, gen_bcax);
}
static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                    uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    /* BSL differs from the generic bitsel in argument ordering. */
    tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
}

static bool trans_BSL(DisasContext *s, arg_rrrr_esz *a)
{
    return do_sve2_zzzz_fn(s, a, gen_bsl);
}
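/*
 * Explanatory note (not from the upstream source): tcg_gen_gvec_bitsel
 * takes the selector first and computes dest = (t & sel) | (f & ~sel),
 * so passing the fourth SVE operand 'a' as the selector yields
 * d = (n & a) | (m & ~a), which is the BSL result.
 */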
static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(n, k, n);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_or_i64(d, n, m);
}

static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, n, n);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_andc_vec(vece, n, k, n);
        tcg_gen_andc_vec(vece, m, m, k);
        tcg_gen_or_vec(vece, d, n, m);
    }
}

static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl1n_i64,
        .fniv = gen_bsl1n_vec,
        .fno = gen_helper_sve2_bsl1n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

static bool trans_BSL1N(DisasContext *s, arg_rrrr_esz *a)
{
    return do_sve2_zzzz_fn(s, a, gen_bsl1n);
}
static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    /*
     * Z[dn] = (n & k) | (~m & ~k)
     *       =         | ~(m | k)
     */
    tcg_gen_and_i64(n, n, k);
    if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_or_i64(m, m, k);
        tcg_gen_orc_i64(d, n, m);
    } else {
        tcg_gen_nor_i64(m, m, k);
        tcg_gen_or_i64(d, n, m);
    }
}

static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, m, m);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_and_vec(vece, n, n, k);
        tcg_gen_or_vec(vece, m, m, k);
        tcg_gen_orc_vec(vece, d, n, m);
    }
}

static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl2n_i64,
        .fniv = gen_bsl2n_vec,
        .fno = gen_helper_sve2_bsl2n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

static bool trans_BSL2N(DisasContext *s, arg_rrrr_esz *a)
{
    return do_sve2_zzzz_fn(s, a, gen_bsl2n);
}
static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_and_i64(n, n, k);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_nor_i64(d, n, m);
}

static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_bitsel_vec(vece, d, k, n, m);
    tcg_gen_not_vec(vece, d, d);
}

static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_nbsl_i64,
        .fniv = gen_nbsl_vec,
        .fno = gen_helper_sve2_nbsl,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

static bool trans_NBSL(DisasContext *s, arg_rrrr_esz *a)
{
    return do_sve2_zzzz_fn(s, a, gen_nbsl);
}
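/*
 * Summary of the select variants above (explanatory note, not from the
 * upstream source), with k the selector:
 *   BSL:    d =  ( n & k) | ( m & ~k)
 *   BSL1N:  d =  (~n & k) | ( m & ~k)
 *   BSL2N:  d =  ( n & k) | (~m & ~k)
 *   NBSL:   d = ~(( n & k) | ( m & ~k))
 */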
/*
 *** SVE Integer Arithmetic - Unpredicated Group
 */

static bool trans_ADD_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_add);
}

static bool trans_SUB_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_sub);
}

static bool trans_SQADD_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_ssadd);
}

static bool trans_SQSUB_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_sssub);
}

static bool trans_UQADD_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_usadd);
}

static bool trans_UQSUB_zzz(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_fn(s, a, tcg_gen_gvec_ussub);
}
/*
 *** SVE Integer Arithmetic - Binary Predicated Group
 */

static bool do_zpzz_ool(DisasContext *s, arg_rprr_esz *a, gen_helper_gvec_4 *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0);
    }
    return true;
}

/* Select active elements from Zn and inactive elements from Zm,
 * storing the result in Zd.
 */
static void do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
{
    static gen_helper_gvec_4 * const fns[4] = {
        gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
        gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d
    };
    gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
}

#define DO_ZPZZ(NAME, name) \
static bool trans_##NAME##_zpzz(DisasContext *s, arg_rprr_esz *a)         \
{                                                                         \
    static gen_helper_gvec_4 * const fns[4] = {                           \
        gen_helper_sve_##name##_zpzz_b, gen_helper_sve_##name##_zpzz_h,   \
        gen_helper_sve_##name##_zpzz_s, gen_helper_sve_##name##_zpzz_d,   \
    };                                                                    \
    return do_zpzz_ool(s, a, fns[a->esz]);                                \
}

DO_ZPZZ(SMULH, smulh)
DO_ZPZZ(UMULH, umulh)

static bool trans_SDIV_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    static gen_helper_gvec_4 * const fns[4] = {
        NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
    };
    return do_zpzz_ool(s, a, fns[a->esz]);
}

static bool trans_UDIV_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    static gen_helper_gvec_4 * const fns[4] = {
        NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
    };
    return do_zpzz_ool(s, a, fns[a->esz]);
}

static bool trans_SEL_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    if (sve_access_check(s)) {
        do_sel_z(s, a->rd, a->rn, a->rm, a->pg, a->esz);
    }
    return true;
}
/*
 *** SVE Integer Arithmetic - Unary Predicated Group
 */

static bool do_zpz_ool(DisasContext *s, arg_rpr_esz *a, gen_helper_gvec_3 *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, 0);
    }
    return true;
}

#define DO_ZPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a)           \
{                                                                   \
    static gen_helper_gvec_3 * const fns[4] = {                     \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,       \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,       \
    };                                                              \
    return do_zpz_ool(s, a, fns[a->esz]);                           \
}

DO_ZPZ(CNT_zpz, cnt_zpz)
DO_ZPZ(NOT_zpz, not_zpz)

static bool trans_FABS(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_fabs_h,
        gen_helper_sve_fabs_s,
        gen_helper_sve_fabs_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_FNEG(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_fneg_h,
        gen_helper_sve_fneg_s,
        gen_helper_sve_fneg_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_SXTB(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_sxtb_h,
        gen_helper_sve_sxtb_s,
        gen_helper_sve_sxtb_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_UXTB(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_uxtb_h,
        gen_helper_sve_uxtb_s,
        gen_helper_sve_uxtb_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_SXTH(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL, NULL,
        gen_helper_sve_sxth_s,
        gen_helper_sve_sxth_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_UXTH(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL, NULL,
        gen_helper_sve_uxth_s,
        gen_helper_sve_uxth_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_SXTW(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_sxtw_d : NULL);
}

static bool trans_UXTW(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_uxtw_d : NULL);
}
/*
 *** SVE Integer Reduction Group
 */

typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);
static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
                       gen_helper_gvec_reduc *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zn, t_pg;
    TCGv_i32 desc;
    TCGv_i64 temp;

    if (fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    fn(temp, t_zn, t_pg, desc);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_i32(desc);

    write_fp_dreg(s, a->rd, temp);
    tcg_temp_free_i64(temp);
    return true;
}

#define DO_VPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a)                \
{                                                                        \
    static gen_helper_gvec_reduc * const fns[4] = {                      \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,            \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,            \
    };                                                                   \
    return do_vpz_ool(s, a, fns[a->esz]);                                \
}

static bool trans_SADDV(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_reduc * const fns[4] = {
        gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
        gen_helper_sve_saddv_s, NULL
    };
    return do_vpz_ool(s, a, fns[a->esz]);
}
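/*
 * Explanatory note (not from the upstream source): SADDV sign-extends
 * 8-, 16- and 32-bit elements into a 64-bit accumulator; a 64-bit form
 * would be indistinguishable from the unsigned sum, so fns[3] is NULL
 * and do_vpz_ool rejects that encoding.
 */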
/*
 *** SVE Shift by Immediate - Predicated Group
 */

/*
 * Copy Zn into Zd, storing zeros into inactive elements.
 * If invert, store zeros into the active elements.
 */
static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
                        int esz, bool invert)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_movz_b, gen_helper_sve_movz_h,
        gen_helper_sve_movz_s, gen_helper_sve_movz_d,
    };

    if (sve_access_check(s)) {
        gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
    }
    return true;
}

static bool do_zpzi_ool(DisasContext *s, arg_rpri_esz *a,
                        gen_helper_gvec_3 *fn)
{
    if (sve_access_check(s)) {
        gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
    }
    return true;
}

static bool trans_ASR_zpzi(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h,
        gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d,
    };
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    /* Shift by element size is architecturally valid.  For
       arithmetic right-shift, it's the same as by one less. */
    a->imm = MIN(a->imm, (8 << a->esz) - 1);
    return do_zpzi_ool(s, a, fns[a->esz]);
}

static bool trans_LSR_zpzi(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h,
        gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d,
    };
    if (a->esz < 0) {
        return false;
    }
    /* Shift by element size is architecturally valid.
       For logical shifts, it is a zeroing operation. */
    if (a->imm >= (8 << a->esz)) {
        return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
    } else {
        return do_zpzi_ool(s, a, fns[a->esz]);
    }
}

static bool trans_LSL_zpzi(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h,
        gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d,
    };
    if (a->esz < 0) {
        return false;
    }
    /* Shift by element size is architecturally valid.
       For logical shifts, it is a zeroing operation. */
    if (a->imm >= (8 << a->esz)) {
        return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
    } else {
        return do_zpzi_ool(s, a, fns[a->esz]);
    }
}

static bool trans_ASRD(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_asrd_b, gen_helper_sve_asrd_h,
        gen_helper_sve_asrd_s, gen_helper_sve_asrd_d,
    };
    if (a->esz < 0) {
        return false;
    }
    /* Shift by element size is architecturally valid.  For arithmetic
       right shift for division, it is a zeroing operation. */
    if (a->imm >= (8 << a->esz)) {
        return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
    } else {
        return do_zpzi_ool(s, a, fns[a->esz]);
    }
}

static bool trans_SQSHL_zpzi(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
        gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
    };
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpzi_ool(s, a, fns[a->esz]);
}

static bool trans_UQSHL_zpzi(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
        gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
    };
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpzi_ool(s, a, fns[a->esz]);
}

static bool trans_SRSHR(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
        gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
    };
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpzi_ool(s, a, fns[a->esz]);
}

static bool trans_URSHR(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
        gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
    };
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpzi_ool(s, a, fns[a->esz]);
}

static bool trans_SQSHLU(DisasContext *s, arg_rpri_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
        gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
    };
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpzi_ool(s, a, fns[a->esz]);
}
/*
 *** SVE Bitwise Shift - Predicated Group
 */

#define DO_ZPZW(NAME, name) \
static bool trans_##NAME##_zpzw(DisasContext *s, arg_rprr_esz *a)         \
{                                                                         \
    static gen_helper_gvec_4 * const fns[3] = {                           \
        gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h,   \
        gen_helper_sve_##name##_zpzw_s,                                   \
    };                                                                    \
    if (a->esz < 0 || a->esz >= 3) {                                      \
        return false;                                                     \
    }                                                                     \
    return do_zpzz_ool(s, a, fns[a->esz]);                                \
}

DO_ZPZW(ASR, asr)
DO_ZPZW(LSR, lsr)
DO_ZPZW(LSL, lsl)

#undef DO_ZPZW
/*
 *** SVE Bitwise Shift - Unpredicated Group
 */

static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
                         void (*gvec_fn)(unsigned, uint32_t, uint32_t,
                                         int64_t, uint32_t, uint32_t))
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        /* Shift by element size is architecturally valid.  For
           arithmetic right-shift, it's the same as by one less.
           Otherwise it is a zeroing operation. */
        if (a->imm >= 8 << a->esz) {
            if (asr) {
                a->imm = (8 << a->esz) - 1;
            } else {
                do_dupi_z(s, a->rd, 0);
                return true;
            }
        }
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
    }
    return true;
}

static bool trans_ASR_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_shift_imm(s, a, true, tcg_gen_gvec_sari);
}

static bool trans_LSR_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_shift_imm(s, a, false, tcg_gen_gvec_shri);
}

static bool trans_LSL_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_shift_imm(s, a, false, tcg_gen_gvec_shli);
}

static bool do_zzw_ool(DisasContext *s, arg_rrr_esz *a, gen_helper_gvec_3 *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, 0);
    }
    return true;
}

#define DO_ZZW(NAME, name) \
static bool trans_##NAME##_zzw(DisasContext *s, arg_rrr_esz *a)           \
{                                                                         \
    static gen_helper_gvec_3 * const fns[4] = {                           \
        gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h,     \
        gen_helper_sve_##name##_zzw_s, NULL                               \
    };                                                                    \
    return do_zzw_ool(s, a, fns[a->esz]);                                 \
}

DO_ZZW(ASR, asr)
DO_ZZW(LSR, lsr)
DO_ZZW(LSL, lsl)

#undef DO_ZZW
/*
 *** SVE Integer Multiply-Add Group
 */

static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
                         gen_helper_gvec_5 *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->ra),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           vsz, vsz, 0, fn);
    }
    return true;
}

#define DO_ZPZZZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a)          \
{                                                                    \
    static gen_helper_gvec_5 * const fns[4] = {                      \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,        \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,        \
    };                                                               \
    return do_zpzzz_ool(s, a, fns[a->esz]);                          \
}

DO_ZPZZZ(MLA, mla)
DO_ZPZZZ(MLS, mls)

#undef DO_ZPZZZ
/*
 *** SVE Index Generation Group
 */

static void do_index(DisasContext *s, int esz, int rd,
                     TCGv_i64 start, TCGv_i64 incr)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    TCGv_ptr t_zd = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    if (esz == 3) {
        gen_helper_sve_index_d(t_zd, start, incr, desc);
    } else {
        typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
        static index_fn * const fns[3] = {
            gen_helper_sve_index_b,
            gen_helper_sve_index_h,
            gen_helper_sve_index_s,
        };
        TCGv_i32 s32 = tcg_temp_new_i32();
        TCGv_i32 i32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(s32, start);
        tcg_gen_extrl_i64_i32(i32, incr);
        fns[esz](t_zd, s32, i32, desc);

        tcg_temp_free_i32(s32);
        tcg_temp_free_i32(i32);
    }
    tcg_temp_free_ptr(t_zd);
    tcg_temp_free_i32(desc);
}
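/*
 * Explanatory note (not from the upstream source): the helpers above
 * implement the INDEX semantics Zd[i] = start + i * incr for each
 * element i; e.g. start 1 with increment 2 produces 1, 3, 5, ...
 */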
static bool trans_INDEX_ii(DisasContext *s, arg_INDEX_ii *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 start = tcg_const_i64(a->imm1);
        TCGv_i64 incr = tcg_const_i64(a->imm2);
        do_index(s, a->esz, a->rd, start, incr);
        tcg_temp_free_i64(start);
        tcg_temp_free_i64(incr);
    }
    return true;
}

static bool trans_INDEX_ir(DisasContext *s, arg_INDEX_ir *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 start = tcg_const_i64(a->imm);
        TCGv_i64 incr = cpu_reg(s, a->rm);
        do_index(s, a->esz, a->rd, start, incr);
        tcg_temp_free_i64(start);
    }
    return true;
}

static bool trans_INDEX_ri(DisasContext *s, arg_INDEX_ri *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 start = cpu_reg(s, a->rn);
        TCGv_i64 incr = tcg_const_i64(a->imm);
        do_index(s, a->esz, a->rd, start, incr);
        tcg_temp_free_i64(incr);
    }
    return true;
}

static bool trans_INDEX_rr(DisasContext *s, arg_INDEX_rr *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 start = cpu_reg(s, a->rn);
        TCGv_i64 incr = cpu_reg(s, a->rm);
        do_index(s, a->esz, a->rd, start, incr);
    }
    return true;
}
/*
 *** SVE Stack Allocation Group
 */

static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
    }
    return true;
}

static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
    }
    return true;
}

static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
    }
    return true;
}
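/*
 * Explanatory note (not from the upstream source): these scale the
 * immediate by the register size in bytes, so with a 256-bit vector
 * length ADDVL Xd, Xn, #2 adds 2 * 32 = 64 and ADDPL adds 2 * 4 = 8.
 */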
/*
 *** SVE Compute Vector Address Group
 */

static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
{
    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
    }
    return true;
}

static bool trans_ADR_p32(DisasContext *s, arg_rrri *a)
{
    return do_adr(s, a, gen_helper_sve_adr_p32);
}

static bool trans_ADR_p64(DisasContext *s, arg_rrri *a)
{
    return do_adr(s, a, gen_helper_sve_adr_p64);
}

static bool trans_ADR_s32(DisasContext *s, arg_rrri *a)
{
    return do_adr(s, a, gen_helper_sve_adr_s32);
}

static bool trans_ADR_u32(DisasContext *s, arg_rrri *a)
{
    return do_adr(s, a, gen_helper_sve_adr_u32);
}
/*
 *** SVE Integer Misc - Unpredicated Group
 */

static bool trans_FEXPA(DisasContext *s, arg_rr_esz *a)
{
    static gen_helper_gvec_2 * const fns[4] = {
        NULL,
        gen_helper_sve_fexpa_h,
        gen_helper_sve_fexpa_s,
        gen_helper_sve_fexpa_d,
    };
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zz(s, fns[a->esz], a->rd, a->rn, 0);
    }
    return true;
}

static bool trans_FTSSEL(DisasContext *s, arg_rrr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_ftssel_h,
        gen_helper_sve_ftssel_s,
        gen_helper_sve_ftssel_d,
    };
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
    }
    return true;
}
/*
 *** SVE Predicate Logical Operations Group
 */

static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
                          const GVecGen4 *gvec_op)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned psz = pred_gvec_reg_size(s);
    int dofs = pred_full_reg_offset(s, a->rd);
    int nofs = pred_full_reg_offset(s, a->rn);
    int mofs = pred_full_reg_offset(s, a->rm);
    int gofs = pred_full_reg_offset(s, a->pg);

    if (!a->s) {
        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        return true;
    }

    if (psz == 8) {
        /* Do the operation and the flags generation in temps. */
        TCGv_i64 pd = tcg_temp_new_i64();
        TCGv_i64 pn = tcg_temp_new_i64();
        TCGv_i64 pm = tcg_temp_new_i64();
        TCGv_i64 pg = tcg_temp_new_i64();

        tcg_gen_ld_i64(pn, cpu_env, nofs);
        tcg_gen_ld_i64(pm, cpu_env, mofs);
        tcg_gen_ld_i64(pg, cpu_env, gofs);

        gvec_op->fni8(pd, pn, pm, pg);
        tcg_gen_st_i64(pd, cpu_env, dofs);

        do_predtest1(pd, pg);

        tcg_temp_free_i64(pd);
        tcg_temp_free_i64(pn);
        tcg_temp_free_i64(pm);
        tcg_temp_free_i64(pg);
    } else {
        /* The operation and flags generation is large.  The computation
         * of the flags depends on the original contents of the guarding
         * predicate.  If the destination overwrites the guarding predicate,
         * then the easiest way to get this right is to save a copy.
         */
        int tofs = gofs;
        if (a->rd == a->pg) {
            tofs = offsetof(CPUARMState, vfp.preg_tmp);
            tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
        }

        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        do_predtest(s, dofs, tofs, psz / 8);
    }
    return true;
}
static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_and_pg_i64,
        .fniv = gen_and_pg_vec,
        .fno = gen_helper_sve_and_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s) {
        if (!sve_access_check(s)) {
            return true;
        }
        if (a->rn == a->rm) {
            if (a->pg == a->rn) {
                do_mov_p(s, a->rd, a->rn);
            } else {
                gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
            }
            return true;
        } else if (a->pg == a->rn || a->pg == a->rm) {
            gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
            return true;
        }
    }
    return do_pppp_flags(s, a, &op);
}
static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_andc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_andc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_bic_pg_i64,
        .fniv = gen_bic_pg_vec,
        .fno = gen_helper_sve_bic_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s && a->pg == a->rn) {
        if (sve_access_check(s)) {
            gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
        }
        return true;
    }
    return do_pppp_flags(s, a, &op);
}
static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_xor_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_xor_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor_pg_i64,
        .fniv = gen_eor_pg_vec,
        .fno = gen_helper_sve_eor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
{
    if (a->s) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
                            pred_full_reg_offset(s, a->pg),
                            pred_full_reg_offset(s, a->rn),
                            pred_full_reg_offset(s, a->rm), psz, psz);
    }
    return true;
}
static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orr_pg_i64,
        .fniv = gen_orr_pg_vec,
        .fno = gen_helper_sve_orr_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s && a->pg == a->rn && a->rn == a->rm) {
        return do_mov_p(s, a->rd, a->rn);
    }
    return do_pppp_flags(s, a, &op);
}
static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_orc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_orc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orn_pg_i64,
        .fniv = gen_orn_pg_vec,
        .fno = gen_helper_sve_orn_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nor_pg_i64,
        .fniv = gen_nor_pg_vec,
        .fno = gen_helper_sve_nor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                            TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nand_pg_i64,
        .fniv = gen_nand_pg_vec,
        .fno = gen_helper_sve_nand_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
/*
 *** SVE Predicate Misc Group
 */

static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
{
    if (sve_access_check(s)) {
        int nofs = pred_full_reg_offset(s, a->rn);
        int gofs = pred_full_reg_offset(s, a->pg);
        int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);

        if (words == 1) {
            TCGv_i64 pn = tcg_temp_new_i64();
            TCGv_i64 pg = tcg_temp_new_i64();

            tcg_gen_ld_i64(pn, cpu_env, nofs);
            tcg_gen_ld_i64(pg, cpu_env, gofs);
            do_predtest1(pn, pg);

            tcg_temp_free_i64(pn);
            tcg_temp_free_i64(pg);
        } else {
            do_predtest(s, nofs, gofs, words);
        }
    }
    return true;
}
/* See the ARM pseudocode DecodePredCount. */
static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
{
    unsigned elements = fullsz >> esz;
    unsigned bound;

    switch (pattern) {
    case 0x0: /* POW2 */
        return pow2floor(elements);
    case 0x1: /* VL1 */
    case 0x2: /* VL2 */
    case 0x3: /* VL3 */
    case 0x4: /* VL4 */
    case 0x5: /* VL5 */
    case 0x6: /* VL6 */
    case 0x7: /* VL7 */
    case 0x8: /* VL8 */
        return (pattern < elements ? pattern : 0);
    case 0x9: /* VL16 */
    case 0xa: /* VL32 */
    case 0xb: /* VL64 */
    case 0xc: /* VL128 */
    case 0xd: /* VL256 */
        bound = 16 << (pattern - 9);
        break;
    case 0x1d: /* MUL4 */
        return elements - elements % 4;
    case 0x1e: /* MUL3 */
        return elements - elements % 3;
    case 0x1f: /* ALL */
        return elements;
    default:   /* #uimm5 */
        return 0;
    }
    return elements >= bound ? bound : 0;
}
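/*
 * Worked example (explanatory note, not from the upstream source): with
 * a 256-bit vector and esz = MO_8 there are 32 elements, so POW2 yields
 * 32, VL16 yields 16 (the bound fits), VL64 yields 0 (it does not), and
 * MUL3 yields 30.
 */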
/* This handles all of the predicate initialization instructions,
 * PTRUE, PFALSE, SETFFR.  For PFALSE, we will have set PAT == 32
 * so that decode_pred_count returns 0.  For SETFFR, we will have
 * set RD == 16 == FFR.
 */
static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned ofs = pred_full_reg_offset(s, rd);
    unsigned numelem, setsz, i;
    uint64_t word, lastword;
    TCGv_i64 t;

    numelem = decode_pred_count(fullsz, pat, esz);

    /* Determine what we must store into each bit, and how many. */
    if (numelem == 0) {
        lastword = word = 0;
        setsz = fullsz;
    } else {
        setsz = numelem << esz;
        lastword = word = pred_esz_masks[esz];
        if (setsz % 64) {
            lastword &= MAKE_64BIT_MASK(0, setsz % 64);
        }
    }

    t = tcg_temp_new_i64();
    if (fullsz <= 64) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs);
        goto done;
    }

    if (word == lastword) {
        unsigned maxsz = size_for_gvec(fullsz / 8);
        unsigned oprsz = size_for_gvec(setsz / 8);

        if (oprsz * 8 == setsz) {
            tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
            goto done;
        }
    }

    setsz /= 8;
    fullsz /= 8;

    tcg_gen_movi_i64(t, word);
    for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
        tcg_gen_st_i64(t, cpu_env, ofs + i);
    }
    if (lastword != word) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs + i);
        i += 8;
    }
    if (i < fullsz) {
        tcg_gen_movi_i64(t, 0);
        for (; i < fullsz; i += 8) {
            tcg_gen_st_i64(t, cpu_env, ofs + i);
        }
    }

 done:
    tcg_temp_free_i64(t);

    /* PTRUES */
    if (setflag) {
        tcg_gen_movi_i32(cpu_NF, -(word != 0));
        tcg_gen_movi_i32(cpu_CF, word == 0);
        tcg_gen_movi_i32(cpu_VF, 0);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    }
    return true;
}
static bool trans_PTRUE(DisasContext *s, arg_PTRUE *a)
{
    return do_predset(s, a->esz, a->rd, a->pat, a->s);
}

static bool trans_SETFFR(DisasContext *s, arg_SETFFR *a)
{
    /* Note pat == 31 is #all, to set all elements. */
    return do_predset(s, 0, FFR_PRED_NUM, 31, false);
}

static bool trans_PFALSE(DisasContext *s, arg_PFALSE *a)
{
    /* Note pat == 32 is #unimp, to set no elements. */
    return do_predset(s, 0, a->rd, 32, false);
}
static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
{
    /* The path through do_pppp_flags is complicated enough to want to avoid
     * duplication.  Frob the arguments into the form of a predicated AND.
     */
    arg_rprr_s alt_a = {
        .rd = a->rd, .pg = a->pg, .s = a->s,
        .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
    };
    return trans_AND_pppp(s, &alt_a);
}

static bool trans_RDFFR(DisasContext *s, arg_RDFFR *a)
{
    return do_mov_p(s, a->rd, FFR_PRED_NUM);
}

static bool trans_WRFFR(DisasContext *s, arg_WRFFR *a)
{
    return do_mov_p(s, FFR_PRED_NUM, a->rn);
}
static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                            void (*gen_fn)(TCGv_i32, TCGv_ptr,
                                           TCGv_ptr, TCGv_i32))
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGv_ptr t_pd = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();
    TCGv_i32 t;
    unsigned desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);

    tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
    t = tcg_const_i32(desc);

    gen_fn(t, t_pd, t_pg, t);
    tcg_temp_free_ptr(t_pd);
    tcg_temp_free_ptr(t_pg);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
    return true;
}
static bool trans_PFIRST(DisasContext *s, arg_rr_esz *a)
{
    return do_pfirst_pnext(s, a, gen_helper_sve_pfirst);
}

static bool trans_PNEXT(DisasContext *s, arg_rr_esz *a)
{
    return do_pfirst_pnext(s, a, gen_helper_sve_pnext);
}
/*
 *** SVE Element Count Group
 */

/* Perform an inline saturating addition of a 32-bit value within
 * a 64-bit register.  The second operand is known to be positive,
 * which halves the comparisons we must perform to bound the result.
 */
static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    int64_t ibound;
    TCGv_i64 bound;
    TCGCond cond;

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (u) {
        tcg_gen_ext32u_i64(reg, reg);
    } else {
        tcg_gen_ext32s_i64(reg, reg);
    }
    if (d) {
        tcg_gen_sub_i64(reg, reg, val);
        ibound = (u ? 0 : INT32_MIN);
        cond = TCG_COND_LT;
    } else {
        tcg_gen_add_i64(reg, reg, val);
        ibound = (u ? UINT32_MAX : INT32_MAX);
        cond = TCG_COND_GT;
    }
    bound = tcg_const_i64(ibound);
    tcg_gen_movcond_i64(cond, reg, reg, bound, bound, reg);
    tcg_temp_free_i64(bound);
}
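/*
 * Explanatory note (not from the upstream source): because the inputs
 * are first extended to 64 bits, the true result cannot wrap, so a
 * single comparison suffices: the movcond clamps the result to the
 * bound once it has passed it (GT for addition, LT for subtraction).
 */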
/* Similarly with 64-bit values. */
static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2;

    if (u) {
        if (d) {
            tcg_gen_sub_i64(t0, reg, val);
            tcg_gen_movi_i64(t1, 0);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t1, t0);
        } else {
            tcg_gen_add_i64(t0, reg, val);
            tcg_gen_movi_i64(t1, -1);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t1, t0);
        }
    } else {
        if (d) {
            /* Detect signed overflow for subtraction. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_sub_i64(t1, reg, val);
            tcg_gen_xor_i64(reg, reg, t1);
            tcg_gen_and_i64(t0, t0, reg);

            /* Bound the result. */
            tcg_gen_movi_i64(reg, INT64_MIN);
            t2 = tcg_const_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
        } else {
            /* Detect signed overflow for addition. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_add_i64(reg, reg, val);
            tcg_gen_xor_i64(t1, reg, val);
            tcg_gen_andc_i64(t0, t1, t0);

            /* Bound the result. */
            tcg_gen_movi_i64(t1, INT64_MAX);
            t2 = tcg_const_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
        }
        tcg_temp_free_i64(t2);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}
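/*
 * Explanatory note (not from the upstream source): val is known to be
 * positive, so signed addition can only overflow toward INT64_MAX and
 * signed subtraction toward INT64_MIN.  The xor trick detects this:
 * (reg ^ val) has the sign bit clear when the operand signs agree, and
 * (result ^ val) has it set when the result's sign flipped, so their
 * combination has the sign bit set exactly on overflow; the final
 * movcond on that sign bit substitutes the saturated bound.
 */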
/* Similarly with a vector and a scalar operand. */
static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
                              TCGv_i64 val, bool u, bool d)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr dptr, nptr;
    TCGv_i32 t32, desc;
    TCGv_i64 t64;

    dptr = tcg_temp_new_ptr();
    nptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
    tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
    desc = tcg_const_i32(simd_desc(vsz, vsz, 0));

    switch (esz) {
    case MO_8:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;

    case MO_16:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;

    case MO_32:
        t64 = tcg_temp_new_i64();
        if (d) {
            tcg_gen_neg_i64(t64, val);
        } else {
            tcg_gen_mov_i64(t64, val);
        }
        if (u) {
            gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc);
        } else {
            gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc);
        }
        tcg_temp_free_i64(t64);
        break;

    case MO_64:
        if (u) {
            if (d) {
                gen_helper_sve_uqsubi_d(dptr, nptr, val, desc);
            } else {
                gen_helper_sve_uqaddi_d(dptr, nptr, val, desc);
            }
        } else if (d) {
            t64 = tcg_temp_new_i64();
            tcg_gen_neg_i64(t64, val);
            gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc);
            tcg_temp_free_i64(t64);
        } else {
            gen_helper_sve_sqaddi_d(dptr, nptr, val, desc);
        }
        break;

    default:
        g_assert_not_reached();
    }

    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(nptr);
    tcg_temp_free_i32(desc);
}
static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a)
{
    if (sve_access_check(s)) {
        unsigned fullsz = vec_full_reg_size(s);
        unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
        tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm);
    }
    return true;
}

static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a)
{
    if (sve_access_check(s)) {
        unsigned fullsz = vec_full_reg_size(s);
        unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
        int inc = numelem * a->imm * (a->d ? -1 : 1);
        TCGv_i64 reg = cpu_reg(s, a->rd);

        tcg_gen_addi_i64(reg, reg, inc);
    }
    return true;
}

static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;
    TCGv_i64 reg = cpu_reg(s, a->rd);

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (inc == 0) {
        if (a->u) {
            tcg_gen_ext32u_i64(reg, reg);
        } else {
            tcg_gen_ext32s_i64(reg, reg);
        }
    } else {
        TCGv_i64 t = tcg_const_i64(inc);
        do_sat_addsub_32(reg, t, a->u, a->d);
        tcg_temp_free_i64(t);
    }
    return true;
}

static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;
    TCGv_i64 reg = cpu_reg(s, a->rd);

    if (inc != 0) {
        TCGv_i64 t = tcg_const_i64(inc);
        do_sat_addsub_64(reg, t, a->u, a->d);
        tcg_temp_free_i64(t);
    }
    return true;
}

static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
{
    if (a->esz == 0) {
        return false;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;

    if (inc != 0) {
        if (sve_access_check(s)) {
            TCGv_i64 t = tcg_const_i64(a->d ? -inc : inc);
            tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd),
                              vec_full_reg_offset(s, a->rn),
                              t, fullsz, fullsz);
            tcg_temp_free_i64(t);
        }
    } else {
        do_mov_z(s, a->rd, a->rn);
    }
    return true;
}

static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
{
    if (a->esz == 0) {
        return false;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;

    if (inc != 0) {
        if (sve_access_check(s)) {
            TCGv_i64 t = tcg_const_i64(inc);
            do_sat_addsub_vec(s, a->esz, a->rd, a->rn, t, a->u, a->d);
            tcg_temp_free_i64(t);
        }
    } else {
        do_mov_z(s, a->rd, a->rn);
    }
    return true;
}
/*
 *** SVE Bitwise Immediate Group
 */

static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn)
{
    uint64_t imm;
    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(MO_64, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), imm, vsz, vsz);
    }
    return true;
}

static bool trans_AND_zzi(DisasContext *s, arg_rr_dbm *a)
{
    return do_zz_dbm(s, a, tcg_gen_gvec_andi);
}

static bool trans_ORR_zzi(DisasContext *s, arg_rr_dbm *a)
{
    return do_zz_dbm(s, a, tcg_gen_gvec_ori);
}

static bool trans_EOR_zzi(DisasContext *s, arg_rr_dbm *a)
{
    return do_zz_dbm(s, a, tcg_gen_gvec_xori);
}

static bool trans_DUPM(DisasContext *s, arg_DUPM *a)
{
    uint64_t imm;
    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    if (sve_access_check(s)) {
        do_dupi_z(s, a->rd, imm);
    }
    return true;
}
2247 /* Implement all merging copies. This is used for CPY (immediate),
2248 * FCPY, CPY (scalar), CPY (SIMD&FP scalar).
2250 static void do_cpy_m(DisasContext
*s
, int esz
, int rd
, int rn
, int pg
,
2253 typedef void gen_cpy(TCGv_ptr
, TCGv_ptr
, TCGv_ptr
, TCGv_i64
, TCGv_i32
);
2254 static gen_cpy
* const fns
[4] = {
2255 gen_helper_sve_cpy_m_b
, gen_helper_sve_cpy_m_h
,
2256 gen_helper_sve_cpy_m_s
, gen_helper_sve_cpy_m_d
,
2258 unsigned vsz
= vec_full_reg_size(s
);
2259 TCGv_i32 desc
= tcg_const_i32(simd_desc(vsz
, vsz
, 0));
2260 TCGv_ptr t_zd
= tcg_temp_new_ptr();
2261 TCGv_ptr t_zn
= tcg_temp_new_ptr();
2262 TCGv_ptr t_pg
= tcg_temp_new_ptr();
2264 tcg_gen_addi_ptr(t_zd
, cpu_env
, vec_full_reg_offset(s
, rd
));
2265 tcg_gen_addi_ptr(t_zn
, cpu_env
, vec_full_reg_offset(s
, rn
));
2266 tcg_gen_addi_ptr(t_pg
, cpu_env
, pred_full_reg_offset(s
, pg
));
2268 fns
[esz
](t_zd
, t_zn
, t_pg
, val
, desc
);
2270 tcg_temp_free_ptr(t_zd
);
2271 tcg_temp_free_ptr(t_zn
);
2272 tcg_temp_free_ptr(t_pg
);
2273 tcg_temp_free_i32(desc
);
2276 static bool trans_FCPY(DisasContext
*s
, arg_FCPY
*a
)
2281 if (sve_access_check(s
)) {
2282 /* Decode the VFP immediate. */
2283 uint64_t imm
= vfp_expand_imm(a
->esz
, a
->imm
);
2284 TCGv_i64 t_imm
= tcg_const_i64(imm
);
2285 do_cpy_m(s
, a
->esz
, a
->rd
, a
->rn
, a
->pg
, t_imm
);
2286 tcg_temp_free_i64(t_imm
);
2291 static bool trans_CPY_m_i(DisasContext
*s
, arg_rpri_esz
*a
)
2293 if (a
->esz
== 0 && extract32(s
->insn
, 13, 1)) {
2296 if (sve_access_check(s
)) {
2297 TCGv_i64 t_imm
= tcg_const_i64(a
->imm
);
2298 do_cpy_m(s
, a
->esz
, a
->rd
, a
->rn
, a
->pg
, t_imm
);
2299 tcg_temp_free_i64(t_imm
);
2304 static bool trans_CPY_z_i(DisasContext
*s
, arg_CPY_z_i
*a
)
2306 static gen_helper_gvec_2i
* const fns
[4] = {
2307 gen_helper_sve_cpy_z_b
, gen_helper_sve_cpy_z_h
,
2308 gen_helper_sve_cpy_z_s
, gen_helper_sve_cpy_z_d
,
2311 if (a
->esz
== 0 && extract32(s
->insn
, 13, 1)) {
2314 if (sve_access_check(s
)) {
2315 unsigned vsz
= vec_full_reg_size(s
);
2316 TCGv_i64 t_imm
= tcg_const_i64(a
->imm
);
2317 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s
, a
->rd
),
2318 pred_full_reg_offset(s
, a
->pg
),
2319 t_imm
, vsz
, vsz
, 0, fns
[a
->esz
]);
2320 tcg_temp_free_i64(t_imm
);
/*
 *** SVE Permute Extract Group
 */

static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = vec_full_reg_size(s);
    unsigned n_ofs = imm >= vsz ? 0 : imm;
    unsigned n_siz = vsz - n_ofs;
    unsigned d = vec_full_reg_offset(s, rd);
    unsigned n = vec_full_reg_offset(s, rn);
    unsigned m = vec_full_reg_offset(s, rm);

    /* Use host vector move insns if we have appropriate sizes
     * and no unfortunate overlap.
     */
    if (m != d
        && n_ofs == size_for_gvec(n_ofs)
        && n_siz == size_for_gvec(n_siz)
        && (d != n || n_siz <= n_ofs)) {
        tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz);
        if (n_ofs != 0) {
            tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs);
        }
    } else {
        tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext);
    }
    return true;
}

static bool trans_EXT(DisasContext *s, arg_EXT *a)
{
    return do_EXT(s, a->rd, a->rn, a->rm, a->imm);
}

static bool trans_EXT_sve2(DisasContext *s, arg_rri *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_EXT(s, a->rd, a->rn, (a->rn + 1) % 32, a->imm);
}
2373 *** SVE Permute - Unpredicated Group
2376 static bool trans_DUP_s(DisasContext
*s
, arg_DUP_s
*a
)
2378 if (sve_access_check(s
)) {
2379 unsigned vsz
= vec_full_reg_size(s
);
2380 tcg_gen_gvec_dup_i64(a
->esz
, vec_full_reg_offset(s
, a
->rd
),
2381 vsz
, vsz
, cpu_reg_sp(s
, a
->rn
));
2386 static bool trans_DUP_x(DisasContext
*s
, arg_DUP_x
*a
)
2388 if ((a
->imm
& 0x1f) == 0) {
2391 if (sve_access_check(s
)) {
2392 unsigned vsz
= vec_full_reg_size(s
);
2393 unsigned dofs
= vec_full_reg_offset(s
, a
->rd
);
2394 unsigned esz
, index
;
2396 esz
= ctz32(a
->imm
);
2397 index
= a
->imm
>> (esz
+ 1);
2399 if ((index
<< esz
) < vsz
) {
2400 unsigned nofs
= vec_reg_offset(s
, a
->rn
, index
, esz
);
2401 tcg_gen_gvec_dup_mem(esz
, dofs
, nofs
, vsz
, vsz
);
2404 * While dup_mem handles 128-bit elements, dup_imm does not.
2405 * Thankfully element size doesn't matter for splatting zero.
2407 tcg_gen_gvec_dup_imm(MO_64
, dofs
, vsz
, vsz
, 0);
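/*
 * Worked example of the immediate decode above: imm = 6 (0b00110)
 * has ctz32 = 1, so esz = 1 (halfwords) and index = 6 >> 2 = 1,
 * i.e. DUP Zd.H, Zn.H[1].  An all-zero low five bits would leave no
 * set bit from which to select an element size, hence the
 * unallocated-encoding check at the top.
 */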
static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
{
    typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
    static gen_insr * const fns[4] = {
        gen_helper_sve_insr_b, gen_helper_sve_insr_h,
        gen_helper_sve_insr_s, gen_helper_sve_insr_d,
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_i32 desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    TCGv_ptr t_zd = tcg_temp_new_ptr();
    TCGv_ptr t_zn = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));

    fns[a->esz](t_zd, t_zn, val, desc);

    tcg_temp_free_ptr(t_zd);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_i32(desc);
}

static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
        do_insr_i64(s, a, t);
        tcg_temp_free_i64(t);
    }
    return true;
}

static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a)
{
    if (sve_access_check(s)) {
        do_insr_i64(s, a, cpu_reg(s, a->rm));
    }
    return true;
}

static bool trans_REV_v(DisasContext *s, arg_rr_esz *a)
{
    static gen_helper_gvec_2 * const fns[4] = {
        gen_helper_sve_rev_b, gen_helper_sve_rev_h,
        gen_helper_sve_rev_s, gen_helper_sve_rev_d
    };

    if (sve_access_check(s)) {
        gen_gvec_ool_zz(s, fns[a->esz], a->rd, a->rn, 0);
    }
    return true;
}

static bool trans_TBL(DisasContext *s, arg_rrr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_tbl_b, gen_helper_sve_tbl_h,
        gen_helper_sve_tbl_s, gen_helper_sve_tbl_d
    };

    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
    }
    return true;
}

static bool trans_TBL_sve2(DisasContext *s, arg_rrr_esz *a)
{
    static gen_helper_gvec_4 * const fns[4] = {
        gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h,
        gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d
    };

    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn,
                          (a->rn + 1) % 32, a->rm, 0);
    }
    return true;
}

static bool trans_TBX(DisasContext *s, arg_rrr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
        gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
    };

    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fns[a->esz], a->rd, a->rn, a->rm, 0);
    }
    return true;
}

static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
{
    static gen_helper_gvec_2 * const fns[4][2] = {
        { NULL, NULL },
        { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h },
        { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s },
        { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d },
    };

    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn)
                           + (a->h ? vsz / 2 : 0),
                           vsz, vsz, 0, fns[a->esz][a->u]);
    }
    return true;
}

/*
 *** SVE Permute - Predicates Group
 */

static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
                          gen_helper_gvec_3 *fn)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    TCGv_ptr t_d = tcg_temp_new_ptr();
    TCGv_ptr t_n = tcg_temp_new_ptr();
    TCGv_ptr t_m = tcg_temp_new_ptr();
    TCGv_i32 t_desc;
    uint32_t desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);

    tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));
    t_desc = tcg_const_i32(desc);

    fn(t_d, t_n, t_m, t_desc);

    tcg_temp_free_ptr(t_d);
    tcg_temp_free_ptr(t_n);
    tcg_temp_free_ptr(t_m);
    tcg_temp_free_i32(t_desc);
    return true;
}

static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
                          gen_helper_gvec_2 *fn)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);
    TCGv_ptr t_d = tcg_temp_new_ptr();
    TCGv_ptr t_n = tcg_temp_new_ptr();
    TCGv_i32 t_desc;
    uint32_t desc = 0;

    tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
    t_desc = tcg_const_i32(desc);

    fn(t_d, t_n, t_desc);

    tcg_temp_free_i32(t_desc);
    tcg_temp_free_ptr(t_d);
    tcg_temp_free_ptr(t_n);
    return true;
}
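/*
 * Note (added commentary): predicate sizes need not be a multiple of
 * 16 bytes, so these helpers cannot use simd_desc().  PREDDESC instead
 * packs the exact predicate size in bytes (OPRSZ), the element size
 * (ESZ) and the high/odd-half selector (DATA) into the descriptor.
 */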
static bool trans_ZIP1_p(DisasContext *s, arg_rrr_esz *a)
{
    return do_perm_pred3(s, a, 0, gen_helper_sve_zip_p);
}

static bool trans_ZIP2_p(DisasContext *s, arg_rrr_esz *a)
{
    return do_perm_pred3(s, a, 1, gen_helper_sve_zip_p);
}

static bool trans_UZP1_p(DisasContext *s, arg_rrr_esz *a)
{
    return do_perm_pred3(s, a, 0, gen_helper_sve_uzp_p);
}

static bool trans_UZP2_p(DisasContext *s, arg_rrr_esz *a)
{
    return do_perm_pred3(s, a, 1, gen_helper_sve_uzp_p);
}

static bool trans_TRN1_p(DisasContext *s, arg_rrr_esz *a)
{
    return do_perm_pred3(s, a, 0, gen_helper_sve_trn_p);
}

static bool trans_TRN2_p(DisasContext *s, arg_rrr_esz *a)
{
    return do_perm_pred3(s, a, 1, gen_helper_sve_trn_p);
}

static bool trans_REV_p(DisasContext *s, arg_rr_esz *a)
{
    return do_perm_pred2(s, a, 0, gen_helper_sve_rev_p);
}

static bool trans_PUNPKLO(DisasContext *s, arg_PUNPKLO *a)
{
    return do_perm_pred2(s, a, 0, gen_helper_sve_punpk_p);
}

static bool trans_PUNPKHI(DisasContext *s, arg_PUNPKHI *a)
{
    return do_perm_pred2(s, a, 1, gen_helper_sve_punpk_p);
}

/*
 *** SVE Permute - Interleaving Group
 */

static bool do_zip(DisasContext *s, arg_rrr_esz *a, bool high)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_zip_b, gen_helper_sve_zip_h,
        gen_helper_sve_zip_s, gen_helper_sve_zip_d,
    };

    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        unsigned high_ofs = high ? vsz / 2 : 0;
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn) + high_ofs,
                           vec_full_reg_offset(s, a->rm) + high_ofs,
                           vsz, vsz, 0, fns[a->esz]);
    }
    return true;
}

static bool do_zzz_data_ool(DisasContext *s, arg_rrr_esz *a, int data,
                            gen_helper_gvec_3 *fn)
{
    if (sve_access_check(s)) {
        gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
    }
    return true;
}

static bool trans_ZIP1_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zip(s, a, false);
}

static bool trans_ZIP2_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zip(s, a, true);
}

static bool do_zip_q(DisasContext *s, arg_rrr_esz *a, bool high)
{
    if (!dc_isar_feature(aa64_sve_f64mm, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        unsigned high_ofs = high ? QEMU_ALIGN_DOWN(vsz, 32) / 2 : 0;
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn) + high_ofs,
                           vec_full_reg_offset(s, a->rm) + high_ofs,
                           vsz, vsz, 0, gen_helper_sve2_zip_q);
    }
    return true;
}

static bool trans_ZIP1_q(DisasContext *s, arg_rrr_esz *a)
{
    return do_zip_q(s, a, false);
}

static bool trans_ZIP2_q(DisasContext *s, arg_rrr_esz *a)
{
    return do_zip_q(s, a, true);
}

static gen_helper_gvec_3 * const uzp_fns[4] = {
    gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
    gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
};

static bool trans_UZP1_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_data_ool(s, a, 0, uzp_fns[a->esz]);
}

static bool trans_UZP2_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_data_ool(s, a, 1 << a->esz, uzp_fns[a->esz]);
}

static bool trans_UZP1_q(DisasContext *s, arg_rrr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_f64mm, s)) {
        return false;
    }
    return do_zzz_data_ool(s, a, 0, gen_helper_sve2_uzp_q);
}

static bool trans_UZP2_q(DisasContext *s, arg_rrr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_f64mm, s)) {
        return false;
    }
    return do_zzz_data_ool(s, a, 16, gen_helper_sve2_uzp_q);
}

static gen_helper_gvec_3 * const trn_fns[4] = {
    gen_helper_sve_trn_b, gen_helper_sve_trn_h,
    gen_helper_sve_trn_s, gen_helper_sve_trn_d,
};

static bool trans_TRN1_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_data_ool(s, a, 0, trn_fns[a->esz]);
}

static bool trans_TRN2_z(DisasContext *s, arg_rrr_esz *a)
{
    return do_zzz_data_ool(s, a, 1 << a->esz, trn_fns[a->esz]);
}

static bool trans_TRN1_q(DisasContext *s, arg_rrr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_f64mm, s)) {
        return false;
    }
    return do_zzz_data_ool(s, a, 0, gen_helper_sve2_trn_q);
}

static bool trans_TRN2_q(DisasContext *s, arg_rrr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_f64mm, s)) {
        return false;
    }
    return do_zzz_data_ool(s, a, 16, gen_helper_sve2_trn_q);
}

/*
 *** SVE Permute Vector - Predicated Group
 */

static bool trans_COMPACT(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

/* Call the helper that computes the ARM LastActiveElement pseudocode
 * function, scaled by the element size.  This includes the not found
 * indication; e.g. not found for esz=3 is -8.
 */
static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
{
    /* Predicate sizes may be smaller and cannot use simd_desc.  We cannot
     * round up, as we do elsewhere, because we need the exact size.
     */
    TCGv_ptr t_p = tcg_temp_new_ptr();
    TCGv_i32 t_desc;
    unsigned desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);

    tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));
    t_desc = tcg_const_i32(desc);

    gen_helper_sve_last_active_element(ret, t_p, t_desc);

    tcg_temp_free_i32(t_desc);
    tcg_temp_free_ptr(t_p);
}
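/*
 * Worked example: for esz = 2 with words 0 and 3 active, the helper
 * returns 3 << 2 = 12, the byte offset of the last active element;
 * with no elements active it returns -(1 << 2) = -4.
 */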
/* Increment LAST to the offset of the next element in the vector,
 * wrapping around to 0.
 */
static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz)
{
    unsigned vsz = vec_full_reg_size(s);

    tcg_gen_addi_i32(last, last, 1 << esz);
    if (is_power_of_2(vsz)) {
        tcg_gen_andi_i32(last, last, vsz - 1);
    } else {
        TCGv_i32 max = tcg_const_i32(vsz);
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last);
        tcg_temp_free_i32(max);
        tcg_temp_free_i32(zero);
    }
}

/* If LAST < 0, set LAST to the offset of the last element in the vector. */
static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz)
{
    unsigned vsz = vec_full_reg_size(s);

    if (is_power_of_2(vsz)) {
        tcg_gen_andi_i32(last, last, vsz - 1);
    } else {
        TCGv_i32 max = tcg_const_i32(vsz - (1 << esz));
        TCGv_i32 zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last);
        tcg_temp_free_i32(max);
        tcg_temp_free_i32(zero);
    }
}

/* Load an unsigned element of ESZ from BASE+OFS. */
static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz)
{
    TCGv_i64 r = tcg_temp_new_i64();

    switch (esz) {
    case 0:
        tcg_gen_ld8u_i64(r, base, ofs);
        break;
    case 1:
        tcg_gen_ld16u_i64(r, base, ofs);
        break;
    case 2:
        tcg_gen_ld32u_i64(r, base, ofs);
        break;
    case 3:
        tcg_gen_ld_i64(r, base, ofs);
        break;
    default:
        g_assert_not_reached();
    }
    return r;
}

/* Load an unsigned element of ESZ from RM[LAST]. */
static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
                                 int rm, int esz)
{
    TCGv_ptr p = tcg_temp_new_ptr();
    TCGv_i64 r;

    /* Convert offset into vector into offset into ENV.
     * The final adjustment for the vector register base
     * is added via constant offset to the load.
     */
#ifdef HOST_WORDS_BIGENDIAN
    /* Adjust for element ordering.  See vec_reg_offset.  */
    if (esz < 3) {
        tcg_gen_xori_i32(last, last, 8 - (1 << esz));
    }
#endif
    tcg_gen_ext_i32_ptr(p, last);
    tcg_gen_add_ptr(p, p, cpu_env);

    r = load_esz(p, vec_full_reg_offset(s, rm), esz);
    tcg_temp_free_ptr(p);

    return r;
}
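/*
 * Illustrative note on the big-endian adjustment above: within each
 * 64-bit unit the elements are stored reversed, so for esz = 0 the
 * byte at logical offset 2 lives at host offset 2 ^ 7 = 5; XORing
 * with 8 - (1 << esz) performs exactly that reversal.
 */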
/* Compute CLAST for a Zreg.  */
static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
{
    TCGv_i32 last;
    TCGLabel *over;
    TCGv_i64 ele;
    unsigned vsz, esz = a->esz;

    if (!sve_access_check(s)) {
        return true;
    }

    last = tcg_temp_local_new_i32();
    over = gen_new_label();

    find_last_active(s, last, esz, a->pg);

    /* There is of course no movcond for a 2048-bit vector,
     * so we must branch over the actual store.
     */
    tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over);

    if (!before) {
        incr_last_active(s, last, esz);
    }

    ele = load_last_active(s, last, a->rm, esz);
    tcg_temp_free_i32(last);

    vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele);
    tcg_temp_free_i64(ele);

    /* If this insn used MOVPRFX, we may need a second move.  */
    if (a->rd != a->rn) {
        TCGLabel *done = gen_new_label();
        tcg_gen_br(done);

        gen_set_label(over);
        do_mov_z(s, a->rd, a->rn);

        gen_set_label(done);
    } else {
        gen_set_label(over);
    }
    return true;
}

static bool trans_CLASTA_z(DisasContext *s, arg_rprr_esz *a)
{
    return do_clast_vector(s, a, false);
}

static bool trans_CLASTB_z(DisasContext *s, arg_rprr_esz *a)
{
    return do_clast_vector(s, a, true);
}

/* Compute CLAST for a scalar.  */
static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
                            bool before, TCGv_i64 reg_val)
{
    TCGv_i32 last = tcg_temp_new_i32();
    TCGv_i64 ele, cmp, zero;

    find_last_active(s, last, esz, pg);

    /* Extend the original value of last prior to incrementing.  */
    cmp = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(cmp, last);

    if (!before) {
        incr_last_active(s, last, esz);
    }

    /* The conceit here is that while last < 0 indicates not found, after
     * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
     * from which we can load garbage.  We then discard the garbage with
     * a conditional move.
     */
    ele = load_last_active(s, last, rm, esz);
    tcg_temp_free_i32(last);

    zero = tcg_const_i64(0);
    tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, zero, ele, reg_val);

    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(cmp);
    tcg_temp_free_i64(ele);
}
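/*
 * Note (added commentary): BEFORE selects between the two flavours of
 * the instruction.  CLASTB ("before") extracts the last active element
 * itself, while CLASTA ("after") first advances LAST via
 * incr_last_active and so extracts the element following it, wrapping
 * to element 0 at the end of the vector.
 */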
/* Compute CLAST for a Vreg.  */
static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
{
    if (sve_access_check(s)) {
        int esz = a->esz;
        int ofs = vec_reg_offset(s, a->rd, 0, esz);
        TCGv_i64 reg = load_esz(cpu_env, ofs, esz);

        do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
        write_fp_dreg(s, a->rd, reg);
        tcg_temp_free_i64(reg);
    }
    return true;
}

static bool trans_CLASTA_v(DisasContext *s, arg_rpr_esz *a)
{
    return do_clast_fp(s, a, false);
}

static bool trans_CLASTB_v(DisasContext *s, arg_rpr_esz *a)
{
    return do_clast_fp(s, a, true);
}

/* Compute CLAST for a Xreg.  */
static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before)
{
    TCGv_i64 reg;

    if (!sve_access_check(s)) {
        return true;
    }

    reg = cpu_reg(s, a->rd);
    switch (a->esz) {
    case 0:
        tcg_gen_ext8u_i64(reg, reg);
        break;
    case 1:
        tcg_gen_ext16u_i64(reg, reg);
        break;
    case 2:
        tcg_gen_ext32u_i64(reg, reg);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg);
    return true;
}

static bool trans_CLASTA_r(DisasContext *s, arg_rpr_esz *a)
{
    return do_clast_general(s, a, false);
}

static bool trans_CLASTB_r(DisasContext *s, arg_rpr_esz *a)
{
    return do_clast_general(s, a, true);
}

/* Compute LAST for a scalar.  */
static TCGv_i64 do_last_scalar(DisasContext *s, int esz,
                               int pg, int rm, bool before)
{
    TCGv_i32 last = tcg_temp_new_i32();
    TCGv_i64 ret;

    find_last_active(s, last, esz, pg);
    if (before) {
        wrap_last_active(s, last, esz);
    } else {
        incr_last_active(s, last, esz);
    }

    ret = load_last_active(s, last, rm, esz);
    tcg_temp_free_i32(last);
    return ret;
}

/* Compute LAST for a Vreg.  */
static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before)
{
    if (sve_access_check(s)) {
        TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
        write_fp_dreg(s, a->rd, val);
        tcg_temp_free_i64(val);
    }
    return true;
}

static bool trans_LASTA_v(DisasContext *s, arg_rpr_esz *a)
{
    return do_last_fp(s, a, false);
}

static bool trans_LASTB_v(DisasContext *s, arg_rpr_esz *a)
{
    return do_last_fp(s, a, true);
}

/* Compute LAST for a Xreg.  */
static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before)
{
    if (sve_access_check(s)) {
        TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
        tcg_gen_mov_i64(cpu_reg(s, a->rd), val);
        tcg_temp_free_i64(val);
    }
    return true;
}

static bool trans_LASTA_r(DisasContext *s, arg_rpr_esz *a)
{
    return do_last_general(s, a, false);
}

static bool trans_LASTB_r(DisasContext *s, arg_rpr_esz *a)
{
    return do_last_general(s, a, true);
}
static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a)
{
    if (sve_access_check(s)) {
        do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn));
    }
    return true;
}

static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
{
    if (sve_access_check(s)) {
        int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
        TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
        do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
        tcg_temp_free_i64(t);
    }
    return true;
}

static bool trans_REVB(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        gen_helper_sve_revb_h,
        gen_helper_sve_revb_s,
        gen_helper_sve_revb_d,
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_REVH(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        NULL,
        NULL,
        gen_helper_sve_revh_s,
        gen_helper_sve_revh_d,
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_REVW(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ool(s, a, a->esz == 3 ? gen_helper_sve_revw_d : NULL);
}

static bool trans_RBIT(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_rbit_b,
        gen_helper_sve_rbit_h,
        gen_helper_sve_rbit_s,
        gen_helper_sve_rbit_d,
    };
    return do_zpz_ool(s, a, fns[a->esz]);
}

static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a)
{
    if (sve_access_check(s)) {
        gen_gvec_ool_zzzp(s, gen_helper_sve_splice,
                          a->rd, a->rn, a->rm, a->pg, a->esz);
    }
    return true;
}

static bool trans_SPLICE_sve2(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzzp(s, gen_helper_sve_splice,
                          a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz);
    }
    return true;
}
/*
 *** SVE Integer Compare - Vectors Group
 */

static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
                          gen_helper_gvec_flags_4 *gen_fn)
{
    TCGv_ptr pd, zn, zm, pg;
    unsigned vsz;
    TCGv_i32 t;

    if (gen_fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    t = tcg_const_i32(simd_desc(vsz, vsz, 0));
    pd = tcg_temp_new_ptr();
    zn = tcg_temp_new_ptr();
    zm = tcg_temp_new_ptr();
    pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
    tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));

    gen_fn(t, pd, zn, zm, pg, t);

    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(zn);
    tcg_temp_free_ptr(zm);
    tcg_temp_free_ptr(pg);

    do_pred_flags(t);

    tcg_temp_free_i32(t);
    return true;
}

#define DO_PPZZ(NAME, name) \
static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a)         \
{                                                                         \
    static gen_helper_gvec_flags_4 * const fns[4] = {                     \
        gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h,   \
        gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d,   \
    };                                                                    \
    return do_ppzz_flags(s, a, fns[a->esz]);                              \
}

DO_PPZZ(CMPEQ, cmpeq)
DO_PPZZ(CMPNE, cmpne)
DO_PPZZ(CMPGT, cmpgt)
DO_PPZZ(CMPGE, cmpge)
DO_PPZZ(CMPHI, cmphi)
DO_PPZZ(CMPHS, cmphs)

#undef DO_PPZZ

#define DO_PPZW(NAME, name) \
static bool trans_##NAME##_ppzw(DisasContext *s, arg_rprr_esz *a)         \
{                                                                         \
    static gen_helper_gvec_flags_4 * const fns[4] = {                     \
        gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h,   \
        gen_helper_sve_##name##_ppzw_s, NULL                              \
    };                                                                    \
    return do_ppzz_flags(s, a, fns[a->esz]);                              \
}

DO_PPZW(CMPEQ, cmpeq)
DO_PPZW(CMPNE, cmpne)
DO_PPZW(CMPGT, cmpgt)
DO_PPZW(CMPGE, cmpge)
DO_PPZW(CMPHI, cmphi)
DO_PPZW(CMPHS, cmphs)
DO_PPZW(CMPLT, cmplt)
DO_PPZW(CMPLE, cmple)
DO_PPZW(CMPLO, cmplo)
DO_PPZW(CMPLS, cmpls)

#undef DO_PPZW
/*
 *** SVE Integer Compare - Immediate Groups
 */

static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
                          gen_helper_gvec_flags_3 *gen_fn)
{
    TCGv_ptr pd, zn, pg;
    unsigned vsz;
    TCGv_i32 t;

    if (gen_fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    t = tcg_const_i32(simd_desc(vsz, vsz, a->imm));
    pd = tcg_temp_new_ptr();
    zn = tcg_temp_new_ptr();
    pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));

    gen_fn(t, pd, zn, pg, t);

    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(zn);
    tcg_temp_free_ptr(pg);

    do_pred_flags(t);

    tcg_temp_free_i32(t);
    return true;
}

#define DO_PPZI(NAME, name) \
static bool trans_##NAME##_ppzi(DisasContext *s, arg_rpri_esz *a)         \
{                                                                         \
    static gen_helper_gvec_flags_3 * const fns[4] = {                     \
        gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h,   \
        gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d,   \
    };                                                                    \
    return do_ppzi_flags(s, a, fns[a->esz]);                              \
}

DO_PPZI(CMPEQ, cmpeq)
DO_PPZI(CMPNE, cmpne)
DO_PPZI(CMPGT, cmpgt)
DO_PPZI(CMPGE, cmpge)
DO_PPZI(CMPHI, cmphi)
DO_PPZI(CMPHS, cmphs)
DO_PPZI(CMPLT, cmplt)
DO_PPZI(CMPLE, cmple)
DO_PPZI(CMPLO, cmplo)
DO_PPZI(CMPLS, cmpls)

#undef DO_PPZI
/*
 *** SVE Partition Break Group
 */

static bool do_brk3(DisasContext *s, arg_rprr_s *a,
                    gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    /* Predicate sizes may be smaller and cannot use simd_desc.  */
    TCGv_ptr d = tcg_temp_new_ptr();
    TCGv_ptr n = tcg_temp_new_ptr();
    TCGv_ptr m = tcg_temp_new_ptr();
    TCGv_ptr g = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_const_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));

    tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
    tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));

    if (a->s) {
        fn_s(t, d, n, m, g, t);
        do_pred_flags(t);
    } else {
        fn(d, n, m, g, t);
    }
    tcg_temp_free_ptr(d);
    tcg_temp_free_ptr(n);
    tcg_temp_free_ptr(m);
    tcg_temp_free_ptr(g);
    tcg_temp_free_i32(t);
    return true;
}

static bool do_brk2(DisasContext *s, arg_rpr_s *a,
                    gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    /* Predicate sizes may be smaller and cannot use simd_desc.  */
    TCGv_ptr d = tcg_temp_new_ptr();
    TCGv_ptr n = tcg_temp_new_ptr();
    TCGv_ptr g = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_const_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));

    tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));

    if (a->s) {
        fn_s(t, d, n, g, t);
        do_pred_flags(t);
    } else {
        fn(d, n, g, t);
    }
    tcg_temp_free_ptr(d);
    tcg_temp_free_ptr(n);
    tcg_temp_free_ptr(g);
    tcg_temp_free_i32(t);
    return true;
}

static bool trans_BRKPA(DisasContext *s, arg_rprr_s *a)
{
    return do_brk3(s, a, gen_helper_sve_brkpa, gen_helper_sve_brkpas);
}

static bool trans_BRKPB(DisasContext *s, arg_rprr_s *a)
{
    return do_brk3(s, a, gen_helper_sve_brkpb, gen_helper_sve_brkpbs);
}

static bool trans_BRKA_m(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brka_m, gen_helper_sve_brkas_m);
}

static bool trans_BRKB_m(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m);
}

static bool trans_BRKA_z(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brka_z, gen_helper_sve_brkas_z);
}

static bool trans_BRKB_z(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z);
}

static bool trans_BRKN(DisasContext *s, arg_rpr_s *a)
{
    return do_brk2(s, a, gen_helper_sve_brkn, gen_helper_sve_brkns);
}
/*
 *** SVE Predicate Count Group
 */

static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
{
    unsigned psz = pred_full_reg_size(s);

    if (psz <= 8) {
        uint64_t psz_mask;

        tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
        if (pn != pg) {
            TCGv_i64 g = tcg_temp_new_i64();
            tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
            tcg_gen_and_i64(val, val, g);
            tcg_temp_free_i64(g);
        }

        /* Reduce the pred_esz_masks value simply to reduce the
         * size of the code generated here.
         */
        psz_mask = MAKE_64BIT_MASK(0, psz * 8);
        tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask);

        tcg_gen_ctpop_i64(val, val);
    } else {
        TCGv_ptr t_pn = tcg_temp_new_ptr();
        TCGv_ptr t_pg = tcg_temp_new_ptr();
        unsigned desc = 0;
        TCGv_i32 t_desc;

        desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
        desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);

        tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
        tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
        t_desc = tcg_const_i32(desc);

        gen_helper_sve_cntp(val, t_pn, t_pg, t_desc);
        tcg_temp_free_ptr(t_pn);
        tcg_temp_free_ptr(t_pg);
        tcg_temp_free_i32(t_desc);
    }
}
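/*
 * Worked example for the inline fast path: with a 128-bit vector the
 * predicate is only 2 bytes and fits in a single 64-bit load.  For
 * esz = 2 only every fourth predicate bit denotes an element, so the
 * value is masked with pred_esz_masks[2] (0x1111...) before the
 * popcount.
 */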
static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
{
    if (sve_access_check(s)) {
        do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg);
    }
    return true;
}

static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        TCGv_i64 val = tcg_temp_new_i64();

        do_cntp(s, val, a->esz, a->pg, a->pg);
        if (a->d) {
            tcg_gen_sub_i64(reg, reg, val);
        } else {
            tcg_gen_add_i64(reg, reg, val);
        }
        tcg_temp_free_i64(val);
    }
    return true;
}

static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_i64 val = tcg_temp_new_i64();
        GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds;

        do_cntp(s, val, a->esz, a->pg, a->pg);
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), val, vsz, vsz);
    }
    return true;
}

static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        TCGv_i64 val = tcg_temp_new_i64();

        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_32(reg, val, a->u, a->d);
    }
    return true;
}

static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        TCGv_i64 val = tcg_temp_new_i64();

        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_64(reg, val, a->u, a->d);
    }
    return true;
}

static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 val = tcg_temp_new_i64();
        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d);
    }
    return true;
}

/*
 *** SVE Integer Compare Scalars Group
 */

static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ);
    TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf);
    TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf);
    TCGv_i64 cmp = tcg_temp_new_i64();

    tcg_gen_setcond_i64(cond, cmp, rn, rm);
    tcg_gen_extrl_i64_i32(cpu_NF, cmp);
    tcg_temp_free_i64(cmp);

    /* VF = !NF & !CF.  */
    tcg_gen_xori_i32(cpu_VF, cpu_NF, 1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF);

    /* Both NF and VF actually look at bit 31.  */
    tcg_gen_neg_i32(cpu_NF, cpu_NF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);
    return true;
}
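/*
 * Note (added commentary): the comparison result lands in N, C is left
 * unchanged from the previous flag-setting instruction, and V is
 * derived as !N & !C.  The final negations move the 0/1 results into
 * bit 31, which is where the TCG flag variables are sampled.
 */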
static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
{
    TCGv_i64 op0, op1, t0, t1, tmax;
    TCGv_i32 t2, t3;
    TCGv_ptr ptr;
    unsigned vsz = vec_full_reg_size(s);
    unsigned desc = 0;
    TCGCond cond;
    uint64_t maxval;
    /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
    bool eq = a->eq == a->lt;

    /* The greater-than conditions are all SVE2. */
    if (!a->lt && !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    op0 = read_cpu_reg(s, a->rn, 1);
    op1 = read_cpu_reg(s, a->rm, 1);

    if (!a->sf) {
        if (a->u) {
            tcg_gen_ext32u_i64(op0, op0);
            tcg_gen_ext32u_i64(op1, op1);
        } else {
            tcg_gen_ext32s_i64(op0, op0);
            tcg_gen_ext32s_i64(op1, op1);
        }
    }

    /* For the helper, compress the different conditions into a computation
     * of how many iterations for which the condition is true.
     */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    if (a->lt) {
        tcg_gen_sub_i64(t0, op1, op0);
        if (a->u) {
            maxval = a->sf ? UINT64_MAX : UINT32_MAX;
            cond = eq ? TCG_COND_LEU : TCG_COND_LTU;
        } else {
            maxval = a->sf ? INT64_MAX : INT32_MAX;
            cond = eq ? TCG_COND_LE : TCG_COND_LT;
        }
    } else {
        tcg_gen_sub_i64(t0, op0, op1);
        if (a->u) {
            maxval = 0;
            cond = eq ? TCG_COND_GEU : TCG_COND_GTU;
        } else {
            maxval = a->sf ? INT64_MIN : INT32_MIN;
            cond = eq ? TCG_COND_GE : TCG_COND_GT;
        }
    }

    tmax = tcg_const_i64(vsz >> a->esz);
    if (eq) {
        /* Equality means one more iteration.  */
        tcg_gen_addi_i64(t0, t0, 1);

        /*
         * For the less-than while, if op1 is maxval (and the only time
         * the addition above could overflow), then we produce an all-true
         * predicate by setting the count to the vector length.  This is
         * because the pseudocode is described as an increment + compare
         * loop, and the maximum integer would always compare true.
         * Similarly, the greater-than while has the same issue with the
         * minimum integer due to the decrement + compare loop.
         */
        tcg_gen_movi_i64(t1, maxval);
        tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
    }

    /* Bound to the maximum.  */
    tcg_gen_umin_i64(t0, t0, tmax);
    tcg_temp_free_i64(tmax);

    /* Set the count to zero if the condition is false.  */
    tcg_gen_movi_i64(t1, 0);
    tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
    tcg_temp_free_i64(t1);

    /* Since we're bounded, pass as a 32-bit type.  */
    t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, t0);
    tcg_temp_free_i64(t0);

    /* Scale elements to bits.  */
    tcg_gen_shli_i32(t2, t2, a->esz);

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    t3 = tcg_const_i32(desc);

    ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));

    if (a->lt) {
        gen_helper_sve_whilel(t2, ptr, t2, t3);
    } else {
        gen_helper_sve_whileg(t2, ptr, t2, t3);
    }
    do_pred_flags(t2);

    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);
    return true;
}
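/*
 * Worked example: WHILELT with X(rn) = 5, X(rm) = 9 and esz = MO_8
 * computes t0 = 9 - 5 = 4; the condition 5 < 9 holds, so four
 * elements (four predicate bits at this element size) are set true,
 * bounded above by the vector length.
 */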
static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
{
    TCGv_i64 op0, op1, diff, t1, tmax;
    TCGv_i32 t2, t3;
    TCGv_ptr ptr;
    unsigned vsz = vec_full_reg_size(s);
    unsigned desc = 0;

    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    op0 = read_cpu_reg(s, a->rn, 1);
    op1 = read_cpu_reg(s, a->rm, 1);

    tmax = tcg_const_i64(vsz);
    diff = tcg_temp_new_i64();

    if (a->rw) {
        /* WHILERW */
        /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */
        t1 = tcg_temp_new_i64();
        tcg_gen_sub_i64(diff, op0, op1);
        tcg_gen_sub_i64(t1, op1, op0);
        tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
        tcg_temp_free_i64(t1);
        /* Round down to a multiple of ESIZE.  */
        tcg_gen_andi_i64(diff, diff, -1 << a->esz);
        /* If op1 == op0, diff == 0, and the condition is always true. */
        tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
    } else {
        /* WHILEWR */
        tcg_gen_sub_i64(diff, op1, op0);
        /* Round down to a multiple of ESIZE.  */
        tcg_gen_andi_i64(diff, diff, -1 << a->esz);
        /* If op0 >= op1, diff <= 0, the condition is always true. */
        tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
    }

    /* Bound to the maximum.  */
    tcg_gen_umin_i64(diff, diff, tmax);
    tcg_temp_free_i64(tmax);

    /* Since we're bounded, pass as a 32-bit type.  */
    t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, diff);
    tcg_temp_free_i64(diff);

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    t3 = tcg_const_i32(desc);

    ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));

    gen_helper_sve_whilel(t2, ptr, t2, t3);
    do_pred_flags(t2);

    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t3);
    return true;
}
/*
 *** SVE Integer Wide Immediate - Unpredicated Group
 */

static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        int dofs = vec_full_reg_offset(s, a->rd);
        uint64_t imm;

        /* Decode the VFP immediate.  */
        imm = vfp_expand_imm(a->esz, a->imm);
        tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
    }
    return true;
}

static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
{
    if (a->esz == 0 && extract32(s->insn, 13, 1)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        int dofs = vec_full_reg_offset(s, a->rd);

        tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
    }
    return true;
}

static bool trans_ADD_zzi(DisasContext *s, arg_rri_esz *a)
{
    if (a->esz == 0 && extract32(s->insn, 13, 1)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_addi(a->esz, vec_full_reg_offset(s, a->rd),
                          vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
    }
    return true;
}

static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
{
    a->imm = -a->imm;
    return trans_ADD_zzi(s, a);
}

static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
    static const GVecGen2s op[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_sve_subri_b,
          .opt_opc = vecop_list,
          .vece = MO_8,
          .scalar_first = true },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_sve_subri_h,
          .opt_opc = vecop_list,
          .vece = MO_16,
          .scalar_first = true },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_sve_subri_s,
          .opt_opc = vecop_list,
          .vece = MO_32,
          .scalar_first = true },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_sve_subri_d,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64,
          .scalar_first = true }
    };

    if (a->esz == 0 && extract32(s->insn, 13, 1)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_i64 c = tcg_const_i64(a->imm);
        tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd),
                        vec_full_reg_offset(s, a->rn),
                        vsz, vsz, c, &op[a->esz]);
        tcg_temp_free_i64(c);
    }
    return true;
}
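/*
 * Note (added commentary): .scalar_first makes the gvec expander treat
 * the scalar as the first operand of the subtraction, so SUBR computes
 * imm - Zn rather than Zn - imm.
 */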
static bool trans_MUL_zzi(DisasContext *s, arg_rri_esz *a)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_muli(a->esz, vec_full_reg_offset(s, a->rd),
                          vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
    }
    return true;
}

static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d)
{
    if (a->esz == 0 && extract32(s->insn, 13, 1)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 val = tcg_const_i64(a->imm);
        do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, u, d);
        tcg_temp_free_i64(val);
    }
    return true;
}

static bool trans_SQADD_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_zzi_sat(s, a, false, false);
}

static bool trans_UQADD_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_zzi_sat(s, a, true, false);
}

static bool trans_SQSUB_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_zzi_sat(s, a, false, true);
}

static bool trans_UQSUB_zzi(DisasContext *s, arg_rri_esz *a)
{
    return do_zzi_sat(s, a, true, true);
}

static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_i64 c = tcg_const_i64(a->imm);

        tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
                            vec_full_reg_offset(s, a->rn),
                            c, vsz, vsz, 0, fn);
        tcg_temp_free_i64(c);
    }
    return true;
}

#define DO_ZZI(NAME, name) \
static bool trans_##NAME##_zzi(DisasContext *s, arg_rri_esz *a)         \
{                                                                       \
    static gen_helper_gvec_2i * const fns[4] = {                        \
        gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h,         \
        gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d,         \
    };                                                                  \
    return do_zzi_ool(s, a, fns[a->esz]);                               \
}

DO_ZZI(SMAX, smax)
DO_ZZI(SMIN, smin)
DO_ZZI(UMAX, umax)
DO_ZZI(UMIN, umin)

#undef DO_ZZI

static bool trans_DOT_zzzz(DisasContext *s, arg_DOT_zzzz *a)
{
    static gen_helper_gvec_4 * const fns[2][2] = {
        { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
        { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
    };

    if (sve_access_check(s)) {
        gen_gvec_ool_zzzz(s, fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0);
    }
    return true;
}
/*
 * SVE Multiply - Indexed
 */

static bool do_zzxz_ool(DisasContext *s, arg_rrxr_esz *a,
                        gen_helper_gvec_4 *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
    }
    return true;
}

#define DO_RRXR(NAME, FUNC) \
    static bool NAME(DisasContext *s, arg_rrxr_esz *a)  \
    { return do_zzxz_ool(s, a, FUNC); }

DO_RRXR(trans_SDOT_zzxw_s, gen_helper_gvec_sdot_idx_b)
DO_RRXR(trans_SDOT_zzxw_d, gen_helper_gvec_sdot_idx_h)
DO_RRXR(trans_UDOT_zzxw_s, gen_helper_gvec_udot_idx_b)
DO_RRXR(trans_UDOT_zzxw_d, gen_helper_gvec_udot_idx_h)

static bool trans_SUDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_i8mm, s)) {
        return false;
    }
    return do_zzxz_ool(s, a, gen_helper_gvec_sudot_idx_b);
}

static bool trans_USDOT_zzxw_s(DisasContext *s, arg_rrxr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_i8mm, s)) {
        return false;
    }
    return do_zzxz_ool(s, a, gen_helper_gvec_usdot_idx_b);
}

#undef DO_RRXR

static bool do_sve2_zzz_data(DisasContext *s, int rd, int rn, int rm, int data,
                             gen_helper_gvec_3 *fn)
{
    if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vsz, vsz, data, fn);
    }
    return true;
}

#define DO_SVE2_RRX(NAME, FUNC) \
    static bool NAME(DisasContext *s, arg_rrx_esz *a)  \
    { return do_sve2_zzz_data(s, a->rd, a->rn, a->rm, a->index, FUNC); }

DO_SVE2_RRX(trans_MUL_zzx_h, gen_helper_gvec_mul_idx_h)
DO_SVE2_RRX(trans_MUL_zzx_s, gen_helper_gvec_mul_idx_s)
DO_SVE2_RRX(trans_MUL_zzx_d, gen_helper_gvec_mul_idx_d)

DO_SVE2_RRX(trans_SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
DO_SVE2_RRX(trans_SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
DO_SVE2_RRX(trans_SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)

DO_SVE2_RRX(trans_SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
DO_SVE2_RRX(trans_SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
DO_SVE2_RRX(trans_SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)

#undef DO_SVE2_RRX

#define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
    static bool NAME(DisasContext *s, arg_rrx_esz *a)           \
    {                                                           \
        return do_sve2_zzz_data(s, a->rd, a->rn, a->rm,         \
                                (a->index << 1) | TOP, FUNC);   \
    }

DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false)
DO_SVE2_RRX_TB(trans_SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false)
DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true)
DO_SVE2_RRX_TB(trans_SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true)

DO_SVE2_RRX_TB(trans_SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false)
DO_SVE2_RRX_TB(trans_SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false)
DO_SVE2_RRX_TB(trans_SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true)
DO_SVE2_RRX_TB(trans_SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true)

DO_SVE2_RRX_TB(trans_UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false)
DO_SVE2_RRX_TB(trans_UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false)
DO_SVE2_RRX_TB(trans_UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true)
DO_SVE2_RRX_TB(trans_UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true)
#undef DO_SVE2_RRX_TB

static bool do_sve2_zzzz_data(DisasContext *s, int rd, int rn, int rm, int ra,
                              int data, gen_helper_gvec_4 *fn)
{
    if (fn == NULL || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           vsz, vsz, data, fn);
    }
    return true;
}

#define DO_SVE2_RRXR(NAME, FUNC) \
    static bool NAME(DisasContext *s, arg_rrxr_esz *a)  \
    { return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra, a->index, FUNC); }

DO_SVE2_RRXR(trans_MLA_zzxz_h, gen_helper_gvec_mla_idx_h)
DO_SVE2_RRXR(trans_MLA_zzxz_s, gen_helper_gvec_mla_idx_s)
DO_SVE2_RRXR(trans_MLA_zzxz_d, gen_helper_gvec_mla_idx_d)

DO_SVE2_RRXR(trans_MLS_zzxz_h, gen_helper_gvec_mls_idx_h)
DO_SVE2_RRXR(trans_MLS_zzxz_s, gen_helper_gvec_mls_idx_s)
DO_SVE2_RRXR(trans_MLS_zzxz_d, gen_helper_gvec_mls_idx_d)

DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h)
DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s)
DO_SVE2_RRXR(trans_SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d)

DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h)
DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s)
DO_SVE2_RRXR(trans_SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d)

#undef DO_SVE2_RRXR

#define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \
    static bool NAME(DisasContext *s, arg_rrxr_esz *a)          \
    {                                                           \
        return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->rd, \
                                 (a->index << 1) | TOP, FUNC);  \
    }

DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false)
DO_SVE2_RRXR_TB(trans_SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false)
DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true)
DO_SVE2_RRXR_TB(trans_SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true)

DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false)
DO_SVE2_RRXR_TB(trans_SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false)
DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true)
DO_SVE2_RRXR_TB(trans_SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true)

DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false)
DO_SVE2_RRXR_TB(trans_SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false)
DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true)
DO_SVE2_RRXR_TB(trans_SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true)

DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false)
DO_SVE2_RRXR_TB(trans_UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false)
DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true)
DO_SVE2_RRXR_TB(trans_UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true)

DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false)
DO_SVE2_RRXR_TB(trans_SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false)
DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true)
DO_SVE2_RRXR_TB(trans_SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true)

DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false)
DO_SVE2_RRXR_TB(trans_UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false)
DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true)
DO_SVE2_RRXR_TB(trans_UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true)

#undef DO_SVE2_RRXR_TB

#define DO_SVE2_RRXR_ROT(NAME, FUNC) \
    static bool trans_##NAME(DisasContext *s, arg_##NAME *a)       \
    {                                                              \
        return do_sve2_zzzz_data(s, a->rd, a->rn, a->rm, a->ra,    \
                                 (a->index << 2) | a->rot, FUNC);  \
    }

DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h)
DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s)

DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h)
DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s)

DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s)
DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d)

#undef DO_SVE2_RRXR_ROT
/*
 *** SVE Floating Point Multiply-Add Indexed Group
 */

static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub)
{
    static gen_helper_gvec_4_ptr * const fns[3] = {
        gen_helper_gvec_fmla_idx_h,
        gen_helper_gvec_fmla_idx_s,
        gen_helper_gvec_fmla_idx_d,
    };

    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           vec_full_reg_offset(s, a->ra),
                           status, vsz, vsz, (a->index << 1) | sub,
                           fns[a->esz - 1]);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
{
    return do_FMLA_zzxz(s, a, false);
}

static bool trans_FMLS_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
{
    return do_FMLA_zzxz(s, a, true);
}

/*
 *** SVE Floating Point Multiply Indexed Group
 */

static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a)
{
    static gen_helper_gvec_3_ptr * const fns[3] = {
        gen_helper_gvec_fmul_idx_h,
        gen_helper_gvec_fmul_idx_s,
        gen_helper_gvec_fmul_idx_d,
    };

    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           status, vsz, vsz, a->index, fns[a->esz - 1]);
        tcg_temp_free_ptr(status);
    }
    return true;
}
/*
 *** SVE Floating Point Fast Reduction Group
 */

typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr,
                                  TCGv_ptr, TCGv_i32);

static void do_reduce(DisasContext *s, arg_rpr_esz *a,
                      gen_helper_fp_reduce *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    unsigned p2vsz = pow2ceil(vsz);
    TCGv_i32 t_desc = tcg_const_i32(simd_desc(vsz, vsz, p2vsz));
    TCGv_ptr t_zn, t_pg, status;
    TCGv_i64 temp;

    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    fn(temp, t_zn, t_pg, status, t_desc);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_ptr(status);
    tcg_temp_free_i32(t_desc);

    write_fp_dreg(s, a->rd, temp);
    tcg_temp_free_i64(temp);
}

#define DO_VPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a)                \
{                                                                        \
    static gen_helper_fp_reduce * const fns[3] = {                       \
        gen_helper_sve_##name##_h,                                       \
        gen_helper_sve_##name##_s,                                       \
        gen_helper_sve_##name##_d,                                       \
    };                                                                   \
    if (a->esz == 0) {                                                   \
        return false;                                                    \
    }                                                                    \
    if (sve_access_check(s)) {                                           \
        do_reduce(s, a, fns[a->esz - 1]);                                \
    }                                                                    \
    return true;                                                         \
}

DO_VPZ(FADDV, faddv)
DO_VPZ(FMINNMV, fminnmv)
DO_VPZ(FMAXNMV, fmaxnmv)
DO_VPZ(FMINV, fminv)
DO_VPZ(FMAXV, fmaxv)
/*
 *** SVE Floating Point Unary Operations - Unpredicated Group
 */

static void do_zz_fp(DisasContext *s, arg_rr_esz *a, gen_helper_gvec_2_ptr *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->rd),
                       vec_full_reg_offset(s, a->rn),
                       status, vsz, vsz, 0, fn);
    tcg_temp_free_ptr(status);
}

static bool trans_FRECPE(DisasContext *s, arg_rr_esz *a)
{
    static gen_helper_gvec_2_ptr * const fns[3] = {
        gen_helper_gvec_frecpe_h,
        gen_helper_gvec_frecpe_s,
        gen_helper_gvec_frecpe_d,
    };
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        do_zz_fp(s, a, fns[a->esz - 1]);
    }
    return true;
}

static bool trans_FRSQRTE(DisasContext *s, arg_rr_esz *a)
{
    static gen_helper_gvec_2_ptr * const fns[3] = {
        gen_helper_gvec_frsqrte_h,
        gen_helper_gvec_frsqrte_s,
        gen_helper_gvec_frsqrte_d,
    };
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        do_zz_fp(s, a, fns[a->esz - 1]);
    }
    return true;
}

/*
 *** SVE Floating Point Compare with Zero Group
 */

static void do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
                      gen_helper_gvec_3_ptr *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
                       vec_full_reg_offset(s, a->rn),
                       pred_full_reg_offset(s, a->pg),
                       status, vsz, vsz, 0, fn);
    tcg_temp_free_ptr(status);
}

#define DO_PPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a)         \
{                                                                 \
    static gen_helper_gvec_3_ptr * const fns[3] = {               \
        gen_helper_sve_##name##_h,                                \
        gen_helper_sve_##name##_s,                                \
        gen_helper_sve_##name##_d,                                \
    };                                                            \
    if (a->esz == 0) {                                            \
        return false;                                             \
    }                                                             \
    if (sve_access_check(s)) {                                    \
        do_ppz_fp(s, a, fns[a->esz - 1]);                         \
    }                                                             \
    return true;                                                  \
}

DO_PPZ(FCMGE_ppz0, fcmge0)
DO_PPZ(FCMGT_ppz0, fcmgt0)
DO_PPZ(FCMLE_ppz0, fcmle0)
DO_PPZ(FCMLT_ppz0, fcmlt0)
DO_PPZ(FCMEQ_ppz0, fcmeq0)
DO_PPZ(FCMNE_ppz0, fcmne0)

#undef DO_PPZ
/*
 *** SVE floating-point trig multiply-add coefficient
 */

static bool trans_FTMAD(DisasContext *s, arg_FTMAD *a)
{
    static gen_helper_gvec_3_ptr * const fns[3] = {
        gen_helper_sve_ftmad_h,
        gen_helper_sve_ftmad_s,
        gen_helper_sve_ftmad_d,
    };

    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           status, vsz, vsz, a->imm, fns[a->esz - 1]);
        tcg_temp_free_ptr(status);
    }
    return true;
}

/*
 *** SVE Floating Point Accumulating Reduction Group
 */

static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
{
    typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr,
                          TCGv_ptr, TCGv_ptr, TCGv_i32);
    static fadda_fn * const fns[3] = {
        gen_helper_sve_fadda_h,
        gen_helper_sve_fadda_s,
        gen_helper_sve_fadda_d,
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_rm, t_pg, t_fpst;
    TCGv_i64 t_val;
    TCGv_i32 t_desc;

    if (a->esz == 0) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
    t_rm = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    t_desc = tcg_const_i32(simd_desc(vsz, vsz, 0));

    fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc);

    tcg_temp_free_i32(t_desc);
    tcg_temp_free_ptr(t_fpst);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_ptr(t_rm);

    write_fp_dreg(s, a->rd, t_val);
    tcg_temp_free_i64(t_val);
    return true;
}
/*
 *** SVE Floating Point Arithmetic - Unpredicated Group
 */

static bool do_zzz_fp(DisasContext *s, arg_rrr_esz *a,
                      gen_helper_gvec_3_ptr *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           status, vsz, vsz, 0, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

#define DO_FP3(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a)           \
{                                                                   \
    static gen_helper_gvec_3_ptr * const fns[4] = {                 \
        NULL, gen_helper_gvec_##name##_h,                           \
        gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d      \
    };                                                              \
    return do_zzz_fp(s, a, fns[a->esz]);                            \
}

DO_FP3(FADD_zzz, fadd)
DO_FP3(FSUB_zzz, fsub)
DO_FP3(FMUL_zzz, fmul)
DO_FP3(FTSMUL, ftsmul)
DO_FP3(FRECPS, recps)
DO_FP3(FRSQRTS, rsqrts)

#undef DO_FP3

/*
 *** SVE Floating Point Arithmetic - Predicated Group
 */

static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
                       gen_helper_gvec_4_ptr *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, 0, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

#define DO_FP3(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a)          \
{                                                                   \
    static gen_helper_gvec_4_ptr * const fns[4] = {                 \
        NULL, gen_helper_sve_##name##_h,                            \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d        \
    };                                                              \
    return do_zpzz_fp(s, a, fns[a->esz]);                           \
}

DO_FP3(FADD_zpzz, fadd)
DO_FP3(FSUB_zpzz, fsub)
DO_FP3(FMUL_zpzz, fmul)
DO_FP3(FMIN_zpzz, fmin)
DO_FP3(FMAX_zpzz, fmax)
DO_FP3(FMINNM_zpzz, fminnum)
DO_FP3(FMAXNM_zpzz, fmaxnum)
DO_FP3(FABD, fabd)
DO_FP3(FSCALE, fscalbn)
DO_FP3(FDIV, fdiv)
DO_FP3(FMULX, fmulx)

#undef DO_FP3
4532 typedef void gen_helper_sve_fp2scalar(TCGv_ptr
, TCGv_ptr
, TCGv_ptr
,
4533 TCGv_i64
, TCGv_ptr
, TCGv_i32
);
static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
                         TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zd, t_zn, t_pg, status;
    TCGv_i32 desc;

    t_zd = tcg_temp_new_ptr();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));

    status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    desc = tcg_const_i32(simd_desc(vsz, vsz, 0));
    fn(t_zd, t_zn, t_pg, scalar, status, desc);

    tcg_temp_free_i32(desc);
    tcg_temp_free_ptr(status);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_zd);
}

static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
                      gen_helper_sve_fp2scalar *fn)
{
    TCGv_i64 temp = tcg_const_i64(imm);
    do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16, temp, fn);
    tcg_temp_free_i64(temp);
}

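/*
 * The FP arithmetic-with-immediate forms encode a single immediate bit,
 * selecting one of two fixed constants per operation (e.g. 0.5 or 1.0
 * for FADD); hence the val[esz][2] tables in the macro below.
 */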
#define DO_FP_IMM(NAME, name, const0, const1) \
static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a)      \
{                                                                      \
    static gen_helper_sve_fp2scalar * const fns[3] = {                 \
        gen_helper_sve_##name##_h,                                     \
        gen_helper_sve_##name##_s,                                     \
        gen_helper_sve_##name##_d                                      \
    };                                                                 \
    static uint64_t const val[3][2] = {                                \
        { float16_##const0, float16_##const1 },                        \
        { float32_##const0, float32_##const1 },                        \
        { float64_##const0, float64_##const1 },                        \
    };                                                                 \
    if (a->esz == 0) {                                                 \
        return false;                                                  \
    }                                                                  \
    if (sve_access_check(s)) {                                         \
        do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]);     \
    }                                                                  \
    return true;                                                       \
}

DO_FP_IMM(FADD, fadds, half, one)
DO_FP_IMM(FSUB, fsubs, half, one)
DO_FP_IMM(FMUL, fmuls, half, two)
DO_FP_IMM(FSUBR, fsubrs, half, one)
DO_FP_IMM(FMAXNM, fmaxnms, zero, one)
DO_FP_IMM(FMINNM, fminnms, zero, one)
DO_FP_IMM(FMAX, fmaxs, zero, one)
DO_FP_IMM(FMIN, fmins, zero, one)

#undef DO_FP_IMM

static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
                      gen_helper_gvec_4_ptr *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, 0, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

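/*
 * Note that the FP compares expanded below write their result to a
 * predicate register, so do_fp_cmp uses pred_full_reg_offset rather
 * than vec_full_reg_offset for the destination.
 */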
#define DO_FPCMP(NAME, name) \
static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a)  \
{                                                                  \
    static gen_helper_gvec_4_ptr * const fns[4] = {                \
        NULL, gen_helper_sve_##name##_h,                           \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d       \
    };                                                             \
    return do_fp_cmp(s, a, fns[a->esz]);                           \
}

DO_FPCMP(FCMGE, fcmge)
DO_FPCMP(FCMGT, fcmgt)
DO_FPCMP(FCMEQ, fcmeq)
DO_FPCMP(FCMNE, fcmne)
DO_FPCMP(FCMUO, fcmuo)
DO_FPCMP(FACGE, facge)
DO_FPCMP(FACGT, facgt)

#undef DO_FPCMP

static bool trans_FCADD(DisasContext *s, arg_FCADD *a)
{
    static gen_helper_gvec_4_ptr * const fns[3] = {
        gen_helper_sve_fcadd_h,
        gen_helper_sve_fcadd_s,
        gen_helper_sve_fcadd_d
    };

    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, a->rot, fns[a->esz - 1]);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool do_fmla(DisasContext *s, arg_rprrr_esz *a,
                    gen_helper_gvec_5_ptr *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           vec_full_reg_offset(s, a->ra),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, 0, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

#define DO_FMLA(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a)        \
{                                                                  \
    static gen_helper_gvec_5_ptr * const fns[4] = {                \
        NULL, gen_helper_sve_##name##_h,                           \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d       \
    };                                                             \
    return do_fmla(s, a, fns[a->esz]);                             \
}

DO_FMLA(FMLA_zpzzz, fmla_zpzzz)
DO_FMLA(FMLS_zpzzz, fmls_zpzzz)
DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz)
DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz)

#undef DO_FMLA

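/*
 * For the FCMLA expansions below, the 2-bit rotation is passed to the
 * helper in the simd_desc data field; the indexed form additionally
 * folds the element index into the same field.
 */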
static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
{
    static gen_helper_gvec_5_ptr * const fns[4] = {
        NULL,
        gen_helper_sve_fcmla_zpzzz_h,
        gen_helper_sve_fcmla_zpzzz_s,
        gen_helper_sve_fcmla_zpzzz_d,
    };

    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           vec_full_reg_offset(s, a->ra),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, a->rot, fns[a->esz]);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
{
    static gen_helper_gvec_4_ptr * const fns[2] = {
        gen_helper_gvec_fcmlah_idx,
        gen_helper_gvec_fcmlas_idx,
    };

    tcg_debug_assert(a->esz == 1 || a->esz == 2);
    tcg_debug_assert(a->rd == a->ra);
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           vec_full_reg_offset(s, a->ra),
                           status, vsz, vsz,
                           a->index * 4 + a->rot,
                           fns[a->esz - 1]);
        tcg_temp_free_ptr(status);
    }
    return true;
}

/*
 *** SVE Floating Point Unary Operations Predicated Group
 */

static bool do_zpz_ptr(DisasContext *s, int rd, int rn, int pg,
                       bool is_fp16, gen_helper_gvec_3_ptr *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           status, vsz, vsz, 0, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool trans_FCVT_sh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sh);
}

static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs);
}

static bool trans_BFCVT(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvt);
}

static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh);
}

static bool trans_FCVT_hd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hd);
}

static bool trans_FCVT_ds(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_ds);
}

static bool trans_FCVT_sd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sd);
}

static bool trans_FCVTZS_hh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hh);
}

static bool trans_FCVTZU_hh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hh);
}

static bool trans_FCVTZS_hs(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hs);
}

static bool trans_FCVTZU_hs(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hs);
}

static bool trans_FCVTZS_hd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hd);
}

static bool trans_FCVTZU_hd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hd);
}

static bool trans_FCVTZS_ss(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ss);
}

static bool trans_FCVTZU_ss(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ss);
}

static bool trans_FCVTZS_sd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_sd);
}

static bool trans_FCVTZU_sd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_sd);
}

static bool trans_FCVTZS_ds(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ds);
}

static bool trans_FCVTZU_ds(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ds);
}

static bool trans_FCVTZS_dd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_dd);
}

static bool trans_FCVTZU_dd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_dd);
}

static gen_helper_gvec_3_ptr * const frint_fns[3] = {
    gen_helper_sve_frint_h,
    gen_helper_sve_frint_s,
    gen_helper_sve_frint_d
};

static bool trans_FRINTI(DisasContext *s, arg_rpr_esz *a)
{
    if (a->esz == 0) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16,
                      frint_fns[a->esz - 1]);
}

static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3_ptr * const fns[3] = {
        gen_helper_sve_frintx_h,
        gen_helper_sve_frintx_s,
        gen_helper_sve_frintx_d
    };
    if (a->esz == 0) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
}

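/*
 * The directed-rounding FRINT* forms share frint_fns with FRINTI.
 * gen_helper_set_rmode swaps the requested rounding mode into the
 * fp_status and returns the previous mode, so the second call after
 * the expansion restores the mode selected by FPCR.
 */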
static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
                          int mode, gen_helper_gvec_3_ptr *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_i32 tmode = tcg_const_i32(mode);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

        gen_helper_set_rmode(tmode, tmode, status);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, 0, fn);

        gen_helper_set_rmode(tmode, tmode, status);
        tcg_temp_free_i32(tmode);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a)
{
    if (a->esz == 0) {
        return false;
    }
    return do_frint_mode(s, a, float_round_nearest_even, frint_fns[a->esz - 1]);
}

static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a)
{
    if (a->esz == 0) {
        return false;
    }
    return do_frint_mode(s, a, float_round_up, frint_fns[a->esz - 1]);
}

static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a)
{
    if (a->esz == 0) {
        return false;
    }
    return do_frint_mode(s, a, float_round_down, frint_fns[a->esz - 1]);
}

static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a)
{
    if (a->esz == 0) {
        return false;
    }
    return do_frint_mode(s, a, float_round_to_zero, frint_fns[a->esz - 1]);
}

static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a)
{
    if (a->esz == 0) {
        return false;
    }
    return do_frint_mode(s, a, float_round_ties_away, frint_fns[a->esz - 1]);
}

static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3_ptr * const fns[3] = {
        gen_helper_sve_frecpx_h,
        gen_helper_sve_frecpx_s,
        gen_helper_sve_frecpx_d
    };
    if (a->esz == 0) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
}

static bool trans_FSQRT(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3_ptr * const fns[3] = {
        gen_helper_sve_fsqrt_h,
        gen_helper_sve_fsqrt_s,
        gen_helper_sve_fsqrt_d
    };
    if (a->esz == 0) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
}

static bool trans_SCVTF_hh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_hh);
}

static bool trans_SCVTF_sh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_sh);
}

static bool trans_SCVTF_dh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_dh);
}

static bool trans_SCVTF_ss(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ss);
}

static bool trans_SCVTF_ds(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ds);
}

static bool trans_SCVTF_sd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_sd);
}

static bool trans_SCVTF_dd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_dd);
}

static bool trans_UCVTF_hh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_hh);
}

static bool trans_UCVTF_sh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_sh);
}

static bool trans_UCVTF_dh(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_dh);
}

static bool trans_UCVTF_ss(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ss);
}

static bool trans_UCVTF_ds(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ds);
}

static bool trans_UCVTF_sd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_sd);
}

static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a)
{
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_dd);
}

/*
 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
 */

/* Subroutine loading a vector register at VOFS of LEN bytes.
 * The load should begin at the address Rn + IMM.
 */

static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
{
    int len_align = QEMU_ALIGN_DOWN(len, 8);
    int len_remain = len % 8;
    int nparts = len / 8 + ctpop8(len_remain);
    int midx = get_mem_index(s);
    TCGv_i64 dirty_addr, clean_addr, t0, t1;

    dirty_addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
    tcg_temp_free_i64(dirty_addr);

    /*
     * Note that unpredicated load/store of vector/predicate registers
     * are defined as a stream of bytes, which equates to little-endian
     * operations on larger quantities.
     * Attempt to keep code expansion to a minimum by limiting the
     * amount of unrolling done.
     */
    if (nparts <= 4) {
        int i;

        t0 = tcg_temp_new_i64();
        for (i = 0; i < len_align; i += 8) {
            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
            tcg_gen_st_i64(t0, cpu_env, vofs + i);
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
        }
        tcg_temp_free_i64(t0);
    } else {
        TCGLabel *loop = gen_new_label();
        TCGv_ptr tp, i = tcg_const_local_ptr(0);

        /* Copy the clean address into a local temp, live across the loop. */
        t0 = clean_addr;
        clean_addr = new_tmp_a64_local(s);
        tcg_gen_mov_i64(clean_addr, t0);

        gen_set_label(loop);

        t0 = tcg_temp_new_i64();
        tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEQ);
        tcg_gen_addi_i64(clean_addr, clean_addr, 8);

        tp = tcg_temp_new_ptr();
        tcg_gen_add_ptr(tp, cpu_env, i);
        tcg_gen_addi_ptr(i, i, 8);
        tcg_gen_st_i64(t0, tp, vofs);
        tcg_temp_free_ptr(tp);
        tcg_temp_free_i64(t0);

        tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
        tcg_temp_free_ptr(i);
    }

    /*
     * Predicate register loads can be any multiple of 2.
     * Note that we still store the entire 64-bit unit into cpu_env.
     */
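    /*
     * For example, a 6-byte predicate (vector length 384 bits) has
     * len_align == 0 and len_remain == 6, and is loaded as one 4-byte
     * and one 2-byte little-endian access merged with a deposit.
     */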
    if (len_remain) {
        t0 = tcg_temp_new_i64();
        switch (len_remain) {
        case 2:
        case 4:
        case 8:
            tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
                                MO_LE | ctz32(len_remain));
            break;

        case 6:
            t1 = tcg_temp_new_i64();
            tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
            tcg_gen_addi_i64(clean_addr, clean_addr, 4);
            tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
            tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
            tcg_temp_free_i64(t1);
            break;

        default:
            g_assert_not_reached();
        }
        tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
        tcg_temp_free_i64(t0);
    }
}

/* Similarly for stores.  */
static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
{
    int len_align = QEMU_ALIGN_DOWN(len, 8);
    int len_remain = len % 8;
    int nparts = len / 8 + ctpop8(len_remain);
    int midx = get_mem_index(s);
    TCGv_i64 dirty_addr, clean_addr, t0;

    dirty_addr = tcg_temp_new_i64();
    tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
    clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
    tcg_temp_free_i64(dirty_addr);

    /* Note that unpredicated load/store of vector/predicate registers
     * are defined as a stream of bytes, which equates to little-endian
     * operations on larger quantities.  There is no nice way to force
     * a little-endian store for aarch64_be-linux-user out of line.
     *
     * Attempt to keep code expansion to a minimum by limiting the
     * amount of unrolling done.
     */
    if (nparts <= 4) {
        int i;

        t0 = tcg_temp_new_i64();
        for (i = 0; i < len_align; i += 8) {
            tcg_gen_ld_i64(t0, cpu_env, vofs + i);
            tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
            tcg_gen_addi_i64(clean_addr, clean_addr, 8);
        }
        tcg_temp_free_i64(t0);
    } else {
        TCGLabel *loop = gen_new_label();
        TCGv_ptr tp, i = tcg_const_local_ptr(0);

        /* Copy the clean address into a local temp, live across the loop. */
        t0 = clean_addr;
        clean_addr = new_tmp_a64_local(s);
        tcg_gen_mov_i64(clean_addr, t0);

        gen_set_label(loop);

        t0 = tcg_temp_new_i64();
        tp = tcg_temp_new_ptr();
        tcg_gen_add_ptr(tp, cpu_env, i);
        tcg_gen_ld_i64(t0, tp, vofs);
        tcg_gen_addi_ptr(i, i, 8);
        tcg_temp_free_ptr(tp);

        tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEQ);
        tcg_gen_addi_i64(clean_addr, clean_addr, 8);
        tcg_temp_free_i64(t0);

        tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
        tcg_temp_free_ptr(i);
    }

    /* Predicate register stores can be any multiple of 2.  */
    if (len_remain) {
        t0 = tcg_temp_new_i64();
        tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);

        switch (len_remain) {
        case 2:
        case 4:
        case 8:
            tcg_gen_qemu_st_i64(t0, clean_addr, midx,
                                MO_LE | ctz32(len_remain));
            break;

        case 6:
            tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL);
            tcg_gen_addi_i64(clean_addr, clean_addr, 4);
            tcg_gen_shri_i64(t0, t0, 32);
            tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW);
            break;

        default:
            g_assert_not_reached();
        }
        tcg_temp_free_i64(t0);
    }
}

static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
{
    if (sve_access_check(s)) {
        int size = vec_full_reg_size(s);
        int off = vec_full_reg_offset(s, a->rd);
        do_ldr(s, off, size, a->rn, a->imm * size);
    }
    return true;
}

static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
{
    if (sve_access_check(s)) {
        int size = pred_full_reg_size(s);
        int off = pred_full_reg_offset(s, a->rd);
        do_ldr(s, off, size, a->rn, a->imm * size);
    }
    return true;
}

static bool trans_STR_zri(DisasContext *s, arg_rri *a)
{
    if (sve_access_check(s)) {
        int size = vec_full_reg_size(s);
        int off = vec_full_reg_offset(s, a->rd);
        do_str(s, off, size, a->rn, a->imm * size);
    }
    return true;
}

static bool trans_STR_pri(DisasContext *s, arg_rri *a)
{
    if (sve_access_check(s)) {
        int size = pred_full_reg_size(s);
        int off = pred_full_reg_offset(s, a->rd);
        do_str(s, off, size, a->rn, a->imm * size);
    }
    return true;
}

/*
 *** SVE Memory - Contiguous Load Group
 */

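/*
 * The 4-bit dtype field of the contiguous-load encodings combines the
 * memory element size with the (possibly sign-extending) destination
 * element size; dtype_mop and dtype_esz below decode its two halves.
 */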
/* The memory mode of the dtype.  */
static const MemOp dtype_mop[16] = {
    MO_UB, MO_UB, MO_UB, MO_UB,
    MO_SL, MO_UW, MO_UW, MO_UW,
    MO_SW, MO_SW, MO_UL, MO_UL,
    MO_SB, MO_SB, MO_SB, MO_Q
};

#define dtype_msz(x)  (dtype_mop[x] & MO_SIZE)

/* The vector element size of dtype.  */
static const uint8_t dtype_esz[16] = {
    0, 1, 2, 3,
    3, 1, 2, 3,
    3, 2, 2, 3,
    3, 2, 1, 3
};

static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
                       int dtype, uint32_t mte_n, bool is_write,
                       gen_helper_gvec_mem *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_pg;
    TCGv_i32 t_desc;
    int desc = 0;

    /*
     * For e.g. LD4, there are not enough arguments to pass all 4
     * registers as pointers, so encode the regno into the data field.
     * For consistency, do this even for LD1.
     */
    if (s->mte_active[0]) {
        int msz = dtype_msz(dtype);

        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
        desc <<= SVE_MTEDESC_SHIFT;
    } else {
        addr = clean_data_tbi(s, addr);
    }

    desc = simd_desc(vsz, vsz, zt | desc);
    t_desc = tcg_const_i32(desc);
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
    fn(cpu_env, t_pg, addr, t_desc);

    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_i32(t_desc);
}

/* Indexed by [mte][be][dtype][nreg] */
static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
    { /* mte inactive, little-endian */
      { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
          gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
        { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },

        { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
          gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
        { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },

        { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
          gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
        { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },

        { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
          gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },

      /* mte inactive, big-endian */
      { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
          gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
        { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },

        { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
          gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
        { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },

        { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
          gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
        { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },

        { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
        { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
          gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },

    { /* mte active, little-endian */
      { { gen_helper_sve_ld1bb_r_mte,
          gen_helper_sve_ld2bb_r_mte,
          gen_helper_sve_ld3bb_r_mte,
          gen_helper_sve_ld4bb_r_mte },
        { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },

        { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1hh_le_r_mte,
          gen_helper_sve_ld2hh_le_r_mte,
          gen_helper_sve_ld3hh_le_r_mte,
          gen_helper_sve_ld4hh_le_r_mte },
        { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },

        { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1ss_le_r_mte,
          gen_helper_sve_ld2ss_le_r_mte,
          gen_helper_sve_ld3ss_le_r_mte,
          gen_helper_sve_ld4ss_le_r_mte },
        { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },

        { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1dd_le_r_mte,
          gen_helper_sve_ld2dd_le_r_mte,
          gen_helper_sve_ld3dd_le_r_mte,
          gen_helper_sve_ld4dd_le_r_mte } },

      /* mte active, big-endian */
      { { gen_helper_sve_ld1bb_r_mte,
          gen_helper_sve_ld2bb_r_mte,
          gen_helper_sve_ld3bb_r_mte,
          gen_helper_sve_ld4bb_r_mte },
        { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },

        { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1hh_be_r_mte,
          gen_helper_sve_ld2hh_be_r_mte,
          gen_helper_sve_ld3hh_be_r_mte,
          gen_helper_sve_ld4hh_be_r_mte },
        { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },

        { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1ss_be_r_mte,
          gen_helper_sve_ld2ss_be_r_mte,
          gen_helper_sve_ld3ss_be_r_mte,
          gen_helper_sve_ld4ss_be_r_mte },
        { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },

        { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
        { gen_helper_sve_ld1dd_be_r_mte,
          gen_helper_sve_ld2dd_be_r_mte,
          gen_helper_sve_ld3dd_be_r_mte,
          gen_helper_sve_ld4dd_be_r_mte } } },
};

static void do_ld_zpa(DisasContext *s, int zt, int pg,
                      TCGv_i64 addr, int dtype, int nreg)
{
    gen_helper_gvec_mem *fn
        = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];

    /*
     * While there are holes in the table, they are not
     * accessible via the instruction encoding.
     */
    assert(fn != NULL);
    do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn);
}

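/*
 * For the scalar-plus-scalar load forms, Rm == 31 is an unallocated
 * encoding rather than a reference to XZR, hence the explicit checks
 * in the translators below.
 */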
static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
{
    if (a->rm == 31) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 addr = new_tmp_a64(s);
        tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
        do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
    }
    return true;
}

static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
{
    if (sve_access_check(s)) {
        int vsz = vec_full_reg_size(s);
        int elements = vsz >> dtype_esz[a->dtype];
        TCGv_i64 addr = new_tmp_a64(s);

        tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
                         (a->imm * elements * (a->nreg + 1))
                         << dtype_msz(a->dtype));
        do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
    }
    return true;
}

static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
{
    static gen_helper_gvec_mem * const fns[2][2][16] = {
        { /* mte inactive, little-endian */
          { gen_helper_sve_ldff1bb_r,
            gen_helper_sve_ldff1bhu_r,
            gen_helper_sve_ldff1bsu_r,
            gen_helper_sve_ldff1bdu_r,

            gen_helper_sve_ldff1sds_le_r,
            gen_helper_sve_ldff1hh_le_r,
            gen_helper_sve_ldff1hsu_le_r,
            gen_helper_sve_ldff1hdu_le_r,

            gen_helper_sve_ldff1hds_le_r,
            gen_helper_sve_ldff1hss_le_r,
            gen_helper_sve_ldff1ss_le_r,
            gen_helper_sve_ldff1sdu_le_r,

            gen_helper_sve_ldff1bds_r,
            gen_helper_sve_ldff1bss_r,
            gen_helper_sve_ldff1bhs_r,
            gen_helper_sve_ldff1dd_le_r },

          /* mte inactive, big-endian */
          { gen_helper_sve_ldff1bb_r,
            gen_helper_sve_ldff1bhu_r,
            gen_helper_sve_ldff1bsu_r,
            gen_helper_sve_ldff1bdu_r,

            gen_helper_sve_ldff1sds_be_r,
            gen_helper_sve_ldff1hh_be_r,
            gen_helper_sve_ldff1hsu_be_r,
            gen_helper_sve_ldff1hdu_be_r,

            gen_helper_sve_ldff1hds_be_r,
            gen_helper_sve_ldff1hss_be_r,
            gen_helper_sve_ldff1ss_be_r,
            gen_helper_sve_ldff1sdu_be_r,

            gen_helper_sve_ldff1bds_r,
            gen_helper_sve_ldff1bss_r,
            gen_helper_sve_ldff1bhs_r,
            gen_helper_sve_ldff1dd_be_r } },

        { /* mte active, little-endian */
          { gen_helper_sve_ldff1bb_r_mte,
            gen_helper_sve_ldff1bhu_r_mte,
            gen_helper_sve_ldff1bsu_r_mte,
            gen_helper_sve_ldff1bdu_r_mte,

            gen_helper_sve_ldff1sds_le_r_mte,
            gen_helper_sve_ldff1hh_le_r_mte,
            gen_helper_sve_ldff1hsu_le_r_mte,
            gen_helper_sve_ldff1hdu_le_r_mte,

            gen_helper_sve_ldff1hds_le_r_mte,
            gen_helper_sve_ldff1hss_le_r_mte,
            gen_helper_sve_ldff1ss_le_r_mte,
            gen_helper_sve_ldff1sdu_le_r_mte,

            gen_helper_sve_ldff1bds_r_mte,
            gen_helper_sve_ldff1bss_r_mte,
            gen_helper_sve_ldff1bhs_r_mte,
            gen_helper_sve_ldff1dd_le_r_mte },

          /* mte active, big-endian */
          { gen_helper_sve_ldff1bb_r_mte,
            gen_helper_sve_ldff1bhu_r_mte,
            gen_helper_sve_ldff1bsu_r_mte,
            gen_helper_sve_ldff1bdu_r_mte,

            gen_helper_sve_ldff1sds_be_r_mte,
            gen_helper_sve_ldff1hh_be_r_mte,
            gen_helper_sve_ldff1hsu_be_r_mte,
            gen_helper_sve_ldff1hdu_be_r_mte,

            gen_helper_sve_ldff1hds_be_r_mte,
            gen_helper_sve_ldff1hss_be_r_mte,
            gen_helper_sve_ldff1ss_be_r_mte,
            gen_helper_sve_ldff1sdu_be_r_mte,

            gen_helper_sve_ldff1bds_r_mte,
            gen_helper_sve_ldff1bss_r_mte,
            gen_helper_sve_ldff1bhs_r_mte,
            gen_helper_sve_ldff1dd_be_r_mte } },
    };

    if (sve_access_check(s)) {
        TCGv_i64 addr = new_tmp_a64(s);
        tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
        do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
                   fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
    }
    return true;
}

static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
{
    static gen_helper_gvec_mem * const fns[2][2][16] = {
        { /* mte inactive, little-endian */
          { gen_helper_sve_ldnf1bb_r,
            gen_helper_sve_ldnf1bhu_r,
            gen_helper_sve_ldnf1bsu_r,
            gen_helper_sve_ldnf1bdu_r,

            gen_helper_sve_ldnf1sds_le_r,
            gen_helper_sve_ldnf1hh_le_r,
            gen_helper_sve_ldnf1hsu_le_r,
            gen_helper_sve_ldnf1hdu_le_r,

            gen_helper_sve_ldnf1hds_le_r,
            gen_helper_sve_ldnf1hss_le_r,
            gen_helper_sve_ldnf1ss_le_r,
            gen_helper_sve_ldnf1sdu_le_r,

            gen_helper_sve_ldnf1bds_r,
            gen_helper_sve_ldnf1bss_r,
            gen_helper_sve_ldnf1bhs_r,
            gen_helper_sve_ldnf1dd_le_r },

          /* mte inactive, big-endian */
          { gen_helper_sve_ldnf1bb_r,
            gen_helper_sve_ldnf1bhu_r,
            gen_helper_sve_ldnf1bsu_r,
            gen_helper_sve_ldnf1bdu_r,

            gen_helper_sve_ldnf1sds_be_r,
            gen_helper_sve_ldnf1hh_be_r,
            gen_helper_sve_ldnf1hsu_be_r,
            gen_helper_sve_ldnf1hdu_be_r,

            gen_helper_sve_ldnf1hds_be_r,
            gen_helper_sve_ldnf1hss_be_r,
            gen_helper_sve_ldnf1ss_be_r,
            gen_helper_sve_ldnf1sdu_be_r,

            gen_helper_sve_ldnf1bds_r,
            gen_helper_sve_ldnf1bss_r,
            gen_helper_sve_ldnf1bhs_r,
            gen_helper_sve_ldnf1dd_be_r } },

        { /* mte active, little-endian */
          { gen_helper_sve_ldnf1bb_r_mte,
            gen_helper_sve_ldnf1bhu_r_mte,
            gen_helper_sve_ldnf1bsu_r_mte,
            gen_helper_sve_ldnf1bdu_r_mte,

            gen_helper_sve_ldnf1sds_le_r_mte,
            gen_helper_sve_ldnf1hh_le_r_mte,
            gen_helper_sve_ldnf1hsu_le_r_mte,
            gen_helper_sve_ldnf1hdu_le_r_mte,

            gen_helper_sve_ldnf1hds_le_r_mte,
            gen_helper_sve_ldnf1hss_le_r_mte,
            gen_helper_sve_ldnf1ss_le_r_mte,
            gen_helper_sve_ldnf1sdu_le_r_mte,

            gen_helper_sve_ldnf1bds_r_mte,
            gen_helper_sve_ldnf1bss_r_mte,
            gen_helper_sve_ldnf1bhs_r_mte,
            gen_helper_sve_ldnf1dd_le_r_mte },

          /* mte active, big-endian */
          { gen_helper_sve_ldnf1bb_r_mte,
            gen_helper_sve_ldnf1bhu_r_mte,
            gen_helper_sve_ldnf1bsu_r_mte,
            gen_helper_sve_ldnf1bdu_r_mte,

            gen_helper_sve_ldnf1sds_be_r_mte,
            gen_helper_sve_ldnf1hh_be_r_mte,
            gen_helper_sve_ldnf1hsu_be_r_mte,
            gen_helper_sve_ldnf1hdu_be_r_mte,

            gen_helper_sve_ldnf1hds_be_r_mte,
            gen_helper_sve_ldnf1hss_be_r_mte,
            gen_helper_sve_ldnf1ss_be_r_mte,
            gen_helper_sve_ldnf1sdu_be_r_mte,

            gen_helper_sve_ldnf1bds_r_mte,
            gen_helper_sve_ldnf1bss_r_mte,
            gen_helper_sve_ldnf1bhs_r_mte,
            gen_helper_sve_ldnf1dd_be_r_mte } },
    };

    if (sve_access_check(s)) {
        int vsz = vec_full_reg_size(s);
        int elements = vsz >> dtype_esz[a->dtype];
        int off = (a->imm * elements) << dtype_msz(a->dtype);
        TCGv_i64 addr = new_tmp_a64(s);

        tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
        do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
                   fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
    }
    return true;
}

static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_pg;
    int poff;

    /* Load the first quadword using the normal predicated load helpers.  */
    poff = pred_full_reg_offset(s, pg);
    if (vsz > 16) {
        /*
         * Zero-extend the first 16 bits of the predicate into a temporary.
         * This avoids triggering an assert making sure we don't have bits
         * set within a predicate beyond VQ, but we have lowered VQ to 1
         * for this load operation.
         */
        TCGv_i64 tmp = tcg_temp_new_i64();
#ifdef HOST_WORDS_BIGENDIAN
        poff += 6;
#endif
        tcg_gen_ld16u_i64(tmp, cpu_env, poff);

        poff = offsetof(CPUARMState, vfp.preg_tmp);
        tcg_gen_st_i64(tmp, cpu_env, poff);
        tcg_temp_free_i64(tmp);
    }

    t_pg = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_pg, cpu_env, poff);

    gen_helper_gvec_mem *fn
        = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
    fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));

    tcg_temp_free_ptr(t_pg);

    /* Replicate that first quadword.  */
    if (vsz > 16) {
        int doff = vec_full_reg_offset(s, zt);
        tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16);
    }
}

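/*
 * LD1RQ loads one 16-byte quadword under predicate control and then
 * replicates it across the vector; do_ldrq achieves this by invoking
 * the ordinary LD1 helper with the vector length clamped to 16 bytes
 * via simd_desc(16, 16, zt).
 */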
static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
{
    if (a->rm == 31) {
        return false;
    }
    if (sve_access_check(s)) {
        int msz = dtype_msz(a->dtype);
        TCGv_i64 addr = new_tmp_a64(s);
        tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
        do_ldrq(s, a->rd, a->pg, addr, a->dtype);
    }
    return true;
}

static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 addr = new_tmp_a64(s);
        tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
        do_ldrq(s, a->rd, a->pg, addr, a->dtype);
    }
    return true;
}

static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
{
    unsigned vsz = vec_full_reg_size(s);
    unsigned vsz_r32;
    TCGv_ptr t_pg;
    int poff, doff;

    if (vsz < 32) {
        /*
         * Note that this UNDEFINED check comes after CheckSVEEnabled()
         * in the ARM pseudocode, which is the sve_access_check() done
         * in our caller.  We should not now return false from the caller.
         */
        unallocated_encoding(s);
        return;
    }

    /* Load the first octaword using the normal predicated load helpers.  */

    poff = pred_full_reg_offset(s, pg);
    if (vsz > 32) {
        /*
         * Zero-extend the first 32 bits of the predicate into a temporary.
         * This avoids triggering an assert making sure we don't have bits
         * set within a predicate beyond VQ, but we have lowered VQ to 2
         * for this load operation.
         */
        TCGv_i64 tmp = tcg_temp_new_i64();
#ifdef HOST_WORDS_BIGENDIAN
        poff += 4;
#endif
        tcg_gen_ld32u_i64(tmp, cpu_env, poff);

        poff = offsetof(CPUARMState, vfp.preg_tmp);
        tcg_gen_st_i64(tmp, cpu_env, poff);
        tcg_temp_free_i64(tmp);
    }

    t_pg = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_pg, cpu_env, poff);

    gen_helper_gvec_mem *fn
        = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
    fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));

    tcg_temp_free_ptr(t_pg);

    /*
     * Replicate that first octaword.
     * The replication happens in units of 32; if the full vector size
     * is not a multiple of 32, the final bits are zeroed.
     */
    doff = vec_full_reg_offset(s, zt);
    vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32);
    if (vsz >= 64) {
        tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32);
    }
    vsz -= vsz_r32;
    if (vsz) {
        tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0);
    }
}

static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
{
    if (!dc_isar_feature(aa64_sve_f64mm, s)) {
        return false;
    }
    if (a->rm == 31) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 addr = new_tmp_a64(s);
        tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
        do_ldro(s, a->rd, a->pg, addr, a->dtype);
    }
    return true;
}

static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
{
    if (!dc_isar_feature(aa64_sve_f64mm, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 addr = new_tmp_a64(s);
        tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
        do_ldro(s, a->rd, a->pg, addr, a->dtype);
    }
    return true;
}

/* Load and broadcast element.  */
static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
{
    unsigned vsz = vec_full_reg_size(s);
    unsigned psz = pred_full_reg_size(s);
    unsigned esz = dtype_esz[a->dtype];
    unsigned msz = dtype_msz(a->dtype);
    TCGLabel *over;
    TCGv_i64 temp, clean_addr;

    if (!sve_access_check(s)) {
        return true;
    }

    over = gen_new_label();

    /* If the guarding predicate has no bits set, no load occurs.  */
    if (psz <= 8) {
        /* Reduce the pred_esz_masks value simply to reduce the
         * size of the code generated here.
         */
        uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
        temp = tcg_temp_new_i64();
        tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
        tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
        tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
        tcg_temp_free_i64(temp);
    } else {
        TCGv_i32 t32 = tcg_temp_new_i32();
        find_last_active(s, t32, esz, a->pg);
        tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over);
        tcg_temp_free_i32(t32);
    }

    /* Load the data.  */
    temp = tcg_temp_new_i64();
    tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz);
    clean_addr = gen_mte_check1(s, temp, false, true, msz);

    tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
                        finalize_memop(s, dtype_mop[a->dtype]));

    /* Broadcast to *all* elements.  */
    tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
                         vsz, vsz, temp);
    tcg_temp_free_i64(temp);

    /* Zero the inactive elements.  */
    gen_set_label(over);
    return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false);
}

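/*
 * For the contiguous stores below, the fn_single table is indexed by
 * [mte][be][msz][esz] and the fn_multiple table by [mte][be][nreg - 1][msz];
 * ST2/ST3/ST4 require msz == esz, so only msz is needed there.
 */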
static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
                      int msz, int esz, int nreg)
{
    static gen_helper_gvec_mem * const fn_single[2][2][4][4] = {
        { { { gen_helper_sve_st1bb_r,
              gen_helper_sve_st1bh_r,
              gen_helper_sve_st1bs_r,
              gen_helper_sve_st1bd_r },
            { NULL,
              gen_helper_sve_st1hh_le_r,
              gen_helper_sve_st1hs_le_r,
              gen_helper_sve_st1hd_le_r },
            { NULL, NULL,
              gen_helper_sve_st1ss_le_r,
              gen_helper_sve_st1sd_le_r },
            { NULL, NULL, NULL,
              gen_helper_sve_st1dd_le_r } },
          { { gen_helper_sve_st1bb_r,
              gen_helper_sve_st1bh_r,
              gen_helper_sve_st1bs_r,
              gen_helper_sve_st1bd_r },
            { NULL,
              gen_helper_sve_st1hh_be_r,
              gen_helper_sve_st1hs_be_r,
              gen_helper_sve_st1hd_be_r },
            { NULL, NULL,
              gen_helper_sve_st1ss_be_r,
              gen_helper_sve_st1sd_be_r },
            { NULL, NULL, NULL,
              gen_helper_sve_st1dd_be_r } } },

        { { { gen_helper_sve_st1bb_r_mte,
              gen_helper_sve_st1bh_r_mte,
              gen_helper_sve_st1bs_r_mte,
              gen_helper_sve_st1bd_r_mte },
            { NULL,
              gen_helper_sve_st1hh_le_r_mte,
              gen_helper_sve_st1hs_le_r_mte,
              gen_helper_sve_st1hd_le_r_mte },
            { NULL, NULL,
              gen_helper_sve_st1ss_le_r_mte,
              gen_helper_sve_st1sd_le_r_mte },
            { NULL, NULL, NULL,
              gen_helper_sve_st1dd_le_r_mte } },
          { { gen_helper_sve_st1bb_r_mte,
              gen_helper_sve_st1bh_r_mte,
              gen_helper_sve_st1bs_r_mte,
              gen_helper_sve_st1bd_r_mte },
            { NULL,
              gen_helper_sve_st1hh_be_r_mte,
              gen_helper_sve_st1hs_be_r_mte,
              gen_helper_sve_st1hd_be_r_mte },
            { NULL, NULL,
              gen_helper_sve_st1ss_be_r_mte,
              gen_helper_sve_st1sd_be_r_mte },
            { NULL, NULL, NULL,
              gen_helper_sve_st1dd_be_r_mte } } },
    };
    static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = {
        { { { gen_helper_sve_st2bb_r,
              gen_helper_sve_st2hh_le_r,
              gen_helper_sve_st2ss_le_r,
              gen_helper_sve_st2dd_le_r },
            { gen_helper_sve_st3bb_r,
              gen_helper_sve_st3hh_le_r,
              gen_helper_sve_st3ss_le_r,
              gen_helper_sve_st3dd_le_r },
            { gen_helper_sve_st4bb_r,
              gen_helper_sve_st4hh_le_r,
              gen_helper_sve_st4ss_le_r,
              gen_helper_sve_st4dd_le_r } },
          { { gen_helper_sve_st2bb_r,
              gen_helper_sve_st2hh_be_r,
              gen_helper_sve_st2ss_be_r,
              gen_helper_sve_st2dd_be_r },
            { gen_helper_sve_st3bb_r,
              gen_helper_sve_st3hh_be_r,
              gen_helper_sve_st3ss_be_r,
              gen_helper_sve_st3dd_be_r },
            { gen_helper_sve_st4bb_r,
              gen_helper_sve_st4hh_be_r,
              gen_helper_sve_st4ss_be_r,
              gen_helper_sve_st4dd_be_r } } },
        { { { gen_helper_sve_st2bb_r_mte,
              gen_helper_sve_st2hh_le_r_mte,
              gen_helper_sve_st2ss_le_r_mte,
              gen_helper_sve_st2dd_le_r_mte },
            { gen_helper_sve_st3bb_r_mte,
              gen_helper_sve_st3hh_le_r_mte,
              gen_helper_sve_st3ss_le_r_mte,
              gen_helper_sve_st3dd_le_r_mte },
            { gen_helper_sve_st4bb_r_mte,
              gen_helper_sve_st4hh_le_r_mte,
              gen_helper_sve_st4ss_le_r_mte,
              gen_helper_sve_st4dd_le_r_mte } },
          { { gen_helper_sve_st2bb_r_mte,
              gen_helper_sve_st2hh_be_r_mte,
              gen_helper_sve_st2ss_be_r_mte,
              gen_helper_sve_st2dd_be_r_mte },
            { gen_helper_sve_st3bb_r_mte,
              gen_helper_sve_st3hh_be_r_mte,
              gen_helper_sve_st3ss_be_r_mte,
              gen_helper_sve_st3dd_be_r_mte },
            { gen_helper_sve_st4bb_r_mte,
              gen_helper_sve_st4hh_be_r_mte,
              gen_helper_sve_st4ss_be_r_mte,
              gen_helper_sve_st4dd_be_r_mte } } },
    };
    gen_helper_gvec_mem *fn;
    int be = s->be_data == MO_BE;

    if (nreg == 0) {
        /* ST1 */
        fn = fn_single[s->mte_active[0]][be][msz][esz];
        nreg = 1;
    } else {
        /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
        assert(msz == esz);
        fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
    }
    assert(fn != NULL);
    do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn);
}

static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
{
    if (a->rm == 31 || a->msz > a->esz) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 addr = new_tmp_a64(s);
        tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
        tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
        do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
    }
    return true;
}

static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
{
    if (a->msz > a->esz) {
        return false;
    }
    if (sve_access_check(s)) {
        int vsz = vec_full_reg_size(s);
        int elements = vsz >> a->esz;
        TCGv_i64 addr = new_tmp_a64(s);

        tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
                         (a->imm * elements * (a->nreg + 1)) << a->msz);
        do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
    }
    return true;
}

/*
 *** SVE gather loads / scatter stores
 */

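/*
 * Common expansion for gather loads and scatter stores.  The helper
 * receives pointers to Zt, Pg and Zm plus the scalar base; the low
 * bits of the simd_desc data field carry the left-shift applied to
 * the vector offsets, with any MTE descriptor above SVE_MTEDESC_SHIFT.
 */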
static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
                       int scale, TCGv_i64 scalar, int msz, bool is_write,
                       gen_helper_gvec_mem_scatter *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zm = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();
    TCGv_ptr t_zt = tcg_temp_new_ptr();
    TCGv_i32 t_desc;
    int desc = 0;

    if (s->mte_active[0]) {
        desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
        desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
        desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
        desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
        desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
        desc <<= SVE_MTEDESC_SHIFT;
    }
    desc = simd_desc(vsz, vsz, desc | scale);
    t_desc = tcg_const_i32(desc);

    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
    tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
    tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
    fn(cpu_env, t_zt, t_pg, t_zm, scalar, t_desc);

    tcg_temp_free_ptr(t_zt);
    tcg_temp_free_ptr(t_zm);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_i32(t_desc);
}

/* Indexed by [mte][be][ff][xs][u][msz].  */
static gen_helper_gvec_mem_scatter * const
gather_load_fn32[2][2][2][2][2][3] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { { { gen_helper_sve_ldbss_zsu,
                  gen_helper_sve_ldhss_le_zsu,
                  NULL, },
                { gen_helper_sve_ldbsu_zsu,
                  gen_helper_sve_ldhsu_le_zsu,
                  gen_helper_sve_ldss_le_zsu, } },
              { { gen_helper_sve_ldbss_zss,
                  gen_helper_sve_ldhss_le_zss,
                  NULL, },
                { gen_helper_sve_ldbsu_zss,
                  gen_helper_sve_ldhsu_le_zss,
                  gen_helper_sve_ldss_le_zss, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbss_zsu,
                  gen_helper_sve_ldffhss_le_zsu,
                  NULL, },
                { gen_helper_sve_ldffbsu_zsu,
                  gen_helper_sve_ldffhsu_le_zsu,
                  gen_helper_sve_ldffss_le_zsu, } },
              { { gen_helper_sve_ldffbss_zss,
                  gen_helper_sve_ldffhss_le_zss,
                  NULL, },
                { gen_helper_sve_ldffbsu_zss,
                  gen_helper_sve_ldffhsu_le_zss,
                  gen_helper_sve_ldffss_le_zss, } } } },

        { /* Big-endian */
            { { { gen_helper_sve_ldbss_zsu,
                  gen_helper_sve_ldhss_be_zsu,
                  NULL, },
                { gen_helper_sve_ldbsu_zsu,
                  gen_helper_sve_ldhsu_be_zsu,
                  gen_helper_sve_ldss_be_zsu, } },
              { { gen_helper_sve_ldbss_zss,
                  gen_helper_sve_ldhss_be_zss,
                  NULL, },
                { gen_helper_sve_ldbsu_zss,
                  gen_helper_sve_ldhsu_be_zss,
                  gen_helper_sve_ldss_be_zss, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbss_zsu,
                  gen_helper_sve_ldffhss_be_zsu,
                  NULL, },
                { gen_helper_sve_ldffbsu_zsu,
                  gen_helper_sve_ldffhsu_be_zsu,
                  gen_helper_sve_ldffss_be_zsu, } },
              { { gen_helper_sve_ldffbss_zss,
                  gen_helper_sve_ldffhss_be_zss,
                  NULL, },
                { gen_helper_sve_ldffbsu_zss,
                  gen_helper_sve_ldffhsu_be_zss,
                  gen_helper_sve_ldffss_be_zss, } } } } },
    { /* MTE Active */
        { /* Little-endian */
            { { { gen_helper_sve_ldbss_zsu_mte,
                  gen_helper_sve_ldhss_le_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldbsu_zsu_mte,
                  gen_helper_sve_ldhsu_le_zsu_mte,
                  gen_helper_sve_ldss_le_zsu_mte, } },
              { { gen_helper_sve_ldbss_zss_mte,
                  gen_helper_sve_ldhss_le_zss_mte,
                  NULL, },
                { gen_helper_sve_ldbsu_zss_mte,
                  gen_helper_sve_ldhsu_le_zss_mte,
                  gen_helper_sve_ldss_le_zss_mte, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbss_zsu_mte,
                  gen_helper_sve_ldffhss_le_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldffbsu_zsu_mte,
                  gen_helper_sve_ldffhsu_le_zsu_mte,
                  gen_helper_sve_ldffss_le_zsu_mte, } },
              { { gen_helper_sve_ldffbss_zss_mte,
                  gen_helper_sve_ldffhss_le_zss_mte,
                  NULL, },
                { gen_helper_sve_ldffbsu_zss_mte,
                  gen_helper_sve_ldffhsu_le_zss_mte,
                  gen_helper_sve_ldffss_le_zss_mte, } } } },

        { /* Big-endian */
            { { { gen_helper_sve_ldbss_zsu_mte,
                  gen_helper_sve_ldhss_be_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldbsu_zsu_mte,
                  gen_helper_sve_ldhsu_be_zsu_mte,
                  gen_helper_sve_ldss_be_zsu_mte, } },
              { { gen_helper_sve_ldbss_zss_mte,
                  gen_helper_sve_ldhss_be_zss_mte,
                  NULL, },
                { gen_helper_sve_ldbsu_zss_mte,
                  gen_helper_sve_ldhsu_be_zss_mte,
                  gen_helper_sve_ldss_be_zss_mte, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbss_zsu_mte,
                  gen_helper_sve_ldffhss_be_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldffbsu_zsu_mte,
                  gen_helper_sve_ldffhsu_be_zsu_mte,
                  gen_helper_sve_ldffss_be_zsu_mte, } },
              { { gen_helper_sve_ldffbss_zss_mte,
                  gen_helper_sve_ldffhss_be_zss_mte,
                  NULL, },
                { gen_helper_sve_ldffbsu_zss_mte,
                  gen_helper_sve_ldffhsu_be_zss_mte,
                  gen_helper_sve_ldffss_be_zss_mte, } } } } },
};

/* Note that we overload xs=2 to indicate 64-bit offset.  */
static gen_helper_gvec_mem_scatter * const
gather_load_fn64[2][2][2][3][2][4] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { { { gen_helper_sve_ldbds_zsu,
                  gen_helper_sve_ldhds_le_zsu,
                  gen_helper_sve_ldsds_le_zsu,
                  NULL, },
                { gen_helper_sve_ldbdu_zsu,
                  gen_helper_sve_ldhdu_le_zsu,
                  gen_helper_sve_ldsdu_le_zsu,
                  gen_helper_sve_lddd_le_zsu, } },
              { { gen_helper_sve_ldbds_zss,
                  gen_helper_sve_ldhds_le_zss,
                  gen_helper_sve_ldsds_le_zss,
                  NULL, },
                { gen_helper_sve_ldbdu_zss,
                  gen_helper_sve_ldhdu_le_zss,
                  gen_helper_sve_ldsdu_le_zss,
                  gen_helper_sve_lddd_le_zss, } },
              { { gen_helper_sve_ldbds_zd,
                  gen_helper_sve_ldhds_le_zd,
                  gen_helper_sve_ldsds_le_zd,
                  NULL, },
                { gen_helper_sve_ldbdu_zd,
                  gen_helper_sve_ldhdu_le_zd,
                  gen_helper_sve_ldsdu_le_zd,
                  gen_helper_sve_lddd_le_zd, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbds_zsu,
                  gen_helper_sve_ldffhds_le_zsu,
                  gen_helper_sve_ldffsds_le_zsu,
                  NULL, },
                { gen_helper_sve_ldffbdu_zsu,
                  gen_helper_sve_ldffhdu_le_zsu,
                  gen_helper_sve_ldffsdu_le_zsu,
                  gen_helper_sve_ldffdd_le_zsu, } },
              { { gen_helper_sve_ldffbds_zss,
                  gen_helper_sve_ldffhds_le_zss,
                  gen_helper_sve_ldffsds_le_zss,
                  NULL, },
                { gen_helper_sve_ldffbdu_zss,
                  gen_helper_sve_ldffhdu_le_zss,
                  gen_helper_sve_ldffsdu_le_zss,
                  gen_helper_sve_ldffdd_le_zss, } },
              { { gen_helper_sve_ldffbds_zd,
                  gen_helper_sve_ldffhds_le_zd,
                  gen_helper_sve_ldffsds_le_zd,
                  NULL, },
                { gen_helper_sve_ldffbdu_zd,
                  gen_helper_sve_ldffhdu_le_zd,
                  gen_helper_sve_ldffsdu_le_zd,
                  gen_helper_sve_ldffdd_le_zd, } } } },

        { /* Big-endian */
            { { { gen_helper_sve_ldbds_zsu,
                  gen_helper_sve_ldhds_be_zsu,
                  gen_helper_sve_ldsds_be_zsu,
                  NULL, },
                { gen_helper_sve_ldbdu_zsu,
                  gen_helper_sve_ldhdu_be_zsu,
                  gen_helper_sve_ldsdu_be_zsu,
                  gen_helper_sve_lddd_be_zsu, } },
              { { gen_helper_sve_ldbds_zss,
                  gen_helper_sve_ldhds_be_zss,
                  gen_helper_sve_ldsds_be_zss,
                  NULL, },
                { gen_helper_sve_ldbdu_zss,
                  gen_helper_sve_ldhdu_be_zss,
                  gen_helper_sve_ldsdu_be_zss,
                  gen_helper_sve_lddd_be_zss, } },
              { { gen_helper_sve_ldbds_zd,
                  gen_helper_sve_ldhds_be_zd,
                  gen_helper_sve_ldsds_be_zd,
                  NULL, },
                { gen_helper_sve_ldbdu_zd,
                  gen_helper_sve_ldhdu_be_zd,
                  gen_helper_sve_ldsdu_be_zd,
                  gen_helper_sve_lddd_be_zd, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbds_zsu,
                  gen_helper_sve_ldffhds_be_zsu,
                  gen_helper_sve_ldffsds_be_zsu,
                  NULL, },
                { gen_helper_sve_ldffbdu_zsu,
                  gen_helper_sve_ldffhdu_be_zsu,
                  gen_helper_sve_ldffsdu_be_zsu,
                  gen_helper_sve_ldffdd_be_zsu, } },
              { { gen_helper_sve_ldffbds_zss,
                  gen_helper_sve_ldffhds_be_zss,
                  gen_helper_sve_ldffsds_be_zss,
                  NULL, },
                { gen_helper_sve_ldffbdu_zss,
                  gen_helper_sve_ldffhdu_be_zss,
                  gen_helper_sve_ldffsdu_be_zss,
                  gen_helper_sve_ldffdd_be_zss, } },
              { { gen_helper_sve_ldffbds_zd,
                  gen_helper_sve_ldffhds_be_zd,
                  gen_helper_sve_ldffsds_be_zd,
                  NULL, },
                { gen_helper_sve_ldffbdu_zd,
                  gen_helper_sve_ldffhdu_be_zd,
                  gen_helper_sve_ldffsdu_be_zd,
                  gen_helper_sve_ldffdd_be_zd, } } } } },
    { /* MTE Active */
        { /* Little-endian */
            { { { gen_helper_sve_ldbds_zsu_mte,
                  gen_helper_sve_ldhds_le_zsu_mte,
                  gen_helper_sve_ldsds_le_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zsu_mte,
                  gen_helper_sve_ldhdu_le_zsu_mte,
                  gen_helper_sve_ldsdu_le_zsu_mte,
                  gen_helper_sve_lddd_le_zsu_mte, } },
              { { gen_helper_sve_ldbds_zss_mte,
                  gen_helper_sve_ldhds_le_zss_mte,
                  gen_helper_sve_ldsds_le_zss_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zss_mte,
                  gen_helper_sve_ldhdu_le_zss_mte,
                  gen_helper_sve_ldsdu_le_zss_mte,
                  gen_helper_sve_lddd_le_zss_mte, } },
              { { gen_helper_sve_ldbds_zd_mte,
                  gen_helper_sve_ldhds_le_zd_mte,
                  gen_helper_sve_ldsds_le_zd_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zd_mte,
                  gen_helper_sve_ldhdu_le_zd_mte,
                  gen_helper_sve_ldsdu_le_zd_mte,
                  gen_helper_sve_lddd_le_zd_mte, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbds_zsu_mte,
                  gen_helper_sve_ldffhds_le_zsu_mte,
                  gen_helper_sve_ldffsds_le_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zsu_mte,
                  gen_helper_sve_ldffhdu_le_zsu_mte,
                  gen_helper_sve_ldffsdu_le_zsu_mte,
                  gen_helper_sve_ldffdd_le_zsu_mte, } },
              { { gen_helper_sve_ldffbds_zss_mte,
                  gen_helper_sve_ldffhds_le_zss_mte,
                  gen_helper_sve_ldffsds_le_zss_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zss_mte,
                  gen_helper_sve_ldffhdu_le_zss_mte,
                  gen_helper_sve_ldffsdu_le_zss_mte,
                  gen_helper_sve_ldffdd_le_zss_mte, } },
              { { gen_helper_sve_ldffbds_zd_mte,
                  gen_helper_sve_ldffhds_le_zd_mte,
                  gen_helper_sve_ldffsds_le_zd_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zd_mte,
                  gen_helper_sve_ldffhdu_le_zd_mte,
                  gen_helper_sve_ldffsdu_le_zd_mte,
                  gen_helper_sve_ldffdd_le_zd_mte, } } } },

        { /* Big-endian */
            { { { gen_helper_sve_ldbds_zsu_mte,
                  gen_helper_sve_ldhds_be_zsu_mte,
                  gen_helper_sve_ldsds_be_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zsu_mte,
                  gen_helper_sve_ldhdu_be_zsu_mte,
                  gen_helper_sve_ldsdu_be_zsu_mte,
                  gen_helper_sve_lddd_be_zsu_mte, } },
              { { gen_helper_sve_ldbds_zss_mte,
                  gen_helper_sve_ldhds_be_zss_mte,
                  gen_helper_sve_ldsds_be_zss_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zss_mte,
                  gen_helper_sve_ldhdu_be_zss_mte,
                  gen_helper_sve_ldsdu_be_zss_mte,
                  gen_helper_sve_lddd_be_zss_mte, } },
              { { gen_helper_sve_ldbds_zd_mte,
                  gen_helper_sve_ldhds_be_zd_mte,
                  gen_helper_sve_ldsds_be_zd_mte,
                  NULL, },
                { gen_helper_sve_ldbdu_zd_mte,
                  gen_helper_sve_ldhdu_be_zd_mte,
                  gen_helper_sve_ldsdu_be_zd_mte,
                  gen_helper_sve_lddd_be_zd_mte, } } },

            /* First-fault */
            { { { gen_helper_sve_ldffbds_zsu_mte,
                  gen_helper_sve_ldffhds_be_zsu_mte,
                  gen_helper_sve_ldffsds_be_zsu_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zsu_mte,
                  gen_helper_sve_ldffhdu_be_zsu_mte,
                  gen_helper_sve_ldffsdu_be_zsu_mte,
                  gen_helper_sve_ldffdd_be_zsu_mte, } },
              { { gen_helper_sve_ldffbds_zss_mte,
                  gen_helper_sve_ldffhds_be_zss_mte,
                  gen_helper_sve_ldffsds_be_zss_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zss_mte,
                  gen_helper_sve_ldffhdu_be_zss_mte,
                  gen_helper_sve_ldffsdu_be_zss_mte,
                  gen_helper_sve_ldffdd_be_zss_mte, } },
              { { gen_helper_sve_ldffbds_zd_mte,
                  gen_helper_sve_ldffhds_be_zd_mte,
                  gen_helper_sve_ldffsds_be_zd_mte,
                  NULL, },
                { gen_helper_sve_ldffbdu_zd_mte,
                  gen_helper_sve_ldffhdu_be_zd_mte,
                  gen_helper_sve_ldffsdu_be_zd_mte,
                  gen_helper_sve_ldffdd_be_zd_mte, } } } } },
};

static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
{
    gen_helper_gvec_mem_scatter *fn = NULL;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz];
        break;
    case MO_64:
        fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
        break;
    }
    assert(fn != NULL);

    do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
               cpu_reg_sp(s, a->rn), a->msz, false, fn);
    return true;
}

static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
{
    gen_helper_gvec_mem_scatter *fn = NULL;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];
    TCGv_i64 imm;

    if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz];
        break;
    case MO_64:
        fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz];
        break;
    }
    assert(fn != NULL);

    /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
     * by loading the immediate into the scalar parameter.
     */
    imm = tcg_const_i64(a->imm << a->msz);
    do_mem_zpz(s, a->rd, a->pg, a->rn, 0, imm, a->msz, false, fn);
    tcg_temp_free_i64(imm);
    return true;
}

static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return trans_LD1_zprz(s, a);
}

6503 /* Indexed by [mte][be][xs][msz]. */
6504 static gen_helper_gvec_mem_scatter
* const scatter_store_fn32
[2][2][2][3] = {
6505 { /* MTE Inactive */
6506 { /* Little-endian */
6507 { gen_helper_sve_stbs_zsu
,
6508 gen_helper_sve_sths_le_zsu
,
6509 gen_helper_sve_stss_le_zsu
, },
6510 { gen_helper_sve_stbs_zss
,
6511 gen_helper_sve_sths_le_zss
,
6512 gen_helper_sve_stss_le_zss
, } },
6514 { gen_helper_sve_stbs_zsu
,
6515 gen_helper_sve_sths_be_zsu
,
6516 gen_helper_sve_stss_be_zsu
, },
6517 { gen_helper_sve_stbs_zss
,
6518 gen_helper_sve_sths_be_zss
,
6519 gen_helper_sve_stss_be_zss
, } } },
6521 { /* Little-endian */
6522 { gen_helper_sve_stbs_zsu_mte
,
6523 gen_helper_sve_sths_le_zsu_mte
,
6524 gen_helper_sve_stss_le_zsu_mte
, },
6525 { gen_helper_sve_stbs_zss_mte
,
6526 gen_helper_sve_sths_le_zss_mte
,
6527 gen_helper_sve_stss_le_zss_mte
, } },
6529 { gen_helper_sve_stbs_zsu_mte
,
6530 gen_helper_sve_sths_be_zsu_mte
,
6531 gen_helper_sve_stss_be_zsu_mte
, },
6532 { gen_helper_sve_stbs_zss_mte
,
6533 gen_helper_sve_sths_be_zss_mte
,
6534 gen_helper_sve_stss_be_zss_mte
, } } },
6537 /* Note that we overload xs=2 to indicate 64-bit offset. */
6538 static gen_helper_gvec_mem_scatter
* const scatter_store_fn64
[2][2][3][4] = {
6539 { /* MTE Inactive */
6540 { /* Little-endian */
6541 { gen_helper_sve_stbd_zsu
,
6542 gen_helper_sve_sthd_le_zsu
,
6543 gen_helper_sve_stsd_le_zsu
,
6544 gen_helper_sve_stdd_le_zsu
, },
6545 { gen_helper_sve_stbd_zss
,
6546 gen_helper_sve_sthd_le_zss
,
6547 gen_helper_sve_stsd_le_zss
,
6548 gen_helper_sve_stdd_le_zss
, },
6549 { gen_helper_sve_stbd_zd
,
6550 gen_helper_sve_sthd_le_zd
,
6551 gen_helper_sve_stsd_le_zd
,
6552 gen_helper_sve_stdd_le_zd
, } },
6554 { gen_helper_sve_stbd_zsu
,
6555 gen_helper_sve_sthd_be_zsu
,
6556 gen_helper_sve_stsd_be_zsu
,
6557 gen_helper_sve_stdd_be_zsu
, },
6558 { gen_helper_sve_stbd_zss
,
6559 gen_helper_sve_sthd_be_zss
,
6560 gen_helper_sve_stsd_be_zss
,
6561 gen_helper_sve_stdd_be_zss
, },
6562 { gen_helper_sve_stbd_zd
,
6563 gen_helper_sve_sthd_be_zd
,
6564 gen_helper_sve_stsd_be_zd
,
6565 gen_helper_sve_stdd_be_zd
, } } },
6566 { /* MTE Inactive */
6567 { /* Little-endian */
6568 { gen_helper_sve_stbd_zsu_mte
,
6569 gen_helper_sve_sthd_le_zsu_mte
,
6570 gen_helper_sve_stsd_le_zsu_mte
,
6571 gen_helper_sve_stdd_le_zsu_mte
, },
6572 { gen_helper_sve_stbd_zss_mte
,
6573 gen_helper_sve_sthd_le_zss_mte
,
6574 gen_helper_sve_stsd_le_zss_mte
,
6575 gen_helper_sve_stdd_le_zss_mte
, },
6576 { gen_helper_sve_stbd_zd_mte
,
6577 gen_helper_sve_sthd_le_zd_mte
,
6578 gen_helper_sve_stsd_le_zd_mte
,
6579 gen_helper_sve_stdd_le_zd_mte
, } },
6581 { gen_helper_sve_stbd_zsu_mte
,
6582 gen_helper_sve_sthd_be_zsu_mte
,
6583 gen_helper_sve_stsd_be_zsu_mte
,
6584 gen_helper_sve_stdd_be_zsu_mte
, },
6585 { gen_helper_sve_stbd_zss_mte
,
6586 gen_helper_sve_sthd_be_zss_mte
,
6587 gen_helper_sve_stsd_be_zss_mte
,
6588 gen_helper_sve_stdd_be_zss_mte
, },
6589 { gen_helper_sve_stbd_zd_mte
,
6590 gen_helper_sve_sthd_be_zd_mte
,
6591 gen_helper_sve_stsd_be_zd_mte
,
6592 gen_helper_sve_stdd_be_zd_mte
, } } },
6595 static bool trans_ST1_zprz(DisasContext
*s
, arg_ST1_zprz
*a
)
6597 gen_helper_gvec_mem_scatter
*fn
;
6598 bool be
= s
->be_data
== MO_BE
;
6599 bool mte
= s
->mte_active
[0];
6601 if (a
->esz
< a
->msz
|| (a
->msz
== 0 && a
->scale
)) {
6604 if (!sve_access_check(s
)) {
6609 fn
= scatter_store_fn32
[mte
][be
][a
->xs
][a
->msz
];
6612 fn
= scatter_store_fn64
[mte
][be
][a
->xs
][a
->msz
];
6615 g_assert_not_reached();
6617 do_mem_zpz(s
, a
->rd
, a
->pg
, a
->rm
, a
->scale
* a
->msz
,
6618 cpu_reg_sp(s
, a
->rn
), a
->msz
, true, fn
);
6622 static bool trans_ST1_zpiz(DisasContext
*s
, arg_ST1_zpiz
*a
)
6624 gen_helper_gvec_mem_scatter
*fn
= NULL
;
6625 bool be
= s
->be_data
== MO_BE
;
6626 bool mte
= s
->mte_active
[0];
6629 if (a
->esz
< a
->msz
) {
6632 if (!sve_access_check(s
)) {
6638 fn
= scatter_store_fn32
[mte
][be
][0][a
->msz
];
6641 fn
= scatter_store_fn64
[mte
][be
][2][a
->msz
];
6646 /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
6647 * by loading the immediate into the scalar parameter.
6649 imm
= tcg_const_i64(a
->imm
<< a
->msz
);
6650 do_mem_zpz(s
, a
->rd
, a
->pg
, a
->rn
, 0, imm
, a
->msz
, true, fn
);
6651 tcg_temp_free_i64(imm
);
6655 static bool trans_STNT1_zprz(DisasContext
*s
, arg_ST1_zprz
*a
)
6657 if (!dc_isar_feature(aa64_sve2
, s
)) {
6660 return trans_ST1_zprz(s
, a
);
6667 static bool trans_PRF(DisasContext
*s
, arg_PRF
*a
)
6669 /* Prefetch is a nop within QEMU. */
6670 (void)sve_access_check(s
);
6674 static bool trans_PRF_rr(DisasContext
*s
, arg_PRF_rr
*a
)
6679 /* Prefetch is a nop within QEMU. */
6680 (void)sve_access_check(s
);
6687 * TODO: The implementation so far could handle predicated merging movprfx.
6688 * The helper functions as written take an extra source register to
6689 * use in the operation, but the result is only written when predication
6690 * succeeds. For unpredicated movprfx, we need to rearrange the helpers
6691 * to allow the final write back to the destination to be unconditional.
6692 * For predicated zeroing movprfx, we need to rearrange the helpers to
6693 * allow the final write back to zero inactives.
6695 * In the meantime, just emit the moves.
6698 static bool trans_MOVPRFX(DisasContext
*s
, arg_MOVPRFX
*a
)
6700 return do_mov_z(s
, a
->rd
, a
->rn
);
6703 static bool trans_MOVPRFX_m(DisasContext
*s
, arg_rpr_esz
*a
)
6705 if (sve_access_check(s
)) {
6706 do_sel_z(s
, a
->rd
, a
->rn
, a
->rd
, a
->pg
, a
->esz
);
6711 static bool trans_MOVPRFX_z(DisasContext
*s
, arg_rpr_esz
*a
)
6713 return do_movz_zpz(s
, a
->rd
, a
->rn
, a
->pg
, a
->esz
, false);
6717 * SVE2 Integer Multiply - Unpredicated
6720 static bool trans_MUL_zzz(DisasContext
*s
, arg_rrr_esz
*a
)
6722 if (!dc_isar_feature(aa64_sve2
, s
)) {
6725 if (sve_access_check(s
)) {
6726 gen_gvec_fn_zzz(s
, tcg_gen_gvec_mul
, a
->esz
, a
->rd
, a
->rn
, a
->rm
);
6731 static bool do_sve2_zzz_ool(DisasContext
*s
, arg_rrr_esz
*a
,
6732 gen_helper_gvec_3
*fn
)
6734 if (fn
== NULL
|| !dc_isar_feature(aa64_sve2
, s
)) {
6737 if (sve_access_check(s
)) {
6738 gen_gvec_ool_zzz(s
, fn
, a
->rd
, a
->rn
, a
->rm
, 0);
6743 static bool trans_SMULH_zzz(DisasContext
*s
, arg_rrr_esz
*a
)
6745 static gen_helper_gvec_3
* const fns
[4] = {
6746 gen_helper_gvec_smulh_b
, gen_helper_gvec_smulh_h
,
6747 gen_helper_gvec_smulh_s
, gen_helper_gvec_smulh_d
,
6749 return do_sve2_zzz_ool(s
, a
, fns
[a
->esz
]);
6752 static bool trans_UMULH_zzz(DisasContext
*s
, arg_rrr_esz
*a
)
6754 static gen_helper_gvec_3
* const fns
[4] = {
6755 gen_helper_gvec_umulh_b
, gen_helper_gvec_umulh_h
,
6756 gen_helper_gvec_umulh_s
, gen_helper_gvec_umulh_d
,
6758 return do_sve2_zzz_ool(s
, a
, fns
[a
->esz
]);
6761 static bool trans_PMUL_zzz(DisasContext
*s
, arg_rrr_esz
*a
)
6763 return do_sve2_zzz_ool(s
, a
, gen_helper_gvec_pmul_b
);
6766 static bool trans_SQDMULH_zzz(DisasContext
*s
, arg_rrr_esz
*a
)
6768 static gen_helper_gvec_3
* const fns
[4] = {
6769 gen_helper_sve2_sqdmulh_b
, gen_helper_sve2_sqdmulh_h
,
6770 gen_helper_sve2_sqdmulh_s
, gen_helper_sve2_sqdmulh_d
,
6772 return do_sve2_zzz_ool(s
, a
, fns
[a
->esz
]);
6775 static bool trans_SQRDMULH_zzz(DisasContext
*s
, arg_rrr_esz
*a
)
6777 static gen_helper_gvec_3
* const fns
[4] = {
6778 gen_helper_sve2_sqrdmulh_b
, gen_helper_sve2_sqrdmulh_h
,
6779 gen_helper_sve2_sqrdmulh_s
, gen_helper_sve2_sqrdmulh_d
,
6781 return do_sve2_zzz_ool(s
, a
, fns
[a
->esz
]);
6785 * SVE2 Integer - Predicated
6788 static bool do_sve2_zpzz_ool(DisasContext
*s
, arg_rprr_esz
*a
,
6789 gen_helper_gvec_4
*fn
)
6791 if (!dc_isar_feature(aa64_sve2
, s
)) {
6794 return do_zpzz_ool(s
, a
, fn
);
6797 static bool trans_SADALP_zpzz(DisasContext
*s
, arg_rprr_esz
*a
)
6799 static gen_helper_gvec_4
* const fns
[3] = {
6800 gen_helper_sve2_sadalp_zpzz_h
,
6801 gen_helper_sve2_sadalp_zpzz_s
,
6802 gen_helper_sve2_sadalp_zpzz_d
,
6807 return do_sve2_zpzz_ool(s
, a
, fns
[a
->esz
- 1]);
6810 static bool trans_UADALP_zpzz(DisasContext
*s
, arg_rprr_esz
*a
)
6812 static gen_helper_gvec_4
* const fns
[3] = {
6813 gen_helper_sve2_uadalp_zpzz_h
,
6814 gen_helper_sve2_uadalp_zpzz_s
,
6815 gen_helper_sve2_uadalp_zpzz_d
,
6820 return do_sve2_zpzz_ool(s
, a
, fns
[a
->esz
- 1]);
6824 * SVE2 integer unary operations (predicated)
6827 static bool do_sve2_zpz_ool(DisasContext
*s
, arg_rpr_esz
*a
,
6828 gen_helper_gvec_3
*fn
)
6830 if (!dc_isar_feature(aa64_sve2
, s
)) {
6833 return do_zpz_ool(s
, a
, fn
);
6836 static bool trans_URECPE(DisasContext
*s
, arg_rpr_esz
*a
)
6841 return do_sve2_zpz_ool(s
, a
, gen_helper_sve2_urecpe_s
);
6844 static bool trans_URSQRTE(DisasContext
*s
, arg_rpr_esz
*a
)
6849 return do_sve2_zpz_ool(s
, a
, gen_helper_sve2_ursqrte_s
);
6852 static bool trans_SQABS(DisasContext
*s
, arg_rpr_esz
*a
)
6854 static gen_helper_gvec_3
* const fns
[4] = {
6855 gen_helper_sve2_sqabs_b
, gen_helper_sve2_sqabs_h
,
6856 gen_helper_sve2_sqabs_s
, gen_helper_sve2_sqabs_d
,
6858 return do_sve2_zpz_ool(s
, a
, fns
[a
->esz
]);
6861 static bool trans_SQNEG(DisasContext
*s
, arg_rpr_esz
*a
)
6863 static gen_helper_gvec_3
* const fns
[4] = {
6864 gen_helper_sve2_sqneg_b
, gen_helper_sve2_sqneg_h
,
6865 gen_helper_sve2_sqneg_s
, gen_helper_sve2_sqneg_d
,
6867 return do_sve2_zpz_ool(s
, a
, fns
[a
->esz
]);
6870 #define DO_SVE2_ZPZZ(NAME, name) \
6871 static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
6873 static gen_helper_gvec_4 * const fns[4] = { \
6874 gen_helper_sve2_##name##_zpzz_b, gen_helper_sve2_##name##_zpzz_h, \
6875 gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d, \
6877 return do_sve2_zpzz_ool(s, a, fns[a->esz]); \
6880 DO_SVE2_ZPZZ(SQSHL
, sqshl
)
6881 DO_SVE2_ZPZZ(SQRSHL
, sqrshl
)
6882 DO_SVE2_ZPZZ(SRSHL
, srshl
)
6884 DO_SVE2_ZPZZ(UQSHL
, uqshl
)
6885 DO_SVE2_ZPZZ(UQRSHL
, uqrshl
)
6886 DO_SVE2_ZPZZ(URSHL
, urshl
)
6888 DO_SVE2_ZPZZ(SHADD
, shadd
)
6889 DO_SVE2_ZPZZ(SRHADD
, srhadd
)
6890 DO_SVE2_ZPZZ(SHSUB
, shsub
)
6892 DO_SVE2_ZPZZ(UHADD
, uhadd
)
6893 DO_SVE2_ZPZZ(URHADD
, urhadd
)
6894 DO_SVE2_ZPZZ(UHSUB
, uhsub
)
6896 DO_SVE2_ZPZZ(ADDP
, addp
)
6897 DO_SVE2_ZPZZ(SMAXP
, smaxp
)
6898 DO_SVE2_ZPZZ(UMAXP
, umaxp
)
6899 DO_SVE2_ZPZZ(SMINP
, sminp
)
6900 DO_SVE2_ZPZZ(UMINP
, uminp
)
6902 DO_SVE2_ZPZZ(SQADD_zpzz
, sqadd
)
6903 DO_SVE2_ZPZZ(UQADD_zpzz
, uqadd
)
6904 DO_SVE2_ZPZZ(SQSUB_zpzz
, sqsub
)
6905 DO_SVE2_ZPZZ(UQSUB_zpzz
, uqsub
)
6906 DO_SVE2_ZPZZ(SUQADD
, suqadd
)
6907 DO_SVE2_ZPZZ(USQADD
, usqadd
)
6910 * SVE2 Widening Integer Arithmetic
6913 static bool do_sve2_zzw_ool(DisasContext
*s
, arg_rrr_esz
*a
,
6914 gen_helper_gvec_3
*fn
, int data
)
6916 if (fn
== NULL
|| !dc_isar_feature(aa64_sve2
, s
)) {
6919 if (sve_access_check(s
)) {
6920 unsigned vsz
= vec_full_reg_size(s
);
6921 tcg_gen_gvec_3_ool(vec_full_reg_offset(s
, a
->rd
),
6922 vec_full_reg_offset(s
, a
->rn
),
6923 vec_full_reg_offset(s
, a
->rm
),
6924 vsz
, vsz
, data
, fn
);
6929 #define DO_SVE2_ZZZ_TB(NAME, name, SEL1, SEL2) \
6930 static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
6932 static gen_helper_gvec_3 * const fns[4] = { \
6933 NULL, gen_helper_sve2_##name##_h, \
6934 gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
6936 return do_sve2_zzw_ool(s, a, fns[a->esz], (SEL2 << 1) | SEL1); \
6939 DO_SVE2_ZZZ_TB(SADDLB
, saddl
, false, false)
6940 DO_SVE2_ZZZ_TB(SSUBLB
, ssubl
, false, false)
6941 DO_SVE2_ZZZ_TB(SABDLB
, sabdl
, false, false)
6943 DO_SVE2_ZZZ_TB(UADDLB
, uaddl
, false, false)
6944 DO_SVE2_ZZZ_TB(USUBLB
, usubl
, false, false)
6945 DO_SVE2_ZZZ_TB(UABDLB
, uabdl
, false, false)
6947 DO_SVE2_ZZZ_TB(SADDLT
, saddl
, true, true)
6948 DO_SVE2_ZZZ_TB(SSUBLT
, ssubl
, true, true)
6949 DO_SVE2_ZZZ_TB(SABDLT
, sabdl
, true, true)
6951 DO_SVE2_ZZZ_TB(UADDLT
, uaddl
, true, true)
6952 DO_SVE2_ZZZ_TB(USUBLT
, usubl
, true, true)
6953 DO_SVE2_ZZZ_TB(UABDLT
, uabdl
, true, true)
6955 DO_SVE2_ZZZ_TB(SADDLBT
, saddl
, false, true)
6956 DO_SVE2_ZZZ_TB(SSUBLBT
, ssubl
, false, true)
6957 DO_SVE2_ZZZ_TB(SSUBLTB
, ssubl
, true, false)
6959 DO_SVE2_ZZZ_TB(SQDMULLB_zzz
, sqdmull_zzz
, false, false)
6960 DO_SVE2_ZZZ_TB(SQDMULLT_zzz
, sqdmull_zzz
, true, true)
6962 DO_SVE2_ZZZ_TB(SMULLB_zzz
, smull_zzz
, false, false)
6963 DO_SVE2_ZZZ_TB(SMULLT_zzz
, smull_zzz
, true, true)
6965 DO_SVE2_ZZZ_TB(UMULLB_zzz
, umull_zzz
, false, false)
6966 DO_SVE2_ZZZ_TB(UMULLT_zzz
, umull_zzz
, true, true)
6968 static bool do_eor_tb(DisasContext
*s
, arg_rrr_esz
*a
, bool sel1
)
6970 static gen_helper_gvec_3
* const fns
[4] = {
6971 gen_helper_sve2_eoril_b
, gen_helper_sve2_eoril_h
,
6972 gen_helper_sve2_eoril_s
, gen_helper_sve2_eoril_d
,
6974 return do_sve2_zzw_ool(s
, a
, fns
[a
->esz
], (!sel1
<< 1) | sel1
);
6977 static bool trans_EORBT(DisasContext
*s
, arg_rrr_esz
*a
)
6979 return do_eor_tb(s
, a
, false);
6982 static bool trans_EORTB(DisasContext
*s
, arg_rrr_esz
*a
)
6984 return do_eor_tb(s
, a
, true);
6987 static bool do_trans_pmull(DisasContext
*s
, arg_rrr_esz
*a
, bool sel
)
6989 static gen_helper_gvec_3
* const fns
[4] = {
6990 gen_helper_gvec_pmull_q
, gen_helper_sve2_pmull_h
,
6991 NULL
, gen_helper_sve2_pmull_d
,
6993 if (a
->esz
== 0 && !dc_isar_feature(aa64_sve2_pmull128
, s
)) {
6996 return do_sve2_zzw_ool(s
, a
, fns
[a
->esz
], sel
);
6999 static bool trans_PMULLB(DisasContext
*s
, arg_rrr_esz
*a
)
7001 return do_trans_pmull(s
, a
, false);
7004 static bool trans_PMULLT(DisasContext
*s
, arg_rrr_esz
*a
)
7006 return do_trans_pmull(s
, a
, true);
7009 #define DO_SVE2_ZZZ_WTB(NAME, name, SEL2) \
7010 static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
7012 static gen_helper_gvec_3 * const fns[4] = { \
7013 NULL, gen_helper_sve2_##name##_h, \
7014 gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
7016 return do_sve2_zzw_ool(s, a, fns[a->esz], SEL2); \
7019 DO_SVE2_ZZZ_WTB(SADDWB
, saddw
, false)
7020 DO_SVE2_ZZZ_WTB(SADDWT
, saddw
, true)
7021 DO_SVE2_ZZZ_WTB(SSUBWB
, ssubw
, false)
7022 DO_SVE2_ZZZ_WTB(SSUBWT
, ssubw
, true)
7024 DO_SVE2_ZZZ_WTB(UADDWB
, uaddw
, false)
7025 DO_SVE2_ZZZ_WTB(UADDWT
, uaddw
, true)
7026 DO_SVE2_ZZZ_WTB(USUBWB
, usubw
, false)
7027 DO_SVE2_ZZZ_WTB(USUBWT
, usubw
, true)
7029 static void gen_sshll_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
, int64_t imm
)
7033 int halfbits
= 4 << vece
;
7036 if (shl
== halfbits
) {
7037 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7038 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(halfbits
, halfbits
));
7039 tcg_gen_and_vec(vece
, d
, n
, t
);
7040 tcg_temp_free_vec(t
);
7042 tcg_gen_sari_vec(vece
, d
, n
, halfbits
);
7043 tcg_gen_shli_vec(vece
, d
, d
, shl
);
7046 tcg_gen_shli_vec(vece
, d
, n
, halfbits
);
7047 tcg_gen_sari_vec(vece
, d
, d
, halfbits
- shl
);
7051 static void gen_ushll_i64(unsigned vece
, TCGv_i64 d
, TCGv_i64 n
, int imm
)
7053 int halfbits
= 4 << vece
;
7055 int shl
= (imm
>> 1);
7059 mask
= MAKE_64BIT_MASK(0, halfbits
);
7061 mask
= dup_const(vece
, mask
);
7063 shift
= shl
- top
* halfbits
;
7065 tcg_gen_shri_i64(d
, n
, -shift
);
7067 tcg_gen_shli_i64(d
, n
, shift
);
7069 tcg_gen_andi_i64(d
, d
, mask
);
7072 static void gen_ushll16_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t imm
)
7074 gen_ushll_i64(MO_16
, d
, n
, imm
);
7077 static void gen_ushll32_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t imm
)
7079 gen_ushll_i64(MO_32
, d
, n
, imm
);
7082 static void gen_ushll64_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t imm
)
7084 gen_ushll_i64(MO_64
, d
, n
, imm
);
7087 static void gen_ushll_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
, int64_t imm
)
7089 int halfbits
= 4 << vece
;
7094 if (shl
== halfbits
) {
7095 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7096 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(halfbits
, halfbits
));
7097 tcg_gen_and_vec(vece
, d
, n
, t
);
7098 tcg_temp_free_vec(t
);
7100 tcg_gen_shri_vec(vece
, d
, n
, halfbits
);
7101 tcg_gen_shli_vec(vece
, d
, d
, shl
);
7105 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7106 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(0, halfbits
));
7107 tcg_gen_and_vec(vece
, d
, n
, t
);
7108 tcg_temp_free_vec(t
);
7110 tcg_gen_shli_vec(vece
, d
, n
, halfbits
);
7111 tcg_gen_shri_vec(vece
, d
, d
, halfbits
- shl
);
7116 static bool do_sve2_shll_tb(DisasContext
*s
, arg_rri_esz
*a
,
7119 static const TCGOpcode sshll_list
[] = {
7120 INDEX_op_shli_vec
, INDEX_op_sari_vec
, 0
7122 static const TCGOpcode ushll_list
[] = {
7123 INDEX_op_shli_vec
, INDEX_op_shri_vec
, 0
7125 static const GVecGen2i ops
[2][3] = {
7126 { { .fniv
= gen_sshll_vec
,
7127 .opt_opc
= sshll_list
,
7128 .fno
= gen_helper_sve2_sshll_h
,
7130 { .fniv
= gen_sshll_vec
,
7131 .opt_opc
= sshll_list
,
7132 .fno
= gen_helper_sve2_sshll_s
,
7134 { .fniv
= gen_sshll_vec
,
7135 .opt_opc
= sshll_list
,
7136 .fno
= gen_helper_sve2_sshll_d
,
7138 { { .fni8
= gen_ushll16_i64
,
7139 .fniv
= gen_ushll_vec
,
7140 .opt_opc
= ushll_list
,
7141 .fno
= gen_helper_sve2_ushll_h
,
7143 { .fni8
= gen_ushll32_i64
,
7144 .fniv
= gen_ushll_vec
,
7145 .opt_opc
= ushll_list
,
7146 .fno
= gen_helper_sve2_ushll_s
,
7148 { .fni8
= gen_ushll64_i64
,
7149 .fniv
= gen_ushll_vec
,
7150 .opt_opc
= ushll_list
,
7151 .fno
= gen_helper_sve2_ushll_d
,
7155 if (a
->esz
< 0 || a
->esz
> 2 || !dc_isar_feature(aa64_sve2
, s
)) {
7158 if (sve_access_check(s
)) {
7159 unsigned vsz
= vec_full_reg_size(s
);
7160 tcg_gen_gvec_2i(vec_full_reg_offset(s
, a
->rd
),
7161 vec_full_reg_offset(s
, a
->rn
),
7162 vsz
, vsz
, (a
->imm
<< 1) | sel
,
7168 static bool trans_SSHLLB(DisasContext
*s
, arg_rri_esz
*a
)
7170 return do_sve2_shll_tb(s
, a
, false, false);
7173 static bool trans_SSHLLT(DisasContext
*s
, arg_rri_esz
*a
)
7175 return do_sve2_shll_tb(s
, a
, true, false);
7178 static bool trans_USHLLB(DisasContext
*s
, arg_rri_esz
*a
)
7180 return do_sve2_shll_tb(s
, a
, false, true);
7183 static bool trans_USHLLT(DisasContext
*s
, arg_rri_esz
*a
)
7185 return do_sve2_shll_tb(s
, a
, true, true);
7188 static bool trans_BEXT(DisasContext
*s
, arg_rrr_esz
*a
)
7190 static gen_helper_gvec_3
* const fns
[4] = {
7191 gen_helper_sve2_bext_b
, gen_helper_sve2_bext_h
,
7192 gen_helper_sve2_bext_s
, gen_helper_sve2_bext_d
,
7194 if (!dc_isar_feature(aa64_sve2_bitperm
, s
)) {
7197 return do_sve2_zzw_ool(s
, a
, fns
[a
->esz
], 0);
7200 static bool trans_BDEP(DisasContext
*s
, arg_rrr_esz
*a
)
7202 static gen_helper_gvec_3
* const fns
[4] = {
7203 gen_helper_sve2_bdep_b
, gen_helper_sve2_bdep_h
,
7204 gen_helper_sve2_bdep_s
, gen_helper_sve2_bdep_d
,
7206 if (!dc_isar_feature(aa64_sve2_bitperm
, s
)) {
7209 return do_sve2_zzw_ool(s
, a
, fns
[a
->esz
], 0);
7212 static bool trans_BGRP(DisasContext
*s
, arg_rrr_esz
*a
)
7214 static gen_helper_gvec_3
* const fns
[4] = {
7215 gen_helper_sve2_bgrp_b
, gen_helper_sve2_bgrp_h
,
7216 gen_helper_sve2_bgrp_s
, gen_helper_sve2_bgrp_d
,
7218 if (!dc_isar_feature(aa64_sve2_bitperm
, s
)) {
7221 return do_sve2_zzw_ool(s
, a
, fns
[a
->esz
], 0);
7224 static bool do_cadd(DisasContext
*s
, arg_rrr_esz
*a
, bool sq
, bool rot
)
7226 static gen_helper_gvec_3
* const fns
[2][4] = {
7227 { gen_helper_sve2_cadd_b
, gen_helper_sve2_cadd_h
,
7228 gen_helper_sve2_cadd_s
, gen_helper_sve2_cadd_d
},
7229 { gen_helper_sve2_sqcadd_b
, gen_helper_sve2_sqcadd_h
,
7230 gen_helper_sve2_sqcadd_s
, gen_helper_sve2_sqcadd_d
},
7232 return do_sve2_zzw_ool(s
, a
, fns
[sq
][a
->esz
], rot
);
7235 static bool trans_CADD_rot90(DisasContext
*s
, arg_rrr_esz
*a
)
7237 return do_cadd(s
, a
, false, false);
7240 static bool trans_CADD_rot270(DisasContext
*s
, arg_rrr_esz
*a
)
7242 return do_cadd(s
, a
, false, true);
7245 static bool trans_SQCADD_rot90(DisasContext
*s
, arg_rrr_esz
*a
)
7247 return do_cadd(s
, a
, true, false);
7250 static bool trans_SQCADD_rot270(DisasContext
*s
, arg_rrr_esz
*a
)
7252 return do_cadd(s
, a
, true, true);
7255 static bool do_sve2_zzzz_ool(DisasContext
*s
, arg_rrrr_esz
*a
,
7256 gen_helper_gvec_4
*fn
, int data
)
7258 if (fn
== NULL
|| !dc_isar_feature(aa64_sve2
, s
)) {
7261 if (sve_access_check(s
)) {
7262 gen_gvec_ool_zzzz(s
, fn
, a
->rd
, a
->rn
, a
->rm
, a
->ra
, data
);
7267 static bool do_abal(DisasContext
*s
, arg_rrrr_esz
*a
, bool uns
, bool sel
)
7269 static gen_helper_gvec_4
* const fns
[2][4] = {
7270 { NULL
, gen_helper_sve2_sabal_h
,
7271 gen_helper_sve2_sabal_s
, gen_helper_sve2_sabal_d
},
7272 { NULL
, gen_helper_sve2_uabal_h
,
7273 gen_helper_sve2_uabal_s
, gen_helper_sve2_uabal_d
},
7275 return do_sve2_zzzz_ool(s
, a
, fns
[uns
][a
->esz
], sel
);
7278 static bool trans_SABALB(DisasContext
*s
, arg_rrrr_esz
*a
)
7280 return do_abal(s
, a
, false, false);
7283 static bool trans_SABALT(DisasContext
*s
, arg_rrrr_esz
*a
)
7285 return do_abal(s
, a
, false, true);
7288 static bool trans_UABALB(DisasContext
*s
, arg_rrrr_esz
*a
)
7290 return do_abal(s
, a
, true, false);
7293 static bool trans_UABALT(DisasContext
*s
, arg_rrrr_esz
*a
)
7295 return do_abal(s
, a
, true, true);
7298 static bool do_adcl(DisasContext
*s
, arg_rrrr_esz
*a
, bool sel
)
7300 static gen_helper_gvec_4
* const fns
[2] = {
7301 gen_helper_sve2_adcl_s
,
7302 gen_helper_sve2_adcl_d
,
7305 * Note that in this case the ESZ field encodes both size and sign.
7306 * Split out 'subtract' into bit 1 of the data field for the helper.
7308 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
& 1], (a
->esz
& 2) | sel
);
7311 static bool trans_ADCLB(DisasContext
*s
, arg_rrrr_esz
*a
)
7313 return do_adcl(s
, a
, false);
7316 static bool trans_ADCLT(DisasContext
*s
, arg_rrrr_esz
*a
)
7318 return do_adcl(s
, a
, true);
7321 static bool do_sve2_fn2i(DisasContext
*s
, arg_rri_esz
*a
, GVecGen2iFn
*fn
)
7323 if (a
->esz
< 0 || !dc_isar_feature(aa64_sve2
, s
)) {
7326 if (sve_access_check(s
)) {
7327 unsigned vsz
= vec_full_reg_size(s
);
7328 unsigned rd_ofs
= vec_full_reg_offset(s
, a
->rd
);
7329 unsigned rn_ofs
= vec_full_reg_offset(s
, a
->rn
);
7330 fn(a
->esz
, rd_ofs
, rn_ofs
, a
->imm
, vsz
, vsz
);
7335 static bool trans_SSRA(DisasContext
*s
, arg_rri_esz
*a
)
7337 return do_sve2_fn2i(s
, a
, gen_gvec_ssra
);
7340 static bool trans_USRA(DisasContext
*s
, arg_rri_esz
*a
)
7342 return do_sve2_fn2i(s
, a
, gen_gvec_usra
);
7345 static bool trans_SRSRA(DisasContext
*s
, arg_rri_esz
*a
)
7347 return do_sve2_fn2i(s
, a
, gen_gvec_srsra
);
7350 static bool trans_URSRA(DisasContext
*s
, arg_rri_esz
*a
)
7352 return do_sve2_fn2i(s
, a
, gen_gvec_ursra
);
7355 static bool trans_SRI(DisasContext
*s
, arg_rri_esz
*a
)
7357 return do_sve2_fn2i(s
, a
, gen_gvec_sri
);
7360 static bool trans_SLI(DisasContext
*s
, arg_rri_esz
*a
)
7362 return do_sve2_fn2i(s
, a
, gen_gvec_sli
);
7365 static bool do_sve2_fn_zzz(DisasContext
*s
, arg_rrr_esz
*a
, GVecGen3Fn
*fn
)
7367 if (!dc_isar_feature(aa64_sve2
, s
)) {
7370 if (sve_access_check(s
)) {
7371 gen_gvec_fn_zzz(s
, fn
, a
->esz
, a
->rd
, a
->rn
, a
->rm
);
7376 static bool trans_SABA(DisasContext
*s
, arg_rrr_esz
*a
)
7378 return do_sve2_fn_zzz(s
, a
, gen_gvec_saba
);
7381 static bool trans_UABA(DisasContext
*s
, arg_rrr_esz
*a
)
7383 return do_sve2_fn_zzz(s
, a
, gen_gvec_uaba
);
7386 static bool do_sve2_narrow_extract(DisasContext
*s
, arg_rri_esz
*a
,
7387 const GVecGen2 ops
[3])
7389 if (a
->esz
< 0 || a
->esz
> MO_32
|| a
->imm
!= 0 ||
7390 !dc_isar_feature(aa64_sve2
, s
)) {
7393 if (sve_access_check(s
)) {
7394 unsigned vsz
= vec_full_reg_size(s
);
7395 tcg_gen_gvec_2(vec_full_reg_offset(s
, a
->rd
),
7396 vec_full_reg_offset(s
, a
->rn
),
7397 vsz
, vsz
, &ops
[a
->esz
]);
7402 static const TCGOpcode sqxtn_list
[] = {
7403 INDEX_op_shli_vec
, INDEX_op_smin_vec
, INDEX_op_smax_vec
, 0
7406 static void gen_sqxtnb_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
)
7408 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7409 int halfbits
= 4 << vece
;
7410 int64_t mask
= (1ull << halfbits
) - 1;
7411 int64_t min
= -1ull << (halfbits
- 1);
7412 int64_t max
= -min
- 1;
7414 tcg_gen_dupi_vec(vece
, t
, min
);
7415 tcg_gen_smax_vec(vece
, d
, n
, t
);
7416 tcg_gen_dupi_vec(vece
, t
, max
);
7417 tcg_gen_smin_vec(vece
, d
, d
, t
);
7418 tcg_gen_dupi_vec(vece
, t
, mask
);
7419 tcg_gen_and_vec(vece
, d
, d
, t
);
7420 tcg_temp_free_vec(t
);
7423 static bool trans_SQXTNB(DisasContext
*s
, arg_rri_esz
*a
)
7425 static const GVecGen2 ops
[3] = {
7426 { .fniv
= gen_sqxtnb_vec
,
7427 .opt_opc
= sqxtn_list
,
7428 .fno
= gen_helper_sve2_sqxtnb_h
,
7430 { .fniv
= gen_sqxtnb_vec
,
7431 .opt_opc
= sqxtn_list
,
7432 .fno
= gen_helper_sve2_sqxtnb_s
,
7434 { .fniv
= gen_sqxtnb_vec
,
7435 .opt_opc
= sqxtn_list
,
7436 .fno
= gen_helper_sve2_sqxtnb_d
,
7439 return do_sve2_narrow_extract(s
, a
, ops
);
7442 static void gen_sqxtnt_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
)
7444 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7445 int halfbits
= 4 << vece
;
7446 int64_t mask
= (1ull << halfbits
) - 1;
7447 int64_t min
= -1ull << (halfbits
- 1);
7448 int64_t max
= -min
- 1;
7450 tcg_gen_dupi_vec(vece
, t
, min
);
7451 tcg_gen_smax_vec(vece
, n
, n
, t
);
7452 tcg_gen_dupi_vec(vece
, t
, max
);
7453 tcg_gen_smin_vec(vece
, n
, n
, t
);
7454 tcg_gen_shli_vec(vece
, n
, n
, halfbits
);
7455 tcg_gen_dupi_vec(vece
, t
, mask
);
7456 tcg_gen_bitsel_vec(vece
, d
, t
, d
, n
);
7457 tcg_temp_free_vec(t
);
7460 static bool trans_SQXTNT(DisasContext
*s
, arg_rri_esz
*a
)
7462 static const GVecGen2 ops
[3] = {
7463 { .fniv
= gen_sqxtnt_vec
,
7464 .opt_opc
= sqxtn_list
,
7466 .fno
= gen_helper_sve2_sqxtnt_h
,
7468 { .fniv
= gen_sqxtnt_vec
,
7469 .opt_opc
= sqxtn_list
,
7471 .fno
= gen_helper_sve2_sqxtnt_s
,
7473 { .fniv
= gen_sqxtnt_vec
,
7474 .opt_opc
= sqxtn_list
,
7476 .fno
= gen_helper_sve2_sqxtnt_d
,
7479 return do_sve2_narrow_extract(s
, a
, ops
);
7482 static const TCGOpcode uqxtn_list
[] = {
7483 INDEX_op_shli_vec
, INDEX_op_umin_vec
, 0
7486 static void gen_uqxtnb_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
)
7488 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7489 int halfbits
= 4 << vece
;
7490 int64_t max
= (1ull << halfbits
) - 1;
7492 tcg_gen_dupi_vec(vece
, t
, max
);
7493 tcg_gen_umin_vec(vece
, d
, n
, t
);
7494 tcg_temp_free_vec(t
);
7497 static bool trans_UQXTNB(DisasContext
*s
, arg_rri_esz
*a
)
7499 static const GVecGen2 ops
[3] = {
7500 { .fniv
= gen_uqxtnb_vec
,
7501 .opt_opc
= uqxtn_list
,
7502 .fno
= gen_helper_sve2_uqxtnb_h
,
7504 { .fniv
= gen_uqxtnb_vec
,
7505 .opt_opc
= uqxtn_list
,
7506 .fno
= gen_helper_sve2_uqxtnb_s
,
7508 { .fniv
= gen_uqxtnb_vec
,
7509 .opt_opc
= uqxtn_list
,
7510 .fno
= gen_helper_sve2_uqxtnb_d
,
7513 return do_sve2_narrow_extract(s
, a
, ops
);
7516 static void gen_uqxtnt_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
)
7518 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7519 int halfbits
= 4 << vece
;
7520 int64_t max
= (1ull << halfbits
) - 1;
7522 tcg_gen_dupi_vec(vece
, t
, max
);
7523 tcg_gen_umin_vec(vece
, n
, n
, t
);
7524 tcg_gen_shli_vec(vece
, n
, n
, halfbits
);
7525 tcg_gen_bitsel_vec(vece
, d
, t
, d
, n
);
7526 tcg_temp_free_vec(t
);
7529 static bool trans_UQXTNT(DisasContext
*s
, arg_rri_esz
*a
)
7531 static const GVecGen2 ops
[3] = {
7532 { .fniv
= gen_uqxtnt_vec
,
7533 .opt_opc
= uqxtn_list
,
7535 .fno
= gen_helper_sve2_uqxtnt_h
,
7537 { .fniv
= gen_uqxtnt_vec
,
7538 .opt_opc
= uqxtn_list
,
7540 .fno
= gen_helper_sve2_uqxtnt_s
,
7542 { .fniv
= gen_uqxtnt_vec
,
7543 .opt_opc
= uqxtn_list
,
7545 .fno
= gen_helper_sve2_uqxtnt_d
,
7548 return do_sve2_narrow_extract(s
, a
, ops
);
7551 static const TCGOpcode sqxtun_list
[] = {
7552 INDEX_op_shli_vec
, INDEX_op_umin_vec
, INDEX_op_smax_vec
, 0
7555 static void gen_sqxtunb_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
)
7557 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7558 int halfbits
= 4 << vece
;
7559 int64_t max
= (1ull << halfbits
) - 1;
7561 tcg_gen_dupi_vec(vece
, t
, 0);
7562 tcg_gen_smax_vec(vece
, d
, n
, t
);
7563 tcg_gen_dupi_vec(vece
, t
, max
);
7564 tcg_gen_umin_vec(vece
, d
, d
, t
);
7565 tcg_temp_free_vec(t
);
7568 static bool trans_SQXTUNB(DisasContext
*s
, arg_rri_esz
*a
)
7570 static const GVecGen2 ops
[3] = {
7571 { .fniv
= gen_sqxtunb_vec
,
7572 .opt_opc
= sqxtun_list
,
7573 .fno
= gen_helper_sve2_sqxtunb_h
,
7575 { .fniv
= gen_sqxtunb_vec
,
7576 .opt_opc
= sqxtun_list
,
7577 .fno
= gen_helper_sve2_sqxtunb_s
,
7579 { .fniv
= gen_sqxtunb_vec
,
7580 .opt_opc
= sqxtun_list
,
7581 .fno
= gen_helper_sve2_sqxtunb_d
,
7584 return do_sve2_narrow_extract(s
, a
, ops
);
7587 static void gen_sqxtunt_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
)
7589 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7590 int halfbits
= 4 << vece
;
7591 int64_t max
= (1ull << halfbits
) - 1;
7593 tcg_gen_dupi_vec(vece
, t
, 0);
7594 tcg_gen_smax_vec(vece
, n
, n
, t
);
7595 tcg_gen_dupi_vec(vece
, t
, max
);
7596 tcg_gen_umin_vec(vece
, n
, n
, t
);
7597 tcg_gen_shli_vec(vece
, n
, n
, halfbits
);
7598 tcg_gen_bitsel_vec(vece
, d
, t
, d
, n
);
7599 tcg_temp_free_vec(t
);
7602 static bool trans_SQXTUNT(DisasContext
*s
, arg_rri_esz
*a
)
7604 static const GVecGen2 ops
[3] = {
7605 { .fniv
= gen_sqxtunt_vec
,
7606 .opt_opc
= sqxtun_list
,
7608 .fno
= gen_helper_sve2_sqxtunt_h
,
7610 { .fniv
= gen_sqxtunt_vec
,
7611 .opt_opc
= sqxtun_list
,
7613 .fno
= gen_helper_sve2_sqxtunt_s
,
7615 { .fniv
= gen_sqxtunt_vec
,
7616 .opt_opc
= sqxtun_list
,
7618 .fno
= gen_helper_sve2_sqxtunt_d
,
7621 return do_sve2_narrow_extract(s
, a
, ops
);
7624 static bool do_sve2_shr_narrow(DisasContext
*s
, arg_rri_esz
*a
,
7625 const GVecGen2i ops
[3])
7627 if (a
->esz
< 0 || a
->esz
> MO_32
|| !dc_isar_feature(aa64_sve2
, s
)) {
7630 assert(a
->imm
> 0 && a
->imm
<= (8 << a
->esz
));
7631 if (sve_access_check(s
)) {
7632 unsigned vsz
= vec_full_reg_size(s
);
7633 tcg_gen_gvec_2i(vec_full_reg_offset(s
, a
->rd
),
7634 vec_full_reg_offset(s
, a
->rn
),
7635 vsz
, vsz
, a
->imm
, &ops
[a
->esz
]);
7640 static void gen_shrnb_i64(unsigned vece
, TCGv_i64 d
, TCGv_i64 n
, int shr
)
7642 int halfbits
= 4 << vece
;
7643 uint64_t mask
= dup_const(vece
, MAKE_64BIT_MASK(0, halfbits
));
7645 tcg_gen_shri_i64(d
, n
, shr
);
7646 tcg_gen_andi_i64(d
, d
, mask
);
7649 static void gen_shrnb16_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t shr
)
7651 gen_shrnb_i64(MO_16
, d
, n
, shr
);
7654 static void gen_shrnb32_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t shr
)
7656 gen_shrnb_i64(MO_32
, d
, n
, shr
);
7659 static void gen_shrnb64_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t shr
)
7661 gen_shrnb_i64(MO_64
, d
, n
, shr
);
7664 static void gen_shrnb_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
, int64_t shr
)
7666 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7667 int halfbits
= 4 << vece
;
7668 uint64_t mask
= MAKE_64BIT_MASK(0, halfbits
);
7670 tcg_gen_shri_vec(vece
, n
, n
, shr
);
7671 tcg_gen_dupi_vec(vece
, t
, mask
);
7672 tcg_gen_and_vec(vece
, d
, n
, t
);
7673 tcg_temp_free_vec(t
);
7676 static bool trans_SHRNB(DisasContext
*s
, arg_rri_esz
*a
)
7678 static const TCGOpcode vec_list
[] = { INDEX_op_shri_vec
, 0 };
7679 static const GVecGen2i ops
[3] = {
7680 { .fni8
= gen_shrnb16_i64
,
7681 .fniv
= gen_shrnb_vec
,
7682 .opt_opc
= vec_list
,
7683 .fno
= gen_helper_sve2_shrnb_h
,
7685 { .fni8
= gen_shrnb32_i64
,
7686 .fniv
= gen_shrnb_vec
,
7687 .opt_opc
= vec_list
,
7688 .fno
= gen_helper_sve2_shrnb_s
,
7690 { .fni8
= gen_shrnb64_i64
,
7691 .fniv
= gen_shrnb_vec
,
7692 .opt_opc
= vec_list
,
7693 .fno
= gen_helper_sve2_shrnb_d
,
7696 return do_sve2_shr_narrow(s
, a
, ops
);
7699 static void gen_shrnt_i64(unsigned vece
, TCGv_i64 d
, TCGv_i64 n
, int shr
)
7701 int halfbits
= 4 << vece
;
7702 uint64_t mask
= dup_const(vece
, MAKE_64BIT_MASK(0, halfbits
));
7704 tcg_gen_shli_i64(n
, n
, halfbits
- shr
);
7705 tcg_gen_andi_i64(n
, n
, ~mask
);
7706 tcg_gen_andi_i64(d
, d
, mask
);
7707 tcg_gen_or_i64(d
, d
, n
);
7710 static void gen_shrnt16_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t shr
)
7712 gen_shrnt_i64(MO_16
, d
, n
, shr
);
7715 static void gen_shrnt32_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t shr
)
7717 gen_shrnt_i64(MO_32
, d
, n
, shr
);
7720 static void gen_shrnt64_i64(TCGv_i64 d
, TCGv_i64 n
, int64_t shr
)
7722 tcg_gen_shri_i64(n
, n
, shr
);
7723 tcg_gen_deposit_i64(d
, d
, n
, 32, 32);
7726 static void gen_shrnt_vec(unsigned vece
, TCGv_vec d
, TCGv_vec n
, int64_t shr
)
7728 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7729 int halfbits
= 4 << vece
;
7730 uint64_t mask
= MAKE_64BIT_MASK(0, halfbits
);
7732 tcg_gen_shli_vec(vece
, n
, n
, halfbits
- shr
);
7733 tcg_gen_dupi_vec(vece
, t
, mask
);
7734 tcg_gen_bitsel_vec(vece
, d
, t
, d
, n
);
7735 tcg_temp_free_vec(t
);
7738 static bool trans_SHRNT(DisasContext
*s
, arg_rri_esz
*a
)
7740 static const TCGOpcode vec_list
[] = { INDEX_op_shli_vec
, 0 };
7741 static const GVecGen2i ops
[3] = {
7742 { .fni8
= gen_shrnt16_i64
,
7743 .fniv
= gen_shrnt_vec
,
7744 .opt_opc
= vec_list
,
7746 .fno
= gen_helper_sve2_shrnt_h
,
7748 { .fni8
= gen_shrnt32_i64
,
7749 .fniv
= gen_shrnt_vec
,
7750 .opt_opc
= vec_list
,
7752 .fno
= gen_helper_sve2_shrnt_s
,
7754 { .fni8
= gen_shrnt64_i64
,
7755 .fniv
= gen_shrnt_vec
,
7756 .opt_opc
= vec_list
,
7758 .fno
= gen_helper_sve2_shrnt_d
,
7761 return do_sve2_shr_narrow(s
, a
, ops
);
7764 static bool trans_RSHRNB(DisasContext
*s
, arg_rri_esz
*a
)
7766 static const GVecGen2i ops
[3] = {
7767 { .fno
= gen_helper_sve2_rshrnb_h
},
7768 { .fno
= gen_helper_sve2_rshrnb_s
},
7769 { .fno
= gen_helper_sve2_rshrnb_d
},
7771 return do_sve2_shr_narrow(s
, a
, ops
);
7774 static bool trans_RSHRNT(DisasContext
*s
, arg_rri_esz
*a
)
7776 static const GVecGen2i ops
[3] = {
7777 { .fno
= gen_helper_sve2_rshrnt_h
},
7778 { .fno
= gen_helper_sve2_rshrnt_s
},
7779 { .fno
= gen_helper_sve2_rshrnt_d
},
7781 return do_sve2_shr_narrow(s
, a
, ops
);
7784 static void gen_sqshrunb_vec(unsigned vece
, TCGv_vec d
,
7785 TCGv_vec n
, int64_t shr
)
7787 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7788 int halfbits
= 4 << vece
;
7790 tcg_gen_sari_vec(vece
, n
, n
, shr
);
7791 tcg_gen_dupi_vec(vece
, t
, 0);
7792 tcg_gen_smax_vec(vece
, n
, n
, t
);
7793 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(0, halfbits
));
7794 tcg_gen_umin_vec(vece
, d
, n
, t
);
7795 tcg_temp_free_vec(t
);
7798 static bool trans_SQSHRUNB(DisasContext
*s
, arg_rri_esz
*a
)
7800 static const TCGOpcode vec_list
[] = {
7801 INDEX_op_sari_vec
, INDEX_op_smax_vec
, INDEX_op_umin_vec
, 0
7803 static const GVecGen2i ops
[3] = {
7804 { .fniv
= gen_sqshrunb_vec
,
7805 .opt_opc
= vec_list
,
7806 .fno
= gen_helper_sve2_sqshrunb_h
,
7808 { .fniv
= gen_sqshrunb_vec
,
7809 .opt_opc
= vec_list
,
7810 .fno
= gen_helper_sve2_sqshrunb_s
,
7812 { .fniv
= gen_sqshrunb_vec
,
7813 .opt_opc
= vec_list
,
7814 .fno
= gen_helper_sve2_sqshrunb_d
,
7817 return do_sve2_shr_narrow(s
, a
, ops
);
7820 static void gen_sqshrunt_vec(unsigned vece
, TCGv_vec d
,
7821 TCGv_vec n
, int64_t shr
)
7823 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7824 int halfbits
= 4 << vece
;
7826 tcg_gen_sari_vec(vece
, n
, n
, shr
);
7827 tcg_gen_dupi_vec(vece
, t
, 0);
7828 tcg_gen_smax_vec(vece
, n
, n
, t
);
7829 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(0, halfbits
));
7830 tcg_gen_umin_vec(vece
, n
, n
, t
);
7831 tcg_gen_shli_vec(vece
, n
, n
, halfbits
);
7832 tcg_gen_bitsel_vec(vece
, d
, t
, d
, n
);
7833 tcg_temp_free_vec(t
);
7836 static bool trans_SQSHRUNT(DisasContext
*s
, arg_rri_esz
*a
)
7838 static const TCGOpcode vec_list
[] = {
7839 INDEX_op_shli_vec
, INDEX_op_sari_vec
,
7840 INDEX_op_smax_vec
, INDEX_op_umin_vec
, 0
7842 static const GVecGen2i ops
[3] = {
7843 { .fniv
= gen_sqshrunt_vec
,
7844 .opt_opc
= vec_list
,
7846 .fno
= gen_helper_sve2_sqshrunt_h
,
7848 { .fniv
= gen_sqshrunt_vec
,
7849 .opt_opc
= vec_list
,
7851 .fno
= gen_helper_sve2_sqshrunt_s
,
7853 { .fniv
= gen_sqshrunt_vec
,
7854 .opt_opc
= vec_list
,
7856 .fno
= gen_helper_sve2_sqshrunt_d
,
7859 return do_sve2_shr_narrow(s
, a
, ops
);
7862 static bool trans_SQRSHRUNB(DisasContext
*s
, arg_rri_esz
*a
)
7864 static const GVecGen2i ops
[3] = {
7865 { .fno
= gen_helper_sve2_sqrshrunb_h
},
7866 { .fno
= gen_helper_sve2_sqrshrunb_s
},
7867 { .fno
= gen_helper_sve2_sqrshrunb_d
},
7869 return do_sve2_shr_narrow(s
, a
, ops
);
7872 static bool trans_SQRSHRUNT(DisasContext
*s
, arg_rri_esz
*a
)
7874 static const GVecGen2i ops
[3] = {
7875 { .fno
= gen_helper_sve2_sqrshrunt_h
},
7876 { .fno
= gen_helper_sve2_sqrshrunt_s
},
7877 { .fno
= gen_helper_sve2_sqrshrunt_d
},
7879 return do_sve2_shr_narrow(s
, a
, ops
);
7882 static void gen_sqshrnb_vec(unsigned vece
, TCGv_vec d
,
7883 TCGv_vec n
, int64_t shr
)
7885 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7886 int halfbits
= 4 << vece
;
7887 int64_t max
= MAKE_64BIT_MASK(0, halfbits
- 1);
7888 int64_t min
= -max
- 1;
7890 tcg_gen_sari_vec(vece
, n
, n
, shr
);
7891 tcg_gen_dupi_vec(vece
, t
, min
);
7892 tcg_gen_smax_vec(vece
, n
, n
, t
);
7893 tcg_gen_dupi_vec(vece
, t
, max
);
7894 tcg_gen_smin_vec(vece
, n
, n
, t
);
7895 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(0, halfbits
));
7896 tcg_gen_and_vec(vece
, d
, n
, t
);
7897 tcg_temp_free_vec(t
);
7900 static bool trans_SQSHRNB(DisasContext
*s
, arg_rri_esz
*a
)
7902 static const TCGOpcode vec_list
[] = {
7903 INDEX_op_sari_vec
, INDEX_op_smax_vec
, INDEX_op_smin_vec
, 0
7905 static const GVecGen2i ops
[3] = {
7906 { .fniv
= gen_sqshrnb_vec
,
7907 .opt_opc
= vec_list
,
7908 .fno
= gen_helper_sve2_sqshrnb_h
,
7910 { .fniv
= gen_sqshrnb_vec
,
7911 .opt_opc
= vec_list
,
7912 .fno
= gen_helper_sve2_sqshrnb_s
,
7914 { .fniv
= gen_sqshrnb_vec
,
7915 .opt_opc
= vec_list
,
7916 .fno
= gen_helper_sve2_sqshrnb_d
,
7919 return do_sve2_shr_narrow(s
, a
, ops
);
7922 static void gen_sqshrnt_vec(unsigned vece
, TCGv_vec d
,
7923 TCGv_vec n
, int64_t shr
)
7925 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7926 int halfbits
= 4 << vece
;
7927 int64_t max
= MAKE_64BIT_MASK(0, halfbits
- 1);
7928 int64_t min
= -max
- 1;
7930 tcg_gen_sari_vec(vece
, n
, n
, shr
);
7931 tcg_gen_dupi_vec(vece
, t
, min
);
7932 tcg_gen_smax_vec(vece
, n
, n
, t
);
7933 tcg_gen_dupi_vec(vece
, t
, max
);
7934 tcg_gen_smin_vec(vece
, n
, n
, t
);
7935 tcg_gen_shli_vec(vece
, n
, n
, halfbits
);
7936 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(0, halfbits
));
7937 tcg_gen_bitsel_vec(vece
, d
, t
, d
, n
);
7938 tcg_temp_free_vec(t
);
7941 static bool trans_SQSHRNT(DisasContext
*s
, arg_rri_esz
*a
)
7943 static const TCGOpcode vec_list
[] = {
7944 INDEX_op_shli_vec
, INDEX_op_sari_vec
,
7945 INDEX_op_smax_vec
, INDEX_op_smin_vec
, 0
7947 static const GVecGen2i ops
[3] = {
7948 { .fniv
= gen_sqshrnt_vec
,
7949 .opt_opc
= vec_list
,
7951 .fno
= gen_helper_sve2_sqshrnt_h
,
7953 { .fniv
= gen_sqshrnt_vec
,
7954 .opt_opc
= vec_list
,
7956 .fno
= gen_helper_sve2_sqshrnt_s
,
7958 { .fniv
= gen_sqshrnt_vec
,
7959 .opt_opc
= vec_list
,
7961 .fno
= gen_helper_sve2_sqshrnt_d
,
7964 return do_sve2_shr_narrow(s
, a
, ops
);
7967 static bool trans_SQRSHRNB(DisasContext
*s
, arg_rri_esz
*a
)
7969 static const GVecGen2i ops
[3] = {
7970 { .fno
= gen_helper_sve2_sqrshrnb_h
},
7971 { .fno
= gen_helper_sve2_sqrshrnb_s
},
7972 { .fno
= gen_helper_sve2_sqrshrnb_d
},
7974 return do_sve2_shr_narrow(s
, a
, ops
);
7977 static bool trans_SQRSHRNT(DisasContext
*s
, arg_rri_esz
*a
)
7979 static const GVecGen2i ops
[3] = {
7980 { .fno
= gen_helper_sve2_sqrshrnt_h
},
7981 { .fno
= gen_helper_sve2_sqrshrnt_s
},
7982 { .fno
= gen_helper_sve2_sqrshrnt_d
},
7984 return do_sve2_shr_narrow(s
, a
, ops
);
7987 static void gen_uqshrnb_vec(unsigned vece
, TCGv_vec d
,
7988 TCGv_vec n
, int64_t shr
)
7990 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
7991 int halfbits
= 4 << vece
;
7993 tcg_gen_shri_vec(vece
, n
, n
, shr
);
7994 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(0, halfbits
));
7995 tcg_gen_umin_vec(vece
, d
, n
, t
);
7996 tcg_temp_free_vec(t
);
7999 static bool trans_UQSHRNB(DisasContext
*s
, arg_rri_esz
*a
)
8001 static const TCGOpcode vec_list
[] = {
8002 INDEX_op_shri_vec
, INDEX_op_umin_vec
, 0
8004 static const GVecGen2i ops
[3] = {
8005 { .fniv
= gen_uqshrnb_vec
,
8006 .opt_opc
= vec_list
,
8007 .fno
= gen_helper_sve2_uqshrnb_h
,
8009 { .fniv
= gen_uqshrnb_vec
,
8010 .opt_opc
= vec_list
,
8011 .fno
= gen_helper_sve2_uqshrnb_s
,
8013 { .fniv
= gen_uqshrnb_vec
,
8014 .opt_opc
= vec_list
,
8015 .fno
= gen_helper_sve2_uqshrnb_d
,
8018 return do_sve2_shr_narrow(s
, a
, ops
);
8021 static void gen_uqshrnt_vec(unsigned vece
, TCGv_vec d
,
8022 TCGv_vec n
, int64_t shr
)
8024 TCGv_vec t
= tcg_temp_new_vec_matching(d
);
8025 int halfbits
= 4 << vece
;
8027 tcg_gen_shri_vec(vece
, n
, n
, shr
);
8028 tcg_gen_dupi_vec(vece
, t
, MAKE_64BIT_MASK(0, halfbits
));
8029 tcg_gen_umin_vec(vece
, n
, n
, t
);
8030 tcg_gen_shli_vec(vece
, n
, n
, halfbits
);
8031 tcg_gen_bitsel_vec(vece
, d
, t
, d
, n
);
8032 tcg_temp_free_vec(t
);
8035 static bool trans_UQSHRNT(DisasContext
*s
, arg_rri_esz
*a
)
8037 static const TCGOpcode vec_list
[] = {
8038 INDEX_op_shli_vec
, INDEX_op_shri_vec
, INDEX_op_umin_vec
, 0
8040 static const GVecGen2i ops
[3] = {
8041 { .fniv
= gen_uqshrnt_vec
,
8042 .opt_opc
= vec_list
,
8044 .fno
= gen_helper_sve2_uqshrnt_h
,
8046 { .fniv
= gen_uqshrnt_vec
,
8047 .opt_opc
= vec_list
,
8049 .fno
= gen_helper_sve2_uqshrnt_s
,
8051 { .fniv
= gen_uqshrnt_vec
,
8052 .opt_opc
= vec_list
,
8054 .fno
= gen_helper_sve2_uqshrnt_d
,
8057 return do_sve2_shr_narrow(s
, a
, ops
);
8060 static bool trans_UQRSHRNB(DisasContext
*s
, arg_rri_esz
*a
)
8062 static const GVecGen2i ops
[3] = {
8063 { .fno
= gen_helper_sve2_uqrshrnb_h
},
8064 { .fno
= gen_helper_sve2_uqrshrnb_s
},
8065 { .fno
= gen_helper_sve2_uqrshrnb_d
},
8067 return do_sve2_shr_narrow(s
, a
, ops
);
8070 static bool trans_UQRSHRNT(DisasContext
*s
, arg_rri_esz
*a
)
8072 static const GVecGen2i ops
[3] = {
8073 { .fno
= gen_helper_sve2_uqrshrnt_h
},
8074 { .fno
= gen_helper_sve2_uqrshrnt_s
},
8075 { .fno
= gen_helper_sve2_uqrshrnt_d
},
8077 return do_sve2_shr_narrow(s
, a
, ops
);
8080 #define DO_SVE2_ZZZ_NARROW(NAME, name) \
8081 static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
8083 static gen_helper_gvec_3 * const fns[4] = { \
8084 NULL, gen_helper_sve2_##name##_h, \
8085 gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
8087 return do_sve2_zzz_ool(s, a, fns[a->esz]); \
8090 DO_SVE2_ZZZ_NARROW(ADDHNB
, addhnb
)
8091 DO_SVE2_ZZZ_NARROW(ADDHNT
, addhnt
)
8092 DO_SVE2_ZZZ_NARROW(RADDHNB
, raddhnb
)
8093 DO_SVE2_ZZZ_NARROW(RADDHNT
, raddhnt
)
8095 DO_SVE2_ZZZ_NARROW(SUBHNB
, subhnb
)
8096 DO_SVE2_ZZZ_NARROW(SUBHNT
, subhnt
)
8097 DO_SVE2_ZZZ_NARROW(RSUBHNB
, rsubhnb
)
8098 DO_SVE2_ZZZ_NARROW(RSUBHNT
, rsubhnt
)
8100 static bool do_sve2_ppzz_flags(DisasContext
*s
, arg_rprr_esz
*a
,
8101 gen_helper_gvec_flags_4
*fn
)
8103 if (!dc_isar_feature(aa64_sve2
, s
)) {
8106 return do_ppzz_flags(s
, a
, fn
);
8109 #define DO_SVE2_PPZZ_MATCH(NAME, name) \
8110 static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
8112 static gen_helper_gvec_flags_4 * const fns[4] = { \
8113 gen_helper_sve2_##name##_ppzz_b, gen_helper_sve2_##name##_ppzz_h, \
8116 return do_sve2_ppzz_flags(s, a, fns[a->esz]); \
8119 DO_SVE2_PPZZ_MATCH(MATCH
, match
)
8120 DO_SVE2_PPZZ_MATCH(NMATCH
, nmatch
)
8122 static bool trans_HISTCNT(DisasContext
*s
, arg_rprr_esz
*a
)
8124 static gen_helper_gvec_4
* const fns
[2] = {
8125 gen_helper_sve2_histcnt_s
, gen_helper_sve2_histcnt_d
8130 return do_sve2_zpzz_ool(s
, a
, fns
[a
->esz
- 2]);
8133 static bool trans_HISTSEG(DisasContext
*s
, arg_rrr_esz
*a
)
8138 return do_sve2_zzz_ool(s
, a
, gen_helper_sve2_histseg
);
8141 static bool do_sve2_zpzz_fp(DisasContext
*s
, arg_rprr_esz
*a
,
8142 gen_helper_gvec_4_ptr
*fn
)
8144 if (!dc_isar_feature(aa64_sve2
, s
)) {
8147 return do_zpzz_fp(s
, a
, fn
);
8150 #define DO_SVE2_ZPZZ_FP(NAME, name) \
8151 static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
8153 static gen_helper_gvec_4_ptr * const fns[4] = { \
8154 NULL, gen_helper_sve2_##name##_zpzz_h, \
8155 gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
8157 return do_sve2_zpzz_fp(s, a, fns[a->esz]); \
8160 DO_SVE2_ZPZZ_FP(FADDP
, faddp
)
8161 DO_SVE2_ZPZZ_FP(FMAXNMP
, fmaxnmp
)
8162 DO_SVE2_ZPZZ_FP(FMINNMP
, fminnmp
)
8163 DO_SVE2_ZPZZ_FP(FMAXP
, fmaxp
)
8164 DO_SVE2_ZPZZ_FP(FMINP
, fminp
)
8167 * SVE Integer Multiply-Add (unpredicated)
8170 static bool trans_FMMLA(DisasContext
*s
, arg_rrrr_esz
*a
)
8172 gen_helper_gvec_4_ptr
*fn
;
8176 if (!dc_isar_feature(aa64_sve_f32mm
, s
)) {
8179 fn
= gen_helper_fmmla_s
;
8182 if (!dc_isar_feature(aa64_sve_f64mm
, s
)) {
8185 fn
= gen_helper_fmmla_d
;
8191 if (sve_access_check(s
)) {
8192 unsigned vsz
= vec_full_reg_size(s
);
8193 TCGv_ptr status
= fpstatus_ptr(FPST_FPCR
);
8194 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s
, a
->rd
),
8195 vec_full_reg_offset(s
, a
->rn
),
8196 vec_full_reg_offset(s
, a
->rm
),
8197 vec_full_reg_offset(s
, a
->ra
),
8198 status
, vsz
, vsz
, 0, fn
);
8199 tcg_temp_free_ptr(status
);
8204 static bool do_sqdmlal_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
,
8205 bool sel1
, bool sel2
)
8207 static gen_helper_gvec_4
* const fns
[] = {
8208 NULL
, gen_helper_sve2_sqdmlal_zzzw_h
,
8209 gen_helper_sve2_sqdmlal_zzzw_s
, gen_helper_sve2_sqdmlal_zzzw_d
,
8211 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
], (sel2
<< 1) | sel1
);
8214 static bool do_sqdmlsl_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
,
8215 bool sel1
, bool sel2
)
8217 static gen_helper_gvec_4
* const fns
[] = {
8218 NULL
, gen_helper_sve2_sqdmlsl_zzzw_h
,
8219 gen_helper_sve2_sqdmlsl_zzzw_s
, gen_helper_sve2_sqdmlsl_zzzw_d
,
8221 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
], (sel2
<< 1) | sel1
);
8224 static bool trans_SQDMLALB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8226 return do_sqdmlal_zzzw(s
, a
, false, false);
8229 static bool trans_SQDMLALT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8231 return do_sqdmlal_zzzw(s
, a
, true, true);
8234 static bool trans_SQDMLALBT(DisasContext
*s
, arg_rrrr_esz
*a
)
8236 return do_sqdmlal_zzzw(s
, a
, false, true);
8239 static bool trans_SQDMLSLB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8241 return do_sqdmlsl_zzzw(s
, a
, false, false);
8244 static bool trans_SQDMLSLT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8246 return do_sqdmlsl_zzzw(s
, a
, true, true);
8249 static bool trans_SQDMLSLBT(DisasContext
*s
, arg_rrrr_esz
*a
)
8251 return do_sqdmlsl_zzzw(s
, a
, false, true);
8254 static bool trans_SQRDMLAH_zzzz(DisasContext
*s
, arg_rrrr_esz
*a
)
8256 static gen_helper_gvec_4
* const fns
[] = {
8257 gen_helper_sve2_sqrdmlah_b
, gen_helper_sve2_sqrdmlah_h
,
8258 gen_helper_sve2_sqrdmlah_s
, gen_helper_sve2_sqrdmlah_d
,
8260 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
], 0);
8263 static bool trans_SQRDMLSH_zzzz(DisasContext
*s
, arg_rrrr_esz
*a
)
8265 static gen_helper_gvec_4
* const fns
[] = {
8266 gen_helper_sve2_sqrdmlsh_b
, gen_helper_sve2_sqrdmlsh_h
,
8267 gen_helper_sve2_sqrdmlsh_s
, gen_helper_sve2_sqrdmlsh_d
,
8269 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
], 0);
8272 static bool do_smlal_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
, bool sel
)
8274 static gen_helper_gvec_4
* const fns
[] = {
8275 NULL
, gen_helper_sve2_smlal_zzzw_h
,
8276 gen_helper_sve2_smlal_zzzw_s
, gen_helper_sve2_smlal_zzzw_d
,
8278 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
], sel
);
8281 static bool trans_SMLALB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8283 return do_smlal_zzzw(s
, a
, false);
8286 static bool trans_SMLALT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8288 return do_smlal_zzzw(s
, a
, true);
8291 static bool do_umlal_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
, bool sel
)
8293 static gen_helper_gvec_4
* const fns
[] = {
8294 NULL
, gen_helper_sve2_umlal_zzzw_h
,
8295 gen_helper_sve2_umlal_zzzw_s
, gen_helper_sve2_umlal_zzzw_d
,
8297 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
], sel
);
8300 static bool trans_UMLALB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8302 return do_umlal_zzzw(s
, a
, false);
8305 static bool trans_UMLALT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8307 return do_umlal_zzzw(s
, a
, true);
8310 static bool do_smlsl_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
, bool sel
)
8312 static gen_helper_gvec_4
* const fns
[] = {
8313 NULL
, gen_helper_sve2_smlsl_zzzw_h
,
8314 gen_helper_sve2_smlsl_zzzw_s
, gen_helper_sve2_smlsl_zzzw_d
,
8316 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
], sel
);
8319 static bool trans_SMLSLB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8321 return do_smlsl_zzzw(s
, a
, false);
8324 static bool trans_SMLSLT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8326 return do_smlsl_zzzw(s
, a
, true);
8329 static bool do_umlsl_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
, bool sel
)
8331 static gen_helper_gvec_4
* const fns
[] = {
8332 NULL
, gen_helper_sve2_umlsl_zzzw_h
,
8333 gen_helper_sve2_umlsl_zzzw_s
, gen_helper_sve2_umlsl_zzzw_d
,
8335 return do_sve2_zzzz_ool(s
, a
, fns
[a
->esz
], sel
);
8338 static bool trans_UMLSLB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8340 return do_umlsl_zzzw(s
, a
, false);
8343 static bool trans_UMLSLT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8345 return do_umlsl_zzzw(s
, a
, true);
8348 static bool trans_CMLA_zzzz(DisasContext
*s
, arg_CMLA_zzzz
*a
)
8350 static gen_helper_gvec_4
* const fns
[] = {
8351 gen_helper_sve2_cmla_zzzz_b
, gen_helper_sve2_cmla_zzzz_h
,
8352 gen_helper_sve2_cmla_zzzz_s
, gen_helper_sve2_cmla_zzzz_d
,
8355 if (!dc_isar_feature(aa64_sve2
, s
)) {
8358 if (sve_access_check(s
)) {
8359 gen_gvec_ool_zzzz(s
, fns
[a
->esz
], a
->rd
, a
->rn
, a
->rm
, a
->ra
, a
->rot
);
8364 static bool trans_CDOT_zzzz(DisasContext
*s
, arg_CMLA_zzzz
*a
)
8366 if (!dc_isar_feature(aa64_sve2
, s
) || a
->esz
< MO_32
) {
8369 if (sve_access_check(s
)) {
8370 gen_helper_gvec_4
*fn
= (a
->esz
== MO_32
8371 ? gen_helper_sve2_cdot_zzzz_s
8372 : gen_helper_sve2_cdot_zzzz_d
);
8373 gen_gvec_ool_zzzz(s
, fn
, a
->rd
, a
->rn
, a
->rm
, a
->ra
, a
->rot
);
8378 static bool trans_SQRDCMLAH_zzzz(DisasContext
*s
, arg_SQRDCMLAH_zzzz
*a
)
8380 static gen_helper_gvec_4
* const fns
[] = {
8381 gen_helper_sve2_sqrdcmlah_zzzz_b
, gen_helper_sve2_sqrdcmlah_zzzz_h
,
8382 gen_helper_sve2_sqrdcmlah_zzzz_s
, gen_helper_sve2_sqrdcmlah_zzzz_d
,
8385 if (!dc_isar_feature(aa64_sve2
, s
)) {
8388 if (sve_access_check(s
)) {
8389 gen_gvec_ool_zzzz(s
, fns
[a
->esz
], a
->rd
, a
->rn
, a
->rm
, a
->ra
, a
->rot
);
8394 static bool trans_USDOT_zzzz(DisasContext
*s
, arg_USDOT_zzzz
*a
)
8396 if (a
->esz
!= 2 || !dc_isar_feature(aa64_sve_i8mm
, s
)) {
8399 if (sve_access_check(s
)) {
8400 unsigned vsz
= vec_full_reg_size(s
);
8401 tcg_gen_gvec_4_ool(vec_full_reg_offset(s
, a
->rd
),
8402 vec_full_reg_offset(s
, a
->rn
),
8403 vec_full_reg_offset(s
, a
->rm
),
8404 vec_full_reg_offset(s
, a
->ra
),
8405 vsz
, vsz
, 0, gen_helper_gvec_usdot_b
);
8410 static bool trans_AESMC(DisasContext
*s
, arg_AESMC
*a
)
8412 if (!dc_isar_feature(aa64_sve2_aes
, s
)) {
8415 if (sve_access_check(s
)) {
8416 gen_gvec_ool_zz(s
, gen_helper_crypto_aesmc
, a
->rd
, a
->rd
, a
->decrypt
);
8421 static bool do_aese(DisasContext
*s
, arg_rrr_esz
*a
, bool decrypt
)
8423 if (!dc_isar_feature(aa64_sve2_aes
, s
)) {
8426 if (sve_access_check(s
)) {
8427 gen_gvec_ool_zzz(s
, gen_helper_crypto_aese
,
8428 a
->rd
, a
->rn
, a
->rm
, decrypt
);
8433 static bool trans_AESE(DisasContext
*s
, arg_rrr_esz
*a
)
8435 return do_aese(s
, a
, false);
8438 static bool trans_AESD(DisasContext
*s
, arg_rrr_esz
*a
)
8440 return do_aese(s
, a
, true);
8443 static bool do_sm4(DisasContext
*s
, arg_rrr_esz
*a
, gen_helper_gvec_3
*fn
)
8445 if (!dc_isar_feature(aa64_sve2_sm4
, s
)) {
8448 if (sve_access_check(s
)) {
8449 gen_gvec_ool_zzz(s
, fn
, a
->rd
, a
->rn
, a
->rm
, 0);
8454 static bool trans_SM4E(DisasContext
*s
, arg_rrr_esz
*a
)
8456 return do_sm4(s
, a
, gen_helper_crypto_sm4e
);
8459 static bool trans_SM4EKEY(DisasContext
*s
, arg_rrr_esz
*a
)
8461 return do_sm4(s
, a
, gen_helper_crypto_sm4ekey
);
8464 static bool trans_RAX1(DisasContext
*s
, arg_rrr_esz
*a
)
8466 if (!dc_isar_feature(aa64_sve2_sha3
, s
)) {
8469 if (sve_access_check(s
)) {
8470 gen_gvec_fn_zzz(s
, gen_gvec_rax1
, MO_64
, a
->rd
, a
->rn
, a
->rm
);
8475 static bool trans_FCVTNT_sh(DisasContext
*s
, arg_rpr_esz
*a
)
8477 if (!dc_isar_feature(aa64_sve2
, s
)) {
8480 return do_zpz_ptr(s
, a
->rd
, a
->rn
, a
->pg
, false, gen_helper_sve2_fcvtnt_sh
);
8483 static bool trans_BFCVTNT(DisasContext
*s
, arg_rpr_esz
*a
)
8485 if (!dc_isar_feature(aa64_sve_bf16
, s
)) {
8488 return do_zpz_ptr(s
, a
->rd
, a
->rn
, a
->pg
, false, gen_helper_sve_bfcvtnt
);
8491 static bool trans_FCVTNT_ds(DisasContext
*s
, arg_rpr_esz
*a
)
8493 if (!dc_isar_feature(aa64_sve2
, s
)) {
8496 return do_zpz_ptr(s
, a
->rd
, a
->rn
, a
->pg
, false, gen_helper_sve2_fcvtnt_ds
);
8499 static bool trans_FCVTLT_hs(DisasContext
*s
, arg_rpr_esz
*a
)
8501 if (!dc_isar_feature(aa64_sve2
, s
)) {
8504 return do_zpz_ptr(s
, a
->rd
, a
->rn
, a
->pg
, false, gen_helper_sve2_fcvtlt_hs
);
8507 static bool trans_FCVTLT_sd(DisasContext
*s
, arg_rpr_esz
*a
)
8509 if (!dc_isar_feature(aa64_sve2
, s
)) {
8512 return do_zpz_ptr(s
, a
->rd
, a
->rn
, a
->pg
, false, gen_helper_sve2_fcvtlt_sd
);
8515 static bool trans_FCVTX_ds(DisasContext
*s
, arg_rpr_esz
*a
)
8517 if (!dc_isar_feature(aa64_sve2
, s
)) {
8520 return do_frint_mode(s
, a
, float_round_to_odd
, gen_helper_sve_fcvt_ds
);
8523 static bool trans_FCVTXNT_ds(DisasContext
*s
, arg_rpr_esz
*a
)
8525 if (!dc_isar_feature(aa64_sve2
, s
)) {
8528 return do_frint_mode(s
, a
, float_round_to_odd
, gen_helper_sve2_fcvtnt_ds
);
8531 static bool trans_FLOGB(DisasContext
*s
, arg_rpr_esz
*a
)
8533 static gen_helper_gvec_3_ptr
* const fns
[] = {
8534 NULL
, gen_helper_flogb_h
,
8535 gen_helper_flogb_s
, gen_helper_flogb_d
8538 if (!dc_isar_feature(aa64_sve2
, s
) || fns
[a
->esz
] == NULL
) {
8541 if (sve_access_check(s
)) {
8543 fpstatus_ptr(a
->esz
== MO_16
? FPST_FPCR_F16
: FPST_FPCR
);
8544 unsigned vsz
= vec_full_reg_size(s
);
8546 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s
, a
->rd
),
8547 vec_full_reg_offset(s
, a
->rn
),
8548 pred_full_reg_offset(s
, a
->pg
),
8549 status
, vsz
, vsz
, 0, fns
[a
->esz
]);
8550 tcg_temp_free_ptr(status
);
8555 static bool do_FMLAL_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
, bool sub
, bool sel
)
8557 if (!dc_isar_feature(aa64_sve2
, s
)) {
8560 if (sve_access_check(s
)) {
8561 unsigned vsz
= vec_full_reg_size(s
);
8562 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s
, a
->rd
),
8563 vec_full_reg_offset(s
, a
->rn
),
8564 vec_full_reg_offset(s
, a
->rm
),
8565 vec_full_reg_offset(s
, a
->ra
),
8566 cpu_env
, vsz
, vsz
, (sel
<< 1) | sub
,
8567 gen_helper_sve2_fmlal_zzzw_s
);
8572 static bool trans_FMLALB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8574 return do_FMLAL_zzzw(s
, a
, false, false);
8577 static bool trans_FMLALT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8579 return do_FMLAL_zzzw(s
, a
, false, true);
8582 static bool trans_FMLSLB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8584 return do_FMLAL_zzzw(s
, a
, true, false);
8587 static bool trans_FMLSLT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8589 return do_FMLAL_zzzw(s
, a
, true, true);
8592 static bool do_FMLAL_zzxw(DisasContext
*s
, arg_rrxr_esz
*a
, bool sub
, bool sel
)
8594 if (!dc_isar_feature(aa64_sve2
, s
)) {
8597 if (sve_access_check(s
)) {
8598 unsigned vsz
= vec_full_reg_size(s
);
8599 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s
, a
->rd
),
8600 vec_full_reg_offset(s
, a
->rn
),
8601 vec_full_reg_offset(s
, a
->rm
),
8602 vec_full_reg_offset(s
, a
->ra
),
8604 (a
->index
<< 2) | (sel
<< 1) | sub
,
8605 gen_helper_sve2_fmlal_zzxw_s
);
8610 static bool trans_FMLALB_zzxw(DisasContext
*s
, arg_rrxr_esz
*a
)
8612 return do_FMLAL_zzxw(s
, a
, false, false);
8615 static bool trans_FMLALT_zzxw(DisasContext
*s
, arg_rrxr_esz
*a
)
8617 return do_FMLAL_zzxw(s
, a
, false, true);
8620 static bool trans_FMLSLB_zzxw(DisasContext
*s
, arg_rrxr_esz
*a
)
8622 return do_FMLAL_zzxw(s
, a
, true, false);
8625 static bool trans_FMLSLT_zzxw(DisasContext
*s
, arg_rrxr_esz
*a
)
8627 return do_FMLAL_zzxw(s
, a
, true, true);
8630 static bool do_i8mm_zzzz_ool(DisasContext
*s
, arg_rrrr_esz
*a
,
8631 gen_helper_gvec_4
*fn
, int data
)
8633 if (!dc_isar_feature(aa64_sve_i8mm
, s
)) {
8636 if (sve_access_check(s
)) {
8637 gen_gvec_ool_zzzz(s
, fn
, a
->rd
, a
->rn
, a
->rm
, a
->ra
, data
);
8642 static bool trans_SMMLA(DisasContext
*s
, arg_rrrr_esz
*a
)
8644 return do_i8mm_zzzz_ool(s
, a
, gen_helper_gvec_smmla_b
, 0);
8647 static bool trans_USMMLA(DisasContext
*s
, arg_rrrr_esz
*a
)
8649 return do_i8mm_zzzz_ool(s
, a
, gen_helper_gvec_usmmla_b
, 0);
8652 static bool trans_UMMLA(DisasContext
*s
, arg_rrrr_esz
*a
)
8654 return do_i8mm_zzzz_ool(s
, a
, gen_helper_gvec_ummla_b
, 0);
8657 static bool trans_BFDOT_zzzz(DisasContext
*s
, arg_rrrr_esz
*a
)
8659 if (!dc_isar_feature(aa64_sve_bf16
, s
)) {
8662 if (sve_access_check(s
)) {
8663 gen_gvec_ool_zzzz(s
, gen_helper_gvec_bfdot
,
8664 a
->rd
, a
->rn
, a
->rm
, a
->ra
, 0);
8669 static bool trans_BFDOT_zzxz(DisasContext
*s
, arg_rrxr_esz
*a
)
8671 if (!dc_isar_feature(aa64_sve_bf16
, s
)) {
8674 if (sve_access_check(s
)) {
8675 gen_gvec_ool_zzzz(s
, gen_helper_gvec_bfdot_idx
,
8676 a
->rd
, a
->rn
, a
->rm
, a
->ra
, a
->index
);
8681 static bool trans_BFMMLA(DisasContext
*s
, arg_rrrr_esz
*a
)
8683 if (!dc_isar_feature(aa64_sve_bf16
, s
)) {
8686 if (sve_access_check(s
)) {
8687 gen_gvec_ool_zzzz(s
, gen_helper_gvec_bfmmla
,
8688 a
->rd
, a
->rn
, a
->rm
, a
->ra
, 0);
8693 static bool do_BFMLAL_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
, bool sel
)
8695 if (!dc_isar_feature(aa64_sve_bf16
, s
)) {
8698 if (sve_access_check(s
)) {
8699 TCGv_ptr status
= fpstatus_ptr(FPST_FPCR
);
8700 unsigned vsz
= vec_full_reg_size(s
);
8702 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s
, a
->rd
),
8703 vec_full_reg_offset(s
, a
->rn
),
8704 vec_full_reg_offset(s
, a
->rm
),
8705 vec_full_reg_offset(s
, a
->ra
),
8706 status
, vsz
, vsz
, sel
,
8707 gen_helper_gvec_bfmlal
);
8708 tcg_temp_free_ptr(status
);
8713 static bool trans_BFMLALB_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8715 return do_BFMLAL_zzzw(s
, a
, false);
8718 static bool trans_BFMLALT_zzzw(DisasContext
*s
, arg_rrrr_esz
*a
)
8720 return do_BFMLAL_zzzw(s
, a
, true);
8723 static bool do_BFMLAL_zzxw(DisasContext
*s
, arg_rrxr_esz
*a
, bool sel
)
8725 if (!dc_isar_feature(aa64_sve_bf16
, s
)) {
8728 if (sve_access_check(s
)) {
8729 TCGv_ptr status
= fpstatus_ptr(FPST_FPCR
);
8730 unsigned vsz
= vec_full_reg_size(s
);
8732 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s
, a
->rd
),
8733 vec_full_reg_offset(s
, a
->rn
),
8734 vec_full_reg_offset(s
, a
->rm
),
8735 vec_full_reg_offset(s
, a
->ra
),
8736 status
, vsz
, vsz
, (a
->index
<< 1) | sel
,
8737 gen_helper_gvec_bfmlal_idx
);
8738 tcg_temp_free_ptr(status
);
8743 static bool trans_BFMLALB_zzxw(DisasContext
*s
, arg_rrxr_esz
*a
)
8745 return do_BFMLAL_zzxw(s
, a
, false);
8748 static bool trans_BFMLALT_zzxw(DisasContext
*s
, arg_rrxr_esz
*a
)
8750 return do_BFMLAL_zzxw(s
, a
, true);