target/arm: Use TRANS_FEAT for do_last_fp
target/arm/translate-sve.c (qemu/ar7.git)

/*
 * AArch64 SVE translation
 *
 * Copyright (c) 2018 Linaro, Ltd
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "translate-a64.h"
#include "fpu/softfloat.h"

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t,
                         TCGv_i64, uint32_t, uint32_t);

typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_i32);
typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_ptr, TCGv_i32);

typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr,
                                         TCGv_ptr, TCGv_i64, TCGv_i32);

/*
 * Helpers for extracting complex instruction fields.
 */

/* See e.g. ASR (immediate, predicated).
 * Returns -1 for unallocated encoding; diagnose later.
 */
static int tszimm_esz(DisasContext *s, int x)
{
    x >>= 3;  /* discard imm3 */
    return 31 - clz32(x);
}

static int tszimm_shr(DisasContext *s, int x)
{
    return (16 << tszimm_esz(s, x)) - x;
}

/* See e.g. LSL (immediate, predicated). */
static int tszimm_shl(DisasContext *s, int x)
{
    return x - (8 << tszimm_esz(s, x));
}

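/*
 * Worked example: x is the concatenated tsz:imm3 field.  A halfword
 * right shift by 3 encodes tsz:imm3 = 0x1d; tszimm_esz discards imm3
 * leaving tsz = 3, whose top set bit gives esz = 1 (MO_16), and
 * tszimm_shr returns (16 << 1) - 0x1d = 3.
 */
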
/* The SH bit is in bit 8.  Extract the low 8 and shift. */
static inline int expand_imm_sh8s(DisasContext *s, int x)
{
    return (int8_t)x << (x & 0x100 ? 8 : 0);
}

static inline int expand_imm_sh8u(DisasContext *s, int x)
{
    return (uint8_t)x << (x & 0x100 ? 8 : 0);
}

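/* E.g. expand_imm_sh8s(0x1ff) yields (int8_t)0xff << 8 = -256,
 * while expand_imm_sh8u(0x1ff) yields 0xff00.
 */
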
/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
 * with unsigned data.  C.f. SVE Memory Contiguous Load Group.
 */
static inline int msz_dtype(DisasContext *s, int msz)
{
    static const uint8_t dtype[4] = { 0, 5, 10, 15 };
    return dtype[msz];
}

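/*
 * The table entries 0, 5, 10 and 15 are the dtype encodings whose
 * memory and register element sizes match, i.e. the unsigned
 * LD1B/LD1H/LD1W/LD1D forms.
 */
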
/*
 * Include the generated decoder.
 */

#include "decode-sve.c.inc"

/*
 * Implement all of the translator functions referenced by the decoder.
 */

/* Return the offset into CPUARMState of the predicate vector register Pn.
 * Note for this purpose, FFR is P16.
 */
static inline int pred_full_reg_offset(DisasContext *s, int regno)
{
    return offsetof(CPUARMState, vfp.pregs[regno]);
}

/* Return the byte size of the whole predicate register, VL / 64. */
static inline int pred_full_reg_size(DisasContext *s)
{
    return s->sve_len >> 3;
}

/* Round up the size of a register to a size allowed by
 * the tcg vector infrastructure.  Any operation which uses this
 * size may assume that the bits above pred_full_reg_size are zero,
 * and must leave them the same way.
 *
 * Note that this is not needed for the vector registers as they
 * are always properly sized for tcg vectors.
 */
static int size_for_gvec(int size)
{
    if (size <= 8) {
        return 8;
    } else {
        return QEMU_ALIGN_UP(size, 16);
    }
}

static int pred_gvec_reg_size(DisasContext *s)
{
    return size_for_gvec(pred_full_reg_size(s));
}

/* Invoke an out-of-line helper on 2 Zregs. */
static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
                            int rd, int rn, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vsz, vsz, data, fn);
    }
    return true;
}

/* Invoke an out-of-line helper on 3 Zregs. */
static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int rm, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rrr_esz *a, int data)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
}

/* Invoke an out-of-line helper on 4 Zregs. */
static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int ra, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrrr_esz *a, int data)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
}

static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrxr_esz *a)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
}

/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rpr_esz *a, int data)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data);
}

static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn,
                                  arg_rpri_esz *a)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
}

/* Invoke an out-of-line helper on 3 Zregs and a predicate. */
static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rprr_esz *a, int data)
{
    return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data);
}

/* Invoke a vector expander on two Zregs and an immediate. */
static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                            int esz, int rd, int rn, uint64_t imm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn), imm, vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                                arg_rri_esz *a)
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm);
}

/* Invoke a vector expander on three Zregs. */
static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int esz, int rd, int rn, int rm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn),
                vec_full_reg_offset(s, rm), vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn,
                                arg_rrr_esz *a)
{
    return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
}

/* Invoke a vector expander on four Zregs. */
static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
                                 arg_rrrr_esz *a)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn),
                vec_full_reg_offset(s, a->rm),
                vec_full_reg_offset(s, a->ra), vsz, vsz);
    }
    return true;
}

/* Invoke a vector move on two Zregs. */
static bool do_mov_z(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn), vsz, vsz);
    }
    return true;
}

/* Initialize a Zreg with replications of a 64-bit immediate. */
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}

/* Invoke a vector expander on three Pregs. */
static void gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int rd, int rn, int rm)
{
    unsigned psz = pred_gvec_reg_size(s);
    gvec_fn(MO_64, pred_full_reg_offset(s, rd),
            pred_full_reg_offset(s, rn),
            pred_full_reg_offset(s, rm), psz, psz);
}

/* Invoke a vector move on two Pregs. */
static bool do_mov_p(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
                         pred_full_reg_offset(s, rn), psz, psz);
    }
    return true;
}

/* Set the cpu flags as per a return from an SVE helper. */
static void do_pred_flags(TCGv_i32 t)
{
    tcg_gen_mov_i32(cpu_NF, t);
    tcg_gen_andi_i32(cpu_ZF, t, 2);
    tcg_gen_andi_i32(cpu_CF, t, 1);
    tcg_gen_movi_i32(cpu_VF, 0);
}

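/*
 * As consumed above, the helper result packs the flags as: bit 31
 * supplies N (the sign of NF), bit 1 supplies !Z (QEMU treats Z as
 * set when ZF == 0), bit 0 supplies C, and PredTest always gives V = 0.
 */
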
/* Subroutines computing the ARM PredTest pseudofunction. */
static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_helper_sve_predtest1(t, d, g);
    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
{
    TCGv_ptr dptr = tcg_temp_new_ptr();
    TCGv_ptr gptr = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
    tcg_gen_addi_ptr(gptr, cpu_env, gofs);

    gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words));
    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(gptr);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

/* For each element size, the bits within a predicate word that are active. */
const uint64_t pred_esz_masks[4] = {
    0xffffffffffffffffull, 0x5555555555555555ull,
    0x1111111111111111ull, 0x0101010101010101ull
};

/*
 *** SVE Logical - Unpredicated Group
 */

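/*
 * TRANS_FEAT (from translate.h) expands to a trans_<NAME> function
 * that checks dc_isar_feature(<FEAT>, s) and, when the feature is
 * present, tail-calls the given expander with the remaining arguments.
 */
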
TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a)
TRANS_FEAT(ORR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_or, a)
TRANS_FEAT(EOR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_xor, a)
TRANS_FEAT(BIC_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_andc, a)

static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_8, 0xff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 8 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_16, 0xffff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 16 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
{
    tcg_gen_xor_i32(d, n, m);
    tcg_gen_rotri_i32(d, d, sh);
}

static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_rotri_i64(d, d, sh);
}

static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                        TCGv_vec m, int64_t sh)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_rotri_vec(vece, d, d, sh);
}

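/*
 * The 8- and 16-bit forms above emulate a per-lane rotate on a 64-bit
 * value: the xor result is shifted right by sh and left by
 * lane_size - sh, each half masked so that no bits cross a lane
 * boundary, then the two halves are OR'd together.
 */
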
void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, int64_t shift,
                  uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3i ops[4] = {
        { .fni8 = gen_xar8_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fni8 = gen_xar16_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_xar_i32,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_xar_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_gvec_xar_d,
          .opt_opc = vecop,
          .vece = MO_64 }
    };
    int esize = 8 << vece;

    /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
    tcg_debug_assert(shift >= 0);
    tcg_debug_assert(shift <= esize);
    shift &= esize - 1;

    if (shift == 0) {
        /* xar with no rotate devolves to xor. */
        tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
    } else {
        tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
                        shift, &ops[vece]);
    }
}

static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
{
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
                     vec_full_reg_offset(s, a->rn),
                     vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
    }
    return true;
}

static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_xor_i64(d, d, k);
}

static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_xor_vec(vece, d, d, k);
}

static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor3_i64,
        .fniv = gen_eor3_vec,
        .fno = gen_helper_sve2_eor3,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(EOR3, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_eor3, a)

static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(d, m, k);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_andc_vec(vece, d, m, k);
    tcg_gen_xor_vec(vece, d, d, n);
}

static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bcax_i64,
        .fniv = gen_bcax_vec,
        .fno = gen_helper_sve2_bcax,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BCAX, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bcax, a)

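/* BCAX ("bit clear and exclusive-OR") computes Zd = Zn ^ (Zm & ~Zk). */
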
static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                    uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    /* BSL differs from the generic bitsel in argument ordering. */
    tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
}

TRANS_FEAT(BSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl, a)

static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(n, k, n);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_or_i64(d, n, m);
}

static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, n, n);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_andc_vec(vece, n, k, n);
        tcg_gen_andc_vec(vece, m, m, k);
        tcg_gen_or_vec(vece, d, n, m);
    }
}

static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl1n_i64,
        .fniv = gen_bsl1n_vec,
        .fno = gen_helper_sve2_bsl1n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BSL1N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl1n, a)

static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    /*
     * Z[dn] = (n & k) | (~m & ~k)
     *       = (n & k) | ~(m | k)
     */
    tcg_gen_and_i64(n, n, k);
    if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_or_i64(m, m, k);
        tcg_gen_orc_i64(d, n, m);
    } else {
        tcg_gen_nor_i64(m, m, k);
        tcg_gen_or_i64(d, n, m);
    }
}

static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, m, m);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_and_vec(vece, n, n, k);
        tcg_gen_or_vec(vece, m, m, k);
        tcg_gen_orc_vec(vece, d, n, m);
    }
}

static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl2n_i64,
        .fniv = gen_bsl2n_vec,
        .fno = gen_helper_sve2_bsl2n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BSL2N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl2n, a)

static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_and_i64(n, n, k);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_nor_i64(d, n, m);
}

static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_bitsel_vec(vece, d, k, n, m);
    tcg_gen_not_vec(vece, d, d);
}

static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_nbsl_i64,
        .fniv = gen_nbsl_vec,
        .fno = gen_helper_sve2_nbsl,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(NBSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_nbsl, a)

/*
 *** SVE Integer Arithmetic - Unpredicated Group
 */

TRANS_FEAT(ADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_add, a)
TRANS_FEAT(SUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sub, a)
TRANS_FEAT(SQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ssadd, a)
TRANS_FEAT(SQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sssub, a)
TRANS_FEAT(UQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_usadd, a)
TRANS_FEAT(UQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ussub, a)

/*
 *** SVE Integer Arithmetic - Binary Predicated Group
 */

/* Select active elements from Zn and inactive elements from Zm,
 * storing the result in Zd.
 */
static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
{
    static gen_helper_gvec_4 * const fns[4] = {
        gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
        gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d
    };
    return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
}

#define DO_ZPZZ(NAME, FEAT, name) \
    static gen_helper_gvec_4 * const name##_zpzz_fns[4] = {     \
        gen_helper_##name##_zpzz_b, gen_helper_##name##_zpzz_h, \
        gen_helper_##name##_zpzz_s, gen_helper_##name##_zpzz_d, \
    };                                                          \
    TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpzz,               \
               name##_zpzz_fns[a->esz], a, 0)

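/*
 * E.g. DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add) emits the table
 * sve_add_zpzz_fns[] of gen_helper_sve_add_zpzz_{b,h,s,d} and a
 * trans_ADD_zpzz that selects a helper by a->esz.
 */
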
DO_ZPZZ(AND_zpzz, aa64_sve, sve_and)
DO_ZPZZ(EOR_zpzz, aa64_sve, sve_eor)
DO_ZPZZ(ORR_zpzz, aa64_sve, sve_orr)
DO_ZPZZ(BIC_zpzz, aa64_sve, sve_bic)

DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add)
DO_ZPZZ(SUB_zpzz, aa64_sve, sve_sub)

DO_ZPZZ(SMAX_zpzz, aa64_sve, sve_smax)
DO_ZPZZ(UMAX_zpzz, aa64_sve, sve_umax)
DO_ZPZZ(SMIN_zpzz, aa64_sve, sve_smin)
DO_ZPZZ(UMIN_zpzz, aa64_sve, sve_umin)
DO_ZPZZ(SABD_zpzz, aa64_sve, sve_sabd)
DO_ZPZZ(UABD_zpzz, aa64_sve, sve_uabd)

DO_ZPZZ(MUL_zpzz, aa64_sve, sve_mul)
DO_ZPZZ(SMULH_zpzz, aa64_sve, sve_smulh)
DO_ZPZZ(UMULH_zpzz, aa64_sve, sve_umulh)

DO_ZPZZ(ASR_zpzz, aa64_sve, sve_asr)
DO_ZPZZ(LSR_zpzz, aa64_sve, sve_lsr)
DO_ZPZZ(LSL_zpzz, aa64_sve, sve_lsl)

static gen_helper_gvec_4 * const sdiv_fns[4] = {
    NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
};
TRANS_FEAT(SDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, sdiv_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const udiv_fns[4] = {
    NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
};
TRANS_FEAT(UDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, udiv_fns[a->esz], a, 0)

static bool trans_SEL_zpzz(DisasContext *s, arg_rprr_esz *a)
{
    return do_sel_z(s, a->rd, a->rn, a->rm, a->pg, a->esz);
}

/*
 *** SVE Integer Arithmetic - Unary Predicated Group
 */

#define DO_ZPZ(NAME, FEAT, name) \
    static gen_helper_gvec_3 * const name##_fns[4] = { \
        gen_helper_##name##_b, gen_helper_##name##_h,  \
        gen_helper_##name##_s, gen_helper_##name##_d,  \
    };                                                 \
    TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpz, name##_fns[a->esz], a, 0)

DO_ZPZ(CLS, aa64_sve, sve_cls)
DO_ZPZ(CLZ, aa64_sve, sve_clz)
DO_ZPZ(CNT_zpz, aa64_sve, sve_cnt_zpz)
DO_ZPZ(CNOT, aa64_sve, sve_cnot)
DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz)
DO_ZPZ(ABS, aa64_sve, sve_abs)
DO_ZPZ(NEG, aa64_sve, sve_neg)
DO_ZPZ(RBIT, aa64_sve, sve_rbit)

static gen_helper_gvec_3 * const fabs_fns[4] = {
    NULL, gen_helper_sve_fabs_h,
    gen_helper_sve_fabs_s, gen_helper_sve_fabs_d,
};
TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const fneg_fns[4] = {
    NULL, gen_helper_sve_fneg_h,
    gen_helper_sve_fneg_s, gen_helper_sve_fneg_d,
};
TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sxtb_fns[4] = {
    NULL, gen_helper_sve_sxtb_h,
    gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d,
};
TRANS_FEAT(SXTB, aa64_sve, gen_gvec_ool_arg_zpz, sxtb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const uxtb_fns[4] = {
    NULL, gen_helper_sve_uxtb_h,
    gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d,
};
TRANS_FEAT(UXTB, aa64_sve, gen_gvec_ool_arg_zpz, uxtb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sxth_fns[4] = {
    NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d
};
TRANS_FEAT(SXTH, aa64_sve, gen_gvec_ool_arg_zpz, sxth_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const uxth_fns[4] = {
    NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d
};
TRANS_FEAT(UXTH, aa64_sve, gen_gvec_ool_arg_zpz, uxth_fns[a->esz], a, 0)

TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_sxtw_d : NULL, a, 0)
TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0)

/*
 *** SVE Integer Reduction Group
 */

typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);
static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
                       gen_helper_gvec_reduc *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zn, t_pg;
    TCGv_i32 desc;
    TCGv_i64 temp;

    if (fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    fn(temp, t_zn, t_pg, desc);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);

    write_fp_dreg(s, a->rd, temp);
    tcg_temp_free_i64(temp);
    return true;
}

#define DO_VPZ(NAME, name) \
    static gen_helper_gvec_reduc * const name##_fns[4] = {    \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
    };                                                        \
    TRANS_FEAT(NAME, aa64_sve, do_vpz_ool, a, name##_fns[a->esz])

DO_VPZ(ORV, orv)
DO_VPZ(ANDV, andv)
DO_VPZ(EORV, eorv)

DO_VPZ(UADDV, uaddv)
DO_VPZ(SMAXV, smaxv)
DO_VPZ(UMAXV, umaxv)
DO_VPZ(SMINV, sminv)
DO_VPZ(UMINV, uminv)

static gen_helper_gvec_reduc * const saddv_fns[4] = {
    gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
    gen_helper_sve_saddv_s, NULL
};
TRANS_FEAT(SADDV, aa64_sve, do_vpz_ool, a, saddv_fns[a->esz])

#undef DO_VPZ

/*
 *** SVE Shift by Immediate - Predicated Group
 */

/*
 * Copy Zn into Zd, storing zeros into inactive elements.
 * If invert, store zeros into the active elements.
 */
static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
                        int esz, bool invert)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_movz_b, gen_helper_sve_movz_h,
        gen_helper_sve_movz_s, gen_helper_sve_movz_d,
    };
    return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
}

static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr,
                          gen_helper_gvec_3 * const fns[4])
{
    int max;

    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }

    /*
     * Shift by element size is architecturally valid.
     * For arithmetic right-shift, it's the same as by one less.
     * For logical shifts and ASRD, it is a zeroing operation.
     */
    max = 8 << a->esz;
    if (a->imm >= max) {
        if (asr) {
            a->imm = max - 1;
        } else {
            return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
        }
    }
    return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a);
}

static gen_helper_gvec_3 * const asr_zpzi_fns[4] = {
    gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h,
    gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d,
};
TRANS_FEAT(ASR_zpzi, aa64_sve, do_shift_zpzi, a, true, asr_zpzi_fns)

static gen_helper_gvec_3 * const lsr_zpzi_fns[4] = {
    gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h,
    gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d,
};
TRANS_FEAT(LSR_zpzi, aa64_sve, do_shift_zpzi, a, false, lsr_zpzi_fns)

static gen_helper_gvec_3 * const lsl_zpzi_fns[4] = {
    gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h,
    gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d,
};
TRANS_FEAT(LSL_zpzi, aa64_sve, do_shift_zpzi, a, false, lsl_zpzi_fns)

static gen_helper_gvec_3 * const asrd_fns[4] = {
    gen_helper_sve_asrd_b, gen_helper_sve_asrd_h,
    gen_helper_sve_asrd_s, gen_helper_sve_asrd_d,
};
TRANS_FEAT(ASRD, aa64_sve, do_shift_zpzi, a, false, asrd_fns)

static gen_helper_gvec_3 * const sqshl_zpzi_fns[4] = {
    gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
    gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
};
TRANS_FEAT(SQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : sqshl_zpzi_fns[a->esz], a)

static gen_helper_gvec_3 * const uqshl_zpzi_fns[4] = {
    gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
    gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
};
TRANS_FEAT(UQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : uqshl_zpzi_fns[a->esz], a)

static gen_helper_gvec_3 * const srshr_fns[4] = {
    gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
    gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
};
TRANS_FEAT(SRSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : srshr_fns[a->esz], a)

static gen_helper_gvec_3 * const urshr_fns[4] = {
    gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
    gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
};
TRANS_FEAT(URSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : urshr_fns[a->esz], a)

static gen_helper_gvec_3 * const sqshlu_fns[4] = {
    gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
    gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
};
TRANS_FEAT(SQSHLU, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : sqshlu_fns[a->esz], a)

/*
 *** SVE Bitwise Shift - Predicated Group
 */

#define DO_ZPZW(NAME, name) \
    static gen_helper_gvec_4 * const name##_zpzw_fns[4] = {         \
        gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h, \
        gen_helper_sve_##name##_zpzw_s, NULL                        \
    };                                                              \
    TRANS_FEAT(NAME##_zpzw, aa64_sve, gen_gvec_ool_arg_zpzz,        \
               a->esz < 0 ? NULL : name##_zpzw_fns[a->esz], a, 0)

DO_ZPZW(ASR, asr)
DO_ZPZW(LSR, lsr)
DO_ZPZW(LSL, lsl)

#undef DO_ZPZW

/*
 *** SVE Bitwise Shift - Unpredicated Group
 */

static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
                         void (*gvec_fn)(unsigned, uint32_t, uint32_t,
                                         int64_t, uint32_t, uint32_t))
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        /* Shift by element size is architecturally valid.  For
           arithmetic right-shift, it's the same as by one less.
           Otherwise it is a zeroing operation.  */
        if (a->imm >= 8 << a->esz) {
            if (asr) {
                a->imm = (8 << a->esz) - 1;
            } else {
                do_dupi_z(s, a->rd, 0);
                return true;
            }
        }
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
    }
    return true;
}

TRANS_FEAT(ASR_zzi, aa64_sve, do_shift_imm, a, true, tcg_gen_gvec_sari)
TRANS_FEAT(LSR_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shri)
TRANS_FEAT(LSL_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shli)

#define DO_ZZW(NAME, name) \
    static gen_helper_gvec_3 * const name##_zzw_fns[4] = {        \
        gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h, \
        gen_helper_sve_##name##_zzw_s, NULL                       \
    };                                                            \
    TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_arg_zzz,              \
               name##_zzw_fns[a->esz], a, 0)

DO_ZZW(ASR_zzw, asr)
DO_ZZW(LSR_zzw, lsr)
DO_ZZW(LSL_zzw, lsl)

#undef DO_ZZW

/*
 *** SVE Integer Multiply-Add Group
 */

static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
                         gen_helper_gvec_5 *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->ra),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           vsz, vsz, 0, fn);
    }
    return true;
}

static gen_helper_gvec_5 * const mla_fns[4] = {
    gen_helper_sve_mla_b, gen_helper_sve_mla_h,
    gen_helper_sve_mla_s, gen_helper_sve_mla_d,
};
TRANS_FEAT(MLA, aa64_sve, do_zpzzz_ool, a, mla_fns[a->esz])

static gen_helper_gvec_5 * const mls_fns[4] = {
    gen_helper_sve_mls_b, gen_helper_sve_mls_h,
    gen_helper_sve_mls_s, gen_helper_sve_mls_d,
};
TRANS_FEAT(MLS, aa64_sve, do_zpzzz_ool, a, mls_fns[a->esz])

/*
 *** SVE Index Generation Group
 */

static bool do_index(DisasContext *s, int esz, int rd,
                     TCGv_i64 start, TCGv_i64 incr)
{
    unsigned vsz;
    TCGv_i32 desc;
    TCGv_ptr t_zd;

    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    t_zd = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    if (esz == 3) {
        gen_helper_sve_index_d(t_zd, start, incr, desc);
    } else {
        typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
        static index_fn * const fns[3] = {
            gen_helper_sve_index_b,
            gen_helper_sve_index_h,
            gen_helper_sve_index_s,
        };
        TCGv_i32 s32 = tcg_temp_new_i32();
        TCGv_i32 i32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(s32, start);
        tcg_gen_extrl_i64_i32(i32, incr);
        fns[esz](t_zd, s32, i32, desc);

        tcg_temp_free_i32(s32);
        tcg_temp_free_i32(i32);
    }
    tcg_temp_free_ptr(t_zd);
    return true;
}

TRANS_FEAT(INDEX_ii, aa64_sve, do_index, a->esz, a->rd,
           tcg_constant_i64(a->imm1), tcg_constant_i64(a->imm2))
TRANS_FEAT(INDEX_ir, aa64_sve, do_index, a->esz, a->rd,
           tcg_constant_i64(a->imm), cpu_reg(s, a->rm))
TRANS_FEAT(INDEX_ri, aa64_sve, do_index, a->esz, a->rd,
           cpu_reg(s, a->rn), tcg_constant_i64(a->imm))
TRANS_FEAT(INDEX_rr, aa64_sve, do_index, a->esz, a->rd,
           cpu_reg(s, a->rn), cpu_reg(s, a->rm))

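/*
 * The helpers store start + i * incr to element i.  E.g. INDEX Zd.S,
 * #1, #2 with a 256-bit vector yields 1, 3, 5, 7, 9, 11, 13, 15.
 */
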
/*
 *** SVE Stack Allocation Group
 */

static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
    }
    return true;
}

static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
    }
    return true;
}

static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
    }
    return true;
}

/*
 *** SVE Compute Vector Address Group
 */

static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
}

TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)

/*
 *** SVE Integer Misc - Unpredicated Group
 */

static gen_helper_gvec_2 * const fexpa_fns[4] = {
    NULL, gen_helper_sve_fexpa_h,
    gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
};
TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz,
           fexpa_fns[a->esz], a->rd, a->rn, 0)

static gen_helper_gvec_3 * const ftssel_fns[4] = {
    NULL, gen_helper_sve_ftssel_h,
    gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
};
TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0)

/*
 *** SVE Predicate Logical Operations Group
 */

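/*
 * For the flag-setting forms below, note that pred_gvec_reg_size == 8
 * means the whole predicate register fits in one 64-bit word (a vector
 * length of at most 512 bits), so the operation and the PredTest can
 * both be done inline on i64 temps.
 */
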
static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
                          const GVecGen4 *gvec_op)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned psz = pred_gvec_reg_size(s);
    int dofs = pred_full_reg_offset(s, a->rd);
    int nofs = pred_full_reg_offset(s, a->rn);
    int mofs = pred_full_reg_offset(s, a->rm);
    int gofs = pred_full_reg_offset(s, a->pg);

    if (!a->s) {
        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        return true;
    }

    if (psz == 8) {
        /* Do the operation and the flags generation in temps. */
        TCGv_i64 pd = tcg_temp_new_i64();
        TCGv_i64 pn = tcg_temp_new_i64();
        TCGv_i64 pm = tcg_temp_new_i64();
        TCGv_i64 pg = tcg_temp_new_i64();

        tcg_gen_ld_i64(pn, cpu_env, nofs);
        tcg_gen_ld_i64(pm, cpu_env, mofs);
        tcg_gen_ld_i64(pg, cpu_env, gofs);

        gvec_op->fni8(pd, pn, pm, pg);
        tcg_gen_st_i64(pd, cpu_env, dofs);

        do_predtest1(pd, pg);

        tcg_temp_free_i64(pd);
        tcg_temp_free_i64(pn);
        tcg_temp_free_i64(pm);
        tcg_temp_free_i64(pg);
    } else {
        /* The operation and flags generation is large.  The computation
         * of the flags depends on the original contents of the guarding
         * predicate.  If the destination overwrites the guarding predicate,
         * then the easiest way to get this right is to save a copy.
         */
        int tofs = gofs;
        if (a->rd == a->pg) {
            tofs = offsetof(CPUARMState, vfp.preg_tmp);
            tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
        }

        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        do_predtest(s, dofs, tofs, psz / 8);
    }
    return true;
}

static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_and_pg_i64,
        .fniv = gen_and_pg_vec,
        .fno = gen_helper_sve_and_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s) {
        if (!sve_access_check(s)) {
            return true;
        }
        if (a->rn == a->rm) {
            if (a->pg == a->rn) {
                do_mov_p(s, a->rd, a->rn);
            } else {
                gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
            }
            return true;
        } else if (a->pg == a->rn || a->pg == a->rm) {
            gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
            return true;
        }
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_andc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_andc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_bic_pg_i64,
        .fniv = gen_bic_pg_vec,
        .fno = gen_helper_sve_bic_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s && a->pg == a->rn) {
        if (sve_access_check(s)) {
            gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
        }
        return true;
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_xor_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_xor_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor_pg_i64,
        .fniv = gen_eor_pg_vec,
        .fno = gen_helper_sve_eor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}

static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
{
    if (a->s) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
                            pred_full_reg_offset(s, a->pg),
                            pred_full_reg_offset(s, a->rn),
                            pred_full_reg_offset(s, a->rm), psz, psz);
    }
    return true;
}

static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orr_pg_i64,
        .fniv = gen_orr_pg_vec,
        .fno = gen_helper_sve_orr_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s && a->pg == a->rn && a->rn == a->rm) {
        return do_mov_p(s, a->rd, a->rn);
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_orc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_orc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orn_pg_i64,
        .fniv = gen_orn_pg_vec,
        .fno = gen_helper_sve_orn_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}

static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nor_pg_i64,
        .fniv = gen_nor_pg_vec,
        .fno = gen_helper_sve_nor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}

static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                            TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nand_pg_i64,
        .fniv = gen_nand_pg_vec,
        .fno = gen_helper_sve_nand_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}

/*
 *** SVE Predicate Misc Group
 */

static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
{
    if (sve_access_check(s)) {
        int nofs = pred_full_reg_offset(s, a->rn);
        int gofs = pred_full_reg_offset(s, a->pg);
        int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);

        if (words == 1) {
            TCGv_i64 pn = tcg_temp_new_i64();
            TCGv_i64 pg = tcg_temp_new_i64();

            tcg_gen_ld_i64(pn, cpu_env, nofs);
            tcg_gen_ld_i64(pg, cpu_env, gofs);
            do_predtest1(pn, pg);

            tcg_temp_free_i64(pn);
            tcg_temp_free_i64(pg);
        } else {
            do_predtest(s, nofs, gofs, words);
        }
    }
    return true;
}

/* See the ARM pseudocode DecodePredCount. */
static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
{
    unsigned elements = fullsz >> esz;
    unsigned bound;

    switch (pattern) {
    case 0x0: /* POW2 */
        return pow2floor(elements);
    case 0x1: /* VL1 */
    case 0x2: /* VL2 */
    case 0x3: /* VL3 */
    case 0x4: /* VL4 */
    case 0x5: /* VL5 */
    case 0x6: /* VL6 */
    case 0x7: /* VL7 */
    case 0x8: /* VL8 */
        bound = pattern;
        break;
    case 0x9: /* VL16 */
    case 0xa: /* VL32 */
    case 0xb: /* VL64 */
    case 0xc: /* VL128 */
    case 0xd: /* VL256 */
        bound = 16 << (pattern - 9);
        break;
    case 0x1d: /* MUL4 */
        return elements - elements % 4;
    case 0x1e: /* MUL3 */
        return elements - elements % 3;
    case 0x1f: /* ALL */
        return elements;
    default:   /* #uimm5 */
        return 0;
    }
    return elements >= bound ? bound : 0;
}

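/*
 * E.g. with a 256-bit vector and esz == MO_32 there are 8 elements:
 * POW2 and VL8 both return 8, MUL3 returns 6, and VL16 returns 0
 * because the bound exceeds the 8 available elements.
 */
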
/* This handles all of the predicate initialization instructions,
 * PTRUE, PFALSE, SETFFR.  For PFALSE, we will have set PAT == 32
 * so that decode_pred_count returns 0.  For SETFFR, we will have
 * set RD == 16 == FFR.
 */
static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned ofs = pred_full_reg_offset(s, rd);
    unsigned numelem, setsz, i;
    uint64_t word, lastword;
    TCGv_i64 t;

    numelem = decode_pred_count(fullsz, pat, esz);

    /* Determine what we must store into each bit, and how many. */
    if (numelem == 0) {
        lastword = word = 0;
        setsz = fullsz;
    } else {
        setsz = numelem << esz;
        lastword = word = pred_esz_masks[esz];
        if (setsz % 64) {
            lastword &= MAKE_64BIT_MASK(0, setsz % 64);
        }
    }

    t = tcg_temp_new_i64();
    if (fullsz <= 64) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs);
        goto done;
    }

    if (word == lastword) {
        unsigned maxsz = size_for_gvec(fullsz / 8);
        unsigned oprsz = size_for_gvec(setsz / 8);

        if (oprsz * 8 == setsz) {
            tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
            goto done;
        }
    }

    setsz /= 8;
    fullsz /= 8;

    tcg_gen_movi_i64(t, word);
    for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
        tcg_gen_st_i64(t, cpu_env, ofs + i);
    }
    if (lastword != word) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs + i);
        i += 8;
    }
    if (i < fullsz) {
        tcg_gen_movi_i64(t, 0);
        for (; i < fullsz; i += 8) {
            tcg_gen_st_i64(t, cpu_env, ofs + i);
        }
    }

 done:
    tcg_temp_free_i64(t);

    /* PTRUES */
    if (setflag) {
        tcg_gen_movi_i32(cpu_NF, -(word != 0));
        tcg_gen_movi_i32(cpu_CF, word == 0);
        tcg_gen_movi_i32(cpu_VF, 0);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    }
    return true;
}

TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)

/* Note pat == 31 is #all, to set all elements. */
TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false)

/* Note pat == 32 is #unimp, to set no elements. */
TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)

static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
{
    /* The path through do_pppp_flags is complicated enough to want to avoid
     * duplication.  Frob the arguments into the form of a predicated AND.
     */
    arg_rprr_s alt_a = {
        .rd = a->rd, .pg = a->pg, .s = a->s,
        .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
    };
    return trans_AND_pppp(s, &alt_a);
}

TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)

static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                            void (*gen_fn)(TCGv_i32, TCGv_ptr,
                                           TCGv_ptr, TCGv_i32))
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGv_ptr t_pd = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();
    TCGv_i32 t;
    unsigned desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);

    tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
    t = tcg_temp_new_i32();

    gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc));
    tcg_temp_free_ptr(t_pd);
    tcg_temp_free_ptr(t_pg);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
    return true;
}

TRANS_FEAT(PFIRST, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pfirst)
TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext)

/*
 *** SVE Element Count Group
 */

/* Perform an inline saturating addition of a 32-bit value within
 * a 64-bit register.  The second operand is known to be positive,
 * which halves the comparisons we must perform to bound the result.
 */
static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    int64_t ibound;

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (u) {
        tcg_gen_ext32u_i64(reg, reg);
    } else {
        tcg_gen_ext32s_i64(reg, reg);
    }
    if (d) {
        tcg_gen_sub_i64(reg, reg, val);
        ibound = (u ? 0 : INT32_MIN);
        tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound));
    } else {
        tcg_gen_add_i64(reg, reg, val);
        ibound = (u ? UINT32_MAX : INT32_MAX);
        tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound));
    }
}

/* Similarly with 64-bit values. */
static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t2;

    if (u) {
        if (d) {
            tcg_gen_sub_i64(t0, reg, val);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0);
        } else {
            tcg_gen_add_i64(t0, reg, val);
            t2 = tcg_constant_i64(-1);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0);
        }
    } else {
        TCGv_i64 t1 = tcg_temp_new_i64();
        if (d) {
            /* Detect signed overflow for subtraction. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_sub_i64(t1, reg, val);
            tcg_gen_xor_i64(reg, reg, t1);
            tcg_gen_and_i64(t0, t0, reg);

            /* Bound the result. */
            tcg_gen_movi_i64(reg, INT64_MIN);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
        } else {
            /* Detect signed overflow for addition. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_add_i64(reg, reg, val);
            tcg_gen_xor_i64(t1, reg, val);
            tcg_gen_andc_i64(t0, t1, t0);

            /* Bound the result. */
            tcg_gen_movi_i64(t1, INT64_MAX);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
        }
        tcg_temp_free_i64(t1);
    }
    tcg_temp_free_i64(t0);
}

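/*
 * In the unsigned cases above, the movcond selects the saturated bound
 * when unsigned comparison detects wraparound: for subtraction,
 * reg < val means the result would underflow below zero; for addition,
 * t0 < reg means the sum wrapped past UINT64_MAX.
 */
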
/* Similarly with a vector and a scalar operand. */
static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
                              TCGv_i64 val, bool u, bool d)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr dptr, nptr;
    TCGv_i32 t32, desc;
    TCGv_i64 t64;

    dptr = tcg_temp_new_ptr();
    nptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
    tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));

    switch (esz) {
    case MO_8:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;

    case MO_16:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;

    case MO_32:
        t64 = tcg_temp_new_i64();
        if (d) {
            tcg_gen_neg_i64(t64, val);
        } else {
            tcg_gen_mov_i64(t64, val);
        }
        if (u) {
            gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc);
        } else {
            gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc);
        }
        tcg_temp_free_i64(t64);
        break;

    case MO_64:
        if (u) {
            if (d) {
                gen_helper_sve_uqsubi_d(dptr, nptr, val, desc);
            } else {
                gen_helper_sve_uqaddi_d(dptr, nptr, val, desc);
            }
        } else if (d) {
            t64 = tcg_temp_new_i64();
            tcg_gen_neg_i64(t64, val);
            gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc);
            tcg_temp_free_i64(t64);
        } else {
            gen_helper_sve_sqaddi_d(dptr, nptr, val, desc);
        }
        break;

    default:
        g_assert_not_reached();
    }

    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(nptr);
}

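/*
 * Only the unsigned 64-bit case has a dedicated subtract helper;
 * everywhere else subtraction is performed by negating the scalar
 * operand and reusing the matching saturating-add helper.
 */
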
static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a)
{
    if (sve_access_check(s)) {
        unsigned fullsz = vec_full_reg_size(s);
        unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
        tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm);
    }
    return true;
}

static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a)
{
    if (sve_access_check(s)) {
        unsigned fullsz = vec_full_reg_size(s);
        unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
        int inc = numelem * a->imm * (a->d ? -1 : 1);
        TCGv_i64 reg = cpu_reg(s, a->rd);

        tcg_gen_addi_i64(reg, reg, inc);
    }
    return true;
}

static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;
    TCGv_i64 reg = cpu_reg(s, a->rd);

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (inc == 0) {
        if (a->u) {
            tcg_gen_ext32u_i64(reg, reg);
        } else {
            tcg_gen_ext32s_i64(reg, reg);
        }
    } else {
        do_sat_addsub_32(reg, tcg_constant_i64(inc), a->u, a->d);
    }
    return true;
}

static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;
    TCGv_i64 reg = cpu_reg(s, a->rd);

    if (inc != 0) {
        do_sat_addsub_64(reg, tcg_constant_i64(inc), a->u, a->d);
    }
    return true;
}

static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
{
    if (a->esz == 0) {
        return false;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;

    if (inc != 0) {
        if (sve_access_check(s)) {
            tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd),
                              vec_full_reg_offset(s, a->rn),
                              tcg_constant_i64(a->d ? -inc : inc),
                              fullsz, fullsz);
        }
    } else {
        do_mov_z(s, a->rd, a->rn);
    }
    return true;
}

static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
{
    if (a->esz == 0) {
        return false;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;

    if (inc != 0) {
        if (sve_access_check(s)) {
            do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
                              tcg_constant_i64(inc), a->u, a->d);
        }
    } else {
        do_mov_z(s, a->rd, a->rn);
    }
    return true;
}

/*
 *** SVE Bitwise Immediate Group
 */

static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn)
{
    uint64_t imm;

    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm);
}

TRANS_FEAT(AND_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_andi)
TRANS_FEAT(ORR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_ori)
TRANS_FEAT(EOR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_xori)

static bool trans_DUPM(DisasContext *s, arg_DUPM *a)
{
    uint64_t imm;

    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    if (sve_access_check(s)) {
        do_dupi_z(s, a->rd, imm);
    }
    return true;
}

/*
 *** SVE Integer Wide Immediate - Predicated Group
 */

/* Implement all merging copies.  This is used for CPY (immediate),
 * FCPY, CPY (scalar), CPY (SIMD&FP scalar).
 */
static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg,
                     TCGv_i64 val)
{
    typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
    static gen_cpy * const fns[4] = {
        gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h,
        gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d,
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    TCGv_ptr t_zd = tcg_temp_new_ptr();
    TCGv_ptr t_zn = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));

    fns[esz](t_zd, t_zn, t_pg, val, desc);

    tcg_temp_free_ptr(t_zd);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);
}

static bool trans_FCPY(DisasContext *s, arg_FCPY *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        /* Decode the VFP immediate. */
        uint64_t imm = vfp_expand_imm(a->esz, a->imm);
        do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm));
    }
    return true;
}

static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a)
{
    if (a->esz == 0 && extract32(s->insn, 13, 1)) {
        return false;
    }
    if (sve_access_check(s)) {
        do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm));
    }
    return true;
}

static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a)
{
    static gen_helper_gvec_2i * const fns[4] = {
        gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h,
        gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d,
    };

    if (a->esz == 0 && extract32(s->insn, 13, 1)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
                            pred_full_reg_offset(s, a->pg),
                            tcg_constant_i64(a->imm),
                            vsz, vsz, 0, fns[a->esz]);
    }
    return true;
}

2051 *** SVE Permute Extract Group
2054 static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
2056 if (!sve_access_check(s)) {
2057 return true;
2060 unsigned vsz = vec_full_reg_size(s);
2061 unsigned n_ofs = imm >= vsz ? 0 : imm;
2062 unsigned n_siz = vsz - n_ofs;
2063 unsigned d = vec_full_reg_offset(s, rd);
2064 unsigned n = vec_full_reg_offset(s, rn);
2065 unsigned m = vec_full_reg_offset(s, rm);
2067 /* Use host vector move insns if we have appropriate sizes
2068 * and no unfortunate overlap.
2069 */
2070 if (m != d
2071 && n_ofs == size_for_gvec(n_ofs)
2072 && n_siz == size_for_gvec(n_siz)
2073 && (d != n || n_siz <= n_ofs)) {
2074 tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz);
2075 if (n_ofs != 0) {
2076 tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs);
2078 } else {
2079 tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext);
2080 }
2081 return true;
2082 }
2084 TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm)
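/*
 * The SVE2 constructive form of EXT takes its second source from the
 * next-numbered vector register, wrapping modulo 32.
 */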
2085 TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm)
2087 /*
2088 *** SVE Permute - Unpredicated Group
2089 */
2091 static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a)
2093 if (sve_access_check(s)) {
2094 unsigned vsz = vec_full_reg_size(s);
2095 tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd),
2096 vsz, vsz, cpu_reg_sp(s, a->rn));
2097 }
2098 return true;
2099 }
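/*
 * For DUP (indexed), the immediate encodes both the element size and
 * the index: the trailing zero count selects the size, and the
 * remaining upper bits form the index.
 */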
2101 static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
2103 if ((a->imm & 0x1f) == 0) {
2104 return false;
2106 if (sve_access_check(s)) {
2107 unsigned vsz = vec_full_reg_size(s);
2108 unsigned dofs = vec_full_reg_offset(s, a->rd);
2109 unsigned esz, index;
2111 esz = ctz32(a->imm);
2112 index = a->imm >> (esz + 1);
2114 if ((index << esz) < vsz) {
2115 unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
2116 tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
2117 } else {
2118 /*
2119 * While dup_mem handles 128-bit elements, dup_imm does not.
2120 * Thankfully element size doesn't matter for splatting zero.
2121 */
2122 tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
2125 return true;
2128 static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
2130 typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
2131 static gen_insr * const fns[4] = {
2132 gen_helper_sve_insr_b, gen_helper_sve_insr_h,
2133 gen_helper_sve_insr_s, gen_helper_sve_insr_d,
2135 unsigned vsz = vec_full_reg_size(s);
2136 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
2137 TCGv_ptr t_zd = tcg_temp_new_ptr();
2138 TCGv_ptr t_zn = tcg_temp_new_ptr();
2140 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
2141 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
2143 fns[a->esz](t_zd, t_zn, val, desc);
2145 tcg_temp_free_ptr(t_zd);
2146 tcg_temp_free_ptr(t_zn);
2149 static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
2151 if (sve_access_check(s)) {
2152 TCGv_i64 t = tcg_temp_new_i64();
2153 tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
2154 do_insr_i64(s, a, t);
2155 tcg_temp_free_i64(t);
2157 return true;
2160 static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a)
2162 if (sve_access_check(s)) {
2163 do_insr_i64(s, a, cpu_reg(s, a->rm));
2165 return true;
2168 static gen_helper_gvec_2 * const rev_fns[4] = {
2169 gen_helper_sve_rev_b, gen_helper_sve_rev_h,
2170 gen_helper_sve_rev_s, gen_helper_sve_rev_d
2172 TRANS_FEAT(REV_v, aa64_sve, gen_gvec_ool_zz, rev_fns[a->esz], a->rd, a->rn, 0)
2174 static gen_helper_gvec_3 * const sve_tbl_fns[4] = {
2175 gen_helper_sve_tbl_b, gen_helper_sve_tbl_h,
2176 gen_helper_sve_tbl_s, gen_helper_sve_tbl_d
2178 TRANS_FEAT(TBL, aa64_sve, gen_gvec_ool_arg_zzz, sve_tbl_fns[a->esz], a, 0)
2180 static gen_helper_gvec_4 * const sve2_tbl_fns[4] = {
2181 gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h,
2182 gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d
2184 TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz],
2185 a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0)
2187 static gen_helper_gvec_3 * const tbx_fns[4] = {
2188 gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
2189 gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
2191 TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0)
2193 static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
2195 static gen_helper_gvec_2 * const fns[4][2] = {
2196 { NULL, NULL },
2197 { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h },
2198 { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s },
2199 { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d },
2202 if (a->esz == 0) {
2203 return false;
2205 if (sve_access_check(s)) {
2206 unsigned vsz = vec_full_reg_size(s);
2207 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
2208 vec_full_reg_offset(s, a->rn)
2209 + (a->h ? vsz / 2 : 0),
2210 vsz, vsz, 0, fns[a->esz][a->u]);
2211 }
2212 return true;
2213 }
2215 /*
2216 *** SVE Permute - Predicates Group
2217 */
2219 static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
2220 gen_helper_gvec_3 *fn)
2222 if (!sve_access_check(s)) {
2223 return true;
2226 unsigned vsz = pred_full_reg_size(s);
2228 TCGv_ptr t_d = tcg_temp_new_ptr();
2229 TCGv_ptr t_n = tcg_temp_new_ptr();
2230 TCGv_ptr t_m = tcg_temp_new_ptr();
2231 uint32_t desc = 0;
2233 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
2234 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
2235 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
2237 tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
2238 tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
2239 tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));
2241 fn(t_d, t_n, t_m, tcg_constant_i32(desc));
2243 tcg_temp_free_ptr(t_d);
2244 tcg_temp_free_ptr(t_n);
2245 tcg_temp_free_ptr(t_m);
2246 return true;
2249 static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
2250 gen_helper_gvec_2 *fn)
2252 if (!sve_access_check(s)) {
2253 return true;
2256 unsigned vsz = pred_full_reg_size(s);
2257 TCGv_ptr t_d = tcg_temp_new_ptr();
2258 TCGv_ptr t_n = tcg_temp_new_ptr();
2259 uint32_t desc = 0;
2261 tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
2262 tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
2264 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
2265 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
2266 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
2268 fn(t_d, t_n, tcg_constant_i32(desc));
2270 tcg_temp_free_ptr(t_d);
2271 tcg_temp_free_ptr(t_n);
2272 return true;
2275 TRANS_FEAT(ZIP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_zip_p)
2276 TRANS_FEAT(ZIP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_zip_p)
2277 TRANS_FEAT(UZP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_uzp_p)
2278 TRANS_FEAT(UZP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_uzp_p)
2279 TRANS_FEAT(TRN1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_trn_p)
2280 TRANS_FEAT(TRN2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_trn_p)
2282 TRANS_FEAT(REV_p, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_rev_p)
2283 TRANS_FEAT(PUNPKLO, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_punpk_p)
2284 TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p)
2286 /*
2287 *** SVE Permute - Interleaving Group
2288 */
2290 static gen_helper_gvec_3 * const zip_fns[4] = {
2291 gen_helper_sve_zip_b, gen_helper_sve_zip_h,
2292 gen_helper_sve_zip_s, gen_helper_sve_zip_d,
2294 TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2295 zip_fns[a->esz], a, 0)
2296 TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2297 zip_fns[a->esz], a, vec_full_reg_size(s) / 2)
2299 TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2300 gen_helper_sve2_zip_q, a, 0)
2301 TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2302 gen_helper_sve2_zip_q, a,
2303 QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2)
2305 static gen_helper_gvec_3 * const uzp_fns[4] = {
2306 gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
2307 gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
2310 TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2311 uzp_fns[a->esz], a, 0)
2312 TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2313 uzp_fns[a->esz], a, 1 << a->esz)
2315 TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2316 gen_helper_sve2_uzp_q, a, 0)
2317 TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2318 gen_helper_sve2_uzp_q, a, 16)
2320 static gen_helper_gvec_3 * const trn_fns[4] = {
2321 gen_helper_sve_trn_b, gen_helper_sve_trn_h,
2322 gen_helper_sve_trn_s, gen_helper_sve_trn_d,
2325 TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2326 trn_fns[a->esz], a, 0)
2327 TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2328 trn_fns[a->esz], a, 1 << a->esz)
2330 TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2331 gen_helper_sve2_trn_q, a, 0)
2332 TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2333 gen_helper_sve2_trn_q, a, 16)
2335 /*
2336 *** SVE Permute Vector - Predicated Group
2337 */
2339 static gen_helper_gvec_3 * const compact_fns[4] = {
2340 NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
2342 TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0)
2344 /* Call the helper that computes the ARM LastActiveElement pseudocode
2345 * function, scaled by the element size. This includes the not found
2346 * indication; e.g. not found for esz=3 is -8.
2347 */
2348 static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
2350 /* Predicate sizes may be smaller and cannot use simd_desc. We cannot
2351 * round up, as we do elsewhere, because we need the exact size.
2352 */
2353 TCGv_ptr t_p = tcg_temp_new_ptr();
2354 unsigned desc = 0;
2356 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
2357 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
2359 tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));
2361 gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc));
2363 tcg_temp_free_ptr(t_p);
2366 /* Increment LAST to the offset of the next element in the vector,
2367 * wrapping around to 0.
2368 */
2369 static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz)
2371 unsigned vsz = vec_full_reg_size(s);
2373 tcg_gen_addi_i32(last, last, 1 << esz);
2374 if (is_power_of_2(vsz)) {
2375 tcg_gen_andi_i32(last, last, vsz - 1);
2376 } else {
2377 TCGv_i32 max = tcg_constant_i32(vsz);
2378 TCGv_i32 zero = tcg_constant_i32(0);
2379 tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last);
2380 }
2381 }
2383 /* If LAST < 0, set LAST to the offset of the last element in the vector. */
2384 static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz)
2386 unsigned vsz = vec_full_reg_size(s);
2388 if (is_power_of_2(vsz)) {
2389 tcg_gen_andi_i32(last, last, vsz - 1);
2390 } else {
2391 TCGv_i32 max = tcg_constant_i32(vsz - (1 << esz));
2392 TCGv_i32 zero = tcg_constant_i32(0);
2393 tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last);
2394 }
2395 }
2397 /* Load an unsigned element of ESZ from BASE+OFS. */
2398 static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz)
2400 TCGv_i64 r = tcg_temp_new_i64();
2402 switch (esz) {
2403 case 0:
2404 tcg_gen_ld8u_i64(r, base, ofs);
2405 break;
2406 case 1:
2407 tcg_gen_ld16u_i64(r, base, ofs);
2408 break;
2409 case 2:
2410 tcg_gen_ld32u_i64(r, base, ofs);
2411 break;
2412 case 3:
2413 tcg_gen_ld_i64(r, base, ofs);
2414 break;
2415 default:
2416 g_assert_not_reached();
2417 }
2418 return r;
2421 /* Load an unsigned element of ESZ from RM[LAST]. */
2422 static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
2423 int rm, int esz)
2425 TCGv_ptr p = tcg_temp_new_ptr();
2426 TCGv_i64 r;
2428 /* Convert the offset within the vector into an offset within ENV.
2429 * The final adjustment for the vector register base
2430 * is added via constant offset to the load.
2431 */
2432 #if HOST_BIG_ENDIAN
2433 /* Adjust for element ordering. See vec_reg_offset. */
2434 if (esz < 3) {
2435 tcg_gen_xori_i32(last, last, 8 - (1 << esz));
2437 #endif
2438 tcg_gen_ext_i32_ptr(p, last);
2439 tcg_gen_add_ptr(p, p, cpu_env);
2441 r = load_esz(p, vec_full_reg_offset(s, rm), esz);
2442 tcg_temp_free_ptr(p);
2444 return r;
2447 /* Compute CLAST for a Zreg. */
2448 static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
2450 TCGv_i32 last;
2451 TCGLabel *over;
2452 TCGv_i64 ele;
2453 unsigned vsz, esz = a->esz;
2455 if (!sve_access_check(s)) {
2456 return true;
2459 last = tcg_temp_local_new_i32();
2460 over = gen_new_label();
2462 find_last_active(s, last, esz, a->pg);
2464 /* There is of course no movcond for a 2048-bit vector,
2465 * so we must branch over the actual store.
2466 */
2467 tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over);
2469 if (!before) {
2470 incr_last_active(s, last, esz);
2473 ele = load_last_active(s, last, a->rm, esz);
2474 tcg_temp_free_i32(last);
2476 vsz = vec_full_reg_size(s);
2477 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele);
2478 tcg_temp_free_i64(ele);
2480 /* If this insn used MOVPRFX, we may need a second move. */
2481 if (a->rd != a->rn) {
2482 TCGLabel *done = gen_new_label();
2483 tcg_gen_br(done);
2485 gen_set_label(over);
2486 do_mov_z(s, a->rd, a->rn);
2488 gen_set_label(done);
2489 } else {
2490 gen_set_label(over);
2492 return true;
2495 TRANS_FEAT(CLASTA_z, aa64_sve, do_clast_vector, a, false)
2496 TRANS_FEAT(CLASTB_z, aa64_sve, do_clast_vector, a, true)
2498 /* Compute CLAST for a scalar. */
2499 static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
2500 bool before, TCGv_i64 reg_val)
2502 TCGv_i32 last = tcg_temp_new_i32();
2503 TCGv_i64 ele, cmp;
2505 find_last_active(s, last, esz, pg);
2507 /* Extend the original value of last prior to incrementing. */
2508 cmp = tcg_temp_new_i64();
2509 tcg_gen_ext_i32_i64(cmp, last);
2511 if (!before) {
2512 incr_last_active(s, last, esz);
2515 /* The conceit here is that while last < 0 indicates not found, after
2516 * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
2517 * from which we can load garbage. We then discard the garbage with
2518 * a conditional move.
2519 */
2520 ele = load_last_active(s, last, rm, esz);
2521 tcg_temp_free_i32(last);
2523 tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0),
2524 ele, reg_val);
2526 tcg_temp_free_i64(cmp);
2527 tcg_temp_free_i64(ele);
2530 /* Compute CLAST for a Vreg. */
2531 static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2533 if (sve_access_check(s)) {
2534 int esz = a->esz;
2535 int ofs = vec_reg_offset(s, a->rd, 0, esz);
2536 TCGv_i64 reg = load_esz(cpu_env, ofs, esz);
2538 do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
2539 write_fp_dreg(s, a->rd, reg);
2540 tcg_temp_free_i64(reg);
2542 return true;
2545 TRANS_FEAT(CLASTA_v, aa64_sve, do_clast_fp, a, false)
2546 TRANS_FEAT(CLASTB_v, aa64_sve, do_clast_fp, a, true)
2548 /* Compute CLAST for a Xreg. */
2549 static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before)
2551 TCGv_i64 reg;
2553 if (!sve_access_check(s)) {
2554 return true;
2557 reg = cpu_reg(s, a->rd);
2558 switch (a->esz) {
2559 case 0:
2560 tcg_gen_ext8u_i64(reg, reg);
2561 break;
2562 case 1:
2563 tcg_gen_ext16u_i64(reg, reg);
2564 break;
2565 case 2:
2566 tcg_gen_ext32u_i64(reg, reg);
2567 break;
2568 case 3:
2569 break;
2570 default:
2571 g_assert_not_reached();
2572 }
2574 do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg);
2575 return true;
2578 TRANS_FEAT(CLASTA_r, aa64_sve, do_clast_general, a, false)
2579 TRANS_FEAT(CLASTB_r, aa64_sve, do_clast_general, a, true)
2581 /* Compute LAST for a scalar. */
2582 static TCGv_i64 do_last_scalar(DisasContext *s, int esz,
2583 int pg, int rm, bool before)
2585 TCGv_i32 last = tcg_temp_new_i32();
2586 TCGv_i64 ret;
2588 find_last_active(s, last, esz, pg);
2589 if (before) {
2590 wrap_last_active(s, last, esz);
2591 } else {
2592 incr_last_active(s, last, esz);
2595 ret = load_last_active(s, last, rm, esz);
2596 tcg_temp_free_i32(last);
2597 return ret;
2600 /* Compute LAST for a Vreg. */
2601 static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2603 if (sve_access_check(s)) {
2604 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2605 write_fp_dreg(s, a->rd, val);
2606 tcg_temp_free_i64(val);
2608 return true;
2611 TRANS_FEAT(LASTA_v, aa64_sve, do_last_fp, a, false)
2612 TRANS_FEAT(LASTB_v, aa64_sve, do_last_fp, a, true)
2614 /* Compute LAST for a Xreg. */
2615 static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before)
2617 if (sve_access_check(s)) {
2618 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2619 tcg_gen_mov_i64(cpu_reg(s, a->rd), val);
2620 tcg_temp_free_i64(val);
2622 return true;
2625 static bool trans_LASTA_r(DisasContext *s, arg_rpr_esz *a)
2627 return do_last_general(s, a, false);
2630 static bool trans_LASTB_r(DisasContext *s, arg_rpr_esz *a)
2632 return do_last_general(s, a, true);
2635 static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a)
2637 if (sve_access_check(s)) {
2638 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn));
2640 return true;
2643 static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
2645 if (sve_access_check(s)) {
2646 int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
2647 TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
2648 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
2649 tcg_temp_free_i64(t);
2651 return true;
2654 static gen_helper_gvec_3 * const revb_fns[4] = {
2655 NULL, gen_helper_sve_revb_h,
2656 gen_helper_sve_revb_s, gen_helper_sve_revb_d,
2658 TRANS_FEAT(REVB, aa64_sve, gen_gvec_ool_arg_zpz, revb_fns[a->esz], a, 0)
2660 static gen_helper_gvec_3 * const revh_fns[4] = {
2661 NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d,
2663 TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
2665 TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
2666 a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)
2668 static bool trans_SPLICE(DisasContext *s, arg_rprr_esz *a)
2670 return gen_gvec_ool_zzzp(s, gen_helper_sve_splice,
2671 a->rd, a->rn, a->rm, a->pg, a->esz);
2674 static bool trans_SPLICE_sve2(DisasContext *s, arg_rpr_esz *a)
2676 if (!dc_isar_feature(aa64_sve2, s)) {
2677 return false;
2679 return gen_gvec_ool_zzzp(s, gen_helper_sve_splice,
2680 a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz);
2681 }
2683 /*
2684 *** SVE Integer Compare - Vectors Group
2685 */
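/*
 * These compares write a predicate result and also set NZCV, which
 * the helpers return and do_pred_flags applies.
 */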
2687 static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
2688 gen_helper_gvec_flags_4 *gen_fn)
2690 TCGv_ptr pd, zn, zm, pg;
2691 unsigned vsz;
2692 TCGv_i32 t;
2694 if (gen_fn == NULL) {
2695 return false;
2697 if (!sve_access_check(s)) {
2698 return true;
2701 vsz = vec_full_reg_size(s);
2702 t = tcg_temp_new_i32();
2703 pd = tcg_temp_new_ptr();
2704 zn = tcg_temp_new_ptr();
2705 zm = tcg_temp_new_ptr();
2706 pg = tcg_temp_new_ptr();
2708 tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
2709 tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
2710 tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
2711 tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
2713 gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0)));
2715 tcg_temp_free_ptr(pd);
2716 tcg_temp_free_ptr(zn);
2717 tcg_temp_free_ptr(zm);
2718 tcg_temp_free_ptr(pg);
2720 do_pred_flags(t);
2722 tcg_temp_free_i32(t);
2723 return true;
2726 #define DO_PPZZ(NAME, name) \
2727 static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \
2729 static gen_helper_gvec_flags_4 * const fns[4] = { \
2730 gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \
2731 gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \
2732 }; \
2733 return do_ppzz_flags(s, a, fns[a->esz]); \
2736 DO_PPZZ(CMPEQ, cmpeq)
2737 DO_PPZZ(CMPNE, cmpne)
2738 DO_PPZZ(CMPGT, cmpgt)
2739 DO_PPZZ(CMPGE, cmpge)
2740 DO_PPZZ(CMPHI, cmphi)
2741 DO_PPZZ(CMPHS, cmphs)
2743 #undef DO_PPZZ
2745 #define DO_PPZW(NAME, name) \
2746 static bool trans_##NAME##_ppzw(DisasContext *s, arg_rprr_esz *a) \
2748 static gen_helper_gvec_flags_4 * const fns[4] = { \
2749 gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \
2750 gen_helper_sve_##name##_ppzw_s, NULL \
2751 }; \
2752 return do_ppzz_flags(s, a, fns[a->esz]); \
2755 DO_PPZW(CMPEQ, cmpeq)
2756 DO_PPZW(CMPNE, cmpne)
2757 DO_PPZW(CMPGT, cmpgt)
2758 DO_PPZW(CMPGE, cmpge)
2759 DO_PPZW(CMPHI, cmphi)
2760 DO_PPZW(CMPHS, cmphs)
2761 DO_PPZW(CMPLT, cmplt)
2762 DO_PPZW(CMPLE, cmple)
2763 DO_PPZW(CMPLO, cmplo)
2764 DO_PPZW(CMPLS, cmpls)
2766 #undef DO_PPZW
2768 /*
2769 *** SVE Integer Compare - Immediate Groups
2770 */
2772 static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
2773 gen_helper_gvec_flags_3 *gen_fn)
2775 TCGv_ptr pd, zn, pg;
2776 unsigned vsz;
2777 TCGv_i32 t;
2779 if (gen_fn == NULL) {
2780 return false;
2782 if (!sve_access_check(s)) {
2783 return true;
2786 vsz = vec_full_reg_size(s);
2787 t = tcg_temp_new_i32();
2788 pd = tcg_temp_new_ptr();
2789 zn = tcg_temp_new_ptr();
2790 pg = tcg_temp_new_ptr();
2792 tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
2793 tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
2794 tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
2796 gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm)));
2798 tcg_temp_free_ptr(pd);
2799 tcg_temp_free_ptr(zn);
2800 tcg_temp_free_ptr(pg);
2802 do_pred_flags(t);
2804 tcg_temp_free_i32(t);
2805 return true;
2808 #define DO_PPZI(NAME, name) \
2809 static bool trans_##NAME##_ppzi(DisasContext *s, arg_rpri_esz *a) \
2811 static gen_helper_gvec_flags_3 * const fns[4] = { \
2812 gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \
2813 gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \
2814 }; \
2815 return do_ppzi_flags(s, a, fns[a->esz]); \
2818 DO_PPZI(CMPEQ, cmpeq)
2819 DO_PPZI(CMPNE, cmpne)
2820 DO_PPZI(CMPGT, cmpgt)
2821 DO_PPZI(CMPGE, cmpge)
2822 DO_PPZI(CMPHI, cmphi)
2823 DO_PPZI(CMPHS, cmphs)
2824 DO_PPZI(CMPLT, cmplt)
2825 DO_PPZI(CMPLE, cmple)
2826 DO_PPZI(CMPLO, cmplo)
2827 DO_PPZI(CMPLS, cmpls)
2829 #undef DO_PPZI
2831 /*
2832 *** SVE Partition Break Group
2833 */
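/*
 * BRKPA/BRKPB take two source predicates (do_brk3) while BRKA/BRKB
 * take one (do_brk2); in both cases a->s selects the flag-setting
 * variant of the helper.
 */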
2835 static bool do_brk3(DisasContext *s, arg_rprr_s *a,
2836 gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s)
2838 if (!sve_access_check(s)) {
2839 return true;
2842 unsigned vsz = pred_full_reg_size(s);
2844 /* Predicate sizes may be smaller and cannot use simd_desc. */
2845 TCGv_ptr d = tcg_temp_new_ptr();
2846 TCGv_ptr n = tcg_temp_new_ptr();
2847 TCGv_ptr m = tcg_temp_new_ptr();
2848 TCGv_ptr g = tcg_temp_new_ptr();
2849 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
2851 tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
2852 tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
2853 tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
2854 tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
2856 if (a->s) {
2857 TCGv_i32 t = tcg_temp_new_i32();
2858 fn_s(t, d, n, m, g, desc);
2859 do_pred_flags(t);
2860 tcg_temp_free_i32(t);
2861 } else {
2862 fn(d, n, m, g, desc);
2864 tcg_temp_free_ptr(d);
2865 tcg_temp_free_ptr(n);
2866 tcg_temp_free_ptr(m);
2867 tcg_temp_free_ptr(g);
2868 return true;
2871 static bool do_brk2(DisasContext *s, arg_rpr_s *a,
2872 gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s)
2874 if (!sve_access_check(s)) {
2875 return true;
2878 unsigned vsz = pred_full_reg_size(s);
2880 /* Predicate sizes may be smaller and cannot use simd_desc. */
2881 TCGv_ptr d = tcg_temp_new_ptr();
2882 TCGv_ptr n = tcg_temp_new_ptr();
2883 TCGv_ptr g = tcg_temp_new_ptr();
2884 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
2886 tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
2887 tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
2888 tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
2890 if (a->s) {
2891 TCGv_i32 t = tcg_temp_new_i32();
2892 fn_s(t, d, n, g, desc);
2893 do_pred_flags(t);
2894 tcg_temp_free_i32(t);
2895 } else {
2896 fn(d, n, g, desc);
2898 tcg_temp_free_ptr(d);
2899 tcg_temp_free_ptr(n);
2900 tcg_temp_free_ptr(g);
2901 return true;
2904 static bool trans_BRKPA(DisasContext *s, arg_rprr_s *a)
2906 return do_brk3(s, a, gen_helper_sve_brkpa, gen_helper_sve_brkpas);
2909 static bool trans_BRKPB(DisasContext *s, arg_rprr_s *a)
2911 return do_brk3(s, a, gen_helper_sve_brkpb, gen_helper_sve_brkpbs);
2914 static bool trans_BRKA_m(DisasContext *s, arg_rpr_s *a)
2916 return do_brk2(s, a, gen_helper_sve_brka_m, gen_helper_sve_brkas_m);
2919 static bool trans_BRKB_m(DisasContext *s, arg_rpr_s *a)
2921 return do_brk2(s, a, gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m);
2924 static bool trans_BRKA_z(DisasContext *s, arg_rpr_s *a)
2926 return do_brk2(s, a, gen_helper_sve_brka_z, gen_helper_sve_brkas_z);
2929 static bool trans_BRKB_z(DisasContext *s, arg_rpr_s *a)
2931 return do_brk2(s, a, gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z);
2934 static bool trans_BRKN(DisasContext *s, arg_rpr_s *a)
2935 {
2936 return do_brk2(s, a, gen_helper_sve_brkn, gen_helper_sve_brkns);
2937 }
2939 /*
2940 *** SVE Predicate Count Group
2941 */
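/*
 * When the entire predicate fits within a single 64-bit load
 * (psz <= 8), count the active elements inline with ctpop;
 * otherwise use the out-of-line helper.
 */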
2943 static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
2945 unsigned psz = pred_full_reg_size(s);
2947 if (psz <= 8) {
2948 uint64_t psz_mask;
2950 tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
2951 if (pn != pg) {
2952 TCGv_i64 g = tcg_temp_new_i64();
2953 tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
2954 tcg_gen_and_i64(val, val, g);
2955 tcg_temp_free_i64(g);
2958 /* Reduce the pred_esz_masks value simply to reduce the
2959 * size of the code generated here.
2960 */
2961 psz_mask = MAKE_64BIT_MASK(0, psz * 8);
2962 tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask);
2964 tcg_gen_ctpop_i64(val, val);
2965 } else {
2966 TCGv_ptr t_pn = tcg_temp_new_ptr();
2967 TCGv_ptr t_pg = tcg_temp_new_ptr();
2968 unsigned desc = 0;
2970 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
2971 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
2973 tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
2974 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
2976 gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc));
2977 tcg_temp_free_ptr(t_pn);
2978 tcg_temp_free_ptr(t_pg);
2982 static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
2984 if (sve_access_check(s)) {
2985 do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg);
2987 return true;
2990 static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
2992 if (sve_access_check(s)) {
2993 TCGv_i64 reg = cpu_reg(s, a->rd);
2994 TCGv_i64 val = tcg_temp_new_i64();
2996 do_cntp(s, val, a->esz, a->pg, a->pg);
2997 if (a->d) {
2998 tcg_gen_sub_i64(reg, reg, val);
2999 } else {
3000 tcg_gen_add_i64(reg, reg, val);
3002 tcg_temp_free_i64(val);
3004 return true;
3007 static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a)
3009 if (a->esz == 0) {
3010 return false;
3012 if (sve_access_check(s)) {
3013 unsigned vsz = vec_full_reg_size(s);
3014 TCGv_i64 val = tcg_temp_new_i64();
3015 GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds;
3017 do_cntp(s, val, a->esz, a->pg, a->pg);
3018 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
3019 vec_full_reg_offset(s, a->rn), val, vsz, vsz);
3021 return true;
3024 static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a)
3026 if (sve_access_check(s)) {
3027 TCGv_i64 reg = cpu_reg(s, a->rd);
3028 TCGv_i64 val = tcg_temp_new_i64();
3030 do_cntp(s, val, a->esz, a->pg, a->pg);
3031 do_sat_addsub_32(reg, val, a->u, a->d);
3033 return true;
3036 static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a)
3038 if (sve_access_check(s)) {
3039 TCGv_i64 reg = cpu_reg(s, a->rd);
3040 TCGv_i64 val = tcg_temp_new_i64();
3042 do_cntp(s, val, a->esz, a->pg, a->pg);
3043 do_sat_addsub_64(reg, val, a->u, a->d);
3045 return true;
3048 static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a)
3050 if (a->esz == 0) {
3051 return false;
3053 if (sve_access_check(s)) {
3054 TCGv_i64 val = tcg_temp_new_i64();
3055 do_cntp(s, val, a->esz, a->pg, a->pg);
3056 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d);
3057 }
3058 return true;
3059 }
3061 /*
3062 *** SVE Integer Compare Scalars Group
3063 */
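/*
 * CTERMEQ/CTERMNE set NF from the comparison and derive VF from the
 * existing CF; CF and ZF themselves are left unchanged.
 */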
3065 static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
3067 if (!sve_access_check(s)) {
3068 return true;
3069 }
3071 TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ);
3072 TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf);
3073 TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf);
3074 TCGv_i64 cmp = tcg_temp_new_i64();
3076 tcg_gen_setcond_i64(cond, cmp, rn, rm);
3077 tcg_gen_extrl_i64_i32(cpu_NF, cmp);
3078 tcg_temp_free_i64(cmp);
3080 /* VF = !NF & !CF. */
3081 tcg_gen_xori_i32(cpu_VF, cpu_NF, 1);
3082 tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF);
3084 /* Both NF and VF actually look at bit 31. */
3085 tcg_gen_neg_i32(cpu_NF, cpu_NF);
3086 tcg_gen_neg_i32(cpu_VF, cpu_VF);
3087 return true;
3090 static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
3092 TCGv_i64 op0, op1, t0, t1, tmax;
3093 TCGv_i32 t2;
3094 TCGv_ptr ptr;
3095 unsigned vsz = vec_full_reg_size(s);
3096 unsigned desc = 0;
3097 TCGCond cond;
3098 uint64_t maxval;
3099 /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
3100 bool eq = a->eq == a->lt;
3102 /* The greater-than conditions are all SVE2. */
3103 if (!a->lt && !dc_isar_feature(aa64_sve2, s)) {
3104 return false;
3106 if (!sve_access_check(s)) {
3107 return true;
3110 op0 = read_cpu_reg(s, a->rn, 1);
3111 op1 = read_cpu_reg(s, a->rm, 1);
3113 if (!a->sf) {
3114 if (a->u) {
3115 tcg_gen_ext32u_i64(op0, op0);
3116 tcg_gen_ext32u_i64(op1, op1);
3117 } else {
3118 tcg_gen_ext32s_i64(op0, op0);
3119 tcg_gen_ext32s_i64(op1, op1);
3123 /* For the helper, compress the different conditions into a computation
3124 * of how many iterations for which the condition is true.
3125 */
3126 t0 = tcg_temp_new_i64();
3127 t1 = tcg_temp_new_i64();
3129 if (a->lt) {
3130 tcg_gen_sub_i64(t0, op1, op0);
3131 if (a->u) {
3132 maxval = a->sf ? UINT64_MAX : UINT32_MAX;
3133 cond = eq ? TCG_COND_LEU : TCG_COND_LTU;
3134 } else {
3135 maxval = a->sf ? INT64_MAX : INT32_MAX;
3136 cond = eq ? TCG_COND_LE : TCG_COND_LT;
3138 } else {
3139 tcg_gen_sub_i64(t0, op0, op1);
3140 if (a->u) {
3141 maxval = 0;
3142 cond = eq ? TCG_COND_GEU : TCG_COND_GTU;
3143 } else {
3144 maxval = a->sf ? INT64_MIN : INT32_MIN;
3145 cond = eq ? TCG_COND_GE : TCG_COND_GT;
3149 tmax = tcg_constant_i64(vsz >> a->esz);
3150 if (eq) {
3151 /* Equality means one more iteration. */
3152 tcg_gen_addi_i64(t0, t0, 1);
3154 /*
3155 * For the less-than while, if op1 is maxval (and the only time
3156 * the addition above could overflow), then we produce an all-true
3157 * predicate by setting the count to the vector length. This is
3158 * because the pseudocode is described as an increment + compare
3159 * loop, and the maximum integer would always compare true.
3160 * Similarly, the greater-than while has the same issue with the
3161 * minimum integer due to the decrement + compare loop.
3162 */
3163 tcg_gen_movi_i64(t1, maxval);
3164 tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
3165 }
3167 /* Bound to the maximum. */
3168 tcg_gen_umin_i64(t0, t0, tmax);
3170 /* Set the count to zero if the condition is false. */
3171 tcg_gen_movi_i64(t1, 0);
3172 tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
3173 tcg_temp_free_i64(t1);
3175 /* Since we're bounded, pass as a 32-bit type. */
3176 t2 = tcg_temp_new_i32();
3177 tcg_gen_extrl_i64_i32(t2, t0);
3178 tcg_temp_free_i64(t0);
3180 /* Scale elements to bits. */
3181 tcg_gen_shli_i32(t2, t2, a->esz);
3183 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
3184 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
3186 ptr = tcg_temp_new_ptr();
3187 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
3189 if (a->lt) {
3190 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
3191 } else {
3192 gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc));
3194 do_pred_flags(t2);
3196 tcg_temp_free_ptr(ptr);
3197 tcg_temp_free_i32(t2);
3198 return true;
3201 static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
3203 TCGv_i64 op0, op1, diff, t1, tmax;
3204 TCGv_i32 t2;
3205 TCGv_ptr ptr;
3206 unsigned vsz = vec_full_reg_size(s);
3207 unsigned desc = 0;
3209 if (!dc_isar_feature(aa64_sve2, s)) {
3210 return false;
3212 if (!sve_access_check(s)) {
3213 return true;
3216 op0 = read_cpu_reg(s, a->rn, 1);
3217 op1 = read_cpu_reg(s, a->rm, 1);
3219 tmax = tcg_constant_i64(vsz);
3220 diff = tcg_temp_new_i64();
3222 if (a->rw) {
3223 /* WHILERW */
3224 /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */
3225 t1 = tcg_temp_new_i64();
3226 tcg_gen_sub_i64(diff, op0, op1);
3227 tcg_gen_sub_i64(t1, op1, op0);
3228 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
3229 tcg_temp_free_i64(t1);
3230 /* Round down to a multiple of ESIZE. */
3231 tcg_gen_andi_i64(diff, diff, -1 << a->esz);
3232 /* If op1 == op0, diff == 0, and the condition is always true. */
3233 tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
3234 } else {
3235 /* WHILEWR */
3236 tcg_gen_sub_i64(diff, op1, op0);
3237 /* Round down to a multiple of ESIZE. */
3238 tcg_gen_andi_i64(diff, diff, -1 << a->esz);
3239 /* If op0 >= op1, diff <= 0, the condition is always true. */
3240 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
3243 /* Bound to the maximum. */
3244 tcg_gen_umin_i64(diff, diff, tmax);
3246 /* Since we're bounded, pass as a 32-bit type. */
3247 t2 = tcg_temp_new_i32();
3248 tcg_gen_extrl_i64_i32(t2, diff);
3249 tcg_temp_free_i64(diff);
3251 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
3252 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
3254 ptr = tcg_temp_new_ptr();
3255 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
3257 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
3258 do_pred_flags(t2);
3260 tcg_temp_free_ptr(ptr);
3261 tcg_temp_free_i32(t2);
3262 return true;
3263 }
3265 /*
3266 *** SVE Integer Wide Immediate - Unpredicated Group
3267 */
3269 static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
3271 if (a->esz == 0) {
3272 return false;
3274 if (sve_access_check(s)) {
3275 unsigned vsz = vec_full_reg_size(s);
3276 int dofs = vec_full_reg_offset(s, a->rd);
3277 uint64_t imm;
3279 /* Decode the VFP immediate. */
3280 imm = vfp_expand_imm(a->esz, a->imm);
3281 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
3283 return true;
3286 static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
3288 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
3289 return false;
3291 if (sve_access_check(s)) {
3292 unsigned vsz = vec_full_reg_size(s);
3293 int dofs = vec_full_reg_offset(s, a->rd);
3295 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
3297 return true;
3300 static bool trans_ADD_zzi(DisasContext *s, arg_rri_esz *a)
3302 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
3303 return false;
3305 return gen_gvec_fn_arg_zzi(s, tcg_gen_gvec_addi, a);
3308 static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
3310 a->imm = -a->imm;
3311 return trans_ADD_zzi(s, a);
3312 }
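/*
 * SUBR is a reversed subtract, computing imm - Zn; hence the
 * scalar_first entries below, which place the immediate as the
 * first operand.
 */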
3314 static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
3316 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
3317 static const GVecGen2s op[4] = {
3318 { .fni8 = tcg_gen_vec_sub8_i64,
3319 .fniv = tcg_gen_sub_vec,
3320 .fno = gen_helper_sve_subri_b,
3321 .opt_opc = vecop_list,
3322 .vece = MO_8,
3323 .scalar_first = true },
3324 { .fni8 = tcg_gen_vec_sub16_i64,
3325 .fniv = tcg_gen_sub_vec,
3326 .fno = gen_helper_sve_subri_h,
3327 .opt_opc = vecop_list,
3328 .vece = MO_16,
3329 .scalar_first = true },
3330 { .fni4 = tcg_gen_sub_i32,
3331 .fniv = tcg_gen_sub_vec,
3332 .fno = gen_helper_sve_subri_s,
3333 .opt_opc = vecop_list,
3334 .vece = MO_32,
3335 .scalar_first = true },
3336 { .fni8 = tcg_gen_sub_i64,
3337 .fniv = tcg_gen_sub_vec,
3338 .fno = gen_helper_sve_subri_d,
3339 .opt_opc = vecop_list,
3340 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3341 .vece = MO_64,
3342 .scalar_first = true }
3345 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
3346 return false;
3348 if (sve_access_check(s)) {
3349 unsigned vsz = vec_full_reg_size(s);
3350 tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd),
3351 vec_full_reg_offset(s, a->rn),
3352 vsz, vsz, tcg_constant_i64(a->imm), &op[a->esz]);
3354 return true;
3357 static bool trans_MUL_zzi(DisasContext *s, arg_rri_esz *a)
3359 if (sve_access_check(s)) {
3360 unsigned vsz = vec_full_reg_size(s);
3361 tcg_gen_gvec_muli(a->esz, vec_full_reg_offset(s, a->rd),
3362 vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
3364 return true;
3367 static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d)
3369 if (a->esz == 0 && extract32(s->insn, 13, 1)) {
3370 return false;
3372 if (sve_access_check(s)) {
3373 do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
3374 tcg_constant_i64(a->imm), u, d);
3376 return true;
3379 static bool trans_SQADD_zzi(DisasContext *s, arg_rri_esz *a)
3381 return do_zzi_sat(s, a, false, false);
3384 static bool trans_UQADD_zzi(DisasContext *s, arg_rri_esz *a)
3386 return do_zzi_sat(s, a, true, false);
3389 static bool trans_SQSUB_zzi(DisasContext *s, arg_rri_esz *a)
3391 return do_zzi_sat(s, a, false, true);
3394 static bool trans_UQSUB_zzi(DisasContext *s, arg_rri_esz *a)
3396 return do_zzi_sat(s, a, true, true);
3399 static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
3401 if (sve_access_check(s)) {
3402 unsigned vsz = vec_full_reg_size(s);
3403 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
3404 vec_full_reg_offset(s, a->rn),
3405 tcg_constant_i64(a->imm), vsz, vsz, 0, fn);
3407 return true;
3410 #define DO_ZZI(NAME, name) \
3411 static bool trans_##NAME##_zzi(DisasContext *s, arg_rri_esz *a) \
3413 static gen_helper_gvec_2i * const fns[4] = { \
3414 gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \
3415 gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \
3416 }; \
3417 return do_zzi_ool(s, a, fns[a->esz]); \
3420 DO_ZZI(SMAX, smax)
3421 DO_ZZI(UMAX, umax)
3422 DO_ZZI(SMIN, smin)
3423 DO_ZZI(UMIN, umin)
3425 #undef DO_ZZI
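/*
 * The dot product helpers are indexed first by signedness (a->u) and
 * then by input element size (a->sz: byte or halfword).
 */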
3427 static gen_helper_gvec_4 * const dot_fns[2][2] = {
3428 { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
3429 { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
3431 TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz,
3432 dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0)
3434 /*
3435 * SVE Multiply - Indexed
3436 */
3438 TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
3439 gen_helper_gvec_sdot_idx_b, a)
3440 TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
3441 gen_helper_gvec_sdot_idx_h, a)
3442 TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
3443 gen_helper_gvec_udot_idx_b, a)
3444 TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
3445 gen_helper_gvec_udot_idx_h, a)
3447 TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
3448 gen_helper_gvec_sudot_idx_b, a)
3449 TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
3450 gen_helper_gvec_usdot_idx_b, a)
3452 #define DO_SVE2_RRX(NAME, FUNC) \
3453 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \
3454 a->rd, a->rn, a->rm, a->index)
3456 DO_SVE2_RRX(MUL_zzx_h, gen_helper_gvec_mul_idx_h)
3457 DO_SVE2_RRX(MUL_zzx_s, gen_helper_gvec_mul_idx_s)
3458 DO_SVE2_RRX(MUL_zzx_d, gen_helper_gvec_mul_idx_d)
3460 DO_SVE2_RRX(SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
3461 DO_SVE2_RRX(SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
3462 DO_SVE2_RRX(SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)
3464 DO_SVE2_RRX(SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
3465 DO_SVE2_RRX(SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
3466 DO_SVE2_RRX(SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)
3468 #undef DO_SVE2_RRX
3470 #define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
3471 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \
3472 a->rd, a->rn, a->rm, (a->index << 1) | TOP)
3474 DO_SVE2_RRX_TB(SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false)
3475 DO_SVE2_RRX_TB(SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false)
3476 DO_SVE2_RRX_TB(SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true)
3477 DO_SVE2_RRX_TB(SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true)
3479 DO_SVE2_RRX_TB(SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false)
3480 DO_SVE2_RRX_TB(SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false)
3481 DO_SVE2_RRX_TB(SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true)
3482 DO_SVE2_RRX_TB(SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true)
3484 DO_SVE2_RRX_TB(UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false)
3485 DO_SVE2_RRX_TB(UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false)
3486 DO_SVE2_RRX_TB(UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true)
3487 DO_SVE2_RRX_TB(UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true)
3489 #undef DO_SVE2_RRX_TB
3491 #define DO_SVE2_RRXR(NAME, FUNC) \
3492 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzxz, FUNC, a)
3494 DO_SVE2_RRXR(MLA_zzxz_h, gen_helper_gvec_mla_idx_h)
3495 DO_SVE2_RRXR(MLA_zzxz_s, gen_helper_gvec_mla_idx_s)
3496 DO_SVE2_RRXR(MLA_zzxz_d, gen_helper_gvec_mla_idx_d)
3498 DO_SVE2_RRXR(MLS_zzxz_h, gen_helper_gvec_mls_idx_h)
3499 DO_SVE2_RRXR(MLS_zzxz_s, gen_helper_gvec_mls_idx_s)
3500 DO_SVE2_RRXR(MLS_zzxz_d, gen_helper_gvec_mls_idx_d)
3502 DO_SVE2_RRXR(SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h)
3503 DO_SVE2_RRXR(SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s)
3504 DO_SVE2_RRXR(SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d)
3506 DO_SVE2_RRXR(SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h)
3507 DO_SVE2_RRXR(SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s)
3508 DO_SVE2_RRXR(SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d)
3510 #undef DO_SVE2_RRXR
3512 #define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \
3513 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \
3514 a->rd, a->rn, a->rm, a->ra, (a->index << 1) | TOP)
3516 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false)
3517 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false)
3518 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true)
3519 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true)
3521 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false)
3522 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false)
3523 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true)
3524 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true)
3526 DO_SVE2_RRXR_TB(SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false)
3527 DO_SVE2_RRXR_TB(SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false)
3528 DO_SVE2_RRXR_TB(SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true)
3529 DO_SVE2_RRXR_TB(SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true)
3531 DO_SVE2_RRXR_TB(UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false)
3532 DO_SVE2_RRXR_TB(UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false)
3533 DO_SVE2_RRXR_TB(UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true)
3534 DO_SVE2_RRXR_TB(UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true)
3536 DO_SVE2_RRXR_TB(SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false)
3537 DO_SVE2_RRXR_TB(SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false)
3538 DO_SVE2_RRXR_TB(SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true)
3539 DO_SVE2_RRXR_TB(SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true)
3541 DO_SVE2_RRXR_TB(UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false)
3542 DO_SVE2_RRXR_TB(UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false)
3543 DO_SVE2_RRXR_TB(UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true)
3544 DO_SVE2_RRXR_TB(UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true)
3546 #undef DO_SVE2_RRXR_TB
3548 #define DO_SVE2_RRXR_ROT(NAME, FUNC) \
3549 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \
3550 a->rd, a->rn, a->rm, a->ra, (a->index << 2) | a->rot)
3552 DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h)
3553 DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s)
3555 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h)
3556 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s)
3558 DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s)
3559 DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d)
3561 #undef DO_SVE2_RRXR_ROT
3563 /*
3564 *** SVE Floating Point Multiply-Add Indexed Group
3565 */
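/*
 * The index and a 'sub' flag are packed together into simd_data;
 * the flag converts the fused multiply-add into a multiply-subtract
 * for FMLS.
 */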
3567 static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub)
3569 static gen_helper_gvec_4_ptr * const fns[3] = {
3570 gen_helper_gvec_fmla_idx_h,
3571 gen_helper_gvec_fmla_idx_s,
3572 gen_helper_gvec_fmla_idx_d,
3575 if (sve_access_check(s)) {
3576 unsigned vsz = vec_full_reg_size(s);
3577 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3578 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
3579 vec_full_reg_offset(s, a->rn),
3580 vec_full_reg_offset(s, a->rm),
3581 vec_full_reg_offset(s, a->ra),
3582 status, vsz, vsz, (a->index << 1) | sub,
3583 fns[a->esz - 1]);
3584 tcg_temp_free_ptr(status);
3586 return true;
3589 static bool trans_FMLA_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
3590 {
3591 return do_FMLA_zzxz(s, a, false);
3592 }
3594 static bool trans_FMLS_zzxz(DisasContext *s, arg_FMLA_zzxz *a)
3595 {
3596 return do_FMLA_zzxz(s, a, true);
3597 }
3599 /*
3600 *** SVE Floating Point Multiply Indexed Group
3601 */
3603 static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a)
3605 static gen_helper_gvec_3_ptr * const fns[3] = {
3606 gen_helper_gvec_fmul_idx_h,
3607 gen_helper_gvec_fmul_idx_s,
3608 gen_helper_gvec_fmul_idx_d,
3611 if (sve_access_check(s)) {
3612 unsigned vsz = vec_full_reg_size(s);
3613 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3614 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
3615 vec_full_reg_offset(s, a->rn),
3616 vec_full_reg_offset(s, a->rm),
3617 status, vsz, vsz, a->index, fns[a->esz - 1]);
3618 tcg_temp_free_ptr(status);
3619 }
3620 return true;
3621 }
3623 /*
3624 *** SVE Floating Point Fast Reduction Group
3625 */
3627 typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr,
3628 TCGv_ptr, TCGv_i32);
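/*
 * The reduction helpers work over a power-of-2 sized tree, which is
 * why pow2ceil(vsz) is passed in the descriptor data below.
 */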
3630 static void do_reduce(DisasContext *s, arg_rpr_esz *a,
3631 gen_helper_fp_reduce *fn)
3633 unsigned vsz = vec_full_reg_size(s);
3634 unsigned p2vsz = pow2ceil(vsz);
3635 TCGv_i32 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, p2vsz));
3636 TCGv_ptr t_zn, t_pg, status;
3637 TCGv_i64 temp;
3639 temp = tcg_temp_new_i64();
3640 t_zn = tcg_temp_new_ptr();
3641 t_pg = tcg_temp_new_ptr();
3643 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
3644 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
3645 status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3647 fn(temp, t_zn, t_pg, status, t_desc);
3648 tcg_temp_free_ptr(t_zn);
3649 tcg_temp_free_ptr(t_pg);
3650 tcg_temp_free_ptr(status);
3652 write_fp_dreg(s, a->rd, temp);
3653 tcg_temp_free_i64(temp);
3656 #define DO_VPZ(NAME, name) \
3657 static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \
3659 static gen_helper_fp_reduce * const fns[3] = { \
3660 gen_helper_sve_##name##_h, \
3661 gen_helper_sve_##name##_s, \
3662 gen_helper_sve_##name##_d, \
3663 }; \
3664 if (a->esz == 0) { \
3665 return false; \
3667 if (sve_access_check(s)) { \
3668 do_reduce(s, a, fns[a->esz - 1]); \
3670 return true; \
3673 DO_VPZ(FADDV, faddv)
3674 DO_VPZ(FMINNMV, fminnmv)
3675 DO_VPZ(FMAXNMV, fmaxnmv)
3676 DO_VPZ(FMINV, fminv)
3677 DO_VPZ(FMAXV, fmaxv)
3679 /*
3680 *** SVE Floating Point Unary Operations - Unpredicated Group
3681 */
3683 static void do_zz_fp(DisasContext *s, arg_rr_esz *a, gen_helper_gvec_2_ptr *fn)
3685 unsigned vsz = vec_full_reg_size(s);
3686 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3688 tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->rd),
3689 vec_full_reg_offset(s, a->rn),
3690 status, vsz, vsz, 0, fn);
3691 tcg_temp_free_ptr(status);
3694 static bool trans_FRECPE(DisasContext *s, arg_rr_esz *a)
3696 static gen_helper_gvec_2_ptr * const fns[3] = {
3697 gen_helper_gvec_frecpe_h,
3698 gen_helper_gvec_frecpe_s,
3699 gen_helper_gvec_frecpe_d,
3701 if (a->esz == 0) {
3702 return false;
3704 if (sve_access_check(s)) {
3705 do_zz_fp(s, a, fns[a->esz - 1]);
3707 return true;
3710 static bool trans_FRSQRTE(DisasContext *s, arg_rr_esz *a)
3712 static gen_helper_gvec_2_ptr * const fns[3] = {
3713 gen_helper_gvec_frsqrte_h,
3714 gen_helper_gvec_frsqrte_s,
3715 gen_helper_gvec_frsqrte_d,
3717 if (a->esz == 0) {
3718 return false;
3720 if (sve_access_check(s)) {
3721 do_zz_fp(s, a, fns[a->esz - 1]);
3722 }
3723 return true;
3724 }
3726 /*
3727 *** SVE Floating Point Compare with Zero Group
3728 */
3730 static void do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
3731 gen_helper_gvec_3_ptr *fn)
3733 unsigned vsz = vec_full_reg_size(s);
3734 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3736 tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
3737 vec_full_reg_offset(s, a->rn),
3738 pred_full_reg_offset(s, a->pg),
3739 status, vsz, vsz, 0, fn);
3740 tcg_temp_free_ptr(status);
3743 #define DO_PPZ(NAME, name) \
3744 static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a) \
3746 static gen_helper_gvec_3_ptr * const fns[3] = { \
3747 gen_helper_sve_##name##_h, \
3748 gen_helper_sve_##name##_s, \
3749 gen_helper_sve_##name##_d, \
3750 }; \
3751 if (a->esz == 0) { \
3752 return false; \
3754 if (sve_access_check(s)) { \
3755 do_ppz_fp(s, a, fns[a->esz - 1]); \
3757 return true; \
3760 DO_PPZ(FCMGE_ppz0, fcmge0)
3761 DO_PPZ(FCMGT_ppz0, fcmgt0)
3762 DO_PPZ(FCMLE_ppz0, fcmle0)
3763 DO_PPZ(FCMLT_ppz0, fcmlt0)
3764 DO_PPZ(FCMEQ_ppz0, fcmeq0)
3765 DO_PPZ(FCMNE_ppz0, fcmne0)
3767 #undef DO_PPZ
3769 /*
3770 *** SVE floating-point trig multiply-add coefficient
3771 */
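/*
 * FTMAD's immediate selects which coefficient of the trigonometric
 * polynomial expansion the helper applies.
 */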
3773 static bool trans_FTMAD(DisasContext *s, arg_FTMAD *a)
3775 static gen_helper_gvec_3_ptr * const fns[3] = {
3776 gen_helper_sve_ftmad_h,
3777 gen_helper_sve_ftmad_s,
3778 gen_helper_sve_ftmad_d,
3781 if (a->esz == 0) {
3782 return false;
3784 if (sve_access_check(s)) {
3785 unsigned vsz = vec_full_reg_size(s);
3786 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3787 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
3788 vec_full_reg_offset(s, a->rn),
3789 vec_full_reg_offset(s, a->rm),
3790 status, vsz, vsz, a->imm, fns[a->esz - 1]);
3791 tcg_temp_free_ptr(status);
3792 }
3793 return true;
3794 }
3796 /*
3797 *** SVE Floating Point Accumulating Reduction Group
3798 */
3800 static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
3802 typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr,
3803 TCGv_ptr, TCGv_ptr, TCGv_i32);
3804 static fadda_fn * const fns[3] = {
3805 gen_helper_sve_fadda_h,
3806 gen_helper_sve_fadda_s,
3807 gen_helper_sve_fadda_d,
3809 unsigned vsz = vec_full_reg_size(s);
3810 TCGv_ptr t_rm, t_pg, t_fpst;
3811 TCGv_i64 t_val;
3812 TCGv_i32 t_desc;
3814 if (a->esz == 0) {
3815 return false;
3817 if (!sve_access_check(s)) {
3818 return true;
3821 t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
3822 t_rm = tcg_temp_new_ptr();
3823 t_pg = tcg_temp_new_ptr();
3824 tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
3825 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
3826 t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3827 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
3829 fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc);
3831 tcg_temp_free_ptr(t_fpst);
3832 tcg_temp_free_ptr(t_pg);
3833 tcg_temp_free_ptr(t_rm);
3835 write_fp_dreg(s, a->rd, t_val);
3836 tcg_temp_free_i64(t_val);
3837 return true;
3838 }
3840 /*
3841 *** SVE Floating Point Arithmetic - Unpredicated Group
3842 */
3844 static bool do_zzz_fp(DisasContext *s, arg_rrr_esz *a,
3845 gen_helper_gvec_3_ptr *fn)
3847 if (fn == NULL) {
3848 return false;
3850 if (sve_access_check(s)) {
3851 unsigned vsz = vec_full_reg_size(s);
3852 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3853 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
3854 vec_full_reg_offset(s, a->rn),
3855 vec_full_reg_offset(s, a->rm),
3856 status, vsz, vsz, 0, fn);
3857 tcg_temp_free_ptr(status);
3859 return true;
3863 #define DO_FP3(NAME, name) \
3864 static bool trans_##NAME(DisasContext *s, arg_rrr_esz *a) \
3866 static gen_helper_gvec_3_ptr * const fns[4] = { \
3867 NULL, gen_helper_gvec_##name##_h, \
3868 gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \
3869 }; \
3870 return do_zzz_fp(s, a, fns[a->esz]); \
3873 DO_FP3(FADD_zzz, fadd)
3874 DO_FP3(FSUB_zzz, fsub)
3875 DO_FP3(FMUL_zzz, fmul)
3876 DO_FP3(FTSMUL, ftsmul)
3877 DO_FP3(FRECPS, recps)
3878 DO_FP3(FRSQRTS, rsqrts)
3880 #undef DO_FP3
3882 /*
3883 *** SVE Floating Point Arithmetic - Predicated Group
3884 */
3886 static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
3887 gen_helper_gvec_4_ptr *fn)
3889 if (fn == NULL) {
3890 return false;
3892 if (sve_access_check(s)) {
3893 unsigned vsz = vec_full_reg_size(s);
3894 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3895 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
3896 vec_full_reg_offset(s, a->rn),
3897 vec_full_reg_offset(s, a->rm),
3898 pred_full_reg_offset(s, a->pg),
3899 status, vsz, vsz, 0, fn);
3900 tcg_temp_free_ptr(status);
3902 return true;
3905 #define DO_FP3(NAME, name) \
3906 static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
3908 static gen_helper_gvec_4_ptr * const fns[4] = { \
3909 NULL, gen_helper_sve_##name##_h, \
3910 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
3911 }; \
3912 return do_zpzz_fp(s, a, fns[a->esz]); \
3915 DO_FP3(FADD_zpzz, fadd)
3916 DO_FP3(FSUB_zpzz, fsub)
3917 DO_FP3(FMUL_zpzz, fmul)
3918 DO_FP3(FMIN_zpzz, fmin)
3919 DO_FP3(FMAX_zpzz, fmax)
3920 DO_FP3(FMINNM_zpzz, fminnum)
3921 DO_FP3(FMAXNM_zpzz, fmaxnum)
3922 DO_FP3(FABD, fabd)
3923 DO_FP3(FSCALE, fscalbn)
3924 DO_FP3(FDIV, fdiv)
3925 DO_FP3(FMULX, fmulx)
3927 #undef DO_FP3
3929 typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr,
3930 TCGv_i64, TCGv_ptr, TCGv_i32);
3932 static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
3933 TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn)
3935 unsigned vsz = vec_full_reg_size(s);
3936 TCGv_ptr t_zd, t_zn, t_pg, status;
3937 TCGv_i32 desc;
3939 t_zd = tcg_temp_new_ptr();
3940 t_zn = tcg_temp_new_ptr();
3941 t_pg = tcg_temp_new_ptr();
3942 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
3943 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
3944 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
3946 status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
3947 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
3948 fn(t_zd, t_zn, t_pg, scalar, status, desc);
3950 tcg_temp_free_ptr(status);
3951 tcg_temp_free_ptr(t_pg);
3952 tcg_temp_free_ptr(t_zn);
3953 tcg_temp_free_ptr(t_zd);
3956 static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
3957 gen_helper_sve_fp2scalar *fn)
3959 do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16,
3960 tcg_constant_i64(imm), fn);
3961 }
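/*
 * Each of these immediate forms encodes a single bit that chooses
 * between two fixed constants, e.g. 0.5 or 1.0 for FADD; the val[]
 * table holds both choices for each element size.
 */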
3963 #define DO_FP_IMM(NAME, name, const0, const1) \
3964 static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a) \
3966 static gen_helper_sve_fp2scalar * const fns[3] = { \
3967 gen_helper_sve_##name##_h, \
3968 gen_helper_sve_##name##_s, \
3969 gen_helper_sve_##name##_d \
3970 }; \
3971 static uint64_t const val[3][2] = { \
3972 { float16_##const0, float16_##const1 }, \
3973 { float32_##const0, float32_##const1 }, \
3974 { float64_##const0, float64_##const1 }, \
3975 }; \
3976 if (a->esz == 0) { \
3977 return false; \
3979 if (sve_access_check(s)) { \
3980 do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]); \
3982 return true; \
3985 DO_FP_IMM(FADD, fadds, half, one)
3986 DO_FP_IMM(FSUB, fsubs, half, one)
3987 DO_FP_IMM(FMUL, fmuls, half, two)
3988 DO_FP_IMM(FSUBR, fsubrs, half, one)
3989 DO_FP_IMM(FMAXNM, fmaxnms, zero, one)
3990 DO_FP_IMM(FMINNM, fminnms, zero, one)
3991 DO_FP_IMM(FMAX, fmaxs, zero, one)
3992 DO_FP_IMM(FMIN, fmins, zero, one)
3994 #undef DO_FP_IMM
3996 static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
3997 gen_helper_gvec_4_ptr *fn)
3999 if (fn == NULL) {
4000 return false;
4002 if (sve_access_check(s)) {
4003 unsigned vsz = vec_full_reg_size(s);
4004 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4005 tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
4006 vec_full_reg_offset(s, a->rn),
4007 vec_full_reg_offset(s, a->rm),
4008 pred_full_reg_offset(s, a->pg),
4009 status, vsz, vsz, 0, fn);
4010 tcg_temp_free_ptr(status);
4012 return true;
4015 #define DO_FPCMP(NAME, name) \
4016 static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \
4018 static gen_helper_gvec_4_ptr * const fns[4] = { \
4019 NULL, gen_helper_sve_##name##_h, \
4020 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
4021 }; \
4022 return do_fp_cmp(s, a, fns[a->esz]); \
4025 DO_FPCMP(FCMGE, fcmge)
4026 DO_FPCMP(FCMGT, fcmgt)
4027 DO_FPCMP(FCMEQ, fcmeq)
4028 DO_FPCMP(FCMNE, fcmne)
4029 DO_FPCMP(FCMUO, fcmuo)
4030 DO_FPCMP(FACGE, facge)
4031 DO_FPCMP(FACGT, facgt)
4033 #undef DO_FPCMP
4035 static bool trans_FCADD(DisasContext *s, arg_FCADD *a)
4037 static gen_helper_gvec_4_ptr * const fns[3] = {
4038 gen_helper_sve_fcadd_h,
4039 gen_helper_sve_fcadd_s,
4040 gen_helper_sve_fcadd_d
4043 if (a->esz == 0) {
4044 return false;
4046 if (sve_access_check(s)) {
4047 unsigned vsz = vec_full_reg_size(s);
4048 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4049 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
4050 vec_full_reg_offset(s, a->rn),
4051 vec_full_reg_offset(s, a->rm),
4052 pred_full_reg_offset(s, a->pg),
4053 status, vsz, vsz, a->rot, fns[a->esz - 1]);
4054 tcg_temp_free_ptr(status);
4056 return true;
4059 static bool do_fmla(DisasContext *s, arg_rprrr_esz *a,
4060 gen_helper_gvec_5_ptr *fn)
4062 if (a->esz == 0) {
4063 return false;
4065 if (sve_access_check(s)) {
4066 unsigned vsz = vec_full_reg_size(s);
4067 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4068 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
4069 vec_full_reg_offset(s, a->rn),
4070 vec_full_reg_offset(s, a->rm),
4071 vec_full_reg_offset(s, a->ra),
4072 pred_full_reg_offset(s, a->pg),
4073 status, vsz, vsz, 0, fn);
4074 tcg_temp_free_ptr(status);
4076 return true;
4079 #define DO_FMLA(NAME, name) \
4080 static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \
4082 static gen_helper_gvec_5_ptr * const fns[4] = { \
4083 NULL, gen_helper_sve_##name##_h, \
4084 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
4085 }; \
4086 return do_fmla(s, a, fns[a->esz]); \
4089 DO_FMLA(FMLA_zpzzz, fmla_zpzzz)
4090 DO_FMLA(FMLS_zpzzz, fmls_zpzzz)
4091 DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz)
4092 DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz)
4094 #undef DO_FMLA
4096 static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
4098 static gen_helper_gvec_5_ptr * const fns[4] = {
4099 NULL,
4100 gen_helper_sve_fcmla_zpzzz_h,
4101 gen_helper_sve_fcmla_zpzzz_s,
4102 gen_helper_sve_fcmla_zpzzz_d,
4105 if (a->esz == 0) {
4106 return false;
4108 if (sve_access_check(s)) {
4109 unsigned vsz = vec_full_reg_size(s);
4110 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4111 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
4112 vec_full_reg_offset(s, a->rn),
4113 vec_full_reg_offset(s, a->rm),
4114 vec_full_reg_offset(s, a->ra),
4115 pred_full_reg_offset(s, a->pg),
4116 status, vsz, vsz, a->rot, fns[a->esz]);
4117 tcg_temp_free_ptr(status);
4119 return true;
4122 static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
4124 static gen_helper_gvec_4_ptr * const fns[2] = {
4125 gen_helper_gvec_fcmlah_idx,
4126 gen_helper_gvec_fcmlas_idx,
4129 tcg_debug_assert(a->esz == 1 || a->esz == 2);
4130 tcg_debug_assert(a->rd == a->ra);
4131 if (sve_access_check(s)) {
4132 unsigned vsz = vec_full_reg_size(s);
4133 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
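/*
 * The simd_data argument below, a->index * 4 + a->rot, packs the
 * rotation into the low two bits with the element index above them,
 * for the helper to unpack again.
 */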
4134 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
4135 vec_full_reg_offset(s, a->rn),
4136 vec_full_reg_offset(s, a->rm),
4137 vec_full_reg_offset(s, a->ra),
4138 status, vsz, vsz,
4139 a->index * 4 + a->rot,
4140 fns[a->esz - 1]);
4141 tcg_temp_free_ptr(status);
4143 return true;
/*
 *** SVE Floating Point Unary Operations Predicated Group
 */
4150 static bool do_zpz_ptr(DisasContext *s, int rd, int rn, int pg,
4151 bool is_fp16, gen_helper_gvec_3_ptr *fn)
4153 if (sve_access_check(s)) {
4154 unsigned vsz = vec_full_reg_size(s);
4155 TCGv_ptr status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
4156 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
4157 vec_full_reg_offset(s, rn),
4158 pred_full_reg_offset(s, pg),
4159 status, vsz, vsz, 0, fn);
4160 tcg_temp_free_ptr(status);
4162 return true;
4165 static bool trans_FCVT_sh(DisasContext *s, arg_rpr_esz *a)
4167 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sh);
4170 static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a)
4172 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs);
4175 static bool trans_BFCVT(DisasContext *s, arg_rpr_esz *a)
4177 if (!dc_isar_feature(aa64_sve_bf16, s)) {
4178 return false;
4180 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvt);
4183 static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a)
4185 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh);
4188 static bool trans_FCVT_hd(DisasContext *s, arg_rpr_esz *a)
4190 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hd);
4193 static bool trans_FCVT_ds(DisasContext *s, arg_rpr_esz *a)
4195 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_ds);
4198 static bool trans_FCVT_sd(DisasContext *s, arg_rpr_esz *a)
4200 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sd);
4203 static bool trans_FCVTZS_hh(DisasContext *s, arg_rpr_esz *a)
4205 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hh);
4208 static bool trans_FCVTZU_hh(DisasContext *s, arg_rpr_esz *a)
4210 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hh);
4213 static bool trans_FCVTZS_hs(DisasContext *s, arg_rpr_esz *a)
4215 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hs);
4218 static bool trans_FCVTZU_hs(DisasContext *s, arg_rpr_esz *a)
4220 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hs);
4223 static bool trans_FCVTZS_hd(DisasContext *s, arg_rpr_esz *a)
4225 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hd);
4228 static bool trans_FCVTZU_hd(DisasContext *s, arg_rpr_esz *a)
4230 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hd);
4233 static bool trans_FCVTZS_ss(DisasContext *s, arg_rpr_esz *a)
4235 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ss);
4238 static bool trans_FCVTZU_ss(DisasContext *s, arg_rpr_esz *a)
4240 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ss);
4243 static bool trans_FCVTZS_sd(DisasContext *s, arg_rpr_esz *a)
4245 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_sd);
4248 static bool trans_FCVTZU_sd(DisasContext *s, arg_rpr_esz *a)
4250 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_sd);
4253 static bool trans_FCVTZS_ds(DisasContext *s, arg_rpr_esz *a)
4255 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ds);
4258 static bool trans_FCVTZU_ds(DisasContext *s, arg_rpr_esz *a)
4260 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ds);
4263 static bool trans_FCVTZS_dd(DisasContext *s, arg_rpr_esz *a)
4265 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_dd);
4268 static bool trans_FCVTZU_dd(DisasContext *s, arg_rpr_esz *a)
4270 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_dd);
4273 static gen_helper_gvec_3_ptr * const frint_fns[3] = {
4274 gen_helper_sve_frint_h,
4275 gen_helper_sve_frint_s,
4276 gen_helper_sve_frint_d
4279 static bool trans_FRINTI(DisasContext *s, arg_rpr_esz *a)
4281 if (a->esz == 0) {
4282 return false;
4284 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16,
4285 frint_fns[a->esz - 1]);
4288 static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a)
4290 static gen_helper_gvec_3_ptr * const fns[3] = {
4291 gen_helper_sve_frintx_h,
4292 gen_helper_sve_frintx_s,
4293 gen_helper_sve_frintx_d
4295 if (a->esz == 0) {
4296 return false;
4298 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4301 static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
4302 int mode, gen_helper_gvec_3_ptr *fn)
4304 if (sve_access_check(s)) {
4305 unsigned vsz = vec_full_reg_size(s);
4306 TCGv_i32 tmode = tcg_const_i32(mode);
4307 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
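/*
 * gen_helper_set_rmode installs the requested rounding mode and
 * writes the previous mode back into its destination, so the second
 * call below restores the original rounding state afterwards.
 */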
4309 gen_helper_set_rmode(tmode, tmode, status);
4311 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
4312 vec_full_reg_offset(s, a->rn),
4313 pred_full_reg_offset(s, a->pg),
4314 status, vsz, vsz, 0, fn);
4316 gen_helper_set_rmode(tmode, tmode, status);
4317 tcg_temp_free_i32(tmode);
4318 tcg_temp_free_ptr(status);
4320 return true;
4323 static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a)
4325 if (a->esz == 0) {
4326 return false;
4328 return do_frint_mode(s, a, float_round_nearest_even, frint_fns[a->esz - 1]);
4331 static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a)
4333 if (a->esz == 0) {
4334 return false;
4336 return do_frint_mode(s, a, float_round_up, frint_fns[a->esz - 1]);
4339 static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a)
4341 if (a->esz == 0) {
4342 return false;
4344 return do_frint_mode(s, a, float_round_down, frint_fns[a->esz - 1]);
4347 static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a)
4349 if (a->esz == 0) {
4350 return false;
4352 return do_frint_mode(s, a, float_round_to_zero, frint_fns[a->esz - 1]);
4355 static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a)
4357 if (a->esz == 0) {
4358 return false;
4360 return do_frint_mode(s, a, float_round_ties_away, frint_fns[a->esz - 1]);
4363 static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a)
4365 static gen_helper_gvec_3_ptr * const fns[3] = {
4366 gen_helper_sve_frecpx_h,
4367 gen_helper_sve_frecpx_s,
4368 gen_helper_sve_frecpx_d
4370 if (a->esz == 0) {
4371 return false;
4373 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4376 static bool trans_FSQRT(DisasContext *s, arg_rpr_esz *a)
4378 static gen_helper_gvec_3_ptr * const fns[3] = {
4379 gen_helper_sve_fsqrt_h,
4380 gen_helper_sve_fsqrt_s,
4381 gen_helper_sve_fsqrt_d
4383 if (a->esz == 0) {
4384 return false;
4386 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4389 static bool trans_SCVTF_hh(DisasContext *s, arg_rpr_esz *a)
4391 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_hh);
4394 static bool trans_SCVTF_sh(DisasContext *s, arg_rpr_esz *a)
4396 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_sh);
4399 static bool trans_SCVTF_dh(DisasContext *s, arg_rpr_esz *a)
4401 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_dh);
4404 static bool trans_SCVTF_ss(DisasContext *s, arg_rpr_esz *a)
4406 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ss);
4409 static bool trans_SCVTF_ds(DisasContext *s, arg_rpr_esz *a)
4411 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ds);
4414 static bool trans_SCVTF_sd(DisasContext *s, arg_rpr_esz *a)
4416 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_sd);
4419 static bool trans_SCVTF_dd(DisasContext *s, arg_rpr_esz *a)
4421 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_dd);
4424 static bool trans_UCVTF_hh(DisasContext *s, arg_rpr_esz *a)
4426 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_hh);
4429 static bool trans_UCVTF_sh(DisasContext *s, arg_rpr_esz *a)
4431 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_sh);
4434 static bool trans_UCVTF_dh(DisasContext *s, arg_rpr_esz *a)
4436 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_dh);
4439 static bool trans_UCVTF_ss(DisasContext *s, arg_rpr_esz *a)
4441 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ss);
4444 static bool trans_UCVTF_ds(DisasContext *s, arg_rpr_esz *a)
4446 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ds);
4449 static bool trans_UCVTF_sd(DisasContext *s, arg_rpr_esz *a)
4451 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_sd);
4454 static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a)
4456 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_dd);
/*
 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
 */
4463 /* Subroutine loading a vector register at VOFS of LEN bytes.
 * The load should begin at the address Rn + IMM.
 */
4467 static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
4469 int len_align = QEMU_ALIGN_DOWN(len, 8);
4470 int len_remain = len % 8;
4471 int nparts = len / 8 + ctpop8(len_remain);
4472 int midx = get_mem_index(s);
4473 TCGv_i64 dirty_addr, clean_addr, t0, t1;
4475 dirty_addr = tcg_temp_new_i64();
4476 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4477 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
4478 tcg_temp_free_i64(dirty_addr);
/*
 * Note that unpredicated load/store of vector/predicate registers
 * are defined as a stream of bytes, which equates to little-endian
 * operations on larger quantities.
 * Attempt to keep code expansion to a minimum by limiting the
 * amount of unrolling done.
 */
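/*
 * For example, a 32-byte vector gives len_align = 32, len_remain = 0,
 * nparts = 4, so the unrolled path below emits four 8-byte
 * little-endian loads; a 64-byte vector gives nparts = 8 and uses the
 * loop form instead.
 */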
4487 if (nparts <= 4) {
4488 int i;
4490 t0 = tcg_temp_new_i64();
4491 for (i = 0; i < len_align; i += 8) {
4492 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
4493 tcg_gen_st_i64(t0, cpu_env, vofs + i);
4494 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4496 tcg_temp_free_i64(t0);
4497 } else {
4498 TCGLabel *loop = gen_new_label();
4499 TCGv_ptr tp, i = tcg_const_local_ptr(0);
4501 /* Copy the clean address into a local temp, live across the loop. */
4502 t0 = clean_addr;
4503 clean_addr = new_tmp_a64_local(s);
4504 tcg_gen_mov_i64(clean_addr, t0);
4506 gen_set_label(loop);
4508 t0 = tcg_temp_new_i64();
4509 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
4510 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4512 tp = tcg_temp_new_ptr();
4513 tcg_gen_add_ptr(tp, cpu_env, i);
4514 tcg_gen_addi_ptr(i, i, 8);
4515 tcg_gen_st_i64(t0, tp, vofs);
4516 tcg_temp_free_ptr(tp);
4517 tcg_temp_free_i64(t0);
4519 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4520 tcg_temp_free_ptr(i);
/*
 * Predicate register loads can be any multiple of 2.
 * Note that we still store the entire 64-bit unit into cpu_env.
 */
4527 if (len_remain) {
4528 t0 = tcg_temp_new_i64();
4529 switch (len_remain) {
4530 case 2:
4531 case 4:
4532 case 8:
4533 tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
4534 MO_LE | ctz32(len_remain));
4535 break;
4537 case 6:
4538 t1 = tcg_temp_new_i64();
4539 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
4540 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
4541 tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
4542 tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
4543 tcg_temp_free_i64(t1);
4544 break;
4546 default:
4547 g_assert_not_reached();
4549 tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
4550 tcg_temp_free_i64(t0);
4554 /* Similarly for stores. */
4555 static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
4557 int len_align = QEMU_ALIGN_DOWN(len, 8);
4558 int len_remain = len % 8;
4559 int nparts = len / 8 + ctpop8(len_remain);
4560 int midx = get_mem_index(s);
4561 TCGv_i64 dirty_addr, clean_addr, t0;
4563 dirty_addr = tcg_temp_new_i64();
4564 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4565 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
4566 tcg_temp_free_i64(dirty_addr);
4568 /* Note that unpredicated load/store of vector/predicate registers
4569 * are defined as a stream of bytes, which equates to little-endian
4570 * operations on larger quantities. There is no nice way to force
4571 * a little-endian store for aarch64_be-linux-user out of line.
4573 * Attempt to keep code expansion to a minimum by limiting the
 * amount of unrolling done.
 */
4576 if (nparts <= 4) {
4577 int i;
4579 t0 = tcg_temp_new_i64();
4580 for (i = 0; i < len_align; i += 8) {
4581 tcg_gen_ld_i64(t0, cpu_env, vofs + i);
4582 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
4583 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4585 tcg_temp_free_i64(t0);
4586 } else {
4587 TCGLabel *loop = gen_new_label();
4588 TCGv_ptr tp, i = tcg_const_local_ptr(0);
4590 /* Copy the clean address into a local temp, live across the loop. */
4591 t0 = clean_addr;
4592 clean_addr = new_tmp_a64_local(s);
4593 tcg_gen_mov_i64(clean_addr, t0);
4595 gen_set_label(loop);
4597 t0 = tcg_temp_new_i64();
4598 tp = tcg_temp_new_ptr();
4599 tcg_gen_add_ptr(tp, cpu_env, i);
4600 tcg_gen_ld_i64(t0, tp, vofs);
4601 tcg_gen_addi_ptr(i, i, 8);
4602 tcg_temp_free_ptr(tp);
4604 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
4605 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4606 tcg_temp_free_i64(t0);
4608 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4609 tcg_temp_free_ptr(i);
4612 /* Predicate register stores can be any multiple of 2. */
4613 if (len_remain) {
4614 t0 = tcg_temp_new_i64();
4615 tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
4617 switch (len_remain) {
4618 case 2:
4619 case 4:
4620 case 8:
4621 tcg_gen_qemu_st_i64(t0, clean_addr, midx,
4622 MO_LE | ctz32(len_remain));
4623 break;
4625 case 6:
4626 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL);
4627 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
4628 tcg_gen_shri_i64(t0, t0, 32);
4629 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW);
4630 break;
4632 default:
4633 g_assert_not_reached();
4635 tcg_temp_free_i64(t0);
4639 static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
4641 if (sve_access_check(s)) {
4642 int size = vec_full_reg_size(s);
4643 int off = vec_full_reg_offset(s, a->rd);
4644 do_ldr(s, off, size, a->rn, a->imm * size);
4646 return true;
4649 static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
4651 if (sve_access_check(s)) {
4652 int size = pred_full_reg_size(s);
4653 int off = pred_full_reg_offset(s, a->rd);
4654 do_ldr(s, off, size, a->rn, a->imm * size);
4656 return true;
4659 static bool trans_STR_zri(DisasContext *s, arg_rri *a)
4661 if (sve_access_check(s)) {
4662 int size = vec_full_reg_size(s);
4663 int off = vec_full_reg_offset(s, a->rd);
4664 do_str(s, off, size, a->rn, a->imm * size);
4666 return true;
4669 static bool trans_STR_pri(DisasContext *s, arg_rri *a)
4671 if (sve_access_check(s)) {
4672 int size = pred_full_reg_size(s);
4673 int off = pred_full_reg_offset(s, a->rd);
4674 do_str(s, off, size, a->rn, a->imm * size);
4676 return true;
/*
 *** SVE Memory - Contiguous Load Group
 */
4683 /* The memory mode of the dtype. */
4684 static const MemOp dtype_mop[16] = {
4685 MO_UB, MO_UB, MO_UB, MO_UB,
4686 MO_SL, MO_UW, MO_UW, MO_UW,
4687 MO_SW, MO_SW, MO_UL, MO_UL,
MO_SB, MO_SB, MO_SB, MO_UQ
};
4691 #define dtype_msz(x) (dtype_mop[x] & MO_SIZE)
4693 /* The vector element size of dtype. */
4694 static const uint8_t dtype_esz[16] = {
4695 0, 1, 2, 3,
4696 3, 1, 2, 3,
4697 3, 2, 2, 3,
3, 2, 1, 3
};
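/*
 * Worked example: dtype 5 is LD1H into 16-bit elements, so
 * dtype_mop[5] = MO_UW, dtype_msz(5) = 1 and dtype_esz[5] = 1;
 * dtype 4 is LD1SW into 64-bit elements, so dtype_mop[4] = MO_SL,
 * dtype_msz(4) = 2 and dtype_esz[4] = 3 (cf. the ldr_fns table below).
 */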
4701 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
4702 int dtype, uint32_t mte_n, bool is_write,
4703 gen_helper_gvec_mem *fn)
4705 unsigned vsz = vec_full_reg_size(s);
4706 TCGv_ptr t_pg;
4707 int desc = 0;
/*
 * For e.g. LD4, there are not enough arguments to pass all 4
 * registers as pointers, so encode the regno into the data field.
 * For consistency, do this even for LD1.
 */
4714 if (s->mte_active[0]) {
4715 int msz = dtype_msz(dtype);
4717 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
4718 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
4719 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
4720 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
4721 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
4722 desc <<= SVE_MTEDESC_SHIFT;
4723 } else {
4724 addr = clean_data_tbi(s, addr);
4727 desc = simd_desc(vsz, vsz, zt | desc);
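/*
 * The final descriptor thus carries zt in the low bits of the
 * simd_data field, with the MTE fields (when active) packed above
 * SVE_MTEDESC_SHIFT; the helpers recover the register number from
 * there.
 */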
4728 t_pg = tcg_temp_new_ptr();
4730 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
4731 fn(cpu_env, t_pg, addr, tcg_constant_i32(desc));
4733 tcg_temp_free_ptr(t_pg);
4736 /* Indexed by [mte][be][dtype][nreg] */
4737 static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
4738 { /* mte inactive, little-endian */
4739 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
4740 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
4741 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
4742 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
4743 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
4745 { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
4746 { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
4747 gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
4748 { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
4749 { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },
4751 { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
4752 { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
4753 { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
4754 gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
4755 { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },
4757 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
4758 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
4759 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
4760 { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
4761 gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
4763 /* mte inactive, big-endian */
4764 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
4765 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
4766 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
4767 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
4768 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
4770 { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
4771 { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
4772 gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
4773 { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
4774 { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
4776 { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
4777 { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
4778 { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
4779 gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
4780 { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
4782 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
4783 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
4784 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
4785 { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
4786 gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },
4788 { /* mte active, little-endian */
4789 { { gen_helper_sve_ld1bb_r_mte,
4790 gen_helper_sve_ld2bb_r_mte,
4791 gen_helper_sve_ld3bb_r_mte,
4792 gen_helper_sve_ld4bb_r_mte },
4793 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
4794 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
4795 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
4797 { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
4798 { gen_helper_sve_ld1hh_le_r_mte,
4799 gen_helper_sve_ld2hh_le_r_mte,
4800 gen_helper_sve_ld3hh_le_r_mte,
4801 gen_helper_sve_ld4hh_le_r_mte },
4802 { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
4803 { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },
4805 { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
4806 { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
4807 { gen_helper_sve_ld1ss_le_r_mte,
4808 gen_helper_sve_ld2ss_le_r_mte,
4809 gen_helper_sve_ld3ss_le_r_mte,
4810 gen_helper_sve_ld4ss_le_r_mte },
4811 { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },
4813 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
4814 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
4815 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
4816 { gen_helper_sve_ld1dd_le_r_mte,
4817 gen_helper_sve_ld2dd_le_r_mte,
4818 gen_helper_sve_ld3dd_le_r_mte,
4819 gen_helper_sve_ld4dd_le_r_mte } },
4821 /* mte active, big-endian */
4822 { { gen_helper_sve_ld1bb_r_mte,
4823 gen_helper_sve_ld2bb_r_mte,
4824 gen_helper_sve_ld3bb_r_mte,
4825 gen_helper_sve_ld4bb_r_mte },
4826 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
4827 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
4828 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
4830 { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
4831 { gen_helper_sve_ld1hh_be_r_mte,
4832 gen_helper_sve_ld2hh_be_r_mte,
4833 gen_helper_sve_ld3hh_be_r_mte,
4834 gen_helper_sve_ld4hh_be_r_mte },
4835 { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
4836 { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },
4838 { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
4839 { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
4840 { gen_helper_sve_ld1ss_be_r_mte,
4841 gen_helper_sve_ld2ss_be_r_mte,
4842 gen_helper_sve_ld3ss_be_r_mte,
4843 gen_helper_sve_ld4ss_be_r_mte },
4844 { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },
4846 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
4847 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
4848 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
4849 { gen_helper_sve_ld1dd_be_r_mte,
4850 gen_helper_sve_ld2dd_be_r_mte,
4851 gen_helper_sve_ld3dd_be_r_mte,
4852 gen_helper_sve_ld4dd_be_r_mte } } },
4855 static void do_ld_zpa(DisasContext *s, int zt, int pg,
4856 TCGv_i64 addr, int dtype, int nreg)
4858 gen_helper_gvec_mem *fn
4859 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];
/*
 * While there are holes in the table, they are not
 * accessible via the instruction encoding.
 */
4865 assert(fn != NULL);
4866 do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn);
4869 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
4871 if (a->rm == 31) {
4872 return false;
4874 if (sve_access_check(s)) {
4875 TCGv_i64 addr = new_tmp_a64(s);
4876 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4877 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4878 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4880 return true;
4883 static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
4885 if (sve_access_check(s)) {
4886 int vsz = vec_full_reg_size(s);
4887 int elements = vsz >> dtype_esz[a->dtype];
4888 TCGv_i64 addr = new_tmp_a64(s);
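/*
 * E.g. LD1H (dtype 5) with a->imm = 2 on a 32-byte vector: elements =
 * 32 >> 1 = 16, so the offset below is (2 * 16 * 1) << 1 = 64 bytes;
 * the immediate counts whole (multi-)register transfers.
 */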
4890 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
4891 (a->imm * elements * (a->nreg + 1))
4892 << dtype_msz(a->dtype));
4893 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4895 return true;
4898 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
4900 static gen_helper_gvec_mem * const fns[2][2][16] = {
4901 { /* mte inactive, little-endian */
4902 { gen_helper_sve_ldff1bb_r,
4903 gen_helper_sve_ldff1bhu_r,
4904 gen_helper_sve_ldff1bsu_r,
4905 gen_helper_sve_ldff1bdu_r,
4907 gen_helper_sve_ldff1sds_le_r,
4908 gen_helper_sve_ldff1hh_le_r,
4909 gen_helper_sve_ldff1hsu_le_r,
4910 gen_helper_sve_ldff1hdu_le_r,
4912 gen_helper_sve_ldff1hds_le_r,
4913 gen_helper_sve_ldff1hss_le_r,
4914 gen_helper_sve_ldff1ss_le_r,
4915 gen_helper_sve_ldff1sdu_le_r,
4917 gen_helper_sve_ldff1bds_r,
4918 gen_helper_sve_ldff1bss_r,
4919 gen_helper_sve_ldff1bhs_r,
4920 gen_helper_sve_ldff1dd_le_r },
4922 /* mte inactive, big-endian */
4923 { gen_helper_sve_ldff1bb_r,
4924 gen_helper_sve_ldff1bhu_r,
4925 gen_helper_sve_ldff1bsu_r,
4926 gen_helper_sve_ldff1bdu_r,
4928 gen_helper_sve_ldff1sds_be_r,
4929 gen_helper_sve_ldff1hh_be_r,
4930 gen_helper_sve_ldff1hsu_be_r,
4931 gen_helper_sve_ldff1hdu_be_r,
4933 gen_helper_sve_ldff1hds_be_r,
4934 gen_helper_sve_ldff1hss_be_r,
4935 gen_helper_sve_ldff1ss_be_r,
4936 gen_helper_sve_ldff1sdu_be_r,
4938 gen_helper_sve_ldff1bds_r,
4939 gen_helper_sve_ldff1bss_r,
4940 gen_helper_sve_ldff1bhs_r,
4941 gen_helper_sve_ldff1dd_be_r } },
4943 { /* mte active, little-endian */
4944 { gen_helper_sve_ldff1bb_r_mte,
4945 gen_helper_sve_ldff1bhu_r_mte,
4946 gen_helper_sve_ldff1bsu_r_mte,
4947 gen_helper_sve_ldff1bdu_r_mte,
4949 gen_helper_sve_ldff1sds_le_r_mte,
4950 gen_helper_sve_ldff1hh_le_r_mte,
4951 gen_helper_sve_ldff1hsu_le_r_mte,
4952 gen_helper_sve_ldff1hdu_le_r_mte,
4954 gen_helper_sve_ldff1hds_le_r_mte,
4955 gen_helper_sve_ldff1hss_le_r_mte,
4956 gen_helper_sve_ldff1ss_le_r_mte,
4957 gen_helper_sve_ldff1sdu_le_r_mte,
4959 gen_helper_sve_ldff1bds_r_mte,
4960 gen_helper_sve_ldff1bss_r_mte,
4961 gen_helper_sve_ldff1bhs_r_mte,
4962 gen_helper_sve_ldff1dd_le_r_mte },
4964 /* mte active, big-endian */
4965 { gen_helper_sve_ldff1bb_r_mte,
4966 gen_helper_sve_ldff1bhu_r_mte,
4967 gen_helper_sve_ldff1bsu_r_mte,
4968 gen_helper_sve_ldff1bdu_r_mte,
4970 gen_helper_sve_ldff1sds_be_r_mte,
4971 gen_helper_sve_ldff1hh_be_r_mte,
4972 gen_helper_sve_ldff1hsu_be_r_mte,
4973 gen_helper_sve_ldff1hdu_be_r_mte,
4975 gen_helper_sve_ldff1hds_be_r_mte,
4976 gen_helper_sve_ldff1hss_be_r_mte,
4977 gen_helper_sve_ldff1ss_be_r_mte,
4978 gen_helper_sve_ldff1sdu_be_r_mte,
4980 gen_helper_sve_ldff1bds_r_mte,
4981 gen_helper_sve_ldff1bss_r_mte,
4982 gen_helper_sve_ldff1bhs_r_mte,
4983 gen_helper_sve_ldff1dd_be_r_mte } },
4986 if (sve_access_check(s)) {
4987 TCGv_i64 addr = new_tmp_a64(s);
4988 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4989 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4990 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
4991 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
4993 return true;
4996 static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
4998 static gen_helper_gvec_mem * const fns[2][2][16] = {
4999 { /* mte inactive, little-endian */
5000 { gen_helper_sve_ldnf1bb_r,
5001 gen_helper_sve_ldnf1bhu_r,
5002 gen_helper_sve_ldnf1bsu_r,
5003 gen_helper_sve_ldnf1bdu_r,
5005 gen_helper_sve_ldnf1sds_le_r,
5006 gen_helper_sve_ldnf1hh_le_r,
5007 gen_helper_sve_ldnf1hsu_le_r,
5008 gen_helper_sve_ldnf1hdu_le_r,
5010 gen_helper_sve_ldnf1hds_le_r,
5011 gen_helper_sve_ldnf1hss_le_r,
5012 gen_helper_sve_ldnf1ss_le_r,
5013 gen_helper_sve_ldnf1sdu_le_r,
5015 gen_helper_sve_ldnf1bds_r,
5016 gen_helper_sve_ldnf1bss_r,
5017 gen_helper_sve_ldnf1bhs_r,
5018 gen_helper_sve_ldnf1dd_le_r },
5020 /* mte inactive, big-endian */
5021 { gen_helper_sve_ldnf1bb_r,
5022 gen_helper_sve_ldnf1bhu_r,
5023 gen_helper_sve_ldnf1bsu_r,
5024 gen_helper_sve_ldnf1bdu_r,
5026 gen_helper_sve_ldnf1sds_be_r,
5027 gen_helper_sve_ldnf1hh_be_r,
5028 gen_helper_sve_ldnf1hsu_be_r,
5029 gen_helper_sve_ldnf1hdu_be_r,
5031 gen_helper_sve_ldnf1hds_be_r,
5032 gen_helper_sve_ldnf1hss_be_r,
5033 gen_helper_sve_ldnf1ss_be_r,
5034 gen_helper_sve_ldnf1sdu_be_r,
5036 gen_helper_sve_ldnf1bds_r,
5037 gen_helper_sve_ldnf1bss_r,
5038 gen_helper_sve_ldnf1bhs_r,
5039 gen_helper_sve_ldnf1dd_be_r } },
{ /* mte active, little-endian */
5042 { gen_helper_sve_ldnf1bb_r_mte,
5043 gen_helper_sve_ldnf1bhu_r_mte,
5044 gen_helper_sve_ldnf1bsu_r_mte,
5045 gen_helper_sve_ldnf1bdu_r_mte,
5047 gen_helper_sve_ldnf1sds_le_r_mte,
5048 gen_helper_sve_ldnf1hh_le_r_mte,
5049 gen_helper_sve_ldnf1hsu_le_r_mte,
5050 gen_helper_sve_ldnf1hdu_le_r_mte,
5052 gen_helper_sve_ldnf1hds_le_r_mte,
5053 gen_helper_sve_ldnf1hss_le_r_mte,
5054 gen_helper_sve_ldnf1ss_le_r_mte,
5055 gen_helper_sve_ldnf1sdu_le_r_mte,
5057 gen_helper_sve_ldnf1bds_r_mte,
5058 gen_helper_sve_ldnf1bss_r_mte,
5059 gen_helper_sve_ldnf1bhs_r_mte,
5060 gen_helper_sve_ldnf1dd_le_r_mte },
/* mte active, big-endian */
5063 { gen_helper_sve_ldnf1bb_r_mte,
5064 gen_helper_sve_ldnf1bhu_r_mte,
5065 gen_helper_sve_ldnf1bsu_r_mte,
5066 gen_helper_sve_ldnf1bdu_r_mte,
5068 gen_helper_sve_ldnf1sds_be_r_mte,
5069 gen_helper_sve_ldnf1hh_be_r_mte,
5070 gen_helper_sve_ldnf1hsu_be_r_mte,
5071 gen_helper_sve_ldnf1hdu_be_r_mte,
5073 gen_helper_sve_ldnf1hds_be_r_mte,
5074 gen_helper_sve_ldnf1hss_be_r_mte,
5075 gen_helper_sve_ldnf1ss_be_r_mte,
5076 gen_helper_sve_ldnf1sdu_be_r_mte,
5078 gen_helper_sve_ldnf1bds_r_mte,
5079 gen_helper_sve_ldnf1bss_r_mte,
5080 gen_helper_sve_ldnf1bhs_r_mte,
5081 gen_helper_sve_ldnf1dd_be_r_mte } },
5084 if (sve_access_check(s)) {
5085 int vsz = vec_full_reg_size(s);
5086 int elements = vsz >> dtype_esz[a->dtype];
5087 int off = (a->imm * elements) << dtype_msz(a->dtype);
5088 TCGv_i64 addr = new_tmp_a64(s);
5090 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
5091 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
5092 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
5094 return true;
5097 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
5099 unsigned vsz = vec_full_reg_size(s);
5100 TCGv_ptr t_pg;
5101 int poff;
5103 /* Load the first quadword using the normal predicated load helpers. */
5104 poff = pred_full_reg_offset(s, pg);
5105 if (vsz > 16) {
/*
 * Zero-extend the first 16 bits of the predicate into a temporary.
 * This avoids triggering an assert making sure we don't have bits
 * set within a predicate beyond VQ, but we have lowered VQ to 1
 * for this load operation.
 */
5112 TCGv_i64 tmp = tcg_temp_new_i64();
5113 #if HOST_BIG_ENDIAN
5114 poff += 6;
5115 #endif
5116 tcg_gen_ld16u_i64(tmp, cpu_env, poff);
5118 poff = offsetof(CPUARMState, vfp.preg_tmp);
5119 tcg_gen_st_i64(tmp, cpu_env, poff);
5120 tcg_temp_free_i64(tmp);
5123 t_pg = tcg_temp_new_ptr();
5124 tcg_gen_addi_ptr(t_pg, cpu_env, poff);
5126 gen_helper_gvec_mem *fn
5127 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
5128 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
5130 tcg_temp_free_ptr(t_pg);
5132 /* Replicate that first quadword. */
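/*
 * gvec_dup_mem with vece = 4 replicates in 2^4 = 16-byte units:
 * e.g. with vsz = 48, the quadword at doff is copied to doff + 16
 * and doff + 32.
 */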
5133 if (vsz > 16) {
5134 int doff = vec_full_reg_offset(s, zt);
5135 tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16);
5139 static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
5141 if (a->rm == 31) {
5142 return false;
5144 if (sve_access_check(s)) {
5145 int msz = dtype_msz(a->dtype);
5146 TCGv_i64 addr = new_tmp_a64(s);
5147 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
5148 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5149 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
5151 return true;
5154 static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
5156 if (sve_access_check(s)) {
5157 TCGv_i64 addr = new_tmp_a64(s);
5158 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
5159 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
5161 return true;
5164 static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
5166 unsigned vsz = vec_full_reg_size(s);
5167 unsigned vsz_r32;
5168 TCGv_ptr t_pg;
5169 int poff, doff;
5171 if (vsz < 32) {
/*
 * Note that this UNDEFINED check comes after CheckSVEEnabled()
 * in the ARM pseudocode, which is the sve_access_check() done
 * in our caller.  We should not now return false from the caller.
 */
5177 unallocated_encoding(s);
5178 return;
5181 /* Load the first octaword using the normal predicated load helpers. */
5183 poff = pred_full_reg_offset(s, pg);
5184 if (vsz > 32) {
/*
 * Zero-extend the first 32 bits of the predicate into a temporary.
 * This avoids triggering an assert making sure we don't have bits
 * set within a predicate beyond VQ, but we have lowered VQ to 2
 * for this load operation.
 */
5191 TCGv_i64 tmp = tcg_temp_new_i64();
5192 #if HOST_BIG_ENDIAN
5193 poff += 4;
5194 #endif
5195 tcg_gen_ld32u_i64(tmp, cpu_env, poff);
5197 poff = offsetof(CPUARMState, vfp.preg_tmp);
5198 tcg_gen_st_i64(tmp, cpu_env, poff);
5199 tcg_temp_free_i64(tmp);
5202 t_pg = tcg_temp_new_ptr();
5203 tcg_gen_addi_ptr(t_pg, cpu_env, poff);
5205 gen_helper_gvec_mem *fn
5206 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
5207 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
5209 tcg_temp_free_ptr(t_pg);
/*
 * Replicate that first octaword.
 * The replication happens in units of 32; if the full vector size
 * is not a multiple of 32, the final bits are zeroed.
 */
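/*
 * E.g. vsz = 48 gives vsz_r32 = 32: no replication occurs (vsz < 64)
 * and the trailing 16 bytes at doff + 32 are zeroed.  vsz = 96 copies
 * the octaword at doff to doff + 32 and doff + 64, with nothing to
 * zero.
 */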
5216 doff = vec_full_reg_offset(s, zt);
5217 vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32);
5218 if (vsz >= 64) {
5219 tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32);
5221 vsz -= vsz_r32;
5222 if (vsz) {
5223 tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0);
5227 static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
5229 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5230 return false;
5232 if (a->rm == 31) {
5233 return false;
5235 if (sve_access_check(s)) {
5236 TCGv_i64 addr = new_tmp_a64(s);
5237 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
5238 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5239 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5241 return true;
5244 static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
5246 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5247 return false;
5249 if (sve_access_check(s)) {
5250 TCGv_i64 addr = new_tmp_a64(s);
5251 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
5252 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5254 return true;
5257 /* Load and broadcast element. */
5258 static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
5260 unsigned vsz = vec_full_reg_size(s);
5261 unsigned psz = pred_full_reg_size(s);
5262 unsigned esz = dtype_esz[a->dtype];
5263 unsigned msz = dtype_msz(a->dtype);
5264 TCGLabel *over;
5265 TCGv_i64 temp, clean_addr;
5267 if (!sve_access_check(s)) {
5268 return true;
5271 over = gen_new_label();
5273 /* If the guarding predicate has no bits set, no load occurs. */
5274 if (psz <= 8) {
5275 /* Reduce the pred_esz_masks value simply to reduce the
 * size of the code generated here.
 */
5278 uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
5279 temp = tcg_temp_new_i64();
5280 tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
5281 tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
5282 tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
5283 tcg_temp_free_i64(temp);
5284 } else {
5285 TCGv_i32 t32 = tcg_temp_new_i32();
5286 find_last_active(s, t32, esz, a->pg);
5287 tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over);
5288 tcg_temp_free_i32(t32);
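/*
 * find_last_active returns -1 when no element is active, so the
 * branch above skips the load and broadcast entirely; do_movz_zpz
 * below still zeroes all (inactive) elements of the destination.
 */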
5291 /* Load the data. */
5292 temp = tcg_temp_new_i64();
5293 tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz);
5294 clean_addr = gen_mte_check1(s, temp, false, true, msz);
5296 tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
5297 finalize_memop(s, dtype_mop[a->dtype]));
5299 /* Broadcast to *all* elements. */
5300 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
5301 vsz, vsz, temp);
5302 tcg_temp_free_i64(temp);
5304 /* Zero the inactive elements. */
5305 gen_set_label(over);
5306 return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false);
5309 static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
5310 int msz, int esz, int nreg)
5312 static gen_helper_gvec_mem * const fn_single[2][2][4][4] = {
5313 { { { gen_helper_sve_st1bb_r,
5314 gen_helper_sve_st1bh_r,
5315 gen_helper_sve_st1bs_r,
5316 gen_helper_sve_st1bd_r },
5317 { NULL,
5318 gen_helper_sve_st1hh_le_r,
5319 gen_helper_sve_st1hs_le_r,
5320 gen_helper_sve_st1hd_le_r },
5321 { NULL, NULL,
5322 gen_helper_sve_st1ss_le_r,
5323 gen_helper_sve_st1sd_le_r },
5324 { NULL, NULL, NULL,
5325 gen_helper_sve_st1dd_le_r } },
5326 { { gen_helper_sve_st1bb_r,
5327 gen_helper_sve_st1bh_r,
5328 gen_helper_sve_st1bs_r,
5329 gen_helper_sve_st1bd_r },
5330 { NULL,
5331 gen_helper_sve_st1hh_be_r,
5332 gen_helper_sve_st1hs_be_r,
5333 gen_helper_sve_st1hd_be_r },
5334 { NULL, NULL,
5335 gen_helper_sve_st1ss_be_r,
5336 gen_helper_sve_st1sd_be_r },
5337 { NULL, NULL, NULL,
5338 gen_helper_sve_st1dd_be_r } } },
5340 { { { gen_helper_sve_st1bb_r_mte,
5341 gen_helper_sve_st1bh_r_mte,
5342 gen_helper_sve_st1bs_r_mte,
5343 gen_helper_sve_st1bd_r_mte },
5344 { NULL,
5345 gen_helper_sve_st1hh_le_r_mte,
5346 gen_helper_sve_st1hs_le_r_mte,
5347 gen_helper_sve_st1hd_le_r_mte },
5348 { NULL, NULL,
5349 gen_helper_sve_st1ss_le_r_mte,
5350 gen_helper_sve_st1sd_le_r_mte },
5351 { NULL, NULL, NULL,
5352 gen_helper_sve_st1dd_le_r_mte } },
5353 { { gen_helper_sve_st1bb_r_mte,
5354 gen_helper_sve_st1bh_r_mte,
5355 gen_helper_sve_st1bs_r_mte,
5356 gen_helper_sve_st1bd_r_mte },
5357 { NULL,
5358 gen_helper_sve_st1hh_be_r_mte,
5359 gen_helper_sve_st1hs_be_r_mte,
5360 gen_helper_sve_st1hd_be_r_mte },
5361 { NULL, NULL,
5362 gen_helper_sve_st1ss_be_r_mte,
5363 gen_helper_sve_st1sd_be_r_mte },
5364 { NULL, NULL, NULL,
5365 gen_helper_sve_st1dd_be_r_mte } } },
5367 static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = {
5368 { { { gen_helper_sve_st2bb_r,
5369 gen_helper_sve_st2hh_le_r,
5370 gen_helper_sve_st2ss_le_r,
5371 gen_helper_sve_st2dd_le_r },
5372 { gen_helper_sve_st3bb_r,
5373 gen_helper_sve_st3hh_le_r,
5374 gen_helper_sve_st3ss_le_r,
5375 gen_helper_sve_st3dd_le_r },
5376 { gen_helper_sve_st4bb_r,
5377 gen_helper_sve_st4hh_le_r,
5378 gen_helper_sve_st4ss_le_r,
5379 gen_helper_sve_st4dd_le_r } },
5380 { { gen_helper_sve_st2bb_r,
5381 gen_helper_sve_st2hh_be_r,
5382 gen_helper_sve_st2ss_be_r,
5383 gen_helper_sve_st2dd_be_r },
5384 { gen_helper_sve_st3bb_r,
5385 gen_helper_sve_st3hh_be_r,
5386 gen_helper_sve_st3ss_be_r,
5387 gen_helper_sve_st3dd_be_r },
5388 { gen_helper_sve_st4bb_r,
5389 gen_helper_sve_st4hh_be_r,
5390 gen_helper_sve_st4ss_be_r,
5391 gen_helper_sve_st4dd_be_r } } },
5392 { { { gen_helper_sve_st2bb_r_mte,
5393 gen_helper_sve_st2hh_le_r_mte,
5394 gen_helper_sve_st2ss_le_r_mte,
5395 gen_helper_sve_st2dd_le_r_mte },
5396 { gen_helper_sve_st3bb_r_mte,
5397 gen_helper_sve_st3hh_le_r_mte,
5398 gen_helper_sve_st3ss_le_r_mte,
5399 gen_helper_sve_st3dd_le_r_mte },
5400 { gen_helper_sve_st4bb_r_mte,
5401 gen_helper_sve_st4hh_le_r_mte,
5402 gen_helper_sve_st4ss_le_r_mte,
5403 gen_helper_sve_st4dd_le_r_mte } },
5404 { { gen_helper_sve_st2bb_r_mte,
5405 gen_helper_sve_st2hh_be_r_mte,
5406 gen_helper_sve_st2ss_be_r_mte,
5407 gen_helper_sve_st2dd_be_r_mte },
5408 { gen_helper_sve_st3bb_r_mte,
5409 gen_helper_sve_st3hh_be_r_mte,
5410 gen_helper_sve_st3ss_be_r_mte,
5411 gen_helper_sve_st3dd_be_r_mte },
5412 { gen_helper_sve_st4bb_r_mte,
5413 gen_helper_sve_st4hh_be_r_mte,
5414 gen_helper_sve_st4ss_be_r_mte,
5415 gen_helper_sve_st4dd_be_r_mte } } },
5417 gen_helper_gvec_mem *fn;
5418 int be = s->be_data == MO_BE;
5420 if (nreg == 0) {
5421 /* ST1 */
5422 fn = fn_single[s->mte_active[0]][be][msz][esz];
5423 nreg = 1;
5424 } else {
5425 /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
5426 assert(msz == esz);
5427 fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
5429 assert(fn != NULL);
5430 do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn);
5433 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
5435 if (a->rm == 31 || a->msz > a->esz) {
5436 return false;
5438 if (sve_access_check(s)) {
5439 TCGv_i64 addr = new_tmp_a64(s);
5440 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
5441 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5442 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5444 return true;
5447 static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
5449 if (a->msz > a->esz) {
5450 return false;
5452 if (sve_access_check(s)) {
5453 int vsz = vec_full_reg_size(s);
5454 int elements = vsz >> a->esz;
5455 TCGv_i64 addr = new_tmp_a64(s);
5457 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
5458 (a->imm * elements * (a->nreg + 1)) << a->msz);
5459 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5461 return true;
/*
 *** SVE gather loads / scatter stores
 */
5468 static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
5469 int scale, TCGv_i64 scalar, int msz, bool is_write,
5470 gen_helper_gvec_mem_scatter *fn)
5472 unsigned vsz = vec_full_reg_size(s);
5473 TCGv_ptr t_zm = tcg_temp_new_ptr();
5474 TCGv_ptr t_pg = tcg_temp_new_ptr();
5475 TCGv_ptr t_zt = tcg_temp_new_ptr();
5476 int desc = 0;
5478 if (s->mte_active[0]) {
5479 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
5480 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
5481 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
5482 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
5483 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
5484 desc <<= SVE_MTEDESC_SHIFT;
5486 desc = simd_desc(vsz, vsz, desc | scale);
5488 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
5489 tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
5490 tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
5491 fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
5493 tcg_temp_free_ptr(t_zt);
5494 tcg_temp_free_ptr(t_zm);
5495 tcg_temp_free_ptr(t_pg);
5498 /* Indexed by [mte][be][ff][xs][u][msz]. */
5499 static gen_helper_gvec_mem_scatter * const
5500 gather_load_fn32[2][2][2][2][2][3] = {
5501 { /* MTE Inactive */
5502 { /* Little-endian */
5503 { { { gen_helper_sve_ldbss_zsu,
5504 gen_helper_sve_ldhss_le_zsu,
5505 NULL, },
5506 { gen_helper_sve_ldbsu_zsu,
5507 gen_helper_sve_ldhsu_le_zsu,
5508 gen_helper_sve_ldss_le_zsu, } },
5509 { { gen_helper_sve_ldbss_zss,
5510 gen_helper_sve_ldhss_le_zss,
5511 NULL, },
5512 { gen_helper_sve_ldbsu_zss,
5513 gen_helper_sve_ldhsu_le_zss,
5514 gen_helper_sve_ldss_le_zss, } } },
5516 /* First-fault */
5517 { { { gen_helper_sve_ldffbss_zsu,
5518 gen_helper_sve_ldffhss_le_zsu,
5519 NULL, },
5520 { gen_helper_sve_ldffbsu_zsu,
5521 gen_helper_sve_ldffhsu_le_zsu,
5522 gen_helper_sve_ldffss_le_zsu, } },
5523 { { gen_helper_sve_ldffbss_zss,
5524 gen_helper_sve_ldffhss_le_zss,
5525 NULL, },
5526 { gen_helper_sve_ldffbsu_zss,
5527 gen_helper_sve_ldffhsu_le_zss,
5528 gen_helper_sve_ldffss_le_zss, } } } },
5530 { /* Big-endian */
5531 { { { gen_helper_sve_ldbss_zsu,
5532 gen_helper_sve_ldhss_be_zsu,
5533 NULL, },
5534 { gen_helper_sve_ldbsu_zsu,
5535 gen_helper_sve_ldhsu_be_zsu,
5536 gen_helper_sve_ldss_be_zsu, } },
5537 { { gen_helper_sve_ldbss_zss,
5538 gen_helper_sve_ldhss_be_zss,
5539 NULL, },
5540 { gen_helper_sve_ldbsu_zss,
5541 gen_helper_sve_ldhsu_be_zss,
5542 gen_helper_sve_ldss_be_zss, } } },
5544 /* First-fault */
5545 { { { gen_helper_sve_ldffbss_zsu,
5546 gen_helper_sve_ldffhss_be_zsu,
5547 NULL, },
5548 { gen_helper_sve_ldffbsu_zsu,
5549 gen_helper_sve_ldffhsu_be_zsu,
5550 gen_helper_sve_ldffss_be_zsu, } },
5551 { { gen_helper_sve_ldffbss_zss,
5552 gen_helper_sve_ldffhss_be_zss,
5553 NULL, },
5554 { gen_helper_sve_ldffbsu_zss,
5555 gen_helper_sve_ldffhsu_be_zss,
5556 gen_helper_sve_ldffss_be_zss, } } } } },
5557 { /* MTE Active */
5558 { /* Little-endian */
5559 { { { gen_helper_sve_ldbss_zsu_mte,
5560 gen_helper_sve_ldhss_le_zsu_mte,
5561 NULL, },
5562 { gen_helper_sve_ldbsu_zsu_mte,
5563 gen_helper_sve_ldhsu_le_zsu_mte,
5564 gen_helper_sve_ldss_le_zsu_mte, } },
5565 { { gen_helper_sve_ldbss_zss_mte,
5566 gen_helper_sve_ldhss_le_zss_mte,
5567 NULL, },
5568 { gen_helper_sve_ldbsu_zss_mte,
5569 gen_helper_sve_ldhsu_le_zss_mte,
5570 gen_helper_sve_ldss_le_zss_mte, } } },
5572 /* First-fault */
5573 { { { gen_helper_sve_ldffbss_zsu_mte,
5574 gen_helper_sve_ldffhss_le_zsu_mte,
5575 NULL, },
5576 { gen_helper_sve_ldffbsu_zsu_mte,
5577 gen_helper_sve_ldffhsu_le_zsu_mte,
5578 gen_helper_sve_ldffss_le_zsu_mte, } },
5579 { { gen_helper_sve_ldffbss_zss_mte,
5580 gen_helper_sve_ldffhss_le_zss_mte,
5581 NULL, },
5582 { gen_helper_sve_ldffbsu_zss_mte,
5583 gen_helper_sve_ldffhsu_le_zss_mte,
5584 gen_helper_sve_ldffss_le_zss_mte, } } } },
5586 { /* Big-endian */
5587 { { { gen_helper_sve_ldbss_zsu_mte,
5588 gen_helper_sve_ldhss_be_zsu_mte,
5589 NULL, },
5590 { gen_helper_sve_ldbsu_zsu_mte,
5591 gen_helper_sve_ldhsu_be_zsu_mte,
5592 gen_helper_sve_ldss_be_zsu_mte, } },
5593 { { gen_helper_sve_ldbss_zss_mte,
5594 gen_helper_sve_ldhss_be_zss_mte,
5595 NULL, },
5596 { gen_helper_sve_ldbsu_zss_mte,
5597 gen_helper_sve_ldhsu_be_zss_mte,
5598 gen_helper_sve_ldss_be_zss_mte, } } },
5600 /* First-fault */
5601 { { { gen_helper_sve_ldffbss_zsu_mte,
5602 gen_helper_sve_ldffhss_be_zsu_mte,
5603 NULL, },
5604 { gen_helper_sve_ldffbsu_zsu_mte,
5605 gen_helper_sve_ldffhsu_be_zsu_mte,
5606 gen_helper_sve_ldffss_be_zsu_mte, } },
5607 { { gen_helper_sve_ldffbss_zss_mte,
5608 gen_helper_sve_ldffhss_be_zss_mte,
5609 NULL, },
5610 { gen_helper_sve_ldffbsu_zss_mte,
5611 gen_helper_sve_ldffhsu_be_zss_mte,
5612 gen_helper_sve_ldffss_be_zss_mte, } } } } },
5615 /* Note that we overload xs=2 to indicate 64-bit offset. */
5616 static gen_helper_gvec_mem_scatter * const
5617 gather_load_fn64[2][2][2][3][2][4] = {
5618 { /* MTE Inactive */
5619 { /* Little-endian */
5620 { { { gen_helper_sve_ldbds_zsu,
5621 gen_helper_sve_ldhds_le_zsu,
5622 gen_helper_sve_ldsds_le_zsu,
5623 NULL, },
5624 { gen_helper_sve_ldbdu_zsu,
5625 gen_helper_sve_ldhdu_le_zsu,
5626 gen_helper_sve_ldsdu_le_zsu,
5627 gen_helper_sve_lddd_le_zsu, } },
5628 { { gen_helper_sve_ldbds_zss,
5629 gen_helper_sve_ldhds_le_zss,
5630 gen_helper_sve_ldsds_le_zss,
5631 NULL, },
5632 { gen_helper_sve_ldbdu_zss,
5633 gen_helper_sve_ldhdu_le_zss,
5634 gen_helper_sve_ldsdu_le_zss,
5635 gen_helper_sve_lddd_le_zss, } },
5636 { { gen_helper_sve_ldbds_zd,
5637 gen_helper_sve_ldhds_le_zd,
5638 gen_helper_sve_ldsds_le_zd,
5639 NULL, },
5640 { gen_helper_sve_ldbdu_zd,
5641 gen_helper_sve_ldhdu_le_zd,
5642 gen_helper_sve_ldsdu_le_zd,
5643 gen_helper_sve_lddd_le_zd, } } },
5645 /* First-fault */
5646 { { { gen_helper_sve_ldffbds_zsu,
5647 gen_helper_sve_ldffhds_le_zsu,
5648 gen_helper_sve_ldffsds_le_zsu,
5649 NULL, },
5650 { gen_helper_sve_ldffbdu_zsu,
5651 gen_helper_sve_ldffhdu_le_zsu,
5652 gen_helper_sve_ldffsdu_le_zsu,
5653 gen_helper_sve_ldffdd_le_zsu, } },
5654 { { gen_helper_sve_ldffbds_zss,
5655 gen_helper_sve_ldffhds_le_zss,
5656 gen_helper_sve_ldffsds_le_zss,
5657 NULL, },
5658 { gen_helper_sve_ldffbdu_zss,
5659 gen_helper_sve_ldffhdu_le_zss,
5660 gen_helper_sve_ldffsdu_le_zss,
5661 gen_helper_sve_ldffdd_le_zss, } },
5662 { { gen_helper_sve_ldffbds_zd,
5663 gen_helper_sve_ldffhds_le_zd,
5664 gen_helper_sve_ldffsds_le_zd,
5665 NULL, },
5666 { gen_helper_sve_ldffbdu_zd,
5667 gen_helper_sve_ldffhdu_le_zd,
5668 gen_helper_sve_ldffsdu_le_zd,
5669 gen_helper_sve_ldffdd_le_zd, } } } },
5670 { /* Big-endian */
5671 { { { gen_helper_sve_ldbds_zsu,
5672 gen_helper_sve_ldhds_be_zsu,
5673 gen_helper_sve_ldsds_be_zsu,
5674 NULL, },
5675 { gen_helper_sve_ldbdu_zsu,
5676 gen_helper_sve_ldhdu_be_zsu,
5677 gen_helper_sve_ldsdu_be_zsu,
5678 gen_helper_sve_lddd_be_zsu, } },
5679 { { gen_helper_sve_ldbds_zss,
5680 gen_helper_sve_ldhds_be_zss,
5681 gen_helper_sve_ldsds_be_zss,
5682 NULL, },
5683 { gen_helper_sve_ldbdu_zss,
5684 gen_helper_sve_ldhdu_be_zss,
5685 gen_helper_sve_ldsdu_be_zss,
5686 gen_helper_sve_lddd_be_zss, } },
5687 { { gen_helper_sve_ldbds_zd,
5688 gen_helper_sve_ldhds_be_zd,
5689 gen_helper_sve_ldsds_be_zd,
5690 NULL, },
5691 { gen_helper_sve_ldbdu_zd,
5692 gen_helper_sve_ldhdu_be_zd,
5693 gen_helper_sve_ldsdu_be_zd,
5694 gen_helper_sve_lddd_be_zd, } } },
5696 /* First-fault */
5697 { { { gen_helper_sve_ldffbds_zsu,
5698 gen_helper_sve_ldffhds_be_zsu,
5699 gen_helper_sve_ldffsds_be_zsu,
5700 NULL, },
5701 { gen_helper_sve_ldffbdu_zsu,
5702 gen_helper_sve_ldffhdu_be_zsu,
5703 gen_helper_sve_ldffsdu_be_zsu,
5704 gen_helper_sve_ldffdd_be_zsu, } },
5705 { { gen_helper_sve_ldffbds_zss,
5706 gen_helper_sve_ldffhds_be_zss,
5707 gen_helper_sve_ldffsds_be_zss,
5708 NULL, },
5709 { gen_helper_sve_ldffbdu_zss,
5710 gen_helper_sve_ldffhdu_be_zss,
5711 gen_helper_sve_ldffsdu_be_zss,
5712 gen_helper_sve_ldffdd_be_zss, } },
5713 { { gen_helper_sve_ldffbds_zd,
5714 gen_helper_sve_ldffhds_be_zd,
5715 gen_helper_sve_ldffsds_be_zd,
5716 NULL, },
5717 { gen_helper_sve_ldffbdu_zd,
5718 gen_helper_sve_ldffhdu_be_zd,
5719 gen_helper_sve_ldffsdu_be_zd,
5720 gen_helper_sve_ldffdd_be_zd, } } } } },
5721 { /* MTE Active */
5722 { /* Little-endian */
5723 { { { gen_helper_sve_ldbds_zsu_mte,
5724 gen_helper_sve_ldhds_le_zsu_mte,
5725 gen_helper_sve_ldsds_le_zsu_mte,
5726 NULL, },
5727 { gen_helper_sve_ldbdu_zsu_mte,
5728 gen_helper_sve_ldhdu_le_zsu_mte,
5729 gen_helper_sve_ldsdu_le_zsu_mte,
5730 gen_helper_sve_lddd_le_zsu_mte, } },
5731 { { gen_helper_sve_ldbds_zss_mte,
5732 gen_helper_sve_ldhds_le_zss_mte,
5733 gen_helper_sve_ldsds_le_zss_mte,
5734 NULL, },
5735 { gen_helper_sve_ldbdu_zss_mte,
5736 gen_helper_sve_ldhdu_le_zss_mte,
5737 gen_helper_sve_ldsdu_le_zss_mte,
5738 gen_helper_sve_lddd_le_zss_mte, } },
5739 { { gen_helper_sve_ldbds_zd_mte,
5740 gen_helper_sve_ldhds_le_zd_mte,
5741 gen_helper_sve_ldsds_le_zd_mte,
5742 NULL, },
5743 { gen_helper_sve_ldbdu_zd_mte,
5744 gen_helper_sve_ldhdu_le_zd_mte,
5745 gen_helper_sve_ldsdu_le_zd_mte,
5746 gen_helper_sve_lddd_le_zd_mte, } } },
5748 /* First-fault */
5749 { { { gen_helper_sve_ldffbds_zsu_mte,
5750 gen_helper_sve_ldffhds_le_zsu_mte,
5751 gen_helper_sve_ldffsds_le_zsu_mte,
5752 NULL, },
5753 { gen_helper_sve_ldffbdu_zsu_mte,
5754 gen_helper_sve_ldffhdu_le_zsu_mte,
5755 gen_helper_sve_ldffsdu_le_zsu_mte,
5756 gen_helper_sve_ldffdd_le_zsu_mte, } },
5757 { { gen_helper_sve_ldffbds_zss_mte,
5758 gen_helper_sve_ldffhds_le_zss_mte,
5759 gen_helper_sve_ldffsds_le_zss_mte,
5760 NULL, },
5761 { gen_helper_sve_ldffbdu_zss_mte,
5762 gen_helper_sve_ldffhdu_le_zss_mte,
5763 gen_helper_sve_ldffsdu_le_zss_mte,
5764 gen_helper_sve_ldffdd_le_zss_mte, } },
5765 { { gen_helper_sve_ldffbds_zd_mte,
5766 gen_helper_sve_ldffhds_le_zd_mte,
5767 gen_helper_sve_ldffsds_le_zd_mte,
5768 NULL, },
5769 { gen_helper_sve_ldffbdu_zd_mte,
5770 gen_helper_sve_ldffhdu_le_zd_mte,
5771 gen_helper_sve_ldffsdu_le_zd_mte,
5772 gen_helper_sve_ldffdd_le_zd_mte, } } } },
5773 { /* Big-endian */
5774 { { { gen_helper_sve_ldbds_zsu_mte,
5775 gen_helper_sve_ldhds_be_zsu_mte,
5776 gen_helper_sve_ldsds_be_zsu_mte,
5777 NULL, },
5778 { gen_helper_sve_ldbdu_zsu_mte,
5779 gen_helper_sve_ldhdu_be_zsu_mte,
5780 gen_helper_sve_ldsdu_be_zsu_mte,
5781 gen_helper_sve_lddd_be_zsu_mte, } },
5782 { { gen_helper_sve_ldbds_zss_mte,
5783 gen_helper_sve_ldhds_be_zss_mte,
5784 gen_helper_sve_ldsds_be_zss_mte,
5785 NULL, },
5786 { gen_helper_sve_ldbdu_zss_mte,
5787 gen_helper_sve_ldhdu_be_zss_mte,
5788 gen_helper_sve_ldsdu_be_zss_mte,
5789 gen_helper_sve_lddd_be_zss_mte, } },
5790 { { gen_helper_sve_ldbds_zd_mte,
5791 gen_helper_sve_ldhds_be_zd_mte,
5792 gen_helper_sve_ldsds_be_zd_mte,
5793 NULL, },
5794 { gen_helper_sve_ldbdu_zd_mte,
5795 gen_helper_sve_ldhdu_be_zd_mte,
5796 gen_helper_sve_ldsdu_be_zd_mte,
5797 gen_helper_sve_lddd_be_zd_mte, } } },
5799 /* First-fault */
5800 { { { gen_helper_sve_ldffbds_zsu_mte,
5801 gen_helper_sve_ldffhds_be_zsu_mte,
5802 gen_helper_sve_ldffsds_be_zsu_mte,
5803 NULL, },
5804 { gen_helper_sve_ldffbdu_zsu_mte,
5805 gen_helper_sve_ldffhdu_be_zsu_mte,
5806 gen_helper_sve_ldffsdu_be_zsu_mte,
5807 gen_helper_sve_ldffdd_be_zsu_mte, } },
5808 { { gen_helper_sve_ldffbds_zss_mte,
5809 gen_helper_sve_ldffhds_be_zss_mte,
5810 gen_helper_sve_ldffsds_be_zss_mte,
5811 NULL, },
5812 { gen_helper_sve_ldffbdu_zss_mte,
5813 gen_helper_sve_ldffhdu_be_zss_mte,
5814 gen_helper_sve_ldffsdu_be_zss_mte,
5815 gen_helper_sve_ldffdd_be_zss_mte, } },
5816 { { gen_helper_sve_ldffbds_zd_mte,
5817 gen_helper_sve_ldffhds_be_zd_mte,
5818 gen_helper_sve_ldffsds_be_zd_mte,
5819 NULL, },
5820 { gen_helper_sve_ldffbdu_zd_mte,
5821 gen_helper_sve_ldffhdu_be_zd_mte,
5822 gen_helper_sve_ldffsdu_be_zd_mte,
5823 gen_helper_sve_ldffdd_be_zd_mte, } } } } },
5826 static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
5828 gen_helper_gvec_mem_scatter *fn = NULL;
5829 bool be = s->be_data == MO_BE;
5830 bool mte = s->mte_active[0];
5832 if (!sve_access_check(s)) {
5833 return true;
5836 switch (a->esz) {
5837 case MO_32:
5838 fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz];
5839 break;
5840 case MO_64:
5841 fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
5842 break;
5844 assert(fn != NULL);
5846 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
5847 cpu_reg_sp(s, a->rn), a->msz, false, fn);
5848 return true;
5851 static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
5853 gen_helper_gvec_mem_scatter *fn = NULL;
5854 bool be = s->be_data == MO_BE;
5855 bool mte = s->mte_active[0];
5857 if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
5858 return false;
5860 if (!sve_access_check(s)) {
5861 return true;
5864 switch (a->esz) {
5865 case MO_32:
5866 fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz];
5867 break;
5868 case MO_64:
5869 fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz];
5870 break;
5872 assert(fn != NULL);
5874 /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
 * by loading the immediate into the scalar parameter.
 */
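/*
 * E.g. a doubleword gather (msz = 3) with imm = 8 passes
 * tcg_constant_i64(8 << 3) = 64 as the scalar base, the byte offset
 * added to each element of the vector of addresses.
 */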
5877 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5878 tcg_constant_i64(a->imm << a->msz), a->msz, false, fn);
5879 return true;
5882 static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
5884 gen_helper_gvec_mem_scatter *fn = NULL;
5885 bool be = s->be_data == MO_BE;
5886 bool mte = s->mte_active[0];
5888 if (a->esz < a->msz + !a->u) {
5889 return false;
5891 if (!dc_isar_feature(aa64_sve2, s)) {
5892 return false;
5894 if (!sve_access_check(s)) {
5895 return true;
5898 switch (a->esz) {
5899 case MO_32:
5900 fn = gather_load_fn32[mte][be][0][0][a->u][a->msz];
5901 break;
5902 case MO_64:
5903 fn = gather_load_fn64[mte][be][0][2][a->u][a->msz];
5904 break;
5906 assert(fn != NULL);
5908 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5909 cpu_reg(s, a->rm), a->msz, false, fn);
5910 return true;
5913 /* Indexed by [mte][be][xs][msz]. */
5914 static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
5915 { /* MTE Inactive */
5916 { /* Little-endian */
5917 { gen_helper_sve_stbs_zsu,
5918 gen_helper_sve_sths_le_zsu,
5919 gen_helper_sve_stss_le_zsu, },
5920 { gen_helper_sve_stbs_zss,
5921 gen_helper_sve_sths_le_zss,
5922 gen_helper_sve_stss_le_zss, } },
5923 { /* Big-endian */
5924 { gen_helper_sve_stbs_zsu,
5925 gen_helper_sve_sths_be_zsu,
5926 gen_helper_sve_stss_be_zsu, },
5927 { gen_helper_sve_stbs_zss,
5928 gen_helper_sve_sths_be_zss,
5929 gen_helper_sve_stss_be_zss, } } },
5930 { /* MTE Active */
5931 { /* Little-endian */
5932 { gen_helper_sve_stbs_zsu_mte,
5933 gen_helper_sve_sths_le_zsu_mte,
5934 gen_helper_sve_stss_le_zsu_mte, },
5935 { gen_helper_sve_stbs_zss_mte,
5936 gen_helper_sve_sths_le_zss_mte,
5937 gen_helper_sve_stss_le_zss_mte, } },
5938 { /* Big-endian */
5939 { gen_helper_sve_stbs_zsu_mte,
5940 gen_helper_sve_sths_be_zsu_mte,
5941 gen_helper_sve_stss_be_zsu_mte, },
5942 { gen_helper_sve_stbs_zss_mte,
5943 gen_helper_sve_sths_be_zss_mte,
5944 gen_helper_sve_stss_be_zss_mte, } } },
5947 /* Note that we overload xs=2 to indicate 64-bit offset. */
5948 static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
5949 { /* MTE Inactive */
5950 { /* Little-endian */
5951 { gen_helper_sve_stbd_zsu,
5952 gen_helper_sve_sthd_le_zsu,
5953 gen_helper_sve_stsd_le_zsu,
5954 gen_helper_sve_stdd_le_zsu, },
5955 { gen_helper_sve_stbd_zss,
5956 gen_helper_sve_sthd_le_zss,
5957 gen_helper_sve_stsd_le_zss,
5958 gen_helper_sve_stdd_le_zss, },
5959 { gen_helper_sve_stbd_zd,
5960 gen_helper_sve_sthd_le_zd,
5961 gen_helper_sve_stsd_le_zd,
5962 gen_helper_sve_stdd_le_zd, } },
5963 { /* Big-endian */
5964 { gen_helper_sve_stbd_zsu,
5965 gen_helper_sve_sthd_be_zsu,
5966 gen_helper_sve_stsd_be_zsu,
5967 gen_helper_sve_stdd_be_zsu, },
5968 { gen_helper_sve_stbd_zss,
5969 gen_helper_sve_sthd_be_zss,
5970 gen_helper_sve_stsd_be_zss,
5971 gen_helper_sve_stdd_be_zss, },
5972 { gen_helper_sve_stbd_zd,
5973 gen_helper_sve_sthd_be_zd,
5974 gen_helper_sve_stsd_be_zd,
5975 gen_helper_sve_stdd_be_zd, } } },
5976     { /* MTE Active */
5977 { /* Little-endian */
5978 { gen_helper_sve_stbd_zsu_mte,
5979 gen_helper_sve_sthd_le_zsu_mte,
5980 gen_helper_sve_stsd_le_zsu_mte,
5981 gen_helper_sve_stdd_le_zsu_mte, },
5982 { gen_helper_sve_stbd_zss_mte,
5983 gen_helper_sve_sthd_le_zss_mte,
5984 gen_helper_sve_stsd_le_zss_mte,
5985 gen_helper_sve_stdd_le_zss_mte, },
5986 { gen_helper_sve_stbd_zd_mte,
5987 gen_helper_sve_sthd_le_zd_mte,
5988 gen_helper_sve_stsd_le_zd_mte,
5989 gen_helper_sve_stdd_le_zd_mte, } },
5990 { /* Big-endian */
5991 { gen_helper_sve_stbd_zsu_mte,
5992 gen_helper_sve_sthd_be_zsu_mte,
5993 gen_helper_sve_stsd_be_zsu_mte,
5994 gen_helper_sve_stdd_be_zsu_mte, },
5995 { gen_helper_sve_stbd_zss_mte,
5996 gen_helper_sve_sthd_be_zss_mte,
5997 gen_helper_sve_stsd_be_zss_mte,
5998 gen_helper_sve_stdd_be_zss_mte, },
5999 { gen_helper_sve_stbd_zd_mte,
6000 gen_helper_sve_sthd_be_zd_mte,
6001 gen_helper_sve_stsd_be_zd_mte,
6002 gen_helper_sve_stdd_be_zd_mte, } } },
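/*
 * A minimal sketch of the lookup performed below (illustrative only,
 * not a new API): e.g. a big-endian ST1D with MTE active and 64-bit
 * vector offsets (xs == 2) selects scatter_store_fn64[1][1][2][MO_64],
 * which is gen_helper_sve_stdd_be_zd_mte.
 */
static inline gen_helper_gvec_mem_scatter *
example_scatter_store_fn64(bool mte, bool be, int xs, int msz)
{
    return scatter_store_fn64[mte][be][xs][msz];
}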
6005 static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
6007 gen_helper_gvec_mem_scatter *fn;
6008 bool be = s->be_data == MO_BE;
6009 bool mte = s->mte_active[0];
6011 if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
6012 return false;
6014 if (!sve_access_check(s)) {
6015 return true;
6017 switch (a->esz) {
6018 case MO_32:
6019 fn = scatter_store_fn32[mte][be][a->xs][a->msz];
6020 break;
6021 case MO_64:
6022 fn = scatter_store_fn64[mte][be][a->xs][a->msz];
6023 break;
6024 default:
6025 g_assert_not_reached();
6027 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
6028 cpu_reg_sp(s, a->rn), a->msz, true, fn);
6029 return true;
6032 static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
6034 gen_helper_gvec_mem_scatter *fn = NULL;
6035 bool be = s->be_data == MO_BE;
6036 bool mte = s->mte_active[0];
6038 if (a->esz < a->msz) {
6039 return false;
6041 if (!sve_access_check(s)) {
6042 return true;
6045 switch (a->esz) {
6046 case MO_32:
6047 fn = scatter_store_fn32[mte][be][0][a->msz];
6048 break;
6049 case MO_64:
6050 fn = scatter_store_fn64[mte][be][2][a->msz];
6051 break;
6053 assert(fn != NULL);
6055 /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
6056 * by loading the immediate into the scalar parameter.
6058 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
6059 tcg_constant_i64(a->imm << a->msz), a->msz, true, fn);
6060 return true;
6063 static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
6065 gen_helper_gvec_mem_scatter *fn;
6066 bool be = s->be_data == MO_BE;
6067 bool mte = s->mte_active[0];
6069 if (a->esz < a->msz) {
6070 return false;
6072 if (!dc_isar_feature(aa64_sve2, s)) {
6073 return false;
6075 if (!sve_access_check(s)) {
6076 return true;
6079 switch (a->esz) {
6080 case MO_32:
6081 fn = scatter_store_fn32[mte][be][0][a->msz];
6082 break;
6083 case MO_64:
6084 fn = scatter_store_fn64[mte][be][2][a->msz];
6085 break;
6086 default:
6087 g_assert_not_reached();
6090 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
6091 cpu_reg(s, a->rm), a->msz, true, fn);
6092 return true;
6096 * Prefetches
6099 static bool trans_PRF(DisasContext *s, arg_PRF *a)
6101 /* Prefetch is a nop within QEMU. */
6102 (void)sve_access_check(s);
6103 return true;
6106 static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
6108 if (a->rm == 31) {
6109 return false;
6111 /* Prefetch is a nop within QEMU. */
6112 (void)sve_access_check(s);
6113 return true;
6117 * Move Prefix
6119 * TODO: The implementation so far could handle predicated merging movprfx.
6120 * The helper functions as written take an extra source register to
6121 * use in the operation, but the result is only written when predication
6122 * succeeds. For unpredicated movprfx, we need to rearrange the helpers
6123 * to allow the final write back to the destination to be unconditional.
6124 * For predicated zeroing movprfx, we need to rearrange the helpers to
6125 * allow the final write back to zero inactives.
6127 * In the meantime, just emit the moves.
6130 static bool trans_MOVPRFX(DisasContext *s, arg_MOVPRFX *a)
6132 return do_mov_z(s, a->rd, a->rn);
6135 static bool trans_MOVPRFX_m(DisasContext *s, arg_rpr_esz *a)
6137 return do_sel_z(s, a->rd, a->rn, a->rd, a->pg, a->esz);
6140 static bool trans_MOVPRFX_z(DisasContext *s, arg_rpr_esz *a)
6142 return do_movz_zpz(s, a->rd, a->rn, a->pg, a->esz, false);
6146 * SVE2 Integer Multiply - Unpredicated
6149 TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a)
6151 static gen_helper_gvec_3 * const smulh_zzz_fns[4] = {
6152 gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
6153 gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d,
6155 TRANS_FEAT(SMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6156 smulh_zzz_fns[a->esz], a, 0)
6158 static gen_helper_gvec_3 * const umulh_zzz_fns[4] = {
6159 gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h,
6160 gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d,
6162 TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6163 umulh_zzz_fns[a->esz], a, 0)
6165 TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6166 gen_helper_gvec_pmul_b, a, 0)
6168 static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = {
6169 gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h,
6170 gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d,
6172 TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6173 sqdmulh_zzz_fns[a->esz], a, 0)
6175 static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = {
6176 gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h,
6177 gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d,
6179 TRANS_FEAT(SQRDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6180 sqrdmulh_zzz_fns[a->esz], a, 0)
6183 * SVE2 Integer - Predicated
6186 static gen_helper_gvec_4 * const sadlp_fns[4] = {
6187 NULL, gen_helper_sve2_sadalp_zpzz_h,
6188 gen_helper_sve2_sadalp_zpzz_s, gen_helper_sve2_sadalp_zpzz_d,
6190 TRANS_FEAT(SADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz,
6191 sadlp_fns[a->esz], a, 0)
6193 static gen_helper_gvec_4 * const uadlp_fns[4] = {
6194 NULL, gen_helper_sve2_uadalp_zpzz_h,
6195 gen_helper_sve2_uadalp_zpzz_s, gen_helper_sve2_uadalp_zpzz_d,
6197 TRANS_FEAT(UADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz,
6198 uadlp_fns[a->esz], a, 0)
6201 * SVE2 integer unary operations (predicated)
6204 TRANS_FEAT(URECPE, aa64_sve2, gen_gvec_ool_arg_zpz,
6205 a->esz == 2 ? gen_helper_sve2_urecpe_s : NULL, a, 0)
6207 TRANS_FEAT(URSQRTE, aa64_sve2, gen_gvec_ool_arg_zpz,
6208 a->esz == 2 ? gen_helper_sve2_ursqrte_s : NULL, a, 0)
6210 static gen_helper_gvec_3 * const sqabs_fns[4] = {
6211 gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h,
6212 gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d,
6214 TRANS_FEAT(SQABS, aa64_sve2, gen_gvec_ool_arg_zpz, sqabs_fns[a->esz], a, 0)
6216 static gen_helper_gvec_3 * const sqneg_fns[4] = {
6217 gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h,
6218 gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d,
6220 TRANS_FEAT(SQNEG, aa64_sve2, gen_gvec_ool_arg_zpz, sqneg_fns[a->esz], a, 0)
6222 DO_ZPZZ(SQSHL, aa64_sve2, sve2_sqshl)
6223 DO_ZPZZ(SQRSHL, aa64_sve2, sve2_sqrshl)
6224 DO_ZPZZ(SRSHL, aa64_sve2, sve2_srshl)
6226 DO_ZPZZ(UQSHL, aa64_sve2, sve2_uqshl)
6227 DO_ZPZZ(UQRSHL, aa64_sve2, sve2_uqrshl)
6228 DO_ZPZZ(URSHL, aa64_sve2, sve2_urshl)
6230 DO_ZPZZ(SHADD, aa64_sve2, sve2_shadd)
6231 DO_ZPZZ(SRHADD, aa64_sve2, sve2_srhadd)
6232 DO_ZPZZ(SHSUB, aa64_sve2, sve2_shsub)
6234 DO_ZPZZ(UHADD, aa64_sve2, sve2_uhadd)
6235 DO_ZPZZ(URHADD, aa64_sve2, sve2_urhadd)
6236 DO_ZPZZ(UHSUB, aa64_sve2, sve2_uhsub)
6238 DO_ZPZZ(ADDP, aa64_sve2, sve2_addp)
6239 DO_ZPZZ(SMAXP, aa64_sve2, sve2_smaxp)
6240 DO_ZPZZ(UMAXP, aa64_sve2, sve2_umaxp)
6241 DO_ZPZZ(SMINP, aa64_sve2, sve2_sminp)
6242 DO_ZPZZ(UMINP, aa64_sve2, sve2_uminp)
6244 DO_ZPZZ(SQADD_zpzz, aa64_sve2, sve2_sqadd)
6245 DO_ZPZZ(UQADD_zpzz, aa64_sve2, sve2_uqadd)
6246 DO_ZPZZ(SQSUB_zpzz, aa64_sve2, sve2_sqsub)
6247 DO_ZPZZ(UQSUB_zpzz, aa64_sve2, sve2_uqsub)
6248 DO_ZPZZ(SUQADD, aa64_sve2, sve2_suqadd)
6249 DO_ZPZZ(USQADD, aa64_sve2, sve2_usqadd)
6252 * SVE2 Widening Integer Arithmetic
6255 static gen_helper_gvec_3 * const saddl_fns[4] = {
6256 NULL, gen_helper_sve2_saddl_h,
6257 gen_helper_sve2_saddl_s, gen_helper_sve2_saddl_d,
6259 TRANS_FEAT(SADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6260 saddl_fns[a->esz], a, 0)
6261 TRANS_FEAT(SADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6262 saddl_fns[a->esz], a, 3)
6263 TRANS_FEAT(SADDLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
6264 saddl_fns[a->esz], a, 2)
6266 static gen_helper_gvec_3 * const ssubl_fns[4] = {
6267 NULL, gen_helper_sve2_ssubl_h,
6268 gen_helper_sve2_ssubl_s, gen_helper_sve2_ssubl_d,
6270 TRANS_FEAT(SSUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6271 ssubl_fns[a->esz], a, 0)
6272 TRANS_FEAT(SSUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6273 ssubl_fns[a->esz], a, 3)
6274 TRANS_FEAT(SSUBLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
6275 ssubl_fns[a->esz], a, 2)
6276 TRANS_FEAT(SSUBLTB, aa64_sve2, gen_gvec_ool_arg_zzz,
6277 ssubl_fns[a->esz], a, 1)
6279 static gen_helper_gvec_3 * const sabdl_fns[4] = {
6280 NULL, gen_helper_sve2_sabdl_h,
6281 gen_helper_sve2_sabdl_s, gen_helper_sve2_sabdl_d,
6283 TRANS_FEAT(SABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6284 sabdl_fns[a->esz], a, 0)
6285 TRANS_FEAT(SABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6286 sabdl_fns[a->esz], a, 3)
6288 static gen_helper_gvec_3 * const uaddl_fns[4] = {
6289 NULL, gen_helper_sve2_uaddl_h,
6290 gen_helper_sve2_uaddl_s, gen_helper_sve2_uaddl_d,
6292 TRANS_FEAT(UADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6293 uaddl_fns[a->esz], a, 0)
6294 TRANS_FEAT(UADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6295 uaddl_fns[a->esz], a, 3)
6297 static gen_helper_gvec_3 * const usubl_fns[4] = {
6298 NULL, gen_helper_sve2_usubl_h,
6299 gen_helper_sve2_usubl_s, gen_helper_sve2_usubl_d,
6301 TRANS_FEAT(USUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6302 usubl_fns[a->esz], a, 0)
6303 TRANS_FEAT(USUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6304 usubl_fns[a->esz], a, 3)
6306 static gen_helper_gvec_3 * const uabdl_fns[4] = {
6307 NULL, gen_helper_sve2_uabdl_h,
6308 gen_helper_sve2_uabdl_s, gen_helper_sve2_uabdl_d,
6310 TRANS_FEAT(UABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6311 uabdl_fns[a->esz], a, 0)
6312 TRANS_FEAT(UABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6313 uabdl_fns[a->esz], a, 3)
6315 static gen_helper_gvec_3 * const sqdmull_fns[4] = {
6316 NULL, gen_helper_sve2_sqdmull_zzz_h,
6317 gen_helper_sve2_sqdmull_zzz_s, gen_helper_sve2_sqdmull_zzz_d,
6319 TRANS_FEAT(SQDMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6320 sqdmull_fns[a->esz], a, 0)
6321 TRANS_FEAT(SQDMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6322 sqdmull_fns[a->esz], a, 3)
6324 static gen_helper_gvec_3 * const smull_fns[4] = {
6325 NULL, gen_helper_sve2_smull_zzz_h,
6326 gen_helper_sve2_smull_zzz_s, gen_helper_sve2_smull_zzz_d,
6328 TRANS_FEAT(SMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6329 smull_fns[a->esz], a, 0)
6330 TRANS_FEAT(SMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6331 smull_fns[a->esz], a, 3)
6333 static gen_helper_gvec_3 * const umull_fns[4] = {
6334 NULL, gen_helper_sve2_umull_zzz_h,
6335 gen_helper_sve2_umull_zzz_s, gen_helper_sve2_umull_zzz_d,
6337 TRANS_FEAT(UMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6338 umull_fns[a->esz], a, 0)
6339 TRANS_FEAT(UMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6340 umull_fns[a->esz], a, 3)
6342 static gen_helper_gvec_3 * const eoril_fns[4] = {
6343 gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
6344 gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
6346 TRANS_FEAT(EORBT, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 2)
6347 TRANS_FEAT(EORTB, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 1)
6349 static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
6351 static gen_helper_gvec_3 * const fns[4] = {
6352 gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
6353 NULL, gen_helper_sve2_pmull_d,
6355 if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) {
6356 return false;
6358 return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
6361 TRANS_FEAT(PMULLB, aa64_sve2, do_trans_pmull, a, false)
6362 TRANS_FEAT(PMULLT, aa64_sve2, do_trans_pmull, a, true)
6364 static gen_helper_gvec_3 * const saddw_fns[4] = {
6365 NULL, gen_helper_sve2_saddw_h,
6366 gen_helper_sve2_saddw_s, gen_helper_sve2_saddw_d,
6368 TRANS_FEAT(SADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 0)
6369 TRANS_FEAT(SADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 1)
6371 static gen_helper_gvec_3 * const ssubw_fns[4] = {
6372 NULL, gen_helper_sve2_ssubw_h,
6373 gen_helper_sve2_ssubw_s, gen_helper_sve2_ssubw_d,
6375 TRANS_FEAT(SSUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 0)
6376 TRANS_FEAT(SSUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 1)
6378 static gen_helper_gvec_3 * const uaddw_fns[4] = {
6379 NULL, gen_helper_sve2_uaddw_h,
6380 gen_helper_sve2_uaddw_s, gen_helper_sve2_uaddw_d,
6382 TRANS_FEAT(UADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 0)
6383 TRANS_FEAT(UADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 1)
6385 static gen_helper_gvec_3 * const usubw_fns[4] = {
6386 NULL, gen_helper_sve2_usubw_h,
6387 gen_helper_sve2_usubw_s, gen_helper_sve2_usubw_d,
6389 TRANS_FEAT(USUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 0)
6390 TRANS_FEAT(USUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 1)
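/*
 * Widen the even (top == 0) or odd (top == 1) half-width elements with
 * sign extension, then shift left by shl.  For the top case with
 * shl == halfbits the two shifts cancel and the input bits are already
 * in place, so masking the high halves suffices.
 */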
6392 static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
6394 int top = imm & 1;
6395 int shl = imm >> 1;
6396 int halfbits = 4 << vece;
6398 if (top) {
6399 if (shl == halfbits) {
6400 TCGv_vec t = tcg_temp_new_vec_matching(d);
6401 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
6402 tcg_gen_and_vec(vece, d, n, t);
6403 tcg_temp_free_vec(t);
6404 } else {
6405 tcg_gen_sari_vec(vece, d, n, halfbits);
6406 tcg_gen_shli_vec(vece, d, d, shl);
6408 } else {
6409 tcg_gen_shli_vec(vece, d, n, halfbits);
6410 tcg_gen_sari_vec(vece, d, d, halfbits - shl);
6414 static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
6416 int halfbits = 4 << vece;
6417 int top = imm & 1;
6418 int shl = (imm >> 1);
6419 int shift;
6420 uint64_t mask;
6422 mask = MAKE_64BIT_MASK(0, halfbits);
6423 mask <<= shl;
6424 mask = dup_const(vece, mask);
6426 shift = shl - top * halfbits;
6427 if (shift < 0) {
6428 tcg_gen_shri_i64(d, n, -shift);
6429 } else {
6430 tcg_gen_shli_i64(d, n, shift);
6432 tcg_gen_andi_i64(d, d, mask);
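/*
 * Per-element sketch of the computation above (illustrative only, not
 * used): for one wide element n of 2 * halfbits bits, extract the
 * selected half-width element and shift it into place.  Because the
 * mask is applied last, the cross-element garbage created by shifting
 * the full 64-bit lane is discarded.
 */
static inline uint64_t example_ushll_elt(uint64_t n, int halfbits,
                                         int top, int shl)
{
    uint64_t elt = (n >> (top * halfbits)) & MAKE_64BIT_MASK(0, halfbits);
    return elt << shl;
}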
6435 static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6437 gen_ushll_i64(MO_16, d, n, imm);
6440 static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6442 gen_ushll_i64(MO_32, d, n, imm);
6445 static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6447 gen_ushll_i64(MO_64, d, n, imm);
6450 static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
6452 int halfbits = 4 << vece;
6453 int top = imm & 1;
6454 int shl = imm >> 1;
6456 if (top) {
6457 if (shl == halfbits) {
6458 TCGv_vec t = tcg_temp_new_vec_matching(d);
6459 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
6460 tcg_gen_and_vec(vece, d, n, t);
6461 tcg_temp_free_vec(t);
6462 } else {
6463 tcg_gen_shri_vec(vece, d, n, halfbits);
6464 tcg_gen_shli_vec(vece, d, d, shl);
6466 } else {
6467 if (shl == 0) {
6468 TCGv_vec t = tcg_temp_new_vec_matching(d);
6469 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6470 tcg_gen_and_vec(vece, d, n, t);
6471 tcg_temp_free_vec(t);
6472 } else {
6473 tcg_gen_shli_vec(vece, d, n, halfbits);
6474 tcg_gen_shri_vec(vece, d, d, halfbits - shl);
6479 static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a,
6480 bool sel, bool uns)
6482 static const TCGOpcode sshll_list[] = {
6483 INDEX_op_shli_vec, INDEX_op_sari_vec, 0
6485 static const TCGOpcode ushll_list[] = {
6486 INDEX_op_shli_vec, INDEX_op_shri_vec, 0
6488 static const GVecGen2i ops[2][3] = {
6489 { { .fniv = gen_sshll_vec,
6490 .opt_opc = sshll_list,
6491 .fno = gen_helper_sve2_sshll_h,
6492 .vece = MO_16 },
6493 { .fniv = gen_sshll_vec,
6494 .opt_opc = sshll_list,
6495 .fno = gen_helper_sve2_sshll_s,
6496 .vece = MO_32 },
6497 { .fniv = gen_sshll_vec,
6498 .opt_opc = sshll_list,
6499 .fno = gen_helper_sve2_sshll_d,
6500 .vece = MO_64 } },
6501 { { .fni8 = gen_ushll16_i64,
6502 .fniv = gen_ushll_vec,
6503 .opt_opc = ushll_list,
6504 .fno = gen_helper_sve2_ushll_h,
6505 .vece = MO_16 },
6506 { .fni8 = gen_ushll32_i64,
6507 .fniv = gen_ushll_vec,
6508 .opt_opc = ushll_list,
6509 .fno = gen_helper_sve2_ushll_s,
6510 .vece = MO_32 },
6511 { .fni8 = gen_ushll64_i64,
6512 .fniv = gen_ushll_vec,
6513 .opt_opc = ushll_list,
6514 .fno = gen_helper_sve2_ushll_d,
6515 .vece = MO_64 } },
6518 if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) {
6519 return false;
6521 if (sve_access_check(s)) {
6522 unsigned vsz = vec_full_reg_size(s);
6523 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6524 vec_full_reg_offset(s, a->rn),
6525 vsz, vsz, (a->imm << 1) | sel,
6526 &ops[uns][a->esz]);
6528 return true;
6531 static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a)
6533 return do_sve2_shll_tb(s, a, false, false);
6536 static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a)
6538 return do_sve2_shll_tb(s, a, true, false);
6541 static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a)
6543 return do_sve2_shll_tb(s, a, false, true);
6546 static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
6548 return do_sve2_shll_tb(s, a, true, true);
6551 static gen_helper_gvec_3 * const bext_fns[4] = {
6552 gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
6553 gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
6555 TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6556 bext_fns[a->esz], a, 0)
6558 static gen_helper_gvec_3 * const bdep_fns[4] = {
6559 gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
6560 gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
6562 TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6563 bdep_fns[a->esz], a, 0)
6565 static gen_helper_gvec_3 * const bgrp_fns[4] = {
6566 gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
6567 gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
6569 TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6570 bgrp_fns[a->esz], a, 0)
6572 static gen_helper_gvec_3 * const cadd_fns[4] = {
6573 gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
6574 gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d,
6576 TRANS_FEAT(CADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
6577 cadd_fns[a->esz], a, 0)
6578 TRANS_FEAT(CADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
6579 cadd_fns[a->esz], a, 1)
6581 static gen_helper_gvec_3 * const sqcadd_fns[4] = {
6582 gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
6583 gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d,
6585 TRANS_FEAT(SQCADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
6586 sqcadd_fns[a->esz], a, 0)
6587 TRANS_FEAT(SQCADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
6588 sqcadd_fns[a->esz], a, 1)
6590 static gen_helper_gvec_4 * const sabal_fns[4] = {
6591 NULL, gen_helper_sve2_sabal_h,
6592 gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d,
6594 TRANS_FEAT(SABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 0)
6595 TRANS_FEAT(SABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 1)
6597 static gen_helper_gvec_4 * const uabal_fns[4] = {
6598 NULL, gen_helper_sve2_uabal_h,
6599 gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d,
6601 TRANS_FEAT(UABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 0)
6602 TRANS_FEAT(UABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 1)
6604 static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
6606 static gen_helper_gvec_4 * const fns[2] = {
6607 gen_helper_sve2_adcl_s,
6608 gen_helper_sve2_adcl_d,
6611 * Note that in this case the ESZ field encodes both size and sign.
6612 * Split out 'subtract' into bit 1 of the data field for the helper.
6614 return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel);
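/*
 * Illustrative decode of the overloaded field (hypothetical helper,
 * not used): bit 0 of ESZ selects the 32- vs 64-bit helper, bit 1
 * selects the subtract forms, and 'sel' picks bottom or top elements.
 */
static inline void example_adcl_decode(int esz, bool sel,
                                       int *fn_idx, int *data)
{
    *fn_idx = esz & 1;          /* adcl_s vs adcl_d */
    *data = (esz & 2) | sel;    /* bit 1: subtract; bit 0: top */
}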
6617 TRANS_FEAT(ADCLB, aa64_sve2, do_adcl, a, false)
6618 TRANS_FEAT(ADCLT, aa64_sve2, do_adcl, a, true)
6620 TRANS_FEAT(SSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ssra, a)
6621 TRANS_FEAT(USRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_usra, a)
6622 TRANS_FEAT(SRSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_srsra, a)
6623 TRANS_FEAT(URSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ursra, a)
6624 TRANS_FEAT(SRI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sri, a)
6625 TRANS_FEAT(SLI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sli, a)
6627 TRANS_FEAT(SABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_saba, a)
6628 TRANS_FEAT(UABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_uaba, a)
6630 static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a,
6631 const GVecGen2 ops[3])
6633 if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 ||
6634 !dc_isar_feature(aa64_sve2, s)) {
6635 return false;
6637 if (sve_access_check(s)) {
6638 unsigned vsz = vec_full_reg_size(s);
6639 tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
6640 vec_full_reg_offset(s, a->rn),
6641 vsz, vsz, &ops[a->esz]);
6643 return true;
6646 static const TCGOpcode sqxtn_list[] = {
6647 INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
6650 static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6652 TCGv_vec t = tcg_temp_new_vec_matching(d);
6653 int halfbits = 4 << vece;
6654 int64_t mask = (1ull << halfbits) - 1;
6655 int64_t min = -1ull << (halfbits - 1);
6656 int64_t max = -min - 1;
6658 tcg_gen_dupi_vec(vece, t, min);
6659 tcg_gen_smax_vec(vece, d, n, t);
6660 tcg_gen_dupi_vec(vece, t, max);
6661 tcg_gen_smin_vec(vece, d, d, t);
6662 tcg_gen_dupi_vec(vece, t, mask);
6663 tcg_gen_and_vec(vece, d, d, t);
6664 tcg_temp_free_vec(t);
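/*
 * Per-element sketch (illustrative only): saturate one wide element to
 * the signed half-width range, then keep only the low half of the
 * lane; the high half of each wide element is left zero for the _B
 * form.
 */
static inline int64_t example_sqxtnb_elt(int64_t n, int halfbits)
{
    int64_t max = (1ll << (halfbits - 1)) - 1;
    int64_t min = -max - 1;
    int64_t v = n < min ? min : n > max ? max : n;
    return v & MAKE_64BIT_MASK(0, halfbits);
}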
6667 static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a)
6669 static const GVecGen2 ops[3] = {
6670 { .fniv = gen_sqxtnb_vec,
6671 .opt_opc = sqxtn_list,
6672 .fno = gen_helper_sve2_sqxtnb_h,
6673 .vece = MO_16 },
6674 { .fniv = gen_sqxtnb_vec,
6675 .opt_opc = sqxtn_list,
6676 .fno = gen_helper_sve2_sqxtnb_s,
6677 .vece = MO_32 },
6678 { .fniv = gen_sqxtnb_vec,
6679 .opt_opc = sqxtn_list,
6680 .fno = gen_helper_sve2_sqxtnb_d,
6681 .vece = MO_64 },
6683 return do_sve2_narrow_extract(s, a, ops);
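/*
 * As gen_sqxtnb_vec above, but deposit the saturated value into the
 * high half of each wide element: after shifting n left by halfbits,
 * bitsel with the low-half mask t keeps the low half of d and takes
 * the high half from n.
 */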
6686 static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6688 TCGv_vec t = tcg_temp_new_vec_matching(d);
6689 int halfbits = 4 << vece;
6690 int64_t mask = (1ull << halfbits) - 1;
6691 int64_t min = -1ull << (halfbits - 1);
6692 int64_t max = -min - 1;
6694 tcg_gen_dupi_vec(vece, t, min);
6695 tcg_gen_smax_vec(vece, n, n, t);
6696 tcg_gen_dupi_vec(vece, t, max);
6697 tcg_gen_smin_vec(vece, n, n, t);
6698 tcg_gen_shli_vec(vece, n, n, halfbits);
6699 tcg_gen_dupi_vec(vece, t, mask);
6700 tcg_gen_bitsel_vec(vece, d, t, d, n);
6701 tcg_temp_free_vec(t);
6704 static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a)
6706 static const GVecGen2 ops[3] = {
6707 { .fniv = gen_sqxtnt_vec,
6708 .opt_opc = sqxtn_list,
6709 .load_dest = true,
6710 .fno = gen_helper_sve2_sqxtnt_h,
6711 .vece = MO_16 },
6712 { .fniv = gen_sqxtnt_vec,
6713 .opt_opc = sqxtn_list,
6714 .load_dest = true,
6715 .fno = gen_helper_sve2_sqxtnt_s,
6716 .vece = MO_32 },
6717 { .fniv = gen_sqxtnt_vec,
6718 .opt_opc = sqxtn_list,
6719 .load_dest = true,
6720 .fno = gen_helper_sve2_sqxtnt_d,
6721 .vece = MO_64 },
6723 return do_sve2_narrow_extract(s, a, ops);
6726 static const TCGOpcode uqxtn_list[] = {
6727 INDEX_op_shli_vec, INDEX_op_umin_vec, 0
6730 static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6732 TCGv_vec t = tcg_temp_new_vec_matching(d);
6733 int halfbits = 4 << vece;
6734 int64_t max = (1ull << halfbits) - 1;
6736 tcg_gen_dupi_vec(vece, t, max);
6737 tcg_gen_umin_vec(vece, d, n, t);
6738 tcg_temp_free_vec(t);
6741 static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a)
6743 static const GVecGen2 ops[3] = {
6744 { .fniv = gen_uqxtnb_vec,
6745 .opt_opc = uqxtn_list,
6746 .fno = gen_helper_sve2_uqxtnb_h,
6747 .vece = MO_16 },
6748 { .fniv = gen_uqxtnb_vec,
6749 .opt_opc = uqxtn_list,
6750 .fno = gen_helper_sve2_uqxtnb_s,
6751 .vece = MO_32 },
6752 { .fniv = gen_uqxtnb_vec,
6753 .opt_opc = uqxtn_list,
6754 .fno = gen_helper_sve2_uqxtnb_d,
6755 .vece = MO_64 },
6757 return do_sve2_narrow_extract(s, a, ops);
6760 static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6762 TCGv_vec t = tcg_temp_new_vec_matching(d);
6763 int halfbits = 4 << vece;
6764 int64_t max = (1ull << halfbits) - 1;
6766 tcg_gen_dupi_vec(vece, t, max);
6767 tcg_gen_umin_vec(vece, n, n, t);
6768 tcg_gen_shli_vec(vece, n, n, halfbits);
6769 tcg_gen_bitsel_vec(vece, d, t, d, n);
6770 tcg_temp_free_vec(t);
6773 static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a)
6775 static const GVecGen2 ops[3] = {
6776 { .fniv = gen_uqxtnt_vec,
6777 .opt_opc = uqxtn_list,
6778 .load_dest = true,
6779 .fno = gen_helper_sve2_uqxtnt_h,
6780 .vece = MO_16 },
6781 { .fniv = gen_uqxtnt_vec,
6782 .opt_opc = uqxtn_list,
6783 .load_dest = true,
6784 .fno = gen_helper_sve2_uqxtnt_s,
6785 .vece = MO_32 },
6786 { .fniv = gen_uqxtnt_vec,
6787 .opt_opc = uqxtn_list,
6788 .load_dest = true,
6789 .fno = gen_helper_sve2_uqxtnt_d,
6790 .vece = MO_64 },
6792 return do_sve2_narrow_extract(s, a, ops);
6795 static const TCGOpcode sqxtun_list[] = {
6796 INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
6799 static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6801 TCGv_vec t = tcg_temp_new_vec_matching(d);
6802 int halfbits = 4 << vece;
6803 int64_t max = (1ull << halfbits) - 1;
6805 tcg_gen_dupi_vec(vece, t, 0);
6806 tcg_gen_smax_vec(vece, d, n, t);
6807 tcg_gen_dupi_vec(vece, t, max);
6808 tcg_gen_umin_vec(vece, d, d, t);
6809 tcg_temp_free_vec(t);
6812 static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a)
6814 static const GVecGen2 ops[3] = {
6815 { .fniv = gen_sqxtunb_vec,
6816 .opt_opc = sqxtun_list,
6817 .fno = gen_helper_sve2_sqxtunb_h,
6818 .vece = MO_16 },
6819 { .fniv = gen_sqxtunb_vec,
6820 .opt_opc = sqxtun_list,
6821 .fno = gen_helper_sve2_sqxtunb_s,
6822 .vece = MO_32 },
6823 { .fniv = gen_sqxtunb_vec,
6824 .opt_opc = sqxtun_list,
6825 .fno = gen_helper_sve2_sqxtunb_d,
6826 .vece = MO_64 },
6828 return do_sve2_narrow_extract(s, a, ops);
6831 static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6833 TCGv_vec t = tcg_temp_new_vec_matching(d);
6834 int halfbits = 4 << vece;
6835 int64_t max = (1ull << halfbits) - 1;
6837 tcg_gen_dupi_vec(vece, t, 0);
6838 tcg_gen_smax_vec(vece, n, n, t);
6839 tcg_gen_dupi_vec(vece, t, max);
6840 tcg_gen_umin_vec(vece, n, n, t);
6841 tcg_gen_shli_vec(vece, n, n, halfbits);
6842 tcg_gen_bitsel_vec(vece, d, t, d, n);
6843 tcg_temp_free_vec(t);
6846 static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
6848 static const GVecGen2 ops[3] = {
6849 { .fniv = gen_sqxtunt_vec,
6850 .opt_opc = sqxtun_list,
6851 .load_dest = true,
6852 .fno = gen_helper_sve2_sqxtunt_h,
6853 .vece = MO_16 },
6854 { .fniv = gen_sqxtunt_vec,
6855 .opt_opc = sqxtun_list,
6856 .load_dest = true,
6857 .fno = gen_helper_sve2_sqxtunt_s,
6858 .vece = MO_32 },
6859 { .fniv = gen_sqxtunt_vec,
6860 .opt_opc = sqxtun_list,
6861 .load_dest = true,
6862 .fno = gen_helper_sve2_sqxtunt_d,
6863 .vece = MO_64 },
6865 return do_sve2_narrow_extract(s, a, ops);
6868 static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a,
6869 const GVecGen2i ops[3])
6871 if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) {
6872 return false;
6874 assert(a->imm > 0 && a->imm <= (8 << a->esz));
6875 if (sve_access_check(s)) {
6876 unsigned vsz = vec_full_reg_size(s);
6877 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6878 vec_full_reg_offset(s, a->rn),
6879 vsz, vsz, a->imm, &ops[a->esz]);
6881 return true;
6884 static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
6886 int halfbits = 4 << vece;
6887 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
6889 tcg_gen_shri_i64(d, n, shr);
6890 tcg_gen_andi_i64(d, d, mask);
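/*
 * Per-element sketch (illustrative only): since 1 <= shr <= halfbits,
 * the bits kept by the mask never cross a wide-element boundary, which
 * is why a single 64-bit shift over the whole lane is sufficient above.
 */
static inline uint64_t example_shrnb_elt(uint64_t n, int halfbits, int shr)
{
    return (n >> shr) & MAKE_64BIT_MASK(0, halfbits);
}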
6893 static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6895 gen_shrnb_i64(MO_16, d, n, shr);
6898 static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6900 gen_shrnb_i64(MO_32, d, n, shr);
6903 static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6905 gen_shrnb_i64(MO_64, d, n, shr);
6908 static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
6910 TCGv_vec t = tcg_temp_new_vec_matching(d);
6911 int halfbits = 4 << vece;
6912 uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
6914 tcg_gen_shri_vec(vece, n, n, shr);
6915 tcg_gen_dupi_vec(vece, t, mask);
6916 tcg_gen_and_vec(vece, d, n, t);
6917 tcg_temp_free_vec(t);
6920 static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a)
6922 static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 };
6923 static const GVecGen2i ops[3] = {
6924 { .fni8 = gen_shrnb16_i64,
6925 .fniv = gen_shrnb_vec,
6926 .opt_opc = vec_list,
6927 .fno = gen_helper_sve2_shrnb_h,
6928 .vece = MO_16 },
6929 { .fni8 = gen_shrnb32_i64,
6930 .fniv = gen_shrnb_vec,
6931 .opt_opc = vec_list,
6932 .fno = gen_helper_sve2_shrnb_s,
6933 .vece = MO_32 },
6934 { .fni8 = gen_shrnb64_i64,
6935 .fniv = gen_shrnb_vec,
6936 .opt_opc = vec_list,
6937 .fno = gen_helper_sve2_shrnb_d,
6938 .vece = MO_64 },
6940 return do_sve2_shr_narrow(s, a, ops);
6943 static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
6945 int halfbits = 4 << vece;
6946 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
6948 tcg_gen_shli_i64(n, n, halfbits - shr);
6949 tcg_gen_andi_i64(n, n, ~mask);
6950 tcg_gen_andi_i64(d, d, mask);
6951 tcg_gen_or_i64(d, d, n);
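/*
 * Per-element sketch (illustrative only): the narrowed value lands in
 * the high half of each wide element while the low half of the
 * destination (written by the _B form) is preserved.
 */
static inline uint64_t example_shrnt_elt(uint64_t d, uint64_t n,
                                         int halfbits, int shr)
{
    uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
    return (d & mask) | (((n >> shr) & mask) << halfbits);
}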
6954 static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6956 gen_shrnt_i64(MO_16, d, n, shr);
6959 static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6961 gen_shrnt_i64(MO_32, d, n, shr);
6964 static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6966 tcg_gen_shri_i64(n, n, shr);
6967 tcg_gen_deposit_i64(d, d, n, 32, 32);
6970 static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
6972 TCGv_vec t = tcg_temp_new_vec_matching(d);
6973 int halfbits = 4 << vece;
6974 uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
6976 tcg_gen_shli_vec(vece, n, n, halfbits - shr);
6977 tcg_gen_dupi_vec(vece, t, mask);
6978 tcg_gen_bitsel_vec(vece, d, t, d, n);
6979 tcg_temp_free_vec(t);
6982 static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a)
6984 static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 };
6985 static const GVecGen2i ops[3] = {
6986 { .fni8 = gen_shrnt16_i64,
6987 .fniv = gen_shrnt_vec,
6988 .opt_opc = vec_list,
6989 .load_dest = true,
6990 .fno = gen_helper_sve2_shrnt_h,
6991 .vece = MO_16 },
6992 { .fni8 = gen_shrnt32_i64,
6993 .fniv = gen_shrnt_vec,
6994 .opt_opc = vec_list,
6995 .load_dest = true,
6996 .fno = gen_helper_sve2_shrnt_s,
6997 .vece = MO_32 },
6998 { .fni8 = gen_shrnt64_i64,
6999 .fniv = gen_shrnt_vec,
7000 .opt_opc = vec_list,
7001 .load_dest = true,
7002 .fno = gen_helper_sve2_shrnt_d,
7003 .vece = MO_64 },
7005 return do_sve2_shr_narrow(s, a, ops);
7008 static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a)
7010 static const GVecGen2i ops[3] = {
7011 { .fno = gen_helper_sve2_rshrnb_h },
7012 { .fno = gen_helper_sve2_rshrnb_s },
7013 { .fno = gen_helper_sve2_rshrnb_d },
7015 return do_sve2_shr_narrow(s, a, ops);
7018 static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a)
7020 static const GVecGen2i ops[3] = {
7021 { .fno = gen_helper_sve2_rshrnt_h },
7022 { .fno = gen_helper_sve2_rshrnt_s },
7023 { .fno = gen_helper_sve2_rshrnt_d },
7025 return do_sve2_shr_narrow(s, a, ops);
7028 static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
7029 TCGv_vec n, int64_t shr)
7031 TCGv_vec t = tcg_temp_new_vec_matching(d);
7032 int halfbits = 4 << vece;
7034 tcg_gen_sari_vec(vece, n, n, shr);
7035 tcg_gen_dupi_vec(vece, t, 0);
7036 tcg_gen_smax_vec(vece, n, n, t);
7037 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7038 tcg_gen_umin_vec(vece, d, n, t);
7039 tcg_temp_free_vec(t);
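/*
 * Per-element sketch (illustrative only): arithmetic shift right, then
 * clamp to the unsigned half-width range [0, 2^halfbits - 1].
 */
static inline uint64_t example_sqshrunb_elt(int64_t n, int halfbits, int shr)
{
    int64_t v = n >> shr;   /* arithmetic shift right */
    int64_t max = MAKE_64BIT_MASK(0, halfbits);
    return v < 0 ? 0 : v > max ? max : v;
}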
7042 static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a)
7044 static const TCGOpcode vec_list[] = {
7045 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
7047 static const GVecGen2i ops[3] = {
7048 { .fniv = gen_sqshrunb_vec,
7049 .opt_opc = vec_list,
7050 .fno = gen_helper_sve2_sqshrunb_h,
7051 .vece = MO_16 },
7052 { .fniv = gen_sqshrunb_vec,
7053 .opt_opc = vec_list,
7054 .fno = gen_helper_sve2_sqshrunb_s,
7055 .vece = MO_32 },
7056 { .fniv = gen_sqshrunb_vec,
7057 .opt_opc = vec_list,
7058 .fno = gen_helper_sve2_sqshrunb_d,
7059 .vece = MO_64 },
7061 return do_sve2_shr_narrow(s, a, ops);
7064 static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
7065 TCGv_vec n, int64_t shr)
7067 TCGv_vec t = tcg_temp_new_vec_matching(d);
7068 int halfbits = 4 << vece;
7070 tcg_gen_sari_vec(vece, n, n, shr);
7071 tcg_gen_dupi_vec(vece, t, 0);
7072 tcg_gen_smax_vec(vece, n, n, t);
7073 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7074 tcg_gen_umin_vec(vece, n, n, t);
7075 tcg_gen_shli_vec(vece, n, n, halfbits);
7076 tcg_gen_bitsel_vec(vece, d, t, d, n);
7077 tcg_temp_free_vec(t);
7080 static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a)
7082 static const TCGOpcode vec_list[] = {
7083 INDEX_op_shli_vec, INDEX_op_sari_vec,
7084 INDEX_op_smax_vec, INDEX_op_umin_vec, 0
7086 static const GVecGen2i ops[3] = {
7087 { .fniv = gen_sqshrunt_vec,
7088 .opt_opc = vec_list,
7089 .load_dest = true,
7090 .fno = gen_helper_sve2_sqshrunt_h,
7091 .vece = MO_16 },
7092 { .fniv = gen_sqshrunt_vec,
7093 .opt_opc = vec_list,
7094 .load_dest = true,
7095 .fno = gen_helper_sve2_sqshrunt_s,
7096 .vece = MO_32 },
7097 { .fniv = gen_sqshrunt_vec,
7098 .opt_opc = vec_list,
7099 .load_dest = true,
7100 .fno = gen_helper_sve2_sqshrunt_d,
7101 .vece = MO_64 },
7103 return do_sve2_shr_narrow(s, a, ops);
7106 static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a)
7108 static const GVecGen2i ops[3] = {
7109 { .fno = gen_helper_sve2_sqrshrunb_h },
7110 { .fno = gen_helper_sve2_sqrshrunb_s },
7111 { .fno = gen_helper_sve2_sqrshrunb_d },
7113 return do_sve2_shr_narrow(s, a, ops);
7116 static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
7118 static const GVecGen2i ops[3] = {
7119 { .fno = gen_helper_sve2_sqrshrunt_h },
7120 { .fno = gen_helper_sve2_sqrshrunt_s },
7121 { .fno = gen_helper_sve2_sqrshrunt_d },
7123 return do_sve2_shr_narrow(s, a, ops);
7126 static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
7127 TCGv_vec n, int64_t shr)
7129 TCGv_vec t = tcg_temp_new_vec_matching(d);
7130 int halfbits = 4 << vece;
7131 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
7132 int64_t min = -max - 1;
7134 tcg_gen_sari_vec(vece, n, n, shr);
7135 tcg_gen_dupi_vec(vece, t, min);
7136 tcg_gen_smax_vec(vece, n, n, t);
7137 tcg_gen_dupi_vec(vece, t, max);
7138 tcg_gen_smin_vec(vece, n, n, t);
7139 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7140 tcg_gen_and_vec(vece, d, n, t);
7141 tcg_temp_free_vec(t);
7144 static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a)
7146 static const TCGOpcode vec_list[] = {
7147 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
7149 static const GVecGen2i ops[3] = {
7150 { .fniv = gen_sqshrnb_vec,
7151 .opt_opc = vec_list,
7152 .fno = gen_helper_sve2_sqshrnb_h,
7153 .vece = MO_16 },
7154 { .fniv = gen_sqshrnb_vec,
7155 .opt_opc = vec_list,
7156 .fno = gen_helper_sve2_sqshrnb_s,
7157 .vece = MO_32 },
7158 { .fniv = gen_sqshrnb_vec,
7159 .opt_opc = vec_list,
7160 .fno = gen_helper_sve2_sqshrnb_d,
7161 .vece = MO_64 },
7163 return do_sve2_shr_narrow(s, a, ops);
7166 static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
7167 TCGv_vec n, int64_t shr)
7169 TCGv_vec t = tcg_temp_new_vec_matching(d);
7170 int halfbits = 4 << vece;
7171 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
7172 int64_t min = -max - 1;
7174 tcg_gen_sari_vec(vece, n, n, shr);
7175 tcg_gen_dupi_vec(vece, t, min);
7176 tcg_gen_smax_vec(vece, n, n, t);
7177 tcg_gen_dupi_vec(vece, t, max);
7178 tcg_gen_smin_vec(vece, n, n, t);
7179 tcg_gen_shli_vec(vece, n, n, halfbits);
7180 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7181 tcg_gen_bitsel_vec(vece, d, t, d, n);
7182 tcg_temp_free_vec(t);
7185 static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a)
7187 static const TCGOpcode vec_list[] = {
7188 INDEX_op_shli_vec, INDEX_op_sari_vec,
7189 INDEX_op_smax_vec, INDEX_op_smin_vec, 0
7191 static const GVecGen2i ops[3] = {
7192 { .fniv = gen_sqshrnt_vec,
7193 .opt_opc = vec_list,
7194 .load_dest = true,
7195 .fno = gen_helper_sve2_sqshrnt_h,
7196 .vece = MO_16 },
7197 { .fniv = gen_sqshrnt_vec,
7198 .opt_opc = vec_list,
7199 .load_dest = true,
7200 .fno = gen_helper_sve2_sqshrnt_s,
7201 .vece = MO_32 },
7202 { .fniv = gen_sqshrnt_vec,
7203 .opt_opc = vec_list,
7204 .load_dest = true,
7205 .fno = gen_helper_sve2_sqshrnt_d,
7206 .vece = MO_64 },
7208 return do_sve2_shr_narrow(s, a, ops);
7211 static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a)
7213 static const GVecGen2i ops[3] = {
7214 { .fno = gen_helper_sve2_sqrshrnb_h },
7215 { .fno = gen_helper_sve2_sqrshrnb_s },
7216 { .fno = gen_helper_sve2_sqrshrnb_d },
7218 return do_sve2_shr_narrow(s, a, ops);
7221 static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a)
7223 static const GVecGen2i ops[3] = {
7224 { .fno = gen_helper_sve2_sqrshrnt_h },
7225 { .fno = gen_helper_sve2_sqrshrnt_s },
7226 { .fno = gen_helper_sve2_sqrshrnt_d },
7228 return do_sve2_shr_narrow(s, a, ops);
7231 static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
7232 TCGv_vec n, int64_t shr)
7234 TCGv_vec t = tcg_temp_new_vec_matching(d);
7235 int halfbits = 4 << vece;
7237 tcg_gen_shri_vec(vece, n, n, shr);
7238 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7239 tcg_gen_umin_vec(vece, d, n, t);
7240 tcg_temp_free_vec(t);
7243 static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a)
7245 static const TCGOpcode vec_list[] = {
7246 INDEX_op_shri_vec, INDEX_op_umin_vec, 0
7248 static const GVecGen2i ops[3] = {
7249 { .fniv = gen_uqshrnb_vec,
7250 .opt_opc = vec_list,
7251 .fno = gen_helper_sve2_uqshrnb_h,
7252 .vece = MO_16 },
7253 { .fniv = gen_uqshrnb_vec,
7254 .opt_opc = vec_list,
7255 .fno = gen_helper_sve2_uqshrnb_s,
7256 .vece = MO_32 },
7257 { .fniv = gen_uqshrnb_vec,
7258 .opt_opc = vec_list,
7259 .fno = gen_helper_sve2_uqshrnb_d,
7260 .vece = MO_64 },
7262 return do_sve2_shr_narrow(s, a, ops);
7265 static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
7266 TCGv_vec n, int64_t shr)
7268 TCGv_vec t = tcg_temp_new_vec_matching(d);
7269 int halfbits = 4 << vece;
7271 tcg_gen_shri_vec(vece, n, n, shr);
7272 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7273 tcg_gen_umin_vec(vece, n, n, t);
7274 tcg_gen_shli_vec(vece, n, n, halfbits);
7275 tcg_gen_bitsel_vec(vece, d, t, d, n);
7276 tcg_temp_free_vec(t);
7279 static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a)
7281 static const TCGOpcode vec_list[] = {
7282 INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
7284 static const GVecGen2i ops[3] = {
7285 { .fniv = gen_uqshrnt_vec,
7286 .opt_opc = vec_list,
7287 .load_dest = true,
7288 .fno = gen_helper_sve2_uqshrnt_h,
7289 .vece = MO_16 },
7290 { .fniv = gen_uqshrnt_vec,
7291 .opt_opc = vec_list,
7292 .load_dest = true,
7293 .fno = gen_helper_sve2_uqshrnt_s,
7294 .vece = MO_32 },
7295 { .fniv = gen_uqshrnt_vec,
7296 .opt_opc = vec_list,
7297 .load_dest = true,
7298 .fno = gen_helper_sve2_uqshrnt_d,
7299 .vece = MO_64 },
7301 return do_sve2_shr_narrow(s, a, ops);
7304 static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a)
7306 static const GVecGen2i ops[3] = {
7307 { .fno = gen_helper_sve2_uqrshrnb_h },
7308 { .fno = gen_helper_sve2_uqrshrnb_s },
7309 { .fno = gen_helper_sve2_uqrshrnb_d },
7311 return do_sve2_shr_narrow(s, a, ops);
7314 static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a)
7316 static const GVecGen2i ops[3] = {
7317 { .fno = gen_helper_sve2_uqrshrnt_h },
7318 { .fno = gen_helper_sve2_uqrshrnt_s },
7319 { .fno = gen_helper_sve2_uqrshrnt_d },
7321 return do_sve2_shr_narrow(s, a, ops);
7324 #define DO_SVE2_ZZZ_NARROW(NAME, name) \
7325 static gen_helper_gvec_3 * const name##_fns[4] = { \
7326 NULL, gen_helper_sve2_##name##_h, \
7327 gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
7328 }; \
7329 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzz, \
7330 name##_fns[a->esz], a, 0)
7332 DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb)
7333 DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
7334 DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
7335 DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)
7337 DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
7338 DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
7339 DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb)
7340 DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
7342 static bool do_sve2_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
7343 gen_helper_gvec_flags_4 *fn)
7345 if (!dc_isar_feature(aa64_sve2, s)) {
7346 return false;
7348 return do_ppzz_flags(s, a, fn);
7351 #define DO_SVE2_PPZZ_MATCH(NAME, name) \
7352 static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
7354 static gen_helper_gvec_flags_4 * const fns[4] = { \
7355 gen_helper_sve2_##name##_ppzz_b, gen_helper_sve2_##name##_ppzz_h, \
7356 NULL, NULL \
7357 }; \
7358 return do_sve2_ppzz_flags(s, a, fns[a->esz]); \
7361 DO_SVE2_PPZZ_MATCH(MATCH, match)
7362 DO_SVE2_PPZZ_MATCH(NMATCH, nmatch)
7364 static gen_helper_gvec_4 * const histcnt_fns[4] = {
7365 NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
7367 TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
7368 histcnt_fns[a->esz], a, 0)
7370 TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
7371 a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
7373 static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
7374 gen_helper_gvec_4_ptr *fn)
7376 if (!dc_isar_feature(aa64_sve2, s)) {
7377 return false;
7379 return do_zpzz_fp(s, a, fn);
7382 #define DO_SVE2_ZPZZ_FP(NAME, name) \
7383 static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
7385 static gen_helper_gvec_4_ptr * const fns[4] = { \
7386 NULL, gen_helper_sve2_##name##_zpzz_h, \
7387 gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
7388 }; \
7389 return do_sve2_zpzz_fp(s, a, fns[a->esz]); \
7392 DO_SVE2_ZPZZ_FP(FADDP, faddp)
7393 DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp)
7394 DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp)
7395 DO_SVE2_ZPZZ_FP(FMAXP, fmaxp)
7396 DO_SVE2_ZPZZ_FP(FMINP, fminp)
7399 * SVE Integer Multiply-Add (unpredicated)
7402 static bool trans_FMMLA(DisasContext *s, arg_rrrr_esz *a)
7404 gen_helper_gvec_4_ptr *fn;
7406 switch (a->esz) {
7407 case MO_32:
7408 if (!dc_isar_feature(aa64_sve_f32mm, s)) {
7409 return false;
7411 fn = gen_helper_fmmla_s;
7412 break;
7413 case MO_64:
7414 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
7415 return false;
7417 fn = gen_helper_fmmla_d;
7418 break;
7419 default:
7420 return false;
7423 if (sve_access_check(s)) {
7424 unsigned vsz = vec_full_reg_size(s);
7425 TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
7426 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
7427 vec_full_reg_offset(s, a->rn),
7428 vec_full_reg_offset(s, a->rm),
7429 vec_full_reg_offset(s, a->ra),
7430 status, vsz, vsz, 0, fn);
7431 tcg_temp_free_ptr(status);
7433 return true;
7436 static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
7437 NULL, gen_helper_sve2_sqdmlal_zzzw_h,
7438 gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d,
7440 TRANS_FEAT(SQDMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7441 sqdmlal_zzzw_fns[a->esz], a, 0)
7442 TRANS_FEAT(SQDMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7443 sqdmlal_zzzw_fns[a->esz], a, 3)
7444 TRANS_FEAT(SQDMLALBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
7445 sqdmlal_zzzw_fns[a->esz], a, 2)
7447 static gen_helper_gvec_4 * const sqdmlsl_zzzw_fns[] = {
7448 NULL, gen_helper_sve2_sqdmlsl_zzzw_h,
7449 gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d,
7451 TRANS_FEAT(SQDMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7452 sqdmlsl_zzzw_fns[a->esz], a, 0)
7453 TRANS_FEAT(SQDMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7454 sqdmlsl_zzzw_fns[a->esz], a, 3)
7455 TRANS_FEAT(SQDMLSLBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
7456 sqdmlsl_zzzw_fns[a->esz], a, 2)
7458 static gen_helper_gvec_4 * const sqrdmlah_fns[] = {
7459 gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h,
7460 gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d,
7462 TRANS_FEAT(SQRDMLAH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
7463 sqrdmlah_fns[a->esz], a, 0)
7465 static gen_helper_gvec_4 * const sqrdmlsh_fns[] = {
7466 gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h,
7467 gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d,
7469 TRANS_FEAT(SQRDMLSH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
7470 sqrdmlsh_fns[a->esz], a, 0)
7472 static gen_helper_gvec_4 * const smlal_zzzw_fns[] = {
7473 NULL, gen_helper_sve2_smlal_zzzw_h,
7474 gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d,
7476 TRANS_FEAT(SMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7477 smlal_zzzw_fns[a->esz], a, 0)
7478 TRANS_FEAT(SMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7479 smlal_zzzw_fns[a->esz], a, 1)
7481 static gen_helper_gvec_4 * const umlal_zzzw_fns[] = {
7482 NULL, gen_helper_sve2_umlal_zzzw_h,
7483 gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d,
7485 TRANS_FEAT(UMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7486 umlal_zzzw_fns[a->esz], a, 0)
7487 TRANS_FEAT(UMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7488 umlal_zzzw_fns[a->esz], a, 1)
7490 static gen_helper_gvec_4 * const smlsl_zzzw_fns[] = {
7491 NULL, gen_helper_sve2_smlsl_zzzw_h,
7492 gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d,
7494 TRANS_FEAT(SMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7495 smlsl_zzzw_fns[a->esz], a, 0)
7496 TRANS_FEAT(SMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7497 smlsl_zzzw_fns[a->esz], a, 1)
7499 static gen_helper_gvec_4 * const umlsl_zzzw_fns[] = {
7500 NULL, gen_helper_sve2_umlsl_zzzw_h,
7501 gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d,
7503 TRANS_FEAT(UMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7504 umlsl_zzzw_fns[a->esz], a, 0)
7505 TRANS_FEAT(UMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7506 umlsl_zzzw_fns[a->esz], a, 1)
7508 static gen_helper_gvec_4 * const cmla_fns[] = {
7509 gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
7510 gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
7512 TRANS_FEAT(CMLA_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7513 cmla_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7515 static gen_helper_gvec_4 * const cdot_fns[] = {
7516 NULL, NULL, gen_helper_sve2_cdot_zzzz_s, gen_helper_sve2_cdot_zzzz_d
7518 TRANS_FEAT(CDOT_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7519 cdot_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7521 static gen_helper_gvec_4 * const sqrdcmlah_fns[] = {
7522 gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
7523 gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
7525 TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7526 sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7528 TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7529 a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
7531 TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
7532 gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
7534 TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
7535 gen_helper_crypto_aese, a, false)
7536 TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
7537 gen_helper_crypto_aese, a, true)
7539 TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
7540 gen_helper_crypto_sm4e, a, 0)
7541 TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
7542 gen_helper_crypto_sm4ekey, a, 0)
7544 TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a)
7546 static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a)
7548 if (!dc_isar_feature(aa64_sve2, s)) {
7549 return false;
7551 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh);
7554 static bool trans_BFCVTNT(DisasContext *s, arg_rpr_esz *a)
7556 if (!dc_isar_feature(aa64_sve_bf16, s)) {
7557 return false;
7559 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvtnt);
7562 static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a)
7564 if (!dc_isar_feature(aa64_sve2, s)) {
7565 return false;
7567 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_ds);
7570 static bool trans_FCVTLT_hs(DisasContext *s, arg_rpr_esz *a)
7572 if (!dc_isar_feature(aa64_sve2, s)) {
7573 return false;
7575 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_hs);
7578 static bool trans_FCVTLT_sd(DisasContext *s, arg_rpr_esz *a)
7580 if (!dc_isar_feature(aa64_sve2, s)) {
7581 return false;
7583 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_sd);
7586 static bool trans_FCVTX_ds(DisasContext *s, arg_rpr_esz *a)
7588 if (!dc_isar_feature(aa64_sve2, s)) {
7589 return false;
7591 return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve_fcvt_ds);
7594 static bool trans_FCVTXNT_ds(DisasContext *s, arg_rpr_esz *a)
7596 if (!dc_isar_feature(aa64_sve2, s)) {
7597 return false;
7599 return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve2_fcvtnt_ds);
7602 static bool trans_FLOGB(DisasContext *s, arg_rpr_esz *a)
7604 static gen_helper_gvec_3_ptr * const fns[] = {
7605 NULL, gen_helper_flogb_h,
7606 gen_helper_flogb_s, gen_helper_flogb_d
7609 if (!dc_isar_feature(aa64_sve2, s) || fns[a->esz] == NULL) {
7610 return false;
7612 if (sve_access_check(s)) {
7613 TCGv_ptr status =
7614 fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
7615 unsigned vsz = vec_full_reg_size(s);
7617 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
7618 vec_full_reg_offset(s, a->rn),
7619 pred_full_reg_offset(s, a->pg),
7620 status, vsz, vsz, 0, fns[a->esz]);
7621 tcg_temp_free_ptr(status);
7623 return true;
7626 static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
7628 if (!dc_isar_feature(aa64_sve2, s)) {
7629 return false;
7631 if (sve_access_check(s)) {
7632 unsigned vsz = vec_full_reg_size(s);
7633 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
7634 vec_full_reg_offset(s, a->rn),
7635 vec_full_reg_offset(s, a->rm),
7636 vec_full_reg_offset(s, a->ra),
7637 cpu_env, vsz, vsz, (sel << 1) | sub,
7638 gen_helper_sve2_fmlal_zzzw_s);
7640 return true;
7643 static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
7645 return do_FMLAL_zzzw(s, a, false, false);
7648 static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
7650 return do_FMLAL_zzzw(s, a, false, true);
7653 static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
7655 return do_FMLAL_zzzw(s, a, true, false);
7658 static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
7660 return do_FMLAL_zzzw(s, a, true, true);
7663 static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
7665 if (!dc_isar_feature(aa64_sve2, s)) {
7666 return false;
7668 if (sve_access_check(s)) {
7669 unsigned vsz = vec_full_reg_size(s);
7670 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
7671 vec_full_reg_offset(s, a->rn),
7672 vec_full_reg_offset(s, a->rm),
7673 vec_full_reg_offset(s, a->ra),
7674 cpu_env, vsz, vsz,
7675 (a->index << 2) | (sel << 1) | sub,
7676 gen_helper_sve2_fmlal_zzxw_s);
7678 return true;
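/*
 * Illustrative unpacking of the simd_data word built above
 * (hypothetical helper, not used): bit 0 is subtract, bit 1 selects
 * the top half, and the remaining bits carry the index for the _zzxw
 * form.
 */
static inline void example_fmlal_data(uint32_t data, bool *sub,
                                      bool *sel, int *index)
{
    *sub = data & 1;
    *sel = (data >> 1) & 1;
    *index = data >> 2;
}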
7681 static bool trans_FMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
7683 return do_FMLAL_zzxw(s, a, false, false);
7686 static bool trans_FMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
7688 return do_FMLAL_zzxw(s, a, false, true);
7691 static bool trans_FMLSLB_zzxw(DisasContext *s, arg_rrxr_esz *a)
7693 return do_FMLAL_zzxw(s, a, true, false);
7696 static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a)
7698 return do_FMLAL_zzxw(s, a, true, true);
7701 TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7702 gen_helper_gvec_smmla_b, a, 0)
7703 TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7704 gen_helper_gvec_usmmla_b, a, 0)
7705 TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7706 gen_helper_gvec_ummla_b, a, 0)
7708 TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
7709 gen_helper_gvec_bfdot, a, 0)
7710 TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
7711 gen_helper_gvec_bfdot_idx, a)
7713 TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
7714 gen_helper_gvec_bfmmla, a, 0)
7716 static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
7718 if (!dc_isar_feature(aa64_sve_bf16, s)) {
7719 return false;
7721 if (sve_access_check(s)) {
7722 TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
7723 unsigned vsz = vec_full_reg_size(s);
7725 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
7726 vec_full_reg_offset(s, a->rn),
7727 vec_full_reg_offset(s, a->rm),
7728 vec_full_reg_offset(s, a->ra),
7729 status, vsz, vsz, sel,
7730 gen_helper_gvec_bfmlal);
7731 tcg_temp_free_ptr(status);
7733 return true;
7736 static bool trans_BFMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
7738 return do_BFMLAL_zzzw(s, a, false);
7741 static bool trans_BFMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
7743 return do_BFMLAL_zzzw(s, a, true);
7746 static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
7748 if (!dc_isar_feature(aa64_sve_bf16, s)) {
7749 return false;
7751 if (sve_access_check(s)) {
7752 TCGv_ptr status = fpstatus_ptr(FPST_FPCR);
7753 unsigned vsz = vec_full_reg_size(s);
7755 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
7756 vec_full_reg_offset(s, a->rn),
7757 vec_full_reg_offset(s, a->rm),
7758 vec_full_reg_offset(s, a->ra),
7759 status, vsz, vsz, (a->index << 1) | sel,
7760 gen_helper_gvec_bfmlal_idx);
7761 tcg_temp_free_ptr(status);
7763 return true;
7766 static bool trans_BFMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
7768 return do_BFMLAL_zzxw(s, a, false);
7771 static bool trans_BFMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
7773 return do_BFMLAL_zzxw(s, a, true);