target/arm: Use TRANS_FEAT for DO_FP3
/*
 * AArch64 SVE translation
 *
 * Copyright (c) 2018 Linaro, Ltd
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "translate-a64.h"
#include "fpu/softfloat.h"
typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t,
                         TCGv_i64, uint32_t, uint32_t);

typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_i32);
typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_ptr, TCGv_i32);

typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr,
                                         TCGv_ptr, TCGv_i64, TCGv_i32);
/*
 * Helpers for extracting complex instruction fields.
 */

/* See e.g. ASR (immediate, predicated).
 * Returns -1 for unallocated encoding; diagnose later.
 */
static int tszimm_esz(DisasContext *s, int x)
{
    x >>= 3;  /* discard imm3 */
    return 31 - clz32(x);
}
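/*
 * Editor's note, a worked example: the immediate field is tsz:imm3, so
 * x >> 3 leaves tsz, whose highest set bit encodes the element size.
 * For a byte-sized ASR by 3, x = 0b01101: tsz = 0b01 gives esz = 0, and
 * tszimm_shr() below returns 16 - 13 = 3.  A tsz of zero makes
 * 31 - clz32(0) come out as -1, the "unallocated" marker.
 */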
static int tszimm_shr(DisasContext *s, int x)
{
    return (16 << tszimm_esz(s, x)) - x;
}

/* See e.g. LSL (immediate, predicated). */
static int tszimm_shl(DisasContext *s, int x)
{
    return x - (8 << tszimm_esz(s, x));
}

/* The SH bit is in bit 8.  Extract the low 8 and shift. */
static inline int expand_imm_sh8s(DisasContext *s, int x)
{
    return (int8_t)x << (x & 0x100 ? 8 : 0);
}

static inline int expand_imm_sh8u(DisasContext *s, int x)
{
    return (uint8_t)x << (x & 0x100 ? 8 : 0);
}
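/*
 * Editor's note, for illustration: with x = 0x180, the low byte 0x80
 * sign-extends to -128 and the SH bit shifts it left by 8, so
 * expand_imm_sh8s() yields -32768; expand_imm_sh8u() on the same input
 * yields 0x8000.
 */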
/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
 * with unsigned data.  C.f. SVE Memory Contiguous Load Group.
 */
static inline int msz_dtype(DisasContext *s, int msz)
{
    static const uint8_t dtype[4] = { 0, 5, 10, 15 };
    return dtype[msz];
}
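/*
 * Editor's note: dtype is a 4-bit msz/esz pair, so msz * 5 selects the
 * diagonal entries where memory size equals element size -- i.e. the
 * plain unsigned LD1B/LD1H/LD1W/LD1D forms.
 */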
/*
 * Include the generated decoder.
 */

#include "decode-sve.c.inc"

/*
 * Implement all of the translator functions referenced by the decoder.
 */
/* Return the offset, within CPUARMState, of the predicate vector
 * register Pn.  Note that for this purpose, FFR is P16.
 */
static inline int pred_full_reg_offset(DisasContext *s, int regno)
{
    return offsetof(CPUARMState, vfp.pregs[regno]);
}

/* Return the byte size of the whole predicate register, VL / 64. */
static inline int pred_full_reg_size(DisasContext *s)
{
    return s->sve_len >> 3;
}
/* Round up the size of a register to a size allowed by
 * the tcg vector infrastructure.  Any operation which uses this
 * size may assume that the bits above pred_full_reg_size are zero,
 * and must leave them the same way.
 *
 * Note that this is not needed for the vector registers as they
 * are always properly sized for tcg vectors.
 */
static int size_for_gvec(int size)
{
    if (size <= 8) {
        return 8;
    } else {
        return QEMU_ALIGN_UP(size, 16);
    }
}

static int pred_gvec_reg_size(DisasContext *s)
{
    return size_for_gvec(pred_full_reg_size(s));
}
/* Invoke an out-of-line helper on 2 Zregs. */
static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
                            int rd, int rn, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vsz, vsz, data, fn);
    }
    return true;
}
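/*
 * Editor's note on the pattern above: returning false (fn == NULL)
 * tells the decoder the encoding is unallocated.  When
 * sve_access_check() fails it has already raised the access exception,
 * so we still return true -- the instruction was decoded, it trapped.
 */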
/* Invoke an out-of-line helper on 3 Zregs. */
static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int rm, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rrr_esz *a, int data)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
}

/* Invoke an out-of-line helper on 3 Zregs, plus float_status. */
static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                              int rd, int rn, int rm,
                              int data, ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           status, vsz, vsz, data, fn);

        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                                  arg_rrr_esz *a, int data)
{
    return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data,
                             a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}
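/*
 * Editor's note: half-precision operations use FPST_FPCR_F16, the
 * float_status that honours FPCR.FZ16, while the other element sizes
 * share the ordinary FPCR-controlled status.
 */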
/* Invoke an out-of-line helper on 4 Zregs. */
static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int ra, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrrr_esz *a, int data)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
}

static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrxr_esz *a)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
}

/* Invoke an out-of-line helper on 4 Zregs, plus a pointer. */
static bool gen_gvec_ptr_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                              int rd, int rn, int rm, int ra,
                              int data, TCGv_ptr ptr)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           ptr, vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                               int rd, int rn, int rm, int ra,
                               int data, ARMFPStatusFlavour flavour)
{
    TCGv_ptr status = fpstatus_ptr(flavour);
    bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status);

    tcg_temp_free_ptr(status);
    return ret;
}
/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rpr_esz *a, int data)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data);
}

static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn,
                                  arg_rpri_esz *a)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
}

/* Invoke an out-of-line helper on 3 Zregs and a predicate. */
static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rprr_esz *a, int data)
{
    return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data);
}
/* Invoke a vector expander on two Zregs and an immediate. */
static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                            int esz, int rd, int rn, uint64_t imm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn), imm, vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                                arg_rri_esz *a)
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm);
}

/* Invoke a vector expander on three Zregs. */
static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int esz, int rd, int rn, int rm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn),
                vec_full_reg_offset(s, rm), vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn,
                                arg_rrr_esz *a)
{
    return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
}

/* Invoke a vector expander on four Zregs. */
static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
                                 arg_rrrr_esz *a)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn),
                vec_full_reg_offset(s, a->rm),
                vec_full_reg_offset(s, a->ra), vsz, vsz);
    }
    return true;
}
/* Invoke a vector move on two Zregs. */
static bool do_mov_z(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn), vsz, vsz);
    }
    return true;
}

/* Initialize a Zreg with replications of a 64-bit immediate. */
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}

/* Invoke a vector expander on three Pregs. */
static bool gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int rd, int rn, int rm)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        gvec_fn(MO_64, pred_full_reg_offset(s, rd),
                pred_full_reg_offset(s, rn),
                pred_full_reg_offset(s, rm), psz, psz);
    }
    return true;
}

/* Invoke a vector move on two Pregs. */
static bool do_mov_p(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
                         pred_full_reg_offset(s, rn), psz, psz);
    }
    return true;
}

/* Set the cpu flags as per a return from an SVE helper. */
static void do_pred_flags(TCGv_i32 t)
{
    tcg_gen_mov_i32(cpu_NF, t);
    tcg_gen_andi_i32(cpu_ZF, t, 2);
    tcg_gen_andi_i32(cpu_CF, t, 1);
    tcg_gen_movi_i32(cpu_VF, 0);
}
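/*
 * Editor's note: the predtest helpers pack NZCV into one word -- bit 31
 * is N (the first active element was set), bit 1 is nonzero when any
 * active element was set (so ZF, which signals Z when it is zero, gives
 * Z = "no active element"), and bit 0 is C (the last active element was
 * clear).  V is always 0.
 */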
/* Subroutines computing the ARM PredTest pseudofunction. */
static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_helper_sve_predtest1(t, d, g);
    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
{
    TCGv_ptr dptr = tcg_temp_new_ptr();
    TCGv_ptr gptr = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
    tcg_gen_addi_ptr(gptr, cpu_env, gofs);

    gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words));

    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(gptr);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

/* For each element size, the bits within a predicate word that are active. */
const uint64_t pred_esz_masks[4] = {
    0xffffffffffffffffull, 0x5555555555555555ull,
    0x1111111111111111ull, 0x0101010101010101ull
};
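/*
 * Editor's note: a predicate holds one bit per byte of vector, so only
 * every (1 << esz)-th bit is meaningful.  E.g. for halfwords (esz = 1)
 * the mask 0x5555... keeps bits 0, 2, 4, ...; for doublewords only one
 * bit in eight survives, hence 0x0101...
 */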
static bool trans_INVALID(DisasContext *s, arg_INVALID *a)
{
    unallocated_encoding(s);
    return true;
}

/*
 *** SVE Logical - Unpredicated Group
 */

TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a)
TRANS_FEAT(ORR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_or, a)
TRANS_FEAT(EOR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_xor, a)
TRANS_FEAT(BIC_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_andc, a)
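/*
 * Editor's note: TRANS_FEAT (from translate.h) expands to roughly
 *
 *   static bool trans_AND_zzz(DisasContext *s, arg_AND_zzz *a)
 *   { return dc_isar_feature(aa64_sve, s)
 *            && gen_gvec_fn_arg_zzz(s, tcg_gen_gvec_and, a); }
 *
 * i.e. it fuses the feature-presence test with the expansion call.
 */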
static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_8, 0xff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 8 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_16, 0xffff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 16 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
{
    tcg_gen_xor_i32(d, n, m);
    tcg_gen_rotri_i32(d, d, sh);
}

static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_rotri_i64(d, d, sh);
}

static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                        TCGv_vec m, int64_t sh)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_rotri_vec(vece, d, d, sh);
}
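/*
 * Editor's note: the byte and halfword variants above cannot use a
 * native rotate, so they emulate a per-lane rotate right on an i64:
 * shift the xor result right by sh and left by (lane width - sh), then
 * use the dup_const mask to keep each half within its own lane before
 * merging.  The 32- and 64-bit lanes rotate directly.
 */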
void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, int64_t shift,
                  uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3i ops[4] = {
        { .fni8 = gen_xar8_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fni8 = gen_xar16_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_xar_i32,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_xar_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_gvec_xar_d,
          .opt_opc = vecop,
          .vece = MO_64 }
    };
    int esize = 8 << vece;

    /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
    tcg_debug_assert(shift >= 0);
    tcg_debug_assert(shift <= esize);
    shift &= esize - 1;

    if (shift == 0) {
        /* xar with no rotate devolves to xor. */
        tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
    } else {
        tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
                        shift, &ops[vece]);
    }
}

static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
{
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
                     vec_full_reg_offset(s, a->rn),
                     vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
    }
    return true;
}
static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_xor_i64(d, d, k);
}

static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_xor_vec(vece, d, d, k);
}

static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor3_i64,
        .fniv = gen_eor3_vec,
        .fno = gen_helper_sve2_eor3,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(EOR3, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_eor3, a)

static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(d, m, k);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_andc_vec(vece, d, m, k);
    tcg_gen_xor_vec(vece, d, d, n);
}

static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bcax_i64,
        .fniv = gen_bcax_vec,
        .fno = gen_helper_sve2_bcax,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BCAX, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bcax, a)

static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                    uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    /* BSL differs from the generic bitsel in argument ordering. */
    tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
}

TRANS_FEAT(BSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl, a)
static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(n, k, n);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_or_i64(d, n, m);
}

static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, n, n);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_andc_vec(vece, n, k, n);
        tcg_gen_andc_vec(vece, m, m, k);
        tcg_gen_or_vec(vece, d, n, m);
    }
}

static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl1n_i64,
        .fniv = gen_bsl1n_vec,
        .fno = gen_helper_sve2_bsl1n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BSL1N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl1n, a)

static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    /*
     * Z[dn] = (n & k) | (~m & ~k)
     *       = (n & k) | ~(m | k)
     */
    tcg_gen_and_i64(n, n, k);
    if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_or_i64(m, m, k);
        tcg_gen_orc_i64(d, n, m);
    } else {
        tcg_gen_nor_i64(m, m, k);
        tcg_gen_or_i64(d, n, m);
    }
}
static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, m, m);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_and_vec(vece, n, n, k);
        tcg_gen_or_vec(vece, m, m, k);
        tcg_gen_orc_vec(vece, d, n, m);
    }
}

static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl2n_i64,
        .fniv = gen_bsl2n_vec,
        .fno = gen_helper_sve2_bsl2n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BSL2N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl2n, a)

static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_and_i64(n, n, k);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_nor_i64(d, n, m);
}

static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_bitsel_vec(vece, d, k, n, m);
    tcg_gen_not_vec(vece, d, d);
}

static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_nbsl_i64,
        .fniv = gen_nbsl_vec,
        .fno = gen_helper_sve2_nbsl,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(NBSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_nbsl, a)
/*
 *** SVE Integer Arithmetic - Unpredicated Group
 */

TRANS_FEAT(ADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_add, a)
TRANS_FEAT(SUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sub, a)
TRANS_FEAT(SQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ssadd, a)
TRANS_FEAT(SQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sssub, a)
TRANS_FEAT(UQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_usadd, a)
TRANS_FEAT(UQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ussub, a)

/*
 *** SVE Integer Arithmetic - Binary Predicated Group
 */

/* Select active elements from Zn and inactive elements from Zm,
 * storing the result in Zd.
 */
static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
{
    static gen_helper_gvec_4 * const fns[4] = {
        gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
        gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d
    };
    return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
}
#define DO_ZPZZ(NAME, FEAT, name) \
    static gen_helper_gvec_4 * const name##_zpzz_fns[4] = {               \
        gen_helper_##name##_zpzz_b, gen_helper_##name##_zpzz_h,           \
        gen_helper_##name##_zpzz_s, gen_helper_##name##_zpzz_d,           \
    };                                                                    \
    TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpzz,                         \
               name##_zpzz_fns[a->esz], a, 0)
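/*
 * Editor's note: e.g. DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add) defines the
 * table sve_add_zpzz_fns[] of per-element-size helpers plus a
 * trans_ADD_zpzz() that indexes it by a->esz; a NULL table entry makes
 * gen_gvec_ool_arg_zpzz() report the encoding as unallocated.
 */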
DO_ZPZZ(AND_zpzz, aa64_sve, sve_and)
DO_ZPZZ(EOR_zpzz, aa64_sve, sve_eor)
DO_ZPZZ(ORR_zpzz, aa64_sve, sve_orr)
DO_ZPZZ(BIC_zpzz, aa64_sve, sve_bic)

DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add)
DO_ZPZZ(SUB_zpzz, aa64_sve, sve_sub)

DO_ZPZZ(SMAX_zpzz, aa64_sve, sve_smax)
DO_ZPZZ(UMAX_zpzz, aa64_sve, sve_umax)
DO_ZPZZ(SMIN_zpzz, aa64_sve, sve_smin)
DO_ZPZZ(UMIN_zpzz, aa64_sve, sve_umin)
DO_ZPZZ(SABD_zpzz, aa64_sve, sve_sabd)
DO_ZPZZ(UABD_zpzz, aa64_sve, sve_uabd)

DO_ZPZZ(MUL_zpzz, aa64_sve, sve_mul)
DO_ZPZZ(SMULH_zpzz, aa64_sve, sve_smulh)
DO_ZPZZ(UMULH_zpzz, aa64_sve, sve_umulh)

DO_ZPZZ(ASR_zpzz, aa64_sve, sve_asr)
DO_ZPZZ(LSR_zpzz, aa64_sve, sve_lsr)
DO_ZPZZ(LSL_zpzz, aa64_sve, sve_lsl)

static gen_helper_gvec_4 * const sdiv_fns[4] = {
    NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
};
TRANS_FEAT(SDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, sdiv_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const udiv_fns[4] = {
    NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
};
TRANS_FEAT(UDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, udiv_fns[a->esz], a, 0)

TRANS_FEAT(SEL_zpzz, aa64_sve, do_sel_z, a->rd, a->rn, a->rm, a->pg, a->esz)
/*
 *** SVE Integer Arithmetic - Unary Predicated Group
 */

#define DO_ZPZ(NAME, FEAT, name) \
    static gen_helper_gvec_3 * const name##_fns[4] = {                  \
        gen_helper_##name##_b, gen_helper_##name##_h,                   \
        gen_helper_##name##_s, gen_helper_##name##_d,                   \
    };                                                                  \
    TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpz, name##_fns[a->esz], a, 0)

DO_ZPZ(CLS, aa64_sve, sve_cls)
DO_ZPZ(CLZ, aa64_sve, sve_clz)
DO_ZPZ(CNT_zpz, aa64_sve, sve_cnt_zpz)
DO_ZPZ(CNOT, aa64_sve, sve_cnot)
DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz)
DO_ZPZ(ABS, aa64_sve, sve_abs)
DO_ZPZ(NEG, aa64_sve, sve_neg)
DO_ZPZ(RBIT, aa64_sve, sve_rbit)

static gen_helper_gvec_3 * const fabs_fns[4] = {
    NULL,                  gen_helper_sve_fabs_h,
    gen_helper_sve_fabs_s, gen_helper_sve_fabs_d,
};
TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const fneg_fns[4] = {
    NULL,                  gen_helper_sve_fneg_h,
    gen_helper_sve_fneg_s, gen_helper_sve_fneg_d,
};
TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sxtb_fns[4] = {
    NULL,                  gen_helper_sve_sxtb_h,
    gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d,
};
TRANS_FEAT(SXTB, aa64_sve, gen_gvec_ool_arg_zpz, sxtb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const uxtb_fns[4] = {
    NULL,                  gen_helper_sve_uxtb_h,
    gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d,
};
TRANS_FEAT(UXTB, aa64_sve, gen_gvec_ool_arg_zpz, uxtb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sxth_fns[4] = {
    NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d
};
TRANS_FEAT(SXTH, aa64_sve, gen_gvec_ool_arg_zpz, sxth_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const uxth_fns[4] = {
    NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d
};
TRANS_FEAT(UXTH, aa64_sve, gen_gvec_ool_arg_zpz, uxth_fns[a->esz], a, 0)

TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_sxtw_d : NULL, a, 0)
TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0)
/*
 *** SVE Integer Reduction Group
 */

typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);
static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
                       gen_helper_gvec_reduc *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zn, t_pg;
    TCGv_i32 desc;
    TCGv_i64 temp;

    if (fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    fn(temp, t_zn, t_pg, desc);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);

    write_fp_dreg(s, a->rd, temp);
    tcg_temp_free_i64(temp);
    return true;
}

#define DO_VPZ(NAME, name) \
    static gen_helper_gvec_reduc * const name##_fns[4] = {               \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,            \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,            \
    };                                                                   \
    TRANS_FEAT(NAME, aa64_sve, do_vpz_ool, a, name##_fns[a->esz])

DO_VPZ(ORV, orv)
DO_VPZ(ANDV, andv)
DO_VPZ(EORV, eorv)

DO_VPZ(UADDV, uaddv)
DO_VPZ(SMAXV, smaxv)
DO_VPZ(UMAXV, umaxv)
DO_VPZ(SMINV, sminv)
DO_VPZ(UMINV, uminv)

static gen_helper_gvec_reduc * const saddv_fns[4] = {
    gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
    gen_helper_sve_saddv_s, NULL
};
TRANS_FEAT(SADDV, aa64_sve, do_vpz_ool, a, saddv_fns[a->esz])

#undef DO_VPZ
/*
 *** SVE Shift by Immediate - Predicated Group
 */

/*
 * Copy Zn into Zd, storing zeros into inactive elements.
 * If invert, store zeros into the active elements.
 */
static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
                        int esz, bool invert)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_movz_b, gen_helper_sve_movz_h,
        gen_helper_sve_movz_s, gen_helper_sve_movz_d,
    };
    return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
}

static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr,
                          gen_helper_gvec_3 * const fns[4])
{
    int max;

    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }

    /*
     * Shift by element size is architecturally valid.
     * For arithmetic right-shift, it's the same as by one less.
     * For logical shifts and ASRD, it is a zeroing operation.
     */
    max = 8 << a->esz;
    if (a->imm >= max) {
        if (asr) {
            a->imm = max - 1;
        } else {
            return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
        }
    }
    return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a);
}

static gen_helper_gvec_3 * const asr_zpzi_fns[4] = {
    gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h,
    gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d,
};
TRANS_FEAT(ASR_zpzi, aa64_sve, do_shift_zpzi, a, true, asr_zpzi_fns)

static gen_helper_gvec_3 * const lsr_zpzi_fns[4] = {
    gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h,
    gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d,
};
TRANS_FEAT(LSR_zpzi, aa64_sve, do_shift_zpzi, a, false, lsr_zpzi_fns)

static gen_helper_gvec_3 * const lsl_zpzi_fns[4] = {
    gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h,
    gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d,
};
TRANS_FEAT(LSL_zpzi, aa64_sve, do_shift_zpzi, a, false, lsl_zpzi_fns)

static gen_helper_gvec_3 * const asrd_fns[4] = {
    gen_helper_sve_asrd_b, gen_helper_sve_asrd_h,
    gen_helper_sve_asrd_s, gen_helper_sve_asrd_d,
};
TRANS_FEAT(ASRD, aa64_sve, do_shift_zpzi, a, false, asrd_fns)
static gen_helper_gvec_3 * const sqshl_zpzi_fns[4] = {
    gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
    gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
};
TRANS_FEAT(SQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : sqshl_zpzi_fns[a->esz], a)

static gen_helper_gvec_3 * const uqshl_zpzi_fns[4] = {
    gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
    gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
};
TRANS_FEAT(UQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : uqshl_zpzi_fns[a->esz], a)

static gen_helper_gvec_3 * const srshr_fns[4] = {
    gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
    gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
};
TRANS_FEAT(SRSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : srshr_fns[a->esz], a)

static gen_helper_gvec_3 * const urshr_fns[4] = {
    gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
    gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
};
TRANS_FEAT(URSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : urshr_fns[a->esz], a)

static gen_helper_gvec_3 * const sqshlu_fns[4] = {
    gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
    gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
};
TRANS_FEAT(SQSHLU, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : sqshlu_fns[a->esz], a)

/*
 *** SVE Bitwise Shift - Predicated Group
 */

#define DO_ZPZW(NAME, name) \
    static gen_helper_gvec_4 * const name##_zpzw_fns[4] = {               \
        gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h,   \
        gen_helper_sve_##name##_zpzw_s, NULL                              \
    };                                                                    \
    TRANS_FEAT(NAME##_zpzw, aa64_sve, gen_gvec_ool_arg_zpzz,              \
               a->esz < 0 ? NULL : name##_zpzw_fns[a->esz], a, 0)

DO_ZPZW(ASR, asr)
DO_ZPZW(LSR, lsr)
DO_ZPZW(LSL, lsl)

#undef DO_ZPZW
/*
 *** SVE Bitwise Shift - Unpredicated Group
 */

static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
                         void (*gvec_fn)(unsigned, uint32_t, uint32_t,
                                         int64_t, uint32_t, uint32_t))
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);

        /* Shift by element size is architecturally valid.  For
           arithmetic right-shift, it's the same as by one less.
           Otherwise it is a zeroing operation.  */
        if (a->imm >= 8 << a->esz) {
            if (asr) {
                a->imm = (8 << a->esz) - 1;
            } else {
                do_dupi_z(s, a->rd, 0);
                return true;
            }
        }
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
    }
    return true;
}

TRANS_FEAT(ASR_zzi, aa64_sve, do_shift_imm, a, true, tcg_gen_gvec_sari)
TRANS_FEAT(LSR_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shri)
TRANS_FEAT(LSL_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shli)

#define DO_ZZW(NAME, name) \
    static gen_helper_gvec_3 * const name##_zzw_fns[4] = {                \
        gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h,     \
        gen_helper_sve_##name##_zzw_s, NULL                               \
    };                                                                    \
    TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_arg_zzz,                      \
               name##_zzw_fns[a->esz], a, 0)

DO_ZZW(ASR_zzw, asr)
DO_ZZW(LSR_zzw, lsr)
DO_ZZW(LSL_zzw, lsl)

#undef DO_ZZW
/*
 *** SVE Integer Multiply-Add Group
 */

static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
                         gen_helper_gvec_5 *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->ra),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           vsz, vsz, 0, fn);
    }
    return true;
}

static gen_helper_gvec_5 * const mla_fns[4] = {
    gen_helper_sve_mla_b, gen_helper_sve_mla_h,
    gen_helper_sve_mla_s, gen_helper_sve_mla_d,
};
TRANS_FEAT(MLA, aa64_sve, do_zpzzz_ool, a, mla_fns[a->esz])

static gen_helper_gvec_5 * const mls_fns[4] = {
    gen_helper_sve_mls_b, gen_helper_sve_mls_h,
    gen_helper_sve_mls_s, gen_helper_sve_mls_d,
};
TRANS_FEAT(MLS, aa64_sve, do_zpzzz_ool, a, mls_fns[a->esz])
/*
 *** SVE Index Generation Group
 */

static bool do_index(DisasContext *s, int esz, int rd,
                     TCGv_i64 start, TCGv_i64 incr)
{
    unsigned vsz;
    TCGv_i32 desc;
    TCGv_ptr t_zd;

    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    t_zd = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    if (esz == 3) {
        gen_helper_sve_index_d(t_zd, start, incr, desc);
    } else {
        typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
        static index_fn * const fns[3] = {
            gen_helper_sve_index_b,
            gen_helper_sve_index_h,
            gen_helper_sve_index_s,
        };
        TCGv_i32 s32 = tcg_temp_new_i32();
        TCGv_i32 i32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(s32, start);
        tcg_gen_extrl_i64_i32(i32, incr);
        fns[esz](t_zd, s32, i32, desc);

        tcg_temp_free_i32(s32);
        tcg_temp_free_i32(i32);
    }
    tcg_temp_free_ptr(t_zd);
    return true;
}

TRANS_FEAT(INDEX_ii, aa64_sve, do_index, a->esz, a->rd,
           tcg_constant_i64(a->imm1), tcg_constant_i64(a->imm2))
TRANS_FEAT(INDEX_ir, aa64_sve, do_index, a->esz, a->rd,
           tcg_constant_i64(a->imm), cpu_reg(s, a->rm))
TRANS_FEAT(INDEX_ri, aa64_sve, do_index, a->esz, a->rd,
           cpu_reg(s, a->rn), tcg_constant_i64(a->imm))
TRANS_FEAT(INDEX_rr, aa64_sve, do_index, a->esz, a->rd,
           cpu_reg(s, a->rn), cpu_reg(s, a->rm))
/*
 *** SVE Stack Allocation Group
 */

static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
    }
    return true;
}

static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
    }
    return true;
}

static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
    }
    return true;
}
/*
 *** SVE Compute Vector Address Group
 */

static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
}

TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)

/*
 *** SVE Integer Misc - Unpredicated Group
 */

static gen_helper_gvec_2 * const fexpa_fns[4] = {
    NULL,                   gen_helper_sve_fexpa_h,
    gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
};
TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz,
           fexpa_fns[a->esz], a->rd, a->rn, 0)

static gen_helper_gvec_3 * const ftssel_fns[4] = {
    NULL,                    gen_helper_sve_ftssel_h,
    gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
};
TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0)
/*
 *** SVE Predicate Logical Operations Group
 */

static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
                          const GVecGen4 *gvec_op)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned psz = pred_gvec_reg_size(s);
    int dofs = pred_full_reg_offset(s, a->rd);
    int nofs = pred_full_reg_offset(s, a->rn);
    int mofs = pred_full_reg_offset(s, a->rm);
    int gofs = pred_full_reg_offset(s, a->pg);

    if (!a->s) {
        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        return true;
    }

    if (psz == 8) {
        /* Do the operation and the flags generation in temps. */
        TCGv_i64 pd = tcg_temp_new_i64();
        TCGv_i64 pn = tcg_temp_new_i64();
        TCGv_i64 pm = tcg_temp_new_i64();
        TCGv_i64 pg = tcg_temp_new_i64();

        tcg_gen_ld_i64(pn, cpu_env, nofs);
        tcg_gen_ld_i64(pm, cpu_env, mofs);
        tcg_gen_ld_i64(pg, cpu_env, gofs);

        gvec_op->fni8(pd, pn, pm, pg);
        tcg_gen_st_i64(pd, cpu_env, dofs);

        do_predtest1(pd, pg);

        tcg_temp_free_i64(pd);
        tcg_temp_free_i64(pn);
        tcg_temp_free_i64(pm);
        tcg_temp_free_i64(pg);
    } else {
        /* The operation and flags generation is large.  The computation
         * of the flags depends on the original contents of the guarding
         * predicate.  If the destination overwrites the guarding predicate,
         * then the easiest way to get this right is to save a copy.
         */
        int tofs = gofs;
        if (a->rd == a->pg) {
            tofs = offsetof(CPUARMState, vfp.preg_tmp);
            tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
        }

        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        do_predtest(s, dofs, tofs, psz / 8);
    }
    return true;
}
static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_and_pg_i64,
        .fniv = gen_and_pg_vec,
        .fno = gen_helper_sve_and_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s) {
        if (a->rn == a->rm) {
            if (a->pg == a->rn) {
                return do_mov_p(s, a->rd, a->rn);
            }
            return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
        } else if (a->pg == a->rn || a->pg == a->rm) {
            return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
        }
    }
    return do_pppp_flags(s, a, &op);
}
static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_andc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_andc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_bic_pg_i64,
        .fniv = gen_bic_pg_vec,
        .fno = gen_helper_sve_bic_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s && a->pg == a->rn) {
        return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_xor_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_xor_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor_pg_i64,
        .fniv = gen_eor_pg_vec,
        .fno = gen_helper_sve_eor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    /* Alias NOT (predicate) is EOR Pd.B, Pg/Z, Pn.B, Pg.B */
    if (!a->s && a->pg == a->rm) {
        return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->pg, a->rn);
    }
    return do_pppp_flags(s, a, &op);
}
static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
{
    if (a->s) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
                            pred_full_reg_offset(s, a->pg),
                            pred_full_reg_offset(s, a->rn),
                            pred_full_reg_offset(s, a->rm), psz, psz);
    }
    return true;
}

static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orr_pg_i64,
        .fniv = gen_orr_pg_vec,
        .fno = gen_helper_sve_orr_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!a->s && a->pg == a->rn && a->rn == a->rm) {
        return do_mov_p(s, a->rd, a->rn);
    }
    return do_pppp_flags(s, a, &op);
}
static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_orc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_orc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orn_pg_i64,
        .fniv = gen_orn_pg_vec,
        .fno = gen_helper_sve_orn_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}

static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nor_pg_i64,
        .fniv = gen_nor_pg_vec,
        .fno = gen_helper_sve_nor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}

static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                            TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nand_pg_i64,
        .fniv = gen_nand_pg_vec,
        .fno = gen_helper_sve_nand_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    return do_pppp_flags(s, a, &op);
}
/*
 *** SVE Predicate Misc Group
 */

static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
{
    if (sve_access_check(s)) {
        int nofs = pred_full_reg_offset(s, a->rn);
        int gofs = pred_full_reg_offset(s, a->pg);
        int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);

        if (words == 1) {
            TCGv_i64 pn = tcg_temp_new_i64();
            TCGv_i64 pg = tcg_temp_new_i64();

            tcg_gen_ld_i64(pn, cpu_env, nofs);
            tcg_gen_ld_i64(pg, cpu_env, gofs);
            do_predtest1(pn, pg);

            tcg_temp_free_i64(pn);
            tcg_temp_free_i64(pg);
        } else {
            do_predtest(s, nofs, gofs, words);
        }
    }
    return true;
}
/* See the ARM pseudocode DecodePredCount. */
static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
{
    unsigned elements = fullsz >> esz;
    unsigned bound;

    switch (pattern) {
    case 0x0: /* POW2 */
        return pow2floor(elements);
    case 0x1: /* VL1 */
    case 0x2: /* VL2 */
    case 0x3: /* VL3 */
    case 0x4: /* VL4 */
    case 0x5: /* VL5 */
    case 0x6: /* VL6 */
    case 0x7: /* VL7 */
    case 0x8: /* VL8 */
        bound = pattern;
        break;
    case 0x9: /* VL16 */
    case 0xa: /* VL32 */
    case 0xb: /* VL64 */
    case 0xc: /* VL128 */
    case 0xd: /* VL256 */
        bound = 16 << (pattern - 9);
        break;
    case 0x1d: /* MUL4 */
        return elements - elements % 4;
    case 0x1e: /* MUL3 */
        return elements - elements % 3;
    case 0x1f: /* ALL */
        return elements;
    default:   /* #uimm5 */
        return 0;
    }
    return elements >= bound ? bound : 0;
}
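/*
 * Editor's note, a worked example: with a 256-bit vector (fullsz = 32
 * bytes) and esz = 2 (words) there are 8 elements, so POW2 -> 8,
 * VL7 -> 7, VL16 -> 0 (the bound exceeds 8), MUL3 -> 6 and ALL -> 8.
 */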
/* This handles all of the predicate initialization instructions,
 * PTRUE, PFALSE, SETFFR.  For PFALSE, we will have set PAT == 32
 * so that decode_pred_count returns 0.  For SETFFR, we will have
 * set RD == 16 == FFR.
 */
static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned ofs = pred_full_reg_offset(s, rd);
    unsigned numelem, setsz, i;
    uint64_t word, lastword;
    TCGv_i64 t;

    numelem = decode_pred_count(fullsz, pat, esz);

    /* Determine what we must store into each bit, and how many. */
    if (numelem == 0) {
        lastword = word = 0;
        setsz = fullsz;
    } else {
        setsz = numelem << esz;
        lastword = word = pred_esz_masks[esz];
        if (setsz % 64) {
            lastword &= MAKE_64BIT_MASK(0, setsz % 64);
        }
    }

    t = tcg_temp_new_i64();
    if (fullsz <= 64) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs);
        goto done;
    }

    if (word == lastword) {
        unsigned maxsz = size_for_gvec(fullsz / 8);
        unsigned oprsz = size_for_gvec(setsz / 8);

        if (oprsz * 8 == setsz) {
            tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
            goto done;
        }
    }

    setsz /= 8;
    fullsz /= 8;

    tcg_gen_movi_i64(t, word);
    for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
        tcg_gen_st_i64(t, cpu_env, ofs + i);
    }
    if (lastword != word) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs + i);
        i += 8;
    }
    if (i < fullsz) {
        tcg_gen_movi_i64(t, 0);
        for (; i < fullsz; i += 8) {
            tcg_gen_st_i64(t, cpu_env, ofs + i);
        }
    }

 done:
    tcg_temp_free_i64(t);

    /* PTRUES */
    if (setflag) {
        tcg_gen_movi_i32(cpu_NF, -(word != 0));
        tcg_gen_movi_i32(cpu_CF, word == 0);
        tcg_gen_movi_i32(cpu_VF, 0);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    }
    return true;
}
TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)

/* Note pat == 31 is #all, to set all elements. */
TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false)

/* Note pat == 32 is #unimp, to set no elements. */
TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)

static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
{
    /* The path through do_pppp_flags is complicated enough to want to avoid
     * duplication.  Frob the arguments into the form of a predicated AND.
     */
    arg_rprr_s alt_a = {
        .rd = a->rd, .pg = a->pg, .s = a->s,
        .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
    };
    return trans_AND_pppp(s, &alt_a);
}

TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)

static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                            void (*gen_fn)(TCGv_i32, TCGv_ptr,
                                           TCGv_ptr, TCGv_i32))
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGv_ptr t_pd = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();
    TCGv_i32 t;
    unsigned desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);

    tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
    t = tcg_temp_new_i32();

    gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc));

    tcg_temp_free_ptr(t_pd);
    tcg_temp_free_ptr(t_pg);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
    return true;
}

TRANS_FEAT(PFIRST, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pfirst)
TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext)
/*
 *** SVE Element Count Group
 */

/* Perform an inline saturating addition of a 32-bit value within
 * a 64-bit register.  The second operand is known to be positive,
 * which halves the comparisons we must perform to bound the result.
 */
static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    int64_t ibound;

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (u) {
        tcg_gen_ext32u_i64(reg, reg);
    } else {
        tcg_gen_ext32s_i64(reg, reg);
    }
    if (d) {
        tcg_gen_sub_i64(reg, reg, val);
        ibound = (u ? 0 : INT32_MIN);
        tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound));
    } else {
        tcg_gen_add_i64(reg, reg, val);
        ibound = (u ? UINT32_MAX : INT32_MAX);
        tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound));
    }
}
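/*
 * Editor's note: this works because both operands fit comfortably in
 * 64 bits -- the 32-bit-extended register plus or minus a small
 * positive element count cannot wrap an i64 -- so a single signed
 * smax/smin against the relevant 32-bit bound is enough to saturate.
 */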
/* Similarly with 64-bit values. */
static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t2;

    if (u) {
        if (d) {
            tcg_gen_sub_i64(t0, reg, val);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0);
        } else {
            tcg_gen_add_i64(t0, reg, val);
            t2 = tcg_constant_i64(-1);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0);
        }
    } else {
        TCGv_i64 t1 = tcg_temp_new_i64();

        if (d) {
            /* Detect signed overflow for subtraction. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_sub_i64(t1, reg, val);
            tcg_gen_xor_i64(reg, reg, t1);
            tcg_gen_and_i64(t0, t0, reg);

            /* Bound the result. */
            tcg_gen_movi_i64(reg, INT64_MIN);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
        } else {
            /* Detect signed overflow for addition. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_add_i64(reg, reg, val);
            tcg_gen_xor_i64(t1, reg, val);
            tcg_gen_andc_i64(t0, t1, t0);

            /* Bound the result. */
            tcg_gen_movi_i64(t1, INT64_MAX);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
        }
        tcg_temp_free_i64(t1);
    }
    tcg_temp_free_i64(t0);
}
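/*
 * Editor's note: the unsigned cases saturate with a single LTU movcond
 * (borrow for subtraction, carry for addition).  The signed cases use
 * the classic xor trick: overflow occurred iff the result's sign
 * differs from the first operand's while the operands' signs make that
 * impossible without wraparound; the condition ends up in the sign bit
 * of t0, which the final movcond tests against zero.
 */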
/* Similarly with a vector and a scalar operand. */
static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
                              TCGv_i64 val, bool u, bool d)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr dptr, nptr;
    TCGv_i32 t32, desc;
    TCGv_i64 t64;

    dptr = tcg_temp_new_ptr();
    nptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
    tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));

    switch (esz) {
    case MO_8:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;
    case MO_16:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;
    case MO_32:
        t64 = tcg_temp_new_i64();
        if (d) {
            tcg_gen_neg_i64(t64, val);
        } else {
            tcg_gen_mov_i64(t64, val);
        }
        if (u) {
            gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc);
        } else {
            gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc);
        }
        tcg_temp_free_i64(t64);
        break;
    case MO_64:
        if (u) {
            if (d) {
                gen_helper_sve_uqsubi_d(dptr, nptr, val, desc);
            } else {
                gen_helper_sve_uqaddi_d(dptr, nptr, val, desc);
            }
        } else if (d) {
            t64 = tcg_temp_new_i64();
            tcg_gen_neg_i64(t64, val);
            gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc);
            tcg_temp_free_i64(t64);
        } else {
            gen_helper_sve_sqaddi_d(dptr, nptr, val, desc);
        }
        break;
    default:
        g_assert_not_reached();
    }

    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(nptr);
}
static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a)
{
    if (sve_access_check(s)) {
        unsigned fullsz = vec_full_reg_size(s);
        unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
        tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm);
    }
    return true;
}

static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a)
{
    if (sve_access_check(s)) {
        unsigned fullsz = vec_full_reg_size(s);
        unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
        int inc = numelem * a->imm * (a->d ? -1 : 1);
        TCGv_i64 reg = cpu_reg(s, a->rd);

        tcg_gen_addi_i64(reg, reg, inc);
    }
    return true;
}

static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;
    TCGv_i64 reg = cpu_reg(s, a->rd);

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (inc == 0) {
        if (a->u) {
            tcg_gen_ext32u_i64(reg, reg);
        } else {
            tcg_gen_ext32s_i64(reg, reg);
        }
    } else {
        do_sat_addsub_32(reg, tcg_constant_i64(inc), a->u, a->d);
    }
    return true;
}

static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;
    TCGv_i64 reg = cpu_reg(s, a->rd);

    if (inc != 0) {
        do_sat_addsub_64(reg, tcg_constant_i64(inc), a->u, a->d);
    }
    return true;
}

static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
{
    if (a->esz == 0) {
        return false;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;

    if (inc != 0) {
        if (sve_access_check(s)) {
            tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd),
                              vec_full_reg_offset(s, a->rn),
                              tcg_constant_i64(a->d ? -inc : inc),
                              fullsz, fullsz);
        }
    } else {
        do_mov_z(s, a->rd, a->rn);
    }
    return true;
}

static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
{
    if (a->esz == 0) {
        return false;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
    int inc = numelem * a->imm;

    if (inc != 0) {
        if (sve_access_check(s)) {
            do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
                              tcg_constant_i64(inc), a->u, a->d);
        }
    } else {
        do_mov_z(s, a->rd, a->rn);
    }
    return true;
}
/*
 *** SVE Bitwise Immediate Group
 */

static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn)
{
    uint64_t imm;

    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm);
}

TRANS_FEAT(AND_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_andi)
TRANS_FEAT(ORR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_ori)
TRANS_FEAT(EOR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_xori)

static bool trans_DUPM(DisasContext *s, arg_DUPM *a)
{
    uint64_t imm;

    if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
                                extract32(a->dbm, 0, 6),
                                extract32(a->dbm, 6, 6))) {
        return false;
    }
    if (sve_access_check(s)) {
        do_dupi_z(s, a->rd, imm);
    }
    return true;
}
/*
 *** SVE Integer Wide Immediate - Predicated Group
 */

/* Implement all merging copies.  This is used for CPY (immediate),
 * FCPY, CPY (scalar), CPY (SIMD&FP scalar).
 */
static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg,
                     TCGv_i64 val)
{
    typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
    static gen_cpy * const fns[4] = {
        gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h,
        gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d,
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    TCGv_ptr t_zd = tcg_temp_new_ptr();
    TCGv_ptr t_zn = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));

    fns[esz](t_zd, t_zn, t_pg, val, desc);

    tcg_temp_free_ptr(t_zd);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);
}

static bool trans_FCPY(DisasContext *s, arg_FCPY *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        /* Decode the VFP immediate.  */
        uint64_t imm = vfp_expand_imm(a->esz, a->imm);
        do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm));
    }
    return true;
}

static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a)
{
    if (sve_access_check(s)) {
        do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm));
    }
    return true;
}

static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a)
{
    static gen_helper_gvec_2i * const fns[4] = {
        gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h,
        gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d,
    };

    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
                            pred_full_reg_offset(s, a->pg),
                            tcg_constant_i64(a->imm),
                            vsz, vsz, 0, fns[a->esz]);
    }
    return true;
}
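/*
 * Note the difference in predication above: the do_cpy_m copies merge,
 * so the helpers need a source vector to supply the inactive elements,
 * while the cpy_z helpers zero inactive elements and therefore need
 * only the predicate and the immediate.
 */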
/*
 *** SVE Permute Extract Group
 */

static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = vec_full_reg_size(s);
    unsigned n_ofs = imm >= vsz ? 0 : imm;
    unsigned n_siz = vsz - n_ofs;
    unsigned d = vec_full_reg_offset(s, rd);
    unsigned n = vec_full_reg_offset(s, rn);
    unsigned m = vec_full_reg_offset(s, rm);

    /* Use host vector move insns if we have appropriate sizes
     * and no unfortunate overlap.
     */
    if (m != d
        && n_ofs == size_for_gvec(n_ofs)
        && n_siz == size_for_gvec(n_siz)
        && (d != n || n_siz <= n_ofs)) {
        tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz);
        if (n_ofs != 0) {
            tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs);
        }
    } else {
        tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext);
    }
    return true;
}

TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm)
TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm)
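/*
 * EXT extracts vsz bytes from the concatenation of the sources
 * starting at byte imm: e.g. with a 32-byte vector and imm == 8 the
 * result is Zn bytes [8..31] followed by Zm bytes [0..7], which is
 * exactly the pair of moves in the fast path above.  For the SVE2
 * constructive form, the second source is the register after Zn,
 * modulo 32.
 */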
/*
 *** SVE Permute - Unpredicated Group
 */

static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd),
                             vsz, vsz, cpu_reg_sp(s, a->rn));
    }
    return true;
}

static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
{
    if ((a->imm & 0x1f) == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        unsigned dofs = vec_full_reg_offset(s, a->rd);
        unsigned esz, index;

        esz = ctz32(a->imm);
        index = a->imm >> (esz + 1);

        if ((index << esz) < vsz) {
            unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
            tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
        } else {
            /*
             * While dup_mem handles 128-bit elements, dup_imm does not.
             * Thankfully element size doesn't matter for splatting zero.
             */
            tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
        }
    }
    return true;
}
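/*
 * The imm field of DUP_x concatenates the index with the tsz size
 * specifier: the lowest set bit gives the element size and the
 * remaining bits give the index.  E.g. imm == 0b100110 decodes as
 * esz = 1 with index = 9, i.e. DUP Zd.H, Zn.H[9]; an index beyond
 * the current vector length splats zero.
 */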
static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
{
    typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
    static gen_insr * const fns[4] = {
        gen_helper_sve_insr_b, gen_helper_sve_insr_h,
        gen_helper_sve_insr_s, gen_helper_sve_insr_d,
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    TCGv_ptr t_zd = tcg_temp_new_ptr();
    TCGv_ptr t_zn = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));

    fns[a->esz](t_zd, t_zn, val, desc);

    tcg_temp_free_ptr(t_zd);
    tcg_temp_free_ptr(t_zn);
}

static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 t = tcg_temp_new_i64();
        tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
        do_insr_i64(s, a, t);
        tcg_temp_free_i64(t);
    }
    return true;
}

static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a)
{
    if (sve_access_check(s)) {
        do_insr_i64(s, a, cpu_reg(s, a->rm));
    }
    return true;
}

static gen_helper_gvec_2 * const rev_fns[4] = {
    gen_helper_sve_rev_b, gen_helper_sve_rev_h,
    gen_helper_sve_rev_s, gen_helper_sve_rev_d
};
TRANS_FEAT(REV_v, aa64_sve, gen_gvec_ool_zz, rev_fns[a->esz], a->rd, a->rn, 0)

static gen_helper_gvec_3 * const sve_tbl_fns[4] = {
    gen_helper_sve_tbl_b, gen_helper_sve_tbl_h,
    gen_helper_sve_tbl_s, gen_helper_sve_tbl_d
};
TRANS_FEAT(TBL, aa64_sve, gen_gvec_ool_arg_zzz, sve_tbl_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const sve2_tbl_fns[4] = {
    gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h,
    gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d
};
TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz],
           a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0)

static gen_helper_gvec_3 * const tbx_fns[4] = {
    gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
    gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
};
TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0)

static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
{
    static gen_helper_gvec_2 * const fns[4][2] = {
        { NULL, NULL },
        { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h },
        { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s },
        { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d },
    };

    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn)
                           + (a->h ? vsz / 2 : 0),
                           vsz, vsz, 0, fns[a->esz][a->u]);
    }
    return true;
}
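/*
 * For UNPK, a->u selects the unsigned widening helper and a->h
 * selects which half of Zn is widened: the source offset above is
 * advanced by half the vector size for SUNPKHI/UUNPKHI.
 */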
/*
 *** SVE Permute - Predicates Group
 */

static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
                          gen_helper_gvec_3 *fn)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    TCGv_ptr t_d = tcg_temp_new_ptr();
    TCGv_ptr t_n = tcg_temp_new_ptr();
    TCGv_ptr t_m = tcg_temp_new_ptr();
    uint32_t desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);

    tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));

    fn(t_d, t_n, t_m, tcg_constant_i32(desc));

    tcg_temp_free_ptr(t_d);
    tcg_temp_free_ptr(t_n);
    tcg_temp_free_ptr(t_m);
    return true;
}

static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
                          gen_helper_gvec_2 *fn)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);
    TCGv_ptr t_d = tcg_temp_new_ptr();
    TCGv_ptr t_n = tcg_temp_new_ptr();
    uint32_t desc = 0;

    tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
    desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);

    fn(t_d, t_n, tcg_constant_i32(desc));

    tcg_temp_free_ptr(t_d);
    tcg_temp_free_ptr(t_n);
    return true;
}

TRANS_FEAT(ZIP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_zip_p)
TRANS_FEAT(ZIP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_zip_p)
TRANS_FEAT(UZP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_uzp_p)
TRANS_FEAT(UZP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_uzp_p)
TRANS_FEAT(TRN1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_trn_p)
TRANS_FEAT(TRN2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_trn_p)

TRANS_FEAT(REV_p, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_rev_p)
TRANS_FEAT(PUNPKLO, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_punpk_p)
TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p)
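/*
 * The high_odd argument travels in the DATA field of the descriptor;
 * it is what distinguishes ZIP1/UZP1/TRN1/PUNPKLO (0) from
 * ZIP2/UZP2/TRN2/PUNPKHI (1) within each shared helper.
 */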
/*
 *** SVE Permute - Interleaving Group
 */

static gen_helper_gvec_3 * const zip_fns[4] = {
    gen_helper_sve_zip_b, gen_helper_sve_zip_h,
    gen_helper_sve_zip_s, gen_helper_sve_zip_d,
};
TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
           zip_fns[a->esz], a, 0)
TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
           zip_fns[a->esz], a, vec_full_reg_size(s) / 2)

TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
           gen_helper_sve2_zip_q, a, 0)
TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
           gen_helper_sve2_zip_q, a,
           QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2)

static gen_helper_gvec_3 * const uzp_fns[4] = {
    gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
    gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
};

TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
           uzp_fns[a->esz], a, 0)
TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
           uzp_fns[a->esz], a, 1 << a->esz)

TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
           gen_helper_sve2_uzp_q, a, 0)
TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
           gen_helper_sve2_uzp_q, a, 16)

static gen_helper_gvec_3 * const trn_fns[4] = {
    gen_helper_sve_trn_b, gen_helper_sve_trn_h,
    gen_helper_sve_trn_s, gen_helper_sve_trn_d,
};

TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz,
           trn_fns[a->esz], a, 0)
TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz,
           trn_fns[a->esz], a, 1 << a->esz)

TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
           gen_helper_sve2_trn_q, a, 0)
TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
           gen_helper_sve2_trn_q, a, 16)
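/*
 * The _q forms permute 128-bit quadword elements and are gated on the
 * F64MM extension, hence the aa64_sve_f64mm feature tests above.
 */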
/*
 *** SVE Permute Vector - Predicated Group
 */

static gen_helper_gvec_3 * const compact_fns[4] = {
    NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
};
TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0)

/* Call the helper that computes the ARM LastActiveElement pseudocode
 * function, scaled by the element size.  This includes the not found
 * indication; e.g. not found for esz=3 is -8.
 */
static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
{
    /* Predicate sizes may be smaller and cannot use simd_desc.  We cannot
     * round up, as we do elsewhere, because we need the exact size.
     */
    TCGv_ptr t_p = tcg_temp_new_ptr();
    unsigned desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);

    tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));

    gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc));

    tcg_temp_free_ptr(t_p);
}

/* Increment LAST to the offset of the next element in the vector,
 * wrapping around to 0.
 */
static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz)
{
    unsigned vsz = vec_full_reg_size(s);

    tcg_gen_addi_i32(last, last, 1 << esz);
    if (is_power_of_2(vsz)) {
        tcg_gen_andi_i32(last, last, vsz - 1);
    } else {
        TCGv_i32 max = tcg_constant_i32(vsz);
        TCGv_i32 zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last);
    }
}

/* If LAST < 0, set LAST to the offset of the last element in the vector. */
static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz)
{
    unsigned vsz = vec_full_reg_size(s);

    if (is_power_of_2(vsz)) {
        tcg_gen_andi_i32(last, last, vsz - 1);
    } else {
        TCGv_i32 max = tcg_constant_i32(vsz - (1 << esz));
        TCGv_i32 zero = tcg_constant_i32(0);
        tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last);
    }
}
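/*
 * Both wrapping helpers must cope with a vector length that is not a
 * power of two (e.g. vsz == 48 bytes): the cheap AND-mask wrap only
 * works for power-of-2 sizes, otherwise a movcond performs the
 * explicit comparison against the bound.
 */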
/* Load an unsigned element of ESZ from BASE+OFS. */
static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz)
{
    TCGv_i64 r = tcg_temp_new_i64();

    switch (esz) {
    case 0:
        tcg_gen_ld8u_i64(r, base, ofs);
        break;
    case 1:
        tcg_gen_ld16u_i64(r, base, ofs);
        break;
    case 2:
        tcg_gen_ld32u_i64(r, base, ofs);
        break;
    case 3:
        tcg_gen_ld_i64(r, base, ofs);
        break;
    default:
        g_assert_not_reached();
    }
    return r;
}

/* Load an unsigned element of ESZ from RM[LAST]. */
static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
                                 int rm, int esz)
{
    TCGv_ptr p = tcg_temp_new_ptr();
    TCGv_i64 r;

    /* Convert offset into vector into offset into ENV.
     * The final adjustment for the vector register base
     * is added via constant offset to the load.
     */
#if HOST_BIG_ENDIAN
    /* Adjust for element ordering.  See vec_reg_offset.  */
    if (esz < 3) {
        tcg_gen_xori_i32(last, last, 8 - (1 << esz));
    }
#endif
    tcg_gen_ext_i32_ptr(p, last);
    tcg_gen_add_ptr(p, p, cpu_env);

    r = load_esz(p, vec_full_reg_offset(s, rm), esz);
    tcg_temp_free_ptr(p);

    return r;
}
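/*
 * On big-endian hosts the bytes within each 64-bit storage word are
 * reversed, so the XOR above flips the sub-word offset: e.g. for
 * esz == 0, the byte at little-endian offset k lives at k ^ 7.
 */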
/* Compute CLAST for a Zreg. */
static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
{
    TCGv_i32 last;
    TCGLabel *over;
    TCGv_i64 ele;
    unsigned vsz, esz = a->esz;

    if (!sve_access_check(s)) {
        return true;
    }

    last = tcg_temp_local_new_i32();
    over = gen_new_label();

    find_last_active(s, last, esz, a->pg);

    /* There is of course no movcond for a 2048-bit vector,
     * so we must branch over the actual store.
     */
    tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over);

    if (!before) {
        incr_last_active(s, last, esz);
    }

    ele = load_last_active(s, last, a->rm, esz);
    tcg_temp_free_i32(last);

    vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele);
    tcg_temp_free_i64(ele);

    /* If this insn used MOVPRFX, we may need a second move. */
    if (a->rd != a->rn) {
        TCGLabel *done = gen_new_label();
        tcg_gen_br(done);

        gen_set_label(over);
        do_mov_z(s, a->rd, a->rn);

        gen_set_label(done);
    } else {
        gen_set_label(over);
    }
    return true;
}

TRANS_FEAT(CLASTA_z, aa64_sve, do_clast_vector, a, false)
TRANS_FEAT(CLASTB_z, aa64_sve, do_clast_vector, a, true)

/* Compute CLAST for a scalar. */
static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
                            bool before, TCGv_i64 reg_val)
{
    TCGv_i32 last = tcg_temp_new_i32();
    TCGv_i64 ele, cmp;

    find_last_active(s, last, esz, pg);

    /* Extend the original value of last prior to incrementing. */
    cmp = tcg_temp_new_i64();
    tcg_gen_ext_i32_i64(cmp, last);

    if (!before) {
        incr_last_active(s, last, esz);
    }

    /* The conceit here is that while last < 0 indicates not found, after
     * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
     * from which we can load garbage.  We then discard the garbage with
     * a conditional move.
     */
    ele = load_last_active(s, last, rm, esz);
    tcg_temp_free_i32(last);

    tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0),
                        ele, reg_val);

    tcg_temp_free_i64(cmp);
    tcg_temp_free_i64(ele);
}

/* Compute CLAST for a Vreg. */
static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
{
    if (sve_access_check(s)) {
        int esz = a->esz;
        int ofs = vec_reg_offset(s, a->rd, 0, esz);
        TCGv_i64 reg = load_esz(cpu_env, ofs, esz);

        do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
        write_fp_dreg(s, a->rd, reg);
        tcg_temp_free_i64(reg);
    }
    return true;
}

TRANS_FEAT(CLASTA_v, aa64_sve, do_clast_fp, a, false)
TRANS_FEAT(CLASTB_v, aa64_sve, do_clast_fp, a, true)

/* Compute CLAST for a Xreg. */
static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before)
{
    TCGv_i64 reg;

    if (!sve_access_check(s)) {
        return true;
    }

    reg = cpu_reg(s, a->rd);
    switch (a->esz) {
    case 0:
        tcg_gen_ext8u_i64(reg, reg);
        break;
    case 1:
        tcg_gen_ext16u_i64(reg, reg);
        break;
    case 2:
        tcg_gen_ext32u_i64(reg, reg);
        break;
    case 3:
        break;
    default:
        g_assert_not_reached();
    }

    do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg);
    return true;
}

TRANS_FEAT(CLASTA_r, aa64_sve, do_clast_general, a, false)
TRANS_FEAT(CLASTB_r, aa64_sve, do_clast_general, a, true)

/* Compute LAST for a scalar. */
static TCGv_i64 do_last_scalar(DisasContext *s, int esz,
                               int pg, int rm, bool before)
{
    TCGv_i32 last = tcg_temp_new_i32();
    TCGv_i64 ret;

    find_last_active(s, last, esz, pg);
    if (before) {
        wrap_last_active(s, last, esz);
    } else {
        incr_last_active(s, last, esz);
    }

    ret = load_last_active(s, last, rm, esz);
    tcg_temp_free_i32(last);
    return ret;
}

/* Compute LAST for a Vreg. */
static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before)
{
    if (sve_access_check(s)) {
        TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
        write_fp_dreg(s, a->rd, val);
        tcg_temp_free_i64(val);
    }
    return true;
}

TRANS_FEAT(LASTA_v, aa64_sve, do_last_fp, a, false)
TRANS_FEAT(LASTB_v, aa64_sve, do_last_fp, a, true)

/* Compute LAST for a Xreg. */
static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before)
{
    if (sve_access_check(s)) {
        TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
        tcg_gen_mov_i64(cpu_reg(s, a->rd), val);
        tcg_temp_free_i64(val);
    }
    return true;
}

TRANS_FEAT(LASTA_r, aa64_sve, do_last_general, a, false)
TRANS_FEAT(LASTB_r, aa64_sve, do_last_general, a, true)

static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a)
{
    if (sve_access_check(s)) {
        do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn));
    }
    return true;
}

static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
{
    if (sve_access_check(s)) {
        int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
        TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
        do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
        tcg_temp_free_i64(t);
    }
    return true;
}

static gen_helper_gvec_3 * const revb_fns[4] = {
    NULL, gen_helper_sve_revb_h,
    gen_helper_sve_revb_s, gen_helper_sve_revb_d,
};
TRANS_FEAT(REVB, aa64_sve, gen_gvec_ool_arg_zpz, revb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const revh_fns[4] = {
    NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d,
};
TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)

TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)

TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
           gen_helper_sve_splice, a, a->esz)

TRANS_FEAT(SPLICE_sve2, aa64_sve2, gen_gvec_ool_zzzp, gen_helper_sve_splice,
           a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz)
/*
 *** SVE Integer Compare - Vectors Group
 */

static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
                          gen_helper_gvec_flags_4 *gen_fn)
{
    TCGv_ptr pd, zn, zm, pg;
    unsigned vsz;
    TCGv_i32 t;

    if (gen_fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    t = tcg_temp_new_i32();
    pd = tcg_temp_new_ptr();
    zn = tcg_temp_new_ptr();
    zm = tcg_temp_new_ptr();
    pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
    tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));

    gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0)));

    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(zn);
    tcg_temp_free_ptr(zm);
    tcg_temp_free_ptr(pg);

    do_pred_flags(t);

    tcg_temp_free_i32(t);
    return true;
}

#define DO_PPZZ(NAME, name) \
    static gen_helper_gvec_flags_4 * const name##_ppzz_fns[4] = {       \
        gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \
        gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \
    };                                                                  \
    TRANS_FEAT(NAME##_ppzz, aa64_sve, do_ppzz_flags,                    \
               a, name##_ppzz_fns[a->esz])

DO_PPZZ(CMPEQ, cmpeq)
DO_PPZZ(CMPNE, cmpne)
DO_PPZZ(CMPGT, cmpgt)
DO_PPZZ(CMPGE, cmpge)
DO_PPZZ(CMPHI, cmphi)
DO_PPZZ(CMPHS, cmphs)

#undef DO_PPZZ

#define DO_PPZW(NAME, name) \
    static gen_helper_gvec_flags_4 * const name##_ppzw_fns[4] = {       \
        gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \
        gen_helper_sve_##name##_ppzw_s, NULL                            \
    };                                                                  \
    TRANS_FEAT(NAME##_ppzw, aa64_sve, do_ppzz_flags,                    \
               a, name##_ppzw_fns[a->esz])

DO_PPZW(CMPEQ, cmpeq)
DO_PPZW(CMPNE, cmpne)
DO_PPZW(CMPGT, cmpgt)
DO_PPZW(CMPGE, cmpge)
DO_PPZW(CMPHI, cmphi)
DO_PPZW(CMPHS, cmphs)
DO_PPZW(CMPLT, cmplt)
DO_PPZW(CMPLE, cmple)
DO_PPZW(CMPLO, cmplo)
DO_PPZW(CMPLS, cmpls)

#undef DO_PPZW
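/*
 * The _ppzw comparisons are the "wide element" forms: each element of
 * Zn is compared against the 64-bit element of Zm that contains it,
 * which is why there is no _d entry in those function tables.
 */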
/*
 *** SVE Integer Compare - Immediate Groups
 */

static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
                          gen_helper_gvec_flags_3 *gen_fn)
{
    TCGv_ptr pd, zn, pg;
    unsigned vsz;
    TCGv_i32 t;

    if (gen_fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    t = tcg_temp_new_i32();
    pd = tcg_temp_new_ptr();
    zn = tcg_temp_new_ptr();
    pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));

    gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm)));

    tcg_temp_free_ptr(pd);
    tcg_temp_free_ptr(zn);
    tcg_temp_free_ptr(pg);

    do_pred_flags(t);

    tcg_temp_free_i32(t);
    return true;
}

#define DO_PPZI(NAME, name) \
    static gen_helper_gvec_flags_3 * const name##_ppzi_fns[4] = {       \
        gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \
        gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \
    };                                                                  \
    TRANS_FEAT(NAME##_ppzi, aa64_sve, do_ppzi_flags, a,                 \
               name##_ppzi_fns[a->esz])

DO_PPZI(CMPEQ, cmpeq)
DO_PPZI(CMPNE, cmpne)
DO_PPZI(CMPGT, cmpgt)
DO_PPZI(CMPGE, cmpge)
DO_PPZI(CMPHI, cmphi)
DO_PPZI(CMPHS, cmphs)
DO_PPZI(CMPLT, cmplt)
DO_PPZI(CMPLE, cmple)
DO_PPZI(CMPLO, cmplo)
DO_PPZI(CMPLS, cmpls)

#undef DO_PPZI
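/*
 * For the immediate forms, the comparison value rides to the helper
 * in the data field of simd_desc, as set up in do_ppzi_flags above.
 */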
/*
 *** SVE Partition Break Group
 */

static bool do_brk3(DisasContext *s, arg_rprr_s *a,
                    gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    /* Predicate sizes may be smaller and cannot use simd_desc. */
    TCGv_ptr d = tcg_temp_new_ptr();
    TCGv_ptr n = tcg_temp_new_ptr();
    TCGv_ptr m = tcg_temp_new_ptr();
    TCGv_ptr g = tcg_temp_new_ptr();
    TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));

    tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
    tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));

    if (a->s) {
        TCGv_i32 t = tcg_temp_new_i32();
        fn_s(t, d, n, m, g, desc);
        do_pred_flags(t);
        tcg_temp_free_i32(t);
    } else {
        fn(d, n, m, g, desc);
    }
    tcg_temp_free_ptr(d);
    tcg_temp_free_ptr(n);
    tcg_temp_free_ptr(m);
    tcg_temp_free_ptr(g);
    return true;
}

static bool do_brk2(DisasContext *s, arg_rpr_s *a,
                    gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned vsz = pred_full_reg_size(s);

    /* Predicate sizes may be smaller and cannot use simd_desc. */
    TCGv_ptr d = tcg_temp_new_ptr();
    TCGv_ptr n = tcg_temp_new_ptr();
    TCGv_ptr g = tcg_temp_new_ptr();
    TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));

    tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));

    if (a->s) {
        TCGv_i32 t = tcg_temp_new_i32();
        fn_s(t, d, n, g, desc);
        do_pred_flags(t);
        tcg_temp_free_i32(t);
    } else {
        fn(d, n, g, desc);
    }
    tcg_temp_free_ptr(d);
    tcg_temp_free_ptr(n);
    tcg_temp_free_ptr(g);
    return true;
}

TRANS_FEAT(BRKPA, aa64_sve, do_brk3, a,
           gen_helper_sve_brkpa, gen_helper_sve_brkpas)
TRANS_FEAT(BRKPB, aa64_sve, do_brk3, a,
           gen_helper_sve_brkpb, gen_helper_sve_brkpbs)

TRANS_FEAT(BRKA_m, aa64_sve, do_brk2, a,
           gen_helper_sve_brka_m, gen_helper_sve_brkas_m)
TRANS_FEAT(BRKB_m, aa64_sve, do_brk2, a,
           gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m)

TRANS_FEAT(BRKA_z, aa64_sve, do_brk2, a,
           gen_helper_sve_brka_z, gen_helper_sve_brkas_z)
TRANS_FEAT(BRKB_z, aa64_sve, do_brk2, a,
           gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z)

TRANS_FEAT(BRKN, aa64_sve, do_brk2, a,
           gen_helper_sve_brkn, gen_helper_sve_brkns)
/*
 *** SVE Predicate Count Group
 */

static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
{
    unsigned psz = pred_full_reg_size(s);

    if (psz <= 8) {
        uint64_t psz_mask;

        tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
        if (pn != pg) {
            TCGv_i64 g = tcg_temp_new_i64();
            tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
            tcg_gen_and_i64(val, val, g);
            tcg_temp_free_i64(g);
        }

        /* Reduce the pred_esz_masks value simply to reduce the
         * size of the code generated here.
         */
        psz_mask = MAKE_64BIT_MASK(0, psz * 8);
        tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask);

        tcg_gen_ctpop_i64(val, val);
    } else {
        TCGv_ptr t_pn = tcg_temp_new_ptr();
        TCGv_ptr t_pg = tcg_temp_new_ptr();
        unsigned desc = 0;

        desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
        desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);

        tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
        tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));

        gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc));
        tcg_temp_free_ptr(t_pn);
        tcg_temp_free_ptr(t_pg);
    }
}
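/*
 * In the small-predicate fast path above, pred_esz_masks[esz] keeps
 * one significant bit per element: e.g. for esz == 2 (words) only
 * every fourth predicate bit matters, so the AND with
 * 0x1111111111111111 followed by ctpop yields the active element
 * count directly.
 */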
static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
{
    if (sve_access_check(s)) {
        do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg);
    }
    return true;
}

static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        TCGv_i64 val = tcg_temp_new_i64();

        do_cntp(s, val, a->esz, a->pg, a->pg);
        if (a->d) {
            tcg_gen_sub_i64(reg, reg, val);
        } else {
            tcg_gen_add_i64(reg, reg, val);
        }
        tcg_temp_free_i64(val);
    }
    return true;
}

static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_i64 val = tcg_temp_new_i64();
        GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds;

        do_cntp(s, val, a->esz, a->pg, a->pg);
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), val, vsz, vsz);
    }
    return true;
}

static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        TCGv_i64 val = tcg_temp_new_i64();

        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_32(reg, val, a->u, a->d);
    }
    return true;
}

static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a)
{
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        TCGv_i64 val = tcg_temp_new_i64();

        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_64(reg, val, a->u, a->d);
    }
    return true;
}

static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 val = tcg_temp_new_i64();
        do_cntp(s, val, a->esz, a->pg, a->pg);
        do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d);
    }
    return true;
}
/*
 *** SVE Integer Compare Scalars Group
 */

static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ);
    TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf);
    TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf);
    TCGv_i64 cmp = tcg_temp_new_i64();

    tcg_gen_setcond_i64(cond, cmp, rn, rm);
    tcg_gen_extrl_i64_i32(cpu_NF, cmp);
    tcg_temp_free_i64(cmp);

    /* VF = !NF & !CF. */
    tcg_gen_xori_i32(cpu_VF, cpu_NF, 1);
    tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF);

    /* Both NF and VF actually look at bit 31. */
    tcg_gen_neg_i32(cpu_NF, cpu_NF);
    tcg_gen_neg_i32(cpu_VF, cpu_VF);
    return true;
}
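/*
 * Note that CTERM updates only N and V: Z is untouched and C is read
 * but not written, so flags left behind by a previous flag-setting
 * predicate operation participate in the computed V.
 */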
static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
{
    TCGv_i64 op0, op1, t0, t1, tmax;
    TCGv_i32 t2;
    TCGv_ptr ptr;
    unsigned vsz = vec_full_reg_size(s);
    unsigned desc = 0;
    TCGCond cond;
    uint64_t maxval;
    /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
    bool eq = a->eq == a->lt;

    /* The greater-than conditions are all SVE2. */
    if (!a->lt && !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    op0 = read_cpu_reg(s, a->rn, 1);
    op1 = read_cpu_reg(s, a->rm, 1);

    if (!a->sf) {
        if (a->u) {
            tcg_gen_ext32u_i64(op0, op0);
            tcg_gen_ext32u_i64(op1, op1);
        } else {
            tcg_gen_ext32s_i64(op0, op0);
            tcg_gen_ext32s_i64(op1, op1);
        }
    }

    /* For the helper, compress the different conditions into a computation
     * of how many iterations for which the condition is true.
     */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    if (a->lt) {
        tcg_gen_sub_i64(t0, op1, op0);
        if (a->u) {
            maxval = a->sf ? UINT64_MAX : UINT32_MAX;
            cond = eq ? TCG_COND_LEU : TCG_COND_LTU;
        } else {
            maxval = a->sf ? INT64_MAX : INT32_MAX;
            cond = eq ? TCG_COND_LE : TCG_COND_LT;
        }
    } else {
        tcg_gen_sub_i64(t0, op0, op1);
        if (a->u) {
            maxval = 0;
            cond = eq ? TCG_COND_GEU : TCG_COND_GTU;
        } else {
            maxval = a->sf ? INT64_MIN : INT32_MIN;
            cond = eq ? TCG_COND_GE : TCG_COND_GT;
        }
    }

    tmax = tcg_constant_i64(vsz >> a->esz);
    if (eq) {
        /* Equality means one more iteration. */
        tcg_gen_addi_i64(t0, t0, 1);

        /*
         * For the less-than while, if op1 is maxval (and the only time
         * the addition above could overflow), then we produce an all-true
         * predicate by setting the count to the vector length.  This is
         * because the pseudocode is described as an increment + compare
         * loop, and the maximum integer would always compare true.
         * Similarly, the greater-than while has the same issue with the
         * minimum integer due to the decrement + compare loop.
         */
        tcg_gen_movi_i64(t1, maxval);
        tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
    }

    /* Bound to the maximum. */
    tcg_gen_umin_i64(t0, t0, tmax);

    /* Set the count to zero if the condition is false. */
    tcg_gen_movi_i64(t1, 0);
    tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
    tcg_temp_free_i64(t1);

    /* Since we're bounded, pass as a 32-bit type. */
    t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, t0);
    tcg_temp_free_i64(t0);

    /* Scale elements to bits. */
    tcg_gen_shli_i32(t2, t2, a->esz);

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);

    ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));

    if (a->lt) {
        gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
    } else {
        gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc));
    }
    do_pred_flags(t2);

    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i32(t2);
    return true;
}
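/*
 * Worked example for the count computation above: WHILELT with
 * op0 == 5 and op1 == 9 gives t0 = 4, for the four elements 5, 6, 7
 * and 8 that satisfy the condition; WHILELE would add one more.  The
 * count is then clamped to the number of elements in the vector and
 * forced to zero when the condition fails outright.
 */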
static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
{
    TCGv_i64 op0, op1, diff, t1, tmax;
    TCGv_i32 t2;
    TCGv_ptr ptr;
    unsigned vsz = vec_full_reg_size(s);
    unsigned desc = 0;

    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    op0 = read_cpu_reg(s, a->rn, 1);
    op1 = read_cpu_reg(s, a->rm, 1);

    tmax = tcg_constant_i64(vsz);
    diff = tcg_temp_new_i64();

    if (a->rw) {
        /* WHILERW */
        /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */
        t1 = tcg_temp_new_i64();
        tcg_gen_sub_i64(diff, op0, op1);
        tcg_gen_sub_i64(t1, op1, op0);
        tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
        tcg_temp_free_i64(t1);
        /* Round down to a multiple of ESIZE. */
        tcg_gen_andi_i64(diff, diff, -1 << a->esz);
        /* If op1 == op0, diff == 0, and the condition is always true. */
        tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
    } else {
        /* WHILEWR */
        tcg_gen_sub_i64(diff, op1, op0);
        /* Round down to a multiple of ESIZE. */
        tcg_gen_andi_i64(diff, diff, -1 << a->esz);
        /* If op0 >= op1, diff <= 0, the condition is always true. */
        tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
    }

    /* Bound to the maximum. */
    tcg_gen_umin_i64(diff, diff, tmax);

    /* Since we're bounded, pass as a 32-bit type. */
    t2 = tcg_temp_new_i32();
    tcg_gen_extrl_i64_i32(t2, diff);
    tcg_temp_free_i64(diff);

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);

    ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));

    gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
    do_pred_flags(t2);

    tcg_temp_free_ptr(ptr);
    tcg_temp_free_i32(t2);
    return true;
}
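/*
 * For WHILERW/WHILEWR the computed count is already a byte distance,
 * matching one predicate bit per byte.  An alias distance of zero,
 * e.g. WHILEWR with op0 == op1 where there is no hazard, is turned
 * into an all-true predicate by the movcond against tmax above.
 */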
/*
 *** SVE Integer Wide Immediate - Unpredicated Group
 */

static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
{
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        int dofs = vec_full_reg_offset(s, a->rd);
        uint64_t imm;

        /* Decode the VFP immediate. */
        imm = vfp_expand_imm(a->esz, a->imm);
        tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
    }
    return true;
}

static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        int dofs = vec_full_reg_offset(s, a->rd);
        tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
    }
    return true;
}

TRANS_FEAT(ADD_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_addi, a)

static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
{
    a->imm = -a->imm;
    return trans_ADD_zzi(s, a);
}

static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
    static const GVecGen2s op[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_sve_subri_b,
          .opt_opc = vecop_list,
          .vece = MO_8,
          .scalar_first = true },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_sve_subri_h,
          .opt_opc = vecop_list,
          .vece = MO_16,
          .scalar_first = true },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_sve_subri_s,
          .opt_opc = vecop_list,
          .vece = MO_32,
          .scalar_first = true },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_sve_subri_d,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64,
          .scalar_first = true }
    };

    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd),
                        vec_full_reg_offset(s, a->rn),
                        vsz, vsz, tcg_constant_i64(a->imm), &op[a->esz]);
    }
    return true;
}
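/*
 * The scalar_first flag in the GVecGen2s descriptors above is what
 * makes this a reversed subtract: each element computes
 * imm - Zn[i] rather than Zn[i] - imm.
 */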
TRANS_FEAT(MUL_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_muli, a)

static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d)
{
    if (sve_access_check(s)) {
        do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
                          tcg_constant_i64(a->imm), u, d);
    }
    return true;
}

TRANS_FEAT(SQADD_zzi, aa64_sve, do_zzi_sat, a, false, false)
TRANS_FEAT(UQADD_zzi, aa64_sve, do_zzi_sat, a, true, false)
TRANS_FEAT(SQSUB_zzi, aa64_sve, do_zzi_sat, a, false, true)
TRANS_FEAT(UQSUB_zzi, aa64_sve, do_zzi_sat, a, true, true)

static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
                            vec_full_reg_offset(s, a->rn),
                            tcg_constant_i64(a->imm), vsz, vsz, 0, fn);
    }
    return true;
}

#define DO_ZZI(NAME, name) \
    static gen_helper_gvec_2i * const name##i_fns[4] = {                \
        gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h,         \
        gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d,         \
    };                                                                  \
    TRANS_FEAT(NAME##_zzi, aa64_sve, do_zzi_ool, a, name##i_fns[a->esz])

DO_ZZI(SMAX, smax)
DO_ZZI(UMAX, umax)
DO_ZZI(SMIN, smin)
DO_ZZI(UMIN, umin)

#undef DO_ZZI

static gen_helper_gvec_4 * const dot_fns[2][2] = {
    { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
    { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
};
TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz,
           dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0)

/*
 * SVE Multiply - Indexed
 */

TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
           gen_helper_gvec_sdot_idx_b, a)
TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
           gen_helper_gvec_sdot_idx_h, a)
TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
           gen_helper_gvec_udot_idx_b, a)
TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
           gen_helper_gvec_udot_idx_h, a)

TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
           gen_helper_gvec_sudot_idx_b, a)
TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
           gen_helper_gvec_usdot_idx_b, a)

#define DO_SVE2_RRX(NAME, FUNC) \
    TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC,          \
               a->rd, a->rn, a->rm, a->index)

DO_SVE2_RRX(MUL_zzx_h, gen_helper_gvec_mul_idx_h)
DO_SVE2_RRX(MUL_zzx_s, gen_helper_gvec_mul_idx_s)
DO_SVE2_RRX(MUL_zzx_d, gen_helper_gvec_mul_idx_d)

DO_SVE2_RRX(SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
DO_SVE2_RRX(SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
DO_SVE2_RRX(SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)

DO_SVE2_RRX(SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
DO_SVE2_RRX(SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
DO_SVE2_RRX(SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)

#undef DO_SVE2_RRX

#define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
    TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC,          \
               a->rd, a->rn, a->rm, (a->index << 1) | TOP)

DO_SVE2_RRX_TB(SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false)
DO_SVE2_RRX_TB(SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false)
DO_SVE2_RRX_TB(SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true)
DO_SVE2_RRX_TB(SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true)

DO_SVE2_RRX_TB(SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false)
DO_SVE2_RRX_TB(SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false)
DO_SVE2_RRX_TB(SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true)
DO_SVE2_RRX_TB(SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true)

DO_SVE2_RRX_TB(UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false)
DO_SVE2_RRX_TB(UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false)
DO_SVE2_RRX_TB(UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true)
DO_SVE2_RRX_TB(UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true)

#undef DO_SVE2_RRX_TB

#define DO_SVE2_RRXR(NAME, FUNC) \
    TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzxz, FUNC, a)

DO_SVE2_RRXR(MLA_zzxz_h, gen_helper_gvec_mla_idx_h)
DO_SVE2_RRXR(MLA_zzxz_s, gen_helper_gvec_mla_idx_s)
DO_SVE2_RRXR(MLA_zzxz_d, gen_helper_gvec_mla_idx_d)

DO_SVE2_RRXR(MLS_zzxz_h, gen_helper_gvec_mls_idx_h)
DO_SVE2_RRXR(MLS_zzxz_s, gen_helper_gvec_mls_idx_s)
DO_SVE2_RRXR(MLS_zzxz_d, gen_helper_gvec_mls_idx_d)

DO_SVE2_RRXR(SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h)
DO_SVE2_RRXR(SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s)
DO_SVE2_RRXR(SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d)

DO_SVE2_RRXR(SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h)
DO_SVE2_RRXR(SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s)
DO_SVE2_RRXR(SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d)

#undef DO_SVE2_RRXR

#define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \
    TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC,        \
               a->rd, a->rn, a->rm, a->ra, (a->index << 1) | TOP)

DO_SVE2_RRXR_TB(SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false)
DO_SVE2_RRXR_TB(SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false)
DO_SVE2_RRXR_TB(SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true)
DO_SVE2_RRXR_TB(SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true)

DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false)
DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false)
DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true)
DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true)

DO_SVE2_RRXR_TB(SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false)
DO_SVE2_RRXR_TB(SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false)
DO_SVE2_RRXR_TB(SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true)
DO_SVE2_RRXR_TB(SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true)

DO_SVE2_RRXR_TB(UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false)
DO_SVE2_RRXR_TB(UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false)
DO_SVE2_RRXR_TB(UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true)
DO_SVE2_RRXR_TB(UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true)

DO_SVE2_RRXR_TB(SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false)
DO_SVE2_RRXR_TB(SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false)
DO_SVE2_RRXR_TB(SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true)
DO_SVE2_RRXR_TB(SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true)

DO_SVE2_RRXR_TB(UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false)
DO_SVE2_RRXR_TB(UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false)
DO_SVE2_RRXR_TB(UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true)
DO_SVE2_RRXR_TB(UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true)

#undef DO_SVE2_RRXR_TB

#define DO_SVE2_RRXR_ROT(NAME, FUNC) \
    TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC,        \
               a->rd, a->rn, a->rm, a->ra, (a->index << 2) | a->rot)

DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h)
DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s)

DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h)
DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s)

DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s)
DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d)

#undef DO_SVE2_RRXR_ROT
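/*
 * In the indexed macro families above, the extra operand data is
 * packed into the descriptor for the helper to unpack: the _TB forms
 * place the top/bottom selector in the bit below the index, and the
 * _ROT forms place the 2-bit rotation below the index.
 */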
/*
 *** SVE Floating Point Multiply-Add Indexed Group
 */

static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub)
{
    static gen_helper_gvec_4_ptr * const fns[4] = {
        NULL,
        gen_helper_gvec_fmla_idx_h,
        gen_helper_gvec_fmla_idx_s,
        gen_helper_gvec_fmla_idx_d,
    };
    return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra,
                              (a->index << 1) | sub,
                              a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false)
TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true)

/*
 *** SVE Floating Point Multiply Indexed Group
 */

static bool trans_FMUL_zzx(DisasContext *s, arg_FMUL_zzx *a)
{
    static gen_helper_gvec_3_ptr * const fns[3] = {
        gen_helper_gvec_fmul_idx_h,
        gen_helper_gvec_fmul_idx_s,
        gen_helper_gvec_fmul_idx_d,
    };

    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           status, vsz, vsz, a->index, fns[a->esz - 1]);
        tcg_temp_free_ptr(status);
    }
    return true;
}
/*
 *** SVE Floating Point Fast Reduction Group
 */

typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr,
                                  TCGv_ptr, TCGv_i32);

static void do_reduce(DisasContext *s, arg_rpr_esz *a,
                      gen_helper_fp_reduce *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    unsigned p2vsz = pow2ceil(vsz);
    TCGv_i32 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, p2vsz));
    TCGv_ptr t_zn, t_pg, status;
    TCGv_i64 temp;

    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    fn(temp, t_zn, t_pg, status, t_desc);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_ptr(status);

    write_fp_dreg(s, a->rd, temp);
    tcg_temp_free_i64(temp);
}
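/*
 * The pow2ceil'd size is passed in the descriptor data so that the
 * helper can perform the reduction as a balanced pairwise tree over a
 * power-of-two number of elements, padding the tail as needed.
 */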
#define DO_VPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a)           \
{                                                                   \
    static gen_helper_fp_reduce * const fns[3] = {                  \
        gen_helper_sve_##name##_h,                                  \
        gen_helper_sve_##name##_s,                                  \
        gen_helper_sve_##name##_d,                                  \
    };                                                              \
    if (a->esz == 0) {                                              \
        return false;                                               \
    }                                                               \
    if (sve_access_check(s)) {                                      \
        do_reduce(s, a, fns[a->esz - 1]);                           \
    }                                                               \
    return true;                                                    \
}

DO_VPZ(FADDV, faddv)
DO_VPZ(FMINNMV, fminnmv)
DO_VPZ(FMAXNMV, fmaxnmv)
DO_VPZ(FMINV, fminv)
DO_VPZ(FMAXV, fmaxv)

/*
 *** SVE Floating Point Unary Operations - Unpredicated Group
 */

static void do_zz_fp(DisasContext *s, arg_rr_esz *a, gen_helper_gvec_2_ptr *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, a->rd),
                       vec_full_reg_offset(s, a->rn),
                       status, vsz, vsz, 0, fn);
    tcg_temp_free_ptr(status);
}

static bool trans_FRECPE(DisasContext *s, arg_rr_esz *a)
{
    static gen_helper_gvec_2_ptr * const fns[3] = {
        gen_helper_gvec_frecpe_h,
        gen_helper_gvec_frecpe_s,
        gen_helper_gvec_frecpe_d,
    };
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        do_zz_fp(s, a, fns[a->esz - 1]);
    }
    return true;
}

static bool trans_FRSQRTE(DisasContext *s, arg_rr_esz *a)
{
    static gen_helper_gvec_2_ptr * const fns[3] = {
        gen_helper_gvec_frsqrte_h,
        gen_helper_gvec_frsqrte_s,
        gen_helper_gvec_frsqrte_d,
    };
    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        do_zz_fp(s, a, fns[a->esz - 1]);
    }
    return true;
}

/*
 *** SVE Floating Point Compare with Zero Group
 */

static void do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
                      gen_helper_gvec_3_ptr *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);

    tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
                       vec_full_reg_offset(s, a->rn),
                       pred_full_reg_offset(s, a->pg),
                       status, vsz, vsz, 0, fn);
    tcg_temp_free_ptr(status);
}

#define DO_PPZ(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rpr_esz *a)           \
{                                                                   \
    static gen_helper_gvec_3_ptr * const fns[3] = {                 \
        gen_helper_sve_##name##_h,                                  \
        gen_helper_sve_##name##_s,                                  \
        gen_helper_sve_##name##_d,                                  \
    };                                                              \
    if (a->esz == 0) {                                              \
        return false;                                               \
    }                                                               \
    if (sve_access_check(s)) {                                      \
        do_ppz_fp(s, a, fns[a->esz - 1]);                           \
    }                                                               \
    return true;                                                    \
}

DO_PPZ(FCMGE_ppz0, fcmge0)
DO_PPZ(FCMGT_ppz0, fcmgt0)
DO_PPZ(FCMLE_ppz0, fcmle0)
DO_PPZ(FCMLT_ppz0, fcmlt0)
DO_PPZ(FCMEQ_ppz0, fcmeq0)
DO_PPZ(FCMNE_ppz0, fcmne0)

#undef DO_PPZ
/*
 *** SVE floating-point trig multiply-add coefficient
 */

static bool trans_FTMAD(DisasContext *s, arg_FTMAD *a)
{
    static gen_helper_gvec_3_ptr * const fns[3] = {
        gen_helper_sve_ftmad_h,
        gen_helper_sve_ftmad_s,
        gen_helper_sve_ftmad_d,
    };

    if (a->esz == 0) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           status, vsz, vsz, a->imm, fns[a->esz - 1]);
        tcg_temp_free_ptr(status);
    }
    return true;
}

/*
 *** SVE Floating Point Accumulating Reduction Group
 */

static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
{
    typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr,
                          TCGv_ptr, TCGv_ptr, TCGv_i32);
    static fadda_fn * const fns[3] = {
        gen_helper_sve_fadda_h,
        gen_helper_sve_fadda_s,
        gen_helper_sve_fadda_d,
    };
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_rm, t_pg, t_fpst;
    TCGv_i64 t_val;
    TCGv_i32 t_desc;

    if (a->esz == 0) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
    t_rm = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
    t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));

    fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc);

    tcg_temp_free_ptr(t_fpst);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_ptr(t_rm);

    write_fp_dreg(s, a->rd, t_val);
    tcg_temp_free_i64(t_val);
    return true;
}
/*
 *** SVE Floating Point Arithmetic - Unpredicated Group
 */

#define DO_FP3(NAME, name) \
    static gen_helper_gvec_3_ptr * const name##_fns[4] = {          \
        NULL, gen_helper_gvec_##name##_h,                           \
        gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d      \
    };                                                              \
    TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0)

DO_FP3(FADD_zzz, fadd)
DO_FP3(FSUB_zzz, fsub)
DO_FP3(FMUL_zzz, fmul)
DO_FP3(FTSMUL, ftsmul)
DO_FP3(FRECPS, recps)
DO_FP3(FRSQRTS, rsqrts)

#undef DO_FP3

/*
 *** SVE Floating Point Arithmetic - Predicated Group
 */

static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
                       gen_helper_gvec_4_ptr *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, 0, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

#define DO_FP3(NAME, name) \
static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a)          \
{                                                                   \
    static gen_helper_gvec_4_ptr * const fns[4] = {                 \
        NULL, gen_helper_sve_##name##_h,                            \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d        \
    };                                                              \
    return do_zpzz_fp(s, a, fns[a->esz]);                           \
}

DO_FP3(FADD_zpzz, fadd)
DO_FP3(FSUB_zpzz, fsub)
DO_FP3(FMUL_zpzz, fmul)
DO_FP3(FMIN_zpzz, fmin)
DO_FP3(FMAX_zpzz, fmax)
DO_FP3(FMINNM_zpzz, fminnum)
DO_FP3(FMAXNM_zpzz, fmaxnum)
DO_FP3(FABD, fabd)
DO_FP3(FSCALE, fscalbn)
DO_FP3(FDIV, fdiv)
DO_FP3(FMULX, fmulx)

#undef DO_FP3
typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr,
                                      TCGv_i64, TCGv_ptr, TCGv_i32);

static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
                         TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zd, t_zn, t_pg, status;
    TCGv_i32 desc;

    t_zd = tcg_temp_new_ptr();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));

    status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    fn(t_zd, t_zn, t_pg, scalar, status, desc);

    tcg_temp_free_ptr(status);
    tcg_temp_free_ptr(t_pg);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_zd);
}

static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
                      gen_helper_sve_fp2scalar *fn)
{
    do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16,
                 tcg_constant_i64(imm), fn);
}

#define DO_FP_IMM(NAME, name, const0, const1) \
static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a)   \
{                                                                   \
    static gen_helper_sve_fp2scalar * const fns[3] = {              \
        gen_helper_sve_##name##_h,                                  \
        gen_helper_sve_##name##_s,                                  \
        gen_helper_sve_##name##_d                                   \
    };                                                              \
    static uint64_t const val[3][2] = {                             \
        { float16_##const0, float16_##const1 },                     \
        { float32_##const0, float32_##const1 },                     \
        { float64_##const0, float64_##const1 },                     \
    };                                                              \
    if (a->esz == 0) {                                              \
        return false;                                               \
    }                                                               \
    if (sve_access_check(s)) {                                      \
        do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]);  \
    }                                                               \
    return true;                                                    \
}

DO_FP_IMM(FADD, fadds, half, one)
DO_FP_IMM(FSUB, fsubs, half, one)
DO_FP_IMM(FMUL, fmuls, half, two)
DO_FP_IMM(FSUBR, fsubrs, half, one)
DO_FP_IMM(FMAXNM, fmaxnms, zero, one)
DO_FP_IMM(FMINNM, fminnms, zero, one)
DO_FP_IMM(FMAX, fmaxs, zero, one)
DO_FP_IMM(FMIN, fmins, zero, one)

#undef DO_FP_IMM
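/*
 * Each of these immediate forms encodes one of exactly two constants
 * in a single bit, per the val[][] tables above: e.g. FADD allows
 * #0.5 or #1.0, FMUL allows #0.5 or #2.0, and FMAXNM/FMINNM allow
 * #0.0 or #1.0.
 */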
static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
                      gen_helper_gvec_4_ptr *fn)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, 0, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

#define DO_FPCMP(NAME, name) \
static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a)   \
{                                                                   \
    static gen_helper_gvec_4_ptr * const fns[4] = {                 \
        NULL, gen_helper_sve_##name##_h,                            \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d        \
    };                                                              \
    return do_fp_cmp(s, a, fns[a->esz]);                            \
}

DO_FPCMP(FCMGE, fcmge)
DO_FPCMP(FCMGT, fcmgt)
DO_FPCMP(FCMEQ, fcmeq)
DO_FPCMP(FCMNE, fcmne)
DO_FPCMP(FCMUO, fcmuo)
DO_FPCMP(FACGE, facge)
DO_FPCMP(FACGT, facgt)

#undef DO_FPCMP
3970 static bool trans_FCADD(DisasContext *s, arg_FCADD *a)
3972 static gen_helper_gvec_4_ptr * const fns[3] = {
3973 gen_helper_sve_fcadd_h,
3974 gen_helper_sve_fcadd_s,
3975 gen_helper_sve_fcadd_d
3978 if (a->esz == 0) {
3979 return false;
3981 if (sve_access_check(s)) {
3982 unsigned vsz = vec_full_reg_size(s);
3983 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3984 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
3985 vec_full_reg_offset(s, a->rn),
3986 vec_full_reg_offset(s, a->rm),
3987 pred_full_reg_offset(s, a->pg),
3988 status, vsz, vsz, a->rot, fns[a->esz - 1]);
3989 tcg_temp_free_ptr(status);
3991 return true;
3994 static bool do_fmla(DisasContext *s, arg_rprrr_esz *a,
3995 gen_helper_gvec_5_ptr *fn)
3997 if (a->esz == 0) {
3998 return false;
4000 if (sve_access_check(s)) {
4001 unsigned vsz = vec_full_reg_size(s);
4002 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4003 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
4004 vec_full_reg_offset(s, a->rn),
4005 vec_full_reg_offset(s, a->rm),
4006 vec_full_reg_offset(s, a->ra),
4007 pred_full_reg_offset(s, a->pg),
4008 status, vsz, vsz, 0, fn);
4009 tcg_temp_free_ptr(status);
4011 return true;
4014 #define DO_FMLA(NAME, name) \
4015 static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \
4017 static gen_helper_gvec_5_ptr * const fns[4] = { \
4018 NULL, gen_helper_sve_##name##_h, \
4019 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
4020 }; \
4021 return do_fmla(s, a, fns[a->esz]); \
4024 DO_FMLA(FMLA_zpzzz, fmla_zpzzz)
4025 DO_FMLA(FMLS_zpzzz, fmls_zpzzz)
4026 DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz)
4027 DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz)
4029 #undef DO_FMLA
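/*
 * These tables are indexed directly by a->esz, with NULL in slot 0, so the
 * explicit esz == 0 check in do_fmla() and the NULL entry guard the same
 * unallocated encoding.  E.g. FMLA z0.h, p0/m, z1.h, z2.h selects
 * fns[MO_16] == gen_helper_sve_fmla_zpzzz_h.
 */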
4031 static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
4033 static gen_helper_gvec_5_ptr * const fns[4] = {
4034 NULL,
4035 gen_helper_sve_fcmla_zpzzz_h,
4036 gen_helper_sve_fcmla_zpzzz_s,
4037 gen_helper_sve_fcmla_zpzzz_d,
4040 if (a->esz == 0) {
4041 return false;
4043 if (sve_access_check(s)) {
4044 unsigned vsz = vec_full_reg_size(s);
4045 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4046 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
4047 vec_full_reg_offset(s, a->rn),
4048 vec_full_reg_offset(s, a->rm),
4049 vec_full_reg_offset(s, a->ra),
4050 pred_full_reg_offset(s, a->pg),
4051 status, vsz, vsz, a->rot, fns[a->esz]);
4052 tcg_temp_free_ptr(status);
4054 return true;
4057 static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
4059 static gen_helper_gvec_4_ptr * const fns[4] = {
4060 NULL,
4061 gen_helper_gvec_fcmlah_idx,
4062 gen_helper_gvec_fcmlas_idx,
4063 NULL,
4066 tcg_debug_assert(a->rd == a->ra);
4068 return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra,
4069 a->index * 4 + a->rot,
4070 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4073 /*
4074  *** SVE Floating Point Unary Operations Predicated Group
4075  */
4077 static bool do_zpz_ptr(DisasContext *s, int rd, int rn, int pg,
4078 bool is_fp16, gen_helper_gvec_3_ptr *fn)
4080 if (sve_access_check(s)) {
4081 unsigned vsz = vec_full_reg_size(s);
4082 TCGv_ptr status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
4083 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
4084 vec_full_reg_offset(s, rn),
4085 pred_full_reg_offset(s, pg),
4086 status, vsz, vsz, 0, fn);
4087 tcg_temp_free_ptr(status);
4089 return true;
4092 static bool trans_FCVT_sh(DisasContext *s, arg_rpr_esz *a)
4094 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sh);
4097 static bool trans_FCVT_hs(DisasContext *s, arg_rpr_esz *a)
4099 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hs);
4102 static bool trans_BFCVT(DisasContext *s, arg_rpr_esz *a)
4104 if (!dc_isar_feature(aa64_sve_bf16, s)) {
4105 return false;
4107 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvt);
4110 static bool trans_FCVT_dh(DisasContext *s, arg_rpr_esz *a)
4112 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_dh);
4115 static bool trans_FCVT_hd(DisasContext *s, arg_rpr_esz *a)
4117 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_hd);
4120 static bool trans_FCVT_ds(DisasContext *s, arg_rpr_esz *a)
4122 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_ds);
4125 static bool trans_FCVT_sd(DisasContext *s, arg_rpr_esz *a)
4127 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvt_sd);
4130 static bool trans_FCVTZS_hh(DisasContext *s, arg_rpr_esz *a)
4132 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hh);
4135 static bool trans_FCVTZU_hh(DisasContext *s, arg_rpr_esz *a)
4137 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hh);
4140 static bool trans_FCVTZS_hs(DisasContext *s, arg_rpr_esz *a)
4142 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hs);
4145 static bool trans_FCVTZU_hs(DisasContext *s, arg_rpr_esz *a)
4147 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hs);
4150 static bool trans_FCVTZS_hd(DisasContext *s, arg_rpr_esz *a)
4152 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzs_hd);
4155 static bool trans_FCVTZU_hd(DisasContext *s, arg_rpr_esz *a)
4157 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_fcvtzu_hd);
4160 static bool trans_FCVTZS_ss(DisasContext *s, arg_rpr_esz *a)
4162 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ss);
4165 static bool trans_FCVTZU_ss(DisasContext *s, arg_rpr_esz *a)
4167 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ss);
4170 static bool trans_FCVTZS_sd(DisasContext *s, arg_rpr_esz *a)
4172 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_sd);
4175 static bool trans_FCVTZU_sd(DisasContext *s, arg_rpr_esz *a)
4177 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_sd);
4180 static bool trans_FCVTZS_ds(DisasContext *s, arg_rpr_esz *a)
4182 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_ds);
4185 static bool trans_FCVTZU_ds(DisasContext *s, arg_rpr_esz *a)
4187 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_ds);
4190 static bool trans_FCVTZS_dd(DisasContext *s, arg_rpr_esz *a)
4192 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzs_dd);
4195 static bool trans_FCVTZU_dd(DisasContext *s, arg_rpr_esz *a)
4197 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_fcvtzu_dd);
4200 static gen_helper_gvec_3_ptr * const frint_fns[3] = {
4201 gen_helper_sve_frint_h,
4202 gen_helper_sve_frint_s,
4203 gen_helper_sve_frint_d
4206 static bool trans_FRINTI(DisasContext *s, arg_rpr_esz *a)
4208 if (a->esz == 0) {
4209 return false;
4211 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16,
4212 frint_fns[a->esz - 1]);
4215 static bool trans_FRINTX(DisasContext *s, arg_rpr_esz *a)
4217 static gen_helper_gvec_3_ptr * const fns[3] = {
4218 gen_helper_sve_frintx_h,
4219 gen_helper_sve_frintx_s,
4220 gen_helper_sve_frintx_d
4222 if (a->esz == 0) {
4223 return false;
4225 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4228 static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
4229 int mode, gen_helper_gvec_3_ptr *fn)
4231 if (sve_access_check(s)) {
4232 unsigned vsz = vec_full_reg_size(s);
4233 TCGv_i32 tmode = tcg_const_i32(mode);
4234 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4236 gen_helper_set_rmode(tmode, tmode, status);
4238 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
4239 vec_full_reg_offset(s, a->rn),
4240 pred_full_reg_offset(s, a->pg),
4241 status, vsz, vsz, 0, fn);
4243 gen_helper_set_rmode(tmode, tmode, status);
4244 tcg_temp_free_i32(tmode);
4245 tcg_temp_free_ptr(status);
4247 return true;
4250 static bool trans_FRINTN(DisasContext *s, arg_rpr_esz *a)
4252 if (a->esz == 0) {
4253 return false;
4255 return do_frint_mode(s, a, float_round_nearest_even, frint_fns[a->esz - 1]);
4258 static bool trans_FRINTP(DisasContext *s, arg_rpr_esz *a)
4260 if (a->esz == 0) {
4261 return false;
4263 return do_frint_mode(s, a, float_round_up, frint_fns[a->esz - 1]);
4266 static bool trans_FRINTM(DisasContext *s, arg_rpr_esz *a)
4268 if (a->esz == 0) {
4269 return false;
4271 return do_frint_mode(s, a, float_round_down, frint_fns[a->esz - 1]);
4274 static bool trans_FRINTZ(DisasContext *s, arg_rpr_esz *a)
4276 if (a->esz == 0) {
4277 return false;
4279 return do_frint_mode(s, a, float_round_to_zero, frint_fns[a->esz - 1]);
4282 static bool trans_FRINTA(DisasContext *s, arg_rpr_esz *a)
4284 if (a->esz == 0) {
4285 return false;
4287 return do_frint_mode(s, a, float_round_ties_away, frint_fns[a->esz - 1]);
4290 static bool trans_FRECPX(DisasContext *s, arg_rpr_esz *a)
4292 static gen_helper_gvec_3_ptr * const fns[3] = {
4293 gen_helper_sve_frecpx_h,
4294 gen_helper_sve_frecpx_s,
4295 gen_helper_sve_frecpx_d
4297 if (a->esz == 0) {
4298 return false;
4300 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4303 static bool trans_FSQRT(DisasContext *s, arg_rpr_esz *a)
4305 static gen_helper_gvec_3_ptr * const fns[3] = {
4306 gen_helper_sve_fsqrt_h,
4307 gen_helper_sve_fsqrt_s,
4308 gen_helper_sve_fsqrt_d
4310 if (a->esz == 0) {
4311 return false;
4313 return do_zpz_ptr(s, a->rd, a->rn, a->pg, a->esz == MO_16, fns[a->esz - 1]);
4316 static bool trans_SCVTF_hh(DisasContext *s, arg_rpr_esz *a)
4318 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_hh);
4321 static bool trans_SCVTF_sh(DisasContext *s, arg_rpr_esz *a)
4323 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_sh);
4326 static bool trans_SCVTF_dh(DisasContext *s, arg_rpr_esz *a)
4328 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_scvt_dh);
4331 static bool trans_SCVTF_ss(DisasContext *s, arg_rpr_esz *a)
4333 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ss);
4336 static bool trans_SCVTF_ds(DisasContext *s, arg_rpr_esz *a)
4338 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_ds);
4341 static bool trans_SCVTF_sd(DisasContext *s, arg_rpr_esz *a)
4343 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_sd);
4346 static bool trans_SCVTF_dd(DisasContext *s, arg_rpr_esz *a)
4348 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_scvt_dd);
4351 static bool trans_UCVTF_hh(DisasContext *s, arg_rpr_esz *a)
4353 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_hh);
4356 static bool trans_UCVTF_sh(DisasContext *s, arg_rpr_esz *a)
4358 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_sh);
4361 static bool trans_UCVTF_dh(DisasContext *s, arg_rpr_esz *a)
4363 return do_zpz_ptr(s, a->rd, a->rn, a->pg, true, gen_helper_sve_ucvt_dh);
4366 static bool trans_UCVTF_ss(DisasContext *s, arg_rpr_esz *a)
4368 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ss);
4371 static bool trans_UCVTF_ds(DisasContext *s, arg_rpr_esz *a)
4373 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_ds);
4376 static bool trans_UCVTF_sd(DisasContext *s, arg_rpr_esz *a)
4378 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_sd);
4381 static bool trans_UCVTF_dd(DisasContext *s, arg_rpr_esz *a)
4383 return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_ucvt_dd);
4386 /*
4387  *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
4388  */
4390 /* Subroutine loading a vector register at VOFS of LEN bytes.
4391  * The load should begin at the address Rn + IMM.
4392  */
4394 static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
4396 int len_align = QEMU_ALIGN_DOWN(len, 8);
4397 int len_remain = len % 8;
4398 int nparts = len / 8 + ctpop8(len_remain);
4399 int midx = get_mem_index(s);
4400 TCGv_i64 dirty_addr, clean_addr, t0, t1;
4402 dirty_addr = tcg_temp_new_i64();
4403 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4404 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
4405 tcg_temp_free_i64(dirty_addr);
4408 * Note that unpredicated load/store of vector/predicate registers
4409 * are defined as a stream of bytes, which equates to little-endian
4410 * operations on larger quantities.
4411 * Attempt to keep code expansion to a minimum by limiting the
4412 * amount of unrolling done.
4414 if (nparts <= 4) {
4415 int i;
4417 t0 = tcg_temp_new_i64();
4418 for (i = 0; i < len_align; i += 8) {
4419 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
4420 tcg_gen_st_i64(t0, cpu_env, vofs + i);
4421 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4423 tcg_temp_free_i64(t0);
4424 } else {
4425 TCGLabel *loop = gen_new_label();
4426 TCGv_ptr tp, i = tcg_const_local_ptr(0);
4428 /* Copy the clean address into a local temp, live across the loop. */
4429 t0 = clean_addr;
4430 clean_addr = new_tmp_a64_local(s);
4431 tcg_gen_mov_i64(clean_addr, t0);
4433 gen_set_label(loop);
4435 t0 = tcg_temp_new_i64();
4436 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
4437 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4439 tp = tcg_temp_new_ptr();
4440 tcg_gen_add_ptr(tp, cpu_env, i);
4441 tcg_gen_addi_ptr(i, i, 8);
4442 tcg_gen_st_i64(t0, tp, vofs);
4443 tcg_temp_free_ptr(tp);
4444 tcg_temp_free_i64(t0);
4446 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4447 tcg_temp_free_ptr(i);
4451 * Predicate register loads can be any multiple of 2.
4452 * Note that we still store the entire 64-bit unit into cpu_env.
4454 if (len_remain) {
4455 t0 = tcg_temp_new_i64();
4456 switch (len_remain) {
4457 case 2:
4458 case 4:
4459 case 8:
4460 tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
4461 MO_LE | ctz32(len_remain));
4462 break;
4464 case 6:
4465 t1 = tcg_temp_new_i64();
4466 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
4467 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
4468 tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
4469 tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
4470 tcg_temp_free_i64(t1);
4471 break;
4473 default:
4474 g_assert_not_reached();
4476 tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
4477 tcg_temp_free_i64(t0);
4481 /* Similarly for stores. */
4482 static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
4484 int len_align = QEMU_ALIGN_DOWN(len, 8);
4485 int len_remain = len % 8;
4486 int nparts = len / 8 + ctpop8(len_remain);
4487 int midx = get_mem_index(s);
4488 TCGv_i64 dirty_addr, clean_addr, t0;
4490 dirty_addr = tcg_temp_new_i64();
4491 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4492 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
4493 tcg_temp_free_i64(dirty_addr);
4495 /* Note that unpredicated load/store of vector/predicate registers
4496 * are defined as a stream of bytes, which equates to little-endian
4497 * operations on larger quantities. There is no nice way to force
4498 * a little-endian store for aarch64_be-linux-user out of line.
4500 * Attempt to keep code expansion to a minimum by limiting the
4501 * amount of unrolling done.
4503 if (nparts <= 4) {
4504 int i;
4506 t0 = tcg_temp_new_i64();
4507 for (i = 0; i < len_align; i += 8) {
4508 tcg_gen_ld_i64(t0, cpu_env, vofs + i);
4509 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
4510 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4512 tcg_temp_free_i64(t0);
4513 } else {
4514 TCGLabel *loop = gen_new_label();
4515 TCGv_ptr tp, i = tcg_const_local_ptr(0);
4517 /* Copy the clean address into a local temp, live across the loop. */
4518 t0 = clean_addr;
4519 clean_addr = new_tmp_a64_local(s);
4520 tcg_gen_mov_i64(clean_addr, t0);
4522 gen_set_label(loop);
4524 t0 = tcg_temp_new_i64();
4525 tp = tcg_temp_new_ptr();
4526 tcg_gen_add_ptr(tp, cpu_env, i);
4527 tcg_gen_ld_i64(t0, tp, vofs);
4528 tcg_gen_addi_ptr(i, i, 8);
4529 tcg_temp_free_ptr(tp);
4531 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
4532 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4533 tcg_temp_free_i64(t0);
4535 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4536 tcg_temp_free_ptr(i);
4539 /* Predicate register stores can be any multiple of 2. */
4540 if (len_remain) {
4541 t0 = tcg_temp_new_i64();
4542 tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
4544 switch (len_remain) {
4545 case 2:
4546 case 4:
4547 case 8:
4548 tcg_gen_qemu_st_i64(t0, clean_addr, midx,
4549 MO_LE | ctz32(len_remain));
4550 break;
4552 case 6:
4553 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL);
4554 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
4555 tcg_gen_shri_i64(t0, t0, 32);
4556 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW);
4557 break;
4559 default:
4560 g_assert_not_reached();
4562 tcg_temp_free_i64(t0);
4566 static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
4568 if (sve_access_check(s)) {
4569 int size = vec_full_reg_size(s);
4570 int off = vec_full_reg_offset(s, a->rd);
4571 do_ldr(s, off, size, a->rn, a->imm * size);
4573 return true;
4576 static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
4578 if (sve_access_check(s)) {
4579 int size = pred_full_reg_size(s);
4580 int off = pred_full_reg_offset(s, a->rd);
4581 do_ldr(s, off, size, a->rn, a->imm * size);
4583 return true;
4586 static bool trans_STR_zri(DisasContext *s, arg_rri *a)
4588 if (sve_access_check(s)) {
4589 int size = vec_full_reg_size(s);
4590 int off = vec_full_reg_offset(s, a->rd);
4591 do_str(s, off, size, a->rn, a->imm * size);
4593 return true;
4596 static bool trans_STR_pri(DisasContext *s, arg_rri *a)
4598 if (sve_access_check(s)) {
4599 int size = pred_full_reg_size(s);
4600 int off = pred_full_reg_offset(s, a->rd);
4601 do_str(s, off, size, a->rn, a->imm * size);
4603 return true;
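/*
 * Worked example for the four translators above: with VL = 256 bits,
 * vec_full_reg_size() == 32 and pred_full_reg_size() == 4, so
 * LDR z3, [x0, #2, MUL VL] becomes do_ldr(s, off, 32, 0, 64) while
 * LDR p3, [x0, #2, MUL VL] becomes do_ldr(s, off, 4, 0, 8); the immediate
 * always scales by the size of the register being transferred.
 */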
4606 /*
4607  *** SVE Memory - Contiguous Load Group
4608  */
4610 /* The memory mode of the dtype. */
4611 static const MemOp dtype_mop[16] = {
4612 MO_UB, MO_UB, MO_UB, MO_UB,
4613 MO_SL, MO_UW, MO_UW, MO_UW,
4614 MO_SW, MO_SW, MO_UL, MO_UL,
4615 MO_SB, MO_SB, MO_SB, MO_UQ
4618 #define dtype_msz(x) (dtype_mop[x] & MO_SIZE)
4620 /* The vector element size of dtype. */
4621 static const uint8_t dtype_esz[16] = {
4622 0, 1, 2, 3,
4623 3, 1, 2, 3,
4624 3, 2, 2, 3,
4625 3, 2, 1, 3
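/*
 * Worked example of the two tables: dtype 6 is LD1H into 32-bit elements,
 * so dtype_mop[6] == MO_UW, dtype_msz(6) == MO_16 and dtype_esz[6] == MO_32;
 * dtype 4 is LD1SW into 64-bit elements, so dtype_mop[4] == MO_SL (a
 * sign-extending 32-bit load) and dtype_esz[4] == MO_64.
 */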
4628 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
4629 int dtype, uint32_t mte_n, bool is_write,
4630 gen_helper_gvec_mem *fn)
4632 unsigned vsz = vec_full_reg_size(s);
4633 TCGv_ptr t_pg;
4634 int desc = 0;
4637 * For e.g. LD4, there are not enough arguments to pass all 4
4638 * registers as pointers, so encode the regno into the data field.
4639 * For consistency, do this even for LD1.
4641 if (s->mte_active[0]) {
4642 int msz = dtype_msz(dtype);
4644 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
4645 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
4646 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
4647 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
4648 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
4649 desc <<= SVE_MTEDESC_SHIFT;
4650 } else {
4651 addr = clean_data_tbi(s, addr);
4654 desc = simd_desc(vsz, vsz, zt | desc);
4655 t_pg = tcg_temp_new_ptr();
4657 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
4658 fn(cpu_env, t_pg, addr, tcg_constant_i32(desc));
4660 tcg_temp_free_ptr(t_pg);
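/*
 * Descriptor example: for LDFF1H (see trans_LDFF1_zprr below) this is
 * reached with mte_n == 1 and msz == MO_16, so with MTE active
 * SIZEM1 == (1 << 1) - 1 == 1, i.e. a 2-byte tag-checked access per element.
 * The MTE fields are shifted above SVE_MTEDESC_SHIFT, while the low bits
 * keep the normal simd_desc() with zt encoded in the data field.
 */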
4663 /* Indexed by [mte][be][dtype][nreg] */
4664 static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
4665 { /* mte inactive, little-endian */
4666 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
4667 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
4668 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
4669 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
4670 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
4672 { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
4673 { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
4674 gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
4675 { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
4676 { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },
4678 { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
4679 { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
4680 { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
4681 gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
4682 { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },
4684 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
4685 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
4686 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
4687 { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
4688 gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
4690 /* mte inactive, big-endian */
4691 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
4692 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
4693 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
4694 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
4695 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
4697 { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
4698 { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
4699 gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
4700 { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
4701 { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
4703 { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
4704 { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
4705 { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
4706 gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
4707 { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
4709 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
4710 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
4711 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
4712 { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
4713 gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },
4715 { /* mte active, little-endian */
4716 { { gen_helper_sve_ld1bb_r_mte,
4717 gen_helper_sve_ld2bb_r_mte,
4718 gen_helper_sve_ld3bb_r_mte,
4719 gen_helper_sve_ld4bb_r_mte },
4720 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
4721 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
4722 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
4724 { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
4725 { gen_helper_sve_ld1hh_le_r_mte,
4726 gen_helper_sve_ld2hh_le_r_mte,
4727 gen_helper_sve_ld3hh_le_r_mte,
4728 gen_helper_sve_ld4hh_le_r_mte },
4729 { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
4730 { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },
4732 { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
4733 { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
4734 { gen_helper_sve_ld1ss_le_r_mte,
4735 gen_helper_sve_ld2ss_le_r_mte,
4736 gen_helper_sve_ld3ss_le_r_mte,
4737 gen_helper_sve_ld4ss_le_r_mte },
4738 { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },
4740 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
4741 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
4742 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
4743 { gen_helper_sve_ld1dd_le_r_mte,
4744 gen_helper_sve_ld2dd_le_r_mte,
4745 gen_helper_sve_ld3dd_le_r_mte,
4746 gen_helper_sve_ld4dd_le_r_mte } },
4748 /* mte active, big-endian */
4749 { { gen_helper_sve_ld1bb_r_mte,
4750 gen_helper_sve_ld2bb_r_mte,
4751 gen_helper_sve_ld3bb_r_mte,
4752 gen_helper_sve_ld4bb_r_mte },
4753 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
4754 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
4755 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
4757 { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
4758 { gen_helper_sve_ld1hh_be_r_mte,
4759 gen_helper_sve_ld2hh_be_r_mte,
4760 gen_helper_sve_ld3hh_be_r_mte,
4761 gen_helper_sve_ld4hh_be_r_mte },
4762 { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
4763 { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },
4765 { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
4766 { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
4767 { gen_helper_sve_ld1ss_be_r_mte,
4768 gen_helper_sve_ld2ss_be_r_mte,
4769 gen_helper_sve_ld3ss_be_r_mte,
4770 gen_helper_sve_ld4ss_be_r_mte },
4771 { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },
4773 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
4774 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
4775 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
4776 { gen_helper_sve_ld1dd_be_r_mte,
4777 gen_helper_sve_ld2dd_be_r_mte,
4778 gen_helper_sve_ld3dd_be_r_mte,
4779 gen_helper_sve_ld4dd_be_r_mte } } },
4782 static void do_ld_zpa(DisasContext *s, int zt, int pg,
4783 TCGv_i64 addr, int dtype, int nreg)
4785 gen_helper_gvec_mem *fn
4786 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];
4789 * While there are holes in the table, they are not
4790 * accessible via the instruction encoding.
4792 assert(fn != NULL);
4793 do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn);
4796 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
4798 if (a->rm == 31) {
4799 return false;
4801 if (sve_access_check(s)) {
4802 TCGv_i64 addr = new_tmp_a64(s);
4803 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4804 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4805 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4807 return true;
4810 static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
4812 if (sve_access_check(s)) {
4813 int vsz = vec_full_reg_size(s);
4814 int elements = vsz >> dtype_esz[a->dtype];
4815 TCGv_i64 addr = new_tmp_a64(s);
4817 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
4818 (a->imm * elements * (a->nreg + 1))
4819 << dtype_msz(a->dtype));
4820 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4822 return true;
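/*
 * Worked example of the immediate scaling above: for
 * LD2H { z0.h, z1.h }, p0/z, [x1, #2, MUL VL] with VL = 256 bits,
 * elements == 32 >> 1 == 16 and a->nreg == 1, so the byte offset is
 * (2 * 16 * 2) << 1 == 128: the immediate counts whole
 * (nreg + 1)-register transfer units.
 */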
4825 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
4827 static gen_helper_gvec_mem * const fns[2][2][16] = {
4828 { /* mte inactive, little-endian */
4829 { gen_helper_sve_ldff1bb_r,
4830 gen_helper_sve_ldff1bhu_r,
4831 gen_helper_sve_ldff1bsu_r,
4832 gen_helper_sve_ldff1bdu_r,
4834 gen_helper_sve_ldff1sds_le_r,
4835 gen_helper_sve_ldff1hh_le_r,
4836 gen_helper_sve_ldff1hsu_le_r,
4837 gen_helper_sve_ldff1hdu_le_r,
4839 gen_helper_sve_ldff1hds_le_r,
4840 gen_helper_sve_ldff1hss_le_r,
4841 gen_helper_sve_ldff1ss_le_r,
4842 gen_helper_sve_ldff1sdu_le_r,
4844 gen_helper_sve_ldff1bds_r,
4845 gen_helper_sve_ldff1bss_r,
4846 gen_helper_sve_ldff1bhs_r,
4847 gen_helper_sve_ldff1dd_le_r },
4849 /* mte inactive, big-endian */
4850 { gen_helper_sve_ldff1bb_r,
4851 gen_helper_sve_ldff1bhu_r,
4852 gen_helper_sve_ldff1bsu_r,
4853 gen_helper_sve_ldff1bdu_r,
4855 gen_helper_sve_ldff1sds_be_r,
4856 gen_helper_sve_ldff1hh_be_r,
4857 gen_helper_sve_ldff1hsu_be_r,
4858 gen_helper_sve_ldff1hdu_be_r,
4860 gen_helper_sve_ldff1hds_be_r,
4861 gen_helper_sve_ldff1hss_be_r,
4862 gen_helper_sve_ldff1ss_be_r,
4863 gen_helper_sve_ldff1sdu_be_r,
4865 gen_helper_sve_ldff1bds_r,
4866 gen_helper_sve_ldff1bss_r,
4867 gen_helper_sve_ldff1bhs_r,
4868 gen_helper_sve_ldff1dd_be_r } },
4870 { /* mte active, little-endian */
4871 { gen_helper_sve_ldff1bb_r_mte,
4872 gen_helper_sve_ldff1bhu_r_mte,
4873 gen_helper_sve_ldff1bsu_r_mte,
4874 gen_helper_sve_ldff1bdu_r_mte,
4876 gen_helper_sve_ldff1sds_le_r_mte,
4877 gen_helper_sve_ldff1hh_le_r_mte,
4878 gen_helper_sve_ldff1hsu_le_r_mte,
4879 gen_helper_sve_ldff1hdu_le_r_mte,
4881 gen_helper_sve_ldff1hds_le_r_mte,
4882 gen_helper_sve_ldff1hss_le_r_mte,
4883 gen_helper_sve_ldff1ss_le_r_mte,
4884 gen_helper_sve_ldff1sdu_le_r_mte,
4886 gen_helper_sve_ldff1bds_r_mte,
4887 gen_helper_sve_ldff1bss_r_mte,
4888 gen_helper_sve_ldff1bhs_r_mte,
4889 gen_helper_sve_ldff1dd_le_r_mte },
4891 /* mte active, big-endian */
4892 { gen_helper_sve_ldff1bb_r_mte,
4893 gen_helper_sve_ldff1bhu_r_mte,
4894 gen_helper_sve_ldff1bsu_r_mte,
4895 gen_helper_sve_ldff1bdu_r_mte,
4897 gen_helper_sve_ldff1sds_be_r_mte,
4898 gen_helper_sve_ldff1hh_be_r_mte,
4899 gen_helper_sve_ldff1hsu_be_r_mte,
4900 gen_helper_sve_ldff1hdu_be_r_mte,
4902 gen_helper_sve_ldff1hds_be_r_mte,
4903 gen_helper_sve_ldff1hss_be_r_mte,
4904 gen_helper_sve_ldff1ss_be_r_mte,
4905 gen_helper_sve_ldff1sdu_be_r_mte,
4907 gen_helper_sve_ldff1bds_r_mte,
4908 gen_helper_sve_ldff1bss_r_mte,
4909 gen_helper_sve_ldff1bhs_r_mte,
4910 gen_helper_sve_ldff1dd_be_r_mte } },
4913 if (sve_access_check(s)) {
4914 TCGv_i64 addr = new_tmp_a64(s);
4915 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4916 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4917 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
4918 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
4920 return true;
4923 static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
4925 static gen_helper_gvec_mem * const fns[2][2][16] = {
4926 { /* mte inactive, little-endian */
4927 { gen_helper_sve_ldnf1bb_r,
4928 gen_helper_sve_ldnf1bhu_r,
4929 gen_helper_sve_ldnf1bsu_r,
4930 gen_helper_sve_ldnf1bdu_r,
4932 gen_helper_sve_ldnf1sds_le_r,
4933 gen_helper_sve_ldnf1hh_le_r,
4934 gen_helper_sve_ldnf1hsu_le_r,
4935 gen_helper_sve_ldnf1hdu_le_r,
4937 gen_helper_sve_ldnf1hds_le_r,
4938 gen_helper_sve_ldnf1hss_le_r,
4939 gen_helper_sve_ldnf1ss_le_r,
4940 gen_helper_sve_ldnf1sdu_le_r,
4942 gen_helper_sve_ldnf1bds_r,
4943 gen_helper_sve_ldnf1bss_r,
4944 gen_helper_sve_ldnf1bhs_r,
4945 gen_helper_sve_ldnf1dd_le_r },
4947 /* mte inactive, big-endian */
4948 { gen_helper_sve_ldnf1bb_r,
4949 gen_helper_sve_ldnf1bhu_r,
4950 gen_helper_sve_ldnf1bsu_r,
4951 gen_helper_sve_ldnf1bdu_r,
4953 gen_helper_sve_ldnf1sds_be_r,
4954 gen_helper_sve_ldnf1hh_be_r,
4955 gen_helper_sve_ldnf1hsu_be_r,
4956 gen_helper_sve_ldnf1hdu_be_r,
4958 gen_helper_sve_ldnf1hds_be_r,
4959 gen_helper_sve_ldnf1hss_be_r,
4960 gen_helper_sve_ldnf1ss_be_r,
4961 gen_helper_sve_ldnf1sdu_be_r,
4963 gen_helper_sve_ldnf1bds_r,
4964 gen_helper_sve_ldnf1bss_r,
4965 gen_helper_sve_ldnf1bhs_r,
4966 gen_helper_sve_ldnf1dd_be_r } },
4968 { /* mte active, little-endian */
4969 { gen_helper_sve_ldnf1bb_r_mte,
4970 gen_helper_sve_ldnf1bhu_r_mte,
4971 gen_helper_sve_ldnf1bsu_r_mte,
4972 gen_helper_sve_ldnf1bdu_r_mte,
4974 gen_helper_sve_ldnf1sds_le_r_mte,
4975 gen_helper_sve_ldnf1hh_le_r_mte,
4976 gen_helper_sve_ldnf1hsu_le_r_mte,
4977 gen_helper_sve_ldnf1hdu_le_r_mte,
4979 gen_helper_sve_ldnf1hds_le_r_mte,
4980 gen_helper_sve_ldnf1hss_le_r_mte,
4981 gen_helper_sve_ldnf1ss_le_r_mte,
4982 gen_helper_sve_ldnf1sdu_le_r_mte,
4984 gen_helper_sve_ldnf1bds_r_mte,
4985 gen_helper_sve_ldnf1bss_r_mte,
4986 gen_helper_sve_ldnf1bhs_r_mte,
4987 gen_helper_sve_ldnf1dd_le_r_mte },
4989 /* mte active, big-endian */
4990 { gen_helper_sve_ldnf1bb_r_mte,
4991 gen_helper_sve_ldnf1bhu_r_mte,
4992 gen_helper_sve_ldnf1bsu_r_mte,
4993 gen_helper_sve_ldnf1bdu_r_mte,
4995 gen_helper_sve_ldnf1sds_be_r_mte,
4996 gen_helper_sve_ldnf1hh_be_r_mte,
4997 gen_helper_sve_ldnf1hsu_be_r_mte,
4998 gen_helper_sve_ldnf1hdu_be_r_mte,
5000 gen_helper_sve_ldnf1hds_be_r_mte,
5001 gen_helper_sve_ldnf1hss_be_r_mte,
5002 gen_helper_sve_ldnf1ss_be_r_mte,
5003 gen_helper_sve_ldnf1sdu_be_r_mte,
5005 gen_helper_sve_ldnf1bds_r_mte,
5006 gen_helper_sve_ldnf1bss_r_mte,
5007 gen_helper_sve_ldnf1bhs_r_mte,
5008 gen_helper_sve_ldnf1dd_be_r_mte } },
5011 if (sve_access_check(s)) {
5012 int vsz = vec_full_reg_size(s);
5013 int elements = vsz >> dtype_esz[a->dtype];
5014 int off = (a->imm * elements) << dtype_msz(a->dtype);
5015 TCGv_i64 addr = new_tmp_a64(s);
5017 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
5018 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
5019 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
5021 return true;
5024 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
5026 unsigned vsz = vec_full_reg_size(s);
5027 TCGv_ptr t_pg;
5028 int poff;
5030 /* Load the first quadword using the normal predicated load helpers. */
5031 poff = pred_full_reg_offset(s, pg);
5032 if (vsz > 16) {
5034 * Zero-extend the first 16 bits of the predicate into a temporary.
5035 * This avoids triggering an assert making sure we don't have bits
5036 * set within a predicate beyond VQ, but we have lowered VQ to 1
5037 * for this load operation.
5039 TCGv_i64 tmp = tcg_temp_new_i64();
5040 #if HOST_BIG_ENDIAN
5041 poff += 6;
5042 #endif
5043 tcg_gen_ld16u_i64(tmp, cpu_env, poff);
5045 poff = offsetof(CPUARMState, vfp.preg_tmp);
5046 tcg_gen_st_i64(tmp, cpu_env, poff);
5047 tcg_temp_free_i64(tmp);
5050 t_pg = tcg_temp_new_ptr();
5051 tcg_gen_addi_ptr(t_pg, cpu_env, poff);
5053 gen_helper_gvec_mem *fn
5054 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
5055 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
5057 tcg_temp_free_ptr(t_pg);
5059 /* Replicate that first quadword. */
5060 if (vsz > 16) {
5061 int doff = vec_full_reg_offset(s, zt);
5062 tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16);
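/*
 * Replication example: with VL = 512 bits the quadword is loaded with a
 * VQ=1 descriptor (simd_desc(16, 16, zt)), then
 * tcg_gen_gvec_dup_mem(4, doff + 16, doff, 48, 48) copies it into the
 * remaining three 16-byte slots of the destination.
 */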
5066 static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
5068 if (a->rm == 31) {
5069 return false;
5071 if (sve_access_check(s)) {
5072 int msz = dtype_msz(a->dtype);
5073 TCGv_i64 addr = new_tmp_a64(s);
5074 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
5075 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5076 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
5078 return true;
5081 static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
5083 if (sve_access_check(s)) {
5084 TCGv_i64 addr = new_tmp_a64(s);
5085 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
5086 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
5088 return true;
5091 static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
5093 unsigned vsz = vec_full_reg_size(s);
5094 unsigned vsz_r32;
5095 TCGv_ptr t_pg;
5096 int poff, doff;
5098 if (vsz < 32) {
5100 * Note that this UNDEFINED check comes after CheckSVEEnabled()
5101 * in the ARM pseudocode, which is the sve_access_check() done
5102 * in our caller. We should not now return false from the caller.
5104 unallocated_encoding(s);
5105 return;
5108 /* Load the first octaword using the normal predicated load helpers. */
5110 poff = pred_full_reg_offset(s, pg);
5111 if (vsz > 32) {
5113 * Zero-extend the first 32 bits of the predicate into a temporary.
5114 * This avoids triggering an assert making sure we don't have bits
5115 * set within a predicate beyond VQ, but we have lowered VQ to 2
5116 * for this load operation.
5118 TCGv_i64 tmp = tcg_temp_new_i64();
5119 #if HOST_BIG_ENDIAN
5120 poff += 4;
5121 #endif
5122 tcg_gen_ld32u_i64(tmp, cpu_env, poff);
5124 poff = offsetof(CPUARMState, vfp.preg_tmp);
5125 tcg_gen_st_i64(tmp, cpu_env, poff);
5126 tcg_temp_free_i64(tmp);
5129 t_pg = tcg_temp_new_ptr();
5130 tcg_gen_addi_ptr(t_pg, cpu_env, poff);
5132 gen_helper_gvec_mem *fn
5133 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
5134 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
5136 tcg_temp_free_ptr(t_pg);
5139 * Replicate that first octaword.
5140 * The replication happens in units of 32; if the full vector size
5141 * is not a multiple of 32, the final bits are zeroed.
5143 doff = vec_full_reg_offset(s, zt);
5144 vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32);
5145 if (vsz >= 64) {
5146 tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32);
5148 vsz -= vsz_r32;
5149 if (vsz) {
5150 tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0);
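/*
 * Worked example: with VL = 384 bits, vsz == 48 and vsz_r32 == 32, so no
 * replication happens (vsz < 64) and the final 16 bytes are zeroed.  With
 * VL = 768 bits, vsz == vsz_r32 == 96, and dup_mem replicates the first
 * octaword into the other two 32-byte slots with nothing left to zero.
 */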
5154 static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
5156 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5157 return false;
5159 if (a->rm == 31) {
5160 return false;
5162 if (sve_access_check(s)) {
5163 TCGv_i64 addr = new_tmp_a64(s);
5164 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
5165 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5166 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5168 return true;
5171 static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
5173 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5174 return false;
5176 if (sve_access_check(s)) {
5177 TCGv_i64 addr = new_tmp_a64(s);
5178 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
5179 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5181 return true;
5184 /* Load and broadcast element. */
5185 static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
5187 unsigned vsz = vec_full_reg_size(s);
5188 unsigned psz = pred_full_reg_size(s);
5189 unsigned esz = dtype_esz[a->dtype];
5190 unsigned msz = dtype_msz(a->dtype);
5191 TCGLabel *over;
5192 TCGv_i64 temp, clean_addr;
5194 if (!sve_access_check(s)) {
5195 return true;
5198 over = gen_new_label();
5200 /* If the guarding predicate has no bits set, no load occurs. */
5201 if (psz <= 8) {
5202 /* Reduce the pred_esz_masks value simply to reduce the
5203 * size of the code generated here.
5205 uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
5206 temp = tcg_temp_new_i64();
5207 tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
5208 tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
5209 tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
5210 tcg_temp_free_i64(temp);
5211 } else {
5212 TCGv_i32 t32 = tcg_temp_new_i32();
5213 find_last_active(s, t32, esz, a->pg);
5214 tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over);
5215 tcg_temp_free_i32(t32);
5218 /* Load the data. */
5219 temp = tcg_temp_new_i64();
5220 tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz);
5221 clean_addr = gen_mte_check1(s, temp, false, true, msz);
5223 tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
5224 finalize_memop(s, dtype_mop[a->dtype]));
5226 /* Broadcast to *all* elements. */
5227 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
5228 vsz, vsz, temp);
5229 tcg_temp_free_i64(temp);
5231 /* Zero the inactive elements. */
5232 gen_set_label(over);
5233 return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false);
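/*
 * Sketch of the short-predicate fast path above, assuming the usual
 * pred_esz_masks layout of one bit per element: for LD1RW with VL = 256
 * bits, psz == 4, so the branch reduces to testing
 *
 *     temp & pred_esz_masks[MO_32] & 0xffffffffull
 *
 * i.e. only the per-element predicate bits within the 4 live bytes.
 */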
5236 static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
5237 int msz, int esz, int nreg)
5239 static gen_helper_gvec_mem * const fn_single[2][2][4][4] = {
5240 { { { gen_helper_sve_st1bb_r,
5241 gen_helper_sve_st1bh_r,
5242 gen_helper_sve_st1bs_r,
5243 gen_helper_sve_st1bd_r },
5244 { NULL,
5245 gen_helper_sve_st1hh_le_r,
5246 gen_helper_sve_st1hs_le_r,
5247 gen_helper_sve_st1hd_le_r },
5248 { NULL, NULL,
5249 gen_helper_sve_st1ss_le_r,
5250 gen_helper_sve_st1sd_le_r },
5251 { NULL, NULL, NULL,
5252 gen_helper_sve_st1dd_le_r } },
5253 { { gen_helper_sve_st1bb_r,
5254 gen_helper_sve_st1bh_r,
5255 gen_helper_sve_st1bs_r,
5256 gen_helper_sve_st1bd_r },
5257 { NULL,
5258 gen_helper_sve_st1hh_be_r,
5259 gen_helper_sve_st1hs_be_r,
5260 gen_helper_sve_st1hd_be_r },
5261 { NULL, NULL,
5262 gen_helper_sve_st1ss_be_r,
5263 gen_helper_sve_st1sd_be_r },
5264 { NULL, NULL, NULL,
5265 gen_helper_sve_st1dd_be_r } } },
5267 { { { gen_helper_sve_st1bb_r_mte,
5268 gen_helper_sve_st1bh_r_mte,
5269 gen_helper_sve_st1bs_r_mte,
5270 gen_helper_sve_st1bd_r_mte },
5271 { NULL,
5272 gen_helper_sve_st1hh_le_r_mte,
5273 gen_helper_sve_st1hs_le_r_mte,
5274 gen_helper_sve_st1hd_le_r_mte },
5275 { NULL, NULL,
5276 gen_helper_sve_st1ss_le_r_mte,
5277 gen_helper_sve_st1sd_le_r_mte },
5278 { NULL, NULL, NULL,
5279 gen_helper_sve_st1dd_le_r_mte } },
5280 { { gen_helper_sve_st1bb_r_mte,
5281 gen_helper_sve_st1bh_r_mte,
5282 gen_helper_sve_st1bs_r_mte,
5283 gen_helper_sve_st1bd_r_mte },
5284 { NULL,
5285 gen_helper_sve_st1hh_be_r_mte,
5286 gen_helper_sve_st1hs_be_r_mte,
5287 gen_helper_sve_st1hd_be_r_mte },
5288 { NULL, NULL,
5289 gen_helper_sve_st1ss_be_r_mte,
5290 gen_helper_sve_st1sd_be_r_mte },
5291 { NULL, NULL, NULL,
5292 gen_helper_sve_st1dd_be_r_mte } } },
5294 static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = {
5295 { { { gen_helper_sve_st2bb_r,
5296 gen_helper_sve_st2hh_le_r,
5297 gen_helper_sve_st2ss_le_r,
5298 gen_helper_sve_st2dd_le_r },
5299 { gen_helper_sve_st3bb_r,
5300 gen_helper_sve_st3hh_le_r,
5301 gen_helper_sve_st3ss_le_r,
5302 gen_helper_sve_st3dd_le_r },
5303 { gen_helper_sve_st4bb_r,
5304 gen_helper_sve_st4hh_le_r,
5305 gen_helper_sve_st4ss_le_r,
5306 gen_helper_sve_st4dd_le_r } },
5307 { { gen_helper_sve_st2bb_r,
5308 gen_helper_sve_st2hh_be_r,
5309 gen_helper_sve_st2ss_be_r,
5310 gen_helper_sve_st2dd_be_r },
5311 { gen_helper_sve_st3bb_r,
5312 gen_helper_sve_st3hh_be_r,
5313 gen_helper_sve_st3ss_be_r,
5314 gen_helper_sve_st3dd_be_r },
5315 { gen_helper_sve_st4bb_r,
5316 gen_helper_sve_st4hh_be_r,
5317 gen_helper_sve_st4ss_be_r,
5318 gen_helper_sve_st4dd_be_r } } },
5319 { { { gen_helper_sve_st2bb_r_mte,
5320 gen_helper_sve_st2hh_le_r_mte,
5321 gen_helper_sve_st2ss_le_r_mte,
5322 gen_helper_sve_st2dd_le_r_mte },
5323 { gen_helper_sve_st3bb_r_mte,
5324 gen_helper_sve_st3hh_le_r_mte,
5325 gen_helper_sve_st3ss_le_r_mte,
5326 gen_helper_sve_st3dd_le_r_mte },
5327 { gen_helper_sve_st4bb_r_mte,
5328 gen_helper_sve_st4hh_le_r_mte,
5329 gen_helper_sve_st4ss_le_r_mte,
5330 gen_helper_sve_st4dd_le_r_mte } },
5331 { { gen_helper_sve_st2bb_r_mte,
5332 gen_helper_sve_st2hh_be_r_mte,
5333 gen_helper_sve_st2ss_be_r_mte,
5334 gen_helper_sve_st2dd_be_r_mte },
5335 { gen_helper_sve_st3bb_r_mte,
5336 gen_helper_sve_st3hh_be_r_mte,
5337 gen_helper_sve_st3ss_be_r_mte,
5338 gen_helper_sve_st3dd_be_r_mte },
5339 { gen_helper_sve_st4bb_r_mte,
5340 gen_helper_sve_st4hh_be_r_mte,
5341 gen_helper_sve_st4ss_be_r_mte,
5342 gen_helper_sve_st4dd_be_r_mte } } },
5344 gen_helper_gvec_mem *fn;
5345 int be = s->be_data == MO_BE;
5347 if (nreg == 0) {
5348 /* ST1 */
5349 fn = fn_single[s->mte_active[0]][be][msz][esz];
5350 nreg = 1;
5351 } else {
5352 /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
5353 assert(msz == esz);
5354 fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
5356 assert(fn != NULL);
5357 do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn);
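/*
 * Indexing example: ST1B { z0.s } selects
 * fn_single[mte][be][MO_8][MO_32] == gen_helper_sve_st1bs_r (byte stores
 * need no endian variant), while ST3H { z0.h - z2.h } takes
 * fn_multiple[mte][be][a->nreg - 1 == 1][MO_16], where msz == esz is
 * guaranteed by the encoding.
 */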
5360 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
5362 if (a->rm == 31 || a->msz > a->esz) {
5363 return false;
5365 if (sve_access_check(s)) {
5366 TCGv_i64 addr = new_tmp_a64(s);
5367 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
5368 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5369 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5371 return true;
5374 static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
5376 if (a->msz > a->esz) {
5377 return false;
5379 if (sve_access_check(s)) {
5380 int vsz = vec_full_reg_size(s);
5381 int elements = vsz >> a->esz;
5382 TCGv_i64 addr = new_tmp_a64(s);
5384 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
5385 (a->imm * elements * (a->nreg + 1)) << a->msz);
5386 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5388 return true;
5391 /*
5392  *** SVE gather loads / scatter stores
5393  */
5395 static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
5396 int scale, TCGv_i64 scalar, int msz, bool is_write,
5397 gen_helper_gvec_mem_scatter *fn)
5399 unsigned vsz = vec_full_reg_size(s);
5400 TCGv_ptr t_zm = tcg_temp_new_ptr();
5401 TCGv_ptr t_pg = tcg_temp_new_ptr();
5402 TCGv_ptr t_zt = tcg_temp_new_ptr();
5403 int desc = 0;
5405 if (s->mte_active[0]) {
5406 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
5407 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
5408 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
5409 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
5410 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
5411 desc <<= SVE_MTEDESC_SHIFT;
5413 desc = simd_desc(vsz, vsz, desc | scale);
5415 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
5416 tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
5417 tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
5418 fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
5420 tcg_temp_free_ptr(t_zt);
5421 tcg_temp_free_ptr(t_zm);
5422 tcg_temp_free_ptr(t_pg);
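/*
 * Descriptor example: a scaled LD1D gather such as
 * LD1D { z0.d }, p0/z, [x1, z2.d, LSL #3] reaches here with
 * scale == a->scale * a->msz == 3, which occupies the low bits of the
 * simd_desc() data field below any shifted-up MTE fields.
 */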
5425 /* Indexed by [mte][be][ff][xs][u][msz]. */
5426 static gen_helper_gvec_mem_scatter * const
5427 gather_load_fn32[2][2][2][2][2][3] = {
5428 { /* MTE Inactive */
5429 { /* Little-endian */
5430 { { { gen_helper_sve_ldbss_zsu,
5431 gen_helper_sve_ldhss_le_zsu,
5432 NULL, },
5433 { gen_helper_sve_ldbsu_zsu,
5434 gen_helper_sve_ldhsu_le_zsu,
5435 gen_helper_sve_ldss_le_zsu, } },
5436 { { gen_helper_sve_ldbss_zss,
5437 gen_helper_sve_ldhss_le_zss,
5438 NULL, },
5439 { gen_helper_sve_ldbsu_zss,
5440 gen_helper_sve_ldhsu_le_zss,
5441 gen_helper_sve_ldss_le_zss, } } },
5443 /* First-fault */
5444 { { { gen_helper_sve_ldffbss_zsu,
5445 gen_helper_sve_ldffhss_le_zsu,
5446 NULL, },
5447 { gen_helper_sve_ldffbsu_zsu,
5448 gen_helper_sve_ldffhsu_le_zsu,
5449 gen_helper_sve_ldffss_le_zsu, } },
5450 { { gen_helper_sve_ldffbss_zss,
5451 gen_helper_sve_ldffhss_le_zss,
5452 NULL, },
5453 { gen_helper_sve_ldffbsu_zss,
5454 gen_helper_sve_ldffhsu_le_zss,
5455 gen_helper_sve_ldffss_le_zss, } } } },
5457 { /* Big-endian */
5458 { { { gen_helper_sve_ldbss_zsu,
5459 gen_helper_sve_ldhss_be_zsu,
5460 NULL, },
5461 { gen_helper_sve_ldbsu_zsu,
5462 gen_helper_sve_ldhsu_be_zsu,
5463 gen_helper_sve_ldss_be_zsu, } },
5464 { { gen_helper_sve_ldbss_zss,
5465 gen_helper_sve_ldhss_be_zss,
5466 NULL, },
5467 { gen_helper_sve_ldbsu_zss,
5468 gen_helper_sve_ldhsu_be_zss,
5469 gen_helper_sve_ldss_be_zss, } } },
5471 /* First-fault */
5472 { { { gen_helper_sve_ldffbss_zsu,
5473 gen_helper_sve_ldffhss_be_zsu,
5474 NULL, },
5475 { gen_helper_sve_ldffbsu_zsu,
5476 gen_helper_sve_ldffhsu_be_zsu,
5477 gen_helper_sve_ldffss_be_zsu, } },
5478 { { gen_helper_sve_ldffbss_zss,
5479 gen_helper_sve_ldffhss_be_zss,
5480 NULL, },
5481 { gen_helper_sve_ldffbsu_zss,
5482 gen_helper_sve_ldffhsu_be_zss,
5483 gen_helper_sve_ldffss_be_zss, } } } } },
5484 { /* MTE Active */
5485 { /* Little-endian */
5486 { { { gen_helper_sve_ldbss_zsu_mte,
5487 gen_helper_sve_ldhss_le_zsu_mte,
5488 NULL, },
5489 { gen_helper_sve_ldbsu_zsu_mte,
5490 gen_helper_sve_ldhsu_le_zsu_mte,
5491 gen_helper_sve_ldss_le_zsu_mte, } },
5492 { { gen_helper_sve_ldbss_zss_mte,
5493 gen_helper_sve_ldhss_le_zss_mte,
5494 NULL, },
5495 { gen_helper_sve_ldbsu_zss_mte,
5496 gen_helper_sve_ldhsu_le_zss_mte,
5497 gen_helper_sve_ldss_le_zss_mte, } } },
5499 /* First-fault */
5500 { { { gen_helper_sve_ldffbss_zsu_mte,
5501 gen_helper_sve_ldffhss_le_zsu_mte,
5502 NULL, },
5503 { gen_helper_sve_ldffbsu_zsu_mte,
5504 gen_helper_sve_ldffhsu_le_zsu_mte,
5505 gen_helper_sve_ldffss_le_zsu_mte, } },
5506 { { gen_helper_sve_ldffbss_zss_mte,
5507 gen_helper_sve_ldffhss_le_zss_mte,
5508 NULL, },
5509 { gen_helper_sve_ldffbsu_zss_mte,
5510 gen_helper_sve_ldffhsu_le_zss_mte,
5511 gen_helper_sve_ldffss_le_zss_mte, } } } },
5513 { /* Big-endian */
5514 { { { gen_helper_sve_ldbss_zsu_mte,
5515 gen_helper_sve_ldhss_be_zsu_mte,
5516 NULL, },
5517 { gen_helper_sve_ldbsu_zsu_mte,
5518 gen_helper_sve_ldhsu_be_zsu_mte,
5519 gen_helper_sve_ldss_be_zsu_mte, } },
5520 { { gen_helper_sve_ldbss_zss_mte,
5521 gen_helper_sve_ldhss_be_zss_mte,
5522 NULL, },
5523 { gen_helper_sve_ldbsu_zss_mte,
5524 gen_helper_sve_ldhsu_be_zss_mte,
5525 gen_helper_sve_ldss_be_zss_mte, } } },
5527 /* First-fault */
5528 { { { gen_helper_sve_ldffbss_zsu_mte,
5529 gen_helper_sve_ldffhss_be_zsu_mte,
5530 NULL, },
5531 { gen_helper_sve_ldffbsu_zsu_mte,
5532 gen_helper_sve_ldffhsu_be_zsu_mte,
5533 gen_helper_sve_ldffss_be_zsu_mte, } },
5534 { { gen_helper_sve_ldffbss_zss_mte,
5535 gen_helper_sve_ldffhss_be_zss_mte,
5536 NULL, },
5537 { gen_helper_sve_ldffbsu_zss_mte,
5538 gen_helper_sve_ldffhsu_be_zss_mte,
5539 gen_helper_sve_ldffss_be_zss_mte, } } } } },
5542 /* Note that we overload xs=2 to indicate 64-bit offset. */
5543 static gen_helper_gvec_mem_scatter * const
5544 gather_load_fn64[2][2][2][3][2][4] = {
5545 { /* MTE Inactive */
5546 { /* Little-endian */
5547 { { { gen_helper_sve_ldbds_zsu,
5548 gen_helper_sve_ldhds_le_zsu,
5549 gen_helper_sve_ldsds_le_zsu,
5550 NULL, },
5551 { gen_helper_sve_ldbdu_zsu,
5552 gen_helper_sve_ldhdu_le_zsu,
5553 gen_helper_sve_ldsdu_le_zsu,
5554 gen_helper_sve_lddd_le_zsu, } },
5555 { { gen_helper_sve_ldbds_zss,
5556 gen_helper_sve_ldhds_le_zss,
5557 gen_helper_sve_ldsds_le_zss,
5558 NULL, },
5559 { gen_helper_sve_ldbdu_zss,
5560 gen_helper_sve_ldhdu_le_zss,
5561 gen_helper_sve_ldsdu_le_zss,
5562 gen_helper_sve_lddd_le_zss, } },
5563 { { gen_helper_sve_ldbds_zd,
5564 gen_helper_sve_ldhds_le_zd,
5565 gen_helper_sve_ldsds_le_zd,
5566 NULL, },
5567 { gen_helper_sve_ldbdu_zd,
5568 gen_helper_sve_ldhdu_le_zd,
5569 gen_helper_sve_ldsdu_le_zd,
5570 gen_helper_sve_lddd_le_zd, } } },
5572 /* First-fault */
5573 { { { gen_helper_sve_ldffbds_zsu,
5574 gen_helper_sve_ldffhds_le_zsu,
5575 gen_helper_sve_ldffsds_le_zsu,
5576 NULL, },
5577 { gen_helper_sve_ldffbdu_zsu,
5578 gen_helper_sve_ldffhdu_le_zsu,
5579 gen_helper_sve_ldffsdu_le_zsu,
5580 gen_helper_sve_ldffdd_le_zsu, } },
5581 { { gen_helper_sve_ldffbds_zss,
5582 gen_helper_sve_ldffhds_le_zss,
5583 gen_helper_sve_ldffsds_le_zss,
5584 NULL, },
5585 { gen_helper_sve_ldffbdu_zss,
5586 gen_helper_sve_ldffhdu_le_zss,
5587 gen_helper_sve_ldffsdu_le_zss,
5588 gen_helper_sve_ldffdd_le_zss, } },
5589 { { gen_helper_sve_ldffbds_zd,
5590 gen_helper_sve_ldffhds_le_zd,
5591 gen_helper_sve_ldffsds_le_zd,
5592 NULL, },
5593 { gen_helper_sve_ldffbdu_zd,
5594 gen_helper_sve_ldffhdu_le_zd,
5595 gen_helper_sve_ldffsdu_le_zd,
5596 gen_helper_sve_ldffdd_le_zd, } } } },
5597 { /* Big-endian */
5598 { { { gen_helper_sve_ldbds_zsu,
5599 gen_helper_sve_ldhds_be_zsu,
5600 gen_helper_sve_ldsds_be_zsu,
5601 NULL, },
5602 { gen_helper_sve_ldbdu_zsu,
5603 gen_helper_sve_ldhdu_be_zsu,
5604 gen_helper_sve_ldsdu_be_zsu,
5605 gen_helper_sve_lddd_be_zsu, } },
5606 { { gen_helper_sve_ldbds_zss,
5607 gen_helper_sve_ldhds_be_zss,
5608 gen_helper_sve_ldsds_be_zss,
5609 NULL, },
5610 { gen_helper_sve_ldbdu_zss,
5611 gen_helper_sve_ldhdu_be_zss,
5612 gen_helper_sve_ldsdu_be_zss,
5613 gen_helper_sve_lddd_be_zss, } },
5614 { { gen_helper_sve_ldbds_zd,
5615 gen_helper_sve_ldhds_be_zd,
5616 gen_helper_sve_ldsds_be_zd,
5617 NULL, },
5618 { gen_helper_sve_ldbdu_zd,
5619 gen_helper_sve_ldhdu_be_zd,
5620 gen_helper_sve_ldsdu_be_zd,
5621 gen_helper_sve_lddd_be_zd, } } },
5623 /* First-fault */
5624 { { { gen_helper_sve_ldffbds_zsu,
5625 gen_helper_sve_ldffhds_be_zsu,
5626 gen_helper_sve_ldffsds_be_zsu,
5627 NULL, },
5628 { gen_helper_sve_ldffbdu_zsu,
5629 gen_helper_sve_ldffhdu_be_zsu,
5630 gen_helper_sve_ldffsdu_be_zsu,
5631 gen_helper_sve_ldffdd_be_zsu, } },
5632 { { gen_helper_sve_ldffbds_zss,
5633 gen_helper_sve_ldffhds_be_zss,
5634 gen_helper_sve_ldffsds_be_zss,
5635 NULL, },
5636 { gen_helper_sve_ldffbdu_zss,
5637 gen_helper_sve_ldffhdu_be_zss,
5638 gen_helper_sve_ldffsdu_be_zss,
5639 gen_helper_sve_ldffdd_be_zss, } },
5640 { { gen_helper_sve_ldffbds_zd,
5641 gen_helper_sve_ldffhds_be_zd,
5642 gen_helper_sve_ldffsds_be_zd,
5643 NULL, },
5644 { gen_helper_sve_ldffbdu_zd,
5645 gen_helper_sve_ldffhdu_be_zd,
5646 gen_helper_sve_ldffsdu_be_zd,
5647 gen_helper_sve_ldffdd_be_zd, } } } } },
5648 { /* MTE Active */
5649 { /* Little-endian */
5650 { { { gen_helper_sve_ldbds_zsu_mte,
5651 gen_helper_sve_ldhds_le_zsu_mte,
5652 gen_helper_sve_ldsds_le_zsu_mte,
5653 NULL, },
5654 { gen_helper_sve_ldbdu_zsu_mte,
5655 gen_helper_sve_ldhdu_le_zsu_mte,
5656 gen_helper_sve_ldsdu_le_zsu_mte,
5657 gen_helper_sve_lddd_le_zsu_mte, } },
5658 { { gen_helper_sve_ldbds_zss_mte,
5659 gen_helper_sve_ldhds_le_zss_mte,
5660 gen_helper_sve_ldsds_le_zss_mte,
5661 NULL, },
5662 { gen_helper_sve_ldbdu_zss_mte,
5663 gen_helper_sve_ldhdu_le_zss_mte,
5664 gen_helper_sve_ldsdu_le_zss_mte,
5665 gen_helper_sve_lddd_le_zss_mte, } },
5666 { { gen_helper_sve_ldbds_zd_mte,
5667 gen_helper_sve_ldhds_le_zd_mte,
5668 gen_helper_sve_ldsds_le_zd_mte,
5669 NULL, },
5670 { gen_helper_sve_ldbdu_zd_mte,
5671 gen_helper_sve_ldhdu_le_zd_mte,
5672 gen_helper_sve_ldsdu_le_zd_mte,
5673 gen_helper_sve_lddd_le_zd_mte, } } },
5675 /* First-fault */
5676 { { { gen_helper_sve_ldffbds_zsu_mte,
5677 gen_helper_sve_ldffhds_le_zsu_mte,
5678 gen_helper_sve_ldffsds_le_zsu_mte,
5679 NULL, },
5680 { gen_helper_sve_ldffbdu_zsu_mte,
5681 gen_helper_sve_ldffhdu_le_zsu_mte,
5682 gen_helper_sve_ldffsdu_le_zsu_mte,
5683 gen_helper_sve_ldffdd_le_zsu_mte, } },
5684 { { gen_helper_sve_ldffbds_zss_mte,
5685 gen_helper_sve_ldffhds_le_zss_mte,
5686 gen_helper_sve_ldffsds_le_zss_mte,
5687 NULL, },
5688 { gen_helper_sve_ldffbdu_zss_mte,
5689 gen_helper_sve_ldffhdu_le_zss_mte,
5690 gen_helper_sve_ldffsdu_le_zss_mte,
5691 gen_helper_sve_ldffdd_le_zss_mte, } },
5692 { { gen_helper_sve_ldffbds_zd_mte,
5693 gen_helper_sve_ldffhds_le_zd_mte,
5694 gen_helper_sve_ldffsds_le_zd_mte,
5695 NULL, },
5696 { gen_helper_sve_ldffbdu_zd_mte,
5697 gen_helper_sve_ldffhdu_le_zd_mte,
5698 gen_helper_sve_ldffsdu_le_zd_mte,
5699 gen_helper_sve_ldffdd_le_zd_mte, } } } },
5700 { /* Big-endian */
5701 { { { gen_helper_sve_ldbds_zsu_mte,
5702 gen_helper_sve_ldhds_be_zsu_mte,
5703 gen_helper_sve_ldsds_be_zsu_mte,
5704 NULL, },
5705 { gen_helper_sve_ldbdu_zsu_mte,
5706 gen_helper_sve_ldhdu_be_zsu_mte,
5707 gen_helper_sve_ldsdu_be_zsu_mte,
5708 gen_helper_sve_lddd_be_zsu_mte, } },
5709 { { gen_helper_sve_ldbds_zss_mte,
5710 gen_helper_sve_ldhds_be_zss_mte,
5711 gen_helper_sve_ldsds_be_zss_mte,
5712 NULL, },
5713 { gen_helper_sve_ldbdu_zss_mte,
5714 gen_helper_sve_ldhdu_be_zss_mte,
5715 gen_helper_sve_ldsdu_be_zss_mte,
5716 gen_helper_sve_lddd_be_zss_mte, } },
5717 { { gen_helper_sve_ldbds_zd_mte,
5718 gen_helper_sve_ldhds_be_zd_mte,
5719 gen_helper_sve_ldsds_be_zd_mte,
5720 NULL, },
5721 { gen_helper_sve_ldbdu_zd_mte,
5722 gen_helper_sve_ldhdu_be_zd_mte,
5723 gen_helper_sve_ldsdu_be_zd_mte,
5724 gen_helper_sve_lddd_be_zd_mte, } } },
5726 /* First-fault */
5727 { { { gen_helper_sve_ldffbds_zsu_mte,
5728 gen_helper_sve_ldffhds_be_zsu_mte,
5729 gen_helper_sve_ldffsds_be_zsu_mte,
5730 NULL, },
5731 { gen_helper_sve_ldffbdu_zsu_mte,
5732 gen_helper_sve_ldffhdu_be_zsu_mte,
5733 gen_helper_sve_ldffsdu_be_zsu_mte,
5734 gen_helper_sve_ldffdd_be_zsu_mte, } },
5735 { { gen_helper_sve_ldffbds_zss_mte,
5736 gen_helper_sve_ldffhds_be_zss_mte,
5737 gen_helper_sve_ldffsds_be_zss_mte,
5738 NULL, },
5739 { gen_helper_sve_ldffbdu_zss_mte,
5740 gen_helper_sve_ldffhdu_be_zss_mte,
5741 gen_helper_sve_ldffsdu_be_zss_mte,
5742 gen_helper_sve_ldffdd_be_zss_mte, } },
5743 { { gen_helper_sve_ldffbds_zd_mte,
5744 gen_helper_sve_ldffhds_be_zd_mte,
5745 gen_helper_sve_ldffsds_be_zd_mte,
5746 NULL, },
5747 { gen_helper_sve_ldffbdu_zd_mte,
5748 gen_helper_sve_ldffhdu_be_zd_mte,
5749 gen_helper_sve_ldffsdu_be_zd_mte,
5750 gen_helper_sve_ldffdd_be_zd_mte, } } } } },
5753 static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
5755 gen_helper_gvec_mem_scatter *fn = NULL;
5756 bool be = s->be_data == MO_BE;
5757 bool mte = s->mte_active[0];
5759 if (!sve_access_check(s)) {
5760 return true;
5763 switch (a->esz) {
5764 case MO_32:
5765 fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz];
5766 break;
5767 case MO_64:
5768 fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
5769 break;
5771 assert(fn != NULL);
5773 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
5774 cpu_reg_sp(s, a->rn), a->msz, false, fn);
5775 return true;
5778 static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
5780 gen_helper_gvec_mem_scatter *fn = NULL;
5781 bool be = s->be_data == MO_BE;
5782 bool mte = s->mte_active[0];
5784 if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
5785 return false;
5787 if (!sve_access_check(s)) {
5788 return true;
5791 switch (a->esz) {
5792 case MO_32:
5793 fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz];
5794 break;
5795 case MO_64:
5796 fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz];
5797 break;
5799 assert(fn != NULL);
5801 /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
5802 * by loading the immediate into the scalar parameter.
5804 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5805 tcg_constant_i64(a->imm << a->msz), a->msz, false, fn);
5806 return true;
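/*
 * Worked example: LD1W { z0.s }, p0/z, [z1.s, #8] arrives here with
 * a->imm == 2 and a->msz == MO_32, so the scalar slot receives
 * tcg_constant_i64(2 << 2) == 8 and the vector base goes in the zm slot,
 * letting one helper serve both addressing forms.
 */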
5809 static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
5811 gen_helper_gvec_mem_scatter *fn = NULL;
5812 bool be = s->be_data == MO_BE;
5813 bool mte = s->mte_active[0];
    if (a->esz < a->msz + !a->u) {
        return false;
    }
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = gather_load_fn32[mte][be][0][0][a->u][a->msz];
        break;
    case MO_64:
        fn = gather_load_fn64[mte][be][0][2][a->u][a->msz];
        break;
    }
    assert(fn != NULL);

    do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
               cpu_reg(s, a->rm), a->msz, false, fn);
    return true;
}

/* Indexed by [mte][be][xs][msz]. */
static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { gen_helper_sve_stbs_zsu,
              gen_helper_sve_sths_le_zsu,
              gen_helper_sve_stss_le_zsu, },
            { gen_helper_sve_stbs_zss,
              gen_helper_sve_sths_le_zss,
              gen_helper_sve_stss_le_zss, } },
        { /* Big-endian */
            { gen_helper_sve_stbs_zsu,
              gen_helper_sve_sths_be_zsu,
              gen_helper_sve_stss_be_zsu, },
            { gen_helper_sve_stbs_zss,
              gen_helper_sve_sths_be_zss,
              gen_helper_sve_stss_be_zss, } } },
    { /* MTE Active */
        { /* Little-endian */
            { gen_helper_sve_stbs_zsu_mte,
              gen_helper_sve_sths_le_zsu_mte,
              gen_helper_sve_stss_le_zsu_mte, },
            { gen_helper_sve_stbs_zss_mte,
              gen_helper_sve_sths_le_zss_mte,
              gen_helper_sve_stss_le_zss_mte, } },
        { /* Big-endian */
            { gen_helper_sve_stbs_zsu_mte,
              gen_helper_sve_sths_be_zsu_mte,
              gen_helper_sve_stss_be_zsu_mte, },
            { gen_helper_sve_stbs_zss_mte,
              gen_helper_sve_sths_be_zss_mte,
              gen_helper_sve_stss_be_zss_mte, } } },
};

/* Note that we overload xs=2 to indicate 64-bit offset. */
static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
    { /* MTE Inactive */
        { /* Little-endian */
            { gen_helper_sve_stbd_zsu,
              gen_helper_sve_sthd_le_zsu,
              gen_helper_sve_stsd_le_zsu,
              gen_helper_sve_stdd_le_zsu, },
            { gen_helper_sve_stbd_zss,
              gen_helper_sve_sthd_le_zss,
              gen_helper_sve_stsd_le_zss,
              gen_helper_sve_stdd_le_zss, },
            { gen_helper_sve_stbd_zd,
              gen_helper_sve_sthd_le_zd,
              gen_helper_sve_stsd_le_zd,
              gen_helper_sve_stdd_le_zd, } },
        { /* Big-endian */
            { gen_helper_sve_stbd_zsu,
              gen_helper_sve_sthd_be_zsu,
              gen_helper_sve_stsd_be_zsu,
              gen_helper_sve_stdd_be_zsu, },
            { gen_helper_sve_stbd_zss,
              gen_helper_sve_sthd_be_zss,
              gen_helper_sve_stsd_be_zss,
              gen_helper_sve_stdd_be_zss, },
            { gen_helper_sve_stbd_zd,
              gen_helper_sve_sthd_be_zd,
              gen_helper_sve_stsd_be_zd,
              gen_helper_sve_stdd_be_zd, } } },
    { /* MTE Active */
        { /* Little-endian */
            { gen_helper_sve_stbd_zsu_mte,
              gen_helper_sve_sthd_le_zsu_mte,
              gen_helper_sve_stsd_le_zsu_mte,
              gen_helper_sve_stdd_le_zsu_mte, },
            { gen_helper_sve_stbd_zss_mte,
              gen_helper_sve_sthd_le_zss_mte,
              gen_helper_sve_stsd_le_zss_mte,
              gen_helper_sve_stdd_le_zss_mte, },
            { gen_helper_sve_stbd_zd_mte,
              gen_helper_sve_sthd_le_zd_mte,
              gen_helper_sve_stsd_le_zd_mte,
              gen_helper_sve_stdd_le_zd_mte, } },
        { /* Big-endian */
            { gen_helper_sve_stbd_zsu_mte,
              gen_helper_sve_sthd_be_zsu_mte,
              gen_helper_sve_stsd_be_zsu_mte,
              gen_helper_sve_stdd_be_zsu_mte, },
            { gen_helper_sve_stbd_zss_mte,
              gen_helper_sve_sthd_be_zss_mte,
              gen_helper_sve_stsd_be_zss_mte,
              gen_helper_sve_stdd_be_zss_mte, },
            { gen_helper_sve_stbd_zd_mte,
              gen_helper_sve_sthd_be_zd_mte,
              gen_helper_sve_stsd_be_zd_mte,
              gen_helper_sve_stdd_be_zd_mte, } } },
};

static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
{
    gen_helper_gvec_mem_scatter *fn;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }
    switch (a->esz) {
    case MO_32:
        fn = scatter_store_fn32[mte][be][a->xs][a->msz];
        break;
    case MO_64:
        fn = scatter_store_fn64[mte][be][a->xs][a->msz];
        break;
    default:
        g_assert_not_reached();
    }
    do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
               cpu_reg_sp(s, a->rn), a->msz, true, fn);
    return true;
}
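
/*
 * In ST1_zprz above, a->scale is a single bit: when set, the vector
 * index elements are scaled by the memory access size, i.e. shifted
 * left by a->msz, hence the a->scale * a->msz argument to do_mem_zpz.
 */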

static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
{
    gen_helper_gvec_mem_scatter *fn = NULL;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = scatter_store_fn32[mte][be][0][a->msz];
        break;
    case MO_64:
        fn = scatter_store_fn64[mte][be][2][a->msz];
        break;
    }
    assert(fn != NULL);

    /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
     * by loading the immediate into the scalar parameter.
     */
    do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
               tcg_constant_i64(a->imm << a->msz), a->msz, true, fn);
    return true;
}

static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
{
    gen_helper_gvec_mem_scatter *fn;
    bool be = s->be_data == MO_BE;
    bool mte = s->mte_active[0];

    if (a->esz < a->msz) {
        return false;
    }
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    switch (a->esz) {
    case MO_32:
        fn = scatter_store_fn32[mte][be][0][a->msz];
        break;
    case MO_64:
        fn = scatter_store_fn64[mte][be][2][a->msz];
        break;
    default:
        g_assert_not_reached();
    }

    do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
               cpu_reg(s, a->rm), a->msz, true, fn);
    return true;
}

/*
 * Prefetches
 */

static bool trans_PRF(DisasContext *s, arg_PRF *a)
{
    /* Prefetch is a nop within QEMU. */
    (void)sve_access_check(s);
    return true;
}

static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
{
    if (a->rm == 31) {
        return false;
    }
    /* Prefetch is a nop within QEMU. */
    (void)sve_access_check(s);
    return true;
}

/*
 * Move Prefix
 *
 * TODO: The implementation so far could handle predicated merging movprfx.
 * The helper functions as written take an extra source register to
 * use in the operation, but the result is only written when predication
 * succeeds.  For unpredicated movprfx, we need to rearrange the helpers
 * to allow the final write back to the destination to be unconditional.
 * For predicated zeroing movprfx, we need to rearrange the helpers to
 * allow the final write back to zero inactives.
 *
 * In the meantime, just emit the moves.
 */

TRANS_FEAT(MOVPRFX, aa64_sve, do_mov_z, a->rd, a->rn)
TRANS_FEAT(MOVPRFX_m, aa64_sve, do_sel_z, a->rd, a->rn, a->rd, a->pg, a->esz)
TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false)

/*
 * SVE2 Integer Multiply - Unpredicated
 */

TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a)

static gen_helper_gvec_3 * const smulh_zzz_fns[4] = {
    gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
    gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d,
};
TRANS_FEAT(SMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           smulh_zzz_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const umulh_zzz_fns[4] = {
    gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h,
    gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d,
};
TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           umulh_zzz_fns[a->esz], a, 0)

TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           gen_helper_gvec_pmul_b, a, 0)

static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = {
    gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h,
    gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d,
};
TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqdmulh_zzz_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = {
    gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h,
    gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d,
};
TRANS_FEAT(SQRDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqrdmulh_zzz_fns[a->esz], a, 0)

/*
 * SVE2 Integer - Predicated
 */

static gen_helper_gvec_4 * const sadlp_fns[4] = {
    NULL, gen_helper_sve2_sadalp_zpzz_h,
    gen_helper_sve2_sadalp_zpzz_s, gen_helper_sve2_sadalp_zpzz_d,
};
TRANS_FEAT(SADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz,
           sadlp_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const uadlp_fns[4] = {
    NULL, gen_helper_sve2_uadalp_zpzz_h,
    gen_helper_sve2_uadalp_zpzz_s, gen_helper_sve2_uadalp_zpzz_d,
};
TRANS_FEAT(UADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz,
           uadlp_fns[a->esz], a, 0)

/*
 * SVE2 integer unary operations (predicated)
 */

TRANS_FEAT(URECPE, aa64_sve2, gen_gvec_ool_arg_zpz,
           a->esz == 2 ? gen_helper_sve2_urecpe_s : NULL, a, 0)

TRANS_FEAT(URSQRTE, aa64_sve2, gen_gvec_ool_arg_zpz,
           a->esz == 2 ? gen_helper_sve2_ursqrte_s : NULL, a, 0)

static gen_helper_gvec_3 * const sqabs_fns[4] = {
    gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h,
    gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d,
};
TRANS_FEAT(SQABS, aa64_sve2, gen_gvec_ool_arg_zpz, sqabs_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sqneg_fns[4] = {
    gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h,
    gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d,
};
TRANS_FEAT(SQNEG, aa64_sve2, gen_gvec_ool_arg_zpz, sqneg_fns[a->esz], a, 0)

DO_ZPZZ(SQSHL, aa64_sve2, sve2_sqshl)
DO_ZPZZ(SQRSHL, aa64_sve2, sve2_sqrshl)
DO_ZPZZ(SRSHL, aa64_sve2, sve2_srshl)

DO_ZPZZ(UQSHL, aa64_sve2, sve2_uqshl)
DO_ZPZZ(UQRSHL, aa64_sve2, sve2_uqrshl)
DO_ZPZZ(URSHL, aa64_sve2, sve2_urshl)

DO_ZPZZ(SHADD, aa64_sve2, sve2_shadd)
DO_ZPZZ(SRHADD, aa64_sve2, sve2_srhadd)
DO_ZPZZ(SHSUB, aa64_sve2, sve2_shsub)

DO_ZPZZ(UHADD, aa64_sve2, sve2_uhadd)
DO_ZPZZ(URHADD, aa64_sve2, sve2_urhadd)
DO_ZPZZ(UHSUB, aa64_sve2, sve2_uhsub)

DO_ZPZZ(ADDP, aa64_sve2, sve2_addp)
DO_ZPZZ(SMAXP, aa64_sve2, sve2_smaxp)
DO_ZPZZ(UMAXP, aa64_sve2, sve2_umaxp)
DO_ZPZZ(SMINP, aa64_sve2, sve2_sminp)
DO_ZPZZ(UMINP, aa64_sve2, sve2_uminp)

DO_ZPZZ(SQADD_zpzz, aa64_sve2, sve2_sqadd)
DO_ZPZZ(UQADD_zpzz, aa64_sve2, sve2_uqadd)
DO_ZPZZ(SQSUB_zpzz, aa64_sve2, sve2_sqsub)
DO_ZPZZ(UQSUB_zpzz, aa64_sve2, sve2_uqsub)
DO_ZPZZ(SUQADD, aa64_sve2, sve2_suqadd)
DO_ZPZZ(USQADD, aa64_sve2, sve2_usqadd)

/*
 * SVE2 Widening Integer Arithmetic
 */

static gen_helper_gvec_3 * const saddl_fns[4] = {
    NULL, gen_helper_sve2_saddl_h,
    gen_helper_sve2_saddl_s, gen_helper_sve2_saddl_d,
};
TRANS_FEAT(SADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           saddl_fns[a->esz], a, 0)
TRANS_FEAT(SADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           saddl_fns[a->esz], a, 3)
TRANS_FEAT(SADDLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
           saddl_fns[a->esz], a, 2)

static gen_helper_gvec_3 * const ssubl_fns[4] = {
    NULL, gen_helper_sve2_ssubl_h,
    gen_helper_sve2_ssubl_s, gen_helper_sve2_ssubl_d,
};
TRANS_FEAT(SSUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           ssubl_fns[a->esz], a, 0)
TRANS_FEAT(SSUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           ssubl_fns[a->esz], a, 3)
TRANS_FEAT(SSUBLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
           ssubl_fns[a->esz], a, 2)
TRANS_FEAT(SSUBLTB, aa64_sve2, gen_gvec_ool_arg_zzz,
           ssubl_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const sabdl_fns[4] = {
    NULL, gen_helper_sve2_sabdl_h,
    gen_helper_sve2_sabdl_s, gen_helper_sve2_sabdl_d,
};
TRANS_FEAT(SABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           sabdl_fns[a->esz], a, 0)
TRANS_FEAT(SABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           sabdl_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const uaddl_fns[4] = {
    NULL, gen_helper_sve2_uaddl_h,
    gen_helper_sve2_uaddl_s, gen_helper_sve2_uaddl_d,
};
TRANS_FEAT(UADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           uaddl_fns[a->esz], a, 0)
TRANS_FEAT(UADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           uaddl_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const usubl_fns[4] = {
    NULL, gen_helper_sve2_usubl_h,
    gen_helper_sve2_usubl_s, gen_helper_sve2_usubl_d,
};
TRANS_FEAT(USUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           usubl_fns[a->esz], a, 0)
TRANS_FEAT(USUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           usubl_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const uabdl_fns[4] = {
    NULL, gen_helper_sve2_uabdl_h,
    gen_helper_sve2_uabdl_s, gen_helper_sve2_uabdl_d,
};
TRANS_FEAT(UABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
           uabdl_fns[a->esz], a, 0)
TRANS_FEAT(UABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
           uabdl_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const sqdmull_fns[4] = {
    NULL, gen_helper_sve2_sqdmull_zzz_h,
    gen_helper_sve2_sqdmull_zzz_s, gen_helper_sve2_sqdmull_zzz_d,
};
TRANS_FEAT(SQDMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqdmull_fns[a->esz], a, 0)
TRANS_FEAT(SQDMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqdmull_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const smull_fns[4] = {
    NULL, gen_helper_sve2_smull_zzz_h,
    gen_helper_sve2_smull_zzz_s, gen_helper_sve2_smull_zzz_d,
};
TRANS_FEAT(SMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           smull_fns[a->esz], a, 0)
TRANS_FEAT(SMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           smull_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const umull_fns[4] = {
    NULL, gen_helper_sve2_umull_zzz_h,
    gen_helper_sve2_umull_zzz_s, gen_helper_sve2_umull_zzz_d,
};
TRANS_FEAT(UMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           umull_fns[a->esz], a, 0)
TRANS_FEAT(UMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
           umull_fns[a->esz], a, 3)

static gen_helper_gvec_3 * const eoril_fns[4] = {
    gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
    gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
};
TRANS_FEAT(EORBT, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 2)
TRANS_FEAT(EORTB, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 1)

static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
        NULL, gen_helper_sve2_pmull_d,
    };
    if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) {
        return false;
    }
    return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
}

TRANS_FEAT(PMULLB, aa64_sve2, do_trans_pmull, a, false)
TRANS_FEAT(PMULLT, aa64_sve2, do_trans_pmull, a, true)
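
/*
 * In do_trans_pmull, esz == 0 selects the 128-bit product form, which
 * is why it carries the extra aa64_sve2_pmull128 gate; the MO_32 slot
 * is NULL because no polynomial multiply produces 32-bit elements.
 */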

static gen_helper_gvec_3 * const saddw_fns[4] = {
    NULL, gen_helper_sve2_saddw_h,
    gen_helper_sve2_saddw_s, gen_helper_sve2_saddw_d,
};
TRANS_FEAT(SADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 0)
TRANS_FEAT(SADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const ssubw_fns[4] = {
    NULL, gen_helper_sve2_ssubw_h,
    gen_helper_sve2_ssubw_s, gen_helper_sve2_ssubw_d,
};
TRANS_FEAT(SSUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 0)
TRANS_FEAT(SSUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const uaddw_fns[4] = {
    NULL, gen_helper_sve2_uaddw_h,
    gen_helper_sve2_uaddw_s, gen_helper_sve2_uaddw_d,
};
TRANS_FEAT(UADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 0)
TRANS_FEAT(UADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const usubw_fns[4] = {
    NULL, gen_helper_sve2_usubw_h,
    gen_helper_sve2_usubw_s, gen_helper_sve2_usubw_d,
};
TRANS_FEAT(USUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 0)
TRANS_FEAT(USUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 1)
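
/*
 * SSHLLB/SSHLLT and USHLLB/USHLLT widen the even (bottom) or odd (top)
 * half-width elements and shift them left by 0..halfbits-1 bits.  The
 * expansions below receive the immediate packed as (shl << 1) | top.
 */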

static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
{
    int top = imm & 1;
    int shl = imm >> 1;
    int halfbits = 4 << vece;

    if (top) {
        if (shl == halfbits) {
            TCGv_vec t = tcg_temp_new_vec_matching(d);
            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
            tcg_gen_and_vec(vece, d, n, t);
            tcg_temp_free_vec(t);
        } else {
            tcg_gen_sari_vec(vece, d, n, halfbits);
            tcg_gen_shli_vec(vece, d, d, shl);
        }
    } else {
        tcg_gen_shli_vec(vece, d, n, halfbits);
        tcg_gen_sari_vec(vece, d, d, halfbits - shl);
    }
}

static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
{
    int halfbits = 4 << vece;
    int top = imm & 1;
    int shl = (imm >> 1);
    int shift;
    uint64_t mask;

    mask = MAKE_64BIT_MASK(0, halfbits);
    mask <<= shl;
    mask = dup_const(vece, mask);

    shift = shl - top * halfbits;
    if (shift < 0) {
        tcg_gen_shri_i64(d, n, -shift);
    } else {
        tcg_gen_shli_i64(d, n, shift);
    }
    tcg_gen_andi_i64(d, d, mask);
}
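
/*
 * Worked example of the mask/shift above for the H form (vece = MO_16,
 * halfbits = 8): widening the top bytes with shl = 0 gives shift = -8,
 * so each odd byte moves down into the even position, and the per-lane
 * mask 0x00ff clears the byte that leaked in from the adjacent lane.
 */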

static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
{
    gen_ushll_i64(MO_16, d, n, imm);
}

static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
{
    gen_ushll_i64(MO_32, d, n, imm);
}

static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
{
    gen_ushll_i64(MO_64, d, n, imm);
}

static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
{
    int halfbits = 4 << vece;
    int top = imm & 1;
    int shl = imm >> 1;

    if (top) {
        if (shl == halfbits) {
            TCGv_vec t = tcg_temp_new_vec_matching(d);
            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
            tcg_gen_and_vec(vece, d, n, t);
            tcg_temp_free_vec(t);
        } else {
            tcg_gen_shri_vec(vece, d, n, halfbits);
            tcg_gen_shli_vec(vece, d, d, shl);
        }
    } else {
        if (shl == 0) {
            TCGv_vec t = tcg_temp_new_vec_matching(d);
            tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
            tcg_gen_and_vec(vece, d, n, t);
            tcg_temp_free_vec(t);
        } else {
            tcg_gen_shli_vec(vece, d, n, halfbits);
            tcg_gen_shri_vec(vece, d, d, halfbits - shl);
        }
    }
}

static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a,
                            bool sel, bool uns)
{
    static const TCGOpcode sshll_list[] = {
        INDEX_op_shli_vec, INDEX_op_sari_vec, 0
    };
    static const TCGOpcode ushll_list[] = {
        INDEX_op_shli_vec, INDEX_op_shri_vec, 0
    };
    static const GVecGen2i ops[2][3] = {
        { { .fniv = gen_sshll_vec,
            .opt_opc = sshll_list,
            .fno = gen_helper_sve2_sshll_h,
            .vece = MO_16 },
          { .fniv = gen_sshll_vec,
            .opt_opc = sshll_list,
            .fno = gen_helper_sve2_sshll_s,
            .vece = MO_32 },
          { .fniv = gen_sshll_vec,
            .opt_opc = sshll_list,
            .fno = gen_helper_sve2_sshll_d,
            .vece = MO_64 } },
        { { .fni8 = gen_ushll16_i64,
            .fniv = gen_ushll_vec,
            .opt_opc = ushll_list,
            .fno = gen_helper_sve2_ushll_h,
            .vece = MO_16 },
          { .fni8 = gen_ushll32_i64,
            .fniv = gen_ushll_vec,
            .opt_opc = ushll_list,
            .fno = gen_helper_sve2_ushll_s,
            .vece = MO_32 },
          { .fni8 = gen_ushll64_i64,
            .fniv = gen_ushll_vec,
            .opt_opc = ushll_list,
            .fno = gen_helper_sve2_ushll_d,
            .vece = MO_64 } },
    };

    if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
                        vec_full_reg_offset(s, a->rn),
                        vsz, vsz, (a->imm << 1) | sel,
                        &ops[uns][a->esz]);
    }
    return true;
}

static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a)
{
    return do_sve2_shll_tb(s, a, false, false);
}

static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a)
{
    return do_sve2_shll_tb(s, a, true, false);
}

static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a)
{
    return do_sve2_shll_tb(s, a, false, true);
}

static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
{
    return do_sve2_shll_tb(s, a, true, true);
}

static gen_helper_gvec_3 * const bext_fns[4] = {
    gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
    gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
};
TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
           bext_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const bdep_fns[4] = {
    gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
    gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
};
TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
           bdep_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const bgrp_fns[4] = {
    gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
    gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
};
TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
           bgrp_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const cadd_fns[4] = {
    gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
    gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d,
};
TRANS_FEAT(CADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
           cadd_fns[a->esz], a, 0)
TRANS_FEAT(CADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
           cadd_fns[a->esz], a, 1)

static gen_helper_gvec_3 * const sqcadd_fns[4] = {
    gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
    gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d,
};
TRANS_FEAT(SQCADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqcadd_fns[a->esz], a, 0)
TRANS_FEAT(SQCADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
           sqcadd_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const sabal_fns[4] = {
    NULL, gen_helper_sve2_sabal_h,
    gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d,
};
TRANS_FEAT(SABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 0)
TRANS_FEAT(SABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const uabal_fns[4] = {
    NULL, gen_helper_sve2_uabal_h,
    gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d,
};
TRANS_FEAT(UABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 0)
TRANS_FEAT(UABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 1)

static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
{
    static gen_helper_gvec_4 * const fns[2] = {
        gen_helper_sve2_adcl_s,
        gen_helper_sve2_adcl_d,
    };
    /*
     * Note that in this case the ESZ field encodes both size and sign.
     * Split out 'subtract' into bit 1 of the data field for the helper.
     */
    return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel);
}

TRANS_FEAT(ADCLB, aa64_sve2, do_adcl, a, false)
TRANS_FEAT(ADCLT, aa64_sve2, do_adcl, a, true)

TRANS_FEAT(SSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ssra, a)
TRANS_FEAT(USRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_usra, a)
TRANS_FEAT(SRSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_srsra, a)
TRANS_FEAT(URSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ursra, a)
TRANS_FEAT(SRI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sri, a)
TRANS_FEAT(SLI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sli, a)

TRANS_FEAT(SABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_saba, a)
TRANS_FEAT(UABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_uaba, a)

static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a,
                                   const GVecGen2 ops[3])
{
    if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 ||
        !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
                       vec_full_reg_offset(s, a->rn),
                       vsz, vsz, &ops[a->esz]);
    }
    return true;
}
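
/*
 * For all of the narrowing operations below, the *B forms write the
 * low (even) half of each destination element and zero the high half,
 * while the *T forms write the high (odd) half and, via load_dest,
 * preserve the low half of the destination.
 */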

static const TCGOpcode sqxtn_list[] = {
    INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
};

static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t mask = (1ull << halfbits) - 1;
    int64_t min = -1ull << (halfbits - 1);
    int64_t max = -min - 1;

    tcg_gen_dupi_vec(vece, t, min);
    tcg_gen_smax_vec(vece, d, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_smin_vec(vece, d, d, t);
    tcg_gen_dupi_vec(vece, t, mask);
    tcg_gen_and_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}

static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2 ops[3] = {
        { .fniv = gen_sqxtnb_vec,
          .opt_opc = sqxtn_list,
          .fno = gen_helper_sve2_sqxtnb_h,
          .vece = MO_16 },
        { .fniv = gen_sqxtnb_vec,
          .opt_opc = sqxtn_list,
          .fno = gen_helper_sve2_sqxtnb_s,
          .vece = MO_32 },
        { .fniv = gen_sqxtnb_vec,
          .opt_opc = sqxtn_list,
          .fno = gen_helper_sve2_sqxtnb_d,
          .vece = MO_64 },
    };
    return do_sve2_narrow_extract(s, a, ops);
}

static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t mask = (1ull << halfbits) - 1;
    int64_t min = -1ull << (halfbits - 1);
    int64_t max = -min - 1;

    tcg_gen_dupi_vec(vece, t, min);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_smin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_dupi_vec(vece, t, mask);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
    tcg_temp_free_vec(t);
}

static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2 ops[3] = {
        { .fniv = gen_sqxtnt_vec,
          .opt_opc = sqxtn_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqxtnt_h,
          .vece = MO_16 },
        { .fniv = gen_sqxtnt_vec,
          .opt_opc = sqxtn_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqxtnt_s,
          .vece = MO_32 },
        { .fniv = gen_sqxtnt_vec,
          .opt_opc = sqxtn_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqxtnt_d,
          .vece = MO_64 },
    };
    return do_sve2_narrow_extract(s, a, ops);
}

static const TCGOpcode uqxtn_list[] = {
    INDEX_op_shli_vec, INDEX_op_umin_vec, 0
};

static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = (1ull << halfbits) - 1;

    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_umin_vec(vece, d, n, t);
    tcg_temp_free_vec(t);
}

static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2 ops[3] = {
        { .fniv = gen_uqxtnb_vec,
          .opt_opc = uqxtn_list,
          .fno = gen_helper_sve2_uqxtnb_h,
          .vece = MO_16 },
        { .fniv = gen_uqxtnb_vec,
          .opt_opc = uqxtn_list,
          .fno = gen_helper_sve2_uqxtnb_s,
          .vece = MO_32 },
        { .fniv = gen_uqxtnb_vec,
          .opt_opc = uqxtn_list,
          .fno = gen_helper_sve2_uqxtnb_d,
          .vece = MO_64 },
    };
    return do_sve2_narrow_extract(s, a, ops);
}

static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = (1ull << halfbits) - 1;

    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_umin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
    tcg_temp_free_vec(t);
}

static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2 ops[3] = {
        { .fniv = gen_uqxtnt_vec,
          .opt_opc = uqxtn_list,
          .load_dest = true,
          .fno = gen_helper_sve2_uqxtnt_h,
          .vece = MO_16 },
        { .fniv = gen_uqxtnt_vec,
          .opt_opc = uqxtn_list,
          .load_dest = true,
          .fno = gen_helper_sve2_uqxtnt_s,
          .vece = MO_32 },
        { .fniv = gen_uqxtnt_vec,
          .opt_opc = uqxtn_list,
          .load_dest = true,
          .fno = gen_helper_sve2_uqxtnt_d,
          .vece = MO_64 },
    };
    return do_sve2_narrow_extract(s, a, ops);
}

static const TCGOpcode sqxtun_list[] = {
    INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
};

static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = (1ull << halfbits) - 1;

    tcg_gen_dupi_vec(vece, t, 0);
    tcg_gen_smax_vec(vece, d, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_umin_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}

static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2 ops[3] = {
        { .fniv = gen_sqxtunb_vec,
          .opt_opc = sqxtun_list,
          .fno = gen_helper_sve2_sqxtunb_h,
          .vece = MO_16 },
        { .fniv = gen_sqxtunb_vec,
          .opt_opc = sqxtun_list,
          .fno = gen_helper_sve2_sqxtunb_s,
          .vece = MO_32 },
        { .fniv = gen_sqxtunb_vec,
          .opt_opc = sqxtun_list,
          .fno = gen_helper_sve2_sqxtunb_d,
          .vece = MO_64 },
    };
    return do_sve2_narrow_extract(s, a, ops);
}

static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = (1ull << halfbits) - 1;

    tcg_gen_dupi_vec(vece, t, 0);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_umin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
    tcg_temp_free_vec(t);
}

static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2 ops[3] = {
        { .fniv = gen_sqxtunt_vec,
          .opt_opc = sqxtun_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqxtunt_h,
          .vece = MO_16 },
        { .fniv = gen_sqxtunt_vec,
          .opt_opc = sqxtun_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqxtunt_s,
          .vece = MO_32 },
        { .fniv = gen_sqxtunt_vec,
          .opt_opc = sqxtun_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqxtunt_d,
          .vece = MO_64 },
    };
    return do_sve2_narrow_extract(s, a, ops);
}

static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a,
                               const GVecGen2i ops[3])
{
    if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    assert(a->imm > 0 && a->imm <= (8 << a->esz));
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
                        vec_full_reg_offset(s, a->rn),
                        vsz, vsz, a->imm, &ops[a->esz]);
    }
    return true;
}

static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
{
    int halfbits = 4 << vece;
    uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));

    tcg_gen_shri_i64(d, n, shr);
    tcg_gen_andi_i64(d, d, mask);
}

static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnb_i64(MO_16, d, n, shr);
}

static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnb_i64(MO_32, d, n, shr);
}

static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnb_i64(MO_64, d, n, shr);
}

static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    uint64_t mask = MAKE_64BIT_MASK(0, halfbits);

    tcg_gen_shri_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, mask);
    tcg_gen_and_vec(vece, d, n, t);
    tcg_temp_free_vec(t);
}

static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 };
    static const GVecGen2i ops[3] = {
        { .fni8 = gen_shrnb16_i64,
          .fniv = gen_shrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_shrnb_h,
          .vece = MO_16 },
        { .fni8 = gen_shrnb32_i64,
          .fniv = gen_shrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_shrnb_s,
          .vece = MO_32 },
        { .fni8 = gen_shrnb64_i64,
          .fniv = gen_shrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_shrnb_d,
          .vece = MO_64 },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
{
    int halfbits = 4 << vece;
    uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));

    tcg_gen_shli_i64(n, n, halfbits - shr);
    tcg_gen_andi_i64(n, n, ~mask);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_or_i64(d, d, n);
}

static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnt_i64(MO_16, d, n, shr);
}

static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    gen_shrnt_i64(MO_32, d, n, shr);
}

static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
{
    tcg_gen_shri_i64(n, n, shr);
    tcg_gen_deposit_i64(d, d, n, 32, 32);
}
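
/*
 * In the MO_64 case the 64-bit unit holds a single lane, so no
 * shift-and-mask merging is needed: shift the source down and deposit
 * it directly into bits [63:32] of the destination.
 */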

static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    uint64_t mask = MAKE_64BIT_MASK(0, halfbits);

    tcg_gen_shli_vec(vece, n, n, halfbits - shr);
    tcg_gen_dupi_vec(vece, t, mask);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
    tcg_temp_free_vec(t);
}

static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 };
    static const GVecGen2i ops[3] = {
        { .fni8 = gen_shrnt16_i64,
          .fniv = gen_shrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_shrnt_h,
          .vece = MO_16 },
        { .fni8 = gen_shrnt32_i64,
          .fniv = gen_shrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_shrnt_s,
          .vece = MO_32 },
        { .fni8 = gen_shrnt64_i64,
          .fniv = gen_shrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_shrnt_d,
          .vece = MO_64 },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2i ops[3] = {
        { .fno = gen_helper_sve2_rshrnb_h },
        { .fno = gen_helper_sve2_rshrnb_s },
        { .fno = gen_helper_sve2_rshrnb_d },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2i ops[3] = {
        { .fno = gen_helper_sve2_rshrnt_h },
        { .fno = gen_helper_sve2_rshrnt_s },
        { .fno = gen_helper_sve2_rshrnt_d },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
                             TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;

    tcg_gen_sari_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, 0);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_umin_vec(vece, d, n, t);
    tcg_temp_free_vec(t);
}

static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vec_list[] = {
        INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
    };
    static const GVecGen2i ops[3] = {
        { .fniv = gen_sqshrunb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_sqshrunb_h,
          .vece = MO_16 },
        { .fniv = gen_sqshrunb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_sqshrunb_s,
          .vece = MO_32 },
        { .fniv = gen_sqshrunb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_sqshrunb_d,
          .vece = MO_64 },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
                             TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;

    tcg_gen_sari_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, 0);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_umin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
    tcg_temp_free_vec(t);
}

static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vec_list[] = {
        INDEX_op_shli_vec, INDEX_op_sari_vec,
        INDEX_op_smax_vec, INDEX_op_umin_vec, 0
    };
    static const GVecGen2i ops[3] = {
        { .fniv = gen_sqshrunt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqshrunt_h,
          .vece = MO_16 },
        { .fniv = gen_sqshrunt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqshrunt_s,
          .vece = MO_32 },
        { .fniv = gen_sqshrunt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqshrunt_d,
          .vece = MO_64 },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2i ops[3] = {
        { .fno = gen_helper_sve2_sqrshrunb_h },
        { .fno = gen_helper_sve2_sqrshrunb_s },
        { .fno = gen_helper_sve2_sqrshrunb_d },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2i ops[3] = {
        { .fno = gen_helper_sve2_sqrshrunt_h },
        { .fno = gen_helper_sve2_sqrshrunt_s },
        { .fno = gen_helper_sve2_sqrshrunt_d },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
                            TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
    int64_t min = -max - 1;

    tcg_gen_sari_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, min);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_smin_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_and_vec(vece, d, n, t);
    tcg_temp_free_vec(t);
}

static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vec_list[] = {
        INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
    };
    static const GVecGen2i ops[3] = {
        { .fniv = gen_sqshrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_sqshrnb_h,
          .vece = MO_16 },
        { .fniv = gen_sqshrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_sqshrnb_s,
          .vece = MO_32 },
        { .fniv = gen_sqshrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_sqshrnb_d,
          .vece = MO_64 },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
                            TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;
    int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
    int64_t min = -max - 1;

    tcg_gen_sari_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, min);
    tcg_gen_smax_vec(vece, n, n, t);
    tcg_gen_dupi_vec(vece, t, max);
    tcg_gen_smin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_bitsel_vec(vece, d, t, d, n);
    tcg_temp_free_vec(t);
}

static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vec_list[] = {
        INDEX_op_shli_vec, INDEX_op_sari_vec,
        INDEX_op_smax_vec, INDEX_op_smin_vec, 0
    };
    static const GVecGen2i ops[3] = {
        { .fniv = gen_sqshrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqshrnt_h,
          .vece = MO_16 },
        { .fniv = gen_sqshrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqshrnt_s,
          .vece = MO_32 },
        { .fniv = gen_sqshrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_sqshrnt_d,
          .vece = MO_64 },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2i ops[3] = {
        { .fno = gen_helper_sve2_sqrshrnb_h },
        { .fno = gen_helper_sve2_sqrshrnb_s },
        { .fno = gen_helper_sve2_sqrshrnb_d },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2i ops[3] = {
        { .fno = gen_helper_sve2_sqrshrnt_h },
        { .fno = gen_helper_sve2_sqrshrnt_s },
        { .fno = gen_helper_sve2_sqrshrnt_d },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
                            TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;

    tcg_gen_shri_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_umin_vec(vece, d, n, t);
    tcg_temp_free_vec(t);
}

static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vec_list[] = {
        INDEX_op_shri_vec, INDEX_op_umin_vec, 0
    };
    static const GVecGen2i ops[3] = {
        { .fniv = gen_uqshrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_uqshrnb_h,
          .vece = MO_16 },
        { .fniv = gen_uqshrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_uqshrnb_s,
          .vece = MO_32 },
        { .fniv = gen_uqshrnb_vec,
          .opt_opc = vec_list,
          .fno = gen_helper_sve2_uqshrnb_d,
          .vece = MO_64 },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
                            TCGv_vec n, int64_t shr)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);
    int halfbits = 4 << vece;

    tcg_gen_shri_vec(vece, n, n, shr);
    tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
    tcg_gen_umin_vec(vece, n, n, t);
    tcg_gen_shli_vec(vece, n, n, halfbits);
    tcg_gen_bitsel_vec(vece, d, t, d, n);
    tcg_temp_free_vec(t);
}

static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a)
{
    static const TCGOpcode vec_list[] = {
        INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
    };
    static const GVecGen2i ops[3] = {
        { .fniv = gen_uqshrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_uqshrnt_h,
          .vece = MO_16 },
        { .fniv = gen_uqshrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_uqshrnt_s,
          .vece = MO_32 },
        { .fniv = gen_uqshrnt_vec,
          .opt_opc = vec_list,
          .load_dest = true,
          .fno = gen_helper_sve2_uqshrnt_d,
          .vece = MO_64 },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2i ops[3] = {
        { .fno = gen_helper_sve2_uqrshrnb_h },
        { .fno = gen_helper_sve2_uqrshrnb_s },
        { .fno = gen_helper_sve2_uqrshrnb_d },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a)
{
    static const GVecGen2i ops[3] = {
        { .fno = gen_helper_sve2_uqrshrnt_h },
        { .fno = gen_helper_sve2_uqrshrnt_s },
        { .fno = gen_helper_sve2_uqrshrnt_d },
    };
    return do_sve2_shr_narrow(s, a, ops);
}

#define DO_SVE2_ZZZ_NARROW(NAME, name)                                  \
    static gen_helper_gvec_3 * const name##_fns[4] = {                  \
        NULL, gen_helper_sve2_##name##_h,                                \
        gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d,          \
    };                                                                   \
    TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzz,                    \
               name##_fns[a->esz], a, 0)

DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb)
DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)

DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb)
DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
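
/*
 * The (R)ADDHN/(R)SUBHN group adds or subtracts the wide operands and
 * narrows to the high half of each element; the R forms round before
 * truncating, and B/T select the even or odd destination halves.
 */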

static gen_helper_gvec_flags_4 * const match_fns[4] = {
    gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
};
TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])

static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
    gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
};
TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])

static gen_helper_gvec_4 * const histcnt_fns[4] = {
    NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
};
TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
           histcnt_fns[a->esz], a, 0)

TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
           a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)

static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
                            gen_helper_gvec_4_ptr *fn)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpzz_fp(s, a, fn);
}

#define DO_SVE2_ZPZZ_FP(NAME, name)                                      \
static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a)               \
{                                                                        \
    static gen_helper_gvec_4_ptr * const fns[4] = {                      \
        NULL, gen_helper_sve2_##name##_zpzz_h,                           \
        gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
    };                                                                   \
    return do_sve2_zpzz_fp(s, a, fns[a->esz]);                           \
}

DO_SVE2_ZPZZ_FP(FADDP, faddp)
DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp)
DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp)
DO_SVE2_ZPZZ_FP(FMAXP, fmaxp)
DO_SVE2_ZPZZ_FP(FMINP, fminp)

/*
 * SVE Integer Multiply-Add (unpredicated)
 */

TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s,
           a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d,
           a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)

static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
    NULL, gen_helper_sve2_sqdmlal_zzzw_h,
    gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d,
};
TRANS_FEAT(SQDMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlal_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(SQDMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlal_zzzw_fns[a->esz], a, 3)
TRANS_FEAT(SQDMLALBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlal_zzzw_fns[a->esz], a, 2)

static gen_helper_gvec_4 * const sqdmlsl_zzzw_fns[] = {
    NULL, gen_helper_sve2_sqdmlsl_zzzw_h,
    gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d,
};
TRANS_FEAT(SQDMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlsl_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(SQDMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlsl_zzzw_fns[a->esz], a, 3)
TRANS_FEAT(SQDMLSLBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqdmlsl_zzzw_fns[a->esz], a, 2)

static gen_helper_gvec_4 * const sqrdmlah_fns[] = {
    gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h,
    gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d,
};
TRANS_FEAT(SQRDMLAH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqrdmlah_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const sqrdmlsh_fns[] = {
    gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h,
    gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d,
};
TRANS_FEAT(SQRDMLSH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
           sqrdmlsh_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const smlal_zzzw_fns[] = {
    NULL, gen_helper_sve2_smlal_zzzw_h,
    gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d,
};
TRANS_FEAT(SMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           smlal_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(SMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           smlal_zzzw_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const umlal_zzzw_fns[] = {
    NULL, gen_helper_sve2_umlal_zzzw_h,
    gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d,
};
TRANS_FEAT(UMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           umlal_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(UMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           umlal_zzzw_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const smlsl_zzzw_fns[] = {
    NULL, gen_helper_sve2_smlsl_zzzw_h,
    gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d,
};
TRANS_FEAT(SMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           smlsl_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(SMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           smlsl_zzzw_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const umlsl_zzzw_fns[] = {
    NULL, gen_helper_sve2_umlsl_zzzw_h,
    gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d,
};
TRANS_FEAT(UMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           umlsl_zzzw_fns[a->esz], a, 0)
TRANS_FEAT(UMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
           umlsl_zzzw_fns[a->esz], a, 1)

static gen_helper_gvec_4 * const cmla_fns[] = {
    gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
    gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
};
TRANS_FEAT(CMLA_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
           cmla_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)

static gen_helper_gvec_4 * const cdot_fns[] = {
    NULL, NULL, gen_helper_sve2_cdot_zzzz_s, gen_helper_sve2_cdot_zzzz_d
};
TRANS_FEAT(CDOT_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
           cdot_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)

static gen_helper_gvec_4 * const sqrdcmlah_fns[] = {
    gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
    gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
};
TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
           sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)

TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
           a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)

TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
           gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)

TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
           gen_helper_crypto_aese, a, false)
TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
           gen_helper_crypto_aese, a, true)

TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
           gen_helper_crypto_sm4e, a, 0)
TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
           gen_helper_crypto_sm4ekey, a, 0)

TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a)
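
/*
 * AESE and AESD share gen_helper_crypto_aese, distinguished by the
 * boolean data argument; AESMC and AESIMC are likewise merged, with
 * a->decrypt carrying the distinction.
 */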

static bool trans_FCVTNT_sh(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_sh);
}

static bool trans_BFCVTNT(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve_bf16, s)) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve_bfcvtnt);
}

static bool trans_FCVTNT_ds(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtnt_ds);
}

static bool trans_FCVTLT_hs(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_hs);
}

static bool trans_FCVTLT_sd(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_zpz_ptr(s, a->rd, a->rn, a->pg, false, gen_helper_sve2_fcvtlt_sd);
}

static bool trans_FCVTX_ds(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve_fcvt_ds);
}

static bool trans_FCVTXNT_ds(DisasContext *s, arg_rpr_esz *a)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return do_frint_mode(s, a, float_round_to_odd, gen_helper_sve2_fcvtnt_ds);
}

static bool trans_FLOGB(DisasContext *s, arg_rpr_esz *a)
{
    static gen_helper_gvec_3_ptr * const fns[] = {
        NULL, gen_helper_flogb_h,
        gen_helper_flogb_s, gen_helper_flogb_d
    };

    if (!dc_isar_feature(aa64_sve2, s) || fns[a->esz] == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_ptr status =
            fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
        unsigned vsz = vec_full_reg_size(s);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->rn),
                           pred_full_reg_offset(s, a->pg),
                           status, vsz, vsz, 0, fns[a->esz]);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s,
                             a->rd, a->rn, a->rm, a->ra,
                             (sel << 1) | sub, cpu_env);
}
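
/*
 * All four of FMLALB/FMLALT/FMLSLB/FMLSLT share one helper: bit 0 of
 * the data argument selects subtraction (FMLSL) and bit 1 selects the
 * odd (top) half-precision source elements.
 */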

static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
{
    return do_FMLAL_zzzw(s, a, false, false);
}

static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
{
    return do_FMLAL_zzzw(s, a, false, true);
}

static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
{
    return do_FMLAL_zzzw(s, a, true, false);
}

static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
{
    return do_FMLAL_zzzw(s, a, true, true);
}

static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
{
    if (!dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s,
                             a->rd, a->rn, a->rm, a->ra,
                             (a->index << 2) | (sel << 1) | sub, cpu_env);
}

static bool trans_FMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
{
    return do_FMLAL_zzxw(s, a, false, false);
}

static bool trans_FMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
{
    return do_FMLAL_zzxw(s, a, false, true);
}

static bool trans_FMLSLB_zzxw(DisasContext *s, arg_rrxr_esz *a)
{
    return do_FMLAL_zzxw(s, a, true, false);
}

static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a)
{
    return do_FMLAL_zzxw(s, a, true, true);
}

TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
           gen_helper_gvec_smmla_b, a, 0)
TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
           gen_helper_gvec_usmmla_b, a, 0)
TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
           gen_helper_gvec_ummla_b, a, 0)

TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
           gen_helper_gvec_bfdot, a, 0)
TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
           gen_helper_gvec_bfdot_idx, a)

TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
           gen_helper_gvec_bfmmla, a, 0)

static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
{
    return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal,
                              a->rd, a->rn, a->rm, a->ra, sel, FPST_FPCR);
}

TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false)
TRANS_FEAT(BFMLALT_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, true)

static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
{
    return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx,
                              a->rd, a->rn, a->rm, a->ra,
                              (a->index << 1) | sel, FPST_FPCR);
}

TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)