target/arm: Use TRANS_FEAT for do_ppz_fp
/*
 * AArch64 SVE translation
 *
 * Copyright (c) 2018 Linaro, Ltd
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "translate-a64.h"
#include "fpu/softfloat.h"

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t,
                         TCGv_i64, uint32_t, uint32_t);

typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_i32);
typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_ptr, TCGv_i32);

typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr,
                                         TCGv_ptr, TCGv_i64, TCGv_i32);

/*
 * Helpers for extracting complex instruction fields.
 */

/* See e.g. ASR (immediate, predicated).
 * Returns -1 for unallocated encoding; diagnose later.
 */
static int tszimm_esz(DisasContext *s, int x)
{
    x >>= 3;  /* discard imm3 */
    return 31 - clz32(x);
}

static int tszimm_shr(DisasContext *s, int x)
{
    return (16 << tszimm_esz(s, x)) - x;
}

/* See e.g. LSL (immediate, predicated). */
static int tszimm_shl(DisasContext *s, int x)
{
    return x - (8 << tszimm_esz(s, x));
}
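
/*
 * For example: the field decoded by these helpers is tsz:imm3.  For
 * ASR <Zdn>.B, <Pg>/M, <Zdn>.B, #3 the field is 0b0001:101 (13), so
 * tszimm_esz sees tsz == 0b0001 and returns esz 0, and tszimm_shr
 * returns (16 << 0) - 13 = 3.  For the corresponding LSL #3 the field
 * is 0b0001:011 (11) and tszimm_shl returns 11 - (8 << 0) = 3.  A tsz
 * of 0 gives esz == -1, which is rejected later as unallocated.
 */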

/* The SH bit is in bit 8.  Extract the low 8 and shift. */
static inline int expand_imm_sh8s(DisasContext *s, int x)
{
    return (int8_t)x << (x & 0x100 ? 8 : 0);
}

static inline int expand_imm_sh8u(DisasContext *s, int x)
{
    return (uint8_t)x << (x & 0x100 ? 8 : 0);
}
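
/*
 * For example: a 9-bit immediate of 0x1ab expands to 0xab00 in the
 * unsigned form, or to (int8_t)0xab << 8 == -0x5500 in the signed form;
 * with bit 8 clear, the low byte is used unshifted.
 */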

/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
 * with unsigned data.  C.f. SVE Memory Contiguous Load Group.
 */
static inline int msz_dtype(DisasContext *s, int msz)
{
    static const uint8_t dtype[4] = { 0, 5, 10, 15 };
    return dtype[msz];
}
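
/*
 * That is, msz 0..3 (B, H, S, D) map to dtype 0, 5, 10 and 15: the
 * entries of the dtype table where the memory element size equals the
 * register element size and no sign extension is performed.
 */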

/*
 * Include the generated decoder.
 */

#include "decode-sve.c.inc"

/*
 * Implement all of the translator functions referenced by the decoder.
 */

/* Return the offset into CPUARMState of the predicate vector register Pn.
 * Note for this purpose, FFR is P16.
 */
static inline int pred_full_reg_offset(DisasContext *s, int regno)
{
    return offsetof(CPUARMState, vfp.pregs[regno]);
}

/* Return the byte size of the whole predicate register, VL / 64. */
static inline int pred_full_reg_size(DisasContext *s)
{
    return s->sve_len >> 3;
}

/* Round up the size of a register to a size allowed by
 * the tcg vector infrastructure.  Any operation which uses this
 * size may assume that the bits above pred_full_reg_size are zero,
 * and must leave them the same way.
 *
 * Note that this is not needed for the vector registers as they
 * are always properly sized for tcg vectors.
 */
static int size_for_gvec(int size)
{
    if (size <= 8) {
        return 8;
    } else {
        return QEMU_ALIGN_UP(size, 16);
    }
}

static int pred_gvec_reg_size(DisasContext *s)
{
    return size_for_gvec(pred_full_reg_size(s));
}
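
/*
 * For example: with a 48-byte (384-bit) vector length the predicate
 * registers are 6 bytes wide, which size_for_gvec rounds up to the
 * 8-byte minimum that the gvec infrastructure accepts; a 32-byte
 * predicate (2048-bit vectors) is already a multiple of 16 and is
 * returned unchanged.
 */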

/* Invoke an out-of-line helper on 2 Zregs. */
static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
                            int rd, int rn, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vsz, vsz, data, fn);
    }
    return true;
}
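
/*
 * Note the convention shared by all of these wrappers: a NULL helper
 * reports an unallocated encoding by returning false, while a failed
 * sve_access_check() still returns true because the access check has
 * already raised its own exception.
 */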

static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
                             int rd, int rn, int data,
                             ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           status, vsz, vsz, data, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
                                 arg_rr_esz *a, int data)
{
    return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data,
                            a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

/* Invoke an out-of-line helper on 3 Zregs. */
static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int rm, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rrr_esz *a, int data)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
}

/* Invoke an out-of-line helper on 3 Zregs, plus float_status. */
static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                              int rd, int rn, int rm,
                              int data, ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           status, vsz, vsz, data, fn);

        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                                  arg_rrr_esz *a, int data)
{
    return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data,
                             a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

/* Invoke an out-of-line helper on 4 Zregs. */
static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int ra, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrrr_esz *a, int data)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
}

static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrxr_esz *a)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
}

/* Invoke an out-of-line helper on 4 Zregs, plus a pointer. */
static bool gen_gvec_ptr_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                              int rd, int rn, int rm, int ra,
                              int data, TCGv_ptr ptr)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           ptr, vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                               int rd, int rn, int rm, int ra,
                               int data, ARMFPStatusFlavour flavour)
{
    TCGv_ptr status = fpstatus_ptr(flavour);
    bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status);
    tcg_temp_free_ptr(status);
    return ret;
}

/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rpr_esz *a, int data)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data);
}

static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn,
                                  arg_rpri_esz *a)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
}

static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                              int rd, int rn, int pg, int data,
                              ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           status, vsz, vsz, data, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zpz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                                  arg_rpr_esz *a, int data,
                                  ARMFPStatusFlavour flavour)
{
    return gen_gvec_fpst_zzp(s, fn, a->rd, a->rn, a->pg, data, flavour);
}

/* Invoke an out-of-line helper on 3 Zregs and a predicate. */
static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rprr_esz *a, int data)
{
    return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data);
}

/* Invoke a vector expander on two Zregs and an immediate. */
static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                            int esz, int rd, int rn, uint64_t imm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn), imm, vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                                arg_rri_esz *a)
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm);
}

/* Invoke a vector expander on three Zregs. */
static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int esz, int rd, int rn, int rm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn),
                vec_full_reg_offset(s, rm), vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn,
                                arg_rrr_esz *a)
{
    return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
}

/* Invoke a vector expander on four Zregs. */
static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
                                 arg_rrrr_esz *a)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn),
                vec_full_reg_offset(s, a->rm),
                vec_full_reg_offset(s, a->ra), vsz, vsz);
    }
    return true;
}

/* Invoke a vector move on two Zregs. */
static bool do_mov_z(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn), vsz, vsz);
    }
    return true;
}

/* Initialize a Zreg with replications of a 64-bit immediate. */
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}

/* Invoke a vector expander on three Pregs. */
static bool gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int rd, int rn, int rm)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        gvec_fn(MO_64, pred_full_reg_offset(s, rd),
                pred_full_reg_offset(s, rn),
                pred_full_reg_offset(s, rm), psz, psz);
    }
    return true;
}

/* Invoke a vector move on two Pregs. */
static bool do_mov_p(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
                         pred_full_reg_offset(s, rn), psz, psz);
    }
    return true;
}

/* Set the cpu flags as per a return from an SVE helper. */
static void do_pred_flags(TCGv_i32 t)
{
    tcg_gen_mov_i32(cpu_NF, t);
    tcg_gen_andi_i32(cpu_ZF, t, 2);
    tcg_gen_andi_i32(cpu_CF, t, 1);
    tcg_gen_movi_i32(cpu_VF, 0);
}
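
/*
 * The helpers are expected to pack the PredTest result so that bit 31
 * is N (the first active element was true), bit 1 is non-zero when any
 * active element was true (clearing Z), and bit 0 is C (the last active
 * element was false); V is always zero.
 */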

/* Subroutines computing the ARM PredTest pseudofunction. */
static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_helper_sve_predtest1(t, d, g);
    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
{
    TCGv_ptr dptr = tcg_temp_new_ptr();
    TCGv_ptr gptr = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
    tcg_gen_addi_ptr(gptr, cpu_env, gofs);

    gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words));
    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(gptr);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

/* For each element size, the bits within a predicate word that are active. */
const uint64_t pred_esz_masks[4] = {
    0xffffffffffffffffull, 0x5555555555555555ull,
    0x1111111111111111ull, 0x0101010101010101ull
};

static bool trans_INVALID(DisasContext *s, arg_INVALID *a)
{
    unallocated_encoding(s);
    return true;
}

/*
 *** SVE Logical - Unpredicated Group
 */

TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a)
TRANS_FEAT(ORR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_or, a)
TRANS_FEAT(EOR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_xor, a)
TRANS_FEAT(BIC_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_andc, a)
535 static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
537 TCGv_i64 t = tcg_temp_new_i64();
538 uint64_t mask = dup_const(MO_8, 0xff >> sh);
540 tcg_gen_xor_i64(t, n, m);
541 tcg_gen_shri_i64(d, t, sh);
542 tcg_gen_shli_i64(t, t, 8 - sh);
543 tcg_gen_andi_i64(d, d, mask);
544 tcg_gen_andi_i64(t, t, ~mask);
545 tcg_gen_or_i64(d, d, t);
546 tcg_temp_free_i64(t);
549 static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
551 TCGv_i64 t = tcg_temp_new_i64();
552 uint64_t mask = dup_const(MO_16, 0xffff >> sh);
554 tcg_gen_xor_i64(t, n, m);
555 tcg_gen_shri_i64(d, t, sh);
556 tcg_gen_shli_i64(t, t, 16 - sh);
557 tcg_gen_andi_i64(d, d, mask);
558 tcg_gen_andi_i64(t, t, ~mask);
559 tcg_gen_or_i64(d, d, t);
560 tcg_temp_free_i64(t);
563 static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
565 tcg_gen_xor_i32(d, n, m);
566 tcg_gen_rotri_i32(d, d, sh);
569 static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
571 tcg_gen_xor_i64(d, n, m);
572 tcg_gen_rotri_i64(d, d, sh);
575 static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
576 TCGv_vec m, int64_t sh)
578 tcg_gen_xor_vec(vece, d, n, m);
579 tcg_gen_rotri_vec(vece, d, d, sh);
582 void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
583 uint32_t rm_ofs, int64_t shift,
584 uint32_t opr_sz, uint32_t max_sz)
586 static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
587 static const GVecGen3i ops[4] = {
588 { .fni8 = gen_xar8_i64,
589 .fniv = gen_xar_vec,
590 .fno = gen_helper_sve2_xar_b,
591 .opt_opc = vecop,
592 .vece = MO_8 },
593 { .fni8 = gen_xar16_i64,
594 .fniv = gen_xar_vec,
595 .fno = gen_helper_sve2_xar_h,
596 .opt_opc = vecop,
597 .vece = MO_16 },
598 { .fni4 = gen_xar_i32,
599 .fniv = gen_xar_vec,
600 .fno = gen_helper_sve2_xar_s,
601 .opt_opc = vecop,
602 .vece = MO_32 },
603 { .fni8 = gen_xar_i64,
604 .fniv = gen_xar_vec,
605 .fno = gen_helper_gvec_xar_d,
606 .opt_opc = vecop,
607 .vece = MO_64 }
609 int esize = 8 << vece;
611 /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
612 tcg_debug_assert(shift >= 0);
613 tcg_debug_assert(shift <= esize);
614 shift &= esize - 1;
616 if (shift == 0) {
617 /* xar with no rotate devolves to xor. */
618 tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
619 } else {
620 tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
621 shift, &ops[vece]);
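
/*
 * For example: an 8-bit rotate right by 3 in gen_xar8_i64 above becomes
 * (t >> 3) | (t << 5), with the per-lane masks 0x1f1f... and 0xe0e0...
 * keeping the two halves from crossing byte boundaries.
 */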
625 static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
627 if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
628 return false;
630 if (sve_access_check(s)) {
631 unsigned vsz = vec_full_reg_size(s);
632 gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
633 vec_full_reg_offset(s, a->rn),
634 vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
636 return true;
639 static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
641 tcg_gen_xor_i64(d, n, m);
642 tcg_gen_xor_i64(d, d, k);
645 static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
646 TCGv_vec m, TCGv_vec k)
648 tcg_gen_xor_vec(vece, d, n, m);
649 tcg_gen_xor_vec(vece, d, d, k);
652 static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
653 uint32_t a, uint32_t oprsz, uint32_t maxsz)
655 static const GVecGen4 op = {
656 .fni8 = gen_eor3_i64,
657 .fniv = gen_eor3_vec,
658 .fno = gen_helper_sve2_eor3,
659 .vece = MO_64,
660 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
662 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
665 TRANS_FEAT(EOR3, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_eor3, a)
667 static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
669 tcg_gen_andc_i64(d, m, k);
670 tcg_gen_xor_i64(d, d, n);
673 static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
674 TCGv_vec m, TCGv_vec k)
676 tcg_gen_andc_vec(vece, d, m, k);
677 tcg_gen_xor_vec(vece, d, d, n);
680 static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
681 uint32_t a, uint32_t oprsz, uint32_t maxsz)
683 static const GVecGen4 op = {
684 .fni8 = gen_bcax_i64,
685 .fniv = gen_bcax_vec,
686 .fno = gen_helper_sve2_bcax,
687 .vece = MO_64,
688 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
690 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
693 TRANS_FEAT(BCAX, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bcax, a)
695 static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
696 uint32_t a, uint32_t oprsz, uint32_t maxsz)
698 /* BSL differs from the generic bitsel in argument ordering. */
699 tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
702 TRANS_FEAT(BSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl, a)
704 static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
706 tcg_gen_andc_i64(n, k, n);
707 tcg_gen_andc_i64(m, m, k);
708 tcg_gen_or_i64(d, n, m);
711 static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
712 TCGv_vec m, TCGv_vec k)
714 if (TCG_TARGET_HAS_bitsel_vec) {
715 tcg_gen_not_vec(vece, n, n);
716 tcg_gen_bitsel_vec(vece, d, k, n, m);
717 } else {
718 tcg_gen_andc_vec(vece, n, k, n);
719 tcg_gen_andc_vec(vece, m, m, k);
720 tcg_gen_or_vec(vece, d, n, m);
724 static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
725 uint32_t a, uint32_t oprsz, uint32_t maxsz)
727 static const GVecGen4 op = {
728 .fni8 = gen_bsl1n_i64,
729 .fniv = gen_bsl1n_vec,
730 .fno = gen_helper_sve2_bsl1n,
731 .vece = MO_64,
732 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
734 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
737 TRANS_FEAT(BSL1N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl1n, a)

static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    /*
     * Z[dn] = (n & k) | (~m & ~k)
     *       = (n & k) | ~(m | k)
     */
    tcg_gen_and_i64(n, n, k);
    if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_or_i64(m, m, k);
        tcg_gen_orc_i64(d, n, m);
    } else {
        tcg_gen_nor_i64(m, m, k);
        tcg_gen_or_i64(d, n, m);
    }
}
755 static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
756 TCGv_vec m, TCGv_vec k)
758 if (TCG_TARGET_HAS_bitsel_vec) {
759 tcg_gen_not_vec(vece, m, m);
760 tcg_gen_bitsel_vec(vece, d, k, n, m);
761 } else {
762 tcg_gen_and_vec(vece, n, n, k);
763 tcg_gen_or_vec(vece, m, m, k);
764 tcg_gen_orc_vec(vece, d, n, m);
768 static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
769 uint32_t a, uint32_t oprsz, uint32_t maxsz)
771 static const GVecGen4 op = {
772 .fni8 = gen_bsl2n_i64,
773 .fniv = gen_bsl2n_vec,
774 .fno = gen_helper_sve2_bsl2n,
775 .vece = MO_64,
776 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
778 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
781 TRANS_FEAT(BSL2N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl2n, a)
783 static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
785 tcg_gen_and_i64(n, n, k);
786 tcg_gen_andc_i64(m, m, k);
787 tcg_gen_nor_i64(d, n, m);
790 static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
791 TCGv_vec m, TCGv_vec k)
793 tcg_gen_bitsel_vec(vece, d, k, n, m);
794 tcg_gen_not_vec(vece, d, d);
797 static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
798 uint32_t a, uint32_t oprsz, uint32_t maxsz)
800 static const GVecGen4 op = {
801 .fni8 = gen_nbsl_i64,
802 .fniv = gen_nbsl_vec,
803 .fno = gen_helper_sve2_nbsl,
804 .vece = MO_64,
805 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
807 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
810 TRANS_FEAT(NBSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_nbsl, a)
813 *** SVE Integer Arithmetic - Unpredicated Group
816 TRANS_FEAT(ADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_add, a)
817 TRANS_FEAT(SUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sub, a)
818 TRANS_FEAT(SQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ssadd, a)
819 TRANS_FEAT(SQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sssub, a)
820 TRANS_FEAT(UQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_usadd, a)
821 TRANS_FEAT(UQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ussub, a)
824 *** SVE Integer Arithmetic - Binary Predicated Group
/* Select active elements from Zn and inactive elements from Zm,
 * storing the result in Zd.
 */
830 static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
832 static gen_helper_gvec_4 * const fns[4] = {
833 gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
834 gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d
836 return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
839 #define DO_ZPZZ(NAME, FEAT, name) \
840 static gen_helper_gvec_4 * const name##_zpzz_fns[4] = { \
841 gen_helper_##name##_zpzz_b, gen_helper_##name##_zpzz_h, \
842 gen_helper_##name##_zpzz_s, gen_helper_##name##_zpzz_d, \
843 }; \
844 TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpzz, \
845 name##_zpzz_fns[a->esz], a, 0)
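
/*
 * For example, DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add) expands to a table
 * sve_add_zpzz_fns[] of the four gen_helper_sve_add_zpzz_{b,h,s,d}
 * helpers plus a trans_ADD_zpzz that dispatches on a->esz through
 * gen_gvec_ool_arg_zpzz.
 */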
847 DO_ZPZZ(AND_zpzz, aa64_sve, sve_and)
848 DO_ZPZZ(EOR_zpzz, aa64_sve, sve_eor)
849 DO_ZPZZ(ORR_zpzz, aa64_sve, sve_orr)
850 DO_ZPZZ(BIC_zpzz, aa64_sve, sve_bic)
852 DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add)
853 DO_ZPZZ(SUB_zpzz, aa64_sve, sve_sub)
855 DO_ZPZZ(SMAX_zpzz, aa64_sve, sve_smax)
856 DO_ZPZZ(UMAX_zpzz, aa64_sve, sve_umax)
857 DO_ZPZZ(SMIN_zpzz, aa64_sve, sve_smin)
858 DO_ZPZZ(UMIN_zpzz, aa64_sve, sve_umin)
859 DO_ZPZZ(SABD_zpzz, aa64_sve, sve_sabd)
860 DO_ZPZZ(UABD_zpzz, aa64_sve, sve_uabd)
862 DO_ZPZZ(MUL_zpzz, aa64_sve, sve_mul)
863 DO_ZPZZ(SMULH_zpzz, aa64_sve, sve_smulh)
864 DO_ZPZZ(UMULH_zpzz, aa64_sve, sve_umulh)
866 DO_ZPZZ(ASR_zpzz, aa64_sve, sve_asr)
867 DO_ZPZZ(LSR_zpzz, aa64_sve, sve_lsr)
868 DO_ZPZZ(LSL_zpzz, aa64_sve, sve_lsl)
870 static gen_helper_gvec_4 * const sdiv_fns[4] = {
871 NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
873 TRANS_FEAT(SDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, sdiv_fns[a->esz], a, 0)
875 static gen_helper_gvec_4 * const udiv_fns[4] = {
876 NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
878 TRANS_FEAT(UDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, udiv_fns[a->esz], a, 0)
880 TRANS_FEAT(SEL_zpzz, aa64_sve, do_sel_z, a->rd, a->rn, a->rm, a->pg, a->esz)
883 *** SVE Integer Arithmetic - Unary Predicated Group
886 #define DO_ZPZ(NAME, FEAT, name) \
887 static gen_helper_gvec_3 * const name##_fns[4] = { \
888 gen_helper_##name##_b, gen_helper_##name##_h, \
889 gen_helper_##name##_s, gen_helper_##name##_d, \
890 }; \
891 TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpz, name##_fns[a->esz], a, 0)
893 DO_ZPZ(CLS, aa64_sve, sve_cls)
894 DO_ZPZ(CLZ, aa64_sve, sve_clz)
895 DO_ZPZ(CNT_zpz, aa64_sve, sve_cnt_zpz)
896 DO_ZPZ(CNOT, aa64_sve, sve_cnot)
897 DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz)
898 DO_ZPZ(ABS, aa64_sve, sve_abs)
899 DO_ZPZ(NEG, aa64_sve, sve_neg)
900 DO_ZPZ(RBIT, aa64_sve, sve_rbit)
902 static gen_helper_gvec_3 * const fabs_fns[4] = {
903 NULL, gen_helper_sve_fabs_h,
904 gen_helper_sve_fabs_s, gen_helper_sve_fabs_d,
906 TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0)
908 static gen_helper_gvec_3 * const fneg_fns[4] = {
909 NULL, gen_helper_sve_fneg_h,
910 gen_helper_sve_fneg_s, gen_helper_sve_fneg_d,
912 TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0)
914 static gen_helper_gvec_3 * const sxtb_fns[4] = {
915 NULL, gen_helper_sve_sxtb_h,
916 gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d,
918 TRANS_FEAT(SXTB, aa64_sve, gen_gvec_ool_arg_zpz, sxtb_fns[a->esz], a, 0)
920 static gen_helper_gvec_3 * const uxtb_fns[4] = {
921 NULL, gen_helper_sve_uxtb_h,
922 gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d,
924 TRANS_FEAT(UXTB, aa64_sve, gen_gvec_ool_arg_zpz, uxtb_fns[a->esz], a, 0)
926 static gen_helper_gvec_3 * const sxth_fns[4] = {
927 NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d
929 TRANS_FEAT(SXTH, aa64_sve, gen_gvec_ool_arg_zpz, sxth_fns[a->esz], a, 0)
931 static gen_helper_gvec_3 * const uxth_fns[4] = {
932 NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d
934 TRANS_FEAT(UXTH, aa64_sve, gen_gvec_ool_arg_zpz, uxth_fns[a->esz], a, 0)
936 TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz,
937 a->esz == 3 ? gen_helper_sve_sxtw_d : NULL, a, 0)
938 TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz,
939 a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0)
942 *** SVE Integer Reduction Group
945 typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);
946 static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
947 gen_helper_gvec_reduc *fn)
949 unsigned vsz = vec_full_reg_size(s);
950 TCGv_ptr t_zn, t_pg;
951 TCGv_i32 desc;
952 TCGv_i64 temp;
954 if (fn == NULL) {
955 return false;
957 if (!sve_access_check(s)) {
958 return true;
961 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
962 temp = tcg_temp_new_i64();
963 t_zn = tcg_temp_new_ptr();
964 t_pg = tcg_temp_new_ptr();
966 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
967 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
968 fn(temp, t_zn, t_pg, desc);
969 tcg_temp_free_ptr(t_zn);
970 tcg_temp_free_ptr(t_pg);
972 write_fp_dreg(s, a->rd, temp);
973 tcg_temp_free_i64(temp);
974 return true;
977 #define DO_VPZ(NAME, name) \
978 static gen_helper_gvec_reduc * const name##_fns[4] = { \
979 gen_helper_sve_##name##_b, gen_helper_sve_##name##_h, \
980 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
981 }; \
982 TRANS_FEAT(NAME, aa64_sve, do_vpz_ool, a, name##_fns[a->esz])
984 DO_VPZ(ORV, orv)
985 DO_VPZ(ANDV, andv)
986 DO_VPZ(EORV, eorv)
988 DO_VPZ(UADDV, uaddv)
989 DO_VPZ(SMAXV, smaxv)
990 DO_VPZ(UMAXV, umaxv)
991 DO_VPZ(SMINV, sminv)
992 DO_VPZ(UMINV, uminv)
994 static gen_helper_gvec_reduc * const saddv_fns[4] = {
995 gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
996 gen_helper_sve_saddv_s, NULL
998 TRANS_FEAT(SADDV, aa64_sve, do_vpz_ool, a, saddv_fns[a->esz])
1000 #undef DO_VPZ
1003 *** SVE Shift by Immediate - Predicated Group
1007 * Copy Zn into Zd, storing zeros into inactive elements.
1008 * If invert, store zeros into the active elements.
1010 static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
1011 int esz, bool invert)
1013 static gen_helper_gvec_3 * const fns[4] = {
1014 gen_helper_sve_movz_b, gen_helper_sve_movz_h,
1015 gen_helper_sve_movz_s, gen_helper_sve_movz_d,
1017 return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
1020 static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr,
1021 gen_helper_gvec_3 * const fns[4])
1023 int max;
1025 if (a->esz < 0) {
1026 /* Invalid tsz encoding -- see tszimm_esz. */
1027 return false;
1031 * Shift by element size is architecturally valid.
1032 * For arithmetic right-shift, it's the same as by one less.
1033 * For logical shifts and ASRD, it is a zeroing operation.
1035 max = 8 << a->esz;
1036 if (a->imm >= max) {
1037 if (asr) {
1038 a->imm = max - 1;
1039 } else {
1040 return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
1043 return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a);
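
/*
 * For example: a predicated ASR .B by #8 is rewritten as a shift by #7,
 * while LSR .B by #8 zeroes the active elements via do_movz_zpz and
 * leaves the inactive elements unchanged.
 */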
1046 static gen_helper_gvec_3 * const asr_zpzi_fns[4] = {
1047 gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h,
1048 gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d,
1050 TRANS_FEAT(ASR_zpzi, aa64_sve, do_shift_zpzi, a, true, asr_zpzi_fns)
1052 static gen_helper_gvec_3 * const lsr_zpzi_fns[4] = {
1053 gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h,
1054 gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d,
1056 TRANS_FEAT(LSR_zpzi, aa64_sve, do_shift_zpzi, a, false, lsr_zpzi_fns)
1058 static gen_helper_gvec_3 * const lsl_zpzi_fns[4] = {
1059 gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h,
1060 gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d,
1062 TRANS_FEAT(LSL_zpzi, aa64_sve, do_shift_zpzi, a, false, lsl_zpzi_fns)
1064 static gen_helper_gvec_3 * const asrd_fns[4] = {
1065 gen_helper_sve_asrd_b, gen_helper_sve_asrd_h,
1066 gen_helper_sve_asrd_s, gen_helper_sve_asrd_d,
1068 TRANS_FEAT(ASRD, aa64_sve, do_shift_zpzi, a, false, asrd_fns)
1070 static gen_helper_gvec_3 * const sqshl_zpzi_fns[4] = {
1071 gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
1072 gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
1074 TRANS_FEAT(SQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
1075 a->esz < 0 ? NULL : sqshl_zpzi_fns[a->esz], a)
1077 static gen_helper_gvec_3 * const uqshl_zpzi_fns[4] = {
1078 gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
1079 gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
1081 TRANS_FEAT(UQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
1082 a->esz < 0 ? NULL : uqshl_zpzi_fns[a->esz], a)
1084 static gen_helper_gvec_3 * const srshr_fns[4] = {
1085 gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
1086 gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
1088 TRANS_FEAT(SRSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
1089 a->esz < 0 ? NULL : srshr_fns[a->esz], a)
1091 static gen_helper_gvec_3 * const urshr_fns[4] = {
1092 gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
1093 gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
1095 TRANS_FEAT(URSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
1096 a->esz < 0 ? NULL : urshr_fns[a->esz], a)
1098 static gen_helper_gvec_3 * const sqshlu_fns[4] = {
1099 gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
1100 gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
1102 TRANS_FEAT(SQSHLU, aa64_sve2, gen_gvec_ool_arg_zpzi,
1103 a->esz < 0 ? NULL : sqshlu_fns[a->esz], a)
1106 *** SVE Bitwise Shift - Predicated Group
1109 #define DO_ZPZW(NAME, name) \
1110 static gen_helper_gvec_4 * const name##_zpzw_fns[4] = { \
1111 gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h, \
1112 gen_helper_sve_##name##_zpzw_s, NULL \
1113 }; \
1114 TRANS_FEAT(NAME##_zpzw, aa64_sve, gen_gvec_ool_arg_zpzz, \
1115 a->esz < 0 ? NULL : name##_zpzw_fns[a->esz], a, 0)
1117 DO_ZPZW(ASR, asr)
1118 DO_ZPZW(LSR, lsr)
1119 DO_ZPZW(LSL, lsl)
1121 #undef DO_ZPZW
1124 *** SVE Bitwise Shift - Unpredicated Group
1127 static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
1128 void (*gvec_fn)(unsigned, uint32_t, uint32_t,
1129 int64_t, uint32_t, uint32_t))
1131 if (a->esz < 0) {
1132 /* Invalid tsz encoding -- see tszimm_esz. */
1133 return false;
1135 if (sve_access_check(s)) {
1136 unsigned vsz = vec_full_reg_size(s);
1137 /* Shift by element size is architecturally valid. For
1138 arithmetic right-shift, it's the same as by one less.
1139 Otherwise it is a zeroing operation. */
1140 if (a->imm >= 8 << a->esz) {
1141 if (asr) {
1142 a->imm = (8 << a->esz) - 1;
1143 } else {
1144 do_dupi_z(s, a->rd, 0);
1145 return true;
1148 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
1149 vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
1151 return true;
1154 TRANS_FEAT(ASR_zzi, aa64_sve, do_shift_imm, a, true, tcg_gen_gvec_sari)
1155 TRANS_FEAT(LSR_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shri)
1156 TRANS_FEAT(LSL_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shli)
1158 #define DO_ZZW(NAME, name) \
1159 static gen_helper_gvec_3 * const name##_zzw_fns[4] = { \
1160 gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h, \
1161 gen_helper_sve_##name##_zzw_s, NULL \
1162 }; \
1163 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_arg_zzz, \
1164 name##_zzw_fns[a->esz], a, 0)
1166 DO_ZZW(ASR_zzw, asr)
1167 DO_ZZW(LSR_zzw, lsr)
1168 DO_ZZW(LSL_zzw, lsl)
1170 #undef DO_ZZW
1173 *** SVE Integer Multiply-Add Group
1176 static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
1177 gen_helper_gvec_5 *fn)
1179 if (sve_access_check(s)) {
1180 unsigned vsz = vec_full_reg_size(s);
1181 tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
1182 vec_full_reg_offset(s, a->ra),
1183 vec_full_reg_offset(s, a->rn),
1184 vec_full_reg_offset(s, a->rm),
1185 pred_full_reg_offset(s, a->pg),
1186 vsz, vsz, 0, fn);
1188 return true;
1191 static gen_helper_gvec_5 * const mla_fns[4] = {
1192 gen_helper_sve_mla_b, gen_helper_sve_mla_h,
1193 gen_helper_sve_mla_s, gen_helper_sve_mla_d,
1195 TRANS_FEAT(MLA, aa64_sve, do_zpzzz_ool, a, mla_fns[a->esz])
1197 static gen_helper_gvec_5 * const mls_fns[4] = {
1198 gen_helper_sve_mls_b, gen_helper_sve_mls_h,
1199 gen_helper_sve_mls_s, gen_helper_sve_mls_d,
1201 TRANS_FEAT(MLS, aa64_sve, do_zpzzz_ool, a, mls_fns[a->esz])
1204 *** SVE Index Generation Group
1207 static bool do_index(DisasContext *s, int esz, int rd,
1208 TCGv_i64 start, TCGv_i64 incr)
1210 unsigned vsz;
1211 TCGv_i32 desc;
1212 TCGv_ptr t_zd;
1214 if (!sve_access_check(s)) {
1215 return true;
1218 vsz = vec_full_reg_size(s);
1219 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
1220 t_zd = tcg_temp_new_ptr();
1222 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
1223 if (esz == 3) {
1224 gen_helper_sve_index_d(t_zd, start, incr, desc);
1225 } else {
1226 typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
1227 static index_fn * const fns[3] = {
1228 gen_helper_sve_index_b,
1229 gen_helper_sve_index_h,
1230 gen_helper_sve_index_s,
1232 TCGv_i32 s32 = tcg_temp_new_i32();
1233 TCGv_i32 i32 = tcg_temp_new_i32();
1235 tcg_gen_extrl_i64_i32(s32, start);
1236 tcg_gen_extrl_i64_i32(i32, incr);
1237 fns[esz](t_zd, s32, i32, desc);
1239 tcg_temp_free_i32(s32);
1240 tcg_temp_free_i32(i32);
1242 tcg_temp_free_ptr(t_zd);
1243 return true;
1246 TRANS_FEAT(INDEX_ii, aa64_sve, do_index, a->esz, a->rd,
1247 tcg_constant_i64(a->imm1), tcg_constant_i64(a->imm2))
1248 TRANS_FEAT(INDEX_ir, aa64_sve, do_index, a->esz, a->rd,
1249 tcg_constant_i64(a->imm), cpu_reg(s, a->rm))
1250 TRANS_FEAT(INDEX_ri, aa64_sve, do_index, a->esz, a->rd,
1251 cpu_reg(s, a->rn), tcg_constant_i64(a->imm))
1252 TRANS_FEAT(INDEX_rr, aa64_sve, do_index, a->esz, a->rd,
1253 cpu_reg(s, a->rn), cpu_reg(s, a->rm))
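
/*
 * For example: INDEX Z0.S, #1, #2 fills element i of Z0 with 1 + 2 * i,
 * i.e. 1, 3, 5, ... across the whole vector.
 */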
1256 *** SVE Stack Allocation Group
1259 static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
1261 if (sve_access_check(s)) {
1262 TCGv_i64 rd = cpu_reg_sp(s, a->rd);
1263 TCGv_i64 rn = cpu_reg_sp(s, a->rn);
1264 tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
1266 return true;
1269 static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
1271 if (sve_access_check(s)) {
1272 TCGv_i64 rd = cpu_reg_sp(s, a->rd);
1273 TCGv_i64 rn = cpu_reg_sp(s, a->rn);
1274 tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
1276 return true;
1279 static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
1281 if (sve_access_check(s)) {
1282 TCGv_i64 reg = cpu_reg(s, a->rd);
1283 tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
1285 return true;
1289 *** SVE Compute Vector Address Group
1292 static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
1294 return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
1297 TRANS_FEAT(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
1298 TRANS_FEAT(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
1299 TRANS_FEAT(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
1300 TRANS_FEAT(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)
1303 *** SVE Integer Misc - Unpredicated Group
1306 static gen_helper_gvec_2 * const fexpa_fns[4] = {
1307 NULL, gen_helper_sve_fexpa_h,
1308 gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
1310 TRANS_FEAT(FEXPA, aa64_sve, gen_gvec_ool_zz,
1311 fexpa_fns[a->esz], a->rd, a->rn, 0)
1313 static gen_helper_gvec_3 * const ftssel_fns[4] = {
1314 NULL, gen_helper_sve_ftssel_h,
1315 gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
1317 TRANS_FEAT(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz, ftssel_fns[a->esz], a, 0)
1320 *** SVE Predicate Logical Operations Group
1323 static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
1324 const GVecGen4 *gvec_op)
1326 if (!sve_access_check(s)) {
1327 return true;
1330 unsigned psz = pred_gvec_reg_size(s);
1331 int dofs = pred_full_reg_offset(s, a->rd);
1332 int nofs = pred_full_reg_offset(s, a->rn);
1333 int mofs = pred_full_reg_offset(s, a->rm);
1334 int gofs = pred_full_reg_offset(s, a->pg);
1336 if (!a->s) {
1337 tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
1338 return true;
1341 if (psz == 8) {
1342 /* Do the operation and the flags generation in temps. */
1343 TCGv_i64 pd = tcg_temp_new_i64();
1344 TCGv_i64 pn = tcg_temp_new_i64();
1345 TCGv_i64 pm = tcg_temp_new_i64();
1346 TCGv_i64 pg = tcg_temp_new_i64();
1348 tcg_gen_ld_i64(pn, cpu_env, nofs);
1349 tcg_gen_ld_i64(pm, cpu_env, mofs);
1350 tcg_gen_ld_i64(pg, cpu_env, gofs);
1352 gvec_op->fni8(pd, pn, pm, pg);
1353 tcg_gen_st_i64(pd, cpu_env, dofs);
1355 do_predtest1(pd, pg);
1357 tcg_temp_free_i64(pd);
1358 tcg_temp_free_i64(pn);
1359 tcg_temp_free_i64(pm);
1360 tcg_temp_free_i64(pg);
1361 } else {
1362 /* The operation and flags generation is large. The computation
1363 * of the flags depends on the original contents of the guarding
1364 * predicate. If the destination overwrites the guarding predicate,
1365 * then the easiest way to get this right is to save a copy.
1367 int tofs = gofs;
1368 if (a->rd == a->pg) {
1369 tofs = offsetof(CPUARMState, vfp.preg_tmp);
1370 tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
1373 tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
1374 do_predtest(s, dofs, tofs, psz / 8);
1376 return true;
1379 static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1381 tcg_gen_and_i64(pd, pn, pm);
1382 tcg_gen_and_i64(pd, pd, pg);
1385 static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1386 TCGv_vec pm, TCGv_vec pg)
1388 tcg_gen_and_vec(vece, pd, pn, pm);
1389 tcg_gen_and_vec(vece, pd, pd, pg);
1392 static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
1394 static const GVecGen4 op = {
1395 .fni8 = gen_and_pg_i64,
1396 .fniv = gen_and_pg_vec,
1397 .fno = gen_helper_sve_and_pppp,
1398 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1401 if (!a->s) {
1402 if (a->rn == a->rm) {
1403 if (a->pg == a->rn) {
1404 return do_mov_p(s, a->rd, a->rn);
1406 return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
1407 } else if (a->pg == a->rn || a->pg == a->rm) {
1408 return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
1411 return do_pppp_flags(s, a, &op);
1414 static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1416 tcg_gen_andc_i64(pd, pn, pm);
1417 tcg_gen_and_i64(pd, pd, pg);
1420 static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1421 TCGv_vec pm, TCGv_vec pg)
1423 tcg_gen_andc_vec(vece, pd, pn, pm);
1424 tcg_gen_and_vec(vece, pd, pd, pg);
1427 static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
1429 static const GVecGen4 op = {
1430 .fni8 = gen_bic_pg_i64,
1431 .fniv = gen_bic_pg_vec,
1432 .fno = gen_helper_sve_bic_pppp,
1433 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1436 if (!a->s && a->pg == a->rn) {
1437 return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
1439 return do_pppp_flags(s, a, &op);
1442 static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1444 tcg_gen_xor_i64(pd, pn, pm);
1445 tcg_gen_and_i64(pd, pd, pg);
1448 static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1449 TCGv_vec pm, TCGv_vec pg)
1451 tcg_gen_xor_vec(vece, pd, pn, pm);
1452 tcg_gen_and_vec(vece, pd, pd, pg);
1455 static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
1457 static const GVecGen4 op = {
1458 .fni8 = gen_eor_pg_i64,
1459 .fniv = gen_eor_pg_vec,
1460 .fno = gen_helper_sve_eor_pppp,
1461 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1464 /* Alias NOT (predicate) is EOR Pd.B, Pg/Z, Pn.B, Pg.B */
1465 if (!a->s && a->pg == a->rm) {
1466 return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->pg, a->rn);
1468 return do_pppp_flags(s, a, &op);
1471 static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
1473 if (a->s) {
1474 return false;
1476 if (sve_access_check(s)) {
1477 unsigned psz = pred_gvec_reg_size(s);
1478 tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
1479 pred_full_reg_offset(s, a->pg),
1480 pred_full_reg_offset(s, a->rn),
1481 pred_full_reg_offset(s, a->rm), psz, psz);
1483 return true;
1486 static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1488 tcg_gen_or_i64(pd, pn, pm);
1489 tcg_gen_and_i64(pd, pd, pg);
1492 static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1493 TCGv_vec pm, TCGv_vec pg)
1495 tcg_gen_or_vec(vece, pd, pn, pm);
1496 tcg_gen_and_vec(vece, pd, pd, pg);
1499 static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
1501 static const GVecGen4 op = {
1502 .fni8 = gen_orr_pg_i64,
1503 .fniv = gen_orr_pg_vec,
1504 .fno = gen_helper_sve_orr_pppp,
1505 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1508 if (!a->s && a->pg == a->rn && a->rn == a->rm) {
1509 return do_mov_p(s, a->rd, a->rn);
1511 return do_pppp_flags(s, a, &op);
1514 static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1516 tcg_gen_orc_i64(pd, pn, pm);
1517 tcg_gen_and_i64(pd, pd, pg);
1520 static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1521 TCGv_vec pm, TCGv_vec pg)
1523 tcg_gen_orc_vec(vece, pd, pn, pm);
1524 tcg_gen_and_vec(vece, pd, pd, pg);
1527 static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
1529 static const GVecGen4 op = {
1530 .fni8 = gen_orn_pg_i64,
1531 .fniv = gen_orn_pg_vec,
1532 .fno = gen_helper_sve_orn_pppp,
1533 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1535 return do_pppp_flags(s, a, &op);
1538 static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1540 tcg_gen_or_i64(pd, pn, pm);
1541 tcg_gen_andc_i64(pd, pg, pd);
1544 static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1545 TCGv_vec pm, TCGv_vec pg)
1547 tcg_gen_or_vec(vece, pd, pn, pm);
1548 tcg_gen_andc_vec(vece, pd, pg, pd);
1551 static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
1553 static const GVecGen4 op = {
1554 .fni8 = gen_nor_pg_i64,
1555 .fniv = gen_nor_pg_vec,
1556 .fno = gen_helper_sve_nor_pppp,
1557 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1559 return do_pppp_flags(s, a, &op);
1562 static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
1564 tcg_gen_and_i64(pd, pn, pm);
1565 tcg_gen_andc_i64(pd, pg, pd);
1568 static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
1569 TCGv_vec pm, TCGv_vec pg)
1571 tcg_gen_and_vec(vece, pd, pn, pm);
1572 tcg_gen_andc_vec(vece, pd, pg, pd);
1575 static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
1577 static const GVecGen4 op = {
1578 .fni8 = gen_nand_pg_i64,
1579 .fniv = gen_nand_pg_vec,
1580 .fno = gen_helper_sve_nand_pppp,
1581 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
1583 return do_pppp_flags(s, a, &op);
1587 *** SVE Predicate Misc Group
1590 static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
1592 if (sve_access_check(s)) {
1593 int nofs = pred_full_reg_offset(s, a->rn);
1594 int gofs = pred_full_reg_offset(s, a->pg);
1595 int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);
1597 if (words == 1) {
1598 TCGv_i64 pn = tcg_temp_new_i64();
1599 TCGv_i64 pg = tcg_temp_new_i64();
1601 tcg_gen_ld_i64(pn, cpu_env, nofs);
1602 tcg_gen_ld_i64(pg, cpu_env, gofs);
1603 do_predtest1(pn, pg);
1605 tcg_temp_free_i64(pn);
1606 tcg_temp_free_i64(pg);
1607 } else {
1608 do_predtest(s, nofs, gofs, words);
1611 return true;
1614 /* See the ARM pseudocode DecodePredCount. */
1615 static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
1617 unsigned elements = fullsz >> esz;
1618 unsigned bound;
1620 switch (pattern) {
1621 case 0x0: /* POW2 */
1622 return pow2floor(elements);
1623 case 0x1: /* VL1 */
1624 case 0x2: /* VL2 */
1625 case 0x3: /* VL3 */
1626 case 0x4: /* VL4 */
1627 case 0x5: /* VL5 */
1628 case 0x6: /* VL6 */
1629 case 0x7: /* VL7 */
1630 case 0x8: /* VL8 */
1631 bound = pattern;
1632 break;
1633 case 0x9: /* VL16 */
1634 case 0xa: /* VL32 */
1635 case 0xb: /* VL64 */
1636 case 0xc: /* VL128 */
1637 case 0xd: /* VL256 */
1638 bound = 16 << (pattern - 9);
1639 break;
1640 case 0x1d: /* MUL4 */
1641 return elements - elements % 4;
1642 case 0x1e: /* MUL3 */
1643 return elements - elements % 3;
1644 case 0x1f: /* ALL */
1645 return elements;
1646 default: /* #uimm5 */
1647 return 0;
1649 return elements >= bound ? bound : 0;
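
/*
 * For example: with a 48-byte vector and esz == MO_32 there are 12
 * elements, so POW2 yields 8, VL8 yields 8, VL16 yields 0 (the bound
 * exceeds the element count), MUL4 and MUL3 both yield 12, and ALL
 * yields 12.
 */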
1652 /* This handles all of the predicate initialization instructions,
1653 * PTRUE, PFALSE, SETFFR. For PFALSE, we will have set PAT == 32
1654 * so that decode_pred_count returns 0. For SETFFR, we will have
1655 * set RD == 16 == FFR.
1657 static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
1659 if (!sve_access_check(s)) {
1660 return true;
1663 unsigned fullsz = vec_full_reg_size(s);
1664 unsigned ofs = pred_full_reg_offset(s, rd);
1665 unsigned numelem, setsz, i;
1666 uint64_t word, lastword;
1667 TCGv_i64 t;
1669 numelem = decode_pred_count(fullsz, pat, esz);
1671 /* Determine what we must store into each bit, and how many. */
1672 if (numelem == 0) {
1673 lastword = word = 0;
1674 setsz = fullsz;
1675 } else {
1676 setsz = numelem << esz;
1677 lastword = word = pred_esz_masks[esz];
1678 if (setsz % 64) {
1679 lastword &= MAKE_64BIT_MASK(0, setsz % 64);
1683 t = tcg_temp_new_i64();
1684 if (fullsz <= 64) {
1685 tcg_gen_movi_i64(t, lastword);
1686 tcg_gen_st_i64(t, cpu_env, ofs);
1687 goto done;
1690 if (word == lastword) {
1691 unsigned maxsz = size_for_gvec(fullsz / 8);
1692 unsigned oprsz = size_for_gvec(setsz / 8);
1694 if (oprsz * 8 == setsz) {
1695 tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
1696 goto done;
1700 setsz /= 8;
1701 fullsz /= 8;
1703 tcg_gen_movi_i64(t, word);
1704 for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
1705 tcg_gen_st_i64(t, cpu_env, ofs + i);
1707 if (lastword != word) {
1708 tcg_gen_movi_i64(t, lastword);
1709 tcg_gen_st_i64(t, cpu_env, ofs + i);
1710 i += 8;
1712 if (i < fullsz) {
1713 tcg_gen_movi_i64(t, 0);
1714 for (; i < fullsz; i += 8) {
1715 tcg_gen_st_i64(t, cpu_env, ofs + i);
1719 done:
1720 tcg_temp_free_i64(t);
1722 /* PTRUES */
1723 if (setflag) {
1724 tcg_gen_movi_i32(cpu_NF, -(word != 0));
1725 tcg_gen_movi_i32(cpu_CF, word == 0);
1726 tcg_gen_movi_i32(cpu_VF, 0);
1727 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
1729 return true;
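
/*
 * For example: PTRUE P0.S, VL3 on a 32-byte vector gives numelem = 3
 * and setsz = 12 predicate bits, so lastword is pred_esz_masks[MO_32]
 * masked down to 12 bits, i.e. 0x111, and the fullsz <= 64 path stores
 * that single word.
 */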
1732 TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)
1734 /* Note pat == 31 is #all, to set all elements. */
1735 TRANS_FEAT(SETFFR, aa64_sve, do_predset, 0, FFR_PRED_NUM, 31, false)
1737 /* Note pat == 32 is #unimp, to set no elements. */
1738 TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)
1740 static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
1742 /* The path through do_pppp_flags is complicated enough to want to avoid
1743 * duplication. Frob the arguments into the form of a predicated AND.
1745 arg_rprr_s alt_a = {
1746 .rd = a->rd, .pg = a->pg, .s = a->s,
1747 .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
1749 return trans_AND_pppp(s, &alt_a);
1752 TRANS_FEAT(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
1753 TRANS_FEAT(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)
1755 static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
1756 void (*gen_fn)(TCGv_i32, TCGv_ptr,
1757 TCGv_ptr, TCGv_i32))
1759 if (!sve_access_check(s)) {
1760 return true;
1763 TCGv_ptr t_pd = tcg_temp_new_ptr();
1764 TCGv_ptr t_pg = tcg_temp_new_ptr();
1765 TCGv_i32 t;
1766 unsigned desc = 0;
1768 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
1769 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
1771 tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
1772 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
1773 t = tcg_temp_new_i32();
1775 gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc));
1776 tcg_temp_free_ptr(t_pd);
1777 tcg_temp_free_ptr(t_pg);
1779 do_pred_flags(t);
1780 tcg_temp_free_i32(t);
1781 return true;
1784 TRANS_FEAT(PFIRST, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pfirst)
1785 TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext)
1788 *** SVE Element Count Group
/* Perform an inline saturating addition of a 32-bit value within
 * a 64-bit register.  The second operand is known to be positive,
 * which halves the comparisons we must perform to bound the result.
 */
1795 static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
1797 int64_t ibound;
1799 /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
1800 if (u) {
1801 tcg_gen_ext32u_i64(reg, reg);
1802 } else {
1803 tcg_gen_ext32s_i64(reg, reg);
1805 if (d) {
1806 tcg_gen_sub_i64(reg, reg, val);
1807 ibound = (u ? 0 : INT32_MIN);
1808 tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound));
1809 } else {
1810 tcg_gen_add_i64(reg, reg, val);
1811 ibound = (u ? UINT32_MAX : INT32_MAX);
1812 tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound));
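
/*
 * For example: an unsigned increment of 0xffffff00 by 0x200 computes
 * 0x1_0000_0100 in the 64-bit register, and the smin against UINT32_MAX
 * clamps the result to 0xffffffff.
 */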
1816 /* Similarly with 64-bit values. */
1817 static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
1819 TCGv_i64 t0 = tcg_temp_new_i64();
1820 TCGv_i64 t2;
1822 if (u) {
1823 if (d) {
1824 tcg_gen_sub_i64(t0, reg, val);
1825 t2 = tcg_constant_i64(0);
1826 tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0);
1827 } else {
1828 tcg_gen_add_i64(t0, reg, val);
1829 t2 = tcg_constant_i64(-1);
1830 tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0);
1832 } else {
1833 TCGv_i64 t1 = tcg_temp_new_i64();
1834 if (d) {
1835 /* Detect signed overflow for subtraction. */
1836 tcg_gen_xor_i64(t0, reg, val);
1837 tcg_gen_sub_i64(t1, reg, val);
1838 tcg_gen_xor_i64(reg, reg, t1);
1839 tcg_gen_and_i64(t0, t0, reg);
1841 /* Bound the result. */
1842 tcg_gen_movi_i64(reg, INT64_MIN);
1843 t2 = tcg_constant_i64(0);
1844 tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
1845 } else {
1846 /* Detect signed overflow for addition. */
1847 tcg_gen_xor_i64(t0, reg, val);
1848 tcg_gen_add_i64(reg, reg, val);
1849 tcg_gen_xor_i64(t1, reg, val);
1850 tcg_gen_andc_i64(t0, t1, t0);
1852 /* Bound the result. */
1853 tcg_gen_movi_i64(t1, INT64_MAX);
1854 t2 = tcg_constant_i64(0);
1855 tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
1857 tcg_temp_free_i64(t1);
1859 tcg_temp_free_i64(t0);
1862 /* Similarly with a vector and a scalar operand. */
1863 static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
1864 TCGv_i64 val, bool u, bool d)
1866 unsigned vsz = vec_full_reg_size(s);
1867 TCGv_ptr dptr, nptr;
1868 TCGv_i32 t32, desc;
1869 TCGv_i64 t64;
1871 dptr = tcg_temp_new_ptr();
1872 nptr = tcg_temp_new_ptr();
1873 tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
1874 tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
1875 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
1877 switch (esz) {
1878 case MO_8:
1879 t32 = tcg_temp_new_i32();
1880 tcg_gen_extrl_i64_i32(t32, val);
1881 if (d) {
1882 tcg_gen_neg_i32(t32, t32);
1884 if (u) {
1885 gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc);
1886 } else {
1887 gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc);
1889 tcg_temp_free_i32(t32);
1890 break;
1892 case MO_16:
1893 t32 = tcg_temp_new_i32();
1894 tcg_gen_extrl_i64_i32(t32, val);
1895 if (d) {
1896 tcg_gen_neg_i32(t32, t32);
1898 if (u) {
1899 gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc);
1900 } else {
1901 gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc);
1903 tcg_temp_free_i32(t32);
1904 break;
1906 case MO_32:
1907 t64 = tcg_temp_new_i64();
1908 if (d) {
1909 tcg_gen_neg_i64(t64, val);
1910 } else {
1911 tcg_gen_mov_i64(t64, val);
1913 if (u) {
1914 gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc);
1915 } else {
1916 gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc);
1918 tcg_temp_free_i64(t64);
1919 break;
1921 case MO_64:
1922 if (u) {
1923 if (d) {
1924 gen_helper_sve_uqsubi_d(dptr, nptr, val, desc);
1925 } else {
1926 gen_helper_sve_uqaddi_d(dptr, nptr, val, desc);
1928 } else if (d) {
1929 t64 = tcg_temp_new_i64();
1930 tcg_gen_neg_i64(t64, val);
1931 gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc);
1932 tcg_temp_free_i64(t64);
1933 } else {
1934 gen_helper_sve_sqaddi_d(dptr, nptr, val, desc);
1936 break;
1938 default:
1939 g_assert_not_reached();
1942 tcg_temp_free_ptr(dptr);
1943 tcg_temp_free_ptr(nptr);
1946 static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a)
1948 if (sve_access_check(s)) {
1949 unsigned fullsz = vec_full_reg_size(s);
1950 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1951 tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm);
1953 return true;
1956 static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a)
1958 if (sve_access_check(s)) {
1959 unsigned fullsz = vec_full_reg_size(s);
1960 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1961 int inc = numelem * a->imm * (a->d ? -1 : 1);
1962 TCGv_i64 reg = cpu_reg(s, a->rd);
1964 tcg_gen_addi_i64(reg, reg, inc);
1966 return true;
1969 static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a)
1971 if (!sve_access_check(s)) {
1972 return true;
1975 unsigned fullsz = vec_full_reg_size(s);
1976 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
1977 int inc = numelem * a->imm;
1978 TCGv_i64 reg = cpu_reg(s, a->rd);
1980 /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
1981 if (inc == 0) {
1982 if (a->u) {
1983 tcg_gen_ext32u_i64(reg, reg);
1984 } else {
1985 tcg_gen_ext32s_i64(reg, reg);
1987 } else {
1988 do_sat_addsub_32(reg, tcg_constant_i64(inc), a->u, a->d);
1990 return true;
1993 static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a)
1995 if (!sve_access_check(s)) {
1996 return true;
1999 unsigned fullsz = vec_full_reg_size(s);
2000 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2001 int inc = numelem * a->imm;
2002 TCGv_i64 reg = cpu_reg(s, a->rd);
2004 if (inc != 0) {
2005 do_sat_addsub_64(reg, tcg_constant_i64(inc), a->u, a->d);
2007 return true;
2010 static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
2012 if (a->esz == 0) {
2013 return false;
2016 unsigned fullsz = vec_full_reg_size(s);
2017 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2018 int inc = numelem * a->imm;
2020 if (inc != 0) {
2021 if (sve_access_check(s)) {
2022 tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd),
2023 vec_full_reg_offset(s, a->rn),
2024 tcg_constant_i64(a->d ? -inc : inc),
2025 fullsz, fullsz);
2027 } else {
2028 do_mov_z(s, a->rd, a->rn);
2030 return true;
2033 static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
2035 if (a->esz == 0) {
2036 return false;
2039 unsigned fullsz = vec_full_reg_size(s);
2040 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2041 int inc = numelem * a->imm;
2043 if (inc != 0) {
2044 if (sve_access_check(s)) {
2045 do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
2046 tcg_constant_i64(inc), a->u, a->d);
2048 } else {
2049 do_mov_z(s, a->rd, a->rn);
2051 return true;
2055 *** SVE Bitwise Immediate Group
2058 static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn)
2060 uint64_t imm;
2061 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
2062 extract32(a->dbm, 0, 6),
2063 extract32(a->dbm, 6, 6))) {
2064 return false;
2066 return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm);
2069 TRANS_FEAT(AND_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_andi)
2070 TRANS_FEAT(ORR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_ori)
2071 TRANS_FEAT(EOR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_xori)
2073 static bool trans_DUPM(DisasContext *s, arg_DUPM *a)
2075 uint64_t imm;
2076 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
2077 extract32(a->dbm, 0, 6),
2078 extract32(a->dbm, 6, 6))) {
2079 return false;
2081 if (sve_access_check(s)) {
2082 do_dupi_z(s, a->rd, imm);
2084 return true;
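/*
 * Worked example, assuming the standard AArch64 bitmask-immediate scheme
 * implemented by logic_imm_decode_wmask: the tuple (N, imms, immr) =
 * (1, 0b000000, 0b000000) decodes to 0x0000000000000001, and
 * (0, 0b111100, 0b000000) decodes to 0x5555555555555555 (one set bit in
 * every 2-bit element).  DUPM then splats the decoded 64-bit mask across
 * the whole destination vector via do_dupi_z.
 */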
2088 *** SVE Integer Wide Immediate - Predicated Group
2091 /* Implement all merging copies. This is used for CPY (immediate),
2092 * FCPY, CPY (scalar), CPY (SIMD&FP scalar).
2094 static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg,
2095 TCGv_i64 val)
2097 typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
2098 static gen_cpy * const fns[4] = {
2099 gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h,
2100 gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d,
2102 unsigned vsz = vec_full_reg_size(s);
2103 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
2104 TCGv_ptr t_zd = tcg_temp_new_ptr();
2105 TCGv_ptr t_zn = tcg_temp_new_ptr();
2106 TCGv_ptr t_pg = tcg_temp_new_ptr();
2108 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
2109 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn));
2110 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
2112 fns[esz](t_zd, t_zn, t_pg, val, desc);
2114 tcg_temp_free_ptr(t_zd);
2115 tcg_temp_free_ptr(t_zn);
2116 tcg_temp_free_ptr(t_pg);
2119 static bool trans_FCPY(DisasContext *s, arg_FCPY *a)
2121 if (a->esz == 0) {
2122 return false;
2124 if (sve_access_check(s)) {
2125 /* Decode the VFP immediate. */
2126 uint64_t imm = vfp_expand_imm(a->esz, a->imm);
2127 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm));
2129 return true;
2132 static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a)
2134 if (sve_access_check(s)) {
2135 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm));
2137 return true;
2140 static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a)
2142 static gen_helper_gvec_2i * const fns[4] = {
2143 gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h,
2144 gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d,
2147 if (sve_access_check(s)) {
2148 unsigned vsz = vec_full_reg_size(s);
2149 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
2150 pred_full_reg_offset(s, a->pg),
2151 tcg_constant_i64(a->imm),
2152 vsz, vsz, 0, fns[a->esz]);
2154 return true;
2158 *** SVE Permute Extract Group
2161 static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
2163 if (!sve_access_check(s)) {
2164 return true;
2167 unsigned vsz = vec_full_reg_size(s);
2168 unsigned n_ofs = imm >= vsz ? 0 : imm;
2169 unsigned n_siz = vsz - n_ofs;
2170 unsigned d = vec_full_reg_offset(s, rd);
2171 unsigned n = vec_full_reg_offset(s, rn);
2172 unsigned m = vec_full_reg_offset(s, rm);
2174 /* Use host vector move insns if we have appropriate sizes
2175 * and no unfortunate overlap.
2177 if (m != d
2178 && n_ofs == size_for_gvec(n_ofs)
2179 && n_siz == size_for_gvec(n_siz)
2180 && (d != n || n_siz <= n_ofs)) {
2181 tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz);
2182 if (n_ofs != 0) {
2183 tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs);
2185 } else {
2186 tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext);
2188 return true;
2191 TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm)
2192 TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm)
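/*
 * Worked example for the do_EXT fast path, assuming size_for_gvec accepts
 * 8 bytes and multiples of 16: with vsz == 32 and imm == 16, n_ofs and
 * n_siz are both 16, so EXT Zd, Zn, Zm, #16 becomes two in-line gvec
 * moves, Zd[0..15] = Zn[16..31] followed by Zd[16..31] = Zm[0..15].
 * Other imm values for this vsz (or an unfortunate register overlap)
 * fall through to gen_helper_sve_ext.
 */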
2195 *** SVE Permute - Unpredicated Group
2198 static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a)
2200 if (sve_access_check(s)) {
2201 unsigned vsz = vec_full_reg_size(s);
2202 tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd),
2203 vsz, vsz, cpu_reg_sp(s, a->rn));
2205 return true;
2208 static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
2210 if ((a->imm & 0x1f) == 0) {
2211 return false;
2213 if (sve_access_check(s)) {
2214 unsigned vsz = vec_full_reg_size(s);
2215 unsigned dofs = vec_full_reg_offset(s, a->rd);
2216 unsigned esz, index;
2218 esz = ctz32(a->imm);
2219 index = a->imm >> (esz + 1);
2221 if ((index << esz) < vsz) {
2222 unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
2223 tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
2224 } else {
2226 * While dup_mem handles 128-bit elements, dup_imm does not.
2227 * Thankfully element size doesn't matter for splatting zero.
2229 tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
2232 return true;
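/*
 * Worked example of the index/size packing above: a->imm == 0b10100
 * gives esz = ctz32(0b10100) = 2 (32-bit elements) and
 * index = 0b10100 >> 3 = 2, i.e. DUP Zd.S, Zn.S[2].  The byte offset of
 * that element is index << esz = 8; if that offset is >= vsz, the index
 * is out of range and the destination is simply zeroed.
 */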
2235 static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
2237 typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
2238 static gen_insr * const fns[4] = {
2239 gen_helper_sve_insr_b, gen_helper_sve_insr_h,
2240 gen_helper_sve_insr_s, gen_helper_sve_insr_d,
2242 unsigned vsz = vec_full_reg_size(s);
2243 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
2244 TCGv_ptr t_zd = tcg_temp_new_ptr();
2245 TCGv_ptr t_zn = tcg_temp_new_ptr();
2247 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
2248 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
2250 fns[a->esz](t_zd, t_zn, val, desc);
2252 tcg_temp_free_ptr(t_zd);
2253 tcg_temp_free_ptr(t_zn);
2256 static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
2258 if (sve_access_check(s)) {
2259 TCGv_i64 t = tcg_temp_new_i64();
2260 tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
2261 do_insr_i64(s, a, t);
2262 tcg_temp_free_i64(t);
2264 return true;
2267 static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a)
2269 if (sve_access_check(s)) {
2270 do_insr_i64(s, a, cpu_reg(s, a->rm));
2272 return true;
2275 static gen_helper_gvec_2 * const rev_fns[4] = {
2276 gen_helper_sve_rev_b, gen_helper_sve_rev_h,
2277 gen_helper_sve_rev_s, gen_helper_sve_rev_d
2279 TRANS_FEAT(REV_v, aa64_sve, gen_gvec_ool_zz, rev_fns[a->esz], a->rd, a->rn, 0)
2281 static gen_helper_gvec_3 * const sve_tbl_fns[4] = {
2282 gen_helper_sve_tbl_b, gen_helper_sve_tbl_h,
2283 gen_helper_sve_tbl_s, gen_helper_sve_tbl_d
2285 TRANS_FEAT(TBL, aa64_sve, gen_gvec_ool_arg_zzz, sve_tbl_fns[a->esz], a, 0)
2287 static gen_helper_gvec_4 * const sve2_tbl_fns[4] = {
2288 gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h,
2289 gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d
2291 TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz],
2292 a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0)
2294 static gen_helper_gvec_3 * const tbx_fns[4] = {
2295 gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
2296 gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
2298 TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0)
2300 static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
2302 static gen_helper_gvec_2 * const fns[4][2] = {
2303 { NULL, NULL },
2304 { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h },
2305 { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s },
2306 { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d },
2309 if (a->esz == 0) {
2310 return false;
2312 if (sve_access_check(s)) {
2313 unsigned vsz = vec_full_reg_size(s);
2314 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
2315 vec_full_reg_offset(s, a->rn)
2316 + (a->h ? vsz / 2 : 0),
2317 vsz, vsz, 0, fns[a->esz][a->u]);
2319 return true;
2323 *** SVE Permute - Predicates Group
2326 static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
2327 gen_helper_gvec_3 *fn)
2329 if (!sve_access_check(s)) {
2330 return true;
2333 unsigned vsz = pred_full_reg_size(s);
2335 TCGv_ptr t_d = tcg_temp_new_ptr();
2336 TCGv_ptr t_n = tcg_temp_new_ptr();
2337 TCGv_ptr t_m = tcg_temp_new_ptr();
2338 uint32_t desc = 0;
2340 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
2341 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
2342 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
2344 tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
2345 tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
2346 tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));
2348 fn(t_d, t_n, t_m, tcg_constant_i32(desc));
2350 tcg_temp_free_ptr(t_d);
2351 tcg_temp_free_ptr(t_n);
2352 tcg_temp_free_ptr(t_m);
2353 return true;
2356 static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
2357 gen_helper_gvec_2 *fn)
2359 if (!sve_access_check(s)) {
2360 return true;
2363 unsigned vsz = pred_full_reg_size(s);
2364 TCGv_ptr t_d = tcg_temp_new_ptr();
2365 TCGv_ptr t_n = tcg_temp_new_ptr();
2366 uint32_t desc = 0;
2368 tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
2369 tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
2371 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
2372 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
2373 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
2375 fn(t_d, t_n, tcg_constant_i32(desc));
2377 tcg_temp_free_ptr(t_d);
2378 tcg_temp_free_ptr(t_n);
2379 return true;
2382 TRANS_FEAT(ZIP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_zip_p)
2383 TRANS_FEAT(ZIP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_zip_p)
2384 TRANS_FEAT(UZP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_uzp_p)
2385 TRANS_FEAT(UZP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_uzp_p)
2386 TRANS_FEAT(TRN1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_trn_p)
2387 TRANS_FEAT(TRN2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_trn_p)
2389 TRANS_FEAT(REV_p, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_rev_p)
2390 TRANS_FEAT(PUNPKLO, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_punpk_p)
2391 TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p)
2394 *** SVE Permute - Interleaving Group
2397 static gen_helper_gvec_3 * const zip_fns[4] = {
2398 gen_helper_sve_zip_b, gen_helper_sve_zip_h,
2399 gen_helper_sve_zip_s, gen_helper_sve_zip_d,
2401 TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2402 zip_fns[a->esz], a, 0)
2403 TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2404 zip_fns[a->esz], a, vec_full_reg_size(s) / 2)
2406 TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2407 gen_helper_sve2_zip_q, a, 0)
2408 TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2409 gen_helper_sve2_zip_q, a,
2410 QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2)
2412 static gen_helper_gvec_3 * const uzp_fns[4] = {
2413 gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
2414 gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
2417 TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2418 uzp_fns[a->esz], a, 0)
2419 TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2420 uzp_fns[a->esz], a, 1 << a->esz)
2422 TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2423 gen_helper_sve2_uzp_q, a, 0)
2424 TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2425 gen_helper_sve2_uzp_q, a, 16)
2427 static gen_helper_gvec_3 * const trn_fns[4] = {
2428 gen_helper_sve_trn_b, gen_helper_sve_trn_h,
2429 gen_helper_sve_trn_s, gen_helper_sve_trn_d,
2432 TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2433 trn_fns[a->esz], a, 0)
2434 TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2435 trn_fns[a->esz], a, 1 << a->esz)
2437 TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2438 gen_helper_sve2_trn_q, a, 0)
2439 TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2440 gen_helper_sve2_trn_q, a, 16)
2443 *** SVE Permute Vector - Predicated Group
2446 static gen_helper_gvec_3 * const compact_fns[4] = {
2447 NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
2449 TRANS_FEAT(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz, compact_fns[a->esz], a, 0)
2451 /* Call the helper that computes the ARM LastActiveElement pseudocode
2452 * function, scaled by the element size. This includes the not found
2453 * indication; e.g. not found for esz=3 is -8.
2455 static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
2457 /* Predicate sizes may be smaller and cannot use simd_desc. We cannot
2458 * round up, as we do elsewhere, because we need the exact size.
2460 TCGv_ptr t_p = tcg_temp_new_ptr();
2461 unsigned desc = 0;
2463 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
2464 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
2466 tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));
2468 gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc));
2470 tcg_temp_free_ptr(t_p);
2473 /* Increment LAST to the offset of the next element in the vector,
2474 * wrapping around to 0.
2476 static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz)
2478 unsigned vsz = vec_full_reg_size(s);
2480 tcg_gen_addi_i32(last, last, 1 << esz);
2481 if (is_power_of_2(vsz)) {
2482 tcg_gen_andi_i32(last, last, vsz - 1);
2483 } else {
2484 TCGv_i32 max = tcg_constant_i32(vsz);
2485 TCGv_i32 zero = tcg_constant_i32(0);
2486 tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last);
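/*
 * For example: with esz == MO_64 and a 48-byte vector (not a power of 2),
 * last == 40 becomes 48 after the addition and the movcond above resets
 * it to 0; with a 64-byte vector, last == 56 becomes 64 and the and-mask
 * wraps it the same way (64 & 63 == 0).
 */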
2490 /* If LAST < 0, set LAST to the offset of the last element in the vector. */
2491 static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz)
2493 unsigned vsz = vec_full_reg_size(s);
2495 if (is_power_of_2(vsz)) {
2496 tcg_gen_andi_i32(last, last, vsz - 1);
2497 } else {
2498 TCGv_i32 max = tcg_constant_i32(vsz - (1 << esz));
2499 TCGv_i32 zero = tcg_constant_i32(0);
2500 tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last);
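/*
 * Note: find_last_active returns -(1 << esz) when no element is active,
 * and for a power-of-2 vsz the and-mask maps that straight to the last
 * element, e.g. esz == 3, vsz == 64: -8 & 63 == 56, the offset of the
 * final 64-bit element.  The movcond form computes the same
 * vsz - (1 << esz) explicitly.
 */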
2504 /* Load an unsigned element of ESZ from BASE+OFS. */
2505 static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz)
2507 TCGv_i64 r = tcg_temp_new_i64();
2509 switch (esz) {
2510 case 0:
2511 tcg_gen_ld8u_i64(r, base, ofs);
2512 break;
2513 case 1:
2514 tcg_gen_ld16u_i64(r, base, ofs);
2515 break;
2516 case 2:
2517 tcg_gen_ld32u_i64(r, base, ofs);
2518 break;
2519 case 3:
2520 tcg_gen_ld_i64(r, base, ofs);
2521 break;
2522 default:
2523 g_assert_not_reached();
2525 return r;
2528 /* Load an unsigned element of ESZ from RM[LAST]. */
2529 static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
2530 int rm, int esz)
2532 TCGv_ptr p = tcg_temp_new_ptr();
2533 TCGv_i64 r;
2535 /* Convert the offset within the vector into an offset within ENV.
2536 * The final adjustment for the vector register base
2537 * is added via a constant offset to the load.
2539 #if HOST_BIG_ENDIAN
2540 /* Adjust for element ordering. See vec_reg_offset. */
2541 if (esz < 3) {
2542 tcg_gen_xori_i32(last, last, 8 - (1 << esz));
2544 #endif
2545 tcg_gen_ext_i32_ptr(p, last);
2546 tcg_gen_add_ptr(p, p, cpu_env);
2548 r = load_esz(p, vec_full_reg_offset(s, rm), esz);
2549 tcg_temp_free_ptr(p);
2551 return r;
2554 /* Compute CLAST for a Zreg. */
2555 static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
2557 TCGv_i32 last;
2558 TCGLabel *over;
2559 TCGv_i64 ele;
2560 unsigned vsz, esz = a->esz;
2562 if (!sve_access_check(s)) {
2563 return true;
2566 last = tcg_temp_local_new_i32();
2567 over = gen_new_label();
2569 find_last_active(s, last, esz, a->pg);
2571 /* There is of course no movcond for a 2048-bit vector,
2572 * so we must branch over the actual store.
2574 tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over);
2576 if (!before) {
2577 incr_last_active(s, last, esz);
2580 ele = load_last_active(s, last, a->rm, esz);
2581 tcg_temp_free_i32(last);
2583 vsz = vec_full_reg_size(s);
2584 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele);
2585 tcg_temp_free_i64(ele);
2587 /* If this insn used MOVPRFX, we may need a second move. */
2588 if (a->rd != a->rn) {
2589 TCGLabel *done = gen_new_label();
2590 tcg_gen_br(done);
2592 gen_set_label(over);
2593 do_mov_z(s, a->rd, a->rn);
2595 gen_set_label(done);
2596 } else {
2597 gen_set_label(over);
2599 return true;
2602 TRANS_FEAT(CLASTA_z, aa64_sve, do_clast_vector, a, false)
2603 TRANS_FEAT(CLASTB_z, aa64_sve, do_clast_vector, a, true)
2605 /* Compute CLAST for a scalar. */
2606 static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
2607 bool before, TCGv_i64 reg_val)
2609 TCGv_i32 last = tcg_temp_new_i32();
2610 TCGv_i64 ele, cmp;
2612 find_last_active(s, last, esz, pg);
2614 /* Extend the original value of last prior to incrementing. */
2615 cmp = tcg_temp_new_i64();
2616 tcg_gen_ext_i32_i64(cmp, last);
2618 if (!before) {
2619 incr_last_active(s, last, esz);
2622 /* The conceit here is that while last < 0 indicates not found, after
2623 * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
2624 * from which we can load garbage. We then discard the garbage with
2625 * a conditional move.
2627 ele = load_last_active(s, last, rm, esz);
2628 tcg_temp_free_i32(last);
2630 tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0),
2631 ele, reg_val);
2633 tcg_temp_free_i64(cmp);
2634 tcg_temp_free_i64(ele);
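/*
 * Consequence of the movcond above: if the governing predicate has no
 * active elements, cmp is negative, the dummy load is discarded, and
 * reg_val keeps the value it was passed in with -- so a CLASTA/CLASTB
 * with an all-false predicate leaves its destination unchanged.
 */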
2637 /* Compute CLAST for a Vreg. */
2638 static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2640 if (sve_access_check(s)) {
2641 int esz = a->esz;
2642 int ofs = vec_reg_offset(s, a->rd, 0, esz);
2643 TCGv_i64 reg = load_esz(cpu_env, ofs, esz);
2645 do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
2646 write_fp_dreg(s, a->rd, reg);
2647 tcg_temp_free_i64(reg);
2649 return true;
2652 TRANS_FEAT(CLASTA_v, aa64_sve, do_clast_fp, a, false)
2653 TRANS_FEAT(CLASTB_v, aa64_sve, do_clast_fp, a, true)
2655 /* Compute CLAST for a Xreg. */
2656 static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before)
2658 TCGv_i64 reg;
2660 if (!sve_access_check(s)) {
2661 return true;
2664 reg = cpu_reg(s, a->rd);
2665 switch (a->esz) {
2666 case 0:
2667 tcg_gen_ext8u_i64(reg, reg);
2668 break;
2669 case 1:
2670 tcg_gen_ext16u_i64(reg, reg);
2671 break;
2672 case 2:
2673 tcg_gen_ext32u_i64(reg, reg);
2674 break;
2675 case 3:
2676 break;
2677 default:
2678 g_assert_not_reached();
2681 do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg);
2682 return true;
2685 TRANS_FEAT(CLASTA_r, aa64_sve, do_clast_general, a, false)
2686 TRANS_FEAT(CLASTB_r, aa64_sve, do_clast_general, a, true)
2688 /* Compute LAST for a scalar. */
2689 static TCGv_i64 do_last_scalar(DisasContext *s, int esz,
2690 int pg, int rm, bool before)
2692 TCGv_i32 last = tcg_temp_new_i32();
2693 TCGv_i64 ret;
2695 find_last_active(s, last, esz, pg);
2696 if (before) {
2697 wrap_last_active(s, last, esz);
2698 } else {
2699 incr_last_active(s, last, esz);
2702 ret = load_last_active(s, last, rm, esz);
2703 tcg_temp_free_i32(last);
2704 return ret;
2707 /* Compute LAST for a Vreg. */
2708 static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2710 if (sve_access_check(s)) {
2711 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2712 write_fp_dreg(s, a->rd, val);
2713 tcg_temp_free_i64(val);
2715 return true;
2718 TRANS_FEAT(LASTA_v, aa64_sve, do_last_fp, a, false)
2719 TRANS_FEAT(LASTB_v, aa64_sve, do_last_fp, a, true)
2721 /* Compute LAST for a Xreg. */
2722 static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before)
2724 if (sve_access_check(s)) {
2725 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2726 tcg_gen_mov_i64(cpu_reg(s, a->rd), val);
2727 tcg_temp_free_i64(val);
2729 return true;
2732 TRANS_FEAT(LASTA_r, aa64_sve, do_last_general, a, false)
2733 TRANS_FEAT(LASTB_r, aa64_sve, do_last_general, a, true)
2735 static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a)
2737 if (sve_access_check(s)) {
2738 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn));
2740 return true;
2743 static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
2745 if (sve_access_check(s)) {
2746 int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
2747 TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
2748 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
2749 tcg_temp_free_i64(t);
2751 return true;
2754 static gen_helper_gvec_3 * const revb_fns[4] = {
2755 NULL, gen_helper_sve_revb_h,
2756 gen_helper_sve_revb_s, gen_helper_sve_revb_d,
2758 TRANS_FEAT(REVB, aa64_sve, gen_gvec_ool_arg_zpz, revb_fns[a->esz], a, 0)
2760 static gen_helper_gvec_3 * const revh_fns[4] = {
2761 NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d,
2763 TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
2765 TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
2766 a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)
2768 TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
2769 gen_helper_sve_splice, a, a->esz)
2771 TRANS_FEAT(SPLICE_sve2, aa64_sve2, gen_gvec_ool_zzzp, gen_helper_sve_splice,
2772 a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz)
2775 *** SVE Integer Compare - Vectors Group
2778 static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
2779 gen_helper_gvec_flags_4 *gen_fn)
2781 TCGv_ptr pd, zn, zm, pg;
2782 unsigned vsz;
2783 TCGv_i32 t;
2785 if (gen_fn == NULL) {
2786 return false;
2788 if (!sve_access_check(s)) {
2789 return true;
2792 vsz = vec_full_reg_size(s);
2793 t = tcg_temp_new_i32();
2794 pd = tcg_temp_new_ptr();
2795 zn = tcg_temp_new_ptr();
2796 zm = tcg_temp_new_ptr();
2797 pg = tcg_temp_new_ptr();
2799 tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
2800 tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
2801 tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
2802 tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
2804 gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0)));
2806 tcg_temp_free_ptr(pd);
2807 tcg_temp_free_ptr(zn);
2808 tcg_temp_free_ptr(zm);
2809 tcg_temp_free_ptr(pg);
2811 do_pred_flags(t);
2813 tcg_temp_free_i32(t);
2814 return true;
2817 #define DO_PPZZ(NAME, name) \
2818 static gen_helper_gvec_flags_4 * const name##_ppzz_fns[4] = { \
2819 gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \
2820 gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \
2821 }; \
2822 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_ppzz_flags, \
2823 a, name##_ppzz_fns[a->esz])
2825 DO_PPZZ(CMPEQ, cmpeq)
2826 DO_PPZZ(CMPNE, cmpne)
2827 DO_PPZZ(CMPGT, cmpgt)
2828 DO_PPZZ(CMPGE, cmpge)
2829 DO_PPZZ(CMPHI, cmphi)
2830 DO_PPZZ(CMPHS, cmphs)
2832 #undef DO_PPZZ
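/*
 * For reference, DO_PPZZ(CMPEQ, cmpeq) above expands to roughly:
 *
 *   static gen_helper_gvec_flags_4 * const cmpeq_ppzz_fns[4] = {
 *       gen_helper_sve_cmpeq_ppzz_b, gen_helper_sve_cmpeq_ppzz_h,
 *       gen_helper_sve_cmpeq_ppzz_s, gen_helper_sve_cmpeq_ppzz_d,
 *   };
 *   TRANS_FEAT(CMPEQ_ppzz, aa64_sve, do_ppzz_flags, a, cmpeq_ppzz_fns[a->esz])
 *
 * so each compare picks a per-element-size helper and routes through
 * do_ppzz_flags, which also updates NZCV via do_pred_flags.
 */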
2834 #define DO_PPZW(NAME, name) \
2835 static gen_helper_gvec_flags_4 * const name##_ppzw_fns[4] = { \
2836 gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \
2837 gen_helper_sve_##name##_ppzw_s, NULL \
2838 }; \
2839 TRANS_FEAT(NAME##_ppzw, aa64_sve, do_ppzz_flags, \
2840 a, name##_ppzw_fns[a->esz])
2842 DO_PPZW(CMPEQ, cmpeq)
2843 DO_PPZW(CMPNE, cmpne)
2844 DO_PPZW(CMPGT, cmpgt)
2845 DO_PPZW(CMPGE, cmpge)
2846 DO_PPZW(CMPHI, cmphi)
2847 DO_PPZW(CMPHS, cmphs)
2848 DO_PPZW(CMPLT, cmplt)
2849 DO_PPZW(CMPLE, cmple)
2850 DO_PPZW(CMPLO, cmplo)
2851 DO_PPZW(CMPLS, cmpls)
2853 #undef DO_PPZW
2856 *** SVE Integer Compare - Immediate Groups
2859 static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
2860 gen_helper_gvec_flags_3 *gen_fn)
2862 TCGv_ptr pd, zn, pg;
2863 unsigned vsz;
2864 TCGv_i32 t;
2866 if (gen_fn == NULL) {
2867 return false;
2869 if (!sve_access_check(s)) {
2870 return true;
2873 vsz = vec_full_reg_size(s);
2874 t = tcg_temp_new_i32();
2875 pd = tcg_temp_new_ptr();
2876 zn = tcg_temp_new_ptr();
2877 pg = tcg_temp_new_ptr();
2879 tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
2880 tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
2881 tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
2883 gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm)));
2885 tcg_temp_free_ptr(pd);
2886 tcg_temp_free_ptr(zn);
2887 tcg_temp_free_ptr(pg);
2889 do_pred_flags(t);
2891 tcg_temp_free_i32(t);
2892 return true;
2895 #define DO_PPZI(NAME, name) \
2896 static gen_helper_gvec_flags_3 * const name##_ppzi_fns[4] = { \
2897 gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \
2898 gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \
2899 }; \
2900 TRANS_FEAT(NAME##_ppzi, aa64_sve, do_ppzi_flags, a, \
2901 name##_ppzi_fns[a->esz])
2903 DO_PPZI(CMPEQ, cmpeq)
2904 DO_PPZI(CMPNE, cmpne)
2905 DO_PPZI(CMPGT, cmpgt)
2906 DO_PPZI(CMPGE, cmpge)
2907 DO_PPZI(CMPHI, cmphi)
2908 DO_PPZI(CMPHS, cmphs)
2909 DO_PPZI(CMPLT, cmplt)
2910 DO_PPZI(CMPLE, cmple)
2911 DO_PPZI(CMPLO, cmplo)
2912 DO_PPZI(CMPLS, cmpls)
2914 #undef DO_PPZI
2917 *** SVE Partition Break Group
2920 static bool do_brk3(DisasContext *s, arg_rprr_s *a,
2921 gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s)
2923 if (!sve_access_check(s)) {
2924 return true;
2927 unsigned vsz = pred_full_reg_size(s);
2929 /* Predicate sizes may be smaller and cannot use simd_desc. */
2930 TCGv_ptr d = tcg_temp_new_ptr();
2931 TCGv_ptr n = tcg_temp_new_ptr();
2932 TCGv_ptr m = tcg_temp_new_ptr();
2933 TCGv_ptr g = tcg_temp_new_ptr();
2934 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
2936 tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
2937 tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
2938 tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
2939 tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
2941 if (a->s) {
2942 TCGv_i32 t = tcg_temp_new_i32();
2943 fn_s(t, d, n, m, g, desc);
2944 do_pred_flags(t);
2945 tcg_temp_free_i32(t);
2946 } else {
2947 fn(d, n, m, g, desc);
2949 tcg_temp_free_ptr(d);
2950 tcg_temp_free_ptr(n);
2951 tcg_temp_free_ptr(m);
2952 tcg_temp_free_ptr(g);
2953 return true;
2956 static bool do_brk2(DisasContext *s, arg_rpr_s *a,
2957 gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s)
2959 if (!sve_access_check(s)) {
2960 return true;
2963 unsigned vsz = pred_full_reg_size(s);
2965 /* Predicate sizes may be smaller and cannot use simd_desc. */
2966 TCGv_ptr d = tcg_temp_new_ptr();
2967 TCGv_ptr n = tcg_temp_new_ptr();
2968 TCGv_ptr g = tcg_temp_new_ptr();
2969 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
2971 tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
2972 tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
2973 tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
2975 if (a->s) {
2976 TCGv_i32 t = tcg_temp_new_i32();
2977 fn_s(t, d, n, g, desc);
2978 do_pred_flags(t);
2979 tcg_temp_free_i32(t);
2980 } else {
2981 fn(d, n, g, desc);
2983 tcg_temp_free_ptr(d);
2984 tcg_temp_free_ptr(n);
2985 tcg_temp_free_ptr(g);
2986 return true;
2989 TRANS_FEAT(BRKPA, aa64_sve, do_brk3, a,
2990 gen_helper_sve_brkpa, gen_helper_sve_brkpas)
2991 TRANS_FEAT(BRKPB, aa64_sve, do_brk3, a,
2992 gen_helper_sve_brkpb, gen_helper_sve_brkpbs)
2994 TRANS_FEAT(BRKA_m, aa64_sve, do_brk2, a,
2995 gen_helper_sve_brka_m, gen_helper_sve_brkas_m)
2996 TRANS_FEAT(BRKB_m, aa64_sve, do_brk2, a,
2997 gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m)
2999 TRANS_FEAT(BRKA_z, aa64_sve, do_brk2, a,
3000 gen_helper_sve_brka_z, gen_helper_sve_brkas_z)
3001 TRANS_FEAT(BRKB_z, aa64_sve, do_brk2, a,
3002 gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z)
3004 TRANS_FEAT(BRKN, aa64_sve, do_brk2, a,
3005 gen_helper_sve_brkn, gen_helper_sve_brkns)
3008 *** SVE Predicate Count Group
3011 static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
3013 unsigned psz = pred_full_reg_size(s);
3015 if (psz <= 8) {
3016 uint64_t psz_mask;
3018 tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
3019 if (pn != pg) {
3020 TCGv_i64 g = tcg_temp_new_i64();
3021 tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
3022 tcg_gen_and_i64(val, val, g);
3023 tcg_temp_free_i64(g);
3026 /* Reduce the pred_esz_masks constant to the actual predicate size,
3027 * simply to reduce the size of the code generated here.
3029 psz_mask = MAKE_64BIT_MASK(0, psz * 8);
3030 tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask);
3032 tcg_gen_ctpop_i64(val, val);
3033 } else {
3034 TCGv_ptr t_pn = tcg_temp_new_ptr();
3035 TCGv_ptr t_pg = tcg_temp_new_ptr();
3036 unsigned desc = 0;
3038 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
3039 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
3041 tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
3042 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
3044 gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc));
3045 tcg_temp_free_ptr(t_pn);
3046 tcg_temp_free_ptr(t_pg);
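/*
 * Worked example for the psz <= 8 fast path, assuming the usual
 * pred_esz_masks values (e.g. 0x1111111111111111 for MO_32): for a
 * 256-bit vector, psz == 4, so the whole predicate fits in one i64.
 * With esz == MO_32 the loaded value is masked with
 * 0x1111111111111111 & MAKE_64BIT_MASK(0, 32) == 0x11111111, leaving
 * exactly one bit per active 32-bit element, and ctpop then yields the
 * CNTP result directly (0..8 at this vector length).
 */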
3050 static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
3052 if (sve_access_check(s)) {
3053 do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg);
3055 return true;
3058 static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
3060 if (sve_access_check(s)) {
3061 TCGv_i64 reg = cpu_reg(s, a->rd);
3062 TCGv_i64 val = tcg_temp_new_i64();
3064 do_cntp(s, val, a->esz, a->pg, a->pg);
3065 if (a->d) {
3066 tcg_gen_sub_i64(reg, reg, val);
3067 } else {
3068 tcg_gen_add_i64(reg, reg, val);
3070 tcg_temp_free_i64(val);
3072 return true;
3075 static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a)
3077 if (a->esz == 0) {
3078 return false;
3080 if (sve_access_check(s)) {
3081 unsigned vsz = vec_full_reg_size(s);
3082 TCGv_i64 val = tcg_temp_new_i64();
3083 GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds;
3085 do_cntp(s, val, a->esz, a->pg, a->pg);
3086 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
3087 vec_full_reg_offset(s, a->rn), val, vsz, vsz);
3089 return true;
3092 static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a)
3094 if (sve_access_check(s)) {
3095 TCGv_i64 reg = cpu_reg(s, a->rd);
3096 TCGv_i64 val = tcg_temp_new_i64();
3098 do_cntp(s, val, a->esz, a->pg, a->pg);
3099 do_sat_addsub_32(reg, val, a->u, a->d);
3101 return true;
3104 static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a)
3106 if (sve_access_check(s)) {
3107 TCGv_i64 reg = cpu_reg(s, a->rd);
3108 TCGv_i64 val = tcg_temp_new_i64();
3110 do_cntp(s, val, a->esz, a->pg, a->pg);
3111 do_sat_addsub_64(reg, val, a->u, a->d);
3113 return true;
3116 static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a)
3118 if (a->esz == 0) {
3119 return false;
3121 if (sve_access_check(s)) {
3122 TCGv_i64 val = tcg_temp_new_i64();
3123 do_cntp(s, val, a->esz, a->pg, a->pg);
3124 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d);
3126 return true;
3130 *** SVE Integer Compare Scalars Group
3133 static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
3135 if (!sve_access_check(s)) {
3136 return true;
3139 TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ);
3140 TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf);
3141 TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf);
3142 TCGv_i64 cmp = tcg_temp_new_i64();
3144 tcg_gen_setcond_i64(cond, cmp, rn, rm);
3145 tcg_gen_extrl_i64_i32(cpu_NF, cmp);
3146 tcg_temp_free_i64(cmp);
3148 /* VF = !NF & !CF. */
3149 tcg_gen_xori_i32(cpu_VF, cpu_NF, 1);
3150 tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF);
3152 /* Both NF and VF actually look at bit 31. */
3153 tcg_gen_neg_i32(cpu_NF, cpu_NF);
3154 tcg_gen_neg_i32(cpu_VF, cpu_VF);
3155 return true;
3158 static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
3160 TCGv_i64 op0, op1, t0, t1, tmax;
3161 TCGv_i32 t2;
3162 TCGv_ptr ptr;
3163 unsigned vsz = vec_full_reg_size(s);
3164 unsigned desc = 0;
3165 TCGCond cond;
3166 uint64_t maxval;
3167 /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
3168 bool eq = a->eq == a->lt;
3170 /* The greater-than conditions are all SVE2. */
3171 if (!a->lt && !dc_isar_feature(aa64_sve2, s)) {
3172 return false;
3174 if (!sve_access_check(s)) {
3175 return true;
3178 op0 = read_cpu_reg(s, a->rn, 1);
3179 op1 = read_cpu_reg(s, a->rm, 1);
3181 if (!a->sf) {
3182 if (a->u) {
3183 tcg_gen_ext32u_i64(op0, op0);
3184 tcg_gen_ext32u_i64(op1, op1);
3185 } else {
3186 tcg_gen_ext32s_i64(op0, op0);
3187 tcg_gen_ext32s_i64(op1, op1);
3191 /* For the helper, compress the different conditions into a computation
3192 * of the number of iterations for which the condition is true.
3194 t0 = tcg_temp_new_i64();
3195 t1 = tcg_temp_new_i64();
3197 if (a->lt) {
3198 tcg_gen_sub_i64(t0, op1, op0);
3199 if (a->u) {
3200 maxval = a->sf ? UINT64_MAX : UINT32_MAX;
3201 cond = eq ? TCG_COND_LEU : TCG_COND_LTU;
3202 } else {
3203 maxval = a->sf ? INT64_MAX : INT32_MAX;
3204 cond = eq ? TCG_COND_LE : TCG_COND_LT;
3206 } else {
3207 tcg_gen_sub_i64(t0, op0, op1);
3208 if (a->u) {
3209 maxval = 0;
3210 cond = eq ? TCG_COND_GEU : TCG_COND_GTU;
3211 } else {
3212 maxval = a->sf ? INT64_MIN : INT32_MIN;
3213 cond = eq ? TCG_COND_GE : TCG_COND_GT;
3217 tmax = tcg_constant_i64(vsz >> a->esz);
3218 if (eq) {
3219 /* Equality means one more iteration. */
3220 tcg_gen_addi_i64(t0, t0, 1);
3223 * For the less-than while, if op1 is maxval (which is the only time
3224 * the addition above could overflow), then we produce an all-true
3225 * predicate by setting the count to the vector length. This is
3226 * because the pseudocode is described as an increment + compare
3227 * loop, and the maximum integer would always compare true.
3228 * Similarly, the greater-than while has the same issue with the
3229 * minimum integer due to the decrement + compare loop.
3231 tcg_gen_movi_i64(t1, maxval);
3232 tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
3235 /* Bound to the maximum. */
3236 tcg_gen_umin_i64(t0, t0, tmax);
3238 /* Set the count to zero if the condition is false. */
3239 tcg_gen_movi_i64(t1, 0);
3240 tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
3241 tcg_temp_free_i64(t1);
3243 /* Since we're bounded, pass as a 32-bit type. */
3244 t2 = tcg_temp_new_i32();
3245 tcg_gen_extrl_i64_i32(t2, t0);
3246 tcg_temp_free_i64(t0);
3248 /* Scale elements to bits. */
3249 tcg_gen_shli_i32(t2, t2, a->esz);
3251 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
3252 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
3254 ptr = tcg_temp_new_ptr();
3255 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
3257 if (a->lt) {
3258 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
3259 } else {
3260 gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc));
3262 do_pred_flags(t2);
3264 tcg_temp_free_ptr(ptr);
3265 tcg_temp_free_i32(t2);
3266 return true;
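/*
 * Worked example of the count computation above: WHILELT P0.S, X0, X1
 * with X0 == 5, X1 == 9 and vsz == 32 has a->lt == 1 and a->eq == 0,
 * so eq is false; t0 = 9 - 5 = 4, tmax = 8, cond = TCG_COND_LT.  The
 * bound leaves t0 == 4 and 5 < 9 holds, so the count stays 4; scaled by
 * esz it becomes 16 predicate bits, and gen_helper_sve_whilel marks the
 * first four .S elements active.
 */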
3269 static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
3271 TCGv_i64 op0, op1, diff, t1, tmax;
3272 TCGv_i32 t2;
3273 TCGv_ptr ptr;
3274 unsigned vsz = vec_full_reg_size(s);
3275 unsigned desc = 0;
3277 if (!dc_isar_feature(aa64_sve2, s)) {
3278 return false;
3280 if (!sve_access_check(s)) {
3281 return true;
3284 op0 = read_cpu_reg(s, a->rn, 1);
3285 op1 = read_cpu_reg(s, a->rm, 1);
3287 tmax = tcg_constant_i64(vsz);
3288 diff = tcg_temp_new_i64();
3290 if (a->rw) {
3291 /* WHILERW */
3292 /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */
3293 t1 = tcg_temp_new_i64();
3294 tcg_gen_sub_i64(diff, op0, op1);
3295 tcg_gen_sub_i64(t1, op1, op0);
3296 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
3297 tcg_temp_free_i64(t1);
3298 /* Round down to a multiple of ESIZE. */
3299 tcg_gen_andi_i64(diff, diff, -1 << a->esz);
3300 /* If op1 == op0, diff == 0, and the condition is always true. */
3301 tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
3302 } else {
3303 /* WHILEWR */
3304 tcg_gen_sub_i64(diff, op1, op0);
3305 /* Round down to a multiple of ESIZE. */
3306 tcg_gen_andi_i64(diff, diff, -1 << a->esz);
3307 /* If op0 >= op1, then diff <= 0 and the condition is always true. */
3308 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
3311 /* Bound to the maximum. */
3312 tcg_gen_umin_i64(diff, diff, tmax);
3314 /* Since we're bounded, pass as a 32-bit type. */
3315 t2 = tcg_temp_new_i32();
3316 tcg_gen_extrl_i64_i32(t2, diff);
3317 tcg_temp_free_i64(diff);
3319 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
3320 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
3322 ptr = tcg_temp_new_ptr();
3323 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
3325 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
3326 do_pred_flags(t2);
3328 tcg_temp_free_ptr(ptr);
3329 tcg_temp_free_i32(t2);
3330 return true;
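/*
 * Worked example: WHILEWR P0.B, X0, X1 with X0 == 0x1000, X1 == 0x1010
 * and vsz == 32 computes diff = 0x10; rounding down to a multiple of
 * 1 byte changes nothing, op0 < op1 so the movcond keeps diff, and
 * umin(16, 32) == 16, making the first 16 byte elements active.  With
 * op0 == op1 the movcond instead forces diff = tmax, i.e. an all-true
 * predicate.
 */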
3334 *** SVE Integer Wide Immediate - Unpredicated Group
3337 static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
3339 if (a->esz == 0) {
3340 return false;
3342 if (sve_access_check(s)) {
3343 unsigned vsz = vec_full_reg_size(s);
3344 int dofs = vec_full_reg_offset(s, a->rd);
3345 uint64_t imm;
3347 /* Decode the VFP immediate. */
3348 imm = vfp_expand_imm(a->esz, a->imm);
3349 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
3351 return true;
3354 static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
3356 if (sve_access_check(s)) {
3357 unsigned vsz = vec_full_reg_size(s);
3358 int dofs = vec_full_reg_offset(s, a->rd);
3359 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
3361 return true;
3364 TRANS_FEAT(ADD_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_addi, a)
3366 static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
3368 a->imm = -a->imm;
3369 return trans_ADD_zzi(s, a);
3372 static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
3374 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
3375 static const GVecGen2s op[4] = {
3376 { .fni8 = tcg_gen_vec_sub8_i64,
3377 .fniv = tcg_gen_sub_vec,
3378 .fno = gen_helper_sve_subri_b,
3379 .opt_opc = vecop_list,
3380 .vece = MO_8,
3381 .scalar_first = true },
3382 { .fni8 = tcg_gen_vec_sub16_i64,
3383 .fniv = tcg_gen_sub_vec,
3384 .fno = gen_helper_sve_subri_h,
3385 .opt_opc = vecop_list,
3386 .vece = MO_16,
3387 .scalar_first = true },
3388 { .fni4 = tcg_gen_sub_i32,
3389 .fniv = tcg_gen_sub_vec,
3390 .fno = gen_helper_sve_subri_s,
3391 .opt_opc = vecop_list,
3392 .vece = MO_32,
3393 .scalar_first = true },
3394 { .fni8 = tcg_gen_sub_i64,
3395 .fniv = tcg_gen_sub_vec,
3396 .fno = gen_helper_sve_subri_d,
3397 .opt_opc = vecop_list,
3398 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3399 .vece = MO_64,
3400 .scalar_first = true }
3403 if (sve_access_check(s)) {
3404 unsigned vsz = vec_full_reg_size(s);
3405 tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd),
3406 vec_full_reg_offset(s, a->rn),
3407 vsz, vsz, tcg_constant_i64(a->imm), &op[a->esz]);
3409 return true;
3412 TRANS_FEAT(MUL_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_muli, a)
3414 static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d)
3416 if (sve_access_check(s)) {
3417 do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
3418 tcg_constant_i64(a->imm), u, d);
3420 return true;
3423 TRANS_FEAT(SQADD_zzi, aa64_sve, do_zzi_sat, a, false, false)
3424 TRANS_FEAT(UQADD_zzi, aa64_sve, do_zzi_sat, a, true, false)
3425 TRANS_FEAT(SQSUB_zzi, aa64_sve, do_zzi_sat, a, false, true)
3426 TRANS_FEAT(UQSUB_zzi, aa64_sve, do_zzi_sat, a, true, true)
3428 static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
3430 if (sve_access_check(s)) {
3431 unsigned vsz = vec_full_reg_size(s);
3432 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
3433 vec_full_reg_offset(s, a->rn),
3434 tcg_constant_i64(a->imm), vsz, vsz, 0, fn);
3436 return true;
3439 #define DO_ZZI(NAME, name) \
3440 static gen_helper_gvec_2i * const name##i_fns[4] = { \
3441 gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \
3442 gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \
3443 }; \
3444 TRANS_FEAT(NAME##_zzi, aa64_sve, do_zzi_ool, a, name##i_fns[a->esz])
3446 DO_ZZI(SMAX, smax)
3447 DO_ZZI(UMAX, umax)
3448 DO_ZZI(SMIN, smin)
3449 DO_ZZI(UMIN, umin)
3451 #undef DO_ZZI
3453 static gen_helper_gvec_4 * const dot_fns[2][2] = {
3454 { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
3455 { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
3457 TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz,
3458 dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0)
3461 * SVE Multiply - Indexed
3464 TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
3465 gen_helper_gvec_sdot_idx_b, a)
3466 TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
3467 gen_helper_gvec_sdot_idx_h, a)
3468 TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
3469 gen_helper_gvec_udot_idx_b, a)
3470 TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
3471 gen_helper_gvec_udot_idx_h, a)
3473 TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
3474 gen_helper_gvec_sudot_idx_b, a)
3475 TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
3476 gen_helper_gvec_usdot_idx_b, a)
3478 #define DO_SVE2_RRX(NAME, FUNC) \
3479 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \
3480 a->rd, a->rn, a->rm, a->index)
3482 DO_SVE2_RRX(MUL_zzx_h, gen_helper_gvec_mul_idx_h)
3483 DO_SVE2_RRX(MUL_zzx_s, gen_helper_gvec_mul_idx_s)
3484 DO_SVE2_RRX(MUL_zzx_d, gen_helper_gvec_mul_idx_d)
3486 DO_SVE2_RRX(SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
3487 DO_SVE2_RRX(SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
3488 DO_SVE2_RRX(SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)
3490 DO_SVE2_RRX(SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
3491 DO_SVE2_RRX(SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
3492 DO_SVE2_RRX(SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)
3494 #undef DO_SVE2_RRX
3496 #define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
3497 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \
3498 a->rd, a->rn, a->rm, (a->index << 1) | TOP)
3500 DO_SVE2_RRX_TB(SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false)
3501 DO_SVE2_RRX_TB(SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false)
3502 DO_SVE2_RRX_TB(SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true)
3503 DO_SVE2_RRX_TB(SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true)
3505 DO_SVE2_RRX_TB(SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false)
3506 DO_SVE2_RRX_TB(SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false)
3507 DO_SVE2_RRX_TB(SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true)
3508 DO_SVE2_RRX_TB(SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true)
3510 DO_SVE2_RRX_TB(UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false)
3511 DO_SVE2_RRX_TB(UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false)
3512 DO_SVE2_RRX_TB(UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true)
3513 DO_SVE2_RRX_TB(UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true)
3515 #undef DO_SVE2_RRX_TB
3517 #define DO_SVE2_RRXR(NAME, FUNC) \
3518 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzxz, FUNC, a)
3520 DO_SVE2_RRXR(MLA_zzxz_h, gen_helper_gvec_mla_idx_h)
3521 DO_SVE2_RRXR(MLA_zzxz_s, gen_helper_gvec_mla_idx_s)
3522 DO_SVE2_RRXR(MLA_zzxz_d, gen_helper_gvec_mla_idx_d)
3524 DO_SVE2_RRXR(MLS_zzxz_h, gen_helper_gvec_mls_idx_h)
3525 DO_SVE2_RRXR(MLS_zzxz_s, gen_helper_gvec_mls_idx_s)
3526 DO_SVE2_RRXR(MLS_zzxz_d, gen_helper_gvec_mls_idx_d)
3528 DO_SVE2_RRXR(SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h)
3529 DO_SVE2_RRXR(SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s)
3530 DO_SVE2_RRXR(SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d)
3532 DO_SVE2_RRXR(SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h)
3533 DO_SVE2_RRXR(SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s)
3534 DO_SVE2_RRXR(SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d)
3536 #undef DO_SVE2_RRXR
3538 #define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \
3539 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \
3540 a->rd, a->rn, a->rm, a->ra, (a->index << 1) | TOP)
3542 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false)
3543 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false)
3544 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true)
3545 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true)
3547 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false)
3548 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false)
3549 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true)
3550 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true)
3552 DO_SVE2_RRXR_TB(SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false)
3553 DO_SVE2_RRXR_TB(SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false)
3554 DO_SVE2_RRXR_TB(SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true)
3555 DO_SVE2_RRXR_TB(SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true)
3557 DO_SVE2_RRXR_TB(UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false)
3558 DO_SVE2_RRXR_TB(UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false)
3559 DO_SVE2_RRXR_TB(UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true)
3560 DO_SVE2_RRXR_TB(UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true)
3562 DO_SVE2_RRXR_TB(SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false)
3563 DO_SVE2_RRXR_TB(SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false)
3564 DO_SVE2_RRXR_TB(SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true)
3565 DO_SVE2_RRXR_TB(SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true)
3567 DO_SVE2_RRXR_TB(UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false)
3568 DO_SVE2_RRXR_TB(UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false)
3569 DO_SVE2_RRXR_TB(UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true)
3570 DO_SVE2_RRXR_TB(UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true)
3572 #undef DO_SVE2_RRXR_TB
3574 #define DO_SVE2_RRXR_ROT(NAME, FUNC) \
3575 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \
3576 a->rd, a->rn, a->rm, a->ra, (a->index << 2) | a->rot)
3578 DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h)
3579 DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s)
3581 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h)
3582 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s)
3584 DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s)
3585 DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d)
3587 #undef DO_SVE2_RRXR_ROT
3590 *** SVE Floating Point Multiply-Add Indexed Group
3593 static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub)
3595 static gen_helper_gvec_4_ptr * const fns[4] = {
3596 NULL,
3597 gen_helper_gvec_fmla_idx_h,
3598 gen_helper_gvec_fmla_idx_s,
3599 gen_helper_gvec_fmla_idx_d,
3601 return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra,
3602 (a->index << 1) | sub,
3603 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3606 TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false)
3607 TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true)
3610 *** SVE Floating Point Multiply Indexed Group
3613 static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = {
3614 NULL, gen_helper_gvec_fmul_idx_h,
3615 gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d,
3617 TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz,
3618 fmul_idx_fns[a->esz], a->rd, a->rn, a->rm, a->index,
3619 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
3622 *** SVE Floating Point Fast Reduction Group
3625 typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr,
3626 TCGv_ptr, TCGv_i32);
3628 static bool do_reduce(DisasContext *s, arg_rpr_esz *a,
3629 gen_helper_fp_reduce *fn)
3631 unsigned vsz, p2vsz;
3632 TCGv_i32 t_desc;
3633 TCGv_ptr t_zn, t_pg, status;
3634 TCGv_i64 temp;
3636 if (fn == NULL) {
3637 return false;
3639 if (!sve_access_check(s)) {
3640 return true;
3643 vsz = vec_full_reg_size(s);
3644 p2vsz = pow2ceil(vsz);
3645 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, p2vsz));
3646 temp = tcg_temp_new_i64();
3647 t_zn = tcg_temp_new_ptr();
3648 t_pg = tcg_temp_new_ptr();
3650 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
3651 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
3652 status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3654 fn(temp, t_zn, t_pg, status, t_desc);
3655 tcg_temp_free_ptr(t_zn);
3656 tcg_temp_free_ptr(t_pg);
3657 tcg_temp_free_ptr(status);
3659 write_fp_dreg(s, a->rd, temp);
3660 tcg_temp_free_i64(temp);
3661 return true;
3664 #define DO_VPZ(NAME, name) \
3665 static gen_helper_fp_reduce * const name##_fns[4] = { \
3666 NULL, gen_helper_sve_##name##_h, \
3667 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
3668 }; \
3669 TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz])
3671 DO_VPZ(FADDV, faddv)
3672 DO_VPZ(FMINNMV, fminnmv)
3673 DO_VPZ(FMAXNMV, fmaxnmv)
3674 DO_VPZ(FMINV, fminv)
3675 DO_VPZ(FMAXV, fmaxv)
3677 #undef DO_VPZ
3680 *** SVE Floating Point Unary Operations - Unpredicated Group
3683 static gen_helper_gvec_2_ptr * const frecpe_fns[] = {
3684 NULL, gen_helper_gvec_frecpe_h,
3685 gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d,
3687 TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_arg_zz, frecpe_fns[a->esz], a, 0)
3689 static gen_helper_gvec_2_ptr * const frsqrte_fns[] = {
3690 NULL, gen_helper_gvec_frsqrte_h,
3691 gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d,
3693 TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_arg_zz, frsqrte_fns[a->esz], a, 0)
3696 *** SVE Floating Point Compare with Zero Group
3699 static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
3700 gen_helper_gvec_3_ptr *fn)
3702 if (fn == NULL) {
3703 return false;
3705 if (sve_access_check(s)) {
3706 unsigned vsz = vec_full_reg_size(s);
3707 TCGv_ptr status =
3708 fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3710 tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
3711 vec_full_reg_offset(s, a->rn),
3712 pred_full_reg_offset(s, a->pg),
3713 status, vsz, vsz, 0, fn);
3714 tcg_temp_free_ptr(status);
3716 return true;
3719 #define DO_PPZ(NAME, name) \
3720 static gen_helper_gvec_3_ptr * const name##_fns[] = { \
3721 NULL, gen_helper_sve_##name##_h, \
3722 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
3723 }; \
3724 TRANS_FEAT(NAME, aa64_sve, do_ppz_fp, a, name##_fns[a->esz])
3726 DO_PPZ(FCMGE_ppz0, fcmge0)
3727 DO_PPZ(FCMGT_ppz0, fcmgt0)
3728 DO_PPZ(FCMLE_ppz0, fcmle0)
3729 DO_PPZ(FCMLT_ppz0, fcmlt0)
3730 DO_PPZ(FCMEQ_ppz0, fcmeq0)
3731 DO_PPZ(FCMNE_ppz0, fcmne0)
3733 #undef DO_PPZ
3736 *** SVE floating-point trig multiply-add coefficient
3739 static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
3740 NULL, gen_helper_sve_ftmad_h,
3741 gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
3743 TRANS_FEAT(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
3744 ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
3745 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
3748 *** SVE Floating Point Accumulating Reduction Group
3751 static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
3753 typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr,
3754 TCGv_ptr, TCGv_ptr, TCGv_i32);
3755 static fadda_fn * const fns[3] = {
3756 gen_helper_sve_fadda_h,
3757 gen_helper_sve_fadda_s,
3758 gen_helper_sve_fadda_d,
3760 unsigned vsz = vec_full_reg_size(s);
3761 TCGv_ptr t_rm, t_pg, t_fpst;
3762 TCGv_i64 t_val;
3763 TCGv_i32 t_desc;
3765 if (a->esz == 0) {
3766 return false;
3768 if (!sve_access_check(s)) {
3769 return true;
3772 t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
3773 t_rm = tcg_temp_new_ptr();
3774 t_pg = tcg_temp_new_ptr();
3775 tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
3776 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
3777 t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3778 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
3780 fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc);
3782 tcg_temp_free_ptr(t_fpst);
3783 tcg_temp_free_ptr(t_pg);
3784 tcg_temp_free_ptr(t_rm);
3786 write_fp_dreg(s, a->rd, t_val);
3787 tcg_temp_free_i64(t_val);
3788 return true;
3792 *** SVE Floating Point Arithmetic - Unpredicated Group
3795 #define DO_FP3(NAME, name) \
3796 static gen_helper_gvec_3_ptr * const name##_fns[4] = { \
3797 NULL, gen_helper_gvec_##name##_h, \
3798 gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \
3799 }; \
3800 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0)
3802 DO_FP3(FADD_zzz, fadd)
3803 DO_FP3(FSUB_zzz, fsub)
3804 DO_FP3(FMUL_zzz, fmul)
3805 DO_FP3(FTSMUL, ftsmul)
3806 DO_FP3(FRECPS, recps)
3807 DO_FP3(FRSQRTS, rsqrts)
3809 #undef DO_FP3
3812 *** SVE Floating Point Arithmetic - Predicated Group
3815 static bool do_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
3816 gen_helper_gvec_4_ptr *fn)
3818 if (fn == NULL) {
3819 return false;
3821 if (sve_access_check(s)) {
3822 unsigned vsz = vec_full_reg_size(s);
3823 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3824 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
3825 vec_full_reg_offset(s, a->rn),
3826 vec_full_reg_offset(s, a->rm),
3827 pred_full_reg_offset(s, a->pg),
3828 status, vsz, vsz, 0, fn);
3829 tcg_temp_free_ptr(status);
3831 return true;
3834 #define DO_FP3(NAME, name) \
3835 static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
3837 static gen_helper_gvec_4_ptr * const fns[4] = { \
3838 NULL, gen_helper_sve_##name##_h, \
3839 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
3840 }; \
3841 return do_zpzz_fp(s, a, fns[a->esz]); \
3844 DO_FP3(FADD_zpzz, fadd)
3845 DO_FP3(FSUB_zpzz, fsub)
3846 DO_FP3(FMUL_zpzz, fmul)
3847 DO_FP3(FMIN_zpzz, fmin)
3848 DO_FP3(FMAX_zpzz, fmax)
3849 DO_FP3(FMINNM_zpzz, fminnum)
3850 DO_FP3(FMAXNM_zpzz, fmaxnum)
3851 DO_FP3(FABD, fabd)
3852 DO_FP3(FSCALE, fscalbn)
3853 DO_FP3(FDIV, fdiv)
3854 DO_FP3(FMULX, fmulx)
3856 #undef DO_FP3
3858 typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr,
3859 TCGv_i64, TCGv_ptr, TCGv_i32);
3861 static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
3862 TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn)
3864 unsigned vsz = vec_full_reg_size(s);
3865 TCGv_ptr t_zd, t_zn, t_pg, status;
3866 TCGv_i32 desc;
3868 t_zd = tcg_temp_new_ptr();
3869 t_zn = tcg_temp_new_ptr();
3870 t_pg = tcg_temp_new_ptr();
3871 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
3872 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
3873 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
3875 status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
3876 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
3877 fn(t_zd, t_zn, t_pg, scalar, status, desc);
3879 tcg_temp_free_ptr(status);
3880 tcg_temp_free_ptr(t_pg);
3881 tcg_temp_free_ptr(t_zn);
3882 tcg_temp_free_ptr(t_zd);
3885 static void do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
3886 gen_helper_sve_fp2scalar *fn)
3888 do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16,
3889 tcg_constant_i64(imm), fn);
3892 #define DO_FP_IMM(NAME, name, const0, const1) \
3893 static bool trans_##NAME##_zpzi(DisasContext *s, arg_rpri_esz *a) \
3895 static gen_helper_sve_fp2scalar * const fns[3] = { \
3896 gen_helper_sve_##name##_h, \
3897 gen_helper_sve_##name##_s, \
3898 gen_helper_sve_##name##_d \
3899 }; \
3900 static uint64_t const val[3][2] = { \
3901 { float16_##const0, float16_##const1 }, \
3902 { float32_##const0, float32_##const1 }, \
3903 { float64_##const0, float64_##const1 }, \
3904 }; \
3905 if (a->esz == 0) { \
3906 return false; \
3908 if (sve_access_check(s)) { \
3909 do_fp_imm(s, a, val[a->esz - 1][a->imm], fns[a->esz - 1]); \
3911 return true; \
3914 DO_FP_IMM(FADD, fadds, half, one)
3915 DO_FP_IMM(FSUB, fsubs, half, one)
3916 DO_FP_IMM(FMUL, fmuls, half, two)
3917 DO_FP_IMM(FSUBR, fsubrs, half, one)
3918 DO_FP_IMM(FMAXNM, fmaxnms, zero, one)
3919 DO_FP_IMM(FMINNM, fminnms, zero, one)
3920 DO_FP_IMM(FMAX, fmaxs, zero, one)
3921 DO_FP_IMM(FMIN, fmins, zero, one)
3923 #undef DO_FP_IMM
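/*
 * Illustrative note, assuming the usual IEEE/softfloat encodings of the
 * constants named above: the single immediate bit selects const0 or
 * const1 per element size, so FADD Zdn.H, Pg/M, Zdn.H, #1.0 reaches
 * gen_helper_sve_fadds_h with val[0][1] == float16_one (0x3c00), while
 * the #0.5 form passes float16_half (0x3800) instead.
 */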
3925 static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
3926 gen_helper_gvec_4_ptr *fn)
3928 if (fn == NULL) {
3929 return false;
3931 if (sve_access_check(s)) {
3932 unsigned vsz = vec_full_reg_size(s);
3933 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3934 tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
3935 vec_full_reg_offset(s, a->rn),
3936 vec_full_reg_offset(s, a->rm),
3937 pred_full_reg_offset(s, a->pg),
3938 status, vsz, vsz, 0, fn);
3939 tcg_temp_free_ptr(status);
3941 return true;
3944 #define DO_FPCMP(NAME, name) \
3945 static bool trans_##NAME##_ppzz(DisasContext *s, arg_rprr_esz *a) \
3947 static gen_helper_gvec_4_ptr * const fns[4] = { \
3948 NULL, gen_helper_sve_##name##_h, \
3949 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
3950 }; \
3951 return do_fp_cmp(s, a, fns[a->esz]); \
3954 DO_FPCMP(FCMGE, fcmge)
3955 DO_FPCMP(FCMGT, fcmgt)
3956 DO_FPCMP(FCMEQ, fcmeq)
3957 DO_FPCMP(FCMNE, fcmne)
3958 DO_FPCMP(FCMUO, fcmuo)
3959 DO_FPCMP(FACGE, facge)
3960 DO_FPCMP(FACGT, facgt)
3962 #undef DO_FPCMP
3964 static bool trans_FCADD(DisasContext *s, arg_FCADD *a)
3966 static gen_helper_gvec_4_ptr * const fns[3] = {
3967 gen_helper_sve_fcadd_h,
3968 gen_helper_sve_fcadd_s,
3969 gen_helper_sve_fcadd_d
3972 if (a->esz == 0) {
3973 return false;
3975 if (sve_access_check(s)) {
3976 unsigned vsz = vec_full_reg_size(s);
3977 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3978 tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, a->rd),
3979 vec_full_reg_offset(s, a->rn),
3980 vec_full_reg_offset(s, a->rm),
3981 pred_full_reg_offset(s, a->pg),
3982 status, vsz, vsz, a->rot, fns[a->esz - 1]);
3983 tcg_temp_free_ptr(status);
3985 return true;
3988 static bool do_fmla(DisasContext *s, arg_rprrr_esz *a,
3989 gen_helper_gvec_5_ptr *fn)
3991 if (a->esz == 0) {
3992 return false;
3994 if (sve_access_check(s)) {
3995 unsigned vsz = vec_full_reg_size(s);
3996 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3997 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
3998 vec_full_reg_offset(s, a->rn),
3999 vec_full_reg_offset(s, a->rm),
4000 vec_full_reg_offset(s, a->ra),
4001 pred_full_reg_offset(s, a->pg),
4002 status, vsz, vsz, 0, fn);
4003 tcg_temp_free_ptr(status);
4005 return true;
4008 #define DO_FMLA(NAME, name) \
4009 static bool trans_##NAME(DisasContext *s, arg_rprrr_esz *a) \
4011 static gen_helper_gvec_5_ptr * const fns[4] = { \
4012 NULL, gen_helper_sve_##name##_h, \
4013 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
4014 }; \
4015 return do_fmla(s, a, fns[a->esz]); \
4018 DO_FMLA(FMLA_zpzzz, fmla_zpzzz)
4019 DO_FMLA(FMLS_zpzzz, fmls_zpzzz)
4020 DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz)
4021 DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz)
4023 #undef DO_FMLA
4025 static bool trans_FCMLA_zpzzz(DisasContext *s, arg_FCMLA_zpzzz *a)
4027 static gen_helper_gvec_5_ptr * const fns[4] = {
4028 NULL,
4029 gen_helper_sve_fcmla_zpzzz_h,
4030 gen_helper_sve_fcmla_zpzzz_s,
4031 gen_helper_sve_fcmla_zpzzz_d,
4034 if (a->esz == 0) {
4035 return false;
4037 if (sve_access_check(s)) {
4038 unsigned vsz = vec_full_reg_size(s);
4039 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4040 tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, a->rd),
4041 vec_full_reg_offset(s, a->rn),
4042 vec_full_reg_offset(s, a->rm),
4043 vec_full_reg_offset(s, a->ra),
4044 pred_full_reg_offset(s, a->pg),
4045 status, vsz, vsz, a->rot, fns[a->esz]);
4046 tcg_temp_free_ptr(status);
4048 return true;
4051 static bool trans_FCMLA_zzxz(DisasContext *s, arg_FCMLA_zzxz *a)
4053 static gen_helper_gvec_4_ptr * const fns[4] = {
4054 NULL,
4055 gen_helper_gvec_fcmlah_idx,
4056 gen_helper_gvec_fcmlas_idx,
4057 NULL,
4060 tcg_debug_assert(a->rd == a->ra);
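/*
 * The rotation goes in the low two bits of the simd data field with the
 * index above it; the fcmla*_idx helpers are expected to unpack the two
 * again.
 */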
4062 return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra,
4063 a->index * 4 + a->rot,
4064 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4068 *** SVE Floating Point Unary Operations Predicated Group
4071 TRANS_FEAT(FCVT_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
4072 gen_helper_sve_fcvt_sh, a, 0, FPST_FPCR)
4073 TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
4074 gen_helper_sve_fcvt_hs, a, 0, FPST_FPCR)
4076 TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz,
4077 gen_helper_sve_bfcvt, a, 0, FPST_FPCR)
4079 TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
4080 gen_helper_sve_fcvt_dh, a, 0, FPST_FPCR)
4081 TRANS_FEAT(FCVT_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
4082 gen_helper_sve_fcvt_hd, a, 0, FPST_FPCR)
4083 TRANS_FEAT(FCVT_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4084 gen_helper_sve_fcvt_ds, a, 0, FPST_FPCR)
4085 TRANS_FEAT(FCVT_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4086 gen_helper_sve_fcvt_sd, a, 0, FPST_FPCR)
4088 TRANS_FEAT(FCVTZS_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
4089 gen_helper_sve_fcvtzs_hh, a, 0, FPST_FPCR_F16)
4090 TRANS_FEAT(FCVTZU_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
4091 gen_helper_sve_fcvtzu_hh, a, 0, FPST_FPCR_F16)
4092 TRANS_FEAT(FCVTZS_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
4093 gen_helper_sve_fcvtzs_hs, a, 0, FPST_FPCR_F16)
4094 TRANS_FEAT(FCVTZU_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
4095 gen_helper_sve_fcvtzu_hs, a, 0, FPST_FPCR_F16)
4096 TRANS_FEAT(FCVTZS_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
4097 gen_helper_sve_fcvtzs_hd, a, 0, FPST_FPCR_F16)
4098 TRANS_FEAT(FCVTZU_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
4099 gen_helper_sve_fcvtzu_hd, a, 0, FPST_FPCR_F16)
4101 TRANS_FEAT(FCVTZS_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
4102 gen_helper_sve_fcvtzs_ss, a, 0, FPST_FPCR)
4103 TRANS_FEAT(FCVTZU_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
4104 gen_helper_sve_fcvtzu_ss, a, 0, FPST_FPCR)
4105 TRANS_FEAT(FCVTZS_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4106 gen_helper_sve_fcvtzs_sd, a, 0, FPST_FPCR)
4107 TRANS_FEAT(FCVTZU_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4108 gen_helper_sve_fcvtzu_sd, a, 0, FPST_FPCR)
4109 TRANS_FEAT(FCVTZS_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4110 gen_helper_sve_fcvtzs_ds, a, 0, FPST_FPCR)
4111 TRANS_FEAT(FCVTZU_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4112 gen_helper_sve_fcvtzu_ds, a, 0, FPST_FPCR)
4114 TRANS_FEAT(FCVTZS_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
4115 gen_helper_sve_fcvtzs_dd, a, 0, FPST_FPCR)
4116 TRANS_FEAT(FCVTZU_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
4117 gen_helper_sve_fcvtzu_dd, a, 0, FPST_FPCR)
4119 static gen_helper_gvec_3_ptr * const frint_fns[] = {
4120 NULL,
4121 gen_helper_sve_frint_h,
4122 gen_helper_sve_frint_s,
4123 gen_helper_sve_frint_d
4125 TRANS_FEAT(FRINTI, aa64_sve, gen_gvec_fpst_arg_zpz, frint_fns[a->esz],
4126 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4128 static gen_helper_gvec_3_ptr * const frintx_fns[] = {
4129 NULL,
4130 gen_helper_sve_frintx_h,
4131 gen_helper_sve_frintx_s,
4132 gen_helper_sve_frintx_d
4134 TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz],
4135 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4137 static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
4138 int mode, gen_helper_gvec_3_ptr *fn)
4140 unsigned vsz;
4141 TCGv_i32 tmode;
4142 TCGv_ptr status;
4144 if (fn == NULL) {
4145 return false;
4147 if (!sve_access_check(s)) {
4148 return true;
4151 vsz = vec_full_reg_size(s);
4152 tmode = tcg_const_i32(mode);
4153 status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
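/*
 * helper_set_rmode installs a new rounding mode in STATUS and returns the
 * previous one, so the first call below saves the old mode in TMODE and
 * the second call restores it once the operation is done.
 */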
4155 gen_helper_set_rmode(tmode, tmode, status);
4157 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
4158 vec_full_reg_offset(s, a->rn),
4159 pred_full_reg_offset(s, a->pg),
4160 status, vsz, vsz, 0, fn);
4162 gen_helper_set_rmode(tmode, tmode, status);
4163 tcg_temp_free_i32(tmode);
4164 tcg_temp_free_ptr(status);
4165 return true;
4168 TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a,
4169 float_round_nearest_even, frint_fns[a->esz])
4170 TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a,
4171 float_round_up, frint_fns[a->esz])
4172 TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a,
4173 float_round_down, frint_fns[a->esz])
4174 TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a,
4175 float_round_to_zero, frint_fns[a->esz])
4176 TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a,
4177 float_round_ties_away, frint_fns[a->esz])
4179 static gen_helper_gvec_3_ptr * const frecpx_fns[] = {
4180 NULL, gen_helper_sve_frecpx_h,
4181 gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d,
4183 TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz],
4184 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4186 static gen_helper_gvec_3_ptr * const fsqrt_fns[] = {
4187 NULL, gen_helper_sve_fsqrt_h,
4188 gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d,
4190 TRANS_FEAT(FSQRT, aa64_sve, gen_gvec_fpst_arg_zpz, fsqrt_fns[a->esz],
4191 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4193 TRANS_FEAT(SCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
4194 gen_helper_sve_scvt_hh, a, 0, FPST_FPCR_F16)
4195 TRANS_FEAT(SCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
4196 gen_helper_sve_scvt_sh, a, 0, FPST_FPCR_F16)
4197 TRANS_FEAT(SCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
4198 gen_helper_sve_scvt_dh, a, 0, FPST_FPCR_F16)
4200 TRANS_FEAT(SCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
4201 gen_helper_sve_scvt_ss, a, 0, FPST_FPCR)
4202 TRANS_FEAT(SCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4203 gen_helper_sve_scvt_ds, a, 0, FPST_FPCR)
4205 TRANS_FEAT(SCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4206 gen_helper_sve_scvt_sd, a, 0, FPST_FPCR)
4207 TRANS_FEAT(SCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
4208 gen_helper_sve_scvt_dd, a, 0, FPST_FPCR)
4210 TRANS_FEAT(UCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
4211 gen_helper_sve_ucvt_hh, a, 0, FPST_FPCR_F16)
4212 TRANS_FEAT(UCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
4213 gen_helper_sve_ucvt_sh, a, 0, FPST_FPCR_F16)
4214 TRANS_FEAT(UCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
4215 gen_helper_sve_ucvt_dh, a, 0, FPST_FPCR_F16)
4217 TRANS_FEAT(UCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
4218 gen_helper_sve_ucvt_ss, a, 0, FPST_FPCR)
4219 TRANS_FEAT(UCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4220 gen_helper_sve_ucvt_ds, a, 0, FPST_FPCR)
4221 TRANS_FEAT(UCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4222 gen_helper_sve_ucvt_sd, a, 0, FPST_FPCR)
4224 TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
4225 gen_helper_sve_ucvt_dd, a, 0, FPST_FPCR)
4228 *** SVE Memory - 32-bit Gather and Unsized Contiguous Group
4231 /* Subroutine loading a vector register at VOFS of LEN bytes.
4232 * The load should begin at the address Rn + IMM.
4235 static void do_ldr(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
4237 int len_align = QEMU_ALIGN_DOWN(len, 8);
4238 int len_remain = len % 8;
4239 int nparts = len / 8 + ctpop8(len_remain);
4240 int midx = get_mem_index(s);
4241 TCGv_i64 dirty_addr, clean_addr, t0, t1;
4243 dirty_addr = tcg_temp_new_i64();
4244 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4245 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
4246 tcg_temp_free_i64(dirty_addr);
4249 * Note that unpredicated load/store of vector/predicate registers
4250 * are defined as a stream of bytes, which equates to little-endian
4251 * operations on larger quantities.
4252 * Attempt to keep code expansion to a minimum by limiting the
4253 * amount of unrolling done.
4255 if (nparts <= 4) {
4256 int i;
4258 t0 = tcg_temp_new_i64();
4259 for (i = 0; i < len_align; i += 8) {
4260 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
4261 tcg_gen_st_i64(t0, cpu_env, vofs + i);
4262 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4264 tcg_temp_free_i64(t0);
4265 } else {
4266 TCGLabel *loop = gen_new_label();
4267 TCGv_ptr tp, i = tcg_const_local_ptr(0);
4269 /* Copy the clean address into a local temp, live across the loop. */
4270 t0 = clean_addr;
4271 clean_addr = new_tmp_a64_local(s);
4272 tcg_gen_mov_i64(clean_addr, t0);
4274 gen_set_label(loop);
4276 t0 = tcg_temp_new_i64();
4277 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
4278 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4280 tp = tcg_temp_new_ptr();
4281 tcg_gen_add_ptr(tp, cpu_env, i);
4282 tcg_gen_addi_ptr(i, i, 8);
4283 tcg_gen_st_i64(t0, tp, vofs);
4284 tcg_temp_free_ptr(tp);
4285 tcg_temp_free_i64(t0);
4287 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4288 tcg_temp_free_ptr(i);
4292 * Predicate register loads can be any multiple of 2.
4293 * Note that we still store the entire 64-bit unit into cpu_env.
4295 if (len_remain) {
4296 t0 = tcg_temp_new_i64();
4297 switch (len_remain) {
4298 case 2:
4299 case 4:
4300 case 8:
4301 tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
4302 MO_LE | ctz32(len_remain));
4303 break;
4305 case 6:
4306 t1 = tcg_temp_new_i64();
4307 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
4308 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
4309 tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
4310 tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
4311 tcg_temp_free_i64(t1);
4312 break;
4314 default:
4315 g_assert_not_reached();
4317 tcg_gen_st_i64(t0, cpu_env, vofs + len_align);
4318 tcg_temp_free_i64(t0);
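/*
 * Worked example: with a 384-bit VL the predicate is 6 bytes, so
 * len_align == 0 and len_remain == 6; the case above merges a 4-byte and
 * a 2-byte load with the deposit and stores the whole 64-bit unit.
 */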
4322 /* Similarly for stores. */
4323 static void do_str(DisasContext *s, uint32_t vofs, int len, int rn, int imm)
4325 int len_align = QEMU_ALIGN_DOWN(len, 8);
4326 int len_remain = len % 8;
4327 int nparts = len / 8 + ctpop8(len_remain);
4328 int midx = get_mem_index(s);
4329 TCGv_i64 dirty_addr, clean_addr, t0;
4331 dirty_addr = tcg_temp_new_i64();
4332 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4333 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
4334 tcg_temp_free_i64(dirty_addr);
4336 /* Note that unpredicated load/store of vector/predicate registers
4337 * are defined as a stream of bytes, which equates to little-endian
4338 * operations on larger quantities. There is no nice way to force
4339 * a little-endian store for aarch64_be-linux-user out of line.
4341 * Attempt to keep code expansion to a minimum by limiting the
4342 * amount of unrolling done.
4344 if (nparts <= 4) {
4345 int i;
4347 t0 = tcg_temp_new_i64();
4348 for (i = 0; i < len_align; i += 8) {
4349 tcg_gen_ld_i64(t0, cpu_env, vofs + i);
4350 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
4351 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4353 tcg_temp_free_i64(t0);
4354 } else {
4355 TCGLabel *loop = gen_new_label();
4356 TCGv_ptr tp, i = tcg_const_local_ptr(0);
4358 /* Copy the clean address into a local temp, live across the loop. */
4359 t0 = clean_addr;
4360 clean_addr = new_tmp_a64_local(s);
4361 tcg_gen_mov_i64(clean_addr, t0);
4363 gen_set_label(loop);
4365 t0 = tcg_temp_new_i64();
4366 tp = tcg_temp_new_ptr();
4367 tcg_gen_add_ptr(tp, cpu_env, i);
4368 tcg_gen_ld_i64(t0, tp, vofs);
4369 tcg_gen_addi_ptr(i, i, 8);
4370 tcg_temp_free_ptr(tp);
4372 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
4373 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4374 tcg_temp_free_i64(t0);
4376 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4377 tcg_temp_free_ptr(i);
4380 /* Predicate register stores can be any multiple of 2. */
4381 if (len_remain) {
4382 t0 = tcg_temp_new_i64();
4383 tcg_gen_ld_i64(t0, cpu_env, vofs + len_align);
4385 switch (len_remain) {
4386 case 2:
4387 case 4:
4388 case 8:
4389 tcg_gen_qemu_st_i64(t0, clean_addr, midx,
4390 MO_LE | ctz32(len_remain));
4391 break;
4393 case 6:
4394 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL);
4395 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
4396 tcg_gen_shri_i64(t0, t0, 32);
4397 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW);
4398 break;
4400 default:
4401 g_assert_not_reached();
4403 tcg_temp_free_i64(t0);
4407 static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
4409 if (sve_access_check(s)) {
4410 int size = vec_full_reg_size(s);
4411 int off = vec_full_reg_offset(s, a->rd);
4412 do_ldr(s, off, size, a->rn, a->imm * size);
4414 return true;
4417 static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
4419 if (sve_access_check(s)) {
4420 int size = pred_full_reg_size(s);
4421 int off = pred_full_reg_offset(s, a->rd);
4422 do_ldr(s, off, size, a->rn, a->imm * size);
4424 return true;
4427 static bool trans_STR_zri(DisasContext *s, arg_rri *a)
4429 if (sve_access_check(s)) {
4430 int size = vec_full_reg_size(s);
4431 int off = vec_full_reg_offset(s, a->rd);
4432 do_str(s, off, size, a->rn, a->imm * size);
4434 return true;
4437 static bool trans_STR_pri(DisasContext *s, arg_rri *a)
4439 if (sve_access_check(s)) {
4440 int size = pred_full_reg_size(s);
4441 int off = pred_full_reg_offset(s, a->rd);
4442 do_str(s, off, size, a->rn, a->imm * size);
4444 return true;
4448 *** SVE Memory - Contiguous Load Group
4451 /* The memory mode of the dtype. */
4452 static const MemOp dtype_mop[16] = {
4453 MO_UB, MO_UB, MO_UB, MO_UB,
4454 MO_SL, MO_UW, MO_UW, MO_UW,
4455 MO_SW, MO_SW, MO_UL, MO_UL,
4456 MO_SB, MO_SB, MO_SB, MO_UQ
4459 #define dtype_msz(x) (dtype_mop[x] & MO_SIZE)
4461 /* The vector element size of dtype. */
4462 static const uint8_t dtype_esz[16] = {
4463 0, 1, 2, 3,
4464 3, 1, 2, 3,
4465 3, 2, 2, 3,
4466 3, 2, 1, 3
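/*
 * For example, dtype 4 is LD1SW into 64-bit elements: a sign-extending
 * 32-bit access (MO_SL) with esz 3; dtype 15 is LD1D (MO_UQ, esz 3).
 */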
4469 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
4470 int dtype, uint32_t mte_n, bool is_write,
4471 gen_helper_gvec_mem *fn)
4473 unsigned vsz = vec_full_reg_size(s);
4474 TCGv_ptr t_pg;
4475 int desc = 0;
4478 * For e.g. LD4, there are not enough arguments to pass all 4
4479 * registers as pointers, so encode the regno into the data field.
4480 * For consistency, do this even for LD1.
4482 if (s->mte_active[0]) {
4483 int msz = dtype_msz(dtype);
4485 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
4486 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
4487 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
4488 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
4489 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
4490 desc <<= SVE_MTEDESC_SHIFT;
4491 } else {
4492 addr = clean_data_tbi(s, addr);
4495 desc = simd_desc(vsz, vsz, zt | desc);
4496 t_pg = tcg_temp_new_ptr();
4498 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
4499 fn(cpu_env, t_pg, addr, tcg_constant_i32(desc));
4501 tcg_temp_free_ptr(t_pg);
4504 /* Indexed by [mte][be][dtype][nreg] */
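/*
 * nreg index 0 selects the LD1 form, 1..3 select LD2..LD4.  The NULL
 * entries are the extending dtypes, which exist only for single-register
 * loads.
 */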
4505 static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
4506 { /* mte inactive, little-endian */
4507 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
4508 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
4509 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
4510 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
4511 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
4513 { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
4514 { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
4515 gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
4516 { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
4517 { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },
4519 { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
4520 { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
4521 { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
4522 gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
4523 { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },
4525 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
4526 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
4527 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
4528 { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
4529 gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
4531 /* mte inactive, big-endian */
4532 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
4533 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
4534 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
4535 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
4536 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
4538 { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
4539 { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
4540 gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
4541 { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
4542 { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
4544 { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
4545 { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
4546 { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
4547 gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
4548 { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
4550 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
4551 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
4552 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
4553 { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
4554 gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },
4556 { /* mte active, little-endian */
4557 { { gen_helper_sve_ld1bb_r_mte,
4558 gen_helper_sve_ld2bb_r_mte,
4559 gen_helper_sve_ld3bb_r_mte,
4560 gen_helper_sve_ld4bb_r_mte },
4561 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
4562 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
4563 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
4565 { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
4566 { gen_helper_sve_ld1hh_le_r_mte,
4567 gen_helper_sve_ld2hh_le_r_mte,
4568 gen_helper_sve_ld3hh_le_r_mte,
4569 gen_helper_sve_ld4hh_le_r_mte },
4570 { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
4571 { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },
4573 { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
4574 { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
4575 { gen_helper_sve_ld1ss_le_r_mte,
4576 gen_helper_sve_ld2ss_le_r_mte,
4577 gen_helper_sve_ld3ss_le_r_mte,
4578 gen_helper_sve_ld4ss_le_r_mte },
4579 { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },
4581 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
4582 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
4583 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
4584 { gen_helper_sve_ld1dd_le_r_mte,
4585 gen_helper_sve_ld2dd_le_r_mte,
4586 gen_helper_sve_ld3dd_le_r_mte,
4587 gen_helper_sve_ld4dd_le_r_mte } },
4589 /* mte active, big-endian */
4590 { { gen_helper_sve_ld1bb_r_mte,
4591 gen_helper_sve_ld2bb_r_mte,
4592 gen_helper_sve_ld3bb_r_mte,
4593 gen_helper_sve_ld4bb_r_mte },
4594 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
4595 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
4596 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
4598 { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
4599 { gen_helper_sve_ld1hh_be_r_mte,
4600 gen_helper_sve_ld2hh_be_r_mte,
4601 gen_helper_sve_ld3hh_be_r_mte,
4602 gen_helper_sve_ld4hh_be_r_mte },
4603 { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
4604 { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },
4606 { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
4607 { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
4608 { gen_helper_sve_ld1ss_be_r_mte,
4609 gen_helper_sve_ld2ss_be_r_mte,
4610 gen_helper_sve_ld3ss_be_r_mte,
4611 gen_helper_sve_ld4ss_be_r_mte },
4612 { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },
4614 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
4615 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
4616 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
4617 { gen_helper_sve_ld1dd_be_r_mte,
4618 gen_helper_sve_ld2dd_be_r_mte,
4619 gen_helper_sve_ld3dd_be_r_mte,
4620 gen_helper_sve_ld4dd_be_r_mte } } },
4623 static void do_ld_zpa(DisasContext *s, int zt, int pg,
4624 TCGv_i64 addr, int dtype, int nreg)
4626 gen_helper_gvec_mem *fn
4627 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];
4630 * While there are holes in the table, they are not
4631 * accessible via the instruction encoding.
4633 assert(fn != NULL);
4634 do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn);
4637 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
4639 if (a->rm == 31) {
4640 return false;
4642 if (sve_access_check(s)) {
4643 TCGv_i64 addr = new_tmp_a64(s);
4644 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4645 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4646 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4648 return true;
4651 static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
4653 if (sve_access_check(s)) {
4654 int vsz = vec_full_reg_size(s);
4655 int elements = vsz >> dtype_esz[a->dtype];
4656 TCGv_i64 addr = new_tmp_a64(s);
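/*
 * The immediate counts whole transfers.  E.g. for LD2H (msz 1, esz 1,
 * nreg 1) with a 256-bit VL, elements == 16, so each increment of a->imm
 * advances the base by (16 * 2) << 1 == 64 bytes, i.e. two full vectors.
 */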
4658 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
4659 (a->imm * elements * (a->nreg + 1))
4660 << dtype_msz(a->dtype));
4661 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4663 return true;
4666 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
4668 static gen_helper_gvec_mem * const fns[2][2][16] = {
4669 { /* mte inactive, little-endian */
4670 { gen_helper_sve_ldff1bb_r,
4671 gen_helper_sve_ldff1bhu_r,
4672 gen_helper_sve_ldff1bsu_r,
4673 gen_helper_sve_ldff1bdu_r,
4675 gen_helper_sve_ldff1sds_le_r,
4676 gen_helper_sve_ldff1hh_le_r,
4677 gen_helper_sve_ldff1hsu_le_r,
4678 gen_helper_sve_ldff1hdu_le_r,
4680 gen_helper_sve_ldff1hds_le_r,
4681 gen_helper_sve_ldff1hss_le_r,
4682 gen_helper_sve_ldff1ss_le_r,
4683 gen_helper_sve_ldff1sdu_le_r,
4685 gen_helper_sve_ldff1bds_r,
4686 gen_helper_sve_ldff1bss_r,
4687 gen_helper_sve_ldff1bhs_r,
4688 gen_helper_sve_ldff1dd_le_r },
4690 /* mte inactive, big-endian */
4691 { gen_helper_sve_ldff1bb_r,
4692 gen_helper_sve_ldff1bhu_r,
4693 gen_helper_sve_ldff1bsu_r,
4694 gen_helper_sve_ldff1bdu_r,
4696 gen_helper_sve_ldff1sds_be_r,
4697 gen_helper_sve_ldff1hh_be_r,
4698 gen_helper_sve_ldff1hsu_be_r,
4699 gen_helper_sve_ldff1hdu_be_r,
4701 gen_helper_sve_ldff1hds_be_r,
4702 gen_helper_sve_ldff1hss_be_r,
4703 gen_helper_sve_ldff1ss_be_r,
4704 gen_helper_sve_ldff1sdu_be_r,
4706 gen_helper_sve_ldff1bds_r,
4707 gen_helper_sve_ldff1bss_r,
4708 gen_helper_sve_ldff1bhs_r,
4709 gen_helper_sve_ldff1dd_be_r } },
4711 { /* mte active, little-endian */
4712 { gen_helper_sve_ldff1bb_r_mte,
4713 gen_helper_sve_ldff1bhu_r_mte,
4714 gen_helper_sve_ldff1bsu_r_mte,
4715 gen_helper_sve_ldff1bdu_r_mte,
4717 gen_helper_sve_ldff1sds_le_r_mte,
4718 gen_helper_sve_ldff1hh_le_r_mte,
4719 gen_helper_sve_ldff1hsu_le_r_mte,
4720 gen_helper_sve_ldff1hdu_le_r_mte,
4722 gen_helper_sve_ldff1hds_le_r_mte,
4723 gen_helper_sve_ldff1hss_le_r_mte,
4724 gen_helper_sve_ldff1ss_le_r_mte,
4725 gen_helper_sve_ldff1sdu_le_r_mte,
4727 gen_helper_sve_ldff1bds_r_mte,
4728 gen_helper_sve_ldff1bss_r_mte,
4729 gen_helper_sve_ldff1bhs_r_mte,
4730 gen_helper_sve_ldff1dd_le_r_mte },
4732 /* mte active, big-endian */
4733 { gen_helper_sve_ldff1bb_r_mte,
4734 gen_helper_sve_ldff1bhu_r_mte,
4735 gen_helper_sve_ldff1bsu_r_mte,
4736 gen_helper_sve_ldff1bdu_r_mte,
4738 gen_helper_sve_ldff1sds_be_r_mte,
4739 gen_helper_sve_ldff1hh_be_r_mte,
4740 gen_helper_sve_ldff1hsu_be_r_mte,
4741 gen_helper_sve_ldff1hdu_be_r_mte,
4743 gen_helper_sve_ldff1hds_be_r_mte,
4744 gen_helper_sve_ldff1hss_be_r_mte,
4745 gen_helper_sve_ldff1ss_be_r_mte,
4746 gen_helper_sve_ldff1sdu_be_r_mte,
4748 gen_helper_sve_ldff1bds_r_mte,
4749 gen_helper_sve_ldff1bss_r_mte,
4750 gen_helper_sve_ldff1bhs_r_mte,
4751 gen_helper_sve_ldff1dd_be_r_mte } },
4754 if (sve_access_check(s)) {
4755 TCGv_i64 addr = new_tmp_a64(s);
4756 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4757 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4758 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
4759 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
4761 return true;
4764 static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
4766 static gen_helper_gvec_mem * const fns[2][2][16] = {
4767 { /* mte inactive, little-endian */
4768 { gen_helper_sve_ldnf1bb_r,
4769 gen_helper_sve_ldnf1bhu_r,
4770 gen_helper_sve_ldnf1bsu_r,
4771 gen_helper_sve_ldnf1bdu_r,
4773 gen_helper_sve_ldnf1sds_le_r,
4774 gen_helper_sve_ldnf1hh_le_r,
4775 gen_helper_sve_ldnf1hsu_le_r,
4776 gen_helper_sve_ldnf1hdu_le_r,
4778 gen_helper_sve_ldnf1hds_le_r,
4779 gen_helper_sve_ldnf1hss_le_r,
4780 gen_helper_sve_ldnf1ss_le_r,
4781 gen_helper_sve_ldnf1sdu_le_r,
4783 gen_helper_sve_ldnf1bds_r,
4784 gen_helper_sve_ldnf1bss_r,
4785 gen_helper_sve_ldnf1bhs_r,
4786 gen_helper_sve_ldnf1dd_le_r },
4788 /* mte inactive, big-endian */
4789 { gen_helper_sve_ldnf1bb_r,
4790 gen_helper_sve_ldnf1bhu_r,
4791 gen_helper_sve_ldnf1bsu_r,
4792 gen_helper_sve_ldnf1bdu_r,
4794 gen_helper_sve_ldnf1sds_be_r,
4795 gen_helper_sve_ldnf1hh_be_r,
4796 gen_helper_sve_ldnf1hsu_be_r,
4797 gen_helper_sve_ldnf1hdu_be_r,
4799 gen_helper_sve_ldnf1hds_be_r,
4800 gen_helper_sve_ldnf1hss_be_r,
4801 gen_helper_sve_ldnf1ss_be_r,
4802 gen_helper_sve_ldnf1sdu_be_r,
4804 gen_helper_sve_ldnf1bds_r,
4805 gen_helper_sve_ldnf1bss_r,
4806 gen_helper_sve_ldnf1bhs_r,
4807 gen_helper_sve_ldnf1dd_be_r } },
4809 { /* mte active, little-endian */
4810 { gen_helper_sve_ldnf1bb_r_mte,
4811 gen_helper_sve_ldnf1bhu_r_mte,
4812 gen_helper_sve_ldnf1bsu_r_mte,
4813 gen_helper_sve_ldnf1bdu_r_mte,
4815 gen_helper_sve_ldnf1sds_le_r_mte,
4816 gen_helper_sve_ldnf1hh_le_r_mte,
4817 gen_helper_sve_ldnf1hsu_le_r_mte,
4818 gen_helper_sve_ldnf1hdu_le_r_mte,
4820 gen_helper_sve_ldnf1hds_le_r_mte,
4821 gen_helper_sve_ldnf1hss_le_r_mte,
4822 gen_helper_sve_ldnf1ss_le_r_mte,
4823 gen_helper_sve_ldnf1sdu_le_r_mte,
4825 gen_helper_sve_ldnf1bds_r_mte,
4826 gen_helper_sve_ldnf1bss_r_mte,
4827 gen_helper_sve_ldnf1bhs_r_mte,
4828 gen_helper_sve_ldnf1dd_le_r_mte },
4830 /* mte active, big-endian */
4831 { gen_helper_sve_ldnf1bb_r_mte,
4832 gen_helper_sve_ldnf1bhu_r_mte,
4833 gen_helper_sve_ldnf1bsu_r_mte,
4834 gen_helper_sve_ldnf1bdu_r_mte,
4836 gen_helper_sve_ldnf1sds_be_r_mte,
4837 gen_helper_sve_ldnf1hh_be_r_mte,
4838 gen_helper_sve_ldnf1hsu_be_r_mte,
4839 gen_helper_sve_ldnf1hdu_be_r_mte,
4841 gen_helper_sve_ldnf1hds_be_r_mte,
4842 gen_helper_sve_ldnf1hss_be_r_mte,
4843 gen_helper_sve_ldnf1ss_be_r_mte,
4844 gen_helper_sve_ldnf1sdu_be_r_mte,
4846 gen_helper_sve_ldnf1bds_r_mte,
4847 gen_helper_sve_ldnf1bss_r_mte,
4848 gen_helper_sve_ldnf1bhs_r_mte,
4849 gen_helper_sve_ldnf1dd_be_r_mte } },
4852 if (sve_access_check(s)) {
4853 int vsz = vec_full_reg_size(s);
4854 int elements = vsz >> dtype_esz[a->dtype];
4855 int off = (a->imm * elements) << dtype_msz(a->dtype);
4856 TCGv_i64 addr = new_tmp_a64(s);
4858 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
4859 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
4860 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
4862 return true;
4865 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
4867 unsigned vsz = vec_full_reg_size(s);
4868 TCGv_ptr t_pg;
4869 int poff;
4871 /* Load the first quadword using the normal predicated load helpers. */
4872 poff = pred_full_reg_offset(s, pg);
4873 if (vsz > 16) {
4875 * Zero-extend the first 16 bits of the predicate into a temporary.
4876 * This avoids triggering an assert making sure we don't have bits
4877 * set within a predicate beyond VQ, but we have lowered VQ to 1
4878 * for this load operation.
4880 TCGv_i64 tmp = tcg_temp_new_i64();
4881 #if HOST_BIG_ENDIAN
4882 poff += 6;
4883 #endif
4884 tcg_gen_ld16u_i64(tmp, cpu_env, poff);
4886 poff = offsetof(CPUARMState, vfp.preg_tmp);
4887 tcg_gen_st_i64(tmp, cpu_env, poff);
4888 tcg_temp_free_i64(tmp);
4891 t_pg = tcg_temp_new_ptr();
4892 tcg_gen_addi_ptr(t_pg, cpu_env, poff);
4894 gen_helper_gvec_mem *fn
4895 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
4896 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
4898 tcg_temp_free_ptr(t_pg);
4900 /* Replicate that first quadword. */
4901 if (vsz > 16) {
4902 int doff = vec_full_reg_offset(s, zt);
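/*
 * tcg_gen_gvec_dup_mem with vece == 4 treats the 16-byte quadword at
 * doff as a single element and splats it across the remaining bytes.
 */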
4903 tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16);
4907 static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
4909 if (a->rm == 31) {
4910 return false;
4912 if (sve_access_check(s)) {
4913 int msz = dtype_msz(a->dtype);
4914 TCGv_i64 addr = new_tmp_a64(s);
4915 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
4916 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4917 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
4919 return true;
4922 static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
4924 if (sve_access_check(s)) {
4925 TCGv_i64 addr = new_tmp_a64(s);
4926 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
4927 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
4929 return true;
4932 static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
4934 unsigned vsz = vec_full_reg_size(s);
4935 unsigned vsz_r32;
4936 TCGv_ptr t_pg;
4937 int poff, doff;
4939 if (vsz < 32) {
4941 * Note that this UNDEFINED check comes after CheckSVEEnabled()
4942 * in the ARM pseudocode, which is the sve_access_check() done
4943 * in our caller. We should not now return false from the caller.
4945 unallocated_encoding(s);
4946 return;
4949 /* Load the first octaword using the normal predicated load helpers. */
4951 poff = pred_full_reg_offset(s, pg);
4952 if (vsz > 32) {
4954 * Zero-extend the first 32 bits of the predicate into a temporary.
4955 * This avoids triggering an assert making sure we don't have bits
4956 * set within a predicate beyond VQ, but we have lowered VQ to 2
4957 * for this load operation.
4959 TCGv_i64 tmp = tcg_temp_new_i64();
4960 #if HOST_BIG_ENDIAN
4961 poff += 4;
4962 #endif
4963 tcg_gen_ld32u_i64(tmp, cpu_env, poff);
4965 poff = offsetof(CPUARMState, vfp.preg_tmp);
4966 tcg_gen_st_i64(tmp, cpu_env, poff);
4967 tcg_temp_free_i64(tmp);
4970 t_pg = tcg_temp_new_ptr();
4971 tcg_gen_addi_ptr(t_pg, cpu_env, poff);
4973 gen_helper_gvec_mem *fn
4974 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
4975 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
4977 tcg_temp_free_ptr(t_pg);
4980 * Replicate that first octaword.
4981 * The replication happens in units of 32; if the full vector size
4982 * is not a multiple of 32, the final bits are zeroed.
4984 doff = vec_full_reg_offset(s, zt);
4985 vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32);
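/*
 * E.g. for vsz == 80 (a 640-bit VL): vsz_r32 == 64, so the octaword is
 * copied once to doff + 32 and the final 16 bytes are zeroed.
 */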
4986 if (vsz >= 64) {
4987 tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32);
4989 vsz -= vsz_r32;
4990 if (vsz) {
4991 tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0);
4995 static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
4997 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
4998 return false;
5000 if (a->rm == 31) {
5001 return false;
5003 if (sve_access_check(s)) {
5004 TCGv_i64 addr = new_tmp_a64(s);
5005 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
5006 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5007 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5009 return true;
5012 static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
5014 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5015 return false;
5017 if (sve_access_check(s)) {
5018 TCGv_i64 addr = new_tmp_a64(s);
5019 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
5020 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5022 return true;
5025 /* Load and broadcast element. */
5026 static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
5028 unsigned vsz = vec_full_reg_size(s);
5029 unsigned psz = pred_full_reg_size(s);
5030 unsigned esz = dtype_esz[a->dtype];
5031 unsigned msz = dtype_msz(a->dtype);
5032 TCGLabel *over;
5033 TCGv_i64 temp, clean_addr;
5035 if (!sve_access_check(s)) {
5036 return true;
5039 over = gen_new_label();
5041 /* If the guarding predicate has no bits set, no load occurs. */
5042 if (psz <= 8) {
5043 /* Reduce the pred_esz_masks value simply to reduce the
5044 * size of the code generated here.
5046 uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
5047 temp = tcg_temp_new_i64();
5048 tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
5049 tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
5050 tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
5051 tcg_temp_free_i64(temp);
5052 } else {
5053 TCGv_i32 t32 = tcg_temp_new_i32();
5054 find_last_active(s, t32, esz, a->pg);
5055 tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over);
5056 tcg_temp_free_i32(t32);
5059 /* Load the data. */
5060 temp = tcg_temp_new_i64();
5061 tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz);
5062 clean_addr = gen_mte_check1(s, temp, false, true, msz);
5064 tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
5065 finalize_memop(s, dtype_mop[a->dtype]));
5067 /* Broadcast to *all* elements. */
5068 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
5069 vsz, vsz, temp);
5070 tcg_temp_free_i64(temp);
5072 /* Zero the inactive elements. */
5073 gen_set_label(over);
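/*
 * The no-load branch lands here, so a governing predicate with no bits
 * set still leaves Zd cleared by do_movz_zpz below.
 */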
5074 return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false);
5077 static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
5078 int msz, int esz, int nreg)
5080 static gen_helper_gvec_mem * const fn_single[2][2][4][4] = {
5081 { { { gen_helper_sve_st1bb_r,
5082 gen_helper_sve_st1bh_r,
5083 gen_helper_sve_st1bs_r,
5084 gen_helper_sve_st1bd_r },
5085 { NULL,
5086 gen_helper_sve_st1hh_le_r,
5087 gen_helper_sve_st1hs_le_r,
5088 gen_helper_sve_st1hd_le_r },
5089 { NULL, NULL,
5090 gen_helper_sve_st1ss_le_r,
5091 gen_helper_sve_st1sd_le_r },
5092 { NULL, NULL, NULL,
5093 gen_helper_sve_st1dd_le_r } },
5094 { { gen_helper_sve_st1bb_r,
5095 gen_helper_sve_st1bh_r,
5096 gen_helper_sve_st1bs_r,
5097 gen_helper_sve_st1bd_r },
5098 { NULL,
5099 gen_helper_sve_st1hh_be_r,
5100 gen_helper_sve_st1hs_be_r,
5101 gen_helper_sve_st1hd_be_r },
5102 { NULL, NULL,
5103 gen_helper_sve_st1ss_be_r,
5104 gen_helper_sve_st1sd_be_r },
5105 { NULL, NULL, NULL,
5106 gen_helper_sve_st1dd_be_r } } },
5108 { { { gen_helper_sve_st1bb_r_mte,
5109 gen_helper_sve_st1bh_r_mte,
5110 gen_helper_sve_st1bs_r_mte,
5111 gen_helper_sve_st1bd_r_mte },
5112 { NULL,
5113 gen_helper_sve_st1hh_le_r_mte,
5114 gen_helper_sve_st1hs_le_r_mte,
5115 gen_helper_sve_st1hd_le_r_mte },
5116 { NULL, NULL,
5117 gen_helper_sve_st1ss_le_r_mte,
5118 gen_helper_sve_st1sd_le_r_mte },
5119 { NULL, NULL, NULL,
5120 gen_helper_sve_st1dd_le_r_mte } },
5121 { { gen_helper_sve_st1bb_r_mte,
5122 gen_helper_sve_st1bh_r_mte,
5123 gen_helper_sve_st1bs_r_mte,
5124 gen_helper_sve_st1bd_r_mte },
5125 { NULL,
5126 gen_helper_sve_st1hh_be_r_mte,
5127 gen_helper_sve_st1hs_be_r_mte,
5128 gen_helper_sve_st1hd_be_r_mte },
5129 { NULL, NULL,
5130 gen_helper_sve_st1ss_be_r_mte,
5131 gen_helper_sve_st1sd_be_r_mte },
5132 { NULL, NULL, NULL,
5133 gen_helper_sve_st1dd_be_r_mte } } },
5135 static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = {
5136 { { { gen_helper_sve_st2bb_r,
5137 gen_helper_sve_st2hh_le_r,
5138 gen_helper_sve_st2ss_le_r,
5139 gen_helper_sve_st2dd_le_r },
5140 { gen_helper_sve_st3bb_r,
5141 gen_helper_sve_st3hh_le_r,
5142 gen_helper_sve_st3ss_le_r,
5143 gen_helper_sve_st3dd_le_r },
5144 { gen_helper_sve_st4bb_r,
5145 gen_helper_sve_st4hh_le_r,
5146 gen_helper_sve_st4ss_le_r,
5147 gen_helper_sve_st4dd_le_r } },
5148 { { gen_helper_sve_st2bb_r,
5149 gen_helper_sve_st2hh_be_r,
5150 gen_helper_sve_st2ss_be_r,
5151 gen_helper_sve_st2dd_be_r },
5152 { gen_helper_sve_st3bb_r,
5153 gen_helper_sve_st3hh_be_r,
5154 gen_helper_sve_st3ss_be_r,
5155 gen_helper_sve_st3dd_be_r },
5156 { gen_helper_sve_st4bb_r,
5157 gen_helper_sve_st4hh_be_r,
5158 gen_helper_sve_st4ss_be_r,
5159 gen_helper_sve_st4dd_be_r } } },
5160 { { { gen_helper_sve_st2bb_r_mte,
5161 gen_helper_sve_st2hh_le_r_mte,
5162 gen_helper_sve_st2ss_le_r_mte,
5163 gen_helper_sve_st2dd_le_r_mte },
5164 { gen_helper_sve_st3bb_r_mte,
5165 gen_helper_sve_st3hh_le_r_mte,
5166 gen_helper_sve_st3ss_le_r_mte,
5167 gen_helper_sve_st3dd_le_r_mte },
5168 { gen_helper_sve_st4bb_r_mte,
5169 gen_helper_sve_st4hh_le_r_mte,
5170 gen_helper_sve_st4ss_le_r_mte,
5171 gen_helper_sve_st4dd_le_r_mte } },
5172 { { gen_helper_sve_st2bb_r_mte,
5173 gen_helper_sve_st2hh_be_r_mte,
5174 gen_helper_sve_st2ss_be_r_mte,
5175 gen_helper_sve_st2dd_be_r_mte },
5176 { gen_helper_sve_st3bb_r_mte,
5177 gen_helper_sve_st3hh_be_r_mte,
5178 gen_helper_sve_st3ss_be_r_mte,
5179 gen_helper_sve_st3dd_be_r_mte },
5180 { gen_helper_sve_st4bb_r_mte,
5181 gen_helper_sve_st4hh_be_r_mte,
5182 gen_helper_sve_st4ss_be_r_mte,
5183 gen_helper_sve_st4dd_be_r_mte } } },
5185 gen_helper_gvec_mem *fn;
5186 int be = s->be_data == MO_BE;
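/*
 * ST1 permits narrowing stores (msz <= esz), hence fn_single is indexed
 * by both sizes with NULL for the impossible msz > esz combinations;
 * ST2..ST4 store elements at their natural size, so only msz is needed.
 */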
5188 if (nreg == 0) {
5189 /* ST1 */
5190 fn = fn_single[s->mte_active[0]][be][msz][esz];
5191 nreg = 1;
5192 } else {
5193 /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
5194 assert(msz == esz);
5195 fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
5197 assert(fn != NULL);
5198 do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn);
5201 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
5203 if (a->rm == 31 || a->msz > a->esz) {
5204 return false;
5206 if (sve_access_check(s)) {
5207 TCGv_i64 addr = new_tmp_a64(s);
5208 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
5209 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5210 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5212 return true;
5215 static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
5217 if (a->msz > a->esz) {
5218 return false;
5220 if (sve_access_check(s)) {
5221 int vsz = vec_full_reg_size(s);
5222 int elements = vsz >> a->esz;
5223 TCGv_i64 addr = new_tmp_a64(s);
5225 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
5226 (a->imm * elements * (a->nreg + 1)) << a->msz);
5227 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5229 return true;
5233 *** SVE gather loads / scatter stores
5236 static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
5237 int scale, TCGv_i64 scalar, int msz, bool is_write,
5238 gen_helper_gvec_mem_scatter *fn)
5240 unsigned vsz = vec_full_reg_size(s);
5241 TCGv_ptr t_zm = tcg_temp_new_ptr();
5242 TCGv_ptr t_pg = tcg_temp_new_ptr();
5243 TCGv_ptr t_zt = tcg_temp_new_ptr();
5244 int desc = 0;
5246 if (s->mte_active[0]) {
5247 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
5248 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
5249 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
5250 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
5251 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
5252 desc <<= SVE_MTEDESC_SHIFT;
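/*
 * The MTE fields sit above SVE_MTEDESC_SHIFT in the simd data field;
 * the offset scale (0 or msz) occupies the low bits alongside them.
 */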
5254 desc = simd_desc(vsz, vsz, desc | scale);
5256 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
5257 tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
5258 tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
5259 fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
5261 tcg_temp_free_ptr(t_zt);
5262 tcg_temp_free_ptr(t_zm);
5263 tcg_temp_free_ptr(t_pg);
5266 /* Indexed by [mte][be][ff][xs][u][msz]. */
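/*
 * ff selects the first-fault forms; xs is the offset extension (0 uxtw,
 * 1 sxtw); u is zero-extending (1) vs sign-extending (0) data; msz is
 * the log2 size of the memory access.
 */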
5267 static gen_helper_gvec_mem_scatter * const
5268 gather_load_fn32[2][2][2][2][2][3] = {
5269 { /* MTE Inactive */
5270 { /* Little-endian */
5271 { { { gen_helper_sve_ldbss_zsu,
5272 gen_helper_sve_ldhss_le_zsu,
5273 NULL, },
5274 { gen_helper_sve_ldbsu_zsu,
5275 gen_helper_sve_ldhsu_le_zsu,
5276 gen_helper_sve_ldss_le_zsu, } },
5277 { { gen_helper_sve_ldbss_zss,
5278 gen_helper_sve_ldhss_le_zss,
5279 NULL, },
5280 { gen_helper_sve_ldbsu_zss,
5281 gen_helper_sve_ldhsu_le_zss,
5282 gen_helper_sve_ldss_le_zss, } } },
5284 /* First-fault */
5285 { { { gen_helper_sve_ldffbss_zsu,
5286 gen_helper_sve_ldffhss_le_zsu,
5287 NULL, },
5288 { gen_helper_sve_ldffbsu_zsu,
5289 gen_helper_sve_ldffhsu_le_zsu,
5290 gen_helper_sve_ldffss_le_zsu, } },
5291 { { gen_helper_sve_ldffbss_zss,
5292 gen_helper_sve_ldffhss_le_zss,
5293 NULL, },
5294 { gen_helper_sve_ldffbsu_zss,
5295 gen_helper_sve_ldffhsu_le_zss,
5296 gen_helper_sve_ldffss_le_zss, } } } },
5298 { /* Big-endian */
5299 { { { gen_helper_sve_ldbss_zsu,
5300 gen_helper_sve_ldhss_be_zsu,
5301 NULL, },
5302 { gen_helper_sve_ldbsu_zsu,
5303 gen_helper_sve_ldhsu_be_zsu,
5304 gen_helper_sve_ldss_be_zsu, } },
5305 { { gen_helper_sve_ldbss_zss,
5306 gen_helper_sve_ldhss_be_zss,
5307 NULL, },
5308 { gen_helper_sve_ldbsu_zss,
5309 gen_helper_sve_ldhsu_be_zss,
5310 gen_helper_sve_ldss_be_zss, } } },
5312 /* First-fault */
5313 { { { gen_helper_sve_ldffbss_zsu,
5314 gen_helper_sve_ldffhss_be_zsu,
5315 NULL, },
5316 { gen_helper_sve_ldffbsu_zsu,
5317 gen_helper_sve_ldffhsu_be_zsu,
5318 gen_helper_sve_ldffss_be_zsu, } },
5319 { { gen_helper_sve_ldffbss_zss,
5320 gen_helper_sve_ldffhss_be_zss,
5321 NULL, },
5322 { gen_helper_sve_ldffbsu_zss,
5323 gen_helper_sve_ldffhsu_be_zss,
5324 gen_helper_sve_ldffss_be_zss, } } } } },
5325 { /* MTE Active */
5326 { /* Little-endian */
5327 { { { gen_helper_sve_ldbss_zsu_mte,
5328 gen_helper_sve_ldhss_le_zsu_mte,
5329 NULL, },
5330 { gen_helper_sve_ldbsu_zsu_mte,
5331 gen_helper_sve_ldhsu_le_zsu_mte,
5332 gen_helper_sve_ldss_le_zsu_mte, } },
5333 { { gen_helper_sve_ldbss_zss_mte,
5334 gen_helper_sve_ldhss_le_zss_mte,
5335 NULL, },
5336 { gen_helper_sve_ldbsu_zss_mte,
5337 gen_helper_sve_ldhsu_le_zss_mte,
5338 gen_helper_sve_ldss_le_zss_mte, } } },
5340 /* First-fault */
5341 { { { gen_helper_sve_ldffbss_zsu_mte,
5342 gen_helper_sve_ldffhss_le_zsu_mte,
5343 NULL, },
5344 { gen_helper_sve_ldffbsu_zsu_mte,
5345 gen_helper_sve_ldffhsu_le_zsu_mte,
5346 gen_helper_sve_ldffss_le_zsu_mte, } },
5347 { { gen_helper_sve_ldffbss_zss_mte,
5348 gen_helper_sve_ldffhss_le_zss_mte,
5349 NULL, },
5350 { gen_helper_sve_ldffbsu_zss_mte,
5351 gen_helper_sve_ldffhsu_le_zss_mte,
5352 gen_helper_sve_ldffss_le_zss_mte, } } } },
5354 { /* Big-endian */
5355 { { { gen_helper_sve_ldbss_zsu_mte,
5356 gen_helper_sve_ldhss_be_zsu_mte,
5357 NULL, },
5358 { gen_helper_sve_ldbsu_zsu_mte,
5359 gen_helper_sve_ldhsu_be_zsu_mte,
5360 gen_helper_sve_ldss_be_zsu_mte, } },
5361 { { gen_helper_sve_ldbss_zss_mte,
5362 gen_helper_sve_ldhss_be_zss_mte,
5363 NULL, },
5364 { gen_helper_sve_ldbsu_zss_mte,
5365 gen_helper_sve_ldhsu_be_zss_mte,
5366 gen_helper_sve_ldss_be_zss_mte, } } },
5368 /* First-fault */
5369 { { { gen_helper_sve_ldffbss_zsu_mte,
5370 gen_helper_sve_ldffhss_be_zsu_mte,
5371 NULL, },
5372 { gen_helper_sve_ldffbsu_zsu_mte,
5373 gen_helper_sve_ldffhsu_be_zsu_mte,
5374 gen_helper_sve_ldffss_be_zsu_mte, } },
5375 { { gen_helper_sve_ldffbss_zss_mte,
5376 gen_helper_sve_ldffhss_be_zss_mte,
5377 NULL, },
5378 { gen_helper_sve_ldffbsu_zss_mte,
5379 gen_helper_sve_ldffhsu_be_zss_mte,
5380 gen_helper_sve_ldffss_be_zss_mte, } } } } },
5383 /* Note that we overload xs=2 to indicate 64-bit offset. */
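/*
 * As above, xs == 0 and 1 are uxtw and sxtw offsets held in 64-bit
 * elements; xs == 2 is the plain 64-bit (zd) offset form.
 */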
5384 static gen_helper_gvec_mem_scatter * const
5385 gather_load_fn64[2][2][2][3][2][4] = {
5386 { /* MTE Inactive */
5387 { /* Little-endian */
5388 { { { gen_helper_sve_ldbds_zsu,
5389 gen_helper_sve_ldhds_le_zsu,
5390 gen_helper_sve_ldsds_le_zsu,
5391 NULL, },
5392 { gen_helper_sve_ldbdu_zsu,
5393 gen_helper_sve_ldhdu_le_zsu,
5394 gen_helper_sve_ldsdu_le_zsu,
5395 gen_helper_sve_lddd_le_zsu, } },
5396 { { gen_helper_sve_ldbds_zss,
5397 gen_helper_sve_ldhds_le_zss,
5398 gen_helper_sve_ldsds_le_zss,
5399 NULL, },
5400 { gen_helper_sve_ldbdu_zss,
5401 gen_helper_sve_ldhdu_le_zss,
5402 gen_helper_sve_ldsdu_le_zss,
5403 gen_helper_sve_lddd_le_zss, } },
5404 { { gen_helper_sve_ldbds_zd,
5405 gen_helper_sve_ldhds_le_zd,
5406 gen_helper_sve_ldsds_le_zd,
5407 NULL, },
5408 { gen_helper_sve_ldbdu_zd,
5409 gen_helper_sve_ldhdu_le_zd,
5410 gen_helper_sve_ldsdu_le_zd,
5411 gen_helper_sve_lddd_le_zd, } } },
5413 /* First-fault */
5414 { { { gen_helper_sve_ldffbds_zsu,
5415 gen_helper_sve_ldffhds_le_zsu,
5416 gen_helper_sve_ldffsds_le_zsu,
5417 NULL, },
5418 { gen_helper_sve_ldffbdu_zsu,
5419 gen_helper_sve_ldffhdu_le_zsu,
5420 gen_helper_sve_ldffsdu_le_zsu,
5421 gen_helper_sve_ldffdd_le_zsu, } },
5422 { { gen_helper_sve_ldffbds_zss,
5423 gen_helper_sve_ldffhds_le_zss,
5424 gen_helper_sve_ldffsds_le_zss,
5425 NULL, },
5426 { gen_helper_sve_ldffbdu_zss,
5427 gen_helper_sve_ldffhdu_le_zss,
5428 gen_helper_sve_ldffsdu_le_zss,
5429 gen_helper_sve_ldffdd_le_zss, } },
5430 { { gen_helper_sve_ldffbds_zd,
5431 gen_helper_sve_ldffhds_le_zd,
5432 gen_helper_sve_ldffsds_le_zd,
5433 NULL, },
5434 { gen_helper_sve_ldffbdu_zd,
5435 gen_helper_sve_ldffhdu_le_zd,
5436 gen_helper_sve_ldffsdu_le_zd,
5437 gen_helper_sve_ldffdd_le_zd, } } } },
5438 { /* Big-endian */
5439 { { { gen_helper_sve_ldbds_zsu,
5440 gen_helper_sve_ldhds_be_zsu,
5441 gen_helper_sve_ldsds_be_zsu,
5442 NULL, },
5443 { gen_helper_sve_ldbdu_zsu,
5444 gen_helper_sve_ldhdu_be_zsu,
5445 gen_helper_sve_ldsdu_be_zsu,
5446 gen_helper_sve_lddd_be_zsu, } },
5447 { { gen_helper_sve_ldbds_zss,
5448 gen_helper_sve_ldhds_be_zss,
5449 gen_helper_sve_ldsds_be_zss,
5450 NULL, },
5451 { gen_helper_sve_ldbdu_zss,
5452 gen_helper_sve_ldhdu_be_zss,
5453 gen_helper_sve_ldsdu_be_zss,
5454 gen_helper_sve_lddd_be_zss, } },
5455 { { gen_helper_sve_ldbds_zd,
5456 gen_helper_sve_ldhds_be_zd,
5457 gen_helper_sve_ldsds_be_zd,
5458 NULL, },
5459 { gen_helper_sve_ldbdu_zd,
5460 gen_helper_sve_ldhdu_be_zd,
5461 gen_helper_sve_ldsdu_be_zd,
5462 gen_helper_sve_lddd_be_zd, } } },
5464 /* First-fault */
5465 { { { gen_helper_sve_ldffbds_zsu,
5466 gen_helper_sve_ldffhds_be_zsu,
5467 gen_helper_sve_ldffsds_be_zsu,
5468 NULL, },
5469 { gen_helper_sve_ldffbdu_zsu,
5470 gen_helper_sve_ldffhdu_be_zsu,
5471 gen_helper_sve_ldffsdu_be_zsu,
5472 gen_helper_sve_ldffdd_be_zsu, } },
5473 { { gen_helper_sve_ldffbds_zss,
5474 gen_helper_sve_ldffhds_be_zss,
5475 gen_helper_sve_ldffsds_be_zss,
5476 NULL, },
5477 { gen_helper_sve_ldffbdu_zss,
5478 gen_helper_sve_ldffhdu_be_zss,
5479 gen_helper_sve_ldffsdu_be_zss,
5480 gen_helper_sve_ldffdd_be_zss, } },
5481 { { gen_helper_sve_ldffbds_zd,
5482 gen_helper_sve_ldffhds_be_zd,
5483 gen_helper_sve_ldffsds_be_zd,
5484 NULL, },
5485 { gen_helper_sve_ldffbdu_zd,
5486 gen_helper_sve_ldffhdu_be_zd,
5487 gen_helper_sve_ldffsdu_be_zd,
5488 gen_helper_sve_ldffdd_be_zd, } } } } },
5489 { /* MTE Active */
5490 { /* Little-endian */
5491 { { { gen_helper_sve_ldbds_zsu_mte,
5492 gen_helper_sve_ldhds_le_zsu_mte,
5493 gen_helper_sve_ldsds_le_zsu_mte,
5494 NULL, },
5495 { gen_helper_sve_ldbdu_zsu_mte,
5496 gen_helper_sve_ldhdu_le_zsu_mte,
5497 gen_helper_sve_ldsdu_le_zsu_mte,
5498 gen_helper_sve_lddd_le_zsu_mte, } },
5499 { { gen_helper_sve_ldbds_zss_mte,
5500 gen_helper_sve_ldhds_le_zss_mte,
5501 gen_helper_sve_ldsds_le_zss_mte,
5502 NULL, },
5503 { gen_helper_sve_ldbdu_zss_mte,
5504 gen_helper_sve_ldhdu_le_zss_mte,
5505 gen_helper_sve_ldsdu_le_zss_mte,
5506 gen_helper_sve_lddd_le_zss_mte, } },
5507 { { gen_helper_sve_ldbds_zd_mte,
5508 gen_helper_sve_ldhds_le_zd_mte,
5509 gen_helper_sve_ldsds_le_zd_mte,
5510 NULL, },
5511 { gen_helper_sve_ldbdu_zd_mte,
5512 gen_helper_sve_ldhdu_le_zd_mte,
5513 gen_helper_sve_ldsdu_le_zd_mte,
5514 gen_helper_sve_lddd_le_zd_mte, } } },
5516 /* First-fault */
5517 { { { gen_helper_sve_ldffbds_zsu_mte,
5518 gen_helper_sve_ldffhds_le_zsu_mte,
5519 gen_helper_sve_ldffsds_le_zsu_mte,
5520 NULL, },
5521 { gen_helper_sve_ldffbdu_zsu_mte,
5522 gen_helper_sve_ldffhdu_le_zsu_mte,
5523 gen_helper_sve_ldffsdu_le_zsu_mte,
5524 gen_helper_sve_ldffdd_le_zsu_mte, } },
5525 { { gen_helper_sve_ldffbds_zss_mte,
5526 gen_helper_sve_ldffhds_le_zss_mte,
5527 gen_helper_sve_ldffsds_le_zss_mte,
5528 NULL, },
5529 { gen_helper_sve_ldffbdu_zss_mte,
5530 gen_helper_sve_ldffhdu_le_zss_mte,
5531 gen_helper_sve_ldffsdu_le_zss_mte,
5532 gen_helper_sve_ldffdd_le_zss_mte, } },
5533 { { gen_helper_sve_ldffbds_zd_mte,
5534 gen_helper_sve_ldffhds_le_zd_mte,
5535 gen_helper_sve_ldffsds_le_zd_mte,
5536 NULL, },
5537 { gen_helper_sve_ldffbdu_zd_mte,
5538 gen_helper_sve_ldffhdu_le_zd_mte,
5539 gen_helper_sve_ldffsdu_le_zd_mte,
5540 gen_helper_sve_ldffdd_le_zd_mte, } } } },
5541 { /* Big-endian */
5542 { { { gen_helper_sve_ldbds_zsu_mte,
5543 gen_helper_sve_ldhds_be_zsu_mte,
5544 gen_helper_sve_ldsds_be_zsu_mte,
5545 NULL, },
5546 { gen_helper_sve_ldbdu_zsu_mte,
5547 gen_helper_sve_ldhdu_be_zsu_mte,
5548 gen_helper_sve_ldsdu_be_zsu_mte,
5549 gen_helper_sve_lddd_be_zsu_mte, } },
5550 { { gen_helper_sve_ldbds_zss_mte,
5551 gen_helper_sve_ldhds_be_zss_mte,
5552 gen_helper_sve_ldsds_be_zss_mte,
5553 NULL, },
5554 { gen_helper_sve_ldbdu_zss_mte,
5555 gen_helper_sve_ldhdu_be_zss_mte,
5556 gen_helper_sve_ldsdu_be_zss_mte,
5557 gen_helper_sve_lddd_be_zss_mte, } },
5558 { { gen_helper_sve_ldbds_zd_mte,
5559 gen_helper_sve_ldhds_be_zd_mte,
5560 gen_helper_sve_ldsds_be_zd_mte,
5561 NULL, },
5562 { gen_helper_sve_ldbdu_zd_mte,
5563 gen_helper_sve_ldhdu_be_zd_mte,
5564 gen_helper_sve_ldsdu_be_zd_mte,
5565 gen_helper_sve_lddd_be_zd_mte, } } },
5567 /* First-fault */
5568 { { { gen_helper_sve_ldffbds_zsu_mte,
5569 gen_helper_sve_ldffhds_be_zsu_mte,
5570 gen_helper_sve_ldffsds_be_zsu_mte,
5571 NULL, },
5572 { gen_helper_sve_ldffbdu_zsu_mte,
5573 gen_helper_sve_ldffhdu_be_zsu_mte,
5574 gen_helper_sve_ldffsdu_be_zsu_mte,
5575 gen_helper_sve_ldffdd_be_zsu_mte, } },
5576 { { gen_helper_sve_ldffbds_zss_mte,
5577 gen_helper_sve_ldffhds_be_zss_mte,
5578 gen_helper_sve_ldffsds_be_zss_mte,
5579 NULL, },
5580 { gen_helper_sve_ldffbdu_zss_mte,
5581 gen_helper_sve_ldffhdu_be_zss_mte,
5582 gen_helper_sve_ldffsdu_be_zss_mte,
5583 gen_helper_sve_ldffdd_be_zss_mte, } },
5584 { { gen_helper_sve_ldffbds_zd_mte,
5585 gen_helper_sve_ldffhds_be_zd_mte,
5586 gen_helper_sve_ldffsds_be_zd_mte,
5587 NULL, },
5588 { gen_helper_sve_ldffbdu_zd_mte,
5589 gen_helper_sve_ldffhdu_be_zd_mte,
5590 gen_helper_sve_ldffsdu_be_zd_mte,
5591 gen_helper_sve_ldffdd_be_zd_mte, } } } } },
5594 static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
5596 gen_helper_gvec_mem_scatter *fn = NULL;
5597 bool be = s->be_data == MO_BE;
5598 bool mte = s->mte_active[0];
5600 if (!sve_access_check(s)) {
5601 return true;
5604 switch (a->esz) {
5605 case MO_32:
5606 fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz];
5607 break;
5608 case MO_64:
5609 fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
5610 break;
5612 assert(fn != NULL);
5614 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
5615 cpu_reg_sp(s, a->rn), a->msz, false, fn);
5616 return true;
5619 static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
5621 gen_helper_gvec_mem_scatter *fn = NULL;
5622 bool be = s->be_data == MO_BE;
5623 bool mte = s->mte_active[0];
5625 if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
5626 return false;
5628 if (!sve_access_check(s)) {
5629 return true;
5632 switch (a->esz) {
5633 case MO_32:
5634 fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz];
5635 break;
5636 case MO_64:
5637 fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz];
5638 break;
5640 assert(fn != NULL);
5642 /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
5643 * by loading the immediate into the scalar parameter.
5645 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5646 tcg_constant_i64(a->imm << a->msz), a->msz, false, fn);
5647 return true;
5650 static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
5652 gen_helper_gvec_mem_scatter *fn = NULL;
5653 bool be = s->be_data == MO_BE;
5654 bool mte = s->mte_active[0];
5656 if (a->esz < a->msz + !a->u) {
5657 return false;
5659 if (!dc_isar_feature(aa64_sve2, s)) {
5660 return false;
5662 if (!sve_access_check(s)) {
5663 return true;
5666 switch (a->esz) {
5667 case MO_32:
5668 fn = gather_load_fn32[mte][be][0][0][a->u][a->msz];
5669 break;
5670 case MO_64:
5671 fn = gather_load_fn64[mte][be][0][2][a->u][a->msz];
5672 break;
5674 assert(fn != NULL);
5676 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5677 cpu_reg(s, a->rm), a->msz, false, fn);
5678 return true;
5681 /* Indexed by [mte][be][xs][msz]. */
5682 static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
5683 { /* MTE Inactive */
5684 { /* Little-endian */
5685 { gen_helper_sve_stbs_zsu,
5686 gen_helper_sve_sths_le_zsu,
5687 gen_helper_sve_stss_le_zsu, },
5688 { gen_helper_sve_stbs_zss,
5689 gen_helper_sve_sths_le_zss,
5690 gen_helper_sve_stss_le_zss, } },
5691 { /* Big-endian */
5692 { gen_helper_sve_stbs_zsu,
5693 gen_helper_sve_sths_be_zsu,
5694 gen_helper_sve_stss_be_zsu, },
5695 { gen_helper_sve_stbs_zss,
5696 gen_helper_sve_sths_be_zss,
5697 gen_helper_sve_stss_be_zss, } } },
5698 { /* MTE Active */
5699 { /* Little-endian */
5700 { gen_helper_sve_stbs_zsu_mte,
5701 gen_helper_sve_sths_le_zsu_mte,
5702 gen_helper_sve_stss_le_zsu_mte, },
5703 { gen_helper_sve_stbs_zss_mte,
5704 gen_helper_sve_sths_le_zss_mte,
5705 gen_helper_sve_stss_le_zss_mte, } },
5706 { /* Big-endian */
5707 { gen_helper_sve_stbs_zsu_mte,
5708 gen_helper_sve_sths_be_zsu_mte,
5709 gen_helper_sve_stss_be_zsu_mte, },
5710 { gen_helper_sve_stbs_zss_mte,
5711 gen_helper_sve_sths_be_zss_mte,
5712 gen_helper_sve_stss_be_zss_mte, } } },
5715 /* Note that we overload xs=2 to indicate 64-bit offset. */
5716 static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
5717 { /* MTE Inactive */
5718 { /* Little-endian */
5719 { gen_helper_sve_stbd_zsu,
5720 gen_helper_sve_sthd_le_zsu,
5721 gen_helper_sve_stsd_le_zsu,
5722 gen_helper_sve_stdd_le_zsu, },
5723 { gen_helper_sve_stbd_zss,
5724 gen_helper_sve_sthd_le_zss,
5725 gen_helper_sve_stsd_le_zss,
5726 gen_helper_sve_stdd_le_zss, },
5727 { gen_helper_sve_stbd_zd,
5728 gen_helper_sve_sthd_le_zd,
5729 gen_helper_sve_stsd_le_zd,
5730 gen_helper_sve_stdd_le_zd, } },
5731 { /* Big-endian */
5732 { gen_helper_sve_stbd_zsu,
5733 gen_helper_sve_sthd_be_zsu,
5734 gen_helper_sve_stsd_be_zsu,
5735 gen_helper_sve_stdd_be_zsu, },
5736 { gen_helper_sve_stbd_zss,
5737 gen_helper_sve_sthd_be_zss,
5738 gen_helper_sve_stsd_be_zss,
5739 gen_helper_sve_stdd_be_zss, },
5740 { gen_helper_sve_stbd_zd,
5741 gen_helper_sve_sthd_be_zd,
5742 gen_helper_sve_stsd_be_zd,
5743 gen_helper_sve_stdd_be_zd, } } },
5744 { /* MTE Active */
5745 { /* Little-endian */
5746 { gen_helper_sve_stbd_zsu_mte,
5747 gen_helper_sve_sthd_le_zsu_mte,
5748 gen_helper_sve_stsd_le_zsu_mte,
5749 gen_helper_sve_stdd_le_zsu_mte, },
5750 { gen_helper_sve_stbd_zss_mte,
5751 gen_helper_sve_sthd_le_zss_mte,
5752 gen_helper_sve_stsd_le_zss_mte,
5753 gen_helper_sve_stdd_le_zss_mte, },
5754 { gen_helper_sve_stbd_zd_mte,
5755 gen_helper_sve_sthd_le_zd_mte,
5756 gen_helper_sve_stsd_le_zd_mte,
5757 gen_helper_sve_stdd_le_zd_mte, } },
5758 { /* Big-endian */
5759 { gen_helper_sve_stbd_zsu_mte,
5760 gen_helper_sve_sthd_be_zsu_mte,
5761 gen_helper_sve_stsd_be_zsu_mte,
5762 gen_helper_sve_stdd_be_zsu_mte, },
5763 { gen_helper_sve_stbd_zss_mte,
5764 gen_helper_sve_sthd_be_zss_mte,
5765 gen_helper_sve_stsd_be_zss_mte,
5766 gen_helper_sve_stdd_be_zss_mte, },
5767 { gen_helper_sve_stbd_zd_mte,
5768 gen_helper_sve_sthd_be_zd_mte,
5769 gen_helper_sve_stsd_be_zd_mte,
5770 gen_helper_sve_stdd_be_zd_mte, } } },
5773 static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
5775 gen_helper_gvec_mem_scatter *fn;
5776 bool be = s->be_data == MO_BE;
5777 bool mte = s->mte_active[0];
5779 if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
5780 return false;
5782 if (!sve_access_check(s)) {
5783 return true;
5785 switch (a->esz) {
5786 case MO_32:
5787 fn = scatter_store_fn32[mte][be][a->xs][a->msz];
5788 break;
5789 case MO_64:
5790 fn = scatter_store_fn64[mte][be][a->xs][a->msz];
5791 break;
5792 default:
5793 g_assert_not_reached();
5795 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
5796 cpu_reg_sp(s, a->rn), a->msz, true, fn);
5797 return true;
5800 static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
5802 gen_helper_gvec_mem_scatter *fn = NULL;
5803 bool be = s->be_data == MO_BE;
5804 bool mte = s->mte_active[0];
5806 if (a->esz < a->msz) {
5807 return false;
5809 if (!sve_access_check(s)) {
5810 return true;
5813 switch (a->esz) {
5814 case MO_32:
5815 fn = scatter_store_fn32[mte][be][0][a->msz];
5816 break;
5817 case MO_64:
5818 fn = scatter_store_fn64[mte][be][2][a->msz];
5819 break;
5821 assert(fn != NULL);
5823 /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
5824 * by loading the immediate into the scalar parameter.
5826 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5827 tcg_constant_i64(a->imm << a->msz), a->msz, true, fn);
5828 return true;
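/*
 * For example, with a->msz == MO_32 the immediate is scaled to
 * a->imm << 2 bytes before being passed as the scalar base, so the
 * helper sees the same base-plus-vector addressing form as ST1_zprz.
 */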
5831 static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
5833 gen_helper_gvec_mem_scatter *fn;
5834 bool be = s->be_data == MO_BE;
5835 bool mte = s->mte_active[0];
5837 if (a->esz < a->msz) {
5838 return false;
5840 if (!dc_isar_feature(aa64_sve2, s)) {
5841 return false;
5843 if (!sve_access_check(s)) {
5844 return true;
5847 switch (a->esz) {
5848 case MO_32:
5849 fn = scatter_store_fn32[mte][be][0][a->msz];
5850 break;
5851 case MO_64:
5852 fn = scatter_store_fn64[mte][be][2][a->msz];
5853 break;
5854 default:
5855 g_assert_not_reached();
5858 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5859 cpu_reg(s, a->rm), a->msz, true, fn);
5860 return true;
5864 * Prefetches
5867 static bool trans_PRF(DisasContext *s, arg_PRF *a)
5869 /* Prefetch is a nop within QEMU. */
5870 (void)sve_access_check(s);
5871 return true;
5874 static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
5876 if (a->rm == 31) {
5877 return false;
5879 /* Prefetch is a nop within QEMU. */
5880 (void)sve_access_check(s);
5881 return true;
5885 * Move Prefix
5887 * TODO: The implementation so far could handle predicated merging movprfx.
5888 * The helper functions as written take an extra source register to
5889 * use in the operation, but the result is only written when predication
5890 * succeeds. For unpredicated movprfx, we need to rearrange the helpers
5891 * to allow the final write back to the destination to be unconditional.
5892 * For predicated zeroing movprfx, we need to rearrange the helpers to
5893 * allow the final write back to zero inactives.
5895 * In the meantime, just emit the moves.
5898 TRANS_FEAT(MOVPRFX, aa64_sve, do_mov_z, a->rd, a->rn)
5899 TRANS_FEAT(MOVPRFX_m, aa64_sve, do_sel_z, a->rd, a->rn, a->rd, a->pg, a->esz)
5900 TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false)
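/*
 * As emitted here, unpredicated MOVPRFX is a plain move, the merging form
 * is a select between Zn and the existing Zd, and the zeroing form is a
 * predicated move that clears the inactive elements; none of them is yet
 * fused with the instruction that follows.
 */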
5903 * SVE2 Integer Multiply - Unpredicated
5906 TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a)
5908 static gen_helper_gvec_3 * const smulh_zzz_fns[4] = {
5909 gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
5910 gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d,
5912 TRANS_FEAT(SMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
5913 smulh_zzz_fns[a->esz], a, 0)
5915 static gen_helper_gvec_3 * const umulh_zzz_fns[4] = {
5916 gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h,
5917 gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d,
5919 TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
5920 umulh_zzz_fns[a->esz], a, 0)
5922 TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
5923 gen_helper_gvec_pmul_b, a, 0)
5925 static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = {
5926 gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h,
5927 gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d,
5929 TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
5930 sqdmulh_zzz_fns[a->esz], a, 0)
5932 static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = {
5933 gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h,
5934 gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d,
5936 TRANS_FEAT(SQRDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
5937 sqrdmulh_zzz_fns[a->esz], a, 0)
5940 * SVE2 Integer - Predicated
5943 static gen_helper_gvec_4 * const sadlp_fns[4] = {
5944 NULL, gen_helper_sve2_sadalp_zpzz_h,
5945 gen_helper_sve2_sadalp_zpzz_s, gen_helper_sve2_sadalp_zpzz_d,
5947 TRANS_FEAT(SADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz,
5948 sadlp_fns[a->esz], a, 0)
5950 static gen_helper_gvec_4 * const uadlp_fns[4] = {
5951 NULL, gen_helper_sve2_uadalp_zpzz_h,
5952 gen_helper_sve2_uadalp_zpzz_s, gen_helper_sve2_uadalp_zpzz_d,
5954 TRANS_FEAT(UADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz,
5955 uadlp_fns[a->esz], a, 0)
5958 * SVE2 integer unary operations (predicated)
5961 TRANS_FEAT(URECPE, aa64_sve2, gen_gvec_ool_arg_zpz,
5962 a->esz == 2 ? gen_helper_sve2_urecpe_s : NULL, a, 0)
5964 TRANS_FEAT(URSQRTE, aa64_sve2, gen_gvec_ool_arg_zpz,
5965 a->esz == 2 ? gen_helper_sve2_ursqrte_s : NULL, a, 0)
5967 static gen_helper_gvec_3 * const sqabs_fns[4] = {
5968 gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h,
5969 gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d,
5971 TRANS_FEAT(SQABS, aa64_sve2, gen_gvec_ool_arg_zpz, sqabs_fns[a->esz], a, 0)
5973 static gen_helper_gvec_3 * const sqneg_fns[4] = {
5974 gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h,
5975 gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d,
5977 TRANS_FEAT(SQNEG, aa64_sve2, gen_gvec_ool_arg_zpz, sqneg_fns[a->esz], a, 0)
5979 DO_ZPZZ(SQSHL, aa64_sve2, sve2_sqshl)
5980 DO_ZPZZ(SQRSHL, aa64_sve2, sve2_sqrshl)
5981 DO_ZPZZ(SRSHL, aa64_sve2, sve2_srshl)
5983 DO_ZPZZ(UQSHL, aa64_sve2, sve2_uqshl)
5984 DO_ZPZZ(UQRSHL, aa64_sve2, sve2_uqrshl)
5985 DO_ZPZZ(URSHL, aa64_sve2, sve2_urshl)
5987 DO_ZPZZ(SHADD, aa64_sve2, sve2_shadd)
5988 DO_ZPZZ(SRHADD, aa64_sve2, sve2_srhadd)
5989 DO_ZPZZ(SHSUB, aa64_sve2, sve2_shsub)
5991 DO_ZPZZ(UHADD, aa64_sve2, sve2_uhadd)
5992 DO_ZPZZ(URHADD, aa64_sve2, sve2_urhadd)
5993 DO_ZPZZ(UHSUB, aa64_sve2, sve2_uhsub)
5995 DO_ZPZZ(ADDP, aa64_sve2, sve2_addp)
5996 DO_ZPZZ(SMAXP, aa64_sve2, sve2_smaxp)
5997 DO_ZPZZ(UMAXP, aa64_sve2, sve2_umaxp)
5998 DO_ZPZZ(SMINP, aa64_sve2, sve2_sminp)
5999 DO_ZPZZ(UMINP, aa64_sve2, sve2_uminp)
6001 DO_ZPZZ(SQADD_zpzz, aa64_sve2, sve2_sqadd)
6002 DO_ZPZZ(UQADD_zpzz, aa64_sve2, sve2_uqadd)
6003 DO_ZPZZ(SQSUB_zpzz, aa64_sve2, sve2_sqsub)
6004 DO_ZPZZ(UQSUB_zpzz, aa64_sve2, sve2_uqsub)
6005 DO_ZPZZ(SUQADD, aa64_sve2, sve2_suqadd)
6006 DO_ZPZZ(USQADD, aa64_sve2, sve2_usqadd)
6009 * SVE2 Widening Integer Arithmetic
6012 static gen_helper_gvec_3 * const saddl_fns[4] = {
6013 NULL, gen_helper_sve2_saddl_h,
6014 gen_helper_sve2_saddl_s, gen_helper_sve2_saddl_d,
6016 TRANS_FEAT(SADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6017 saddl_fns[a->esz], a, 0)
6018 TRANS_FEAT(SADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6019 saddl_fns[a->esz], a, 3)
6020 TRANS_FEAT(SADDLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
6021 saddl_fns[a->esz], a, 2)
6023 static gen_helper_gvec_3 * const ssubl_fns[4] = {
6024 NULL, gen_helper_sve2_ssubl_h,
6025 gen_helper_sve2_ssubl_s, gen_helper_sve2_ssubl_d,
6027 TRANS_FEAT(SSUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6028 ssubl_fns[a->esz], a, 0)
6029 TRANS_FEAT(SSUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6030 ssubl_fns[a->esz], a, 3)
6031 TRANS_FEAT(SSUBLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
6032 ssubl_fns[a->esz], a, 2)
6033 TRANS_FEAT(SSUBLTB, aa64_sve2, gen_gvec_ool_arg_zzz,
6034 ssubl_fns[a->esz], a, 1)
6036 static gen_helper_gvec_3 * const sabdl_fns[4] = {
6037 NULL, gen_helper_sve2_sabdl_h,
6038 gen_helper_sve2_sabdl_s, gen_helper_sve2_sabdl_d,
6040 TRANS_FEAT(SABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6041 sabdl_fns[a->esz], a, 0)
6042 TRANS_FEAT(SABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6043 sabdl_fns[a->esz], a, 3)
6045 static gen_helper_gvec_3 * const uaddl_fns[4] = {
6046 NULL, gen_helper_sve2_uaddl_h,
6047 gen_helper_sve2_uaddl_s, gen_helper_sve2_uaddl_d,
6049 TRANS_FEAT(UADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6050 uaddl_fns[a->esz], a, 0)
6051 TRANS_FEAT(UADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6052 uaddl_fns[a->esz], a, 3)
6054 static gen_helper_gvec_3 * const usubl_fns[4] = {
6055 NULL, gen_helper_sve2_usubl_h,
6056 gen_helper_sve2_usubl_s, gen_helper_sve2_usubl_d,
6058 TRANS_FEAT(USUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6059 usubl_fns[a->esz], a, 0)
6060 TRANS_FEAT(USUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6061 usubl_fns[a->esz], a, 3)
6063 static gen_helper_gvec_3 * const uabdl_fns[4] = {
6064 NULL, gen_helper_sve2_uabdl_h,
6065 gen_helper_sve2_uabdl_s, gen_helper_sve2_uabdl_d,
6067 TRANS_FEAT(UABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6068 uabdl_fns[a->esz], a, 0)
6069 TRANS_FEAT(UABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6070 uabdl_fns[a->esz], a, 3)
6072 static gen_helper_gvec_3 * const sqdmull_fns[4] = {
6073 NULL, gen_helper_sve2_sqdmull_zzz_h,
6074 gen_helper_sve2_sqdmull_zzz_s, gen_helper_sve2_sqdmull_zzz_d,
6076 TRANS_FEAT(SQDMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6077 sqdmull_fns[a->esz], a, 0)
6078 TRANS_FEAT(SQDMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6079 sqdmull_fns[a->esz], a, 3)
6081 static gen_helper_gvec_3 * const smull_fns[4] = {
6082 NULL, gen_helper_sve2_smull_zzz_h,
6083 gen_helper_sve2_smull_zzz_s, gen_helper_sve2_smull_zzz_d,
6085 TRANS_FEAT(SMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6086 smull_fns[a->esz], a, 0)
6087 TRANS_FEAT(SMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6088 smull_fns[a->esz], a, 3)
6090 static gen_helper_gvec_3 * const umull_fns[4] = {
6091 NULL, gen_helper_sve2_umull_zzz_h,
6092 gen_helper_sve2_umull_zzz_s, gen_helper_sve2_umull_zzz_d,
6094 TRANS_FEAT(UMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6095 umull_fns[a->esz], a, 0)
6096 TRANS_FEAT(UMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6097 umull_fns[a->esz], a, 3)
6099 static gen_helper_gvec_3 * const eoril_fns[4] = {
6100 gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
6101 gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
6103 TRANS_FEAT(EORBT, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 2)
6104 TRANS_FEAT(EORTB, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 1)
6106 static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
6108 static gen_helper_gvec_3 * const fns[4] = {
6109 gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
6110 NULL, gen_helper_sve2_pmull_d,
6112 if (a->esz == 0 && !dc_isar_feature(aa64_sve2_pmull128, s)) {
6113 return false;
6115 return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
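/*
 * Note the NULL at fns[MO_32]: the word-sized form is unallocated and is
 * rejected by the common expander, while the byte form (producing the
 * 128-bit polynomial result) additionally requires PMULL128, checked above.
 */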
6118 TRANS_FEAT(PMULLB, aa64_sve2, do_trans_pmull, a, false)
6119 TRANS_FEAT(PMULLT, aa64_sve2, do_trans_pmull, a, true)
6121 static gen_helper_gvec_3 * const saddw_fns[4] = {
6122 NULL, gen_helper_sve2_saddw_h,
6123 gen_helper_sve2_saddw_s, gen_helper_sve2_saddw_d,
6125 TRANS_FEAT(SADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 0)
6126 TRANS_FEAT(SADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 1)
6128 static gen_helper_gvec_3 * const ssubw_fns[4] = {
6129 NULL, gen_helper_sve2_ssubw_h,
6130 gen_helper_sve2_ssubw_s, gen_helper_sve2_ssubw_d,
6132 TRANS_FEAT(SSUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 0)
6133 TRANS_FEAT(SSUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 1)
6135 static gen_helper_gvec_3 * const uaddw_fns[4] = {
6136 NULL, gen_helper_sve2_uaddw_h,
6137 gen_helper_sve2_uaddw_s, gen_helper_sve2_uaddw_d,
6139 TRANS_FEAT(UADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 0)
6140 TRANS_FEAT(UADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 1)
6142 static gen_helper_gvec_3 * const usubw_fns[4] = {
6143 NULL, gen_helper_sve2_usubw_h,
6144 gen_helper_sve2_usubw_s, gen_helper_sve2_usubw_d,
6146 TRANS_FEAT(USUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 0)
6147 TRANS_FEAT(USUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 1)
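/*
 * Expansion sketch for the widening left shifts below (SSHLLB/T, USHLLB/T):
 * the immediate passed to tcg_gen_gvec_2i packs the shift amount in
 * imm >> 1 and the bottom/top selector in imm & 1.  The bottom-half forms
 * move the narrow element to the high half of the wide lane (shli by
 * halfbits) and then shift back down by halfbits - shl, so sign or zero
 * extension and the left shift happen together; the top-half forms shift
 * the high half down first, and the shl == halfbits case reduces to
 * masking the high halves in place.
 */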
6149 static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
6151 int top = imm & 1;
6152 int shl = imm >> 1;
6153 int halfbits = 4 << vece;
6155 if (top) {
6156 if (shl == halfbits) {
6157 TCGv_vec t = tcg_temp_new_vec_matching(d);
6158 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
6159 tcg_gen_and_vec(vece, d, n, t);
6160 tcg_temp_free_vec(t);
6161 } else {
6162 tcg_gen_sari_vec(vece, d, n, halfbits);
6163 tcg_gen_shli_vec(vece, d, d, shl);
6165 } else {
6166 tcg_gen_shli_vec(vece, d, n, halfbits);
6167 tcg_gen_sari_vec(vece, d, d, halfbits - shl);
6171 static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
6173 int halfbits = 4 << vece;
6174 int top = imm & 1;
6175 int shl = (imm >> 1);
6176 int shift;
6177 uint64_t mask;
6179 mask = MAKE_64BIT_MASK(0, halfbits);
6180 mask <<= shl;
6181 mask = dup_const(vece, mask);
6183 shift = shl - top * halfbits;
6184 if (shift < 0) {
6185 tcg_gen_shri_i64(d, n, -shift);
6186 } else {
6187 tcg_gen_shli_i64(d, n, shift);
6189 tcg_gen_andi_i64(d, d, mask);
6192 static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6194 gen_ushll_i64(MO_16, d, n, imm);
6197 static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6199 gen_ushll_i64(MO_32, d, n, imm);
6202 static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6204 gen_ushll_i64(MO_64, d, n, imm);
6207 static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
6209 int halfbits = 4 << vece;
6210 int top = imm & 1;
6211 int shl = imm >> 1;
6213 if (top) {
6214 if (shl == halfbits) {
6215 TCGv_vec t = tcg_temp_new_vec_matching(d);
6216 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
6217 tcg_gen_and_vec(vece, d, n, t);
6218 tcg_temp_free_vec(t);
6219 } else {
6220 tcg_gen_shri_vec(vece, d, n, halfbits);
6221 tcg_gen_shli_vec(vece, d, d, shl);
6223 } else {
6224 if (shl == 0) {
6225 TCGv_vec t = tcg_temp_new_vec_matching(d);
6226 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6227 tcg_gen_and_vec(vece, d, n, t);
6228 tcg_temp_free_vec(t);
6229 } else {
6230 tcg_gen_shli_vec(vece, d, n, halfbits);
6231 tcg_gen_shri_vec(vece, d, d, halfbits - shl);
6236 static bool do_sve2_shll_tb(DisasContext *s, arg_rri_esz *a,
6237 bool sel, bool uns)
6239 static const TCGOpcode sshll_list[] = {
6240 INDEX_op_shli_vec, INDEX_op_sari_vec, 0
6242 static const TCGOpcode ushll_list[] = {
6243 INDEX_op_shli_vec, INDEX_op_shri_vec, 0
6245 static const GVecGen2i ops[2][3] = {
6246 { { .fniv = gen_sshll_vec,
6247 .opt_opc = sshll_list,
6248 .fno = gen_helper_sve2_sshll_h,
6249 .vece = MO_16 },
6250 { .fniv = gen_sshll_vec,
6251 .opt_opc = sshll_list,
6252 .fno = gen_helper_sve2_sshll_s,
6253 .vece = MO_32 },
6254 { .fniv = gen_sshll_vec,
6255 .opt_opc = sshll_list,
6256 .fno = gen_helper_sve2_sshll_d,
6257 .vece = MO_64 } },
6258 { { .fni8 = gen_ushll16_i64,
6259 .fniv = gen_ushll_vec,
6260 .opt_opc = ushll_list,
6261 .fno = gen_helper_sve2_ushll_h,
6262 .vece = MO_16 },
6263 { .fni8 = gen_ushll32_i64,
6264 .fniv = gen_ushll_vec,
6265 .opt_opc = ushll_list,
6266 .fno = gen_helper_sve2_ushll_s,
6267 .vece = MO_32 },
6268 { .fni8 = gen_ushll64_i64,
6269 .fniv = gen_ushll_vec,
6270 .opt_opc = ushll_list,
6271 .fno = gen_helper_sve2_ushll_d,
6272 .vece = MO_64 } },
6275 if (a->esz < 0 || a->esz > 2 || !dc_isar_feature(aa64_sve2, s)) {
6276 return false;
6278 if (sve_access_check(s)) {
6279 unsigned vsz = vec_full_reg_size(s);
6280 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6281 vec_full_reg_offset(s, a->rn),
6282 vsz, vsz, (a->imm << 1) | sel,
6283 &ops[uns][a->esz]);
6285 return true;
6288 static bool trans_SSHLLB(DisasContext *s, arg_rri_esz *a)
6290 return do_sve2_shll_tb(s, a, false, false);
6293 static bool trans_SSHLLT(DisasContext *s, arg_rri_esz *a)
6295 return do_sve2_shll_tb(s, a, true, false);
6298 static bool trans_USHLLB(DisasContext *s, arg_rri_esz *a)
6300 return do_sve2_shll_tb(s, a, false, true);
6303 static bool trans_USHLLT(DisasContext *s, arg_rri_esz *a)
6305 return do_sve2_shll_tb(s, a, true, true);
6308 static gen_helper_gvec_3 * const bext_fns[4] = {
6309 gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
6310 gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
6312 TRANS_FEAT(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6313 bext_fns[a->esz], a, 0)
6315 static gen_helper_gvec_3 * const bdep_fns[4] = {
6316 gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
6317 gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
6319 TRANS_FEAT(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6320 bdep_fns[a->esz], a, 0)
6322 static gen_helper_gvec_3 * const bgrp_fns[4] = {
6323 gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
6324 gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
6326 TRANS_FEAT(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6327 bgrp_fns[a->esz], a, 0)
6329 static gen_helper_gvec_3 * const cadd_fns[4] = {
6330 gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
6331 gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d,
6333 TRANS_FEAT(CADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
6334 cadd_fns[a->esz], a, 0)
6335 TRANS_FEAT(CADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
6336 cadd_fns[a->esz], a, 1)
6338 static gen_helper_gvec_3 * const sqcadd_fns[4] = {
6339 gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
6340 gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d,
6342 TRANS_FEAT(SQCADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
6343 sqcadd_fns[a->esz], a, 0)
6344 TRANS_FEAT(SQCADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
6345 sqcadd_fns[a->esz], a, 1)
6347 static gen_helper_gvec_4 * const sabal_fns[4] = {
6348 NULL, gen_helper_sve2_sabal_h,
6349 gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d,
6351 TRANS_FEAT(SABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 0)
6352 TRANS_FEAT(SABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 1)
6354 static gen_helper_gvec_4 * const uabal_fns[4] = {
6355 NULL, gen_helper_sve2_uabal_h,
6356 gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d,
6358 TRANS_FEAT(UABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 0)
6359 TRANS_FEAT(UABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 1)
6361 static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
6363 static gen_helper_gvec_4 * const fns[2] = {
6364 gen_helper_sve2_adcl_s,
6365 gen_helper_sve2_adcl_d,
6368 * Note that in this case the ESZ field encodes both size and sign.
6369 * Split out 'subtract' into bit 1 of the data field for the helper.
6371 return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel);
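/*
 * For example, ADCLT with a->esz == 3 selects gen_helper_sve2_adcl_d and
 * passes data == 2 | 1: the subtract bit taken from esz plus the
 * top-half selector.
 */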
6374 TRANS_FEAT(ADCLB, aa64_sve2, do_adcl, a, false)
6375 TRANS_FEAT(ADCLT, aa64_sve2, do_adcl, a, true)
6377 TRANS_FEAT(SSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ssra, a)
6378 TRANS_FEAT(USRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_usra, a)
6379 TRANS_FEAT(SRSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_srsra, a)
6380 TRANS_FEAT(URSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ursra, a)
6381 TRANS_FEAT(SRI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sri, a)
6382 TRANS_FEAT(SLI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sli, a)
6384 TRANS_FEAT(SABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_saba, a)
6385 TRANS_FEAT(UABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_uaba, a)
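/*
 * Expansion sketch for the saturating narrow-extract group below
 * (SQXTNB/T, UQXTNB/T, SQXTUNB/T): each wide element is clamped to the
 * half-width range with smax/smin/umin, then either written to the even
 * (bottom) half-elements with the odd halves zeroed, or shifted up by
 * halfbits and merged into the odd (top) half-elements via bitsel,
 * leaving the even halves of the destination intact (hence .load_dest
 * on the T forms).
 */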
6387 static bool do_sve2_narrow_extract(DisasContext *s, arg_rri_esz *a,
6388 const GVecGen2 ops[3])
6390 if (a->esz < 0 || a->esz > MO_32 || a->imm != 0 ||
6391 !dc_isar_feature(aa64_sve2, s)) {
6392 return false;
6394 if (sve_access_check(s)) {
6395 unsigned vsz = vec_full_reg_size(s);
6396 tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
6397 vec_full_reg_offset(s, a->rn),
6398 vsz, vsz, &ops[a->esz]);
6400 return true;
6403 static const TCGOpcode sqxtn_list[] = {
6404 INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
6407 static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6409 TCGv_vec t = tcg_temp_new_vec_matching(d);
6410 int halfbits = 4 << vece;
6411 int64_t mask = (1ull << halfbits) - 1;
6412 int64_t min = -1ull << (halfbits - 1);
6413 int64_t max = -min - 1;
6415 tcg_gen_dupi_vec(vece, t, min);
6416 tcg_gen_smax_vec(vece, d, n, t);
6417 tcg_gen_dupi_vec(vece, t, max);
6418 tcg_gen_smin_vec(vece, d, d, t);
6419 tcg_gen_dupi_vec(vece, t, mask);
6420 tcg_gen_and_vec(vece, d, d, t);
6421 tcg_temp_free_vec(t);
6424 static bool trans_SQXTNB(DisasContext *s, arg_rri_esz *a)
6426 static const GVecGen2 ops[3] = {
6427 { .fniv = gen_sqxtnb_vec,
6428 .opt_opc = sqxtn_list,
6429 .fno = gen_helper_sve2_sqxtnb_h,
6430 .vece = MO_16 },
6431 { .fniv = gen_sqxtnb_vec,
6432 .opt_opc = sqxtn_list,
6433 .fno = gen_helper_sve2_sqxtnb_s,
6434 .vece = MO_32 },
6435 { .fniv = gen_sqxtnb_vec,
6436 .opt_opc = sqxtn_list,
6437 .fno = gen_helper_sve2_sqxtnb_d,
6438 .vece = MO_64 },
6440 return do_sve2_narrow_extract(s, a, ops);
6443 static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6445 TCGv_vec t = tcg_temp_new_vec_matching(d);
6446 int halfbits = 4 << vece;
6447 int64_t mask = (1ull << halfbits) - 1;
6448 int64_t min = -1ull << (halfbits - 1);
6449 int64_t max = -min - 1;
6451 tcg_gen_dupi_vec(vece, t, min);
6452 tcg_gen_smax_vec(vece, n, n, t);
6453 tcg_gen_dupi_vec(vece, t, max);
6454 tcg_gen_smin_vec(vece, n, n, t);
6455 tcg_gen_shli_vec(vece, n, n, halfbits);
6456 tcg_gen_dupi_vec(vece, t, mask);
6457 tcg_gen_bitsel_vec(vece, d, t, d, n);
6458 tcg_temp_free_vec(t);
6461 static bool trans_SQXTNT(DisasContext *s, arg_rri_esz *a)
6463 static const GVecGen2 ops[3] = {
6464 { .fniv = gen_sqxtnt_vec,
6465 .opt_opc = sqxtn_list,
6466 .load_dest = true,
6467 .fno = gen_helper_sve2_sqxtnt_h,
6468 .vece = MO_16 },
6469 { .fniv = gen_sqxtnt_vec,
6470 .opt_opc = sqxtn_list,
6471 .load_dest = true,
6472 .fno = gen_helper_sve2_sqxtnt_s,
6473 .vece = MO_32 },
6474 { .fniv = gen_sqxtnt_vec,
6475 .opt_opc = sqxtn_list,
6476 .load_dest = true,
6477 .fno = gen_helper_sve2_sqxtnt_d,
6478 .vece = MO_64 },
6480 return do_sve2_narrow_extract(s, a, ops);
6483 static const TCGOpcode uqxtn_list[] = {
6484 INDEX_op_shli_vec, INDEX_op_umin_vec, 0
6487 static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6489 TCGv_vec t = tcg_temp_new_vec_matching(d);
6490 int halfbits = 4 << vece;
6491 int64_t max = (1ull << halfbits) - 1;
6493 tcg_gen_dupi_vec(vece, t, max);
6494 tcg_gen_umin_vec(vece, d, n, t);
6495 tcg_temp_free_vec(t);
6498 static bool trans_UQXTNB(DisasContext *s, arg_rri_esz *a)
6500 static const GVecGen2 ops[3] = {
6501 { .fniv = gen_uqxtnb_vec,
6502 .opt_opc = uqxtn_list,
6503 .fno = gen_helper_sve2_uqxtnb_h,
6504 .vece = MO_16 },
6505 { .fniv = gen_uqxtnb_vec,
6506 .opt_opc = uqxtn_list,
6507 .fno = gen_helper_sve2_uqxtnb_s,
6508 .vece = MO_32 },
6509 { .fniv = gen_uqxtnb_vec,
6510 .opt_opc = uqxtn_list,
6511 .fno = gen_helper_sve2_uqxtnb_d,
6512 .vece = MO_64 },
6514 return do_sve2_narrow_extract(s, a, ops);
6517 static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6519 TCGv_vec t = tcg_temp_new_vec_matching(d);
6520 int halfbits = 4 << vece;
6521 int64_t max = (1ull << halfbits) - 1;
6523 tcg_gen_dupi_vec(vece, t, max);
6524 tcg_gen_umin_vec(vece, n, n, t);
6525 tcg_gen_shli_vec(vece, n, n, halfbits);
6526 tcg_gen_bitsel_vec(vece, d, t, d, n);
6527 tcg_temp_free_vec(t);
6530 static bool trans_UQXTNT(DisasContext *s, arg_rri_esz *a)
6532 static const GVecGen2 ops[3] = {
6533 { .fniv = gen_uqxtnt_vec,
6534 .opt_opc = uqxtn_list,
6535 .load_dest = true,
6536 .fno = gen_helper_sve2_uqxtnt_h,
6537 .vece = MO_16 },
6538 { .fniv = gen_uqxtnt_vec,
6539 .opt_opc = uqxtn_list,
6540 .load_dest = true,
6541 .fno = gen_helper_sve2_uqxtnt_s,
6542 .vece = MO_32 },
6543 { .fniv = gen_uqxtnt_vec,
6544 .opt_opc = uqxtn_list,
6545 .load_dest = true,
6546 .fno = gen_helper_sve2_uqxtnt_d,
6547 .vece = MO_64 },
6549 return do_sve2_narrow_extract(s, a, ops);
6552 static const TCGOpcode sqxtun_list[] = {
6553 INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
6556 static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6558 TCGv_vec t = tcg_temp_new_vec_matching(d);
6559 int halfbits = 4 << vece;
6560 int64_t max = (1ull << halfbits) - 1;
6562 tcg_gen_dupi_vec(vece, t, 0);
6563 tcg_gen_smax_vec(vece, d, n, t);
6564 tcg_gen_dupi_vec(vece, t, max);
6565 tcg_gen_umin_vec(vece, d, d, t);
6566 tcg_temp_free_vec(t);
6569 static bool trans_SQXTUNB(DisasContext *s, arg_rri_esz *a)
6571 static const GVecGen2 ops[3] = {
6572 { .fniv = gen_sqxtunb_vec,
6573 .opt_opc = sqxtun_list,
6574 .fno = gen_helper_sve2_sqxtunb_h,
6575 .vece = MO_16 },
6576 { .fniv = gen_sqxtunb_vec,
6577 .opt_opc = sqxtun_list,
6578 .fno = gen_helper_sve2_sqxtunb_s,
6579 .vece = MO_32 },
6580 { .fniv = gen_sqxtunb_vec,
6581 .opt_opc = sqxtun_list,
6582 .fno = gen_helper_sve2_sqxtunb_d,
6583 .vece = MO_64 },
6585 return do_sve2_narrow_extract(s, a, ops);
6588 static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6590 TCGv_vec t = tcg_temp_new_vec_matching(d);
6591 int halfbits = 4 << vece;
6592 int64_t max = (1ull << halfbits) - 1;
6594 tcg_gen_dupi_vec(vece, t, 0);
6595 tcg_gen_smax_vec(vece, n, n, t);
6596 tcg_gen_dupi_vec(vece, t, max);
6597 tcg_gen_umin_vec(vece, n, n, t);
6598 tcg_gen_shli_vec(vece, n, n, halfbits);
6599 tcg_gen_bitsel_vec(vece, d, t, d, n);
6600 tcg_temp_free_vec(t);
6603 static bool trans_SQXTUNT(DisasContext *s, arg_rri_esz *a)
6605 static const GVecGen2 ops[3] = {
6606 { .fniv = gen_sqxtunt_vec,
6607 .opt_opc = sqxtun_list,
6608 .load_dest = true,
6609 .fno = gen_helper_sve2_sqxtunt_h,
6610 .vece = MO_16 },
6611 { .fniv = gen_sqxtunt_vec,
6612 .opt_opc = sqxtun_list,
6613 .load_dest = true,
6614 .fno = gen_helper_sve2_sqxtunt_s,
6615 .vece = MO_32 },
6616 { .fniv = gen_sqxtunt_vec,
6617 .opt_opc = sqxtun_list,
6618 .load_dest = true,
6619 .fno = gen_helper_sve2_sqxtunt_d,
6620 .vece = MO_64 },
6622 return do_sve2_narrow_extract(s, a, ops);
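/*
 * The shift-right-narrow group below follows the same bottom/top pattern:
 * the B forms shift each wide element right (saturating where required)
 * and keep only the low half of the result, while the T forms move the
 * surviving bits up into the odd half-elements and merge them with the
 * existing destination.  Only the non-rounding forms get inline vector
 * expansions; the rounding variants (RSHRN, SQRSHRUN, SQRSHRN, UQRSHRN)
 * use the out-of-line helpers alone.
 */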
6625 static bool do_sve2_shr_narrow(DisasContext *s, arg_rri_esz *a,
6626 const GVecGen2i ops[3])
6628 if (a->esz < 0 || a->esz > MO_32 || !dc_isar_feature(aa64_sve2, s)) {
6629 return false;
6631 assert(a->imm > 0 && a->imm <= (8 << a->esz));
6632 if (sve_access_check(s)) {
6633 unsigned vsz = vec_full_reg_size(s);
6634 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6635 vec_full_reg_offset(s, a->rn),
6636 vsz, vsz, a->imm, &ops[a->esz]);
6638 return true;
6641 static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
6643 int halfbits = 4 << vece;
6644 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
6646 tcg_gen_shri_i64(d, n, shr);
6647 tcg_gen_andi_i64(d, d, mask);
6650 static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6652 gen_shrnb_i64(MO_16, d, n, shr);
6655 static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6657 gen_shrnb_i64(MO_32, d, n, shr);
6660 static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6662 gen_shrnb_i64(MO_64, d, n, shr);
6665 static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
6667 TCGv_vec t = tcg_temp_new_vec_matching(d);
6668 int halfbits = 4 << vece;
6669 uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
6671 tcg_gen_shri_vec(vece, n, n, shr);
6672 tcg_gen_dupi_vec(vece, t, mask);
6673 tcg_gen_and_vec(vece, d, n, t);
6674 tcg_temp_free_vec(t);
6677 static bool trans_SHRNB(DisasContext *s, arg_rri_esz *a)
6679 static const TCGOpcode vec_list[] = { INDEX_op_shri_vec, 0 };
6680 static const GVecGen2i ops[3] = {
6681 { .fni8 = gen_shrnb16_i64,
6682 .fniv = gen_shrnb_vec,
6683 .opt_opc = vec_list,
6684 .fno = gen_helper_sve2_shrnb_h,
6685 .vece = MO_16 },
6686 { .fni8 = gen_shrnb32_i64,
6687 .fniv = gen_shrnb_vec,
6688 .opt_opc = vec_list,
6689 .fno = gen_helper_sve2_shrnb_s,
6690 .vece = MO_32 },
6691 { .fni8 = gen_shrnb64_i64,
6692 .fniv = gen_shrnb_vec,
6693 .opt_opc = vec_list,
6694 .fno = gen_helper_sve2_shrnb_d,
6695 .vece = MO_64 },
6697 return do_sve2_shr_narrow(s, a, ops);
6700 static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
6702 int halfbits = 4 << vece;
6703 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
6705 tcg_gen_shli_i64(n, n, halfbits - shr);
6706 tcg_gen_andi_i64(n, n, ~mask);
6707 tcg_gen_andi_i64(d, d, mask);
6708 tcg_gen_or_i64(d, d, n);
6711 static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6713 gen_shrnt_i64(MO_16, d, n, shr);
6716 static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6718 gen_shrnt_i64(MO_32, d, n, shr);
6721 static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6723 tcg_gen_shri_i64(n, n, shr);
6724 tcg_gen_deposit_i64(d, d, n, 32, 32);
6727 static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
6729 TCGv_vec t = tcg_temp_new_vec_matching(d);
6730 int halfbits = 4 << vece;
6731 uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
6733 tcg_gen_shli_vec(vece, n, n, halfbits - shr);
6734 tcg_gen_dupi_vec(vece, t, mask);
6735 tcg_gen_bitsel_vec(vece, d, t, d, n);
6736 tcg_temp_free_vec(t);
6739 static bool trans_SHRNT(DisasContext *s, arg_rri_esz *a)
6741 static const TCGOpcode vec_list[] = { INDEX_op_shli_vec, 0 };
6742 static const GVecGen2i ops[3] = {
6743 { .fni8 = gen_shrnt16_i64,
6744 .fniv = gen_shrnt_vec,
6745 .opt_opc = vec_list,
6746 .load_dest = true,
6747 .fno = gen_helper_sve2_shrnt_h,
6748 .vece = MO_16 },
6749 { .fni8 = gen_shrnt32_i64,
6750 .fniv = gen_shrnt_vec,
6751 .opt_opc = vec_list,
6752 .load_dest = true,
6753 .fno = gen_helper_sve2_shrnt_s,
6754 .vece = MO_32 },
6755 { .fni8 = gen_shrnt64_i64,
6756 .fniv = gen_shrnt_vec,
6757 .opt_opc = vec_list,
6758 .load_dest = true,
6759 .fno = gen_helper_sve2_shrnt_d,
6760 .vece = MO_64 },
6762 return do_sve2_shr_narrow(s, a, ops);
6765 static bool trans_RSHRNB(DisasContext *s, arg_rri_esz *a)
6767 static const GVecGen2i ops[3] = {
6768 { .fno = gen_helper_sve2_rshrnb_h },
6769 { .fno = gen_helper_sve2_rshrnb_s },
6770 { .fno = gen_helper_sve2_rshrnb_d },
6772 return do_sve2_shr_narrow(s, a, ops);
6775 static bool trans_RSHRNT(DisasContext *s, arg_rri_esz *a)
6777 static const GVecGen2i ops[3] = {
6778 { .fno = gen_helper_sve2_rshrnt_h },
6779 { .fno = gen_helper_sve2_rshrnt_s },
6780 { .fno = gen_helper_sve2_rshrnt_d },
6782 return do_sve2_shr_narrow(s, a, ops);
6785 static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
6786 TCGv_vec n, int64_t shr)
6788 TCGv_vec t = tcg_temp_new_vec_matching(d);
6789 int halfbits = 4 << vece;
6791 tcg_gen_sari_vec(vece, n, n, shr);
6792 tcg_gen_dupi_vec(vece, t, 0);
6793 tcg_gen_smax_vec(vece, n, n, t);
6794 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6795 tcg_gen_umin_vec(vece, d, n, t);
6796 tcg_temp_free_vec(t);
6799 static bool trans_SQSHRUNB(DisasContext *s, arg_rri_esz *a)
6801 static const TCGOpcode vec_list[] = {
6802 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
6804 static const GVecGen2i ops[3] = {
6805 { .fniv = gen_sqshrunb_vec,
6806 .opt_opc = vec_list,
6807 .fno = gen_helper_sve2_sqshrunb_h,
6808 .vece = MO_16 },
6809 { .fniv = gen_sqshrunb_vec,
6810 .opt_opc = vec_list,
6811 .fno = gen_helper_sve2_sqshrunb_s,
6812 .vece = MO_32 },
6813 { .fniv = gen_sqshrunb_vec,
6814 .opt_opc = vec_list,
6815 .fno = gen_helper_sve2_sqshrunb_d,
6816 .vece = MO_64 },
6818 return do_sve2_shr_narrow(s, a, ops);
6821 static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
6822 TCGv_vec n, int64_t shr)
6824 TCGv_vec t = tcg_temp_new_vec_matching(d);
6825 int halfbits = 4 << vece;
6827 tcg_gen_sari_vec(vece, n, n, shr);
6828 tcg_gen_dupi_vec(vece, t, 0);
6829 tcg_gen_smax_vec(vece, n, n, t);
6830 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6831 tcg_gen_umin_vec(vece, n, n, t);
6832 tcg_gen_shli_vec(vece, n, n, halfbits);
6833 tcg_gen_bitsel_vec(vece, d, t, d, n);
6834 tcg_temp_free_vec(t);
6837 static bool trans_SQSHRUNT(DisasContext *s, arg_rri_esz *a)
6839 static const TCGOpcode vec_list[] = {
6840 INDEX_op_shli_vec, INDEX_op_sari_vec,
6841 INDEX_op_smax_vec, INDEX_op_umin_vec, 0
6843 static const GVecGen2i ops[3] = {
6844 { .fniv = gen_sqshrunt_vec,
6845 .opt_opc = vec_list,
6846 .load_dest = true,
6847 .fno = gen_helper_sve2_sqshrunt_h,
6848 .vece = MO_16 },
6849 { .fniv = gen_sqshrunt_vec,
6850 .opt_opc = vec_list,
6851 .load_dest = true,
6852 .fno = gen_helper_sve2_sqshrunt_s,
6853 .vece = MO_32 },
6854 { .fniv = gen_sqshrunt_vec,
6855 .opt_opc = vec_list,
6856 .load_dest = true,
6857 .fno = gen_helper_sve2_sqshrunt_d,
6858 .vece = MO_64 },
6860 return do_sve2_shr_narrow(s, a, ops);
6863 static bool trans_SQRSHRUNB(DisasContext *s, arg_rri_esz *a)
6865 static const GVecGen2i ops[3] = {
6866 { .fno = gen_helper_sve2_sqrshrunb_h },
6867 { .fno = gen_helper_sve2_sqrshrunb_s },
6868 { .fno = gen_helper_sve2_sqrshrunb_d },
6870 return do_sve2_shr_narrow(s, a, ops);
6873 static bool trans_SQRSHRUNT(DisasContext *s, arg_rri_esz *a)
6875 static const GVecGen2i ops[3] = {
6876 { .fno = gen_helper_sve2_sqrshrunt_h },
6877 { .fno = gen_helper_sve2_sqrshrunt_s },
6878 { .fno = gen_helper_sve2_sqrshrunt_d },
6880 return do_sve2_shr_narrow(s, a, ops);
6883 static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
6884 TCGv_vec n, int64_t shr)
6886 TCGv_vec t = tcg_temp_new_vec_matching(d);
6887 int halfbits = 4 << vece;
6888 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
6889 int64_t min = -max - 1;
6891 tcg_gen_sari_vec(vece, n, n, shr);
6892 tcg_gen_dupi_vec(vece, t, min);
6893 tcg_gen_smax_vec(vece, n, n, t);
6894 tcg_gen_dupi_vec(vece, t, max);
6895 tcg_gen_smin_vec(vece, n, n, t);
6896 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6897 tcg_gen_and_vec(vece, d, n, t);
6898 tcg_temp_free_vec(t);
6901 static bool trans_SQSHRNB(DisasContext *s, arg_rri_esz *a)
6903 static const TCGOpcode vec_list[] = {
6904 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
6906 static const GVecGen2i ops[3] = {
6907 { .fniv = gen_sqshrnb_vec,
6908 .opt_opc = vec_list,
6909 .fno = gen_helper_sve2_sqshrnb_h,
6910 .vece = MO_16 },
6911 { .fniv = gen_sqshrnb_vec,
6912 .opt_opc = vec_list,
6913 .fno = gen_helper_sve2_sqshrnb_s,
6914 .vece = MO_32 },
6915 { .fniv = gen_sqshrnb_vec,
6916 .opt_opc = vec_list,
6917 .fno = gen_helper_sve2_sqshrnb_d,
6918 .vece = MO_64 },
6920 return do_sve2_shr_narrow(s, a, ops);
6923 static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
6924 TCGv_vec n, int64_t shr)
6926 TCGv_vec t = tcg_temp_new_vec_matching(d);
6927 int halfbits = 4 << vece;
6928 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
6929 int64_t min = -max - 1;
6931 tcg_gen_sari_vec(vece, n, n, shr);
6932 tcg_gen_dupi_vec(vece, t, min);
6933 tcg_gen_smax_vec(vece, n, n, t);
6934 tcg_gen_dupi_vec(vece, t, max);
6935 tcg_gen_smin_vec(vece, n, n, t);
6936 tcg_gen_shli_vec(vece, n, n, halfbits);
6937 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6938 tcg_gen_bitsel_vec(vece, d, t, d, n);
6939 tcg_temp_free_vec(t);
6942 static bool trans_SQSHRNT(DisasContext *s, arg_rri_esz *a)
6944 static const TCGOpcode vec_list[] = {
6945 INDEX_op_shli_vec, INDEX_op_sari_vec,
6946 INDEX_op_smax_vec, INDEX_op_smin_vec, 0
6948 static const GVecGen2i ops[3] = {
6949 { .fniv = gen_sqshrnt_vec,
6950 .opt_opc = vec_list,
6951 .load_dest = true,
6952 .fno = gen_helper_sve2_sqshrnt_h,
6953 .vece = MO_16 },
6954 { .fniv = gen_sqshrnt_vec,
6955 .opt_opc = vec_list,
6956 .load_dest = true,
6957 .fno = gen_helper_sve2_sqshrnt_s,
6958 .vece = MO_32 },
6959 { .fniv = gen_sqshrnt_vec,
6960 .opt_opc = vec_list,
6961 .load_dest = true,
6962 .fno = gen_helper_sve2_sqshrnt_d,
6963 .vece = MO_64 },
6965 return do_sve2_shr_narrow(s, a, ops);
6968 static bool trans_SQRSHRNB(DisasContext *s, arg_rri_esz *a)
6970 static const GVecGen2i ops[3] = {
6971 { .fno = gen_helper_sve2_sqrshrnb_h },
6972 { .fno = gen_helper_sve2_sqrshrnb_s },
6973 { .fno = gen_helper_sve2_sqrshrnb_d },
6975 return do_sve2_shr_narrow(s, a, ops);
6978 static bool trans_SQRSHRNT(DisasContext *s, arg_rri_esz *a)
6980 static const GVecGen2i ops[3] = {
6981 { .fno = gen_helper_sve2_sqrshrnt_h },
6982 { .fno = gen_helper_sve2_sqrshrnt_s },
6983 { .fno = gen_helper_sve2_sqrshrnt_d },
6985 return do_sve2_shr_narrow(s, a, ops);
6988 static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
6989 TCGv_vec n, int64_t shr)
6991 TCGv_vec t = tcg_temp_new_vec_matching(d);
6992 int halfbits = 4 << vece;
6994 tcg_gen_shri_vec(vece, n, n, shr);
6995 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6996 tcg_gen_umin_vec(vece, d, n, t);
6997 tcg_temp_free_vec(t);
7000 static bool trans_UQSHRNB(DisasContext *s, arg_rri_esz *a)
7002 static const TCGOpcode vec_list[] = {
7003 INDEX_op_shri_vec, INDEX_op_umin_vec, 0
7005 static const GVecGen2i ops[3] = {
7006 { .fniv = gen_uqshrnb_vec,
7007 .opt_opc = vec_list,
7008 .fno = gen_helper_sve2_uqshrnb_h,
7009 .vece = MO_16 },
7010 { .fniv = gen_uqshrnb_vec,
7011 .opt_opc = vec_list,
7012 .fno = gen_helper_sve2_uqshrnb_s,
7013 .vece = MO_32 },
7014 { .fniv = gen_uqshrnb_vec,
7015 .opt_opc = vec_list,
7016 .fno = gen_helper_sve2_uqshrnb_d,
7017 .vece = MO_64 },
7019 return do_sve2_shr_narrow(s, a, ops);
7022 static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
7023 TCGv_vec n, int64_t shr)
7025 TCGv_vec t = tcg_temp_new_vec_matching(d);
7026 int halfbits = 4 << vece;
7028 tcg_gen_shri_vec(vece, n, n, shr);
7029 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7030 tcg_gen_umin_vec(vece, n, n, t);
7031 tcg_gen_shli_vec(vece, n, n, halfbits);
7032 tcg_gen_bitsel_vec(vece, d, t, d, n);
7033 tcg_temp_free_vec(t);
7036 static bool trans_UQSHRNT(DisasContext *s, arg_rri_esz *a)
7038 static const TCGOpcode vec_list[] = {
7039 INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
7041 static const GVecGen2i ops[3] = {
7042 { .fniv = gen_uqshrnt_vec,
7043 .opt_opc = vec_list,
7044 .load_dest = true,
7045 .fno = gen_helper_sve2_uqshrnt_h,
7046 .vece = MO_16 },
7047 { .fniv = gen_uqshrnt_vec,
7048 .opt_opc = vec_list,
7049 .load_dest = true,
7050 .fno = gen_helper_sve2_uqshrnt_s,
7051 .vece = MO_32 },
7052 { .fniv = gen_uqshrnt_vec,
7053 .opt_opc = vec_list,
7054 .load_dest = true,
7055 .fno = gen_helper_sve2_uqshrnt_d,
7056 .vece = MO_64 },
7058 return do_sve2_shr_narrow(s, a, ops);
7061 static bool trans_UQRSHRNB(DisasContext *s, arg_rri_esz *a)
7063 static const GVecGen2i ops[3] = {
7064 { .fno = gen_helper_sve2_uqrshrnb_h },
7065 { .fno = gen_helper_sve2_uqrshrnb_s },
7066 { .fno = gen_helper_sve2_uqrshrnb_d },
7068 return do_sve2_shr_narrow(s, a, ops);
7071 static bool trans_UQRSHRNT(DisasContext *s, arg_rri_esz *a)
7073 static const GVecGen2i ops[3] = {
7074 { .fno = gen_helper_sve2_uqrshrnt_h },
7075 { .fno = gen_helper_sve2_uqrshrnt_s },
7076 { .fno = gen_helper_sve2_uqrshrnt_d },
7078 return do_sve2_shr_narrow(s, a, ops);
7081 #define DO_SVE2_ZZZ_NARROW(NAME, name) \
7082 static gen_helper_gvec_3 * const name##_fns[4] = { \
7083 NULL, gen_helper_sve2_##name##_h, \
7084 gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
7085 }; \
7086 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzz, \
7087 name##_fns[a->esz], a, 0)
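/*
 * For instance, DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb) expands to an
 * addhnb_fns table of { NULL, gen_helper_sve2_addhnb_h,
 * gen_helper_sve2_addhnb_s, gen_helper_sve2_addhnb_d } plus the
 * corresponding TRANS_FEAT(ADDHNB, ...) line.
 */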
7089 DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb)
7090 DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
7091 DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
7092 DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)
7094 DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
7095 DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
7096 DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb)
7097 DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
7099 static gen_helper_gvec_flags_4 * const match_fns[4] = {
7100 gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
7102 TRANS_FEAT(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
7104 static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
7105 gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
7107 TRANS_FEAT(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
7109 static gen_helper_gvec_4 * const histcnt_fns[4] = {
7110 NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
7112 TRANS_FEAT(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
7113 histcnt_fns[a->esz], a, 0)
7115 TRANS_FEAT(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
7116 a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
7118 static bool do_sve2_zpzz_fp(DisasContext *s, arg_rprr_esz *a,
7119 gen_helper_gvec_4_ptr *fn)
7121 if (!dc_isar_feature(aa64_sve2, s)) {
7122 return false;
7124 return do_zpzz_fp(s, a, fn);
7127 #define DO_SVE2_ZPZZ_FP(NAME, name) \
7128 static bool trans_##NAME(DisasContext *s, arg_rprr_esz *a) \
7130 static gen_helper_gvec_4_ptr * const fns[4] = { \
7131 NULL, gen_helper_sve2_##name##_zpzz_h, \
7132 gen_helper_sve2_##name##_zpzz_s, gen_helper_sve2_##name##_zpzz_d \
7133 }; \
7134 return do_sve2_zpzz_fp(s, a, fns[a->esz]); \
7137 DO_SVE2_ZPZZ_FP(FADDP, faddp)
7138 DO_SVE2_ZPZZ_FP(FMAXNMP, fmaxnmp)
7139 DO_SVE2_ZPZZ_FP(FMINNMP, fminnmp)
7140 DO_SVE2_ZPZZ_FP(FMAXP, fmaxp)
7141 DO_SVE2_ZPZZ_FP(FMINP, fminp)
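/*
 * Each use above expands to a trans_* function that indexes a
 * half/single/double helper table by a->esz (NULL for byte) and funnels
 * through do_sve2_zpzz_fp, which layers the SVE2 feature check on top of
 * do_zpzz_fp.
 */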
7144 * SVE Integer Multiply-Add (unpredicated)
7147 TRANS_FEAT(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_s,
7148 a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
7149 TRANS_FEAT(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz, gen_helper_fmmla_d,
7150 a->rd, a->rn, a->rm, a->ra, 0, FPST_FPCR)
7152 static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
7153 NULL, gen_helper_sve2_sqdmlal_zzzw_h,
7154 gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d,
7156 TRANS_FEAT(SQDMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7157 sqdmlal_zzzw_fns[a->esz], a, 0)
7158 TRANS_FEAT(SQDMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7159 sqdmlal_zzzw_fns[a->esz], a, 3)
7160 TRANS_FEAT(SQDMLALBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
7161 sqdmlal_zzzw_fns[a->esz], a, 2)
7163 static gen_helper_gvec_4 * const sqdmlsl_zzzw_fns[] = {
7164 NULL, gen_helper_sve2_sqdmlsl_zzzw_h,
7165 gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d,
7167 TRANS_FEAT(SQDMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7168 sqdmlsl_zzzw_fns[a->esz], a, 0)
7169 TRANS_FEAT(SQDMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7170 sqdmlsl_zzzw_fns[a->esz], a, 3)
7171 TRANS_FEAT(SQDMLSLBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
7172 sqdmlsl_zzzw_fns[a->esz], a, 2)
7174 static gen_helper_gvec_4 * const sqrdmlah_fns[] = {
7175 gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h,
7176 gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d,
7178 TRANS_FEAT(SQRDMLAH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
7179 sqrdmlah_fns[a->esz], a, 0)
7181 static gen_helper_gvec_4 * const sqrdmlsh_fns[] = {
7182 gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h,
7183 gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d,
7185 TRANS_FEAT(SQRDMLSH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
7186 sqrdmlsh_fns[a->esz], a, 0)
7188 static gen_helper_gvec_4 * const smlal_zzzw_fns[] = {
7189 NULL, gen_helper_sve2_smlal_zzzw_h,
7190 gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d,
7192 TRANS_FEAT(SMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7193 smlal_zzzw_fns[a->esz], a, 0)
7194 TRANS_FEAT(SMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7195 smlal_zzzw_fns[a->esz], a, 1)
7197 static gen_helper_gvec_4 * const umlal_zzzw_fns[] = {
7198 NULL, gen_helper_sve2_umlal_zzzw_h,
7199 gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d,
7201 TRANS_FEAT(UMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7202 umlal_zzzw_fns[a->esz], a, 0)
7203 TRANS_FEAT(UMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7204 umlal_zzzw_fns[a->esz], a, 1)
7206 static gen_helper_gvec_4 * const smlsl_zzzw_fns[] = {
7207 NULL, gen_helper_sve2_smlsl_zzzw_h,
7208 gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d,
7210 TRANS_FEAT(SMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7211 smlsl_zzzw_fns[a->esz], a, 0)
7212 TRANS_FEAT(SMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7213 smlsl_zzzw_fns[a->esz], a, 1)
7215 static gen_helper_gvec_4 * const umlsl_zzzw_fns[] = {
7216 NULL, gen_helper_sve2_umlsl_zzzw_h,
7217 gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d,
7219 TRANS_FEAT(UMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7220 umlsl_zzzw_fns[a->esz], a, 0)
7221 TRANS_FEAT(UMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7222 umlsl_zzzw_fns[a->esz], a, 1)
7224 static gen_helper_gvec_4 * const cmla_fns[] = {
7225 gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
7226 gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
7228 TRANS_FEAT(CMLA_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7229 cmla_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7231 static gen_helper_gvec_4 * const cdot_fns[] = {
7232 NULL, NULL, gen_helper_sve2_cdot_zzzz_s, gen_helper_sve2_cdot_zzzz_d
7234 TRANS_FEAT(CDOT_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7235 cdot_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7237 static gen_helper_gvec_4 * const sqrdcmlah_fns[] = {
7238 gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
7239 gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
7241 TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7242 sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7244 TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7245 a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
7247 TRANS_FEAT(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
7248 gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
7250 TRANS_FEAT(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
7251 gen_helper_crypto_aese, a, false)
7252 TRANS_FEAT(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
7253 gen_helper_crypto_aese, a, true)
7255 TRANS_FEAT(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
7256 gen_helper_crypto_sm4e, a, 0)
7257 TRANS_FEAT(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
7258 gen_helper_crypto_sm4ekey, a, 0)
7260 TRANS_FEAT(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz, gen_gvec_rax1, a)
7262 TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
7263 gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
7264 TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz,
7265 gen_helper_sve2_fcvtnt_ds, a, 0, FPST_FPCR)
7267 TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz,
7268 gen_helper_sve_bfcvtnt, a, 0, FPST_FPCR)
7270 TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz,
7271 gen_helper_sve2_fcvtlt_hs, a, 0, FPST_FPCR)
7272 TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz,
7273 gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR)
7275 TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a,
7276 float_round_to_odd, gen_helper_sve_fcvt_ds)
7277 TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a,
7278 float_round_to_odd, gen_helper_sve2_fcvtnt_ds)
7280 static gen_helper_gvec_3_ptr * const flogb_fns[] = {
7281 NULL, gen_helper_flogb_h,
7282 gen_helper_flogb_s, gen_helper_flogb_d
7284 TRANS_FEAT(FLOGB, aa64_sve2, gen_gvec_fpst_arg_zpz, flogb_fns[a->esz],
7285 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
7287 static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
7289 if (!dc_isar_feature(aa64_sve2, s)) {
7290 return false;
7292 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s,
7293 a->rd, a->rn, a->rm, a->ra,
7294 (sel << 1) | sub, cpu_env);
7297 static bool trans_FMLALB_zzzw(DisasContext *s, arg_rrrr_esz *a)
7299 return do_FMLAL_zzzw(s, a, false, false);
7302 static bool trans_FMLALT_zzzw(DisasContext *s, arg_rrrr_esz *a)
7304 return do_FMLAL_zzzw(s, a, false, true);
7307 static bool trans_FMLSLB_zzzw(DisasContext *s, arg_rrrr_esz *a)
7309 return do_FMLAL_zzzw(s, a, true, false);
7312 static bool trans_FMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
7314 return do_FMLAL_zzzw(s, a, true, true);
7317 static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
7319 if (!dc_isar_feature(aa64_sve2, s)) {
7320 return false;
7322 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s,
7323 a->rd, a->rn, a->rm, a->ra,
7324 (a->index << 2) | (sel << 1) | sub, cpu_env);
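/*
 * In both FMLAL expanders above, the helper's data word is a small
 * bitfield: bit 0 selects subtraction (the FMLSL forms), bit 1 selects
 * the top half-elements, and in the indexed form the element index
 * occupies the bits from bit 2 upward.
 */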
7327 static bool trans_FMLALB_zzxw(DisasContext *s, arg_rrxr_esz *a)
7329 return do_FMLAL_zzxw(s, a, false, false);
7332 static bool trans_FMLALT_zzxw(DisasContext *s, arg_rrxr_esz *a)
7334 return do_FMLAL_zzxw(s, a, false, true);
7337 static bool trans_FMLSLB_zzxw(DisasContext *s, arg_rrxr_esz *a)
7339 return do_FMLAL_zzxw(s, a, true, false);
7342 static bool trans_FMLSLT_zzxw(DisasContext *s, arg_rrxr_esz *a)
7344 return do_FMLAL_zzxw(s, a, true, true);
7347 TRANS_FEAT(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7348 gen_helper_gvec_smmla_b, a, 0)
7349 TRANS_FEAT(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7350 gen_helper_gvec_usmmla_b, a, 0)
7351 TRANS_FEAT(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7352 gen_helper_gvec_ummla_b, a, 0)
7354 TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
7355 gen_helper_gvec_bfdot, a, 0)
7356 TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
7357 gen_helper_gvec_bfdot_idx, a)
7359 TRANS_FEAT(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
7360 gen_helper_gvec_bfmmla, a, 0)
7362 static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
7364 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal,
7365 a->rd, a->rn, a->rm, a->ra, sel, FPST_FPCR);
7368 TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false)
7369 TRANS_FEAT(BFMLALT_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, true)
7371 static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
7373 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx,
7374 a->rd, a->rn, a->rm, a->ra,
7375 (a->index << 1) | sel, FPST_FPCR);
7378 TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
7379 TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)