/*
 * AArch64 SVE translation
 *
 * Copyright (c) 2018 Linaro, Ltd
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "tcg/tcg-gvec-desc.h"
#include "qemu/log.h"
#include "arm_ldst.h"
#include "translate.h"
#include "internals.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/log.h"
#include "translate-a64.h"
#include "fpu/softfloat.h"

typedef void GVecGen2sFn(unsigned, uint32_t, uint32_t,
                         TCGv_i64, uint32_t, uint32_t);

typedef void gen_helper_gvec_flags_3(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_i32);
typedef void gen_helper_gvec_flags_4(TCGv_i32, TCGv_ptr, TCGv_ptr,
                                     TCGv_ptr, TCGv_ptr, TCGv_i32);

typedef void gen_helper_gvec_mem(TCGv_env, TCGv_ptr, TCGv_i64, TCGv_i32);
typedef void gen_helper_gvec_mem_scatter(TCGv_env, TCGv_ptr, TCGv_ptr,
                                         TCGv_ptr, TCGv_i64, TCGv_i32);

/*
 * Helpers for extracting complex instruction fields.
 */

/* See e.g. ASR (immediate, predicated).
 * Returns -1 for unallocated encoding; diagnose later.
 */
static int tszimm_esz(DisasContext *s, int x)
{
    x >>= 3;  /* discard imm3 */
    return 31 - clz32(x);
}

static int tszimm_shr(DisasContext *s, int x)
{
    return (16 << tszimm_esz(s, x)) - x;
}

/* See e.g. LSL (immediate, predicated). */
static int tszimm_shl(DisasContext *s, int x)
{
    return x - (8 << tszimm_esz(s, x));
}

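/*
 * Worked example (added for clarity): for a halfword operand the
 * combined tsz:imm3 field x lies in 16..31, so tszimm_esz yields
 * 31 - clz32(x >> 3) = 1 = MO_16.  Then tszimm_shr gives a right-shift
 * amount of (16 << 1) - x = 32 - x, i.e. 1..16, and tszimm_shl gives
 * x - (8 << 1) = x - 16, i.e. 0..15, matching the architected ranges.
 */
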
/* The SH bit is in bit 8.  Extract the low 8 and shift. */
static inline int expand_imm_sh8s(DisasContext *s, int x)
{
    return (int8_t)x << (x & 0x100 ? 8 : 0);
}

static inline int expand_imm_sh8u(DisasContext *s, int x)
{
    return (uint8_t)x << (x & 0x100 ? 8 : 0);
}

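/*
 * Illustrative values (added for clarity): with x = 0x1a5 the SH bit
 * is set, so expand_imm_sh8s returns (int8_t)0xa5 << 8 = -0x5b00 while
 * expand_imm_sh8u returns 0xa5 << 8 = 0xa500; with x = 0x0a5 both
 * return the unshifted byte, sign-extended or not.
 */
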
/* Convert a 2-bit memory size (msz) to a 4-bit data type (dtype)
 * with unsigned data.  C.f. SVE Memory Contiguous Load Group.
 */
static inline int msz_dtype(DisasContext *s, int msz)
{
    static const uint8_t dtype[4] = { 0, 5, 10, 15 };
    return dtype[msz];
}

/*
 * Include the generated decoder.
 */

#include "decode-sve.c.inc"

/*
 * Implement all of the translator functions referenced by the decoder.
 */

/* Invoke an out-of-line helper on 2 Zregs. */
static bool gen_gvec_ool_zz(DisasContext *s, gen_helper_gvec_2 *fn,
                            int rd, int rn, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_2_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vsz, vsz, data, fn);
    }
    return true;
}

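/*
 * Note (added for clarity): throughout these helpers, returning false
 * means the encoding is unallocated and should be diagnosed, while
 * returning true means the instruction was handled -- including the
 * case where sve_access_check() failed and raised an exception, in
 * which case no code is emitted for the operation itself.
 */
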
static bool gen_gvec_fpst_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
                             int rd, int rn, int data,
                             ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_2_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           status, vsz, vsz, data, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zz(DisasContext *s, gen_helper_gvec_2_ptr *fn,
                                 arg_rr_esz *a, int data)
{
    return gen_gvec_fpst_zz(s, fn, a->rd, a->rn, data,
                            a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

/* Invoke an out-of-line helper on 3 Zregs. */
static bool gen_gvec_ool_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int rm, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rrr_esz *a, int data)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, data);
}

/* Invoke an out-of-line helper on 3 Zregs, plus float_status. */
static bool gen_gvec_fpst_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                              int rd, int rn, int rm,
                              int data, ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           status, vsz, vsz, data, fn);

        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zzz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                                  arg_rrr_esz *a, int data)
{
    return gen_gvec_fpst_zzz(s, fn, a->rd, a->rn, a->rm, data,
                             a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

/* Invoke an out-of-line helper on 4 Zregs. */
static bool gen_gvec_ool_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int ra, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zzzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrrr_esz *a, int data)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, data);
}

static bool gen_gvec_ool_arg_zzxz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rrxr_esz *a)
{
    return gen_gvec_ool_zzzz(s, fn, a->rd, a->rn, a->rm, a->ra, a->index);
}

/* Invoke an out-of-line helper on 4 Zregs, plus a pointer. */
static bool gen_gvec_ptr_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                              int rd, int rn, int rm, int ra,
                              int data, TCGv_ptr ptr)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           ptr, vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_fpst_zzzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                               int rd, int rn, int rm, int ra,
                               int data, ARMFPStatusFlavour flavour)
{
    TCGv_ptr status = fpstatus_ptr(flavour);
    bool ret = gen_gvec_ptr_zzzz(s, fn, rd, rn, rm, ra, data, status);
    tcg_temp_free_ptr(status);
    return ret;
}

/* Invoke an out-of-line helper on 4 Zregs, 1 Preg, plus fpst. */
static bool gen_gvec_fpst_zzzzp(DisasContext *s, gen_helper_gvec_5_ptr *fn,
                                int rd, int rn, int rm, int ra, int pg,
                                int data, ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_5_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           vec_full_reg_offset(s, ra),
                           pred_full_reg_offset(s, pg),
                           status, vsz, vsz, data, fn);

        tcg_temp_free_ptr(status);
    }
    return true;
}

/* Invoke an out-of-line helper on 2 Zregs and a predicate. */
static bool gen_gvec_ool_zzp(DisasContext *s, gen_helper_gvec_3 *fn,
                             int rd, int rn, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_3_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpz(DisasContext *s, gen_helper_gvec_3 *fn,
                                 arg_rpr_esz *a, int data)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, data);
}

static bool gen_gvec_ool_arg_zpzi(DisasContext *s, gen_helper_gvec_3 *fn,
                                  arg_rpri_esz *a)
{
    return gen_gvec_ool_zzp(s, fn, a->rd, a->rn, a->pg, a->imm);
}

static bool gen_gvec_fpst_zzp(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                              int rd, int rn, int pg, int data,
                              ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           pred_full_reg_offset(s, pg),
                           status, vsz, vsz, data, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zpz(DisasContext *s, gen_helper_gvec_3_ptr *fn,
                                  arg_rpr_esz *a, int data,
                                  ARMFPStatusFlavour flavour)
{
    return gen_gvec_fpst_zzp(s, fn, a->rd, a->rn, a->pg, data, flavour);
}

/* Invoke an out-of-line helper on 3 Zregs and a predicate. */
static bool gen_gvec_ool_zzzp(DisasContext *s, gen_helper_gvec_4 *fn,
                              int rd, int rn, int rm, int pg, int data)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           pred_full_reg_offset(s, pg),
                           vsz, vsz, data, fn);
    }
    return true;
}

static bool gen_gvec_ool_arg_zpzz(DisasContext *s, gen_helper_gvec_4 *fn,
                                  arg_rprr_esz *a, int data)
{
    return gen_gvec_ool_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, data);
}

/* Invoke an out-of-line helper on 3 Zregs and a predicate, plus fpst. */
static bool gen_gvec_fpst_zzzp(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                               int rd, int rn, int rm, int pg, int data,
                               ARMFPStatusFlavour flavour)
{
    if (fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        TCGv_ptr status = fpstatus_ptr(flavour);

        tcg_gen_gvec_4_ptr(vec_full_reg_offset(s, rd),
                           vec_full_reg_offset(s, rn),
                           vec_full_reg_offset(s, rm),
                           pred_full_reg_offset(s, pg),
                           status, vsz, vsz, data, fn);
        tcg_temp_free_ptr(status);
    }
    return true;
}

static bool gen_gvec_fpst_arg_zpzz(DisasContext *s, gen_helper_gvec_4_ptr *fn,
                                   arg_rprr_esz *a)
{
    return gen_gvec_fpst_zzzp(s, fn, a->rd, a->rn, a->rm, a->pg, 0,
                              a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
}

/* Invoke a vector expander on two Zregs and an immediate. */
static bool gen_gvec_fn_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                            int esz, int rd, int rn, uint64_t imm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn), imm, vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzi(DisasContext *s, GVecGen2iFn *gvec_fn,
                                arg_rri_esz *a)
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    return gen_gvec_fn_zzi(s, gvec_fn, a->esz, a->rd, a->rn, a->imm);
}

/* Invoke a vector expander on three Zregs. */
static bool gen_gvec_fn_zzz(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int esz, int rd, int rn, int rm)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(esz, vec_full_reg_offset(s, rd),
                vec_full_reg_offset(s, rn),
                vec_full_reg_offset(s, rm), vsz, vsz);
    }
    return true;
}

static bool gen_gvec_fn_arg_zzz(DisasContext *s, GVecGen3Fn *fn,
                                arg_rrr_esz *a)
{
    return gen_gvec_fn_zzz(s, fn, a->esz, a->rd, a->rn, a->rm);
}

/* Invoke a vector expander on four Zregs. */
static bool gen_gvec_fn_arg_zzzz(DisasContext *s, GVecGen4Fn *gvec_fn,
                                 arg_rrrr_esz *a)
{
    if (gvec_fn == NULL) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn),
                vec_full_reg_offset(s, a->rm),
                vec_full_reg_offset(s, a->ra), vsz, vsz);
    }
    return true;
}

/* Invoke a vector move on two Zregs. */
static bool do_mov_z(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_mov(MO_8, vec_full_reg_offset(s, rd),
                         vec_full_reg_offset(s, rn), vsz, vsz);
    }
    return true;
}

/* Initialize a Zreg with replications of a 64-bit immediate. */
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
    unsigned vsz = vec_full_reg_size(s);
    tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}

/* Invoke a vector expander on three Pregs. */
static bool gen_gvec_fn_ppp(DisasContext *s, GVecGen3Fn *gvec_fn,
                            int rd, int rn, int rm)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        gvec_fn(MO_64, pred_full_reg_offset(s, rd),
                pred_full_reg_offset(s, rn),
                pred_full_reg_offset(s, rm), psz, psz);
    }
    return true;
}

/* Invoke a vector move on two Pregs. */
static bool do_mov_p(DisasContext *s, int rd, int rn)
{
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_mov(MO_8, pred_full_reg_offset(s, rd),
                         pred_full_reg_offset(s, rn), psz, psz);
    }
    return true;
}

/* Set the cpu flags as per a return from an SVE helper. */
static void do_pred_flags(TCGv_i32 t)
{
    tcg_gen_mov_i32(cpu_NF, t);
    tcg_gen_andi_i32(cpu_ZF, t, 2);
    tcg_gen_andi_i32(cpu_CF, t, 1);
    tcg_gen_movi_i32(cpu_VF, 0);
}

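/*
 * Note (added for clarity): the predtest helpers pack the result with
 * N in bit 31, "some active element was true" in bit 1, and C in bit 0.
 * Since cpu_ZF encodes Z as "value == 0", copying bit 1 sets Z exactly
 * when no active element was true; V is always zero.
 */
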
/* Subroutines computing the ARM PredTest pseudofunction. */
static void do_predtest1(TCGv_i64 d, TCGv_i64 g)
{
    TCGv_i32 t = tcg_temp_new_i32();

    gen_helper_sve_predtest1(t, d, g);
    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

static void do_predtest(DisasContext *s, int dofs, int gofs, int words)
{
    TCGv_ptr dptr = tcg_temp_new_ptr();
    TCGv_ptr gptr = tcg_temp_new_ptr();
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_addi_ptr(dptr, cpu_env, dofs);
    tcg_gen_addi_ptr(gptr, cpu_env, gofs);

    gen_helper_sve_predtest(t, dptr, gptr, tcg_constant_i32(words));
    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(gptr);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
}

/* For each element size, the bits within a predicate word that are active. */
const uint64_t pred_esz_masks[4] = {
    0xffffffffffffffffull, 0x5555555555555555ull,
    0x1111111111111111ull, 0x0101010101010101ull
};

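/*
 * Note (added for clarity): a predicate has one bit per byte of vector,
 * so only every (1 << esz)-th bit can be set.  For MO_8 all 64 bits of
 * a predicate word are significant; for MO_16 every other bit (0x55...),
 * for MO_32 every fourth bit (0x11...), for MO_64 every eighth (0x01...).
 */
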
static bool trans_INVALID(DisasContext *s, arg_INVALID *a)
{
    unallocated_encoding(s);
    return true;
}

/*
 *** SVE Logical - Unpredicated Group
 */

TRANS_FEAT(AND_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_and, a)
TRANS_FEAT(ORR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_or, a)
TRANS_FEAT(EOR_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_xor, a)
TRANS_FEAT(BIC_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_andc, a)

static void gen_xar8_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_8, 0xff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 8 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

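/*
 * Note (added for clarity): this emulates a per-byte rotate-right on a
 * 64-bit scalar.  The whole word is shifted right by sh and left by
 * 8 - sh; the two masks then discard the bits that crossed a byte
 * boundary, so each byte receives only its own rotated bits.
 * gen_xar16_i64 below is the same trick with 16-bit lanes.
 */
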
static void gen_xar16_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    TCGv_i64 t = tcg_temp_new_i64();
    uint64_t mask = dup_const(MO_16, 0xffff >> sh);

    tcg_gen_xor_i64(t, n, m);
    tcg_gen_shri_i64(d, t, sh);
    tcg_gen_shli_i64(t, t, 16 - sh);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(t, t, ~mask);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

static void gen_xar_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, int32_t sh)
{
    tcg_gen_xor_i32(d, n, m);
    tcg_gen_rotri_i32(d, d, sh);
}

static void gen_xar_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, int64_t sh)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_rotri_i64(d, d, sh);
}

static void gen_xar_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                        TCGv_vec m, int64_t sh)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_rotri_vec(vece, d, d, sh);
}

void gen_gvec_xar(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, int64_t shift,
                  uint32_t opr_sz, uint32_t max_sz)
{
    static const TCGOpcode vecop[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen3i ops[4] = {
        { .fni8 = gen_xar8_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_b,
          .opt_opc = vecop,
          .vece = MO_8 },
        { .fni8 = gen_xar16_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_h,
          .opt_opc = vecop,
          .vece = MO_16 },
        { .fni4 = gen_xar_i32,
          .fniv = gen_xar_vec,
          .fno = gen_helper_sve2_xar_s,
          .opt_opc = vecop,
          .vece = MO_32 },
        { .fni8 = gen_xar_i64,
          .fniv = gen_xar_vec,
          .fno = gen_helper_gvec_xar_d,
          .opt_opc = vecop,
          .vece = MO_64 }
    };
    int esize = 8 << vece;

    /* The SVE2 range is 1 .. esize; the AdvSIMD range is 0 .. esize-1. */
    tcg_debug_assert(shift >= 0);
    tcg_debug_assert(shift <= esize);
    shift &= esize - 1;

    if (shift == 0) {
        /* xar with no rotate devolves to xor. */
        tcg_gen_gvec_xor(vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
    } else {
        tcg_gen_gvec_3i(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz,
                        shift, &ops[vece]);
    }
}

static bool trans_XAR(DisasContext *s, arg_rrri_esz *a)
{
    if (a->esz < 0 || !dc_isar_feature(aa64_sve2, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        gen_gvec_xar(a->esz, vec_full_reg_offset(s, a->rd),
                     vec_full_reg_offset(s, a->rn),
                     vec_full_reg_offset(s, a->rm), a->imm, vsz, vsz);
    }
    return true;
}

static void gen_eor3_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_xor_i64(d, n, m);
    tcg_gen_xor_i64(d, d, k);
}

static void gen_eor3_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_xor_vec(vece, d, n, m);
    tcg_gen_xor_vec(vece, d, d, k);
}

static void gen_eor3(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor3_i64,
        .fniv = gen_eor3_vec,
        .fno = gen_helper_sve2_eor3,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(EOR3, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_eor3, a)

static void gen_bcax_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(d, m, k);
    tcg_gen_xor_i64(d, d, n);
}

static void gen_bcax_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_andc_vec(vece, d, m, k);
    tcg_gen_xor_vec(vece, d, d, n);
}

static void gen_bcax(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bcax_i64,
        .fniv = gen_bcax_vec,
        .fno = gen_helper_sve2_bcax,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BCAX, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bcax, a)

static void gen_bsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                    uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    /* BSL differs from the generic bitsel in argument ordering. */
    tcg_gen_gvec_bitsel(vece, d, a, n, m, oprsz, maxsz);
}

TRANS_FEAT(BSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl, a)

static void gen_bsl1n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_andc_i64(n, k, n);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_or_i64(d, n, m);
}

static void gen_bsl1n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, n, n);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_andc_vec(vece, n, k, n);
        tcg_gen_andc_vec(vece, m, m, k);
        tcg_gen_or_vec(vece, d, n, m);
    }
}

static void gen_bsl1n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl1n_i64,
        .fniv = gen_bsl1n_vec,
        .fno = gen_helper_sve2_bsl1n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BSL1N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl1n, a)

static void gen_bsl2n_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    /*
     * Z[dn] = (n & k) | (~m & ~k)
     *       =         | ~(m | k)
     */
    tcg_gen_and_i64(n, n, k);
    if (TCG_TARGET_HAS_orc_i64) {
        tcg_gen_or_i64(m, m, k);
        tcg_gen_orc_i64(d, n, m);
    } else {
        tcg_gen_nor_i64(m, m, k);
        tcg_gen_or_i64(d, n, m);
    }
}

static void gen_bsl2n_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                          TCGv_vec m, TCGv_vec k)
{
    if (TCG_TARGET_HAS_bitsel_vec) {
        tcg_gen_not_vec(vece, m, m);
        tcg_gen_bitsel_vec(vece, d, k, n, m);
    } else {
        tcg_gen_and_vec(vece, n, n, k);
        tcg_gen_or_vec(vece, m, m, k);
        tcg_gen_orc_vec(vece, d, n, m);
    }
}

static void gen_bsl2n(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                      uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_bsl2n_i64,
        .fniv = gen_bsl2n_vec,
        .fno = gen_helper_sve2_bsl2n,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(BSL2N, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_bsl2n, a)

static void gen_nbsl_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 k)
{
    tcg_gen_and_i64(n, n, k);
    tcg_gen_andc_i64(m, m, k);
    tcg_gen_nor_i64(d, n, m);
}

static void gen_nbsl_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
                         TCGv_vec m, TCGv_vec k)
{
    tcg_gen_bitsel_vec(vece, d, k, n, m);
    tcg_gen_not_vec(vece, d, d);
}

static void gen_nbsl(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
                     uint32_t a, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 op = {
        .fni8 = gen_nbsl_i64,
        .fniv = gen_nbsl_vec,
        .fno = gen_helper_sve2_nbsl,
        .vece = MO_64,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &op);
}

TRANS_FEAT(NBSL, aa64_sve2, gen_gvec_fn_arg_zzzz, gen_nbsl, a)

/*
 *** SVE Integer Arithmetic - Unpredicated Group
 */

TRANS_FEAT(ADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_add, a)
TRANS_FEAT(SUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sub, a)
TRANS_FEAT(SQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ssadd, a)
TRANS_FEAT(SQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_sssub, a)
TRANS_FEAT(UQADD_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_usadd, a)
TRANS_FEAT(UQSUB_zzz, aa64_sve, gen_gvec_fn_arg_zzz, tcg_gen_gvec_ussub, a)

/*
 *** SVE Integer Arithmetic - Binary Predicated Group
 */

/* Select active elements from Zn and inactive elements from Zm,
 * storing the result in Zd.
 */
static bool do_sel_z(DisasContext *s, int rd, int rn, int rm, int pg, int esz)
{
    static gen_helper_gvec_4 * const fns[4] = {
        gen_helper_sve_sel_zpzz_b, gen_helper_sve_sel_zpzz_h,
        gen_helper_sve_sel_zpzz_s, gen_helper_sve_sel_zpzz_d
    };
    return gen_gvec_ool_zzzp(s, fns[esz], rd, rn, rm, pg, 0);
}

#define DO_ZPZZ(NAME, FEAT, name) \
    static gen_helper_gvec_4 * const name##_zpzz_fns[4] = {              \
        gen_helper_##name##_zpzz_b, gen_helper_##name##_zpzz_h,          \
        gen_helper_##name##_zpzz_s, gen_helper_##name##_zpzz_d,          \
    };                                                                   \
    TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpzz,                        \
               name##_zpzz_fns[a->esz], a, 0)

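/*
 * Expansion example (added for clarity): DO_ZPZZ(AND_zpzz, aa64_sve,
 * sve_and) defines a table sve_and_zpzz_fns[] holding the four
 * per-element-size helpers, plus a trans_AND_zpzz() that, in outline,
 * checks the aa64_sve feature, indexes the table by a->esz, and
 * dispatches through gen_gvec_ool_arg_zpzz.
 */
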
DO_ZPZZ(AND_zpzz, aa64_sve, sve_and)
DO_ZPZZ(EOR_zpzz, aa64_sve, sve_eor)
DO_ZPZZ(ORR_zpzz, aa64_sve, sve_orr)
DO_ZPZZ(BIC_zpzz, aa64_sve, sve_bic)

DO_ZPZZ(ADD_zpzz, aa64_sve, sve_add)
DO_ZPZZ(SUB_zpzz, aa64_sve, sve_sub)

DO_ZPZZ(SMAX_zpzz, aa64_sve, sve_smax)
DO_ZPZZ(UMAX_zpzz, aa64_sve, sve_umax)
DO_ZPZZ(SMIN_zpzz, aa64_sve, sve_smin)
DO_ZPZZ(UMIN_zpzz, aa64_sve, sve_umin)
DO_ZPZZ(SABD_zpzz, aa64_sve, sve_sabd)
DO_ZPZZ(UABD_zpzz, aa64_sve, sve_uabd)

DO_ZPZZ(MUL_zpzz, aa64_sve, sve_mul)
DO_ZPZZ(SMULH_zpzz, aa64_sve, sve_smulh)
DO_ZPZZ(UMULH_zpzz, aa64_sve, sve_umulh)

DO_ZPZZ(ASR_zpzz, aa64_sve, sve_asr)
DO_ZPZZ(LSR_zpzz, aa64_sve, sve_lsr)
DO_ZPZZ(LSL_zpzz, aa64_sve, sve_lsl)

static gen_helper_gvec_4 * const sdiv_fns[4] = {
    NULL, NULL, gen_helper_sve_sdiv_zpzz_s, gen_helper_sve_sdiv_zpzz_d
};
TRANS_FEAT(SDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, sdiv_fns[a->esz], a, 0)

static gen_helper_gvec_4 * const udiv_fns[4] = {
    NULL, NULL, gen_helper_sve_udiv_zpzz_s, gen_helper_sve_udiv_zpzz_d
};
TRANS_FEAT(UDIV_zpzz, aa64_sve, gen_gvec_ool_arg_zpzz, udiv_fns[a->esz], a, 0)

TRANS_FEAT(SEL_zpzz, aa64_sve, do_sel_z, a->rd, a->rn, a->rm, a->pg, a->esz)

/*
 *** SVE Integer Arithmetic - Unary Predicated Group
 */

#define DO_ZPZ(NAME, FEAT, name) \
    static gen_helper_gvec_3 * const name##_fns[4] = {             \
        gen_helper_##name##_b, gen_helper_##name##_h,              \
        gen_helper_##name##_s, gen_helper_##name##_d,              \
    };                                                             \
    TRANS_FEAT(NAME, FEAT, gen_gvec_ool_arg_zpz, name##_fns[a->esz], a, 0)

DO_ZPZ(CLS, aa64_sve, sve_cls)
DO_ZPZ(CLZ, aa64_sve, sve_clz)
DO_ZPZ(CNT_zpz, aa64_sve, sve_cnt_zpz)
DO_ZPZ(CNOT, aa64_sve, sve_cnot)
DO_ZPZ(NOT_zpz, aa64_sve, sve_not_zpz)
DO_ZPZ(ABS, aa64_sve, sve_abs)
DO_ZPZ(NEG, aa64_sve, sve_neg)
DO_ZPZ(RBIT, aa64_sve, sve_rbit)

static gen_helper_gvec_3 * const fabs_fns[4] = {
    NULL, gen_helper_sve_fabs_h,
    gen_helper_sve_fabs_s, gen_helper_sve_fabs_d,
};
TRANS_FEAT(FABS, aa64_sve, gen_gvec_ool_arg_zpz, fabs_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const fneg_fns[4] = {
    NULL, gen_helper_sve_fneg_h,
    gen_helper_sve_fneg_s, gen_helper_sve_fneg_d,
};
TRANS_FEAT(FNEG, aa64_sve, gen_gvec_ool_arg_zpz, fneg_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sxtb_fns[4] = {
    NULL, gen_helper_sve_sxtb_h,
    gen_helper_sve_sxtb_s, gen_helper_sve_sxtb_d,
};
TRANS_FEAT(SXTB, aa64_sve, gen_gvec_ool_arg_zpz, sxtb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const uxtb_fns[4] = {
    NULL, gen_helper_sve_uxtb_h,
    gen_helper_sve_uxtb_s, gen_helper_sve_uxtb_d,
};
TRANS_FEAT(UXTB, aa64_sve, gen_gvec_ool_arg_zpz, uxtb_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const sxth_fns[4] = {
    NULL, NULL, gen_helper_sve_sxth_s, gen_helper_sve_sxth_d
};
TRANS_FEAT(SXTH, aa64_sve, gen_gvec_ool_arg_zpz, sxth_fns[a->esz], a, 0)

static gen_helper_gvec_3 * const uxth_fns[4] = {
    NULL, NULL, gen_helper_sve_uxth_s, gen_helper_sve_uxth_d
};
TRANS_FEAT(UXTH, aa64_sve, gen_gvec_ool_arg_zpz, uxth_fns[a->esz], a, 0)

TRANS_FEAT(SXTW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_sxtw_d : NULL, a, 0)
TRANS_FEAT(UXTW, aa64_sve, gen_gvec_ool_arg_zpz,
           a->esz == 3 ? gen_helper_sve_uxtw_d : NULL, a, 0)

/*
 *** SVE Integer Reduction Group
 */

typedef void gen_helper_gvec_reduc(TCGv_i64, TCGv_ptr, TCGv_ptr, TCGv_i32);
static bool do_vpz_ool(DisasContext *s, arg_rpr_esz *a,
                       gen_helper_gvec_reduc *fn)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr t_zn, t_pg;
    TCGv_i32 desc;
    TCGv_i64 temp;

    if (fn == NULL) {
        return false;
    }
    if (!sve_access_check(s)) {
        return true;
    }

    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    temp = tcg_temp_new_i64();
    t_zn = tcg_temp_new_ptr();
    t_pg = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
    fn(temp, t_zn, t_pg, desc);
    tcg_temp_free_ptr(t_zn);
    tcg_temp_free_ptr(t_pg);

    write_fp_dreg(s, a->rd, temp);
    tcg_temp_free_i64(temp);
    return true;
}

#define DO_VPZ(NAME, name) \
    static gen_helper_gvec_reduc * const name##_fns[4] = {             \
        gen_helper_sve_##name##_b, gen_helper_sve_##name##_h,          \
        gen_helper_sve_##name##_s, gen_helper_sve_##name##_d,          \
    };                                                                 \
    TRANS_FEAT(NAME, aa64_sve, do_vpz_ool, a, name##_fns[a->esz])

DO_VPZ(ORV, orv)
DO_VPZ(ANDV, andv)
DO_VPZ(EORV, eorv)

DO_VPZ(UADDV, uaddv)
DO_VPZ(SMAXV, smaxv)
DO_VPZ(UMAXV, umaxv)
DO_VPZ(SMINV, sminv)
DO_VPZ(UMINV, uminv)

static gen_helper_gvec_reduc * const saddv_fns[4] = {
    gen_helper_sve_saddv_b, gen_helper_sve_saddv_h,
    gen_helper_sve_saddv_s, NULL
};
TRANS_FEAT(SADDV, aa64_sve, do_vpz_ool, a, saddv_fns[a->esz])

#undef DO_VPZ

/*
 *** SVE Shift by Immediate - Predicated Group
 */

/*
 * Copy Zn into Zd, storing zeros into inactive elements.
 * If invert, store zeros into the active elements.
 */
static bool do_movz_zpz(DisasContext *s, int rd, int rn, int pg,
                        int esz, bool invert)
{
    static gen_helper_gvec_3 * const fns[4] = {
        gen_helper_sve_movz_b, gen_helper_sve_movz_h,
        gen_helper_sve_movz_s, gen_helper_sve_movz_d,
    };
    return gen_gvec_ool_zzp(s, fns[esz], rd, rn, pg, invert);
}

static bool do_shift_zpzi(DisasContext *s, arg_rpri_esz *a, bool asr,
                          gen_helper_gvec_3 * const fns[4])
{
    int max;

    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }

    /*
     * Shift by element size is architecturally valid.
     * For arithmetic right-shift, it's the same as by one less.
     * For logical shifts and ASRD, it is a zeroing operation.
     */
    max = 8 << a->esz;
    if (a->imm >= max) {
        if (asr) {
            a->imm = max - 1;
        } else {
            return do_movz_zpz(s, a->rd, a->rd, a->pg, a->esz, true);
        }
    }
    return gen_gvec_ool_arg_zpzi(s, fns[a->esz], a);
}

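/*
 * Example (added for clarity): for LSR Zd.B, Pg/M, Zd.B, #8 the
 * immediate equals the element size, so every active byte shifts to
 * zero.  do_movz_zpz with rd == rn and invert = true implements exactly
 * that: active elements are zeroed while inactive ones keep their
 * merged value.
 */
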
static gen_helper_gvec_3 * const asr_zpzi_fns[4] = {
    gen_helper_sve_asr_zpzi_b, gen_helper_sve_asr_zpzi_h,
    gen_helper_sve_asr_zpzi_s, gen_helper_sve_asr_zpzi_d,
};
TRANS_FEAT(ASR_zpzi, aa64_sve, do_shift_zpzi, a, true, asr_zpzi_fns)

static gen_helper_gvec_3 * const lsr_zpzi_fns[4] = {
    gen_helper_sve_lsr_zpzi_b, gen_helper_sve_lsr_zpzi_h,
    gen_helper_sve_lsr_zpzi_s, gen_helper_sve_lsr_zpzi_d,
};
TRANS_FEAT(LSR_zpzi, aa64_sve, do_shift_zpzi, a, false, lsr_zpzi_fns)

static gen_helper_gvec_3 * const lsl_zpzi_fns[4] = {
    gen_helper_sve_lsl_zpzi_b, gen_helper_sve_lsl_zpzi_h,
    gen_helper_sve_lsl_zpzi_s, gen_helper_sve_lsl_zpzi_d,
};
TRANS_FEAT(LSL_zpzi, aa64_sve, do_shift_zpzi, a, false, lsl_zpzi_fns)

static gen_helper_gvec_3 * const asrd_fns[4] = {
    gen_helper_sve_asrd_b, gen_helper_sve_asrd_h,
    gen_helper_sve_asrd_s, gen_helper_sve_asrd_d,
};
TRANS_FEAT(ASRD, aa64_sve, do_shift_zpzi, a, false, asrd_fns)

static gen_helper_gvec_3 * const sqshl_zpzi_fns[4] = {
    gen_helper_sve2_sqshl_zpzi_b, gen_helper_sve2_sqshl_zpzi_h,
    gen_helper_sve2_sqshl_zpzi_s, gen_helper_sve2_sqshl_zpzi_d,
};
TRANS_FEAT(SQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : sqshl_zpzi_fns[a->esz], a)

static gen_helper_gvec_3 * const uqshl_zpzi_fns[4] = {
    gen_helper_sve2_uqshl_zpzi_b, gen_helper_sve2_uqshl_zpzi_h,
    gen_helper_sve2_uqshl_zpzi_s, gen_helper_sve2_uqshl_zpzi_d,
};
TRANS_FEAT(UQSHL_zpzi, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : uqshl_zpzi_fns[a->esz], a)

static gen_helper_gvec_3 * const srshr_fns[4] = {
    gen_helper_sve2_srshr_b, gen_helper_sve2_srshr_h,
    gen_helper_sve2_srshr_s, gen_helper_sve2_srshr_d,
};
TRANS_FEAT(SRSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : srshr_fns[a->esz], a)

static gen_helper_gvec_3 * const urshr_fns[4] = {
    gen_helper_sve2_urshr_b, gen_helper_sve2_urshr_h,
    gen_helper_sve2_urshr_s, gen_helper_sve2_urshr_d,
};
TRANS_FEAT(URSHR, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : urshr_fns[a->esz], a)

static gen_helper_gvec_3 * const sqshlu_fns[4] = {
    gen_helper_sve2_sqshlu_b, gen_helper_sve2_sqshlu_h,
    gen_helper_sve2_sqshlu_s, gen_helper_sve2_sqshlu_d,
};
TRANS_FEAT(SQSHLU, aa64_sve2, gen_gvec_ool_arg_zpzi,
           a->esz < 0 ? NULL : sqshlu_fns[a->esz], a)

/*
 *** SVE Bitwise Shift - Predicated Group
 */

#define DO_ZPZW(NAME, name) \
    static gen_helper_gvec_4 * const name##_zpzw_fns[4] = {              \
        gen_helper_sve_##name##_zpzw_b, gen_helper_sve_##name##_zpzw_h,  \
        gen_helper_sve_##name##_zpzw_s, NULL                             \
    };                                                                   \
    TRANS_FEAT(NAME##_zpzw, aa64_sve, gen_gvec_ool_arg_zpzz,             \
               a->esz < 0 ? NULL : name##_zpzw_fns[a->esz], a, 0)

DO_ZPZW(ASR, asr)
DO_ZPZW(LSR, lsr)
DO_ZPZW(LSL, lsl)

#undef DO_ZPZW

/*
 *** SVE Bitwise Shift - Unpredicated Group
 */

static bool do_shift_imm(DisasContext *s, arg_rri_esz *a, bool asr,
                         void (*gvec_fn)(unsigned, uint32_t, uint32_t,
                                         int64_t, uint32_t, uint32_t))
{
    if (a->esz < 0) {
        /* Invalid tsz encoding -- see tszimm_esz. */
        return false;
    }
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        /* Shift by element size is architecturally valid.  For
           arithmetic right-shift, it's the same as by one less.
           Otherwise it is a zeroing operation.  */
        if (a->imm >= 8 << a->esz) {
            if (asr) {
                a->imm = (8 << a->esz) - 1;
            } else {
                do_dupi_z(s, a->rd, 0);
                return true;
            }
        }
        gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
                vec_full_reg_offset(s, a->rn), a->imm, vsz, vsz);
    }
    return true;
}

TRANS_FEAT(ASR_zzi, aa64_sve, do_shift_imm, a, true, tcg_gen_gvec_sari)
TRANS_FEAT(LSR_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shri)
TRANS_FEAT(LSL_zzi, aa64_sve, do_shift_imm, a, false, tcg_gen_gvec_shli)

#define DO_ZZW(NAME, name) \
    static gen_helper_gvec_3 * const name##_zzw_fns[4] = {               \
        gen_helper_sve_##name##_zzw_b, gen_helper_sve_##name##_zzw_h,    \
        gen_helper_sve_##name##_zzw_s, NULL                              \
    };                                                                   \
    TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_arg_zzz,                     \
               name##_zzw_fns[a->esz], a, 0)

DO_ZZW(ASR_zzw, asr)
DO_ZZW(LSR_zzw, lsr)
DO_ZZW(LSL_zzw, lsl)

#undef DO_ZZW

/*
 *** SVE Integer Multiply-Add Group
 */

static bool do_zpzzz_ool(DisasContext *s, arg_rprrr_esz *a,
                         gen_helper_gvec_5 *fn)
{
    if (sve_access_check(s)) {
        unsigned vsz = vec_full_reg_size(s);
        tcg_gen_gvec_5_ool(vec_full_reg_offset(s, a->rd),
                           vec_full_reg_offset(s, a->ra),
                           vec_full_reg_offset(s, a->rn),
                           vec_full_reg_offset(s, a->rm),
                           pred_full_reg_offset(s, a->pg),
                           vsz, vsz, 0, fn);
    }
    return true;
}

static gen_helper_gvec_5 * const mla_fns[4] = {
    gen_helper_sve_mla_b, gen_helper_sve_mla_h,
    gen_helper_sve_mla_s, gen_helper_sve_mla_d,
};
TRANS_FEAT(MLA, aa64_sve, do_zpzzz_ool, a, mla_fns[a->esz])

static gen_helper_gvec_5 * const mls_fns[4] = {
    gen_helper_sve_mls_b, gen_helper_sve_mls_h,
    gen_helper_sve_mls_s, gen_helper_sve_mls_d,
};
TRANS_FEAT(MLS, aa64_sve, do_zpzzz_ool, a, mls_fns[a->esz])

/*
 *** SVE Index Generation Group
 */

static bool do_index(DisasContext *s, int esz, int rd,
                     TCGv_i64 start, TCGv_i64 incr)
{
    unsigned vsz;
    TCGv_i32 desc;
    TCGv_ptr t_zd;

    if (!sve_access_check(s)) {
        return true;
    }

    vsz = vec_full_reg_size(s);
    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
    t_zd = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
    if (esz == 3) {
        gen_helper_sve_index_d(t_zd, start, incr, desc);
    } else {
        typedef void index_fn(TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
        static index_fn * const fns[3] = {
            gen_helper_sve_index_b,
            gen_helper_sve_index_h,
            gen_helper_sve_index_s,
        };
        TCGv_i32 s32 = tcg_temp_new_i32();
        TCGv_i32 i32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(s32, start);
        tcg_gen_extrl_i64_i32(i32, incr);
        fns[esz](t_zd, s32, i32, desc);

        tcg_temp_free_i32(s32);
        tcg_temp_free_i32(i32);
    }
    tcg_temp_free_ptr(t_zd);
    return true;
}

TRANS_FEAT(INDEX_ii, aa64_sve, do_index, a->esz, a->rd,
           tcg_constant_i64(a->imm1), tcg_constant_i64(a->imm2))
TRANS_FEAT(INDEX_ir, aa64_sve, do_index, a->esz, a->rd,
           tcg_constant_i64(a->imm), cpu_reg(s, a->rm))
TRANS_FEAT(INDEX_ri, aa64_sve, do_index, a->esz, a->rd,
           cpu_reg(s, a->rn), tcg_constant_i64(a->imm))
TRANS_FEAT(INDEX_rr, aa64_sve, do_index, a->esz, a->rd,
           cpu_reg(s, a->rn), cpu_reg(s, a->rm))

/*
 *** SVE Stack Allocation Group
 */

static bool trans_ADDVL(DisasContext *s, arg_ADDVL *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * vec_full_reg_size(s));
    }
    return true;
}

static bool trans_ADDSVL(DisasContext *s, arg_ADDSVL *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (sme_enabled_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * streaming_vec_reg_size(s));
    }
    return true;
}

static bool trans_ADDPL(DisasContext *s, arg_ADDPL *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * pred_full_reg_size(s));
    }
    return true;
}

static bool trans_ADDSPL(DisasContext *s, arg_ADDSPL *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (sme_enabled_check(s)) {
        TCGv_i64 rd = cpu_reg_sp(s, a->rd);
        TCGv_i64 rn = cpu_reg_sp(s, a->rn);
        tcg_gen_addi_i64(rd, rn, a->imm * streaming_pred_reg_size(s));
    }
    return true;
}

static bool trans_RDVL(DisasContext *s, arg_RDVL *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        tcg_gen_movi_i64(reg, a->imm * vec_full_reg_size(s));
    }
    return true;
}

static bool trans_RDSVL(DisasContext *s, arg_RDSVL *a)
{
    if (!dc_isar_feature(aa64_sme, s)) {
        return false;
    }
    if (sme_enabled_check(s)) {
        TCGv_i64 reg = cpu_reg(s, a->rd);
        tcg_gen_movi_i64(reg, a->imm * streaming_vec_reg_size(s));
    }
    return true;
}

/*
 *** SVE Compute Vector Address Group
 */

static bool do_adr(DisasContext *s, arg_rrri *a, gen_helper_gvec_3 *fn)
{
    return gen_gvec_ool_zzz(s, fn, a->rd, a->rn, a->rm, a->imm);
}

TRANS_FEAT_NONSTREAMING(ADR_p32, aa64_sve, do_adr, a, gen_helper_sve_adr_p32)
TRANS_FEAT_NONSTREAMING(ADR_p64, aa64_sve, do_adr, a, gen_helper_sve_adr_p64)
TRANS_FEAT_NONSTREAMING(ADR_s32, aa64_sve, do_adr, a, gen_helper_sve_adr_s32)
TRANS_FEAT_NONSTREAMING(ADR_u32, aa64_sve, do_adr, a, gen_helper_sve_adr_u32)

/*
 *** SVE Integer Misc - Unpredicated Group
 */

static gen_helper_gvec_2 * const fexpa_fns[4] = {
    NULL, gen_helper_sve_fexpa_h,
    gen_helper_sve_fexpa_s, gen_helper_sve_fexpa_d,
};
TRANS_FEAT_NONSTREAMING(FEXPA, aa64_sve, gen_gvec_ool_zz,
                        fexpa_fns[a->esz], a->rd, a->rn, 0)

static gen_helper_gvec_3 * const ftssel_fns[4] = {
    NULL, gen_helper_sve_ftssel_h,
    gen_helper_sve_ftssel_s, gen_helper_sve_ftssel_d,
};
TRANS_FEAT_NONSTREAMING(FTSSEL, aa64_sve, gen_gvec_ool_arg_zzz,
                        ftssel_fns[a->esz], a, 0)

/*
 *** SVE Predicate Logical Operations Group
 */

static bool do_pppp_flags(DisasContext *s, arg_rprr_s *a,
                          const GVecGen4 *gvec_op)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned psz = pred_gvec_reg_size(s);
    int dofs = pred_full_reg_offset(s, a->rd);
    int nofs = pred_full_reg_offset(s, a->rn);
    int mofs = pred_full_reg_offset(s, a->rm);
    int gofs = pred_full_reg_offset(s, a->pg);

    if (!a->s) {
        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        return true;
    }

    if (psz == 8) {
        /* Do the operation and the flags generation in temps. */
        TCGv_i64 pd = tcg_temp_new_i64();
        TCGv_i64 pn = tcg_temp_new_i64();
        TCGv_i64 pm = tcg_temp_new_i64();
        TCGv_i64 pg = tcg_temp_new_i64();

        tcg_gen_ld_i64(pn, cpu_env, nofs);
        tcg_gen_ld_i64(pm, cpu_env, mofs);
        tcg_gen_ld_i64(pg, cpu_env, gofs);

        gvec_op->fni8(pd, pn, pm, pg);
        tcg_gen_st_i64(pd, cpu_env, dofs);

        do_predtest1(pd, pg);

        tcg_temp_free_i64(pd);
        tcg_temp_free_i64(pn);
        tcg_temp_free_i64(pm);
        tcg_temp_free_i64(pg);
    } else {
        /* The operation and flags generation is large.  The computation
         * of the flags depends on the original contents of the guarding
         * predicate.  If the destination overwrites the guarding predicate,
         * then the easiest way to get this right is to save a copy.
         */
        int tofs = gofs;
        if (a->rd == a->pg) {
            tofs = offsetof(CPUARMState, vfp.preg_tmp);
            tcg_gen_gvec_mov(0, tofs, gofs, psz, psz);
        }

        tcg_gen_gvec_4(dofs, nofs, mofs, gofs, psz, psz, gvec_op);
        do_predtest(s, dofs, tofs, psz / 8);
    }
    return true;
}

static void gen_and_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_and_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_AND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_and_pg_i64,
        .fniv = gen_and_pg_vec,
        .fno = gen_helper_sve_and_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (!a->s) {
        if (a->rn == a->rm) {
            if (a->pg == a->rn) {
                return do_mov_p(s, a->rd, a->rn);
            }
            return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->pg);
        } else if (a->pg == a->rn || a->pg == a->rm) {
            return gen_gvec_fn_ppp(s, tcg_gen_gvec_and, a->rd, a->rn, a->rm);
        }
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_bic_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_andc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_bic_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_andc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_BIC_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_bic_pg_i64,
        .fniv = gen_bic_pg_vec,
        .fno = gen_helper_sve_bic_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (!a->s && a->pg == a->rn) {
        return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->rn, a->rm);
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_eor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_xor_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_eor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_xor_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_EOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_eor_pg_i64,
        .fniv = gen_eor_pg_vec,
        .fno = gen_helper_sve_eor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    /* Alias NOT (predicate) is EOR Pd.B, Pg/Z, Pn.B, Pg.B */
    if (!a->s && a->pg == a->rm) {
        return gen_gvec_fn_ppp(s, tcg_gen_gvec_andc, a->rd, a->pg, a->rn);
    }
    return do_pppp_flags(s, a, &op);
}

static bool trans_SEL_pppp(DisasContext *s, arg_rprr_s *a)
{
    if (a->s || !dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        unsigned psz = pred_gvec_reg_size(s);
        tcg_gen_gvec_bitsel(MO_8, pred_full_reg_offset(s, a->rd),
                            pred_full_reg_offset(s, a->pg),
                            pred_full_reg_offset(s, a->rn),
                            pred_full_reg_offset(s, a->rm), psz, psz);
    }
    return true;
}

static void gen_orr_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orr_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orr_pg_i64,
        .fniv = gen_orr_pg_vec,
        .fno = gen_helper_sve_orr_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (!a->s && a->pg == a->rn && a->rn == a->rm) {
        return do_mov_p(s, a->rd, a->rn);
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_orn_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_orc_i64(pd, pn, pm);
    tcg_gen_and_i64(pd, pd, pg);
}

static void gen_orn_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_orc_vec(vece, pd, pn, pm);
    tcg_gen_and_vec(vece, pd, pd, pg);
}

static bool trans_ORN_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_orn_pg_i64,
        .fniv = gen_orn_pg_vec,
        .fno = gen_helper_sve_orn_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_nor_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_or_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nor_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                           TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_or_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NOR_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nor_pg_i64,
        .fniv = gen_nor_pg_vec,
        .fno = gen_helper_sve_nor_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    return do_pppp_flags(s, a, &op);
}

static void gen_nand_pg_i64(TCGv_i64 pd, TCGv_i64 pn, TCGv_i64 pm, TCGv_i64 pg)
{
    tcg_gen_and_i64(pd, pn, pm);
    tcg_gen_andc_i64(pd, pg, pd);
}

static void gen_nand_pg_vec(unsigned vece, TCGv_vec pd, TCGv_vec pn,
                            TCGv_vec pm, TCGv_vec pg)
{
    tcg_gen_and_vec(vece, pd, pn, pm);
    tcg_gen_andc_vec(vece, pd, pg, pd);
}

static bool trans_NAND_pppp(DisasContext *s, arg_rprr_s *a)
{
    static const GVecGen4 op = {
        .fni8 = gen_nand_pg_i64,
        .fniv = gen_nand_pg_vec,
        .fno = gen_helper_sve_nand_pppp,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    return do_pppp_flags(s, a, &op);
}

/*
 *** SVE Predicate Misc Group
 */

static bool trans_PTEST(DisasContext *s, arg_PTEST *a)
{
    if (!dc_isar_feature(aa64_sve, s)) {
        return false;
    }
    if (sve_access_check(s)) {
        int nofs = pred_full_reg_offset(s, a->rn);
        int gofs = pred_full_reg_offset(s, a->pg);
        int words = DIV_ROUND_UP(pred_full_reg_size(s), 8);

        if (words == 1) {
            TCGv_i64 pn = tcg_temp_new_i64();
            TCGv_i64 pg = tcg_temp_new_i64();

            tcg_gen_ld_i64(pn, cpu_env, nofs);
            tcg_gen_ld_i64(pg, cpu_env, gofs);
            do_predtest1(pn, pg);

            tcg_temp_free_i64(pn);
            tcg_temp_free_i64(pg);
        } else {
            do_predtest(s, nofs, gofs, words);
        }
    }
    return true;
}

/* See the ARM pseudocode DecodePredCount. */
static unsigned decode_pred_count(unsigned fullsz, int pattern, int esz)
{
    unsigned elements = fullsz >> esz;
    unsigned bound;

    switch (pattern) {
    case 0x0: /* POW2 */
        return pow2floor(elements);
    case 0x1: /* VL1 */
    case 0x2: /* VL2 */
    case 0x3: /* VL3 */
    case 0x4: /* VL4 */
    case 0x5: /* VL5 */
    case 0x6: /* VL6 */
    case 0x7: /* VL7 */
    case 0x8: /* VL8 */
        bound = pattern;
        break;
    case 0x9: /* VL16 */
    case 0xa: /* VL32 */
    case 0xb: /* VL64 */
    case 0xc: /* VL128 */
    case 0xd: /* VL256 */
        bound = 16 << (pattern - 9);
        break;
    case 0x1d: /* MUL4 */
        return elements - elements % 4;
    case 0x1e: /* MUL3 */
        return elements - elements % 3;
    case 0x1f: /* ALL */
        return elements;
    default:   /* #uimm5 */
        return 0;
    }
    return elements >= bound ? bound : 0;
}

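/*
 * Example (added for clarity): with a 256-bit vector (fullsz = 32
 * bytes) and esz = MO_32 there are 8 elements.  POW2 and ALL both
 * return 8, MUL3 returns 6, VL8 returns 8, and VL16 returns 0 because
 * the vector cannot hold 16 elements.
 */
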
/* This handles all of the predicate initialization instructions,
 * PTRUE, PFALSE, SETFFR.  For PFALSE, we will have set PAT == 32
 * so that decode_pred_count returns 0.  For SETFFR, we will have
 * set RD == 16 == FFR.
 */
static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
{
    if (!sve_access_check(s)) {
        return true;
    }

    unsigned fullsz = vec_full_reg_size(s);
    unsigned ofs = pred_full_reg_offset(s, rd);
    unsigned numelem, setsz, i;
    uint64_t word, lastword;
    TCGv_i64 t;

    numelem = decode_pred_count(fullsz, pat, esz);

    /* Determine what we must store into each bit, and how many. */
    if (numelem == 0) {
        lastword = word = 0;
        setsz = fullsz;
    } else {
        setsz = numelem << esz;
        lastword = word = pred_esz_masks[esz];
        if (setsz % 64) {
            lastword &= MAKE_64BIT_MASK(0, setsz % 64);
        }
    }

    t = tcg_temp_new_i64();
    if (fullsz <= 64) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs);
        goto done;
    }

    if (word == lastword) {
        unsigned maxsz = size_for_gvec(fullsz / 8);
        unsigned oprsz = size_for_gvec(setsz / 8);

        if (oprsz * 8 == setsz) {
            tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
            goto done;
        }
    }

    setsz /= 8;
    fullsz /= 8;

    tcg_gen_movi_i64(t, word);
    for (i = 0; i < QEMU_ALIGN_DOWN(setsz, 8); i += 8) {
        tcg_gen_st_i64(t, cpu_env, ofs + i);
    }
    if (lastword != word) {
        tcg_gen_movi_i64(t, lastword);
        tcg_gen_st_i64(t, cpu_env, ofs + i);
        i += 8;
    }
    if (i < fullsz) {
        tcg_gen_movi_i64(t, 0);
        for (; i < fullsz; i += 8) {
            tcg_gen_st_i64(t, cpu_env, ofs + i);
        }
    }

 done:
    tcg_temp_free_i64(t);

    /* PTRUES */
    if (setflag) {
        tcg_gen_movi_i32(cpu_NF, -(word != 0));
        tcg_gen_movi_i32(cpu_CF, word == 0);
        tcg_gen_movi_i32(cpu_VF, 0);
        tcg_gen_mov_i32(cpu_ZF, cpu_NF);
    }
    return true;
}

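/*
 * Example (added for clarity): PTRUE Pd.H, VL3 on a 256-bit vector
 * gives numelem = 3 and setsz = 3 << 1 = 6 predicate bits.  word is
 * pred_esz_masks[MO_16] = 0x5555...5555, and lastword keeps only its
 * low 6 bits, i.e. 0x15: bits 0, 2 and 4, one per active halfword.
 * Since fullsz <= 64 here, a single 64-bit store writes the predicate.
 */
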
TRANS_FEAT(PTRUE, aa64_sve, do_predset, a->esz, a->rd, a->pat, a->s)

/* Note pat == 31 is #all, to set all elements. */
TRANS_FEAT_NONSTREAMING(SETFFR, aa64_sve,
                        do_predset, 0, FFR_PRED_NUM, 31, false)

/* Note pat == 32 is #unimp, to set no elements. */
TRANS_FEAT(PFALSE, aa64_sve, do_predset, 0, a->rd, 32, false)

static bool trans_RDFFR_p(DisasContext *s, arg_RDFFR_p *a)
{
    /* The path through do_pppp_flags is complicated enough to want to avoid
     * duplication.  Frob the arguments into the form of a predicated AND.
     */
    arg_rprr_s alt_a = {
        .rd = a->rd, .pg = a->pg, .s = a->s,
        .rn = FFR_PRED_NUM, .rm = FFR_PRED_NUM,
    };

    s->is_nonstreaming = true;
    return trans_AND_pppp(s, &alt_a);
}

TRANS_FEAT_NONSTREAMING(RDFFR, aa64_sve, do_mov_p, a->rd, FFR_PRED_NUM)
TRANS_FEAT_NONSTREAMING(WRFFR, aa64_sve, do_mov_p, FFR_PRED_NUM, a->rn)

static bool do_pfirst_pnext(DisasContext *s, arg_rr_esz *a,
                            void (*gen_fn)(TCGv_i32, TCGv_ptr,
                                           TCGv_ptr, TCGv_i32))
{
    if (!sve_access_check(s)) {
        return true;
    }

    TCGv_ptr t_pd = tcg_temp_new_ptr();
    TCGv_ptr t_pg = tcg_temp_new_ptr();
    TCGv_i32 t;
    unsigned desc = 0;

    desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
    desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);

    tcg_gen_addi_ptr(t_pd, cpu_env, pred_full_reg_offset(s, a->rd));
    tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->rn));
    t = tcg_temp_new_i32();

    gen_fn(t, t_pd, t_pg, tcg_constant_i32(desc));
    tcg_temp_free_ptr(t_pd);
    tcg_temp_free_ptr(t_pg);

    do_pred_flags(t);
    tcg_temp_free_i32(t);
    return true;
}

TRANS_FEAT(PFIRST, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pfirst)
TRANS_FEAT(PNEXT, aa64_sve, do_pfirst_pnext, a, gen_helper_sve_pnext)

/*
 *** SVE Element Count Group
 */

/* Perform an inline saturating addition of a 32-bit value within
 * a 64-bit register.  The second operand is known to be positive,
 * which halves the comparisons we must perform to bound the result.
 */
static void do_sat_addsub_32(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    int64_t ibound;

    /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
    if (u) {
        tcg_gen_ext32u_i64(reg, reg);
    } else {
        tcg_gen_ext32s_i64(reg, reg);
    }
    if (d) {
        tcg_gen_sub_i64(reg, reg, val);
        ibound = (u ? 0 : INT32_MIN);
        tcg_gen_smax_i64(reg, reg, tcg_constant_i64(ibound));
    } else {
        tcg_gen_add_i64(reg, reg, val);
        ibound = (u ? UINT32_MAX : INT32_MAX);
        tcg_gen_smin_i64(reg, reg, tcg_constant_i64(ibound));
    }
}

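/*
 * Note (added for clarity): because the operand is widened to 64 bits
 * first, the true result always fits in 64 bits and cannot wrap, so a
 * single smax (for subtraction) or smin (for addition) against the
 * 32-bit bound is enough to saturate; val >= 0 guarantees the result
 * can only overflow in one direction.
 */
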
/* Similarly with 64-bit values. */
static void do_sat_addsub_64(TCGv_i64 reg, TCGv_i64 val, bool u, bool d)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t2;

    if (u) {
        if (d) {
            tcg_gen_sub_i64(t0, reg, val);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, reg, val, t2, t0);
        } else {
            tcg_gen_add_i64(t0, reg, val);
            t2 = tcg_constant_i64(-1);
            tcg_gen_movcond_i64(TCG_COND_LTU, reg, t0, reg, t2, t0);
        }
    } else {
        TCGv_i64 t1 = tcg_temp_new_i64();
        if (d) {
            /* Detect signed overflow for subtraction. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_sub_i64(t1, reg, val);
            tcg_gen_xor_i64(reg, reg, t1);
            tcg_gen_and_i64(t0, t0, reg);

            /* Bound the result. */
            tcg_gen_movi_i64(reg, INT64_MIN);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, reg, t1);
        } else {
            /* Detect signed overflow for addition. */
            tcg_gen_xor_i64(t0, reg, val);
            tcg_gen_add_i64(reg, reg, val);
            tcg_gen_xor_i64(t1, reg, val);
            tcg_gen_andc_i64(t0, t1, t0);

            /* Bound the result. */
            tcg_gen_movi_i64(t1, INT64_MAX);
            t2 = tcg_constant_i64(0);
            tcg_gen_movcond_i64(TCG_COND_LT, reg, t0, t2, t1, reg);
        }
        tcg_temp_free_i64(t1);
    }
    tcg_temp_free_i64(t0);
}

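/*
 * Note (added for clarity): the signed paths use the classic xor trick.
 * For addition, overflow occurred iff the operands had the same sign
 * (reg ^ val has bit 63 clear) and the result's sign differs from val's
 * (result ^ val has bit 63 set); the andc combines the two so that t0's
 * sign bit flags overflow.  Because val is known non-negative here, an
 * add can only overflow toward INT64_MAX and a sub toward INT64_MIN,
 * so each path needs just one movcond.
 */
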
/* Similarly with a vector and a scalar operand. */
static void do_sat_addsub_vec(DisasContext *s, int esz, int rd, int rn,
                              TCGv_i64 val, bool u, bool d)
{
    unsigned vsz = vec_full_reg_size(s);
    TCGv_ptr dptr, nptr;
    TCGv_i32 t32, desc;
    TCGv_i64 t64;

    dptr = tcg_temp_new_ptr();
    nptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(dptr, cpu_env, vec_full_reg_offset(s, rd));
    tcg_gen_addi_ptr(nptr, cpu_env, vec_full_reg_offset(s, rn));
    desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));

    switch (esz) {
    case MO_8:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_b(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_b(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;

    case MO_16:
        t32 = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(t32, val);
        if (d) {
            tcg_gen_neg_i32(t32, t32);
        }
        if (u) {
            gen_helper_sve_uqaddi_h(dptr, nptr, t32, desc);
        } else {
            gen_helper_sve_sqaddi_h(dptr, nptr, t32, desc);
        }
        tcg_temp_free_i32(t32);
        break;

    case MO_32:
        t64 = tcg_temp_new_i64();
        if (d) {
            tcg_gen_neg_i64(t64, val);
        } else {
            tcg_gen_mov_i64(t64, val);
        }
        if (u) {
            gen_helper_sve_uqaddi_s(dptr, nptr, t64, desc);
        } else {
            gen_helper_sve_sqaddi_s(dptr, nptr, t64, desc);
        }
        tcg_temp_free_i64(t64);
        break;

    case MO_64:
        if (u) {
            if (d) {
                gen_helper_sve_uqsubi_d(dptr, nptr, val, desc);
            } else {
                gen_helper_sve_uqaddi_d(dptr, nptr, val, desc);
            }
        } else if (d) {
            t64 = tcg_temp_new_i64();
            tcg_gen_neg_i64(t64, val);
            gen_helper_sve_sqaddi_d(dptr, nptr, t64, desc);
            tcg_temp_free_i64(t64);
        } else {
            gen_helper_sve_sqaddi_d(dptr, nptr, val, desc);
        }
        break;

    default:
        g_assert_not_reached();
    }

    tcg_temp_free_ptr(dptr);
    tcg_temp_free_ptr(nptr);
}

2041 static bool trans_CNT_r(DisasContext *s, arg_CNT_r *a)
2043 if (!dc_isar_feature(aa64_sve, s)) {
2044 return false;
2046 if (sve_access_check(s)) {
2047 unsigned fullsz = vec_full_reg_size(s);
2048 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2049 tcg_gen_movi_i64(cpu_reg(s, a->rd), numelem * a->imm);
2051 return true;
2054 static bool trans_INCDEC_r(DisasContext *s, arg_incdec_cnt *a)
2056 if (!dc_isar_feature(aa64_sve, s)) {
2057 return false;
2059 if (sve_access_check(s)) {
2060 unsigned fullsz = vec_full_reg_size(s);
2061 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2062 int inc = numelem * a->imm * (a->d ? -1 : 1);
2063 TCGv_i64 reg = cpu_reg(s, a->rd);
2065 tcg_gen_addi_i64(reg, reg, inc);
2067 return true;
2070 static bool trans_SINCDEC_r_32(DisasContext *s, arg_incdec_cnt *a)
2072 if (!dc_isar_feature(aa64_sve, s)) {
2073 return false;
2075 if (!sve_access_check(s)) {
2076 return true;
2079 unsigned fullsz = vec_full_reg_size(s);
2080 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2081 int inc = numelem * a->imm;
2082 TCGv_i64 reg = cpu_reg(s, a->rd);
2084 /* Use normal 64-bit arithmetic to detect 32-bit overflow. */
2085 if (inc == 0) {
2086 if (a->u) {
2087 tcg_gen_ext32u_i64(reg, reg);
2088 } else {
2089 tcg_gen_ext32s_i64(reg, reg);
2091 } else {
2092 do_sat_addsub_32(reg, tcg_constant_i64(inc), a->u, a->d);
2094 return true;
2097 static bool trans_SINCDEC_r_64(DisasContext *s, arg_incdec_cnt *a)
2099 if (!dc_isar_feature(aa64_sve, s)) {
2100 return false;
2102 if (!sve_access_check(s)) {
2103 return true;
2106 unsigned fullsz = vec_full_reg_size(s);
2107 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2108 int inc = numelem * a->imm;
2109 TCGv_i64 reg = cpu_reg(s, a->rd);
2111 if (inc != 0) {
2112 do_sat_addsub_64(reg, tcg_constant_i64(inc), a->u, a->d);
2114 return true;
2117 static bool trans_INCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
2119 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
2120 return false;
2123 unsigned fullsz = vec_full_reg_size(s);
2124 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2125 int inc = numelem * a->imm;
2127 if (inc != 0) {
2128 if (sve_access_check(s)) {
2129 tcg_gen_gvec_adds(a->esz, vec_full_reg_offset(s, a->rd),
2130 vec_full_reg_offset(s, a->rn),
2131 tcg_constant_i64(a->d ? -inc : inc),
2132 fullsz, fullsz);
2134 } else {
2135 do_mov_z(s, a->rd, a->rn);
2137 return true;
2140 static bool trans_SINCDEC_v(DisasContext *s, arg_incdec2_cnt *a)
2142 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
2143 return false;
2146 unsigned fullsz = vec_full_reg_size(s);
2147 unsigned numelem = decode_pred_count(fullsz, a->pat, a->esz);
2148 int inc = numelem * a->imm;
2150 if (inc != 0) {
2151 if (sve_access_check(s)) {
2152 do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
2153 tcg_constant_i64(inc), a->u, a->d);
2155 } else {
2156 do_mov_z(s, a->rd, a->rn);
return true;
}

/*
 *** SVE Bitwise Immediate Group
 */
2165 static bool do_zz_dbm(DisasContext *s, arg_rr_dbm *a, GVecGen2iFn *gvec_fn)
2167 uint64_t imm;
2168 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
2169 extract32(a->dbm, 0, 6),
2170 extract32(a->dbm, 6, 6))) {
2171 return false;
2173 return gen_gvec_fn_zzi(s, gvec_fn, MO_64, a->rd, a->rn, imm);
2176 TRANS_FEAT(AND_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_andi)
2177 TRANS_FEAT(ORR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_ori)
2178 TRANS_FEAT(EOR_zzi, aa64_sve, do_zz_dbm, a, tcg_gen_gvec_xori)
2180 static bool trans_DUPM(DisasContext *s, arg_DUPM *a)
2182 uint64_t imm;
2184 if (!dc_isar_feature(aa64_sve, s)) {
2185 return false;
2187 if (!logic_imm_decode_wmask(&imm, extract32(a->dbm, 12, 1),
2188 extract32(a->dbm, 0, 6),
2189 extract32(a->dbm, 6, 6))) {
2190 return false;
2192 if (sve_access_check(s)) {
2193 do_dupi_z(s, a->rd, imm);
return true;
}

/*
 *** SVE Integer Wide Immediate - Predicated Group
 */
/* Implement all merging copies. This is used for CPY (immediate),
 * FCPY, CPY (scalar), CPY (SIMD&FP scalar).
 */
2205 static void do_cpy_m(DisasContext *s, int esz, int rd, int rn, int pg,
2206 TCGv_i64 val)
2208 typedef void gen_cpy(TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
2209 static gen_cpy * const fns[4] = {
2210 gen_helper_sve_cpy_m_b, gen_helper_sve_cpy_m_h,
2211 gen_helper_sve_cpy_m_s, gen_helper_sve_cpy_m_d,
2213 unsigned vsz = vec_full_reg_size(s);
2214 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
2215 TCGv_ptr t_zd = tcg_temp_new_ptr();
2216 TCGv_ptr t_zn = tcg_temp_new_ptr();
2217 TCGv_ptr t_pg = tcg_temp_new_ptr();
2219 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, rd));
2220 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, rn));
2221 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
2223 fns[esz](t_zd, t_zn, t_pg, val, desc);
2225 tcg_temp_free_ptr(t_zd);
2226 tcg_temp_free_ptr(t_zn);
2227 tcg_temp_free_ptr(t_pg);
2230 static bool trans_FCPY(DisasContext *s, arg_FCPY *a)
2232 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
2233 return false;
2235 if (sve_access_check(s)) {
2236 /* Decode the VFP immediate. */
2237 uint64_t imm = vfp_expand_imm(a->esz, a->imm);
2238 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(imm));
2240 return true;
2243 static bool trans_CPY_m_i(DisasContext *s, arg_rpri_esz *a)
2245 if (!dc_isar_feature(aa64_sve, s)) {
2246 return false;
2248 if (sve_access_check(s)) {
2249 do_cpy_m(s, a->esz, a->rd, a->rn, a->pg, tcg_constant_i64(a->imm));
2251 return true;
2254 static bool trans_CPY_z_i(DisasContext *s, arg_CPY_z_i *a)
2256 static gen_helper_gvec_2i * const fns[4] = {
2257 gen_helper_sve_cpy_z_b, gen_helper_sve_cpy_z_h,
2258 gen_helper_sve_cpy_z_s, gen_helper_sve_cpy_z_d,
2261 if (!dc_isar_feature(aa64_sve, s)) {
2262 return false;
2264 if (sve_access_check(s)) {
2265 unsigned vsz = vec_full_reg_size(s);
2266 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
2267 pred_full_reg_offset(s, a->pg),
2268 tcg_constant_i64(a->imm),
2269 vsz, vsz, 0, fns[a->esz]);
return true;
}

/*
 *** SVE Permute Extract Group
 */
2278 static bool do_EXT(DisasContext *s, int rd, int rn, int rm, int imm)
2280 if (!sve_access_check(s)) {
2281 return true;
2284 unsigned vsz = vec_full_reg_size(s);
2285 unsigned n_ofs = imm >= vsz ? 0 : imm;
2286 unsigned n_siz = vsz - n_ofs;
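/*
 * EXT extracts a full vector of bytes from the concatenation Zm:Zn,
 * starting at byte n_ofs of Zn: the low n_siz bytes of the result
 * come from Zn, the remaining n_ofs bytes from the start of Zm.
 */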
2287 unsigned d = vec_full_reg_offset(s, rd);
2288 unsigned n = vec_full_reg_offset(s, rn);
2289 unsigned m = vec_full_reg_offset(s, rm);
/* Use host vector move insns if we have appropriate sizes
 * and no unfortunate overlap.
 */
2294 if (m != d
2295 && n_ofs == size_for_gvec(n_ofs)
2296 && n_siz == size_for_gvec(n_siz)
2297 && (d != n || n_siz <= n_ofs)) {
2298 tcg_gen_gvec_mov(0, d, n + n_ofs, n_siz, n_siz);
2299 if (n_ofs != 0) {
2300 tcg_gen_gvec_mov(0, d + n_siz, m, n_ofs, n_ofs);
2302 } else {
2303 tcg_gen_gvec_3_ool(d, n, m, vsz, vsz, n_ofs, gen_helper_sve_ext);
2305 return true;
2308 TRANS_FEAT(EXT, aa64_sve, do_EXT, a->rd, a->rn, a->rm, a->imm)
2309 TRANS_FEAT(EXT_sve2, aa64_sve2, do_EXT, a->rd, a->rn, (a->rn + 1) % 32, a->imm)
/*
 *** SVE Permute - Unpredicated Group
 */
2315 static bool trans_DUP_s(DisasContext *s, arg_DUP_s *a)
2317 if (!dc_isar_feature(aa64_sve, s)) {
2318 return false;
2320 if (sve_access_check(s)) {
2321 unsigned vsz = vec_full_reg_size(s);
2322 tcg_gen_gvec_dup_i64(a->esz, vec_full_reg_offset(s, a->rd),
2323 vsz, vsz, cpu_reg_sp(s, a->rn));
2325 return true;
2328 static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
2330 if (!dc_isar_feature(aa64_sve, s)) {
2331 return false;
2333 if ((a->imm & 0x1f) == 0) {
2334 return false;
2336 if (sve_access_check(s)) {
2337 unsigned vsz = vec_full_reg_size(s);
2338 unsigned dofs = vec_full_reg_offset(s, a->rd);
2339 unsigned esz, index;
2341 esz = ctz32(a->imm);
2342 index = a->imm >> (esz + 1);
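/*
 * In the immediate, the position of the lowest set bit selects the
 * element size and the bits above it give the element index:
 * e.g. imm = 0b10100 yields esz = 2 (32-bit) and index = 2.
 */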
2344 if ((index << esz) < vsz) {
2345 unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
2346 tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
} else {
/*
 * While dup_mem handles 128-bit elements, dup_imm does not.
 * Thankfully element size doesn't matter for splatting zero.
 */
2352 tcg_gen_gvec_dup_imm(MO_64, dofs, vsz, vsz, 0);
2355 return true;
2358 static void do_insr_i64(DisasContext *s, arg_rrr_esz *a, TCGv_i64 val)
2360 typedef void gen_insr(TCGv_ptr, TCGv_ptr, TCGv_i64, TCGv_i32);
2361 static gen_insr * const fns[4] = {
2362 gen_helper_sve_insr_b, gen_helper_sve_insr_h,
2363 gen_helper_sve_insr_s, gen_helper_sve_insr_d,
2365 unsigned vsz = vec_full_reg_size(s);
2366 TCGv_i32 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
2367 TCGv_ptr t_zd = tcg_temp_new_ptr();
2368 TCGv_ptr t_zn = tcg_temp_new_ptr();
2370 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, a->rd));
2371 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
2373 fns[a->esz](t_zd, t_zn, val, desc);
2375 tcg_temp_free_ptr(t_zd);
2376 tcg_temp_free_ptr(t_zn);
2379 static bool trans_INSR_f(DisasContext *s, arg_rrr_esz *a)
2381 if (!dc_isar_feature(aa64_sve, s)) {
2382 return false;
2384 if (sve_access_check(s)) {
2385 TCGv_i64 t = tcg_temp_new_i64();
2386 tcg_gen_ld_i64(t, cpu_env, vec_reg_offset(s, a->rm, 0, MO_64));
2387 do_insr_i64(s, a, t);
2388 tcg_temp_free_i64(t);
2390 return true;
2393 static bool trans_INSR_r(DisasContext *s, arg_rrr_esz *a)
2395 if (!dc_isar_feature(aa64_sve, s)) {
2396 return false;
2398 if (sve_access_check(s)) {
2399 do_insr_i64(s, a, cpu_reg(s, a->rm));
2401 return true;
2404 static gen_helper_gvec_2 * const rev_fns[4] = {
2405 gen_helper_sve_rev_b, gen_helper_sve_rev_h,
2406 gen_helper_sve_rev_s, gen_helper_sve_rev_d
2408 TRANS_FEAT(REV_v, aa64_sve, gen_gvec_ool_zz, rev_fns[a->esz], a->rd, a->rn, 0)
2410 static gen_helper_gvec_3 * const sve_tbl_fns[4] = {
2411 gen_helper_sve_tbl_b, gen_helper_sve_tbl_h,
2412 gen_helper_sve_tbl_s, gen_helper_sve_tbl_d
2414 TRANS_FEAT(TBL, aa64_sve, gen_gvec_ool_arg_zzz, sve_tbl_fns[a->esz], a, 0)
2416 static gen_helper_gvec_4 * const sve2_tbl_fns[4] = {
2417 gen_helper_sve2_tbl_b, gen_helper_sve2_tbl_h,
2418 gen_helper_sve2_tbl_s, gen_helper_sve2_tbl_d
2420 TRANS_FEAT(TBL_sve2, aa64_sve2, gen_gvec_ool_zzzz, sve2_tbl_fns[a->esz],
2421 a->rd, a->rn, (a->rn + 1) % 32, a->rm, 0)
2423 static gen_helper_gvec_3 * const tbx_fns[4] = {
2424 gen_helper_sve2_tbx_b, gen_helper_sve2_tbx_h,
2425 gen_helper_sve2_tbx_s, gen_helper_sve2_tbx_d
2427 TRANS_FEAT(TBX, aa64_sve2, gen_gvec_ool_arg_zzz, tbx_fns[a->esz], a, 0)
2429 static bool trans_UNPK(DisasContext *s, arg_UNPK *a)
2431 static gen_helper_gvec_2 * const fns[4][2] = {
2432 { NULL, NULL },
2433 { gen_helper_sve_sunpk_h, gen_helper_sve_uunpk_h },
2434 { gen_helper_sve_sunpk_s, gen_helper_sve_uunpk_s },
2435 { gen_helper_sve_sunpk_d, gen_helper_sve_uunpk_d },
2438 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
2439 return false;
2441 if (sve_access_check(s)) {
2442 unsigned vsz = vec_full_reg_size(s);
2443 tcg_gen_gvec_2_ool(vec_full_reg_offset(s, a->rd),
2444 vec_full_reg_offset(s, a->rn)
2445 + (a->h ? vsz / 2 : 0),
2446 vsz, vsz, 0, fns[a->esz][a->u]);
return true;
}

/*
 *** SVE Permute - Predicates Group
 */
2455 static bool do_perm_pred3(DisasContext *s, arg_rrr_esz *a, bool high_odd,
2456 gen_helper_gvec_3 *fn)
2458 if (!sve_access_check(s)) {
2459 return true;
2462 unsigned vsz = pred_full_reg_size(s);
2464 TCGv_ptr t_d = tcg_temp_new_ptr();
2465 TCGv_ptr t_n = tcg_temp_new_ptr();
2466 TCGv_ptr t_m = tcg_temp_new_ptr();
2467 uint32_t desc = 0;
2469 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
2470 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
2471 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
2473 tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
2474 tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
2475 tcg_gen_addi_ptr(t_m, cpu_env, pred_full_reg_offset(s, a->rm));
2477 fn(t_d, t_n, t_m, tcg_constant_i32(desc));
2479 tcg_temp_free_ptr(t_d);
2480 tcg_temp_free_ptr(t_n);
2481 tcg_temp_free_ptr(t_m);
2482 return true;
2485 static bool do_perm_pred2(DisasContext *s, arg_rr_esz *a, bool high_odd,
2486 gen_helper_gvec_2 *fn)
2488 if (!sve_access_check(s)) {
2489 return true;
2492 unsigned vsz = pred_full_reg_size(s);
2493 TCGv_ptr t_d = tcg_temp_new_ptr();
2494 TCGv_ptr t_n = tcg_temp_new_ptr();
2495 uint32_t desc = 0;
2497 tcg_gen_addi_ptr(t_d, cpu_env, pred_full_reg_offset(s, a->rd));
2498 tcg_gen_addi_ptr(t_n, cpu_env, pred_full_reg_offset(s, a->rn));
2500 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz);
2501 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
2502 desc = FIELD_DP32(desc, PREDDESC, DATA, high_odd);
2504 fn(t_d, t_n, tcg_constant_i32(desc));
2506 tcg_temp_free_ptr(t_d);
2507 tcg_temp_free_ptr(t_n);
2508 return true;
2511 TRANS_FEAT(ZIP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_zip_p)
2512 TRANS_FEAT(ZIP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_zip_p)
2513 TRANS_FEAT(UZP1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_uzp_p)
2514 TRANS_FEAT(UZP2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_uzp_p)
2515 TRANS_FEAT(TRN1_p, aa64_sve, do_perm_pred3, a, 0, gen_helper_sve_trn_p)
2516 TRANS_FEAT(TRN2_p, aa64_sve, do_perm_pred3, a, 1, gen_helper_sve_trn_p)
2518 TRANS_FEAT(REV_p, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_rev_p)
2519 TRANS_FEAT(PUNPKLO, aa64_sve, do_perm_pred2, a, 0, gen_helper_sve_punpk_p)
2520 TRANS_FEAT(PUNPKHI, aa64_sve, do_perm_pred2, a, 1, gen_helper_sve_punpk_p)
/*
 *** SVE Permute - Interleaving Group
 */
2526 static gen_helper_gvec_3 * const zip_fns[4] = {
2527 gen_helper_sve_zip_b, gen_helper_sve_zip_h,
2528 gen_helper_sve_zip_s, gen_helper_sve_zip_d,
2530 TRANS_FEAT(ZIP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2531 zip_fns[a->esz], a, 0)
2532 TRANS_FEAT(ZIP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2533 zip_fns[a->esz], a, vec_full_reg_size(s) / 2)
2535 TRANS_FEAT(ZIP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2536 gen_helper_sve2_zip_q, a, 0)
2537 TRANS_FEAT(ZIP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2538 gen_helper_sve2_zip_q, a,
2539 QEMU_ALIGN_DOWN(vec_full_reg_size(s), 32) / 2)
2541 static gen_helper_gvec_3 * const uzp_fns[4] = {
2542 gen_helper_sve_uzp_b, gen_helper_sve_uzp_h,
2543 gen_helper_sve_uzp_s, gen_helper_sve_uzp_d,
2546 TRANS_FEAT(UZP1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2547 uzp_fns[a->esz], a, 0)
2548 TRANS_FEAT(UZP2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2549 uzp_fns[a->esz], a, 1 << a->esz)
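/*
 * The data argument to the uzp helpers above is the byte offset of
 * the first element to select: 0 takes the even elements (UZP1),
 * 1 << esz the odd elements (UZP2).
 */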
2551 TRANS_FEAT(UZP1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2552 gen_helper_sve2_uzp_q, a, 0)
2553 TRANS_FEAT(UZP2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2554 gen_helper_sve2_uzp_q, a, 16)
2556 static gen_helper_gvec_3 * const trn_fns[4] = {
2557 gen_helper_sve_trn_b, gen_helper_sve_trn_h,
2558 gen_helper_sve_trn_s, gen_helper_sve_trn_d,
2561 TRANS_FEAT(TRN1_z, aa64_sve, gen_gvec_ool_arg_zzz,
2562 trn_fns[a->esz], a, 0)
2563 TRANS_FEAT(TRN2_z, aa64_sve, gen_gvec_ool_arg_zzz,
2564 trn_fns[a->esz], a, 1 << a->esz)
2566 TRANS_FEAT(TRN1_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2567 gen_helper_sve2_trn_q, a, 0)
2568 TRANS_FEAT(TRN2_q, aa64_sve_f64mm, gen_gvec_ool_arg_zzz,
2569 gen_helper_sve2_trn_q, a, 16)
/*
 *** SVE Permute Vector - Predicated Group
 */
2575 static gen_helper_gvec_3 * const compact_fns[4] = {
2576 NULL, NULL, gen_helper_sve_compact_s, gen_helper_sve_compact_d
2578 TRANS_FEAT_NONSTREAMING(COMPACT, aa64_sve, gen_gvec_ool_arg_zpz,
2579 compact_fns[a->esz], a, 0)
/* Call the helper that computes the ARM LastActiveElement pseudocode
 * function, scaled by the element size. This includes the not found
 * indication; e.g. not found for esz=3 is -8.
 */
2585 static void find_last_active(DisasContext *s, TCGv_i32 ret, int esz, int pg)
/* Predicate sizes may be smaller and cannot use simd_desc. We cannot
 * round up, as we do elsewhere, because we need the exact size.
 */
2590 TCGv_ptr t_p = tcg_temp_new_ptr();
2591 unsigned desc = 0;
2593 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, pred_full_reg_size(s));
2594 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
2596 tcg_gen_addi_ptr(t_p, cpu_env, pred_full_reg_offset(s, pg));
2598 gen_helper_sve_last_active_element(ret, t_p, tcg_constant_i32(desc));
2600 tcg_temp_free_ptr(t_p);
/* Increment LAST to the offset of the next element in the vector,
 * wrapping around to 0.
 */
2606 static void incr_last_active(DisasContext *s, TCGv_i32 last, int esz)
2608 unsigned vsz = vec_full_reg_size(s);
2610 tcg_gen_addi_i32(last, last, 1 << esz);
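/*
 * A power-of-2 vector length wraps with a simple mask; other legal
 * lengths (e.g. a 384-bit VL) need an explicit compare that folds
 * any offset >= vsz back to 0.
 */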
2611 if (is_power_of_2(vsz)) {
2612 tcg_gen_andi_i32(last, last, vsz - 1);
2613 } else {
2614 TCGv_i32 max = tcg_constant_i32(vsz);
2615 TCGv_i32 zero = tcg_constant_i32(0);
2616 tcg_gen_movcond_i32(TCG_COND_GEU, last, last, max, zero, last);
2620 /* If LAST < 0, set LAST to the offset of the last element in the vector. */
2621 static void wrap_last_active(DisasContext *s, TCGv_i32 last, int esz)
2623 unsigned vsz = vec_full_reg_size(s);
2625 if (is_power_of_2(vsz)) {
2626 tcg_gen_andi_i32(last, last, vsz - 1);
2627 } else {
2628 TCGv_i32 max = tcg_constant_i32(vsz - (1 << esz));
2629 TCGv_i32 zero = tcg_constant_i32(0);
2630 tcg_gen_movcond_i32(TCG_COND_LT, last, last, zero, max, last);
2634 /* Load an unsigned element of ESZ from BASE+OFS. */
2635 static TCGv_i64 load_esz(TCGv_ptr base, int ofs, int esz)
2637 TCGv_i64 r = tcg_temp_new_i64();
2639 switch (esz) {
2640 case 0:
2641 tcg_gen_ld8u_i64(r, base, ofs);
2642 break;
2643 case 1:
2644 tcg_gen_ld16u_i64(r, base, ofs);
2645 break;
2646 case 2:
2647 tcg_gen_ld32u_i64(r, base, ofs);
2648 break;
2649 case 3:
2650 tcg_gen_ld_i64(r, base, ofs);
2651 break;
2652 default:
2653 g_assert_not_reached();
2655 return r;
2658 /* Load an unsigned element of ESZ from RM[LAST]. */
2659 static TCGv_i64 load_last_active(DisasContext *s, TCGv_i32 last,
2660 int rm, int esz)
2662 TCGv_ptr p = tcg_temp_new_ptr();
2663 TCGv_i64 r;
/* Convert offset into vector into offset into ENV.
 * The final adjustment for the vector register base
 * is added via constant offset to the load.
 */
2669 #if HOST_BIG_ENDIAN
2670 /* Adjust for element ordering. See vec_reg_offset. */
2671 if (esz < 3) {
2672 tcg_gen_xori_i32(last, last, 8 - (1 << esz));
2674 #endif
2675 tcg_gen_ext_i32_ptr(p, last);
2676 tcg_gen_add_ptr(p, p, cpu_env);
2678 r = load_esz(p, vec_full_reg_offset(s, rm), esz);
2679 tcg_temp_free_ptr(p);
2681 return r;
2684 /* Compute CLAST for a Zreg. */
2685 static bool do_clast_vector(DisasContext *s, arg_rprr_esz *a, bool before)
2687 TCGv_i32 last;
2688 TCGLabel *over;
2689 TCGv_i64 ele;
2690 unsigned vsz, esz = a->esz;
2692 if (!sve_access_check(s)) {
2693 return true;
2696 last = tcg_temp_local_new_i32();
2697 over = gen_new_label();
2699 find_last_active(s, last, esz, a->pg);
/* There is of course no movcond for a 2048-bit vector,
 * so we must branch over the actual store.
 */
2704 tcg_gen_brcondi_i32(TCG_COND_LT, last, 0, over);
2706 if (!before) {
2707 incr_last_active(s, last, esz);
2710 ele = load_last_active(s, last, a->rm, esz);
2711 tcg_temp_free_i32(last);
2713 vsz = vec_full_reg_size(s);
2714 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd), vsz, vsz, ele);
2715 tcg_temp_free_i64(ele);
2717 /* If this insn used MOVPRFX, we may need a second move. */
2718 if (a->rd != a->rn) {
2719 TCGLabel *done = gen_new_label();
2720 tcg_gen_br(done);
2722 gen_set_label(over);
2723 do_mov_z(s, a->rd, a->rn);
2725 gen_set_label(done);
2726 } else {
2727 gen_set_label(over);
2729 return true;
2732 TRANS_FEAT(CLASTA_z, aa64_sve, do_clast_vector, a, false)
2733 TRANS_FEAT(CLASTB_z, aa64_sve, do_clast_vector, a, true)
2735 /* Compute CLAST for a scalar. */
2736 static void do_clast_scalar(DisasContext *s, int esz, int pg, int rm,
2737 bool before, TCGv_i64 reg_val)
2739 TCGv_i32 last = tcg_temp_new_i32();
2740 TCGv_i64 ele, cmp;
2742 find_last_active(s, last, esz, pg);
2744 /* Extend the original value of last prior to incrementing. */
2745 cmp = tcg_temp_new_i64();
2746 tcg_gen_ext_i32_i64(cmp, last);
2748 if (!before) {
2749 incr_last_active(s, last, esz);
/* The conceit here is that while last < 0 indicates not found, after
 * adjusting for cpu_env->vfp.zregs[rm], it is still a valid address
 * from which we can load garbage. We then discard the garbage with
 * a conditional move.
 */
2757 ele = load_last_active(s, last, rm, esz);
2758 tcg_temp_free_i32(last);
2760 tcg_gen_movcond_i64(TCG_COND_GE, reg_val, cmp, tcg_constant_i64(0),
2761 ele, reg_val);
2763 tcg_temp_free_i64(cmp);
2764 tcg_temp_free_i64(ele);
2767 /* Compute CLAST for a Vreg. */
2768 static bool do_clast_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2770 if (sve_access_check(s)) {
2771 int esz = a->esz;
2772 int ofs = vec_reg_offset(s, a->rd, 0, esz);
2773 TCGv_i64 reg = load_esz(cpu_env, ofs, esz);
2775 do_clast_scalar(s, esz, a->pg, a->rn, before, reg);
2776 write_fp_dreg(s, a->rd, reg);
2777 tcg_temp_free_i64(reg);
2779 return true;
2782 TRANS_FEAT(CLASTA_v, aa64_sve, do_clast_fp, a, false)
2783 TRANS_FEAT(CLASTB_v, aa64_sve, do_clast_fp, a, true)
2785 /* Compute CLAST for a Xreg. */
2786 static bool do_clast_general(DisasContext *s, arg_rpr_esz *a, bool before)
2788 TCGv_i64 reg;
2790 if (!sve_access_check(s)) {
2791 return true;
2794 reg = cpu_reg(s, a->rd);
2795 switch (a->esz) {
2796 case 0:
2797 tcg_gen_ext8u_i64(reg, reg);
2798 break;
2799 case 1:
2800 tcg_gen_ext16u_i64(reg, reg);
2801 break;
2802 case 2:
2803 tcg_gen_ext32u_i64(reg, reg);
2804 break;
2805 case 3:
2806 break;
2807 default:
2808 g_assert_not_reached();
2811 do_clast_scalar(s, a->esz, a->pg, a->rn, before, reg);
2812 return true;
2815 TRANS_FEAT(CLASTA_r, aa64_sve, do_clast_general, a, false)
2816 TRANS_FEAT(CLASTB_r, aa64_sve, do_clast_general, a, true)
2818 /* Compute LAST for a scalar. */
2819 static TCGv_i64 do_last_scalar(DisasContext *s, int esz,
2820 int pg, int rm, bool before)
2822 TCGv_i32 last = tcg_temp_new_i32();
2823 TCGv_i64 ret;
2825 find_last_active(s, last, esz, pg);
2826 if (before) {
2827 wrap_last_active(s, last, esz);
2828 } else {
2829 incr_last_active(s, last, esz);
2832 ret = load_last_active(s, last, rm, esz);
2833 tcg_temp_free_i32(last);
2834 return ret;
2837 /* Compute LAST for a Vreg. */
2838 static bool do_last_fp(DisasContext *s, arg_rpr_esz *a, bool before)
2840 if (sve_access_check(s)) {
2841 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2842 write_fp_dreg(s, a->rd, val);
2843 tcg_temp_free_i64(val);
2845 return true;
2848 TRANS_FEAT(LASTA_v, aa64_sve, do_last_fp, a, false)
2849 TRANS_FEAT(LASTB_v, aa64_sve, do_last_fp, a, true)
2851 /* Compute LAST for a Xreg. */
2852 static bool do_last_general(DisasContext *s, arg_rpr_esz *a, bool before)
2854 if (sve_access_check(s)) {
2855 TCGv_i64 val = do_last_scalar(s, a->esz, a->pg, a->rn, before);
2856 tcg_gen_mov_i64(cpu_reg(s, a->rd), val);
2857 tcg_temp_free_i64(val);
2859 return true;
2862 TRANS_FEAT(LASTA_r, aa64_sve, do_last_general, a, false)
2863 TRANS_FEAT(LASTB_r, aa64_sve, do_last_general, a, true)
2865 static bool trans_CPY_m_r(DisasContext *s, arg_rpr_esz *a)
2867 if (!dc_isar_feature(aa64_sve, s)) {
2868 return false;
2870 if (sve_access_check(s)) {
2871 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, cpu_reg_sp(s, a->rn));
2873 return true;
2876 static bool trans_CPY_m_v(DisasContext *s, arg_rpr_esz *a)
2878 if (!dc_isar_feature(aa64_sve, s)) {
2879 return false;
2881 if (sve_access_check(s)) {
2882 int ofs = vec_reg_offset(s, a->rn, 0, a->esz);
2883 TCGv_i64 t = load_esz(cpu_env, ofs, a->esz);
2884 do_cpy_m(s, a->esz, a->rd, a->rd, a->pg, t);
2885 tcg_temp_free_i64(t);
2887 return true;
2890 static gen_helper_gvec_3 * const revb_fns[4] = {
2891 NULL, gen_helper_sve_revb_h,
2892 gen_helper_sve_revb_s, gen_helper_sve_revb_d,
2894 TRANS_FEAT(REVB, aa64_sve, gen_gvec_ool_arg_zpz, revb_fns[a->esz], a, 0)
2896 static gen_helper_gvec_3 * const revh_fns[4] = {
2897 NULL, NULL, gen_helper_sve_revh_s, gen_helper_sve_revh_d,
2899 TRANS_FEAT(REVH, aa64_sve, gen_gvec_ool_arg_zpz, revh_fns[a->esz], a, 0)
2901 TRANS_FEAT(REVW, aa64_sve, gen_gvec_ool_arg_zpz,
2902 a->esz == 3 ? gen_helper_sve_revw_d : NULL, a, 0)
2904 TRANS_FEAT(REVD, aa64_sme, gen_gvec_ool_arg_zpz, gen_helper_sme_revd_q, a, 0)
2906 TRANS_FEAT(SPLICE, aa64_sve, gen_gvec_ool_arg_zpzz,
2907 gen_helper_sve_splice, a, a->esz)
2909 TRANS_FEAT(SPLICE_sve2, aa64_sve2, gen_gvec_ool_zzzp, gen_helper_sve_splice,
2910 a->rd, a->rn, (a->rn + 1) % 32, a->pg, a->esz)
/*
 *** SVE Integer Compare - Vectors Group
 */
2916 static bool do_ppzz_flags(DisasContext *s, arg_rprr_esz *a,
2917 gen_helper_gvec_flags_4 *gen_fn)
2919 TCGv_ptr pd, zn, zm, pg;
2920 unsigned vsz;
2921 TCGv_i32 t;
2923 if (gen_fn == NULL) {
2924 return false;
2926 if (!sve_access_check(s)) {
2927 return true;
2930 vsz = vec_full_reg_size(s);
2931 t = tcg_temp_new_i32();
2932 pd = tcg_temp_new_ptr();
2933 zn = tcg_temp_new_ptr();
2934 zm = tcg_temp_new_ptr();
2935 pg = tcg_temp_new_ptr();
2937 tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
2938 tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
2939 tcg_gen_addi_ptr(zm, cpu_env, vec_full_reg_offset(s, a->rm));
2940 tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
2942 gen_fn(t, pd, zn, zm, pg, tcg_constant_i32(simd_desc(vsz, vsz, 0)));
2944 tcg_temp_free_ptr(pd);
2945 tcg_temp_free_ptr(zn);
2946 tcg_temp_free_ptr(zm);
2947 tcg_temp_free_ptr(pg);
2949 do_pred_flags(t);
2951 tcg_temp_free_i32(t);
2952 return true;
2955 #define DO_PPZZ(NAME, name) \
2956 static gen_helper_gvec_flags_4 * const name##_ppzz_fns[4] = { \
2957 gen_helper_sve_##name##_ppzz_b, gen_helper_sve_##name##_ppzz_h, \
2958 gen_helper_sve_##name##_ppzz_s, gen_helper_sve_##name##_ppzz_d, \
2959 }; \
2960 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_ppzz_flags, \
2961 a, name##_ppzz_fns[a->esz])
2963 DO_PPZZ(CMPEQ, cmpeq)
2964 DO_PPZZ(CMPNE, cmpne)
2965 DO_PPZZ(CMPGT, cmpgt)
2966 DO_PPZZ(CMPGE, cmpge)
2967 DO_PPZZ(CMPHI, cmphi)
2968 DO_PPZZ(CMPHS, cmphs)
2970 #undef DO_PPZZ
2972 #define DO_PPZW(NAME, name) \
2973 static gen_helper_gvec_flags_4 * const name##_ppzw_fns[4] = { \
2974 gen_helper_sve_##name##_ppzw_b, gen_helper_sve_##name##_ppzw_h, \
2975 gen_helper_sve_##name##_ppzw_s, NULL \
2976 }; \
2977 TRANS_FEAT(NAME##_ppzw, aa64_sve, do_ppzz_flags, \
2978 a, name##_ppzw_fns[a->esz])
2980 DO_PPZW(CMPEQ, cmpeq)
2981 DO_PPZW(CMPNE, cmpne)
2982 DO_PPZW(CMPGT, cmpgt)
2983 DO_PPZW(CMPGE, cmpge)
2984 DO_PPZW(CMPHI, cmphi)
2985 DO_PPZW(CMPHS, cmphs)
2986 DO_PPZW(CMPLT, cmplt)
2987 DO_PPZW(CMPLE, cmple)
2988 DO_PPZW(CMPLO, cmplo)
2989 DO_PPZW(CMPLS, cmpls)
2991 #undef DO_PPZW
/*
 *** SVE Integer Compare - Immediate Groups
 */
2997 static bool do_ppzi_flags(DisasContext *s, arg_rpri_esz *a,
2998 gen_helper_gvec_flags_3 *gen_fn)
3000 TCGv_ptr pd, zn, pg;
3001 unsigned vsz;
3002 TCGv_i32 t;
3004 if (gen_fn == NULL) {
3005 return false;
3007 if (!sve_access_check(s)) {
3008 return true;
3011 vsz = vec_full_reg_size(s);
3012 t = tcg_temp_new_i32();
3013 pd = tcg_temp_new_ptr();
3014 zn = tcg_temp_new_ptr();
3015 pg = tcg_temp_new_ptr();
3017 tcg_gen_addi_ptr(pd, cpu_env, pred_full_reg_offset(s, a->rd));
3018 tcg_gen_addi_ptr(zn, cpu_env, vec_full_reg_offset(s, a->rn));
3019 tcg_gen_addi_ptr(pg, cpu_env, pred_full_reg_offset(s, a->pg));
3021 gen_fn(t, pd, zn, pg, tcg_constant_i32(simd_desc(vsz, vsz, a->imm)));
3023 tcg_temp_free_ptr(pd);
3024 tcg_temp_free_ptr(zn);
3025 tcg_temp_free_ptr(pg);
3027 do_pred_flags(t);
3029 tcg_temp_free_i32(t);
3030 return true;
3033 #define DO_PPZI(NAME, name) \
3034 static gen_helper_gvec_flags_3 * const name##_ppzi_fns[4] = { \
3035 gen_helper_sve_##name##_ppzi_b, gen_helper_sve_##name##_ppzi_h, \
3036 gen_helper_sve_##name##_ppzi_s, gen_helper_sve_##name##_ppzi_d, \
3037 }; \
3038 TRANS_FEAT(NAME##_ppzi, aa64_sve, do_ppzi_flags, a, \
3039 name##_ppzi_fns[a->esz])
3041 DO_PPZI(CMPEQ, cmpeq)
3042 DO_PPZI(CMPNE, cmpne)
3043 DO_PPZI(CMPGT, cmpgt)
3044 DO_PPZI(CMPGE, cmpge)
3045 DO_PPZI(CMPHI, cmphi)
3046 DO_PPZI(CMPHS, cmphs)
3047 DO_PPZI(CMPLT, cmplt)
3048 DO_PPZI(CMPLE, cmple)
3049 DO_PPZI(CMPLO, cmplo)
3050 DO_PPZI(CMPLS, cmpls)
3052 #undef DO_PPZI
/*
 *** SVE Partition Break Group
 */
3058 static bool do_brk3(DisasContext *s, arg_rprr_s *a,
3059 gen_helper_gvec_4 *fn, gen_helper_gvec_flags_4 *fn_s)
3061 if (!sve_access_check(s)) {
3062 return true;
3065 unsigned vsz = pred_full_reg_size(s);
3067 /* Predicate sizes may be smaller and cannot use simd_desc. */
3068 TCGv_ptr d = tcg_temp_new_ptr();
3069 TCGv_ptr n = tcg_temp_new_ptr();
3070 TCGv_ptr m = tcg_temp_new_ptr();
3071 TCGv_ptr g = tcg_temp_new_ptr();
3072 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
3074 tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
3075 tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
3076 tcg_gen_addi_ptr(m, cpu_env, pred_full_reg_offset(s, a->rm));
3077 tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
3079 if (a->s) {
3080 TCGv_i32 t = tcg_temp_new_i32();
3081 fn_s(t, d, n, m, g, desc);
3082 do_pred_flags(t);
3083 tcg_temp_free_i32(t);
3084 } else {
3085 fn(d, n, m, g, desc);
3087 tcg_temp_free_ptr(d);
3088 tcg_temp_free_ptr(n);
3089 tcg_temp_free_ptr(m);
3090 tcg_temp_free_ptr(g);
3091 return true;
3094 static bool do_brk2(DisasContext *s, arg_rpr_s *a,
3095 gen_helper_gvec_3 *fn, gen_helper_gvec_flags_3 *fn_s)
3097 if (!sve_access_check(s)) {
3098 return true;
3101 unsigned vsz = pred_full_reg_size(s);
3103 /* Predicate sizes may be smaller and cannot use simd_desc. */
3104 TCGv_ptr d = tcg_temp_new_ptr();
3105 TCGv_ptr n = tcg_temp_new_ptr();
3106 TCGv_ptr g = tcg_temp_new_ptr();
3107 TCGv_i32 desc = tcg_constant_i32(FIELD_DP32(0, PREDDESC, OPRSZ, vsz));
3109 tcg_gen_addi_ptr(d, cpu_env, pred_full_reg_offset(s, a->rd));
3110 tcg_gen_addi_ptr(n, cpu_env, pred_full_reg_offset(s, a->rn));
3111 tcg_gen_addi_ptr(g, cpu_env, pred_full_reg_offset(s, a->pg));
3113 if (a->s) {
3114 TCGv_i32 t = tcg_temp_new_i32();
3115 fn_s(t, d, n, g, desc);
3116 do_pred_flags(t);
3117 tcg_temp_free_i32(t);
3118 } else {
3119 fn(d, n, g, desc);
3121 tcg_temp_free_ptr(d);
3122 tcg_temp_free_ptr(n);
3123 tcg_temp_free_ptr(g);
3124 return true;
3127 TRANS_FEAT(BRKPA, aa64_sve, do_brk3, a,
3128 gen_helper_sve_brkpa, gen_helper_sve_brkpas)
3129 TRANS_FEAT(BRKPB, aa64_sve, do_brk3, a,
3130 gen_helper_sve_brkpb, gen_helper_sve_brkpbs)
3132 TRANS_FEAT(BRKA_m, aa64_sve, do_brk2, a,
3133 gen_helper_sve_brka_m, gen_helper_sve_brkas_m)
3134 TRANS_FEAT(BRKB_m, aa64_sve, do_brk2, a,
3135 gen_helper_sve_brkb_m, gen_helper_sve_brkbs_m)
3137 TRANS_FEAT(BRKA_z, aa64_sve, do_brk2, a,
3138 gen_helper_sve_brka_z, gen_helper_sve_brkas_z)
3139 TRANS_FEAT(BRKB_z, aa64_sve, do_brk2, a,
3140 gen_helper_sve_brkb_z, gen_helper_sve_brkbs_z)
3142 TRANS_FEAT(BRKN, aa64_sve, do_brk2, a,
3143 gen_helper_sve_brkn, gen_helper_sve_brkns)
/*
 *** SVE Predicate Count Group
 */
3149 static void do_cntp(DisasContext *s, TCGv_i64 val, int esz, int pn, int pg)
3151 unsigned psz = pred_full_reg_size(s);
3153 if (psz <= 8) {
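/*
 * With at most 64 predicate bits (i.e. VL <= 512), the whole
 * predicate fits in one i64: AND the two predicates, mask to the
 * bits valid for this element size, and popcount inline rather
 * than calling the helper.
 */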
3154 uint64_t psz_mask;
3156 tcg_gen_ld_i64(val, cpu_env, pred_full_reg_offset(s, pn));
3157 if (pn != pg) {
3158 TCGv_i64 g = tcg_temp_new_i64();
3159 tcg_gen_ld_i64(g, cpu_env, pred_full_reg_offset(s, pg));
3160 tcg_gen_and_i64(val, val, g);
3161 tcg_temp_free_i64(g);
/* Reduce the pred_esz_masks value simply to reduce the
 * size of the code generated here.
 */
3167 psz_mask = MAKE_64BIT_MASK(0, psz * 8);
3168 tcg_gen_andi_i64(val, val, pred_esz_masks[esz] & psz_mask);
3170 tcg_gen_ctpop_i64(val, val);
3171 } else {
3172 TCGv_ptr t_pn = tcg_temp_new_ptr();
3173 TCGv_ptr t_pg = tcg_temp_new_ptr();
3174 unsigned desc = 0;
3176 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, psz);
3177 desc = FIELD_DP32(desc, PREDDESC, ESZ, esz);
3179 tcg_gen_addi_ptr(t_pn, cpu_env, pred_full_reg_offset(s, pn));
3180 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
3182 gen_helper_sve_cntp(val, t_pn, t_pg, tcg_constant_i32(desc));
3183 tcg_temp_free_ptr(t_pn);
3184 tcg_temp_free_ptr(t_pg);
3188 static bool trans_CNTP(DisasContext *s, arg_CNTP *a)
3190 if (!dc_isar_feature(aa64_sve, s)) {
3191 return false;
3193 if (sve_access_check(s)) {
3194 do_cntp(s, cpu_reg(s, a->rd), a->esz, a->rn, a->pg);
3196 return true;
3199 static bool trans_INCDECP_r(DisasContext *s, arg_incdec_pred *a)
3201 if (!dc_isar_feature(aa64_sve, s)) {
3202 return false;
3204 if (sve_access_check(s)) {
3205 TCGv_i64 reg = cpu_reg(s, a->rd);
3206 TCGv_i64 val = tcg_temp_new_i64();
3208 do_cntp(s, val, a->esz, a->pg, a->pg);
3209 if (a->d) {
3210 tcg_gen_sub_i64(reg, reg, val);
3211 } else {
3212 tcg_gen_add_i64(reg, reg, val);
3214 tcg_temp_free_i64(val);
3216 return true;
3219 static bool trans_INCDECP_z(DisasContext *s, arg_incdec2_pred *a)
3221 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
3222 return false;
3224 if (sve_access_check(s)) {
3225 unsigned vsz = vec_full_reg_size(s);
3226 TCGv_i64 val = tcg_temp_new_i64();
3227 GVecGen2sFn *gvec_fn = a->d ? tcg_gen_gvec_subs : tcg_gen_gvec_adds;
3229 do_cntp(s, val, a->esz, a->pg, a->pg);
3230 gvec_fn(a->esz, vec_full_reg_offset(s, a->rd),
3231 vec_full_reg_offset(s, a->rn), val, vsz, vsz);
3233 return true;
3236 static bool trans_SINCDECP_r_32(DisasContext *s, arg_incdec_pred *a)
3238 if (!dc_isar_feature(aa64_sve, s)) {
3239 return false;
3241 if (sve_access_check(s)) {
3242 TCGv_i64 reg = cpu_reg(s, a->rd);
3243 TCGv_i64 val = tcg_temp_new_i64();
3245 do_cntp(s, val, a->esz, a->pg, a->pg);
3246 do_sat_addsub_32(reg, val, a->u, a->d);
3248 return true;
3251 static bool trans_SINCDECP_r_64(DisasContext *s, arg_incdec_pred *a)
3253 if (!dc_isar_feature(aa64_sve, s)) {
3254 return false;
3256 if (sve_access_check(s)) {
3257 TCGv_i64 reg = cpu_reg(s, a->rd);
3258 TCGv_i64 val = tcg_temp_new_i64();
3260 do_cntp(s, val, a->esz, a->pg, a->pg);
3261 do_sat_addsub_64(reg, val, a->u, a->d);
3263 return true;
3266 static bool trans_SINCDECP_z(DisasContext *s, arg_incdec2_pred *a)
3268 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
3269 return false;
3271 if (sve_access_check(s)) {
3272 TCGv_i64 val = tcg_temp_new_i64();
3273 do_cntp(s, val, a->esz, a->pg, a->pg);
3274 do_sat_addsub_vec(s, a->esz, a->rd, a->rn, val, a->u, a->d);
return true;
}

/*
 *** SVE Integer Compare Scalars Group
 */
3283 static bool trans_CTERM(DisasContext *s, arg_CTERM *a)
3285 if (!dc_isar_feature(aa64_sve, s)) {
3286 return false;
3288 if (!sve_access_check(s)) {
3289 return true;
3292 TCGCond cond = (a->ne ? TCG_COND_NE : TCG_COND_EQ);
3293 TCGv_i64 rn = read_cpu_reg(s, a->rn, a->sf);
3294 TCGv_i64 rm = read_cpu_reg(s, a->rm, a->sf);
3295 TCGv_i64 cmp = tcg_temp_new_i64();
3297 tcg_gen_setcond_i64(cond, cmp, rn, rm);
3298 tcg_gen_extrl_i64_i32(cpu_NF, cmp);
3299 tcg_temp_free_i64(cmp);
3301 /* VF = !NF & !CF. */
3302 tcg_gen_xori_i32(cpu_VF, cpu_NF, 1);
3303 tcg_gen_andc_i32(cpu_VF, cpu_VF, cpu_CF);
3305 /* Both NF and VF actually look at bit 31. */
3306 tcg_gen_neg_i32(cpu_NF, cpu_NF);
3307 tcg_gen_neg_i32(cpu_VF, cpu_VF);
3308 return true;
3311 static bool trans_WHILE(DisasContext *s, arg_WHILE *a)
3313 TCGv_i64 op0, op1, t0, t1, tmax;
3314 TCGv_i32 t2;
3315 TCGv_ptr ptr;
3316 unsigned vsz = vec_full_reg_size(s);
3317 unsigned desc = 0;
3318 TCGCond cond;
3319 uint64_t maxval;
3320 /* Note that GE/HS has a->eq == 0 and GT/HI has a->eq == 1. */
3321 bool eq = a->eq == a->lt;
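/*
 * Thus eq is set for the inclusive comparisons, WHILELE/WHILELS and
 * WHILEGE/WHILEHS, where the encoded eq bit matches the lt bit.
 */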
3323 /* The greater-than conditions are all SVE2. */
3324 if (a->lt
3325 ? !dc_isar_feature(aa64_sve, s)
3326 : !dc_isar_feature(aa64_sve2, s)) {
3327 return false;
3329 if (!sve_access_check(s)) {
3330 return true;
3333 op0 = read_cpu_reg(s, a->rn, 1);
3334 op1 = read_cpu_reg(s, a->rm, 1);
3336 if (!a->sf) {
3337 if (a->u) {
3338 tcg_gen_ext32u_i64(op0, op0);
3339 tcg_gen_ext32u_i64(op1, op1);
3340 } else {
3341 tcg_gen_ext32s_i64(op0, op0);
tcg_gen_ext32s_i64(op1, op1);
}
}

/* For the helper, compress the different conditions into a computation
 * of the number of iterations for which the condition is true.
 */
3349 t0 = tcg_temp_new_i64();
3350 t1 = tcg_temp_new_i64();
3352 if (a->lt) {
3353 tcg_gen_sub_i64(t0, op1, op0);
3354 if (a->u) {
3355 maxval = a->sf ? UINT64_MAX : UINT32_MAX;
3356 cond = eq ? TCG_COND_LEU : TCG_COND_LTU;
3357 } else {
3358 maxval = a->sf ? INT64_MAX : INT32_MAX;
cond = eq ? TCG_COND_LE : TCG_COND_LT;
}
} else {
3362 tcg_gen_sub_i64(t0, op0, op1);
3363 if (a->u) {
3364 maxval = 0;
3365 cond = eq ? TCG_COND_GEU : TCG_COND_GTU;
3366 } else {
3367 maxval = a->sf ? INT64_MIN : INT32_MIN;
cond = eq ? TCG_COND_GE : TCG_COND_GT;
}
}

tmax = tcg_constant_i64(vsz >> a->esz);
3373 if (eq) {
3374 /* Equality means one more iteration. */
3375 tcg_gen_addi_i64(t0, t0, 1);
/*
 * For the less-than while, if op1 is maxval (and the only time
 * the addition above could overflow), then we produce an all-true
 * predicate by setting the count to the vector length. This is
 * because the pseudocode is described as an increment + compare
 * loop, and the maximum integer would always compare true.
 * Similarly, the greater-than while has the same issue with the
 * minimum integer due to the decrement + compare loop.
 */
3386 tcg_gen_movi_i64(t1, maxval);
tcg_gen_movcond_i64(TCG_COND_EQ, t0, op1, t1, tmax, t0);
}

/* Bound to the maximum. */
3391 tcg_gen_umin_i64(t0, t0, tmax);
3393 /* Set the count to zero if the condition is false. */
3394 tcg_gen_movi_i64(t1, 0);
3395 tcg_gen_movcond_i64(cond, t0, op0, op1, t0, t1);
3396 tcg_temp_free_i64(t1);
3398 /* Since we're bounded, pass as a 32-bit type. */
3399 t2 = tcg_temp_new_i32();
3400 tcg_gen_extrl_i64_i32(t2, t0);
3401 tcg_temp_free_i64(t0);
3403 /* Scale elements to bits. */
3404 tcg_gen_shli_i32(t2, t2, a->esz);
3406 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
3407 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
3409 ptr = tcg_temp_new_ptr();
3410 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
3412 if (a->lt) {
3413 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
3414 } else {
3415 gen_helper_sve_whileg(t2, ptr, t2, tcg_constant_i32(desc));
3417 do_pred_flags(t2);
3419 tcg_temp_free_ptr(ptr);
3420 tcg_temp_free_i32(t2);
3421 return true;
3424 static bool trans_WHILE_ptr(DisasContext *s, arg_WHILE_ptr *a)
3426 TCGv_i64 op0, op1, diff, t1, tmax;
3427 TCGv_i32 t2;
3428 TCGv_ptr ptr;
3429 unsigned vsz = vec_full_reg_size(s);
3430 unsigned desc = 0;
3432 if (!dc_isar_feature(aa64_sve2, s)) {
3433 return false;
3435 if (!sve_access_check(s)) {
3436 return true;
3439 op0 = read_cpu_reg(s, a->rn, 1);
3440 op1 = read_cpu_reg(s, a->rm, 1);
3442 tmax = tcg_constant_i64(vsz);
3443 diff = tcg_temp_new_i64();
3445 if (a->rw) {
3446 /* WHILERW */
3447 /* diff = abs(op1 - op0), noting that op0/1 are unsigned. */
3448 t1 = tcg_temp_new_i64();
3449 tcg_gen_sub_i64(diff, op0, op1);
3450 tcg_gen_sub_i64(t1, op1, op0);
3451 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, diff, t1);
3452 tcg_temp_free_i64(t1);
3453 /* Round down to a multiple of ESIZE. */
3454 tcg_gen_andi_i64(diff, diff, -1 << a->esz);
3455 /* If op1 == op0, diff == 0, and the condition is always true. */
3456 tcg_gen_movcond_i64(TCG_COND_EQ, diff, op0, op1, tmax, diff);
3457 } else {
3458 /* WHILEWR */
3459 tcg_gen_sub_i64(diff, op1, op0);
3460 /* Round down to a multiple of ESIZE. */
3461 tcg_gen_andi_i64(diff, diff, -1 << a->esz);
3462 /* If op0 >= op1, diff <= 0, the condition is always true. */
3463 tcg_gen_movcond_i64(TCG_COND_GEU, diff, op0, op1, tmax, diff);
3466 /* Bound to the maximum. */
3467 tcg_gen_umin_i64(diff, diff, tmax);
3469 /* Since we're bounded, pass as a 32-bit type. */
3470 t2 = tcg_temp_new_i32();
3471 tcg_gen_extrl_i64_i32(t2, diff);
3472 tcg_temp_free_i64(diff);
3474 desc = FIELD_DP32(desc, PREDDESC, OPRSZ, vsz / 8);
3475 desc = FIELD_DP32(desc, PREDDESC, ESZ, a->esz);
3477 ptr = tcg_temp_new_ptr();
3478 tcg_gen_addi_ptr(ptr, cpu_env, pred_full_reg_offset(s, a->rd));
3480 gen_helper_sve_whilel(t2, ptr, t2, tcg_constant_i32(desc));
3481 do_pred_flags(t2);
3483 tcg_temp_free_ptr(ptr);
3484 tcg_temp_free_i32(t2);
return true;
}

/*
 *** SVE Integer Wide Immediate - Unpredicated Group
 */
3492 static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
3494 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
3495 return false;
3497 if (sve_access_check(s)) {
3498 unsigned vsz = vec_full_reg_size(s);
3499 int dofs = vec_full_reg_offset(s, a->rd);
3500 uint64_t imm;
3502 /* Decode the VFP immediate. */
3503 imm = vfp_expand_imm(a->esz, a->imm);
3504 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
3506 return true;
3509 static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
3511 if (!dc_isar_feature(aa64_sve, s)) {
3512 return false;
3514 if (sve_access_check(s)) {
3515 unsigned vsz = vec_full_reg_size(s);
3516 int dofs = vec_full_reg_offset(s, a->rd);
3517 tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
3519 return true;
3522 TRANS_FEAT(ADD_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_addi, a)
3524 static bool trans_SUB_zzi(DisasContext *s, arg_rri_esz *a)
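/*
 * There is no separate expansion for SUB (immediate): subtracting a
 * constant is the same as adding its negation, so reuse ADD.
 */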
3526 a->imm = -a->imm;
3527 return trans_ADD_zzi(s, a);
3530 static bool trans_SUBR_zzi(DisasContext *s, arg_rri_esz *a)
3532 static const TCGOpcode vecop_list[] = { INDEX_op_sub_vec, 0 };
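/*
 * SUBR is a reversed subtract, imm - Zn. The .scalar_first flag in
 * the GVecGen2s entries below asks the gvec expander to use the
 * scalar as the first operand, giving that reversed order.
 */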
3533 static const GVecGen2s op[4] = {
3534 { .fni8 = tcg_gen_vec_sub8_i64,
3535 .fniv = tcg_gen_sub_vec,
3536 .fno = gen_helper_sve_subri_b,
3537 .opt_opc = vecop_list,
3538 .vece = MO_8,
3539 .scalar_first = true },
3540 { .fni8 = tcg_gen_vec_sub16_i64,
3541 .fniv = tcg_gen_sub_vec,
3542 .fno = gen_helper_sve_subri_h,
3543 .opt_opc = vecop_list,
3544 .vece = MO_16,
3545 .scalar_first = true },
3546 { .fni4 = tcg_gen_sub_i32,
3547 .fniv = tcg_gen_sub_vec,
3548 .fno = gen_helper_sve_subri_s,
3549 .opt_opc = vecop_list,
3550 .vece = MO_32,
3551 .scalar_first = true },
3552 { .fni8 = tcg_gen_sub_i64,
3553 .fniv = tcg_gen_sub_vec,
3554 .fno = gen_helper_sve_subri_d,
3555 .opt_opc = vecop_list,
3556 .prefer_i64 = TCG_TARGET_REG_BITS == 64,
3557 .vece = MO_64,
.scalar_first = true }
};

if (!dc_isar_feature(aa64_sve, s)) {
3562 return false;
3564 if (sve_access_check(s)) {
3565 unsigned vsz = vec_full_reg_size(s);
3566 tcg_gen_gvec_2s(vec_full_reg_offset(s, a->rd),
3567 vec_full_reg_offset(s, a->rn),
3568 vsz, vsz, tcg_constant_i64(a->imm), &op[a->esz]);
3570 return true;
3573 TRANS_FEAT(MUL_zzi, aa64_sve, gen_gvec_fn_arg_zzi, tcg_gen_gvec_muli, a)
3575 static bool do_zzi_sat(DisasContext *s, arg_rri_esz *a, bool u, bool d)
3577 if (sve_access_check(s)) {
3578 do_sat_addsub_vec(s, a->esz, a->rd, a->rn,
3579 tcg_constant_i64(a->imm), u, d);
3581 return true;
3584 TRANS_FEAT(SQADD_zzi, aa64_sve, do_zzi_sat, a, false, false)
3585 TRANS_FEAT(UQADD_zzi, aa64_sve, do_zzi_sat, a, true, false)
3586 TRANS_FEAT(SQSUB_zzi, aa64_sve, do_zzi_sat, a, false, true)
3587 TRANS_FEAT(UQSUB_zzi, aa64_sve, do_zzi_sat, a, true, true)
3589 static bool do_zzi_ool(DisasContext *s, arg_rri_esz *a, gen_helper_gvec_2i *fn)
3591 if (sve_access_check(s)) {
3592 unsigned vsz = vec_full_reg_size(s);
3593 tcg_gen_gvec_2i_ool(vec_full_reg_offset(s, a->rd),
3594 vec_full_reg_offset(s, a->rn),
3595 tcg_constant_i64(a->imm), vsz, vsz, 0, fn);
3597 return true;
3600 #define DO_ZZI(NAME, name) \
3601 static gen_helper_gvec_2i * const name##i_fns[4] = { \
3602 gen_helper_sve_##name##i_b, gen_helper_sve_##name##i_h, \
3603 gen_helper_sve_##name##i_s, gen_helper_sve_##name##i_d, \
3604 }; \
3605 TRANS_FEAT(NAME##_zzi, aa64_sve, do_zzi_ool, a, name##i_fns[a->esz])
3607 DO_ZZI(SMAX, smax)
3608 DO_ZZI(UMAX, umax)
3609 DO_ZZI(SMIN, smin)
3610 DO_ZZI(UMIN, umin)
3612 #undef DO_ZZI
3614 static gen_helper_gvec_4 * const dot_fns[2][2] = {
3615 { gen_helper_gvec_sdot_b, gen_helper_gvec_sdot_h },
3616 { gen_helper_gvec_udot_b, gen_helper_gvec_udot_h }
3618 TRANS_FEAT(DOT_zzzz, aa64_sve, gen_gvec_ool_zzzz,
3619 dot_fns[a->u][a->sz], a->rd, a->rn, a->rm, a->ra, 0)
/*
 * SVE Multiply - Indexed
 */
3625 TRANS_FEAT(SDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
3626 gen_helper_gvec_sdot_idx_b, a)
3627 TRANS_FEAT(SDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
3628 gen_helper_gvec_sdot_idx_h, a)
3629 TRANS_FEAT(UDOT_zzxw_s, aa64_sve, gen_gvec_ool_arg_zzxz,
3630 gen_helper_gvec_udot_idx_b, a)
3631 TRANS_FEAT(UDOT_zzxw_d, aa64_sve, gen_gvec_ool_arg_zzxz,
3632 gen_helper_gvec_udot_idx_h, a)
3634 TRANS_FEAT(SUDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
3635 gen_helper_gvec_sudot_idx_b, a)
3636 TRANS_FEAT(USDOT_zzxw_s, aa64_sve_i8mm, gen_gvec_ool_arg_zzxz,
3637 gen_helper_gvec_usdot_idx_b, a)
3639 #define DO_SVE2_RRX(NAME, FUNC) \
3640 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \
3641 a->rd, a->rn, a->rm, a->index)
3643 DO_SVE2_RRX(MUL_zzx_h, gen_helper_gvec_mul_idx_h)
3644 DO_SVE2_RRX(MUL_zzx_s, gen_helper_gvec_mul_idx_s)
3645 DO_SVE2_RRX(MUL_zzx_d, gen_helper_gvec_mul_idx_d)
3647 DO_SVE2_RRX(SQDMULH_zzx_h, gen_helper_sve2_sqdmulh_idx_h)
3648 DO_SVE2_RRX(SQDMULH_zzx_s, gen_helper_sve2_sqdmulh_idx_s)
3649 DO_SVE2_RRX(SQDMULH_zzx_d, gen_helper_sve2_sqdmulh_idx_d)
3651 DO_SVE2_RRX(SQRDMULH_zzx_h, gen_helper_sve2_sqrdmulh_idx_h)
3652 DO_SVE2_RRX(SQRDMULH_zzx_s, gen_helper_sve2_sqrdmulh_idx_s)
3653 DO_SVE2_RRX(SQRDMULH_zzx_d, gen_helper_sve2_sqrdmulh_idx_d)
3655 #undef DO_SVE2_RRX
3657 #define DO_SVE2_RRX_TB(NAME, FUNC, TOP) \
3658 TRANS_FEAT(NAME, aa64_sve, gen_gvec_ool_zzz, FUNC, \
3659 a->rd, a->rn, a->rm, (a->index << 1) | TOP)
3661 DO_SVE2_RRX_TB(SQDMULLB_zzx_s, gen_helper_sve2_sqdmull_idx_s, false)
3662 DO_SVE2_RRX_TB(SQDMULLB_zzx_d, gen_helper_sve2_sqdmull_idx_d, false)
3663 DO_SVE2_RRX_TB(SQDMULLT_zzx_s, gen_helper_sve2_sqdmull_idx_s, true)
3664 DO_SVE2_RRX_TB(SQDMULLT_zzx_d, gen_helper_sve2_sqdmull_idx_d, true)
3666 DO_SVE2_RRX_TB(SMULLB_zzx_s, gen_helper_sve2_smull_idx_s, false)
3667 DO_SVE2_RRX_TB(SMULLB_zzx_d, gen_helper_sve2_smull_idx_d, false)
3668 DO_SVE2_RRX_TB(SMULLT_zzx_s, gen_helper_sve2_smull_idx_s, true)
3669 DO_SVE2_RRX_TB(SMULLT_zzx_d, gen_helper_sve2_smull_idx_d, true)
3671 DO_SVE2_RRX_TB(UMULLB_zzx_s, gen_helper_sve2_umull_idx_s, false)
3672 DO_SVE2_RRX_TB(UMULLB_zzx_d, gen_helper_sve2_umull_idx_d, false)
3673 DO_SVE2_RRX_TB(UMULLT_zzx_s, gen_helper_sve2_umull_idx_s, true)
3674 DO_SVE2_RRX_TB(UMULLT_zzx_d, gen_helper_sve2_umull_idx_d, true)
3676 #undef DO_SVE2_RRX_TB
3678 #define DO_SVE2_RRXR(NAME, FUNC) \
3679 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzxz, FUNC, a)
3681 DO_SVE2_RRXR(MLA_zzxz_h, gen_helper_gvec_mla_idx_h)
3682 DO_SVE2_RRXR(MLA_zzxz_s, gen_helper_gvec_mla_idx_s)
3683 DO_SVE2_RRXR(MLA_zzxz_d, gen_helper_gvec_mla_idx_d)
3685 DO_SVE2_RRXR(MLS_zzxz_h, gen_helper_gvec_mls_idx_h)
3686 DO_SVE2_RRXR(MLS_zzxz_s, gen_helper_gvec_mls_idx_s)
3687 DO_SVE2_RRXR(MLS_zzxz_d, gen_helper_gvec_mls_idx_d)
3689 DO_SVE2_RRXR(SQRDMLAH_zzxz_h, gen_helper_sve2_sqrdmlah_idx_h)
3690 DO_SVE2_RRXR(SQRDMLAH_zzxz_s, gen_helper_sve2_sqrdmlah_idx_s)
3691 DO_SVE2_RRXR(SQRDMLAH_zzxz_d, gen_helper_sve2_sqrdmlah_idx_d)
3693 DO_SVE2_RRXR(SQRDMLSH_zzxz_h, gen_helper_sve2_sqrdmlsh_idx_h)
3694 DO_SVE2_RRXR(SQRDMLSH_zzxz_s, gen_helper_sve2_sqrdmlsh_idx_s)
3695 DO_SVE2_RRXR(SQRDMLSH_zzxz_d, gen_helper_sve2_sqrdmlsh_idx_d)
3697 #undef DO_SVE2_RRXR
3699 #define DO_SVE2_RRXR_TB(NAME, FUNC, TOP) \
3700 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \
3701 a->rd, a->rn, a->rm, a->ra, (a->index << 1) | TOP)
3703 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, false)
3704 DO_SVE2_RRXR_TB(SQDMLALB_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, false)
3705 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_s, gen_helper_sve2_sqdmlal_idx_s, true)
3706 DO_SVE2_RRXR_TB(SQDMLALT_zzxw_d, gen_helper_sve2_sqdmlal_idx_d, true)
3708 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, false)
3709 DO_SVE2_RRXR_TB(SQDMLSLB_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, false)
3710 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_s, gen_helper_sve2_sqdmlsl_idx_s, true)
3711 DO_SVE2_RRXR_TB(SQDMLSLT_zzxw_d, gen_helper_sve2_sqdmlsl_idx_d, true)
3713 DO_SVE2_RRXR_TB(SMLALB_zzxw_s, gen_helper_sve2_smlal_idx_s, false)
3714 DO_SVE2_RRXR_TB(SMLALB_zzxw_d, gen_helper_sve2_smlal_idx_d, false)
3715 DO_SVE2_RRXR_TB(SMLALT_zzxw_s, gen_helper_sve2_smlal_idx_s, true)
3716 DO_SVE2_RRXR_TB(SMLALT_zzxw_d, gen_helper_sve2_smlal_idx_d, true)
3718 DO_SVE2_RRXR_TB(UMLALB_zzxw_s, gen_helper_sve2_umlal_idx_s, false)
3719 DO_SVE2_RRXR_TB(UMLALB_zzxw_d, gen_helper_sve2_umlal_idx_d, false)
3720 DO_SVE2_RRXR_TB(UMLALT_zzxw_s, gen_helper_sve2_umlal_idx_s, true)
3721 DO_SVE2_RRXR_TB(UMLALT_zzxw_d, gen_helper_sve2_umlal_idx_d, true)
3723 DO_SVE2_RRXR_TB(SMLSLB_zzxw_s, gen_helper_sve2_smlsl_idx_s, false)
3724 DO_SVE2_RRXR_TB(SMLSLB_zzxw_d, gen_helper_sve2_smlsl_idx_d, false)
3725 DO_SVE2_RRXR_TB(SMLSLT_zzxw_s, gen_helper_sve2_smlsl_idx_s, true)
3726 DO_SVE2_RRXR_TB(SMLSLT_zzxw_d, gen_helper_sve2_smlsl_idx_d, true)
3728 DO_SVE2_RRXR_TB(UMLSLB_zzxw_s, gen_helper_sve2_umlsl_idx_s, false)
3729 DO_SVE2_RRXR_TB(UMLSLB_zzxw_d, gen_helper_sve2_umlsl_idx_d, false)
3730 DO_SVE2_RRXR_TB(UMLSLT_zzxw_s, gen_helper_sve2_umlsl_idx_s, true)
3731 DO_SVE2_RRXR_TB(UMLSLT_zzxw_d, gen_helper_sve2_umlsl_idx_d, true)
3733 #undef DO_SVE2_RRXR_TB
3735 #define DO_SVE2_RRXR_ROT(NAME, FUNC) \
3736 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_zzzz, FUNC, \
3737 a->rd, a->rn, a->rm, a->ra, (a->index << 2) | a->rot)
3739 DO_SVE2_RRXR_ROT(CMLA_zzxz_h, gen_helper_sve2_cmla_idx_h)
3740 DO_SVE2_RRXR_ROT(CMLA_zzxz_s, gen_helper_sve2_cmla_idx_s)
3742 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_h, gen_helper_sve2_sqrdcmlah_idx_h)
3743 DO_SVE2_RRXR_ROT(SQRDCMLAH_zzxz_s, gen_helper_sve2_sqrdcmlah_idx_s)
3745 DO_SVE2_RRXR_ROT(CDOT_zzxw_s, gen_helper_sve2_cdot_idx_s)
3746 DO_SVE2_RRXR_ROT(CDOT_zzxw_d, gen_helper_sve2_cdot_idx_d)
3748 #undef DO_SVE2_RRXR_ROT
/*
 *** SVE Floating Point Multiply-Add Indexed Group
 */
3754 static bool do_FMLA_zzxz(DisasContext *s, arg_rrxr_esz *a, bool sub)
3756 static gen_helper_gvec_4_ptr * const fns[4] = {
3757 NULL,
3758 gen_helper_gvec_fmla_idx_h,
3759 gen_helper_gvec_fmla_idx_s,
3760 gen_helper_gvec_fmla_idx_d,
3762 return gen_gvec_fpst_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra,
3763 (a->index << 1) | sub,
3764 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3767 TRANS_FEAT(FMLA_zzxz, aa64_sve, do_FMLA_zzxz, a, false)
3768 TRANS_FEAT(FMLS_zzxz, aa64_sve, do_FMLA_zzxz, a, true)
/*
 *** SVE Floating Point Multiply Indexed Group
 */
3774 static gen_helper_gvec_3_ptr * const fmul_idx_fns[4] = {
3775 NULL, gen_helper_gvec_fmul_idx_h,
3776 gen_helper_gvec_fmul_idx_s, gen_helper_gvec_fmul_idx_d,
3778 TRANS_FEAT(FMUL_zzx, aa64_sve, gen_gvec_fpst_zzz,
3779 fmul_idx_fns[a->esz], a->rd, a->rn, a->rm, a->index,
3780 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
/*
 *** SVE Floating Point Fast Reduction Group
 */
3786 typedef void gen_helper_fp_reduce(TCGv_i64, TCGv_ptr, TCGv_ptr,
3787 TCGv_ptr, TCGv_i32);
3789 static bool do_reduce(DisasContext *s, arg_rpr_esz *a,
3790 gen_helper_fp_reduce *fn)
3792 unsigned vsz, p2vsz;
3793 TCGv_i32 t_desc;
3794 TCGv_ptr t_zn, t_pg, status;
3795 TCGv_i64 temp;
3797 if (fn == NULL) {
3798 return false;
3800 if (!sve_access_check(s)) {
3801 return true;
3804 vsz = vec_full_reg_size(s);
3805 p2vsz = pow2ceil(vsz);
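/*
 * The reduction helpers work over a power-of-2 tree of elements;
 * passing pow2ceil(vsz) as the data field lets them pad the tail
 * beyond the real vector length with the operation's identity value.
 */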
3806 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, p2vsz));
3807 temp = tcg_temp_new_i64();
3808 t_zn = tcg_temp_new_ptr();
3809 t_pg = tcg_temp_new_ptr();
3811 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, a->rn));
3812 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
3813 status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3815 fn(temp, t_zn, t_pg, status, t_desc);
3816 tcg_temp_free_ptr(t_zn);
3817 tcg_temp_free_ptr(t_pg);
3818 tcg_temp_free_ptr(status);
3820 write_fp_dreg(s, a->rd, temp);
3821 tcg_temp_free_i64(temp);
3822 return true;
3825 #define DO_VPZ(NAME, name) \
3826 static gen_helper_fp_reduce * const name##_fns[4] = { \
3827 NULL, gen_helper_sve_##name##_h, \
3828 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
3829 }; \
3830 TRANS_FEAT(NAME, aa64_sve, do_reduce, a, name##_fns[a->esz])
3832 DO_VPZ(FADDV, faddv)
3833 DO_VPZ(FMINNMV, fminnmv)
3834 DO_VPZ(FMAXNMV, fmaxnmv)
3835 DO_VPZ(FMINV, fminv)
3836 DO_VPZ(FMAXV, fmaxv)
3838 #undef DO_VPZ
/*
 *** SVE Floating Point Unary Operations - Unpredicated Group
 */
3844 static gen_helper_gvec_2_ptr * const frecpe_fns[] = {
3845 NULL, gen_helper_gvec_frecpe_h,
3846 gen_helper_gvec_frecpe_s, gen_helper_gvec_frecpe_d,
3848 TRANS_FEAT(FRECPE, aa64_sve, gen_gvec_fpst_arg_zz, frecpe_fns[a->esz], a, 0)
3850 static gen_helper_gvec_2_ptr * const frsqrte_fns[] = {
3851 NULL, gen_helper_gvec_frsqrte_h,
3852 gen_helper_gvec_frsqrte_s, gen_helper_gvec_frsqrte_d,
3854 TRANS_FEAT(FRSQRTE, aa64_sve, gen_gvec_fpst_arg_zz, frsqrte_fns[a->esz], a, 0)
/*
 *** SVE Floating Point Compare with Zero Group
 */
3860 static bool do_ppz_fp(DisasContext *s, arg_rpr_esz *a,
3861 gen_helper_gvec_3_ptr *fn)
3863 if (fn == NULL) {
3864 return false;
3866 if (sve_access_check(s)) {
3867 unsigned vsz = vec_full_reg_size(s);
3868 TCGv_ptr status =
3869 fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3871 tcg_gen_gvec_3_ptr(pred_full_reg_offset(s, a->rd),
3872 vec_full_reg_offset(s, a->rn),
3873 pred_full_reg_offset(s, a->pg),
3874 status, vsz, vsz, 0, fn);
3875 tcg_temp_free_ptr(status);
3877 return true;
3880 #define DO_PPZ(NAME, name) \
3881 static gen_helper_gvec_3_ptr * const name##_fns[] = { \
3882 NULL, gen_helper_sve_##name##_h, \
3883 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d, \
3884 }; \
3885 TRANS_FEAT(NAME, aa64_sve, do_ppz_fp, a, name##_fns[a->esz])
3887 DO_PPZ(FCMGE_ppz0, fcmge0)
3888 DO_PPZ(FCMGT_ppz0, fcmgt0)
3889 DO_PPZ(FCMLE_ppz0, fcmle0)
3890 DO_PPZ(FCMLT_ppz0, fcmlt0)
3891 DO_PPZ(FCMEQ_ppz0, fcmeq0)
3892 DO_PPZ(FCMNE_ppz0, fcmne0)
3894 #undef DO_PPZ
/*
 *** SVE floating-point trig multiply-add coefficient
 */
3900 static gen_helper_gvec_3_ptr * const ftmad_fns[4] = {
3901 NULL, gen_helper_sve_ftmad_h,
3902 gen_helper_sve_ftmad_s, gen_helper_sve_ftmad_d,
3904 TRANS_FEAT_NONSTREAMING(FTMAD, aa64_sve, gen_gvec_fpst_zzz,
3905 ftmad_fns[a->esz], a->rd, a->rn, a->rm, a->imm,
3906 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
/*
 *** SVE Floating Point Accumulating Reduction Group
 */
3912 static bool trans_FADDA(DisasContext *s, arg_rprr_esz *a)
3914 typedef void fadda_fn(TCGv_i64, TCGv_i64, TCGv_ptr,
3915 TCGv_ptr, TCGv_ptr, TCGv_i32);
3916 static fadda_fn * const fns[3] = {
3917 gen_helper_sve_fadda_h,
3918 gen_helper_sve_fadda_s,
3919 gen_helper_sve_fadda_d,
3921 unsigned vsz = vec_full_reg_size(s);
3922 TCGv_ptr t_rm, t_pg, t_fpst;
3923 TCGv_i64 t_val;
3924 TCGv_i32 t_desc;
3926 if (a->esz == 0 || !dc_isar_feature(aa64_sve, s)) {
3927 return false;
3929 s->is_nonstreaming = true;
3930 if (!sve_access_check(s)) {
3931 return true;
3934 t_val = load_esz(cpu_env, vec_reg_offset(s, a->rn, 0, a->esz), a->esz);
3935 t_rm = tcg_temp_new_ptr();
3936 t_pg = tcg_temp_new_ptr();
3937 tcg_gen_addi_ptr(t_rm, cpu_env, vec_full_reg_offset(s, a->rm));
3938 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, a->pg));
3939 t_fpst = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
3940 t_desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
3942 fns[a->esz - 1](t_val, t_val, t_rm, t_pg, t_fpst, t_desc);
3944 tcg_temp_free_ptr(t_fpst);
3945 tcg_temp_free_ptr(t_pg);
3946 tcg_temp_free_ptr(t_rm);
3948 write_fp_dreg(s, a->rd, t_val);
3949 tcg_temp_free_i64(t_val);
return true;
}

/*
 *** SVE Floating Point Arithmetic - Unpredicated Group
 */
3957 #define DO_FP3(NAME, name) \
3958 static gen_helper_gvec_3_ptr * const name##_fns[4] = { \
3959 NULL, gen_helper_gvec_##name##_h, \
3960 gen_helper_gvec_##name##_s, gen_helper_gvec_##name##_d \
3961 }; \
3962 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_arg_zzz, name##_fns[a->esz], a, 0)
3964 DO_FP3(FADD_zzz, fadd)
3965 DO_FP3(FSUB_zzz, fsub)
3966 DO_FP3(FMUL_zzz, fmul)
3967 DO_FP3(FRECPS, recps)
3968 DO_FP3(FRSQRTS, rsqrts)
3970 #undef DO_FP3
3972 static gen_helper_gvec_3_ptr * const ftsmul_fns[4] = {
3973 NULL, gen_helper_gvec_ftsmul_h,
3974 gen_helper_gvec_ftsmul_s, gen_helper_gvec_ftsmul_d
3976 TRANS_FEAT_NONSTREAMING(FTSMUL, aa64_sve, gen_gvec_fpst_arg_zzz,
3977 ftsmul_fns[a->esz], a, 0)
/*
 *** SVE Floating Point Arithmetic - Predicated Group
 */
3983 #define DO_ZPZZ_FP(NAME, FEAT, name) \
3984 static gen_helper_gvec_4_ptr * const name##_zpzz_fns[4] = { \
3985 NULL, gen_helper_##name##_h, \
3986 gen_helper_##name##_s, gen_helper_##name##_d \
3987 }; \
3988 TRANS_FEAT(NAME, FEAT, gen_gvec_fpst_arg_zpzz, name##_zpzz_fns[a->esz], a)
3990 DO_ZPZZ_FP(FADD_zpzz, aa64_sve, sve_fadd)
3991 DO_ZPZZ_FP(FSUB_zpzz, aa64_sve, sve_fsub)
3992 DO_ZPZZ_FP(FMUL_zpzz, aa64_sve, sve_fmul)
3993 DO_ZPZZ_FP(FMIN_zpzz, aa64_sve, sve_fmin)
3994 DO_ZPZZ_FP(FMAX_zpzz, aa64_sve, sve_fmax)
3995 DO_ZPZZ_FP(FMINNM_zpzz, aa64_sve, sve_fminnum)
3996 DO_ZPZZ_FP(FMAXNM_zpzz, aa64_sve, sve_fmaxnum)
3997 DO_ZPZZ_FP(FABD, aa64_sve, sve_fabd)
3998 DO_ZPZZ_FP(FSCALE, aa64_sve, sve_fscalbn)
3999 DO_ZPZZ_FP(FDIV, aa64_sve, sve_fdiv)
4000 DO_ZPZZ_FP(FMULX, aa64_sve, sve_fmulx)
4002 typedef void gen_helper_sve_fp2scalar(TCGv_ptr, TCGv_ptr, TCGv_ptr,
4003 TCGv_i64, TCGv_ptr, TCGv_i32);
4005 static void do_fp_scalar(DisasContext *s, int zd, int zn, int pg, bool is_fp16,
4006 TCGv_i64 scalar, gen_helper_sve_fp2scalar *fn)
4008 unsigned vsz = vec_full_reg_size(s);
4009 TCGv_ptr t_zd, t_zn, t_pg, status;
4010 TCGv_i32 desc;
4012 t_zd = tcg_temp_new_ptr();
4013 t_zn = tcg_temp_new_ptr();
4014 t_pg = tcg_temp_new_ptr();
4015 tcg_gen_addi_ptr(t_zd, cpu_env, vec_full_reg_offset(s, zd));
4016 tcg_gen_addi_ptr(t_zn, cpu_env, vec_full_reg_offset(s, zn));
4017 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
4019 status = fpstatus_ptr(is_fp16 ? FPST_FPCR_F16 : FPST_FPCR);
4020 desc = tcg_constant_i32(simd_desc(vsz, vsz, 0));
4021 fn(t_zd, t_zn, t_pg, scalar, status, desc);
4023 tcg_temp_free_ptr(status);
4024 tcg_temp_free_ptr(t_pg);
4025 tcg_temp_free_ptr(t_zn);
4026 tcg_temp_free_ptr(t_zd);
4029 static bool do_fp_imm(DisasContext *s, arg_rpri_esz *a, uint64_t imm,
4030 gen_helper_sve_fp2scalar *fn)
4032 if (fn == NULL) {
4033 return false;
4035 if (sve_access_check(s)) {
4036 do_fp_scalar(s, a->rd, a->rn, a->pg, a->esz == MO_16,
4037 tcg_constant_i64(imm), fn);
4039 return true;
4042 #define DO_FP_IMM(NAME, name, const0, const1) \
4043 static gen_helper_sve_fp2scalar * const name##_fns[4] = { \
4044 NULL, gen_helper_sve_##name##_h, \
4045 gen_helper_sve_##name##_s, \
4046 gen_helper_sve_##name##_d \
4047 }; \
4048 static uint64_t const name##_const[4][2] = { \
4049 { -1, -1 }, \
4050 { float16_##const0, float16_##const1 }, \
4051 { float32_##const0, float32_##const1 }, \
4052 { float64_##const0, float64_##const1 }, \
4053 }; \
4054 TRANS_FEAT(NAME##_zpzi, aa64_sve, do_fp_imm, a, \
4055 name##_const[a->esz][a->imm], name##_fns[a->esz])
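/* The one-bit immediate in these encodings selects between two
 * architecturally fixed constants, pre-formatted per element size in
 * the table above; e.g. FADD permits only #0.5 or #1.0, and FMUL only
 * #0.5 or #2.0. */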
4057 DO_FP_IMM(FADD, fadds, half, one)
4058 DO_FP_IMM(FSUB, fsubs, half, one)
4059 DO_FP_IMM(FMUL, fmuls, half, two)
4060 DO_FP_IMM(FSUBR, fsubrs, half, one)
4061 DO_FP_IMM(FMAXNM, fmaxnms, zero, one)
4062 DO_FP_IMM(FMINNM, fminnms, zero, one)
4063 DO_FP_IMM(FMAX, fmaxs, zero, one)
4064 DO_FP_IMM(FMIN, fmins, zero, one)
4066 #undef DO_FP_IMM
4068 static bool do_fp_cmp(DisasContext *s, arg_rprr_esz *a,
4069 gen_helper_gvec_4_ptr *fn)
4071 if (fn == NULL) {
4072 return false;
4074 if (sve_access_check(s)) {
4075 unsigned vsz = vec_full_reg_size(s);
4076 TCGv_ptr status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
4077 tcg_gen_gvec_4_ptr(pred_full_reg_offset(s, a->rd),
4078 vec_full_reg_offset(s, a->rn),
4079 vec_full_reg_offset(s, a->rm),
4080 pred_full_reg_offset(s, a->pg),
4081 status, vsz, vsz, 0, fn);
4082 tcg_temp_free_ptr(status);
4084 return true;
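/* Unlike the arithmetic groups above, the comparison helpers write
 * per-element boolean results into the predicate register a->rd, which
 * is why the first offset passed to tcg_gen_gvec_4_ptr is a predicate
 * rather than a vector register offset. */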
4087 #define DO_FPCMP(NAME, name) \
4088 static gen_helper_gvec_4_ptr * const name##_fns[4] = { \
4089 NULL, gen_helper_sve_##name##_h, \
4090 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
4091 }; \
4092 TRANS_FEAT(NAME##_ppzz, aa64_sve, do_fp_cmp, a, name##_fns[a->esz])
4094 DO_FPCMP(FCMGE, fcmge)
4095 DO_FPCMP(FCMGT, fcmgt)
4096 DO_FPCMP(FCMEQ, fcmeq)
4097 DO_FPCMP(FCMNE, fcmne)
4098 DO_FPCMP(FCMUO, fcmuo)
4099 DO_FPCMP(FACGE, facge)
4100 DO_FPCMP(FACGT, facgt)
4102 #undef DO_FPCMP
4104 static gen_helper_gvec_4_ptr * const fcadd_fns[] = {
4105 NULL, gen_helper_sve_fcadd_h,
4106 gen_helper_sve_fcadd_s, gen_helper_sve_fcadd_d,
4108 TRANS_FEAT(FCADD, aa64_sve, gen_gvec_fpst_zzzp, fcadd_fns[a->esz],
4109 a->rd, a->rn, a->rm, a->pg, a->rot,
4110 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4112 #define DO_FMLA(NAME, name) \
4113 static gen_helper_gvec_5_ptr * const name##_fns[4] = { \
4114 NULL, gen_helper_sve_##name##_h, \
4115 gen_helper_sve_##name##_s, gen_helper_sve_##name##_d \
4116 }; \
4117 TRANS_FEAT(NAME, aa64_sve, gen_gvec_fpst_zzzzp, name##_fns[a->esz], \
4118 a->rd, a->rn, a->rm, a->ra, a->pg, 0, \
4119 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4121 DO_FMLA(FMLA_zpzzz, fmla_zpzzz)
4122 DO_FMLA(FMLS_zpzzz, fmls_zpzzz)
4123 DO_FMLA(FNMLA_zpzzz, fnmla_zpzzz)
4124 DO_FMLA(FNMLS_zpzzz, fnmls_zpzzz)
4126 #undef DO_FMLA
4128 static gen_helper_gvec_5_ptr * const fcmla_fns[4] = {
4129 NULL, gen_helper_sve_fcmla_zpzzz_h,
4130 gen_helper_sve_fcmla_zpzzz_s, gen_helper_sve_fcmla_zpzzz_d,
4132 TRANS_FEAT(FCMLA_zpzzz, aa64_sve, gen_gvec_fpst_zzzzp, fcmla_fns[a->esz],
4133 a->rd, a->rn, a->rm, a->ra, a->pg, a->rot,
4134 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4136 static gen_helper_gvec_4_ptr * const fcmla_idx_fns[4] = {
4137 NULL, gen_helper_gvec_fcmlah_idx, gen_helper_gvec_fcmlas_idx, NULL
4139 TRANS_FEAT(FCMLA_zzxz, aa64_sve, gen_gvec_fpst_zzzz, fcmla_idx_fns[a->esz],
4140 a->rd, a->rn, a->rm, a->ra, a->index * 4 + a->rot,
4141 a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4144 /* *** SVE Floating Point Unary Operations Predicated Group */
4147 TRANS_FEAT(FCVT_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
4148 gen_helper_sve_fcvt_sh, a, 0, FPST_FPCR)
4149 TRANS_FEAT(FCVT_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
4150 gen_helper_sve_fcvt_hs, a, 0, FPST_FPCR)
4152 TRANS_FEAT(BFCVT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz,
4153 gen_helper_sve_bfcvt, a, 0, FPST_FPCR)
4155 TRANS_FEAT(FCVT_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
4156 gen_helper_sve_fcvt_dh, a, 0, FPST_FPCR)
4157 TRANS_FEAT(FCVT_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
4158 gen_helper_sve_fcvt_hd, a, 0, FPST_FPCR)
4159 TRANS_FEAT(FCVT_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4160 gen_helper_sve_fcvt_ds, a, 0, FPST_FPCR)
4161 TRANS_FEAT(FCVT_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4162 gen_helper_sve_fcvt_sd, a, 0, FPST_FPCR)
4164 TRANS_FEAT(FCVTZS_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
4165 gen_helper_sve_fcvtzs_hh, a, 0, FPST_FPCR_F16)
4166 TRANS_FEAT(FCVTZU_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
4167 gen_helper_sve_fcvtzu_hh, a, 0, FPST_FPCR_F16)
4168 TRANS_FEAT(FCVTZS_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
4169 gen_helper_sve_fcvtzs_hs, a, 0, FPST_FPCR_F16)
4170 TRANS_FEAT(FCVTZU_hs, aa64_sve, gen_gvec_fpst_arg_zpz,
4171 gen_helper_sve_fcvtzu_hs, a, 0, FPST_FPCR_F16)
4172 TRANS_FEAT(FCVTZS_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
4173 gen_helper_sve_fcvtzs_hd, a, 0, FPST_FPCR_F16)
4174 TRANS_FEAT(FCVTZU_hd, aa64_sve, gen_gvec_fpst_arg_zpz,
4175 gen_helper_sve_fcvtzu_hd, a, 0, FPST_FPCR_F16)
4177 TRANS_FEAT(FCVTZS_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
4178 gen_helper_sve_fcvtzs_ss, a, 0, FPST_FPCR)
4179 TRANS_FEAT(FCVTZU_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
4180 gen_helper_sve_fcvtzu_ss, a, 0, FPST_FPCR)
4181 TRANS_FEAT(FCVTZS_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4182 gen_helper_sve_fcvtzs_sd, a, 0, FPST_FPCR)
4183 TRANS_FEAT(FCVTZU_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4184 gen_helper_sve_fcvtzu_sd, a, 0, FPST_FPCR)
4185 TRANS_FEAT(FCVTZS_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4186 gen_helper_sve_fcvtzs_ds, a, 0, FPST_FPCR)
4187 TRANS_FEAT(FCVTZU_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4188 gen_helper_sve_fcvtzu_ds, a, 0, FPST_FPCR)
4190 TRANS_FEAT(FCVTZS_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
4191 gen_helper_sve_fcvtzs_dd, a, 0, FPST_FPCR)
4192 TRANS_FEAT(FCVTZU_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
4193 gen_helper_sve_fcvtzu_dd, a, 0, FPST_FPCR)
4195 static gen_helper_gvec_3_ptr * const frint_fns[] = {
4196 NULL,
4197 gen_helper_sve_frint_h,
4198 gen_helper_sve_frint_s,
4199 gen_helper_sve_frint_d
4201 TRANS_FEAT(FRINTI, aa64_sve, gen_gvec_fpst_arg_zpz, frint_fns[a->esz],
4202 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4204 static gen_helper_gvec_3_ptr * const frintx_fns[] = {
4205 NULL,
4206 gen_helper_sve_frintx_h,
4207 gen_helper_sve_frintx_s,
4208 gen_helper_sve_frintx_d
4210 TRANS_FEAT(FRINTX, aa64_sve, gen_gvec_fpst_arg_zpz, frintx_fns[a->esz],
4211 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4213 static bool do_frint_mode(DisasContext *s, arg_rpr_esz *a,
4214 int mode, gen_helper_gvec_3_ptr *fn)
4216 unsigned vsz;
4217 TCGv_i32 tmode;
4218 TCGv_ptr status;
4220 if (fn == NULL) {
4221 return false;
4223 if (!sve_access_check(s)) {
4224 return true;
4227 vsz = vec_full_reg_size(s);
4228 tmode = tcg_const_i32(mode);
4229 status = fpstatus_ptr(a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR);
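/* set_rmode returns the previous rounding mode in its destination, so
 * the first call installs the new mode while saving the old one in
 * tmode, and the identical call after the operation restores it. */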
4231 gen_helper_set_rmode(tmode, tmode, status);
4233 tcg_gen_gvec_3_ptr(vec_full_reg_offset(s, a->rd),
4234 vec_full_reg_offset(s, a->rn),
4235 pred_full_reg_offset(s, a->pg),
4236 status, vsz, vsz, 0, fn);
4238 gen_helper_set_rmode(tmode, tmode, status);
4239 tcg_temp_free_i32(tmode);
4240 tcg_temp_free_ptr(status);
4241 return true;
4244 TRANS_FEAT(FRINTN, aa64_sve, do_frint_mode, a,
4245 float_round_nearest_even, frint_fns[a->esz])
4246 TRANS_FEAT(FRINTP, aa64_sve, do_frint_mode, a,
4247 float_round_up, frint_fns[a->esz])
4248 TRANS_FEAT(FRINTM, aa64_sve, do_frint_mode, a,
4249 float_round_down, frint_fns[a->esz])
4250 TRANS_FEAT(FRINTZ, aa64_sve, do_frint_mode, a,
4251 float_round_to_zero, frint_fns[a->esz])
4252 TRANS_FEAT(FRINTA, aa64_sve, do_frint_mode, a,
4253 float_round_ties_away, frint_fns[a->esz])
4255 static gen_helper_gvec_3_ptr * const frecpx_fns[] = {
4256 NULL, gen_helper_sve_frecpx_h,
4257 gen_helper_sve_frecpx_s, gen_helper_sve_frecpx_d,
4259 TRANS_FEAT(FRECPX, aa64_sve, gen_gvec_fpst_arg_zpz, frecpx_fns[a->esz],
4260 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4262 static gen_helper_gvec_3_ptr * const fsqrt_fns[] = {
4263 NULL, gen_helper_sve_fsqrt_h,
4264 gen_helper_sve_fsqrt_s, gen_helper_sve_fsqrt_d,
4266 TRANS_FEAT(FSQRT, aa64_sve, gen_gvec_fpst_arg_zpz, fsqrt_fns[a->esz],
4267 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
4269 TRANS_FEAT(SCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
4270 gen_helper_sve_scvt_hh, a, 0, FPST_FPCR_F16)
4271 TRANS_FEAT(SCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
4272 gen_helper_sve_scvt_sh, a, 0, FPST_FPCR_F16)
4273 TRANS_FEAT(SCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
4274 gen_helper_sve_scvt_dh, a, 0, FPST_FPCR_F16)
4276 TRANS_FEAT(SCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
4277 gen_helper_sve_scvt_ss, a, 0, FPST_FPCR)
4278 TRANS_FEAT(SCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4279 gen_helper_sve_scvt_ds, a, 0, FPST_FPCR)
4281 TRANS_FEAT(SCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4282 gen_helper_sve_scvt_sd, a, 0, FPST_FPCR)
4283 TRANS_FEAT(SCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
4284 gen_helper_sve_scvt_dd, a, 0, FPST_FPCR)
4286 TRANS_FEAT(UCVTF_hh, aa64_sve, gen_gvec_fpst_arg_zpz,
4287 gen_helper_sve_ucvt_hh, a, 0, FPST_FPCR_F16)
4288 TRANS_FEAT(UCVTF_sh, aa64_sve, gen_gvec_fpst_arg_zpz,
4289 gen_helper_sve_ucvt_sh, a, 0, FPST_FPCR_F16)
4290 TRANS_FEAT(UCVTF_dh, aa64_sve, gen_gvec_fpst_arg_zpz,
4291 gen_helper_sve_ucvt_dh, a, 0, FPST_FPCR_F16)
4293 TRANS_FEAT(UCVTF_ss, aa64_sve, gen_gvec_fpst_arg_zpz,
4294 gen_helper_sve_ucvt_ss, a, 0, FPST_FPCR)
4295 TRANS_FEAT(UCVTF_ds, aa64_sve, gen_gvec_fpst_arg_zpz,
4296 gen_helper_sve_ucvt_ds, a, 0, FPST_FPCR)
4297 TRANS_FEAT(UCVTF_sd, aa64_sve, gen_gvec_fpst_arg_zpz,
4298 gen_helper_sve_ucvt_sd, a, 0, FPST_FPCR)
4300 TRANS_FEAT(UCVTF_dd, aa64_sve, gen_gvec_fpst_arg_zpz,
4301 gen_helper_sve_ucvt_dd, a, 0, FPST_FPCR)
4304 /* *** SVE Memory - 32-bit Gather and Unsized Contiguous Group */
4307 /* Subroutine loading a vector register at VOFS of LEN bytes.
4308 * The load should begin at the address Rn + IMM. */
4311 void gen_sve_ldr(DisasContext *s, TCGv_ptr base, int vofs,
4312 int len, int rn, int imm)
4314 int len_align = QEMU_ALIGN_DOWN(len, 8);
4315 int len_remain = len % 8;
4316 int nparts = len / 8 + ctpop8(len_remain);
4317 int midx = get_mem_index(s);
4318 TCGv_i64 dirty_addr, clean_addr, t0, t1;
4320 dirty_addr = tcg_temp_new_i64();
4321 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4322 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
4323 tcg_temp_free_i64(dirty_addr);
4326 /* Note that unpredicated load/store of vector/predicate registers
4327 * are defined as a stream of bytes, which equates to little-endian
4328 * operations on larger quantities.
4329 * Attempt to keep code expansion to a minimum by limiting the
4330 * amount of unrolling done. */
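/* As a worked example, a predicate load with len == 6 (a 384-bit
 * vector) gives len_align == 0, len_remain == 6 and
 * nparts == 0 + ctpop8(6) == 2, so the unrolled path is taken: the
 * aligned loop runs zero times and the len_remain == 6 case below
 * finishes with a 4-byte plus 2-byte load pair. */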
4332 if (nparts <= 4) {
4333 int i;
4335 t0 = tcg_temp_new_i64();
4336 for (i = 0; i < len_align; i += 8) {
4337 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
4338 tcg_gen_st_i64(t0, base, vofs + i);
4339 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4341 tcg_temp_free_i64(t0);
4342 } else {
4343 TCGLabel *loop = gen_new_label();
4344 TCGv_ptr tp, i = tcg_const_local_ptr(0);
4346 /* Copy the clean address into a local temp, live across the loop. */
4347 t0 = clean_addr;
4348 clean_addr = new_tmp_a64_local(s);
4349 tcg_gen_mov_i64(clean_addr, t0);
4351 if (base != cpu_env) {
4352 TCGv_ptr b = tcg_temp_local_new_ptr();
4353 tcg_gen_mov_ptr(b, base);
4354 base = b;
4357 gen_set_label(loop);
4359 t0 = tcg_temp_new_i64();
4360 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUQ);
4361 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4363 tp = tcg_temp_new_ptr();
4364 tcg_gen_add_ptr(tp, base, i);
4365 tcg_gen_addi_ptr(i, i, 8);
4366 tcg_gen_st_i64(t0, tp, vofs);
4367 tcg_temp_free_ptr(tp);
4368 tcg_temp_free_i64(t0);
4370 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4371 tcg_temp_free_ptr(i);
4373 if (base != cpu_env) {
4374 tcg_temp_free_ptr(base);
4375 assert(len_remain == 0);
4380 /* Predicate register loads can be any multiple of 2.
4381 * Note that we still store the entire 64-bit unit into cpu_env. */
4383 if (len_remain) {
4384 t0 = tcg_temp_new_i64();
4385 switch (len_remain) {
4386 case 2:
4387 case 4:
4388 case 8:
4389 tcg_gen_qemu_ld_i64(t0, clean_addr, midx,
4390 MO_LE | ctz32(len_remain));
4391 break;
4393 case 6:
4394 t1 = tcg_temp_new_i64();
4395 tcg_gen_qemu_ld_i64(t0, clean_addr, midx, MO_LEUL);
4396 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
4397 tcg_gen_qemu_ld_i64(t1, clean_addr, midx, MO_LEUW);
4398 tcg_gen_deposit_i64(t0, t0, t1, 32, 32);
4399 tcg_temp_free_i64(t1);
4400 break;
4402 default:
4403 g_assert_not_reached();
4405 tcg_gen_st_i64(t0, base, vofs + len_align);
4406 tcg_temp_free_i64(t0);
4410 /* Similarly for stores. */
4411 void gen_sve_str(DisasContext *s, TCGv_ptr base, int vofs,
4412 int len, int rn, int imm)
4414 int len_align = QEMU_ALIGN_DOWN(len, 8);
4415 int len_remain = len % 8;
4416 int nparts = len / 8 + ctpop8(len_remain);
4417 int midx = get_mem_index(s);
4418 TCGv_i64 dirty_addr, clean_addr, t0;
4420 dirty_addr = tcg_temp_new_i64();
4421 tcg_gen_addi_i64(dirty_addr, cpu_reg_sp(s, rn), imm);
4422 clean_addr = gen_mte_checkN(s, dirty_addr, false, rn != 31, len);
4423 tcg_temp_free_i64(dirty_addr);
4425 /* Note that unpredicated load/store of vector/predicate registers
4426 * are defined as a stream of bytes, which equates to little-endian
4427 * operations on larger quantities. There is no nice way to force
4428 * a little-endian store for aarch64_be-linux-user out of line.
4430 * Attempt to keep code expansion to a minimum by limiting the
4431 * amount of unrolling done. */
4433 if (nparts <= 4) {
4434 int i;
4436 t0 = tcg_temp_new_i64();
4437 for (i = 0; i < len_align; i += 8) {
4438 tcg_gen_ld_i64(t0, base, vofs + i);
4439 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
4440 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4442 tcg_temp_free_i64(t0);
4443 } else {
4444 TCGLabel *loop = gen_new_label();
4445 TCGv_ptr tp, i = tcg_const_local_ptr(0);
4447 /* Copy the clean address into a local temp, live across the loop. */
4448 t0 = clean_addr;
4449 clean_addr = new_tmp_a64_local(s);
4450 tcg_gen_mov_i64(clean_addr, t0);
4452 if (base != cpu_env) {
4453 TCGv_ptr b = tcg_temp_local_new_ptr();
4454 tcg_gen_mov_ptr(b, base);
4455 base = b;
4458 gen_set_label(loop);
4460 t0 = tcg_temp_new_i64();
4461 tp = tcg_temp_new_ptr();
4462 tcg_gen_add_ptr(tp, base, i);
4463 tcg_gen_ld_i64(t0, tp, vofs);
4464 tcg_gen_addi_ptr(i, i, 8);
4465 tcg_temp_free_ptr(tp);
4467 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUQ);
4468 tcg_gen_addi_i64(clean_addr, clean_addr, 8);
4469 tcg_temp_free_i64(t0);
4471 tcg_gen_brcondi_ptr(TCG_COND_LTU, i, len_align, loop);
4472 tcg_temp_free_ptr(i);
4474 if (base != cpu_env) {
4475 tcg_temp_free_ptr(base);
4476 assert(len_remain == 0);
4480 /* Predicate register stores can be any multiple of 2. */
4481 if (len_remain) {
4482 t0 = tcg_temp_new_i64();
4483 tcg_gen_ld_i64(t0, base, vofs + len_align);
4485 switch (len_remain) {
4486 case 2:
4487 case 4:
4488 case 8:
4489 tcg_gen_qemu_st_i64(t0, clean_addr, midx,
4490 MO_LE | ctz32(len_remain));
4491 break;
4493 case 6:
4494 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUL);
4495 tcg_gen_addi_i64(clean_addr, clean_addr, 4);
4496 tcg_gen_shri_i64(t0, t0, 32);
4497 tcg_gen_qemu_st_i64(t0, clean_addr, midx, MO_LEUW);
4498 break;
4500 default:
4501 g_assert_not_reached();
4503 tcg_temp_free_i64(t0);
4507 static bool trans_LDR_zri(DisasContext *s, arg_rri *a)
4509 if (!dc_isar_feature(aa64_sve, s)) {
4510 return false;
4512 if (sve_access_check(s)) {
4513 int size = vec_full_reg_size(s);
4514 int off = vec_full_reg_offset(s, a->rd);
4515 gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
4517 return true;
4520 static bool trans_LDR_pri(DisasContext *s, arg_rri *a)
4522 if (!dc_isar_feature(aa64_sve, s)) {
4523 return false;
4525 if (sve_access_check(s)) {
4526 int size = pred_full_reg_size(s);
4527 int off = pred_full_reg_offset(s, a->rd);
4528 gen_sve_ldr(s, cpu_env, off, size, a->rn, a->imm * size);
4530 return true;
4533 static bool trans_STR_zri(DisasContext *s, arg_rri *a)
4535 if (!dc_isar_feature(aa64_sve, s)) {
4536 return false;
4538 if (sve_access_check(s)) {
4539 int size = vec_full_reg_size(s);
4540 int off = vec_full_reg_offset(s, a->rd);
4541 gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
4543 return true;
4546 static bool trans_STR_pri(DisasContext *s, arg_rri *a)
4548 if (!dc_isar_feature(aa64_sve, s)) {
4549 return false;
4551 if (sve_access_check(s)) {
4552 int size = pred_full_reg_size(s);
4553 int off = pred_full_reg_offset(s, a->rd);
4554 gen_sve_str(s, cpu_env, off, size, a->rn, a->imm * size);
4556 return true;
4560 /* *** SVE Memory - Contiguous Load Group */
4563 /* The memory mode of the dtype. */
4564 static const MemOp dtype_mop[16] = {
4565 MO_UB, MO_UB, MO_UB, MO_UB,
4566 MO_SL, MO_UW, MO_UW, MO_UW,
4567 MO_SW, MO_SW, MO_UL, MO_UL,
4568 MO_SB, MO_SB, MO_SB, MO_UQ
4571 #define dtype_msz(x) (dtype_mop[x] & MO_SIZE)
4573 /* The vector element size of dtype. */
4574 static const uint8_t dtype_esz[16] = {
4575 0, 1, 2, 3,
4576 3, 1, 2, 3,
4577 3, 2, 2, 3,
4578 3, 2, 1, 3
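/* Reading the two tables together: dtype 4 pairs MO_SL with
 * dtype_esz[4] == 3, i.e. a signed 32-bit memory element widened to a
 * 64-bit vector element (LD1SW), while dtype 15 pairs MO_UQ with
 * esz 3, the unextended 64-bit load (LD1D). */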
4581 static void do_mem_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
4582 int dtype, uint32_t mte_n, bool is_write,
4583 gen_helper_gvec_mem *fn)
4585 unsigned vsz = vec_full_reg_size(s);
4586 TCGv_ptr t_pg;
4587 int desc = 0;
4590 /* For e.g. LD4, there are not enough arguments to pass all 4
4591 * registers as pointers, so encode the regno into the data field.
4592 * For consistency, do this even for LD1. */
4594 if (s->mte_active[0]) {
4595 int msz = dtype_msz(dtype);
4597 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
4598 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
4599 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
4600 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
4601 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (mte_n << msz) - 1);
4602 desc <<= SVE_MTEDESC_SHIFT;
4603 } else {
4604 addr = clean_data_tbi(s, addr);
4607 desc = simd_desc(vsz, vsz, zt | desc);
4608 t_pg = tcg_temp_new_ptr();
4610 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
4611 fn(cpu_env, t_pg, addr, tcg_constant_i32(desc));
4613 tcg_temp_free_ptr(t_pg);
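/* The descriptor built above is split in two: the MTE fields live
 * above SVE_MTEDESC_SHIFT, while the low bits are an ordinary
 * simd_desc() whose data field carries zt, from which the helper
 * recovers the one to four destination registers. */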
4616 /* Indexed by [mte][be][dtype][nreg] */
4617 static gen_helper_gvec_mem * const ldr_fns[2][2][16][4] = {
4618 { /* mte inactive, little-endian */
4619 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
4620 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
4621 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
4622 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
4623 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
4625 { gen_helper_sve_ld1sds_le_r, NULL, NULL, NULL },
4626 { gen_helper_sve_ld1hh_le_r, gen_helper_sve_ld2hh_le_r,
4627 gen_helper_sve_ld3hh_le_r, gen_helper_sve_ld4hh_le_r },
4628 { gen_helper_sve_ld1hsu_le_r, NULL, NULL, NULL },
4629 { gen_helper_sve_ld1hdu_le_r, NULL, NULL, NULL },
4631 { gen_helper_sve_ld1hds_le_r, NULL, NULL, NULL },
4632 { gen_helper_sve_ld1hss_le_r, NULL, NULL, NULL },
4633 { gen_helper_sve_ld1ss_le_r, gen_helper_sve_ld2ss_le_r,
4634 gen_helper_sve_ld3ss_le_r, gen_helper_sve_ld4ss_le_r },
4635 { gen_helper_sve_ld1sdu_le_r, NULL, NULL, NULL },
4637 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
4638 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
4639 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
4640 { gen_helper_sve_ld1dd_le_r, gen_helper_sve_ld2dd_le_r,
4641 gen_helper_sve_ld3dd_le_r, gen_helper_sve_ld4dd_le_r } },
4643 /* mte inactive, big-endian */
4644 { { gen_helper_sve_ld1bb_r, gen_helper_sve_ld2bb_r,
4645 gen_helper_sve_ld3bb_r, gen_helper_sve_ld4bb_r },
4646 { gen_helper_sve_ld1bhu_r, NULL, NULL, NULL },
4647 { gen_helper_sve_ld1bsu_r, NULL, NULL, NULL },
4648 { gen_helper_sve_ld1bdu_r, NULL, NULL, NULL },
4650 { gen_helper_sve_ld1sds_be_r, NULL, NULL, NULL },
4651 { gen_helper_sve_ld1hh_be_r, gen_helper_sve_ld2hh_be_r,
4652 gen_helper_sve_ld3hh_be_r, gen_helper_sve_ld4hh_be_r },
4653 { gen_helper_sve_ld1hsu_be_r, NULL, NULL, NULL },
4654 { gen_helper_sve_ld1hdu_be_r, NULL, NULL, NULL },
4656 { gen_helper_sve_ld1hds_be_r, NULL, NULL, NULL },
4657 { gen_helper_sve_ld1hss_be_r, NULL, NULL, NULL },
4658 { gen_helper_sve_ld1ss_be_r, gen_helper_sve_ld2ss_be_r,
4659 gen_helper_sve_ld3ss_be_r, gen_helper_sve_ld4ss_be_r },
4660 { gen_helper_sve_ld1sdu_be_r, NULL, NULL, NULL },
4662 { gen_helper_sve_ld1bds_r, NULL, NULL, NULL },
4663 { gen_helper_sve_ld1bss_r, NULL, NULL, NULL },
4664 { gen_helper_sve_ld1bhs_r, NULL, NULL, NULL },
4665 { gen_helper_sve_ld1dd_be_r, gen_helper_sve_ld2dd_be_r,
4666 gen_helper_sve_ld3dd_be_r, gen_helper_sve_ld4dd_be_r } } },
4668 { /* mte active, little-endian */
4669 { { gen_helper_sve_ld1bb_r_mte,
4670 gen_helper_sve_ld2bb_r_mte,
4671 gen_helper_sve_ld3bb_r_mte,
4672 gen_helper_sve_ld4bb_r_mte },
4673 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
4674 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
4675 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
4677 { gen_helper_sve_ld1sds_le_r_mte, NULL, NULL, NULL },
4678 { gen_helper_sve_ld1hh_le_r_mte,
4679 gen_helper_sve_ld2hh_le_r_mte,
4680 gen_helper_sve_ld3hh_le_r_mte,
4681 gen_helper_sve_ld4hh_le_r_mte },
4682 { gen_helper_sve_ld1hsu_le_r_mte, NULL, NULL, NULL },
4683 { gen_helper_sve_ld1hdu_le_r_mte, NULL, NULL, NULL },
4685 { gen_helper_sve_ld1hds_le_r_mte, NULL, NULL, NULL },
4686 { gen_helper_sve_ld1hss_le_r_mte, NULL, NULL, NULL },
4687 { gen_helper_sve_ld1ss_le_r_mte,
4688 gen_helper_sve_ld2ss_le_r_mte,
4689 gen_helper_sve_ld3ss_le_r_mte,
4690 gen_helper_sve_ld4ss_le_r_mte },
4691 { gen_helper_sve_ld1sdu_le_r_mte, NULL, NULL, NULL },
4693 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
4694 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
4695 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
4696 { gen_helper_sve_ld1dd_le_r_mte,
4697 gen_helper_sve_ld2dd_le_r_mte,
4698 gen_helper_sve_ld3dd_le_r_mte,
4699 gen_helper_sve_ld4dd_le_r_mte } },
4701 /* mte active, big-endian */
4702 { { gen_helper_sve_ld1bb_r_mte,
4703 gen_helper_sve_ld2bb_r_mte,
4704 gen_helper_sve_ld3bb_r_mte,
4705 gen_helper_sve_ld4bb_r_mte },
4706 { gen_helper_sve_ld1bhu_r_mte, NULL, NULL, NULL },
4707 { gen_helper_sve_ld1bsu_r_mte, NULL, NULL, NULL },
4708 { gen_helper_sve_ld1bdu_r_mte, NULL, NULL, NULL },
4710 { gen_helper_sve_ld1sds_be_r_mte, NULL, NULL, NULL },
4711 { gen_helper_sve_ld1hh_be_r_mte,
4712 gen_helper_sve_ld2hh_be_r_mte,
4713 gen_helper_sve_ld3hh_be_r_mte,
4714 gen_helper_sve_ld4hh_be_r_mte },
4715 { gen_helper_sve_ld1hsu_be_r_mte, NULL, NULL, NULL },
4716 { gen_helper_sve_ld1hdu_be_r_mte, NULL, NULL, NULL },
4718 { gen_helper_sve_ld1hds_be_r_mte, NULL, NULL, NULL },
4719 { gen_helper_sve_ld1hss_be_r_mte, NULL, NULL, NULL },
4720 { gen_helper_sve_ld1ss_be_r_mte,
4721 gen_helper_sve_ld2ss_be_r_mte,
4722 gen_helper_sve_ld3ss_be_r_mte,
4723 gen_helper_sve_ld4ss_be_r_mte },
4724 { gen_helper_sve_ld1sdu_be_r_mte, NULL, NULL, NULL },
4726 { gen_helper_sve_ld1bds_r_mte, NULL, NULL, NULL },
4727 { gen_helper_sve_ld1bss_r_mte, NULL, NULL, NULL },
4728 { gen_helper_sve_ld1bhs_r_mte, NULL, NULL, NULL },
4729 { gen_helper_sve_ld1dd_be_r_mte,
4730 gen_helper_sve_ld2dd_be_r_mte,
4731 gen_helper_sve_ld3dd_be_r_mte,
4732 gen_helper_sve_ld4dd_be_r_mte } } },
4735 static void do_ld_zpa(DisasContext *s, int zt, int pg,
4736 TCGv_i64 addr, int dtype, int nreg)
4738 gen_helper_gvec_mem *fn
4739 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][nreg];
4742 /* While there are holes in the table, they are not
4743 * accessible via the instruction encoding. */
4745 assert(fn != NULL);
4746 do_mem_zpa(s, zt, pg, addr, dtype, nreg, false, fn);
4749 static bool trans_LD_zprr(DisasContext *s, arg_rprr_load *a)
4751 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) {
4752 return false;
4754 if (sve_access_check(s)) {
4755 TCGv_i64 addr = new_tmp_a64(s);
4756 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4757 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4758 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4760 return true;
4763 static bool trans_LD_zpri(DisasContext *s, arg_rpri_load *a)
4765 if (!dc_isar_feature(aa64_sve, s)) {
4766 return false;
4768 if (sve_access_check(s)) {
4769 int vsz = vec_full_reg_size(s);
4770 int elements = vsz >> dtype_esz[a->dtype];
4771 TCGv_i64 addr = new_tmp_a64(s);
4773 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
4774 (a->imm * elements * (a->nreg + 1))
4775 << dtype_msz(a->dtype));
4776 do_ld_zpa(s, a->rd, a->pg, addr, a->dtype, a->nreg);
4778 return true;
4781 static bool trans_LDFF1_zprr(DisasContext *s, arg_rprr_load *a)
4783 static gen_helper_gvec_mem * const fns[2][2][16] = {
4784 { /* mte inactive, little-endian */
4785 { gen_helper_sve_ldff1bb_r,
4786 gen_helper_sve_ldff1bhu_r,
4787 gen_helper_sve_ldff1bsu_r,
4788 gen_helper_sve_ldff1bdu_r,
4790 gen_helper_sve_ldff1sds_le_r,
4791 gen_helper_sve_ldff1hh_le_r,
4792 gen_helper_sve_ldff1hsu_le_r,
4793 gen_helper_sve_ldff1hdu_le_r,
4795 gen_helper_sve_ldff1hds_le_r,
4796 gen_helper_sve_ldff1hss_le_r,
4797 gen_helper_sve_ldff1ss_le_r,
4798 gen_helper_sve_ldff1sdu_le_r,
4800 gen_helper_sve_ldff1bds_r,
4801 gen_helper_sve_ldff1bss_r,
4802 gen_helper_sve_ldff1bhs_r,
4803 gen_helper_sve_ldff1dd_le_r },
4805 /* mte inactive, big-endian */
4806 { gen_helper_sve_ldff1bb_r,
4807 gen_helper_sve_ldff1bhu_r,
4808 gen_helper_sve_ldff1bsu_r,
4809 gen_helper_sve_ldff1bdu_r,
4811 gen_helper_sve_ldff1sds_be_r,
4812 gen_helper_sve_ldff1hh_be_r,
4813 gen_helper_sve_ldff1hsu_be_r,
4814 gen_helper_sve_ldff1hdu_be_r,
4816 gen_helper_sve_ldff1hds_be_r,
4817 gen_helper_sve_ldff1hss_be_r,
4818 gen_helper_sve_ldff1ss_be_r,
4819 gen_helper_sve_ldff1sdu_be_r,
4821 gen_helper_sve_ldff1bds_r,
4822 gen_helper_sve_ldff1bss_r,
4823 gen_helper_sve_ldff1bhs_r,
4824 gen_helper_sve_ldff1dd_be_r } },
4826 { /* mte active, little-endian */
4827 { gen_helper_sve_ldff1bb_r_mte,
4828 gen_helper_sve_ldff1bhu_r_mte,
4829 gen_helper_sve_ldff1bsu_r_mte,
4830 gen_helper_sve_ldff1bdu_r_mte,
4832 gen_helper_sve_ldff1sds_le_r_mte,
4833 gen_helper_sve_ldff1hh_le_r_mte,
4834 gen_helper_sve_ldff1hsu_le_r_mte,
4835 gen_helper_sve_ldff1hdu_le_r_mte,
4837 gen_helper_sve_ldff1hds_le_r_mte,
4838 gen_helper_sve_ldff1hss_le_r_mte,
4839 gen_helper_sve_ldff1ss_le_r_mte,
4840 gen_helper_sve_ldff1sdu_le_r_mte,
4842 gen_helper_sve_ldff1bds_r_mte,
4843 gen_helper_sve_ldff1bss_r_mte,
4844 gen_helper_sve_ldff1bhs_r_mte,
4845 gen_helper_sve_ldff1dd_le_r_mte },
4847 /* mte active, big-endian */
4848 { gen_helper_sve_ldff1bb_r_mte,
4849 gen_helper_sve_ldff1bhu_r_mte,
4850 gen_helper_sve_ldff1bsu_r_mte,
4851 gen_helper_sve_ldff1bdu_r_mte,
4853 gen_helper_sve_ldff1sds_be_r_mte,
4854 gen_helper_sve_ldff1hh_be_r_mte,
4855 gen_helper_sve_ldff1hsu_be_r_mte,
4856 gen_helper_sve_ldff1hdu_be_r_mte,
4858 gen_helper_sve_ldff1hds_be_r_mte,
4859 gen_helper_sve_ldff1hss_be_r_mte,
4860 gen_helper_sve_ldff1ss_be_r_mte,
4861 gen_helper_sve_ldff1sdu_be_r_mte,
4863 gen_helper_sve_ldff1bds_r_mte,
4864 gen_helper_sve_ldff1bss_r_mte,
4865 gen_helper_sve_ldff1bhs_r_mte,
4866 gen_helper_sve_ldff1dd_be_r_mte } },
4869 if (!dc_isar_feature(aa64_sve, s)) {
4870 return false;
4872 s->is_nonstreaming = true;
4873 if (sve_access_check(s)) {
4874 TCGv_i64 addr = new_tmp_a64(s);
4875 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
4876 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
4877 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
4878 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
4880 return true;
4883 static bool trans_LDNF1_zpri(DisasContext *s, arg_rpri_load *a)
4885 static gen_helper_gvec_mem * const fns[2][2][16] = {
4886 { /* mte inactive, little-endian */
4887 { gen_helper_sve_ldnf1bb_r,
4888 gen_helper_sve_ldnf1bhu_r,
4889 gen_helper_sve_ldnf1bsu_r,
4890 gen_helper_sve_ldnf1bdu_r,
4892 gen_helper_sve_ldnf1sds_le_r,
4893 gen_helper_sve_ldnf1hh_le_r,
4894 gen_helper_sve_ldnf1hsu_le_r,
4895 gen_helper_sve_ldnf1hdu_le_r,
4897 gen_helper_sve_ldnf1hds_le_r,
4898 gen_helper_sve_ldnf1hss_le_r,
4899 gen_helper_sve_ldnf1ss_le_r,
4900 gen_helper_sve_ldnf1sdu_le_r,
4902 gen_helper_sve_ldnf1bds_r,
4903 gen_helper_sve_ldnf1bss_r,
4904 gen_helper_sve_ldnf1bhs_r,
4905 gen_helper_sve_ldnf1dd_le_r },
4907 /* mte inactive, big-endian */
4908 { gen_helper_sve_ldnf1bb_r,
4909 gen_helper_sve_ldnf1bhu_r,
4910 gen_helper_sve_ldnf1bsu_r,
4911 gen_helper_sve_ldnf1bdu_r,
4913 gen_helper_sve_ldnf1sds_be_r,
4914 gen_helper_sve_ldnf1hh_be_r,
4915 gen_helper_sve_ldnf1hsu_be_r,
4916 gen_helper_sve_ldnf1hdu_be_r,
4918 gen_helper_sve_ldnf1hds_be_r,
4919 gen_helper_sve_ldnf1hss_be_r,
4920 gen_helper_sve_ldnf1ss_be_r,
4921 gen_helper_sve_ldnf1sdu_be_r,
4923 gen_helper_sve_ldnf1bds_r,
4924 gen_helper_sve_ldnf1bss_r,
4925 gen_helper_sve_ldnf1bhs_r,
4926 gen_helper_sve_ldnf1dd_be_r } },
4928 { /* mte active, little-endian */
4929 { gen_helper_sve_ldnf1bb_r_mte,
4930 gen_helper_sve_ldnf1bhu_r_mte,
4931 gen_helper_sve_ldnf1bsu_r_mte,
4932 gen_helper_sve_ldnf1bdu_r_mte,
4934 gen_helper_sve_ldnf1sds_le_r_mte,
4935 gen_helper_sve_ldnf1hh_le_r_mte,
4936 gen_helper_sve_ldnf1hsu_le_r_mte,
4937 gen_helper_sve_ldnf1hdu_le_r_mte,
4939 gen_helper_sve_ldnf1hds_le_r_mte,
4940 gen_helper_sve_ldnf1hss_le_r_mte,
4941 gen_helper_sve_ldnf1ss_le_r_mte,
4942 gen_helper_sve_ldnf1sdu_le_r_mte,
4944 gen_helper_sve_ldnf1bds_r_mte,
4945 gen_helper_sve_ldnf1bss_r_mte,
4946 gen_helper_sve_ldnf1bhs_r_mte,
4947 gen_helper_sve_ldnf1dd_le_r_mte },
4949 /* mte active, big-endian */
4950 { gen_helper_sve_ldnf1bb_r_mte,
4951 gen_helper_sve_ldnf1bhu_r_mte,
4952 gen_helper_sve_ldnf1bsu_r_mte,
4953 gen_helper_sve_ldnf1bdu_r_mte,
4955 gen_helper_sve_ldnf1sds_be_r_mte,
4956 gen_helper_sve_ldnf1hh_be_r_mte,
4957 gen_helper_sve_ldnf1hsu_be_r_mte,
4958 gen_helper_sve_ldnf1hdu_be_r_mte,
4960 gen_helper_sve_ldnf1hds_be_r_mte,
4961 gen_helper_sve_ldnf1hss_be_r_mte,
4962 gen_helper_sve_ldnf1ss_be_r_mte,
4963 gen_helper_sve_ldnf1sdu_be_r_mte,
4965 gen_helper_sve_ldnf1bds_r_mte,
4966 gen_helper_sve_ldnf1bss_r_mte,
4967 gen_helper_sve_ldnf1bhs_r_mte,
4968 gen_helper_sve_ldnf1dd_be_r_mte } },
4971 if (!dc_isar_feature(aa64_sve, s)) {
4972 return false;
4974 s->is_nonstreaming = true;
4975 if (sve_access_check(s)) {
4976 int vsz = vec_full_reg_size(s);
4977 int elements = vsz >> dtype_esz[a->dtype];
4978 int off = (a->imm * elements) << dtype_msz(a->dtype);
4979 TCGv_i64 addr = new_tmp_a64(s);
4981 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), off);
4982 do_mem_zpa(s, a->rd, a->pg, addr, a->dtype, 1, false,
4983 fns[s->mte_active[0]][s->be_data == MO_BE][a->dtype]);
4985 return true;
4988 static void do_ldrq(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
4990 unsigned vsz = vec_full_reg_size(s);
4991 TCGv_ptr t_pg;
4992 int poff;
4994 /* Load the first quadword using the normal predicated load helpers. */
4995 poff = pred_full_reg_offset(s, pg);
4996 if (vsz > 16) {
4998 /* Zero-extend the first 16 bits of the predicate into a temporary.
4999 * This avoids triggering an assert making sure we don't have bits
5000 * set within a predicate beyond VQ, but we have lowered VQ to 1
5001 * for this load operation. */
5003 TCGv_i64 tmp = tcg_temp_new_i64();
5004 #if HOST_BIG_ENDIAN
5005 poff += 6;
5006 #endif
5007 tcg_gen_ld16u_i64(tmp, cpu_env, poff);
5009 poff = offsetof(CPUARMState, vfp.preg_tmp);
5010 tcg_gen_st_i64(tmp, cpu_env, poff);
5011 tcg_temp_free_i64(tmp);
5014 t_pg = tcg_temp_new_ptr();
5015 tcg_gen_addi_ptr(t_pg, cpu_env, poff);
5017 gen_helper_gvec_mem *fn
5018 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
5019 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(16, 16, zt)));
5021 tcg_temp_free_ptr(t_pg);
5023 /* Replicate that first quadword. */
5024 if (vsz > 16) {
5025 int doff = vec_full_reg_offset(s, zt);
5026 tcg_gen_gvec_dup_mem(4, doff + 16, doff, vsz - 16, vsz - 16);
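/* The dup element size of 4 means units of 1 << 4 == 16 bytes: the
 * quadword just loaded is copied across the remainder of the vector,
 * which is exactly the LD1RQ* replication semantic. */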
5030 static bool trans_LD1RQ_zprr(DisasContext *s, arg_rprr_load *a)
5032 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) {
5033 return false;
5035 if (sve_access_check(s)) {
5036 int msz = dtype_msz(a->dtype);
5037 TCGv_i64 addr = new_tmp_a64(s);
5038 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), msz);
5039 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5040 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
5042 return true;
5045 static bool trans_LD1RQ_zpri(DisasContext *s, arg_rpri_load *a)
5047 if (!dc_isar_feature(aa64_sve, s)) {
5048 return false;
5050 if (sve_access_check(s)) {
5051 TCGv_i64 addr = new_tmp_a64(s);
5052 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 16);
5053 do_ldrq(s, a->rd, a->pg, addr, a->dtype);
5055 return true;
5058 static void do_ldro(DisasContext *s, int zt, int pg, TCGv_i64 addr, int dtype)
5060 unsigned vsz = vec_full_reg_size(s);
5061 unsigned vsz_r32;
5062 TCGv_ptr t_pg;
5063 int poff, doff;
5065 if (vsz < 32) {
5067 /* Note that this UNDEFINED check comes after CheckSVEEnabled()
5068 * in the ARM pseudocode, which is the sve_access_check() done
5069 * in our caller. We should not now return false from the caller. */
5071 unallocated_encoding(s);
5072 return;
5075 /* Load the first octaword using the normal predicated load helpers. */
5077 poff = pred_full_reg_offset(s, pg);
5078 if (vsz > 32) {
5080 /* Zero-extend the first 32 bits of the predicate into a temporary.
5081 * This avoids triggering an assert making sure we don't have bits
5082 * set within a predicate beyond VQ, but we have lowered VQ to 2
5083 * for this load operation. */
5085 TCGv_i64 tmp = tcg_temp_new_i64();
5086 #if HOST_BIG_ENDIAN
5087 poff += 4;
5088 #endif
5089 tcg_gen_ld32u_i64(tmp, cpu_env, poff);
5091 poff = offsetof(CPUARMState, vfp.preg_tmp);
5092 tcg_gen_st_i64(tmp, cpu_env, poff);
5093 tcg_temp_free_i64(tmp);
5096 t_pg = tcg_temp_new_ptr();
5097 tcg_gen_addi_ptr(t_pg, cpu_env, poff);
5099 gen_helper_gvec_mem *fn
5100 = ldr_fns[s->mte_active[0]][s->be_data == MO_BE][dtype][0];
5101 fn(cpu_env, t_pg, addr, tcg_constant_i32(simd_desc(32, 32, zt)));
5103 tcg_temp_free_ptr(t_pg);
5106 /* Replicate that first octaword.
5107 * The replication happens in units of 32; if the full vector size
5108 * is not a multiple of 32, the final bits are zeroed. */
5110 doff = vec_full_reg_offset(s, zt);
5111 vsz_r32 = QEMU_ALIGN_DOWN(vsz, 32);
5112 if (vsz >= 64) {
5113 tcg_gen_gvec_dup_mem(5, doff + 32, doff, vsz_r32 - 32, vsz_r32 - 32);
5115 vsz -= vsz_r32;
5116 if (vsz) {
5117 tcg_gen_gvec_dup_imm(MO_64, doff + vsz_r32, vsz, vsz, 0);
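/* Here the replication unit is 1 << 5 == 32 bytes. For example, with
 * vsz == 48 (a 384-bit vector), vsz_r32 == 32, the dup above is
 * skipped, and the 16 bytes beyond the loaded octaword are zeroed as
 * the comment requires. */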
5121 static bool trans_LD1RO_zprr(DisasContext *s, arg_rprr_load *a)
5123 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5124 return false;
5126 if (a->rm == 31) {
5127 return false;
5129 s->is_nonstreaming = true;
5130 if (sve_access_check(s)) {
5131 TCGv_i64 addr = new_tmp_a64(s);
5132 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), dtype_msz(a->dtype));
5133 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5134 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5136 return true;
5139 static bool trans_LD1RO_zpri(DisasContext *s, arg_rpri_load *a)
5141 if (!dc_isar_feature(aa64_sve_f64mm, s)) {
5142 return false;
5144 s->is_nonstreaming = true;
5145 if (sve_access_check(s)) {
5146 TCGv_i64 addr = new_tmp_a64(s);
5147 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn), a->imm * 32);
5148 do_ldro(s, a->rd, a->pg, addr, a->dtype);
5150 return true;
5153 /* Load and broadcast element. */
5154 static bool trans_LD1R_zpri(DisasContext *s, arg_rpri_load *a)
5156 unsigned vsz = vec_full_reg_size(s);
5157 unsigned psz = pred_full_reg_size(s);
5158 unsigned esz = dtype_esz[a->dtype];
5159 unsigned msz = dtype_msz(a->dtype);
5160 TCGLabel *over;
5161 TCGv_i64 temp, clean_addr;
5163 if (!dc_isar_feature(aa64_sve, s)) {
5164 return false;
5166 if (!sve_access_check(s)) {
5167 return true;
5170 over = gen_new_label();
5172 /* If the guarding predicate has no bits set, no load occurs. */
5173 if (psz <= 8) {
5174 /* Reduce the pred_esz_masks value simply to reduce the
5175 * size of the code generated here. */
5177 uint64_t psz_mask = MAKE_64BIT_MASK(0, psz * 8);
5178 temp = tcg_temp_new_i64();
5179 tcg_gen_ld_i64(temp, cpu_env, pred_full_reg_offset(s, a->pg));
5180 tcg_gen_andi_i64(temp, temp, pred_esz_masks[esz] & psz_mask);
5181 tcg_gen_brcondi_i64(TCG_COND_EQ, temp, 0, over);
5182 tcg_temp_free_i64(temp);
5183 } else {
5184 TCGv_i32 t32 = tcg_temp_new_i32();
5185 find_last_active(s, t32, esz, a->pg);
5186 tcg_gen_brcondi_i32(TCG_COND_LT, t32, 0, over);
5187 tcg_temp_free_i32(t32);
5190 /* Load the data. */
5191 temp = tcg_temp_new_i64();
5192 tcg_gen_addi_i64(temp, cpu_reg_sp(s, a->rn), a->imm << msz);
5193 clean_addr = gen_mte_check1(s, temp, false, true, msz);
5195 tcg_gen_qemu_ld_i64(temp, clean_addr, get_mem_index(s),
5196 finalize_memop(s, dtype_mop[a->dtype]));
5198 /* Broadcast to *all* elements. */
5199 tcg_gen_gvec_dup_i64(esz, vec_full_reg_offset(s, a->rd),
5200 vsz, vsz, temp);
5201 tcg_temp_free_i64(temp);
5203 /* Zero the inactive elements. */
5204 gen_set_label(over);
5205 return do_movz_zpz(s, a->rd, a->rd, a->pg, esz, false);
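/* The two predicate tests above trade generality for code size: when
 * the whole predicate fits in one i64 (psz <= 8, i.e. VL <= 512 bits)
 * a single load-and-mask against pred_esz_masks suffices; otherwise
 * find_last_active scans for any active element of size esz. */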
5208 static void do_st_zpa(DisasContext *s, int zt, int pg, TCGv_i64 addr,
5209 int msz, int esz, int nreg)
5211 static gen_helper_gvec_mem * const fn_single[2][2][4][4] = {
5212 { { { gen_helper_sve_st1bb_r,
5213 gen_helper_sve_st1bh_r,
5214 gen_helper_sve_st1bs_r,
5215 gen_helper_sve_st1bd_r },
5216 { NULL,
5217 gen_helper_sve_st1hh_le_r,
5218 gen_helper_sve_st1hs_le_r,
5219 gen_helper_sve_st1hd_le_r },
5220 { NULL, NULL,
5221 gen_helper_sve_st1ss_le_r,
5222 gen_helper_sve_st1sd_le_r },
5223 { NULL, NULL, NULL,
5224 gen_helper_sve_st1dd_le_r } },
5225 { { gen_helper_sve_st1bb_r,
5226 gen_helper_sve_st1bh_r,
5227 gen_helper_sve_st1bs_r,
5228 gen_helper_sve_st1bd_r },
5229 { NULL,
5230 gen_helper_sve_st1hh_be_r,
5231 gen_helper_sve_st1hs_be_r,
5232 gen_helper_sve_st1hd_be_r },
5233 { NULL, NULL,
5234 gen_helper_sve_st1ss_be_r,
5235 gen_helper_sve_st1sd_be_r },
5236 { NULL, NULL, NULL,
5237 gen_helper_sve_st1dd_be_r } } },
5239 { { { gen_helper_sve_st1bb_r_mte,
5240 gen_helper_sve_st1bh_r_mte,
5241 gen_helper_sve_st1bs_r_mte,
5242 gen_helper_sve_st1bd_r_mte },
5243 { NULL,
5244 gen_helper_sve_st1hh_le_r_mte,
5245 gen_helper_sve_st1hs_le_r_mte,
5246 gen_helper_sve_st1hd_le_r_mte },
5247 { NULL, NULL,
5248 gen_helper_sve_st1ss_le_r_mte,
5249 gen_helper_sve_st1sd_le_r_mte },
5250 { NULL, NULL, NULL,
5251 gen_helper_sve_st1dd_le_r_mte } },
5252 { { gen_helper_sve_st1bb_r_mte,
5253 gen_helper_sve_st1bh_r_mte,
5254 gen_helper_sve_st1bs_r_mte,
5255 gen_helper_sve_st1bd_r_mte },
5256 { NULL,
5257 gen_helper_sve_st1hh_be_r_mte,
5258 gen_helper_sve_st1hs_be_r_mte,
5259 gen_helper_sve_st1hd_be_r_mte },
5260 { NULL, NULL,
5261 gen_helper_sve_st1ss_be_r_mte,
5262 gen_helper_sve_st1sd_be_r_mte },
5263 { NULL, NULL, NULL,
5264 gen_helper_sve_st1dd_be_r_mte } } },
5266 static gen_helper_gvec_mem * const fn_multiple[2][2][3][4] = {
5267 { { { gen_helper_sve_st2bb_r,
5268 gen_helper_sve_st2hh_le_r,
5269 gen_helper_sve_st2ss_le_r,
5270 gen_helper_sve_st2dd_le_r },
5271 { gen_helper_sve_st3bb_r,
5272 gen_helper_sve_st3hh_le_r,
5273 gen_helper_sve_st3ss_le_r,
5274 gen_helper_sve_st3dd_le_r },
5275 { gen_helper_sve_st4bb_r,
5276 gen_helper_sve_st4hh_le_r,
5277 gen_helper_sve_st4ss_le_r,
5278 gen_helper_sve_st4dd_le_r } },
5279 { { gen_helper_sve_st2bb_r,
5280 gen_helper_sve_st2hh_be_r,
5281 gen_helper_sve_st2ss_be_r,
5282 gen_helper_sve_st2dd_be_r },
5283 { gen_helper_sve_st3bb_r,
5284 gen_helper_sve_st3hh_be_r,
5285 gen_helper_sve_st3ss_be_r,
5286 gen_helper_sve_st3dd_be_r },
5287 { gen_helper_sve_st4bb_r,
5288 gen_helper_sve_st4hh_be_r,
5289 gen_helper_sve_st4ss_be_r,
5290 gen_helper_sve_st4dd_be_r } } },
5291 { { { gen_helper_sve_st2bb_r_mte,
5292 gen_helper_sve_st2hh_le_r_mte,
5293 gen_helper_sve_st2ss_le_r_mte,
5294 gen_helper_sve_st2dd_le_r_mte },
5295 { gen_helper_sve_st3bb_r_mte,
5296 gen_helper_sve_st3hh_le_r_mte,
5297 gen_helper_sve_st3ss_le_r_mte,
5298 gen_helper_sve_st3dd_le_r_mte },
5299 { gen_helper_sve_st4bb_r_mte,
5300 gen_helper_sve_st4hh_le_r_mte,
5301 gen_helper_sve_st4ss_le_r_mte,
5302 gen_helper_sve_st4dd_le_r_mte } },
5303 { { gen_helper_sve_st2bb_r_mte,
5304 gen_helper_sve_st2hh_be_r_mte,
5305 gen_helper_sve_st2ss_be_r_mte,
5306 gen_helper_sve_st2dd_be_r_mte },
5307 { gen_helper_sve_st3bb_r_mte,
5308 gen_helper_sve_st3hh_be_r_mte,
5309 gen_helper_sve_st3ss_be_r_mte,
5310 gen_helper_sve_st3dd_be_r_mte },
5311 { gen_helper_sve_st4bb_r_mte,
5312 gen_helper_sve_st4hh_be_r_mte,
5313 gen_helper_sve_st4ss_be_r_mte,
5314 gen_helper_sve_st4dd_be_r_mte } } },
5316 gen_helper_gvec_mem *fn;
5317 int be = s->be_data == MO_BE;
5319 if (nreg == 0) {
5320 /* ST1 */
5321 fn = fn_single[s->mte_active[0]][be][msz][esz];
5322 nreg = 1;
5323 } else {
5324 /* ST2, ST3, ST4 -- msz == esz, enforced by encoding */
5325 assert(msz == esz);
5326 fn = fn_multiple[s->mte_active[0]][be][nreg - 1][msz];
5328 assert(fn != NULL);
5329 do_mem_zpa(s, zt, pg, addr, msz_dtype(s, msz), nreg, true, fn);
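/* The two tables have different shapes because ST1 permits truncating
 * stores (msz <= esz, hence the NULL entries in fn_single) while
 * ST2-ST4 require msz == esz, and so need only a single size axis in
 * fn_multiple. */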
5332 static bool trans_ST_zprr(DisasContext *s, arg_rprr_store *a)
5334 if (!dc_isar_feature(aa64_sve, s)) {
5335 return false;
5337 if (a->rm == 31 || a->msz > a->esz) {
5338 return false;
5340 if (sve_access_check(s)) {
5341 TCGv_i64 addr = new_tmp_a64(s);
5342 tcg_gen_shli_i64(addr, cpu_reg(s, a->rm), a->msz);
5343 tcg_gen_add_i64(addr, addr, cpu_reg_sp(s, a->rn));
5344 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5346 return true;
5349 static bool trans_ST_zpri(DisasContext *s, arg_rpri_store *a)
5351 if (!dc_isar_feature(aa64_sve, s)) {
5352 return false;
5354 if (a->msz > a->esz) {
5355 return false;
5357 if (sve_access_check(s)) {
5358 int vsz = vec_full_reg_size(s);
5359 int elements = vsz >> a->esz;
5360 TCGv_i64 addr = new_tmp_a64(s);
5362 tcg_gen_addi_i64(addr, cpu_reg_sp(s, a->rn),
5363 (a->imm * elements * (a->nreg + 1)) << a->msz);
5364 do_st_zpa(s, a->rd, a->pg, addr, a->msz, a->esz, a->nreg);
5366 return true;
5370 /* *** SVE gather loads / scatter stores */
5373 static void do_mem_zpz(DisasContext *s, int zt, int pg, int zm,
5374 int scale, TCGv_i64 scalar, int msz, bool is_write,
5375 gen_helper_gvec_mem_scatter *fn)
5377 unsigned vsz = vec_full_reg_size(s);
5378 TCGv_ptr t_zm = tcg_temp_new_ptr();
5379 TCGv_ptr t_pg = tcg_temp_new_ptr();
5380 TCGv_ptr t_zt = tcg_temp_new_ptr();
5381 int desc = 0;
5383 if (s->mte_active[0]) {
5384 desc = FIELD_DP32(desc, MTEDESC, MIDX, get_mem_index(s));
5385 desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
5386 desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
5387 desc = FIELD_DP32(desc, MTEDESC, WRITE, is_write);
5388 desc = FIELD_DP32(desc, MTEDESC, SIZEM1, (1 << msz) - 1);
5389 desc <<= SVE_MTEDESC_SHIFT;
5391 desc = simd_desc(vsz, vsz, desc | scale);
5393 tcg_gen_addi_ptr(t_pg, cpu_env, pred_full_reg_offset(s, pg));
5394 tcg_gen_addi_ptr(t_zm, cpu_env, vec_full_reg_offset(s, zm));
5395 tcg_gen_addi_ptr(t_zt, cpu_env, vec_full_reg_offset(s, zt));
5396 fn(cpu_env, t_zt, t_pg, t_zm, scalar, tcg_constant_i32(desc));
5398 tcg_temp_free_ptr(t_zt);
5399 tcg_temp_free_ptr(t_zm);
5400 tcg_temp_free_ptr(t_pg);
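/* As in do_mem_zpa, any MTE fields sit above SVE_MTEDESC_SHIFT; the
 * low descriptor bits here carry the offset scale rather than a
 * register number, since all three vector operands are passed to the
 * helper as pointers. */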
5403 /* Indexed by [mte][be][ff][xs][u][msz]. */
5404 static gen_helper_gvec_mem_scatter * const
5405 gather_load_fn32[2][2][2][2][2][3] = {
5406 { /* MTE Inactive */
5407 { /* Little-endian */
5408 { { { gen_helper_sve_ldbss_zsu,
5409 gen_helper_sve_ldhss_le_zsu,
5410 NULL, },
5411 { gen_helper_sve_ldbsu_zsu,
5412 gen_helper_sve_ldhsu_le_zsu,
5413 gen_helper_sve_ldss_le_zsu, } },
5414 { { gen_helper_sve_ldbss_zss,
5415 gen_helper_sve_ldhss_le_zss,
5416 NULL, },
5417 { gen_helper_sve_ldbsu_zss,
5418 gen_helper_sve_ldhsu_le_zss,
5419 gen_helper_sve_ldss_le_zss, } } },
5421 /* First-fault */
5422 { { { gen_helper_sve_ldffbss_zsu,
5423 gen_helper_sve_ldffhss_le_zsu,
5424 NULL, },
5425 { gen_helper_sve_ldffbsu_zsu,
5426 gen_helper_sve_ldffhsu_le_zsu,
5427 gen_helper_sve_ldffss_le_zsu, } },
5428 { { gen_helper_sve_ldffbss_zss,
5429 gen_helper_sve_ldffhss_le_zss,
5430 NULL, },
5431 { gen_helper_sve_ldffbsu_zss,
5432 gen_helper_sve_ldffhsu_le_zss,
5433 gen_helper_sve_ldffss_le_zss, } } } },
5435 { /* Big-endian */
5436 { { { gen_helper_sve_ldbss_zsu,
5437 gen_helper_sve_ldhss_be_zsu,
5438 NULL, },
5439 { gen_helper_sve_ldbsu_zsu,
5440 gen_helper_sve_ldhsu_be_zsu,
5441 gen_helper_sve_ldss_be_zsu, } },
5442 { { gen_helper_sve_ldbss_zss,
5443 gen_helper_sve_ldhss_be_zss,
5444 NULL, },
5445 { gen_helper_sve_ldbsu_zss,
5446 gen_helper_sve_ldhsu_be_zss,
5447 gen_helper_sve_ldss_be_zss, } } },
5449 /* First-fault */
5450 { { { gen_helper_sve_ldffbss_zsu,
5451 gen_helper_sve_ldffhss_be_zsu,
5452 NULL, },
5453 { gen_helper_sve_ldffbsu_zsu,
5454 gen_helper_sve_ldffhsu_be_zsu,
5455 gen_helper_sve_ldffss_be_zsu, } },
5456 { { gen_helper_sve_ldffbss_zss,
5457 gen_helper_sve_ldffhss_be_zss,
5458 NULL, },
5459 { gen_helper_sve_ldffbsu_zss,
5460 gen_helper_sve_ldffhsu_be_zss,
5461 gen_helper_sve_ldffss_be_zss, } } } } },
5462 { /* MTE Active */
5463 { /* Little-endian */
5464 { { { gen_helper_sve_ldbss_zsu_mte,
5465 gen_helper_sve_ldhss_le_zsu_mte,
5466 NULL, },
5467 { gen_helper_sve_ldbsu_zsu_mte,
5468 gen_helper_sve_ldhsu_le_zsu_mte,
5469 gen_helper_sve_ldss_le_zsu_mte, } },
5470 { { gen_helper_sve_ldbss_zss_mte,
5471 gen_helper_sve_ldhss_le_zss_mte,
5472 NULL, },
5473 { gen_helper_sve_ldbsu_zss_mte,
5474 gen_helper_sve_ldhsu_le_zss_mte,
5475 gen_helper_sve_ldss_le_zss_mte, } } },
5477 /* First-fault */
5478 { { { gen_helper_sve_ldffbss_zsu_mte,
5479 gen_helper_sve_ldffhss_le_zsu_mte,
5480 NULL, },
5481 { gen_helper_sve_ldffbsu_zsu_mte,
5482 gen_helper_sve_ldffhsu_le_zsu_mte,
5483 gen_helper_sve_ldffss_le_zsu_mte, } },
5484 { { gen_helper_sve_ldffbss_zss_mte,
5485 gen_helper_sve_ldffhss_le_zss_mte,
5486 NULL, },
5487 { gen_helper_sve_ldffbsu_zss_mte,
5488 gen_helper_sve_ldffhsu_le_zss_mte,
5489 gen_helper_sve_ldffss_le_zss_mte, } } } },
5491 { /* Big-endian */
5492 { { { gen_helper_sve_ldbss_zsu_mte,
5493 gen_helper_sve_ldhss_be_zsu_mte,
5494 NULL, },
5495 { gen_helper_sve_ldbsu_zsu_mte,
5496 gen_helper_sve_ldhsu_be_zsu_mte,
5497 gen_helper_sve_ldss_be_zsu_mte, } },
5498 { { gen_helper_sve_ldbss_zss_mte,
5499 gen_helper_sve_ldhss_be_zss_mte,
5500 NULL, },
5501 { gen_helper_sve_ldbsu_zss_mte,
5502 gen_helper_sve_ldhsu_be_zss_mte,
5503 gen_helper_sve_ldss_be_zss_mte, } } },
5505 /* First-fault */
5506 { { { gen_helper_sve_ldffbss_zsu_mte,
5507 gen_helper_sve_ldffhss_be_zsu_mte,
5508 NULL, },
5509 { gen_helper_sve_ldffbsu_zsu_mte,
5510 gen_helper_sve_ldffhsu_be_zsu_mte,
5511 gen_helper_sve_ldffss_be_zsu_mte, } },
5512 { { gen_helper_sve_ldffbss_zss_mte,
5513 gen_helper_sve_ldffhss_be_zss_mte,
5514 NULL, },
5515 { gen_helper_sve_ldffbsu_zss_mte,
5516 gen_helper_sve_ldffhsu_be_zss_mte,
5517 gen_helper_sve_ldffss_be_zss_mte, } } } } },
5520 /* Note that we overload xs=2 to indicate 64-bit offset. */
5521 static gen_helper_gvec_mem_scatter * const
5522 gather_load_fn64[2][2][2][3][2][4] = {
5523 { /* MTE Inactive */
5524 { /* Little-endian */
5525 { { { gen_helper_sve_ldbds_zsu,
5526 gen_helper_sve_ldhds_le_zsu,
5527 gen_helper_sve_ldsds_le_zsu,
5528 NULL, },
5529 { gen_helper_sve_ldbdu_zsu,
5530 gen_helper_sve_ldhdu_le_zsu,
5531 gen_helper_sve_ldsdu_le_zsu,
5532 gen_helper_sve_lddd_le_zsu, } },
5533 { { gen_helper_sve_ldbds_zss,
5534 gen_helper_sve_ldhds_le_zss,
5535 gen_helper_sve_ldsds_le_zss,
5536 NULL, },
5537 { gen_helper_sve_ldbdu_zss,
5538 gen_helper_sve_ldhdu_le_zss,
5539 gen_helper_sve_ldsdu_le_zss,
5540 gen_helper_sve_lddd_le_zss, } },
5541 { { gen_helper_sve_ldbds_zd,
5542 gen_helper_sve_ldhds_le_zd,
5543 gen_helper_sve_ldsds_le_zd,
5544 NULL, },
5545 { gen_helper_sve_ldbdu_zd,
5546 gen_helper_sve_ldhdu_le_zd,
5547 gen_helper_sve_ldsdu_le_zd,
5548 gen_helper_sve_lddd_le_zd, } } },
5550 /* First-fault */
5551 { { { gen_helper_sve_ldffbds_zsu,
5552 gen_helper_sve_ldffhds_le_zsu,
5553 gen_helper_sve_ldffsds_le_zsu,
5554 NULL, },
5555 { gen_helper_sve_ldffbdu_zsu,
5556 gen_helper_sve_ldffhdu_le_zsu,
5557 gen_helper_sve_ldffsdu_le_zsu,
5558 gen_helper_sve_ldffdd_le_zsu, } },
5559 { { gen_helper_sve_ldffbds_zss,
5560 gen_helper_sve_ldffhds_le_zss,
5561 gen_helper_sve_ldffsds_le_zss,
5562 NULL, },
5563 { gen_helper_sve_ldffbdu_zss,
5564 gen_helper_sve_ldffhdu_le_zss,
5565 gen_helper_sve_ldffsdu_le_zss,
5566 gen_helper_sve_ldffdd_le_zss, } },
5567 { { gen_helper_sve_ldffbds_zd,
5568 gen_helper_sve_ldffhds_le_zd,
5569 gen_helper_sve_ldffsds_le_zd,
5570 NULL, },
5571 { gen_helper_sve_ldffbdu_zd,
5572 gen_helper_sve_ldffhdu_le_zd,
5573 gen_helper_sve_ldffsdu_le_zd,
5574 gen_helper_sve_ldffdd_le_zd, } } } },
5575 { /* Big-endian */
5576 { { { gen_helper_sve_ldbds_zsu,
5577 gen_helper_sve_ldhds_be_zsu,
5578 gen_helper_sve_ldsds_be_zsu,
5579 NULL, },
5580 { gen_helper_sve_ldbdu_zsu,
5581 gen_helper_sve_ldhdu_be_zsu,
5582 gen_helper_sve_ldsdu_be_zsu,
5583 gen_helper_sve_lddd_be_zsu, } },
5584 { { gen_helper_sve_ldbds_zss,
5585 gen_helper_sve_ldhds_be_zss,
5586 gen_helper_sve_ldsds_be_zss,
5587 NULL, },
5588 { gen_helper_sve_ldbdu_zss,
5589 gen_helper_sve_ldhdu_be_zss,
5590 gen_helper_sve_ldsdu_be_zss,
5591 gen_helper_sve_lddd_be_zss, } },
5592 { { gen_helper_sve_ldbds_zd,
5593 gen_helper_sve_ldhds_be_zd,
5594 gen_helper_sve_ldsds_be_zd,
5595 NULL, },
5596 { gen_helper_sve_ldbdu_zd,
5597 gen_helper_sve_ldhdu_be_zd,
5598 gen_helper_sve_ldsdu_be_zd,
5599 gen_helper_sve_lddd_be_zd, } } },
5601 /* First-fault */
5602 { { { gen_helper_sve_ldffbds_zsu,
5603 gen_helper_sve_ldffhds_be_zsu,
5604 gen_helper_sve_ldffsds_be_zsu,
5605 NULL, },
5606 { gen_helper_sve_ldffbdu_zsu,
5607 gen_helper_sve_ldffhdu_be_zsu,
5608 gen_helper_sve_ldffsdu_be_zsu,
5609 gen_helper_sve_ldffdd_be_zsu, } },
5610 { { gen_helper_sve_ldffbds_zss,
5611 gen_helper_sve_ldffhds_be_zss,
5612 gen_helper_sve_ldffsds_be_zss,
5613 NULL, },
5614 { gen_helper_sve_ldffbdu_zss,
5615 gen_helper_sve_ldffhdu_be_zss,
5616 gen_helper_sve_ldffsdu_be_zss,
5617 gen_helper_sve_ldffdd_be_zss, } },
5618 { { gen_helper_sve_ldffbds_zd,
5619 gen_helper_sve_ldffhds_be_zd,
5620 gen_helper_sve_ldffsds_be_zd,
5621 NULL, },
5622 { gen_helper_sve_ldffbdu_zd,
5623 gen_helper_sve_ldffhdu_be_zd,
5624 gen_helper_sve_ldffsdu_be_zd,
5625 gen_helper_sve_ldffdd_be_zd, } } } } },
5626 { /* MTE Active */
5627 { /* Little-endian */
5628 { { { gen_helper_sve_ldbds_zsu_mte,
5629 gen_helper_sve_ldhds_le_zsu_mte,
5630 gen_helper_sve_ldsds_le_zsu_mte,
5631 NULL, },
5632 { gen_helper_sve_ldbdu_zsu_mte,
5633 gen_helper_sve_ldhdu_le_zsu_mte,
5634 gen_helper_sve_ldsdu_le_zsu_mte,
5635 gen_helper_sve_lddd_le_zsu_mte, } },
5636 { { gen_helper_sve_ldbds_zss_mte,
5637 gen_helper_sve_ldhds_le_zss_mte,
5638 gen_helper_sve_ldsds_le_zss_mte,
5639 NULL, },
5640 { gen_helper_sve_ldbdu_zss_mte,
5641 gen_helper_sve_ldhdu_le_zss_mte,
5642 gen_helper_sve_ldsdu_le_zss_mte,
5643 gen_helper_sve_lddd_le_zss_mte, } },
5644 { { gen_helper_sve_ldbds_zd_mte,
5645 gen_helper_sve_ldhds_le_zd_mte,
5646 gen_helper_sve_ldsds_le_zd_mte,
5647 NULL, },
5648 { gen_helper_sve_ldbdu_zd_mte,
5649 gen_helper_sve_ldhdu_le_zd_mte,
5650 gen_helper_sve_ldsdu_le_zd_mte,
5651 gen_helper_sve_lddd_le_zd_mte, } } },
5653 /* First-fault */
5654 { { { gen_helper_sve_ldffbds_zsu_mte,
5655 gen_helper_sve_ldffhds_le_zsu_mte,
5656 gen_helper_sve_ldffsds_le_zsu_mte,
5657 NULL, },
5658 { gen_helper_sve_ldffbdu_zsu_mte,
5659 gen_helper_sve_ldffhdu_le_zsu_mte,
5660 gen_helper_sve_ldffsdu_le_zsu_mte,
5661 gen_helper_sve_ldffdd_le_zsu_mte, } },
5662 { { gen_helper_sve_ldffbds_zss_mte,
5663 gen_helper_sve_ldffhds_le_zss_mte,
5664 gen_helper_sve_ldffsds_le_zss_mte,
5665 NULL, },
5666 { gen_helper_sve_ldffbdu_zss_mte,
5667 gen_helper_sve_ldffhdu_le_zss_mte,
5668 gen_helper_sve_ldffsdu_le_zss_mte,
5669 gen_helper_sve_ldffdd_le_zss_mte, } },
5670 { { gen_helper_sve_ldffbds_zd_mte,
5671 gen_helper_sve_ldffhds_le_zd_mte,
5672 gen_helper_sve_ldffsds_le_zd_mte,
5673 NULL, },
5674 { gen_helper_sve_ldffbdu_zd_mte,
5675 gen_helper_sve_ldffhdu_le_zd_mte,
5676 gen_helper_sve_ldffsdu_le_zd_mte,
5677 gen_helper_sve_ldffdd_le_zd_mte, } } } },
5678 { /* Big-endian */
5679 { { { gen_helper_sve_ldbds_zsu_mte,
5680 gen_helper_sve_ldhds_be_zsu_mte,
5681 gen_helper_sve_ldsds_be_zsu_mte,
5682 NULL, },
5683 { gen_helper_sve_ldbdu_zsu_mte,
5684 gen_helper_sve_ldhdu_be_zsu_mte,
5685 gen_helper_sve_ldsdu_be_zsu_mte,
5686 gen_helper_sve_lddd_be_zsu_mte, } },
5687 { { gen_helper_sve_ldbds_zss_mte,
5688 gen_helper_sve_ldhds_be_zss_mte,
5689 gen_helper_sve_ldsds_be_zss_mte,
5690 NULL, },
5691 { gen_helper_sve_ldbdu_zss_mte,
5692 gen_helper_sve_ldhdu_be_zss_mte,
5693 gen_helper_sve_ldsdu_be_zss_mte,
5694 gen_helper_sve_lddd_be_zss_mte, } },
5695 { { gen_helper_sve_ldbds_zd_mte,
5696 gen_helper_sve_ldhds_be_zd_mte,
5697 gen_helper_sve_ldsds_be_zd_mte,
5698 NULL, },
5699 { gen_helper_sve_ldbdu_zd_mte,
5700 gen_helper_sve_ldhdu_be_zd_mte,
5701 gen_helper_sve_ldsdu_be_zd_mte,
5702 gen_helper_sve_lddd_be_zd_mte, } } },
5704 /* First-fault */
5705 { { { gen_helper_sve_ldffbds_zsu_mte,
5706 gen_helper_sve_ldffhds_be_zsu_mte,
5707 gen_helper_sve_ldffsds_be_zsu_mte,
5708 NULL, },
5709 { gen_helper_sve_ldffbdu_zsu_mte,
5710 gen_helper_sve_ldffhdu_be_zsu_mte,
5711 gen_helper_sve_ldffsdu_be_zsu_mte,
5712 gen_helper_sve_ldffdd_be_zsu_mte, } },
5713 { { gen_helper_sve_ldffbds_zss_mte,
5714 gen_helper_sve_ldffhds_be_zss_mte,
5715 gen_helper_sve_ldffsds_be_zss_mte,
5716 NULL, },
5717 { gen_helper_sve_ldffbdu_zss_mte,
5718 gen_helper_sve_ldffhdu_be_zss_mte,
5719 gen_helper_sve_ldffsdu_be_zss_mte,
5720 gen_helper_sve_ldffdd_be_zss_mte, } },
5721 { { gen_helper_sve_ldffbds_zd_mte,
5722 gen_helper_sve_ldffhds_be_zd_mte,
5723 gen_helper_sve_ldffsds_be_zd_mte,
5724 NULL, },
5725 { gen_helper_sve_ldffbdu_zd_mte,
5726 gen_helper_sve_ldffhdu_be_zd_mte,
5727 gen_helper_sve_ldffsdu_be_zd_mte,
5728 gen_helper_sve_ldffdd_be_zd_mte, } } } } },
5731 static bool trans_LD1_zprz(DisasContext *s, arg_LD1_zprz *a)
5733 gen_helper_gvec_mem_scatter *fn = NULL;
5734 bool be = s->be_data == MO_BE;
5735 bool mte = s->mte_active[0];
5737 if (!dc_isar_feature(aa64_sve, s)) {
5738 return false;
5740 s->is_nonstreaming = true;
5741 if (!sve_access_check(s)) {
5742 return true;
5745 switch (a->esz) {
5746 case MO_32:
5747 fn = gather_load_fn32[mte][be][a->ff][a->xs][a->u][a->msz];
5748 break;
5749 case MO_64:
5750 fn = gather_load_fn64[mte][be][a->ff][a->xs][a->u][a->msz];
5751 break;
5753 assert(fn != NULL);
5755 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
5756 cpu_reg_sp(s, a->rn), a->msz, false, fn);
5757 return true;
5760 static bool trans_LD1_zpiz(DisasContext *s, arg_LD1_zpiz *a)
5762 gen_helper_gvec_mem_scatter *fn = NULL;
5763 bool be = s->be_data == MO_BE;
5764 bool mte = s->mte_active[0];
5766 if (a->esz < a->msz || (a->esz == a->msz && !a->u)) {
5767 return false;
5769 if (!dc_isar_feature(aa64_sve, s)) {
5770 return false;
5772 s->is_nonstreaming = true;
5773 if (!sve_access_check(s)) {
5774 return true;
5777 switch (a->esz) {
5778 case MO_32:
5779 fn = gather_load_fn32[mte][be][a->ff][0][a->u][a->msz];
5780 break;
5781 case MO_64:
5782 fn = gather_load_fn64[mte][be][a->ff][2][a->u][a->msz];
5783 break;
5785 assert(fn != NULL);
5787 /* Treat LD1_zpiz (zn[x] + imm) the same way as LD1_zprz (rn + zm[x])
5788 * by loading the immediate into the scalar parameter. */
5790 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5791 tcg_constant_i64(a->imm << a->msz), a->msz, false, fn);
5792 return true;
5795 static bool trans_LDNT1_zprz(DisasContext *s, arg_LD1_zprz *a)
5797 gen_helper_gvec_mem_scatter *fn = NULL;
5798 bool be = s->be_data == MO_BE;
5799 bool mte = s->mte_active[0];
5801 if (a->esz < a->msz + !a->u) {
5802 return false;
5804 if (!dc_isar_feature(aa64_sve2, s)) {
5805 return false;
5807 s->is_nonstreaming = true;
5808 if (!sve_access_check(s)) {
5809 return true;
5812 switch (a->esz) {
5813 case MO_32:
5814 fn = gather_load_fn32[mte][be][0][0][a->u][a->msz];
5815 break;
5816 case MO_64:
5817 fn = gather_load_fn64[mte][be][0][2][a->u][a->msz];
5818 break;
5820 assert(fn != NULL);
5822 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5823 cpu_reg(s, a->rm), a->msz, false, fn);
5824 return true;
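/* SVE2 LDNT1 (vector plus scalar) inverts the usual gather operand
 * roles: the vector register supplies the per-element addresses and
 * the scalar register the common offset, so cpu_reg(s, a->rm) is
 * passed as the scalar, with ff pinned to 0 and xs selecting the
 * 32-bit unsigned (0) or overloaded 64-bit (2) address forms. */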
5827 /* Indexed by [mte][be][xs][msz]. */
5828 static gen_helper_gvec_mem_scatter * const scatter_store_fn32[2][2][2][3] = {
5829 { /* MTE Inactive */
5830 { /* Little-endian */
5831 { gen_helper_sve_stbs_zsu,
5832 gen_helper_sve_sths_le_zsu,
5833 gen_helper_sve_stss_le_zsu, },
5834 { gen_helper_sve_stbs_zss,
5835 gen_helper_sve_sths_le_zss,
5836 gen_helper_sve_stss_le_zss, } },
5837 { /* Big-endian */
5838 { gen_helper_sve_stbs_zsu,
5839 gen_helper_sve_sths_be_zsu,
5840 gen_helper_sve_stss_be_zsu, },
5841 { gen_helper_sve_stbs_zss,
5842 gen_helper_sve_sths_be_zss,
5843 gen_helper_sve_stss_be_zss, } } },
5844 { /* MTE Active */
5845 { /* Little-endian */
5846 { gen_helper_sve_stbs_zsu_mte,
5847 gen_helper_sve_sths_le_zsu_mte,
5848 gen_helper_sve_stss_le_zsu_mte, },
5849 { gen_helper_sve_stbs_zss_mte,
5850 gen_helper_sve_sths_le_zss_mte,
5851 gen_helper_sve_stss_le_zss_mte, } },
5852 { /* Big-endian */
5853 { gen_helper_sve_stbs_zsu_mte,
5854 gen_helper_sve_sths_be_zsu_mte,
5855 gen_helper_sve_stss_be_zsu_mte, },
5856 { gen_helper_sve_stbs_zss_mte,
5857 gen_helper_sve_sths_be_zss_mte,
5858 gen_helper_sve_stss_be_zss_mte, } } },
5861 /* Note that we overload xs=2 to indicate 64-bit offset. */
5862 static gen_helper_gvec_mem_scatter * const scatter_store_fn64[2][2][3][4] = {
5863 { /* MTE Inactive */
5864 { /* Little-endian */
5865 { gen_helper_sve_stbd_zsu,
5866 gen_helper_sve_sthd_le_zsu,
5867 gen_helper_sve_stsd_le_zsu,
5868 gen_helper_sve_stdd_le_zsu, },
5869 { gen_helper_sve_stbd_zss,
5870 gen_helper_sve_sthd_le_zss,
5871 gen_helper_sve_stsd_le_zss,
5872 gen_helper_sve_stdd_le_zss, },
5873 { gen_helper_sve_stbd_zd,
5874 gen_helper_sve_sthd_le_zd,
5875 gen_helper_sve_stsd_le_zd,
5876 gen_helper_sve_stdd_le_zd, } },
5877 { /* Big-endian */
5878 { gen_helper_sve_stbd_zsu,
5879 gen_helper_sve_sthd_be_zsu,
5880 gen_helper_sve_stsd_be_zsu,
5881 gen_helper_sve_stdd_be_zsu, },
5882 { gen_helper_sve_stbd_zss,
5883 gen_helper_sve_sthd_be_zss,
5884 gen_helper_sve_stsd_be_zss,
5885 gen_helper_sve_stdd_be_zss, },
5886 { gen_helper_sve_stbd_zd,
5887 gen_helper_sve_sthd_be_zd,
5888 gen_helper_sve_stsd_be_zd,
5889 gen_helper_sve_stdd_be_zd, } } },
5890     { /* MTE Active */
5891 { /* Little-endian */
5892 { gen_helper_sve_stbd_zsu_mte,
5893 gen_helper_sve_sthd_le_zsu_mte,
5894 gen_helper_sve_stsd_le_zsu_mte,
5895 gen_helper_sve_stdd_le_zsu_mte, },
5896 { gen_helper_sve_stbd_zss_mte,
5897 gen_helper_sve_sthd_le_zss_mte,
5898 gen_helper_sve_stsd_le_zss_mte,
5899 gen_helper_sve_stdd_le_zss_mte, },
5900 { gen_helper_sve_stbd_zd_mte,
5901 gen_helper_sve_sthd_le_zd_mte,
5902 gen_helper_sve_stsd_le_zd_mte,
5903 gen_helper_sve_stdd_le_zd_mte, } },
5904 { /* Big-endian */
5905 { gen_helper_sve_stbd_zsu_mte,
5906 gen_helper_sve_sthd_be_zsu_mte,
5907 gen_helper_sve_stsd_be_zsu_mte,
5908 gen_helper_sve_stdd_be_zsu_mte, },
5909 { gen_helper_sve_stbd_zss_mte,
5910 gen_helper_sve_sthd_be_zss_mte,
5911 gen_helper_sve_stsd_be_zss_mte,
5912 gen_helper_sve_stdd_be_zss_mte, },
5913 { gen_helper_sve_stbd_zd_mte,
5914 gen_helper_sve_sthd_be_zd_mte,
5915 gen_helper_sve_stsd_be_zd_mte,
5916 gen_helper_sve_stdd_be_zd_mte, } } },
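/*
 * Illustrative lookup only (the function name is invented for this
 * sketch and is not used by the translator): with MTE inactive,
 * little-endian data, a 64-bit offset vector (the xs=2 overload) and
 * msz = MO_64, the table above resolves to the plain 64-bit store.
 */
static inline gen_helper_gvec_mem_scatter *
example_stdd_le_zd_lookup(void)
{
    /* [mte=0][be=0][xs=2][msz=MO_64] == gen_helper_sve_stdd_le_zd */
    return scatter_store_fn64[0][0][2][MO_64];
}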
5919 static bool trans_ST1_zprz(DisasContext *s, arg_ST1_zprz *a)
5921 gen_helper_gvec_mem_scatter *fn;
5922 bool be = s->be_data == MO_BE;
5923 bool mte = s->mte_active[0];
5925 if (a->esz < a->msz || (a->msz == 0 && a->scale)) {
5926 return false;
5928 if (!dc_isar_feature(aa64_sve, s)) {
5929 return false;
5931 s->is_nonstreaming = true;
5932 if (!sve_access_check(s)) {
5933 return true;
5935 switch (a->esz) {
5936 case MO_32:
5937 fn = scatter_store_fn32[mte][be][a->xs][a->msz];
5938 break;
5939 case MO_64:
5940 fn = scatter_store_fn64[mte][be][a->xs][a->msz];
5941 break;
5942 default:
5943 g_assert_not_reached();
5945 do_mem_zpz(s, a->rd, a->pg, a->rm, a->scale * a->msz,
5946 cpu_reg_sp(s, a->rn), a->msz, true, fn);
5947 return true;
5950 static bool trans_ST1_zpiz(DisasContext *s, arg_ST1_zpiz *a)
5952 gen_helper_gvec_mem_scatter *fn = NULL;
5953 bool be = s->be_data == MO_BE;
5954 bool mte = s->mte_active[0];
5956 if (a->esz < a->msz) {
5957 return false;
5959 if (!dc_isar_feature(aa64_sve, s)) {
5960 return false;
5962 s->is_nonstreaming = true;
5963 if (!sve_access_check(s)) {
5964 return true;
5967 switch (a->esz) {
5968 case MO_32:
5969 fn = scatter_store_fn32[mte][be][0][a->msz];
5970 break;
5971 case MO_64:
5972 fn = scatter_store_fn64[mte][be][2][a->msz];
5973 break;
5975 assert(fn != NULL);
5977 /* Treat ST1_zpiz (zn[x] + imm) the same way as ST1_zprz (rn + zm[x])
5978 * by loading the immediate into the scalar parameter.
5980 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
5981 tcg_constant_i64(a->imm << a->msz), a->msz, true, fn);
5982 return true;
5985 static bool trans_STNT1_zprz(DisasContext *s, arg_ST1_zprz *a)
5987 gen_helper_gvec_mem_scatter *fn;
5988 bool be = s->be_data == MO_BE;
5989 bool mte = s->mte_active[0];
5991 if (a->esz < a->msz) {
5992 return false;
5994 if (!dc_isar_feature(aa64_sve2, s)) {
5995 return false;
5997 s->is_nonstreaming = true;
5998 if (!sve_access_check(s)) {
5999 return true;
6002 switch (a->esz) {
6003 case MO_32:
6004 fn = scatter_store_fn32[mte][be][0][a->msz];
6005 break;
6006 case MO_64:
6007 fn = scatter_store_fn64[mte][be][2][a->msz];
6008 break;
6009 default:
6010 g_assert_not_reached();
6013 do_mem_zpz(s, a->rd, a->pg, a->rn, 0,
6014 cpu_reg(s, a->rm), a->msz, true, fn);
6015 return true;
6019 * Prefetches
6022 static bool trans_PRF(DisasContext *s, arg_PRF *a)
6024 if (!dc_isar_feature(aa64_sve, s)) {
6025 return false;
6027 /* Prefetch is a nop within QEMU. */
6028 (void)sve_access_check(s);
6029 return true;
6032 static bool trans_PRF_rr(DisasContext *s, arg_PRF_rr *a)
6034 if (a->rm == 31 || !dc_isar_feature(aa64_sve, s)) {
6035 return false;
6037 /* Prefetch is a nop within QEMU. */
6038 (void)sve_access_check(s);
6039 return true;
6042 static bool trans_PRF_ns(DisasContext *s, arg_PRF_ns *a)
6044 if (!dc_isar_feature(aa64_sve, s)) {
6045 return false;
6047 /* Prefetch is a nop within QEMU. */
6048 s->is_nonstreaming = true;
6049 (void)sve_access_check(s);
6050 return true;
6054 * Move Prefix
6056 * TODO: The implementation so far could handle predicated merging movprfx.
6057 * The helper functions as written take an extra source register to
6058 * use in the operation, but the result is only written when predication
6059 * succeeds. For unpredicated movprfx, we need to rearrange the helpers
6060 * to allow the final write back to the destination to be unconditional.
6061 * For predicated zeroing movprfx, we need to rearrange the helpers to
6062 * allow the final write back to zero inactives.
6064 * In the meantime, just emit the moves.
6067 TRANS_FEAT(MOVPRFX, aa64_sve, do_mov_z, a->rd, a->rn)
6068 TRANS_FEAT(MOVPRFX_m, aa64_sve, do_sel_z, a->rd, a->rn, a->rd, a->pg, a->esz)
6069 TRANS_FEAT(MOVPRFX_z, aa64_sve, do_movz_zpz, a->rd, a->rn, a->pg, a->esz, false)
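/*
 * As emitted today, a movprfx pair such as
 *     movprfx z0, z1
 *     add     z0.b, p0/m, z0.b, z2.b
 * is translated as a plain vector move of z1 into z0 followed by the
 * predicated ADD; no fusion with the prefixed instruction is attempted.
 */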
6072 * SVE2 Integer Multiply - Unpredicated
6075 TRANS_FEAT(MUL_zzz, aa64_sve2, gen_gvec_fn_arg_zzz, tcg_gen_gvec_mul, a)
6077 static gen_helper_gvec_3 * const smulh_zzz_fns[4] = {
6078 gen_helper_gvec_smulh_b, gen_helper_gvec_smulh_h,
6079 gen_helper_gvec_smulh_s, gen_helper_gvec_smulh_d,
6081 TRANS_FEAT(SMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6082 smulh_zzz_fns[a->esz], a, 0)
6084 static gen_helper_gvec_3 * const umulh_zzz_fns[4] = {
6085 gen_helper_gvec_umulh_b, gen_helper_gvec_umulh_h,
6086 gen_helper_gvec_umulh_s, gen_helper_gvec_umulh_d,
6088 TRANS_FEAT(UMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6089 umulh_zzz_fns[a->esz], a, 0)
6091 TRANS_FEAT(PMUL_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6092 gen_helper_gvec_pmul_b, a, 0)
6094 static gen_helper_gvec_3 * const sqdmulh_zzz_fns[4] = {
6095 gen_helper_sve2_sqdmulh_b, gen_helper_sve2_sqdmulh_h,
6096 gen_helper_sve2_sqdmulh_s, gen_helper_sve2_sqdmulh_d,
6098 TRANS_FEAT(SQDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6099 sqdmulh_zzz_fns[a->esz], a, 0)
6101 static gen_helper_gvec_3 * const sqrdmulh_zzz_fns[4] = {
6102 gen_helper_sve2_sqrdmulh_b, gen_helper_sve2_sqrdmulh_h,
6103 gen_helper_sve2_sqrdmulh_s, gen_helper_sve2_sqrdmulh_d,
6105 TRANS_FEAT(SQRDMULH_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6106 sqrdmulh_zzz_fns[a->esz], a, 0)
6109 * SVE2 Integer - Predicated
6112 static gen_helper_gvec_4 * const sadlp_fns[4] = {
6113 NULL, gen_helper_sve2_sadalp_zpzz_h,
6114 gen_helper_sve2_sadalp_zpzz_s, gen_helper_sve2_sadalp_zpzz_d,
6116 TRANS_FEAT(SADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz,
6117 sadlp_fns[a->esz], a, 0)
6119 static gen_helper_gvec_4 * const uadlp_fns[4] = {
6120 NULL, gen_helper_sve2_uadalp_zpzz_h,
6121 gen_helper_sve2_uadalp_zpzz_s, gen_helper_sve2_uadalp_zpzz_d,
6123 TRANS_FEAT(UADALP_zpzz, aa64_sve2, gen_gvec_ool_arg_zpzz,
6124 uadlp_fns[a->esz], a, 0)
6127 * SVE2 integer unary operations (predicated)
6130 TRANS_FEAT(URECPE, aa64_sve2, gen_gvec_ool_arg_zpz,
6131 a->esz == 2 ? gen_helper_sve2_urecpe_s : NULL, a, 0)
6133 TRANS_FEAT(URSQRTE, aa64_sve2, gen_gvec_ool_arg_zpz,
6134 a->esz == 2 ? gen_helper_sve2_ursqrte_s : NULL, a, 0)
6136 static gen_helper_gvec_3 * const sqabs_fns[4] = {
6137 gen_helper_sve2_sqabs_b, gen_helper_sve2_sqabs_h,
6138 gen_helper_sve2_sqabs_s, gen_helper_sve2_sqabs_d,
6140 TRANS_FEAT(SQABS, aa64_sve2, gen_gvec_ool_arg_zpz, sqabs_fns[a->esz], a, 0)
6142 static gen_helper_gvec_3 * const sqneg_fns[4] = {
6143 gen_helper_sve2_sqneg_b, gen_helper_sve2_sqneg_h,
6144 gen_helper_sve2_sqneg_s, gen_helper_sve2_sqneg_d,
6146 TRANS_FEAT(SQNEG, aa64_sve2, gen_gvec_ool_arg_zpz, sqneg_fns[a->esz], a, 0)
6148 DO_ZPZZ(SQSHL, aa64_sve2, sve2_sqshl)
6149 DO_ZPZZ(SQRSHL, aa64_sve2, sve2_sqrshl)
6150 DO_ZPZZ(SRSHL, aa64_sve2, sve2_srshl)
6152 DO_ZPZZ(UQSHL, aa64_sve2, sve2_uqshl)
6153 DO_ZPZZ(UQRSHL, aa64_sve2, sve2_uqrshl)
6154 DO_ZPZZ(URSHL, aa64_sve2, sve2_urshl)
6156 DO_ZPZZ(SHADD, aa64_sve2, sve2_shadd)
6157 DO_ZPZZ(SRHADD, aa64_sve2, sve2_srhadd)
6158 DO_ZPZZ(SHSUB, aa64_sve2, sve2_shsub)
6160 DO_ZPZZ(UHADD, aa64_sve2, sve2_uhadd)
6161 DO_ZPZZ(URHADD, aa64_sve2, sve2_urhadd)
6162 DO_ZPZZ(UHSUB, aa64_sve2, sve2_uhsub)
6164 DO_ZPZZ(ADDP, aa64_sve2, sve2_addp)
6165 DO_ZPZZ(SMAXP, aa64_sve2, sve2_smaxp)
6166 DO_ZPZZ(UMAXP, aa64_sve2, sve2_umaxp)
6167 DO_ZPZZ(SMINP, aa64_sve2, sve2_sminp)
6168 DO_ZPZZ(UMINP, aa64_sve2, sve2_uminp)
6170 DO_ZPZZ(SQADD_zpzz, aa64_sve2, sve2_sqadd)
6171 DO_ZPZZ(UQADD_zpzz, aa64_sve2, sve2_uqadd)
6172 DO_ZPZZ(SQSUB_zpzz, aa64_sve2, sve2_sqsub)
6173 DO_ZPZZ(UQSUB_zpzz, aa64_sve2, sve2_uqsub)
6174 DO_ZPZZ(SUQADD, aa64_sve2, sve2_suqadd)
6175 DO_ZPZZ(USQADD, aa64_sve2, sve2_usqadd)
6178 * SVE2 Widening Integer Arithmetic
6181 static gen_helper_gvec_3 * const saddl_fns[4] = {
6182 NULL, gen_helper_sve2_saddl_h,
6183 gen_helper_sve2_saddl_s, gen_helper_sve2_saddl_d,
6185 TRANS_FEAT(SADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6186 saddl_fns[a->esz], a, 0)
6187 TRANS_FEAT(SADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6188 saddl_fns[a->esz], a, 3)
6189 TRANS_FEAT(SADDLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
6190 saddl_fns[a->esz], a, 2)
6192 static gen_helper_gvec_3 * const ssubl_fns[4] = {
6193 NULL, gen_helper_sve2_ssubl_h,
6194 gen_helper_sve2_ssubl_s, gen_helper_sve2_ssubl_d,
6196 TRANS_FEAT(SSUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6197 ssubl_fns[a->esz], a, 0)
6198 TRANS_FEAT(SSUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6199 ssubl_fns[a->esz], a, 3)
6200 TRANS_FEAT(SSUBLBT, aa64_sve2, gen_gvec_ool_arg_zzz,
6201 ssubl_fns[a->esz], a, 2)
6202 TRANS_FEAT(SSUBLTB, aa64_sve2, gen_gvec_ool_arg_zzz,
6203 ssubl_fns[a->esz], a, 1)
6205 static gen_helper_gvec_3 * const sabdl_fns[4] = {
6206 NULL, gen_helper_sve2_sabdl_h,
6207 gen_helper_sve2_sabdl_s, gen_helper_sve2_sabdl_d,
6209 TRANS_FEAT(SABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6210 sabdl_fns[a->esz], a, 0)
6211 TRANS_FEAT(SABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6212 sabdl_fns[a->esz], a, 3)
6214 static gen_helper_gvec_3 * const uaddl_fns[4] = {
6215 NULL, gen_helper_sve2_uaddl_h,
6216 gen_helper_sve2_uaddl_s, gen_helper_sve2_uaddl_d,
6218 TRANS_FEAT(UADDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6219 uaddl_fns[a->esz], a, 0)
6220 TRANS_FEAT(UADDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6221 uaddl_fns[a->esz], a, 3)
6223 static gen_helper_gvec_3 * const usubl_fns[4] = {
6224 NULL, gen_helper_sve2_usubl_h,
6225 gen_helper_sve2_usubl_s, gen_helper_sve2_usubl_d,
6227 TRANS_FEAT(USUBLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6228 usubl_fns[a->esz], a, 0)
6229 TRANS_FEAT(USUBLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6230 usubl_fns[a->esz], a, 3)
6232 static gen_helper_gvec_3 * const uabdl_fns[4] = {
6233 NULL, gen_helper_sve2_uabdl_h,
6234 gen_helper_sve2_uabdl_s, gen_helper_sve2_uabdl_d,
6236 TRANS_FEAT(UABDLB, aa64_sve2, gen_gvec_ool_arg_zzz,
6237 uabdl_fns[a->esz], a, 0)
6238 TRANS_FEAT(UABDLT, aa64_sve2, gen_gvec_ool_arg_zzz,
6239 uabdl_fns[a->esz], a, 3)
6241 static gen_helper_gvec_3 * const sqdmull_fns[4] = {
6242 NULL, gen_helper_sve2_sqdmull_zzz_h,
6243 gen_helper_sve2_sqdmull_zzz_s, gen_helper_sve2_sqdmull_zzz_d,
6245 TRANS_FEAT(SQDMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6246 sqdmull_fns[a->esz], a, 0)
6247 TRANS_FEAT(SQDMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6248 sqdmull_fns[a->esz], a, 3)
6250 static gen_helper_gvec_3 * const smull_fns[4] = {
6251 NULL, gen_helper_sve2_smull_zzz_h,
6252 gen_helper_sve2_smull_zzz_s, gen_helper_sve2_smull_zzz_d,
6254 TRANS_FEAT(SMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6255 smull_fns[a->esz], a, 0)
6256 TRANS_FEAT(SMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6257 smull_fns[a->esz], a, 3)
6259 static gen_helper_gvec_3 * const umull_fns[4] = {
6260 NULL, gen_helper_sve2_umull_zzz_h,
6261 gen_helper_sve2_umull_zzz_s, gen_helper_sve2_umull_zzz_d,
6263 TRANS_FEAT(UMULLB_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6264 umull_fns[a->esz], a, 0)
6265 TRANS_FEAT(UMULLT_zzz, aa64_sve2, gen_gvec_ool_arg_zzz,
6266 umull_fns[a->esz], a, 3)
6268 static gen_helper_gvec_3 * const eoril_fns[4] = {
6269 gen_helper_sve2_eoril_b, gen_helper_sve2_eoril_h,
6270 gen_helper_sve2_eoril_s, gen_helper_sve2_eoril_d,
6272 TRANS_FEAT(EORBT, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 2)
6273 TRANS_FEAT(EORTB, aa64_sve2, gen_gvec_ool_arg_zzz, eoril_fns[a->esz], a, 1)
6275 static bool do_trans_pmull(DisasContext *s, arg_rrr_esz *a, bool sel)
6277 static gen_helper_gvec_3 * const fns[4] = {
6278 gen_helper_gvec_pmull_q, gen_helper_sve2_pmull_h,
6279 NULL, gen_helper_sve2_pmull_d,
6282 if (a->esz == 0) {
6283 if (!dc_isar_feature(aa64_sve2_pmull128, s)) {
6284 return false;
6286 s->is_nonstreaming = true;
6287 } else if (!dc_isar_feature(aa64_sve, s)) {
6288 return false;
6290 return gen_gvec_ool_arg_zzz(s, fns[a->esz], a, sel);
6293 TRANS_FEAT(PMULLB, aa64_sve2, do_trans_pmull, a, false)
6294 TRANS_FEAT(PMULLT, aa64_sve2, do_trans_pmull, a, true)
6296 static gen_helper_gvec_3 * const saddw_fns[4] = {
6297 NULL, gen_helper_sve2_saddw_h,
6298 gen_helper_sve2_saddw_s, gen_helper_sve2_saddw_d,
6300 TRANS_FEAT(SADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 0)
6301 TRANS_FEAT(SADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, saddw_fns[a->esz], a, 1)
6303 static gen_helper_gvec_3 * const ssubw_fns[4] = {
6304 NULL, gen_helper_sve2_ssubw_h,
6305 gen_helper_sve2_ssubw_s, gen_helper_sve2_ssubw_d,
6307 TRANS_FEAT(SSUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 0)
6308 TRANS_FEAT(SSUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, ssubw_fns[a->esz], a, 1)
6310 static gen_helper_gvec_3 * const uaddw_fns[4] = {
6311 NULL, gen_helper_sve2_uaddw_h,
6312 gen_helper_sve2_uaddw_s, gen_helper_sve2_uaddw_d,
6314 TRANS_FEAT(UADDWB, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 0)
6315 TRANS_FEAT(UADDWT, aa64_sve2, gen_gvec_ool_arg_zzz, uaddw_fns[a->esz], a, 1)
6317 static gen_helper_gvec_3 * const usubw_fns[4] = {
6318 NULL, gen_helper_sve2_usubw_h,
6319 gen_helper_sve2_usubw_s, gen_helper_sve2_usubw_d,
6321 TRANS_FEAT(USUBWB, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 0)
6322 TRANS_FEAT(USUBWT, aa64_sve2, gen_gvec_ool_arg_zzz, usubw_fns[a->esz], a, 1)
6324 static void gen_sshll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
6326 int top = imm & 1;
6327 int shl = imm >> 1;
6328 int halfbits = 4 << vece;
6330 if (top) {
6331 if (shl == halfbits) {
6332 TCGv_vec t = tcg_temp_new_vec_matching(d);
6333 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
6334 tcg_gen_and_vec(vece, d, n, t);
6335 tcg_temp_free_vec(t);
6336 } else {
6337 tcg_gen_sari_vec(vece, d, n, halfbits);
6338 tcg_gen_shli_vec(vece, d, d, shl);
6340 } else {
6341 tcg_gen_shli_vec(vece, d, n, halfbits);
6342 tcg_gen_sari_vec(vece, d, d, halfbits - shl);
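/*
 * Worked example of the shift pairs above: with vece = MO_16
 * (halfbits = 8) and shl = 2, the top variant wants the high byte of
 * each halfword sign-extended and shifted left by 2; sari #8 brings it
 * down with sign extension, then shli #2 applies the shift.  The
 * bottom variant reverses the order: shli #8 places the low byte at
 * the top, then sari #(8 - 2) sign-extends while leaving a net left
 * shift of 2.  When shl == halfbits the top byte is already at its
 * destination, so masking with the high-half mask suffices.
 */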
6346 static void gen_ushll_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int imm)
6348 int halfbits = 4 << vece;
6349 int top = imm & 1;
6350 int shl = (imm >> 1);
6351 int shift;
6352 uint64_t mask;
6354 mask = MAKE_64BIT_MASK(0, halfbits);
6355 mask <<= shl;
6356 mask = dup_const(vece, mask);
6358 shift = shl - top * halfbits;
6359 if (shift < 0) {
6360 tcg_gen_shri_i64(d, n, -shift);
6361 } else {
6362 tcg_gen_shli_i64(d, n, shift);
6364 tcg_gen_andi_i64(d, d, mask);
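/*
 * The i64 expansion folds both halves into one shift plus mask:
 * shift = shl - top * halfbits is the net left shift from the source
 * lane to its destination position, negative when the top half must
 * first move down.  E.g. USHLLT with vece = MO_16, shl = 3 gives
 * shift = 3 - 8 = -5, so shri #5 followed by the per-lane mask
 * dup16(0xff << 3) extracts each high byte shifted left by 3.
 */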
6367 static void gen_ushll16_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6369 gen_ushll_i64(MO_16, d, n, imm);
6372 static void gen_ushll32_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6374 gen_ushll_i64(MO_32, d, n, imm);
6377 static void gen_ushll64_i64(TCGv_i64 d, TCGv_i64 n, int64_t imm)
6379 gen_ushll_i64(MO_64, d, n, imm);
6382 static void gen_ushll_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t imm)
6384 int halfbits = 4 << vece;
6385 int top = imm & 1;
6386 int shl = imm >> 1;
6388 if (top) {
6389 if (shl == halfbits) {
6390 TCGv_vec t = tcg_temp_new_vec_matching(d);
6391 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(halfbits, halfbits));
6392 tcg_gen_and_vec(vece, d, n, t);
6393 tcg_temp_free_vec(t);
6394 } else {
6395 tcg_gen_shri_vec(vece, d, n, halfbits);
6396 tcg_gen_shli_vec(vece, d, d, shl);
6398 } else {
6399 if (shl == 0) {
6400 TCGv_vec t = tcg_temp_new_vec_matching(d);
6401 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6402 tcg_gen_and_vec(vece, d, n, t);
6403 tcg_temp_free_vec(t);
6404 } else {
6405 tcg_gen_shli_vec(vece, d, n, halfbits);
6406 tcg_gen_shri_vec(vece, d, d, halfbits - shl);
6411 static bool do_shll_tb(DisasContext *s, arg_rri_esz *a,
6412 const GVecGen2i ops[3], bool sel)
6415 if (a->esz < 0 || a->esz > 2) {
6416 return false;
6418 if (sve_access_check(s)) {
6419 unsigned vsz = vec_full_reg_size(s);
6420 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6421 vec_full_reg_offset(s, a->rn),
6422 vsz, vsz, (a->imm << 1) | sel,
6423 &ops[a->esz]);
6425 return true;
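/*
 * The immediate handed to the expanders packs two fields: bit 0 is the
 * bottom/top selector (sel) and the remaining bits are the left-shift
 * amount, which gen_sshll_vec/gen_ushll_vec unpack as top = imm & 1
 * and shl = imm >> 1.
 */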
6428 static const TCGOpcode sshll_list[] = {
6429 INDEX_op_shli_vec, INDEX_op_sari_vec, 0
6431 static const GVecGen2i sshll_ops[3] = {
6432 { .fniv = gen_sshll_vec,
6433 .opt_opc = sshll_list,
6434 .fno = gen_helper_sve2_sshll_h,
6435 .vece = MO_16 },
6436 { .fniv = gen_sshll_vec,
6437 .opt_opc = sshll_list,
6438 .fno = gen_helper_sve2_sshll_s,
6439 .vece = MO_32 },
6440 { .fniv = gen_sshll_vec,
6441 .opt_opc = sshll_list,
6442 .fno = gen_helper_sve2_sshll_d,
6443 .vece = MO_64 }
6445 TRANS_FEAT(SSHLLB, aa64_sve2, do_shll_tb, a, sshll_ops, false)
6446 TRANS_FEAT(SSHLLT, aa64_sve2, do_shll_tb, a, sshll_ops, true)
6448 static const TCGOpcode ushll_list[] = {
6449 INDEX_op_shli_vec, INDEX_op_shri_vec, 0
6451 static const GVecGen2i ushll_ops[3] = {
6452 { .fni8 = gen_ushll16_i64,
6453 .fniv = gen_ushll_vec,
6454 .opt_opc = ushll_list,
6455 .fno = gen_helper_sve2_ushll_h,
6456 .vece = MO_16 },
6457 { .fni8 = gen_ushll32_i64,
6458 .fniv = gen_ushll_vec,
6459 .opt_opc = ushll_list,
6460 .fno = gen_helper_sve2_ushll_s,
6461 .vece = MO_32 },
6462 { .fni8 = gen_ushll64_i64,
6463 .fniv = gen_ushll_vec,
6464 .opt_opc = ushll_list,
6465 .fno = gen_helper_sve2_ushll_d,
6466 .vece = MO_64 },
6468 TRANS_FEAT(USHLLB, aa64_sve2, do_shll_tb, a, ushll_ops, false)
6469 TRANS_FEAT(USHLLT, aa64_sve2, do_shll_tb, a, ushll_ops, true)
6471 static gen_helper_gvec_3 * const bext_fns[4] = {
6472 gen_helper_sve2_bext_b, gen_helper_sve2_bext_h,
6473 gen_helper_sve2_bext_s, gen_helper_sve2_bext_d,
6475 TRANS_FEAT_NONSTREAMING(BEXT, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6476 bext_fns[a->esz], a, 0)
6478 static gen_helper_gvec_3 * const bdep_fns[4] = {
6479 gen_helper_sve2_bdep_b, gen_helper_sve2_bdep_h,
6480 gen_helper_sve2_bdep_s, gen_helper_sve2_bdep_d,
6482 TRANS_FEAT_NONSTREAMING(BDEP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6483 bdep_fns[a->esz], a, 0)
6485 static gen_helper_gvec_3 * const bgrp_fns[4] = {
6486 gen_helper_sve2_bgrp_b, gen_helper_sve2_bgrp_h,
6487 gen_helper_sve2_bgrp_s, gen_helper_sve2_bgrp_d,
6489 TRANS_FEAT_NONSTREAMING(BGRP, aa64_sve2_bitperm, gen_gvec_ool_arg_zzz,
6490 bgrp_fns[a->esz], a, 0)
6492 static gen_helper_gvec_3 * const cadd_fns[4] = {
6493 gen_helper_sve2_cadd_b, gen_helper_sve2_cadd_h,
6494 gen_helper_sve2_cadd_s, gen_helper_sve2_cadd_d,
6496 TRANS_FEAT(CADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
6497 cadd_fns[a->esz], a, 0)
6498 TRANS_FEAT(CADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
6499 cadd_fns[a->esz], a, 1)
6501 static gen_helper_gvec_3 * const sqcadd_fns[4] = {
6502 gen_helper_sve2_sqcadd_b, gen_helper_sve2_sqcadd_h,
6503 gen_helper_sve2_sqcadd_s, gen_helper_sve2_sqcadd_d,
6505 TRANS_FEAT(SQCADD_rot90, aa64_sve2, gen_gvec_ool_arg_zzz,
6506 sqcadd_fns[a->esz], a, 0)
6507 TRANS_FEAT(SQCADD_rot270, aa64_sve2, gen_gvec_ool_arg_zzz,
6508 sqcadd_fns[a->esz], a, 1)
6510 static gen_helper_gvec_4 * const sabal_fns[4] = {
6511 NULL, gen_helper_sve2_sabal_h,
6512 gen_helper_sve2_sabal_s, gen_helper_sve2_sabal_d,
6514 TRANS_FEAT(SABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 0)
6515 TRANS_FEAT(SABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, sabal_fns[a->esz], a, 1)
6517 static gen_helper_gvec_4 * const uabal_fns[4] = {
6518 NULL, gen_helper_sve2_uabal_h,
6519 gen_helper_sve2_uabal_s, gen_helper_sve2_uabal_d,
6521 TRANS_FEAT(UABALB, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 0)
6522 TRANS_FEAT(UABALT, aa64_sve2, gen_gvec_ool_arg_zzzz, uabal_fns[a->esz], a, 1)
6524 static bool do_adcl(DisasContext *s, arg_rrrr_esz *a, bool sel)
6526 static gen_helper_gvec_4 * const fns[2] = {
6527 gen_helper_sve2_adcl_s,
6528 gen_helper_sve2_adcl_d,
6531 * Note that in this case the ESZ field encodes both size and sign.
6532 * Split out 'subtract' into bit 1 of the data field for the helper.
6534 return gen_gvec_ool_arg_zzzz(s, fns[a->esz & 1], a, (a->esz & 2) | sel);
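/*
 * Per the note above: bit 0 of esz picks the 32-bit vs 64-bit helper,
 * while bit 1, set by the decoder for the subtracting SBCLB/SBCLT
 * forms, is forwarded in the data word as the subtract flag, with sel
 * (bottom/top) in bit 0 of data.
 */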
6537 TRANS_FEAT(ADCLB, aa64_sve2, do_adcl, a, false)
6538 TRANS_FEAT(ADCLT, aa64_sve2, do_adcl, a, true)
6540 TRANS_FEAT(SSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ssra, a)
6541 TRANS_FEAT(USRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_usra, a)
6542 TRANS_FEAT(SRSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_srsra, a)
6543 TRANS_FEAT(URSRA, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_ursra, a)
6544 TRANS_FEAT(SRI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sri, a)
6545 TRANS_FEAT(SLI, aa64_sve2, gen_gvec_fn_arg_zzi, gen_gvec_sli, a)
6547 TRANS_FEAT(SABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_saba, a)
6548 TRANS_FEAT(UABA, aa64_sve2, gen_gvec_fn_arg_zzz, gen_gvec_uaba, a)
6550 static bool do_narrow_extract(DisasContext *s, arg_rri_esz *a,
6551 const GVecGen2 ops[3])
6553 if (a->esz < 0 || a->esz > MO_32 || a->imm != 0) {
6554 return false;
6556 if (sve_access_check(s)) {
6557 unsigned vsz = vec_full_reg_size(s);
6558 tcg_gen_gvec_2(vec_full_reg_offset(s, a->rd),
6559 vec_full_reg_offset(s, a->rn),
6560 vsz, vsz, &ops[a->esz]);
6562 return true;
6565 static const TCGOpcode sqxtn_list[] = {
6566 INDEX_op_shli_vec, INDEX_op_smin_vec, INDEX_op_smax_vec, 0
6569 static void gen_sqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6571 TCGv_vec t = tcg_temp_new_vec_matching(d);
6572 int halfbits = 4 << vece;
6573 int64_t mask = (1ull << halfbits) - 1;
6574 int64_t min = -1ull << (halfbits - 1);
6575 int64_t max = -min - 1;
6577 tcg_gen_dupi_vec(vece, t, min);
6578 tcg_gen_smax_vec(vece, d, n, t);
6579 tcg_gen_dupi_vec(vece, t, max);
6580 tcg_gen_smin_vec(vece, d, d, t);
6581 tcg_gen_dupi_vec(vece, t, mask);
6582 tcg_gen_and_vec(vece, d, d, t);
6583 tcg_temp_free_vec(t);
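/*
 * Worked bounds for vece = MO_16 (narrowing 16 -> 8): min = -128,
 * max = 127, mask = 0xff.  smax/smin saturate each halfword to the
 * signed 8-bit range, and the final AND keeps only the low byte of
 * each lane, zeroing the odd destination bytes as SQXTNB requires.
 */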
6586 static const GVecGen2 sqxtnb_ops[3] = {
6587 { .fniv = gen_sqxtnb_vec,
6588 .opt_opc = sqxtn_list,
6589 .fno = gen_helper_sve2_sqxtnb_h,
6590 .vece = MO_16 },
6591 { .fniv = gen_sqxtnb_vec,
6592 .opt_opc = sqxtn_list,
6593 .fno = gen_helper_sve2_sqxtnb_s,
6594 .vece = MO_32 },
6595 { .fniv = gen_sqxtnb_vec,
6596 .opt_opc = sqxtn_list,
6597 .fno = gen_helper_sve2_sqxtnb_d,
6598 .vece = MO_64 },
6600 TRANS_FEAT(SQXTNB, aa64_sve2, do_narrow_extract, a, sqxtnb_ops)
6602 static void gen_sqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6604 TCGv_vec t = tcg_temp_new_vec_matching(d);
6605 int halfbits = 4 << vece;
6606 int64_t mask = (1ull << halfbits) - 1;
6607 int64_t min = -1ull << (halfbits - 1);
6608 int64_t max = -min - 1;
6610 tcg_gen_dupi_vec(vece, t, min);
6611 tcg_gen_smax_vec(vece, n, n, t);
6612 tcg_gen_dupi_vec(vece, t, max);
6613 tcg_gen_smin_vec(vece, n, n, t);
6614 tcg_gen_shli_vec(vece, n, n, halfbits);
6615 tcg_gen_dupi_vec(vece, t, mask);
6616 tcg_gen_bitsel_vec(vece, d, t, d, n);
6617 tcg_temp_free_vec(t);
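/*
 * The _t (top) variants merge rather than overwrite: after saturating
 * and shifting the result into the high half of each lane,
 * bitsel(d, t, d, n) with t = low-half mask computes (d & t) | (n & ~t),
 * keeping the bottom elements already in d and taking the new top
 * elements from n.
 */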
6620 static const GVecGen2 sqxtnt_ops[3] = {
6621 { .fniv = gen_sqxtnt_vec,
6622 .opt_opc = sqxtn_list,
6623 .load_dest = true,
6624 .fno = gen_helper_sve2_sqxtnt_h,
6625 .vece = MO_16 },
6626 { .fniv = gen_sqxtnt_vec,
6627 .opt_opc = sqxtn_list,
6628 .load_dest = true,
6629 .fno = gen_helper_sve2_sqxtnt_s,
6630 .vece = MO_32 },
6631 { .fniv = gen_sqxtnt_vec,
6632 .opt_opc = sqxtn_list,
6633 .load_dest = true,
6634 .fno = gen_helper_sve2_sqxtnt_d,
6635 .vece = MO_64 },
6637 TRANS_FEAT(SQXTNT, aa64_sve2, do_narrow_extract, a, sqxtnt_ops)
6639 static const TCGOpcode uqxtn_list[] = {
6640 INDEX_op_shli_vec, INDEX_op_umin_vec, 0
6643 static void gen_uqxtnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6645 TCGv_vec t = tcg_temp_new_vec_matching(d);
6646 int halfbits = 4 << vece;
6647 int64_t max = (1ull << halfbits) - 1;
6649 tcg_gen_dupi_vec(vece, t, max);
6650 tcg_gen_umin_vec(vece, d, n, t);
6651 tcg_temp_free_vec(t);
6654 static const GVecGen2 uqxtnb_ops[3] = {
6655 { .fniv = gen_uqxtnb_vec,
6656 .opt_opc = uqxtn_list,
6657 .fno = gen_helper_sve2_uqxtnb_h,
6658 .vece = MO_16 },
6659 { .fniv = gen_uqxtnb_vec,
6660 .opt_opc = uqxtn_list,
6661 .fno = gen_helper_sve2_uqxtnb_s,
6662 .vece = MO_32 },
6663 { .fniv = gen_uqxtnb_vec,
6664 .opt_opc = uqxtn_list,
6665 .fno = gen_helper_sve2_uqxtnb_d,
6666 .vece = MO_64 },
6668 TRANS_FEAT(UQXTNB, aa64_sve2, do_narrow_extract, a, uqxtnb_ops)
6670 static void gen_uqxtnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6672 TCGv_vec t = tcg_temp_new_vec_matching(d);
6673 int halfbits = 4 << vece;
6674 int64_t max = (1ull << halfbits) - 1;
6676 tcg_gen_dupi_vec(vece, t, max);
6677 tcg_gen_umin_vec(vece, n, n, t);
6678 tcg_gen_shli_vec(vece, n, n, halfbits);
6679 tcg_gen_bitsel_vec(vece, d, t, d, n);
6680 tcg_temp_free_vec(t);
6683 static const GVecGen2 uqxtnt_ops[3] = {
6684 { .fniv = gen_uqxtnt_vec,
6685 .opt_opc = uqxtn_list,
6686 .load_dest = true,
6687 .fno = gen_helper_sve2_uqxtnt_h,
6688 .vece = MO_16 },
6689 { .fniv = gen_uqxtnt_vec,
6690 .opt_opc = uqxtn_list,
6691 .load_dest = true,
6692 .fno = gen_helper_sve2_uqxtnt_s,
6693 .vece = MO_32 },
6694 { .fniv = gen_uqxtnt_vec,
6695 .opt_opc = uqxtn_list,
6696 .load_dest = true,
6697 .fno = gen_helper_sve2_uqxtnt_d,
6698 .vece = MO_64 },
6700 TRANS_FEAT(UQXTNT, aa64_sve2, do_narrow_extract, a, uqxtnt_ops)
6702 static const TCGOpcode sqxtun_list[] = {
6703 INDEX_op_shli_vec, INDEX_op_umin_vec, INDEX_op_smax_vec, 0
6706 static void gen_sqxtunb_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6708 TCGv_vec t = tcg_temp_new_vec_matching(d);
6709 int halfbits = 4 << vece;
6710 int64_t max = (1ull << halfbits) - 1;
6712 tcg_gen_dupi_vec(vece, t, 0);
6713 tcg_gen_smax_vec(vece, d, n, t);
6714 tcg_gen_dupi_vec(vece, t, max);
6715 tcg_gen_umin_vec(vece, d, d, t);
6716 tcg_temp_free_vec(t);
6719 static const GVecGen2 sqxtunb_ops[3] = {
6720 { .fniv = gen_sqxtunb_vec,
6721 .opt_opc = sqxtun_list,
6722 .fno = gen_helper_sve2_sqxtunb_h,
6723 .vece = MO_16 },
6724 { .fniv = gen_sqxtunb_vec,
6725 .opt_opc = sqxtun_list,
6726 .fno = gen_helper_sve2_sqxtunb_s,
6727 .vece = MO_32 },
6728 { .fniv = gen_sqxtunb_vec,
6729 .opt_opc = sqxtun_list,
6730 .fno = gen_helper_sve2_sqxtunb_d,
6731 .vece = MO_64 },
6733 TRANS_FEAT(SQXTUNB, aa64_sve2, do_narrow_extract, a, sqxtunb_ops)
6735 static void gen_sqxtunt_vec(unsigned vece, TCGv_vec d, TCGv_vec n)
6737 TCGv_vec t = tcg_temp_new_vec_matching(d);
6738 int halfbits = 4 << vece;
6739 int64_t max = (1ull << halfbits) - 1;
6741 tcg_gen_dupi_vec(vece, t, 0);
6742 tcg_gen_smax_vec(vece, n, n, t);
6743 tcg_gen_dupi_vec(vece, t, max);
6744 tcg_gen_umin_vec(vece, n, n, t);
6745 tcg_gen_shli_vec(vece, n, n, halfbits);
6746 tcg_gen_bitsel_vec(vece, d, t, d, n);
6747 tcg_temp_free_vec(t);
6750 static const GVecGen2 sqxtunt_ops[3] = {
6751 { .fniv = gen_sqxtunt_vec,
6752 .opt_opc = sqxtun_list,
6753 .load_dest = true,
6754 .fno = gen_helper_sve2_sqxtunt_h,
6755 .vece = MO_16 },
6756 { .fniv = gen_sqxtunt_vec,
6757 .opt_opc = sqxtun_list,
6758 .load_dest = true,
6759 .fno = gen_helper_sve2_sqxtunt_s,
6760 .vece = MO_32 },
6761 { .fniv = gen_sqxtunt_vec,
6762 .opt_opc = sqxtun_list,
6763 .load_dest = true,
6764 .fno = gen_helper_sve2_sqxtunt_d,
6765 .vece = MO_64 },
6767 TRANS_FEAT(SQXTUNT, aa64_sve2, do_narrow_extract, a, sqxtunt_ops)
6769 static bool do_shr_narrow(DisasContext *s, arg_rri_esz *a,
6770 const GVecGen2i ops[3])
6772 if (a->esz < 0 || a->esz > MO_32) {
6773 return false;
6775 assert(a->imm > 0 && a->imm <= (8 << a->esz));
6776 if (sve_access_check(s)) {
6777 unsigned vsz = vec_full_reg_size(s);
6778 tcg_gen_gvec_2i(vec_full_reg_offset(s, a->rd),
6779 vec_full_reg_offset(s, a->rn),
6780 vsz, vsz, a->imm, &ops[a->esz]);
6782 return true;
6785 static void gen_shrnb_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
6787 int halfbits = 4 << vece;
6788 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
6790 tcg_gen_shri_i64(d, n, shr);
6791 tcg_gen_andi_i64(d, d, mask);
6794 static void gen_shrnb16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6796 gen_shrnb_i64(MO_16, d, n, shr);
6799 static void gen_shrnb32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6801 gen_shrnb_i64(MO_32, d, n, shr);
6804 static void gen_shrnb64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6806 gen_shrnb_i64(MO_64, d, n, shr);
6809 static void gen_shrnb_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
6811 TCGv_vec t = tcg_temp_new_vec_matching(d);
6812 int halfbits = 4 << vece;
6813 uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
6815 tcg_gen_shri_vec(vece, n, n, shr);
6816 tcg_gen_dupi_vec(vece, t, mask);
6817 tcg_gen_and_vec(vece, d, n, t);
6818 tcg_temp_free_vec(t);
6821 static const TCGOpcode shrnb_vec_list[] = { INDEX_op_shri_vec, 0 };
6822 static const GVecGen2i shrnb_ops[3] = {
6823 { .fni8 = gen_shrnb16_i64,
6824 .fniv = gen_shrnb_vec,
6825 .opt_opc = shrnb_vec_list,
6826 .fno = gen_helper_sve2_shrnb_h,
6827 .vece = MO_16 },
6828 { .fni8 = gen_shrnb32_i64,
6829 .fniv = gen_shrnb_vec,
6830 .opt_opc = shrnb_vec_list,
6831 .fno = gen_helper_sve2_shrnb_s,
6832 .vece = MO_32 },
6833 { .fni8 = gen_shrnb64_i64,
6834 .fniv = gen_shrnb_vec,
6835 .opt_opc = shrnb_vec_list,
6836 .fno = gen_helper_sve2_shrnb_d,
6837 .vece = MO_64 },
6839 TRANS_FEAT(SHRNB, aa64_sve2, do_shr_narrow, a, shrnb_ops)
6841 static void gen_shrnt_i64(unsigned vece, TCGv_i64 d, TCGv_i64 n, int shr)
6843 int halfbits = 4 << vece;
6844 uint64_t mask = dup_const(vece, MAKE_64BIT_MASK(0, halfbits));
6846 tcg_gen_shli_i64(n, n, halfbits - shr);
6847 tcg_gen_andi_i64(n, n, ~mask);
6848 tcg_gen_andi_i64(d, d, mask);
6849 tcg_gen_or_i64(d, d, n);
6852 static void gen_shrnt16_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6854 gen_shrnt_i64(MO_16, d, n, shr);
6857 static void gen_shrnt32_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6859 gen_shrnt_i64(MO_32, d, n, shr);
6862 static void gen_shrnt64_i64(TCGv_i64 d, TCGv_i64 n, int64_t shr)
6864 tcg_gen_shri_i64(n, n, shr);
6865 tcg_gen_deposit_i64(d, d, n, 32, 32);
6868 static void gen_shrnt_vec(unsigned vece, TCGv_vec d, TCGv_vec n, int64_t shr)
6870 TCGv_vec t = tcg_temp_new_vec_matching(d);
6871 int halfbits = 4 << vece;
6872 uint64_t mask = MAKE_64BIT_MASK(0, halfbits);
6874 tcg_gen_shli_vec(vece, n, n, halfbits - shr);
6875 tcg_gen_dupi_vec(vece, t, mask);
6876 tcg_gen_bitsel_vec(vece, d, t, d, n);
6877 tcg_temp_free_vec(t);
6880 static const TCGOpcode shrnt_vec_list[] = { INDEX_op_shli_vec, 0 };
6881 static const GVecGen2i shrnt_ops[3] = {
6882 { .fni8 = gen_shrnt16_i64,
6883 .fniv = gen_shrnt_vec,
6884 .opt_opc = shrnt_vec_list,
6885 .load_dest = true,
6886 .fno = gen_helper_sve2_shrnt_h,
6887 .vece = MO_16 },
6888 { .fni8 = gen_shrnt32_i64,
6889 .fniv = gen_shrnt_vec,
6890 .opt_opc = shrnt_vec_list,
6891 .load_dest = true,
6892 .fno = gen_helper_sve2_shrnt_s,
6893 .vece = MO_32 },
6894 { .fni8 = gen_shrnt64_i64,
6895 .fniv = gen_shrnt_vec,
6896 .opt_opc = shrnt_vec_list,
6897 .load_dest = true,
6898 .fno = gen_helper_sve2_shrnt_d,
6899 .vece = MO_64 },
6901 TRANS_FEAT(SHRNT, aa64_sve2, do_shr_narrow, a, shrnt_ops)
6903 static const GVecGen2i rshrnb_ops[3] = {
6904 { .fno = gen_helper_sve2_rshrnb_h },
6905 { .fno = gen_helper_sve2_rshrnb_s },
6906 { .fno = gen_helper_sve2_rshrnb_d },
6908 TRANS_FEAT(RSHRNB, aa64_sve2, do_shr_narrow, a, rshrnb_ops)
6910 static const GVecGen2i rshrnt_ops[3] = {
6911 { .fno = gen_helper_sve2_rshrnt_h },
6912 { .fno = gen_helper_sve2_rshrnt_s },
6913 { .fno = gen_helper_sve2_rshrnt_d },
6915 TRANS_FEAT(RSHRNT, aa64_sve2, do_shr_narrow, a, rshrnt_ops)
6917 static void gen_sqshrunb_vec(unsigned vece, TCGv_vec d,
6918 TCGv_vec n, int64_t shr)
6920 TCGv_vec t = tcg_temp_new_vec_matching(d);
6921 int halfbits = 4 << vece;
6923 tcg_gen_sari_vec(vece, n, n, shr);
6924 tcg_gen_dupi_vec(vece, t, 0);
6925 tcg_gen_smax_vec(vece, n, n, t);
6926 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6927 tcg_gen_umin_vec(vece, d, n, t);
6928 tcg_temp_free_vec(t);
6931 static const TCGOpcode sqshrunb_vec_list[] = {
6932 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_umin_vec, 0
6934 static const GVecGen2i sqshrunb_ops[3] = {
6935 { .fniv = gen_sqshrunb_vec,
6936 .opt_opc = sqshrunb_vec_list,
6937 .fno = gen_helper_sve2_sqshrunb_h,
6938 .vece = MO_16 },
6939 { .fniv = gen_sqshrunb_vec,
6940 .opt_opc = sqshrunb_vec_list,
6941 .fno = gen_helper_sve2_sqshrunb_s,
6942 .vece = MO_32 },
6943 { .fniv = gen_sqshrunb_vec,
6944 .opt_opc = sqshrunb_vec_list,
6945 .fno = gen_helper_sve2_sqshrunb_d,
6946 .vece = MO_64 },
6948 TRANS_FEAT(SQSHRUNB, aa64_sve2, do_shr_narrow, a, sqshrunb_ops)
6950 static void gen_sqshrunt_vec(unsigned vece, TCGv_vec d,
6951 TCGv_vec n, int64_t shr)
6953 TCGv_vec t = tcg_temp_new_vec_matching(d);
6954 int halfbits = 4 << vece;
6956 tcg_gen_sari_vec(vece, n, n, shr);
6957 tcg_gen_dupi_vec(vece, t, 0);
6958 tcg_gen_smax_vec(vece, n, n, t);
6959 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
6960 tcg_gen_umin_vec(vece, n, n, t);
6961 tcg_gen_shli_vec(vece, n, n, halfbits);
6962 tcg_gen_bitsel_vec(vece, d, t, d, n);
6963 tcg_temp_free_vec(t);
6966 static const TCGOpcode sqshrunt_vec_list[] = {
6967 INDEX_op_shli_vec, INDEX_op_sari_vec,
6968 INDEX_op_smax_vec, INDEX_op_umin_vec, 0
6970 static const GVecGen2i sqshrunt_ops[3] = {
6971 { .fniv = gen_sqshrunt_vec,
6972 .opt_opc = sqshrunt_vec_list,
6973 .load_dest = true,
6974 .fno = gen_helper_sve2_sqshrunt_h,
6975 .vece = MO_16 },
6976 { .fniv = gen_sqshrunt_vec,
6977 .opt_opc = sqshrunt_vec_list,
6978 .load_dest = true,
6979 .fno = gen_helper_sve2_sqshrunt_s,
6980 .vece = MO_32 },
6981 { .fniv = gen_sqshrunt_vec,
6982 .opt_opc = sqshrunt_vec_list,
6983 .load_dest = true,
6984 .fno = gen_helper_sve2_sqshrunt_d,
6985 .vece = MO_64 },
6987 TRANS_FEAT(SQSHRUNT, aa64_sve2, do_shr_narrow, a, sqshrunt_ops)
6989 static const GVecGen2i sqrshrunb_ops[3] = {
6990 { .fno = gen_helper_sve2_sqrshrunb_h },
6991 { .fno = gen_helper_sve2_sqrshrunb_s },
6992 { .fno = gen_helper_sve2_sqrshrunb_d },
6994 TRANS_FEAT(SQRSHRUNB, aa64_sve2, do_shr_narrow, a, sqrshrunb_ops)
6996 static const GVecGen2i sqrshrunt_ops[3] = {
6997 { .fno = gen_helper_sve2_sqrshrunt_h },
6998 { .fno = gen_helper_sve2_sqrshrunt_s },
6999 { .fno = gen_helper_sve2_sqrshrunt_d },
7001 TRANS_FEAT(SQRSHRUNT, aa64_sve2, do_shr_narrow, a, sqrshrunt_ops)
7003 static void gen_sqshrnb_vec(unsigned vece, TCGv_vec d,
7004 TCGv_vec n, int64_t shr)
7006 TCGv_vec t = tcg_temp_new_vec_matching(d);
7007 int halfbits = 4 << vece;
7008 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
7009 int64_t min = -max - 1;
7011 tcg_gen_sari_vec(vece, n, n, shr);
7012 tcg_gen_dupi_vec(vece, t, min);
7013 tcg_gen_smax_vec(vece, n, n, t);
7014 tcg_gen_dupi_vec(vece, t, max);
7015 tcg_gen_smin_vec(vece, n, n, t);
7016 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7017 tcg_gen_and_vec(vece, d, n, t);
7018 tcg_temp_free_vec(t);
7021 static const TCGOpcode sqshrnb_vec_list[] = {
7022 INDEX_op_sari_vec, INDEX_op_smax_vec, INDEX_op_smin_vec, 0
7024 static const GVecGen2i sqshrnb_ops[3] = {
7025 { .fniv = gen_sqshrnb_vec,
7026 .opt_opc = sqshrnb_vec_list,
7027 .fno = gen_helper_sve2_sqshrnb_h,
7028 .vece = MO_16 },
7029 { .fniv = gen_sqshrnb_vec,
7030 .opt_opc = sqshrnb_vec_list,
7031 .fno = gen_helper_sve2_sqshrnb_s,
7032 .vece = MO_32 },
7033 { .fniv = gen_sqshrnb_vec,
7034 .opt_opc = sqshrnb_vec_list,
7035 .fno = gen_helper_sve2_sqshrnb_d,
7036 .vece = MO_64 },
7038 TRANS_FEAT(SQSHRNB, aa64_sve2, do_shr_narrow, a, sqshrnb_ops)
7040 static void gen_sqshrnt_vec(unsigned vece, TCGv_vec d,
7041 TCGv_vec n, int64_t shr)
7043 TCGv_vec t = tcg_temp_new_vec_matching(d);
7044 int halfbits = 4 << vece;
7045 int64_t max = MAKE_64BIT_MASK(0, halfbits - 1);
7046 int64_t min = -max - 1;
7048 tcg_gen_sari_vec(vece, n, n, shr);
7049 tcg_gen_dupi_vec(vece, t, min);
7050 tcg_gen_smax_vec(vece, n, n, t);
7051 tcg_gen_dupi_vec(vece, t, max);
7052 tcg_gen_smin_vec(vece, n, n, t);
7053 tcg_gen_shli_vec(vece, n, n, halfbits);
7054 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7055 tcg_gen_bitsel_vec(vece, d, t, d, n);
7056 tcg_temp_free_vec(t);
7059 static const TCGOpcode sqshrnt_vec_list[] = {
7060 INDEX_op_shli_vec, INDEX_op_sari_vec,
7061 INDEX_op_smax_vec, INDEX_op_smin_vec, 0
7063 static const GVecGen2i sqshrnt_ops[3] = {
7064 { .fniv = gen_sqshrnt_vec,
7065 .opt_opc = sqshrnt_vec_list,
7066 .load_dest = true,
7067 .fno = gen_helper_sve2_sqshrnt_h,
7068 .vece = MO_16 },
7069 { .fniv = gen_sqshrnt_vec,
7070 .opt_opc = sqshrnt_vec_list,
7071 .load_dest = true,
7072 .fno = gen_helper_sve2_sqshrnt_s,
7073 .vece = MO_32 },
7074 { .fniv = gen_sqshrnt_vec,
7075 .opt_opc = sqshrnt_vec_list,
7076 .load_dest = true,
7077 .fno = gen_helper_sve2_sqshrnt_d,
7078 .vece = MO_64 },
7080 TRANS_FEAT(SQSHRNT, aa64_sve2, do_shr_narrow, a, sqshrnt_ops)
7082 static const GVecGen2i sqrshrnb_ops[3] = {
7083 { .fno = gen_helper_sve2_sqrshrnb_h },
7084 { .fno = gen_helper_sve2_sqrshrnb_s },
7085 { .fno = gen_helper_sve2_sqrshrnb_d },
7087 TRANS_FEAT(SQRSHRNB, aa64_sve2, do_shr_narrow, a, sqrshrnb_ops)
7089 static const GVecGen2i sqrshrnt_ops[3] = {
7090 { .fno = gen_helper_sve2_sqrshrnt_h },
7091 { .fno = gen_helper_sve2_sqrshrnt_s },
7092 { .fno = gen_helper_sve2_sqrshrnt_d },
7094 TRANS_FEAT(SQRSHRNT, aa64_sve2, do_shr_narrow, a, sqrshrnt_ops)
7096 static void gen_uqshrnb_vec(unsigned vece, TCGv_vec d,
7097 TCGv_vec n, int64_t shr)
7099 TCGv_vec t = tcg_temp_new_vec_matching(d);
7100 int halfbits = 4 << vece;
7102 tcg_gen_shri_vec(vece, n, n, shr);
7103 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7104 tcg_gen_umin_vec(vece, d, n, t);
7105 tcg_temp_free_vec(t);
7108 static const TCGOpcode uqshrnb_vec_list[] = {
7109 INDEX_op_shri_vec, INDEX_op_umin_vec, 0
7111 static const GVecGen2i uqshrnb_ops[3] = {
7112 { .fniv = gen_uqshrnb_vec,
7113 .opt_opc = uqshrnb_vec_list,
7114 .fno = gen_helper_sve2_uqshrnb_h,
7115 .vece = MO_16 },
7116 { .fniv = gen_uqshrnb_vec,
7117 .opt_opc = uqshrnb_vec_list,
7118 .fno = gen_helper_sve2_uqshrnb_s,
7119 .vece = MO_32 },
7120 { .fniv = gen_uqshrnb_vec,
7121 .opt_opc = uqshrnb_vec_list,
7122 .fno = gen_helper_sve2_uqshrnb_d,
7123 .vece = MO_64 },
7125 TRANS_FEAT(UQSHRNB, aa64_sve2, do_shr_narrow, a, uqshrnb_ops)
7127 static void gen_uqshrnt_vec(unsigned vece, TCGv_vec d,
7128 TCGv_vec n, int64_t shr)
7130 TCGv_vec t = tcg_temp_new_vec_matching(d);
7131 int halfbits = 4 << vece;
7133 tcg_gen_shri_vec(vece, n, n, shr);
7134 tcg_gen_dupi_vec(vece, t, MAKE_64BIT_MASK(0, halfbits));
7135 tcg_gen_umin_vec(vece, n, n, t);
7136 tcg_gen_shli_vec(vece, n, n, halfbits);
7137 tcg_gen_bitsel_vec(vece, d, t, d, n);
7138 tcg_temp_free_vec(t);
7141 static const TCGOpcode uqshrnt_vec_list[] = {
7142 INDEX_op_shli_vec, INDEX_op_shri_vec, INDEX_op_umin_vec, 0
7144 static const GVecGen2i uqshrnt_ops[3] = {
7145 { .fniv = gen_uqshrnt_vec,
7146 .opt_opc = uqshrnt_vec_list,
7147 .load_dest = true,
7148 .fno = gen_helper_sve2_uqshrnt_h,
7149 .vece = MO_16 },
7150 { .fniv = gen_uqshrnt_vec,
7151 .opt_opc = uqshrnt_vec_list,
7152 .load_dest = true,
7153 .fno = gen_helper_sve2_uqshrnt_s,
7154 .vece = MO_32 },
7155 { .fniv = gen_uqshrnt_vec,
7156 .opt_opc = uqshrnt_vec_list,
7157 .load_dest = true,
7158 .fno = gen_helper_sve2_uqshrnt_d,
7159 .vece = MO_64 },
7161 TRANS_FEAT(UQSHRNT, aa64_sve2, do_shr_narrow, a, uqshrnt_ops)
7163 static const GVecGen2i uqrshrnb_ops[3] = {
7164 { .fno = gen_helper_sve2_uqrshrnb_h },
7165 { .fno = gen_helper_sve2_uqrshrnb_s },
7166 { .fno = gen_helper_sve2_uqrshrnb_d },
7168 TRANS_FEAT(UQRSHRNB, aa64_sve2, do_shr_narrow, a, uqrshrnb_ops)
7170 static const GVecGen2i uqrshrnt_ops[3] = {
7171 { .fno = gen_helper_sve2_uqrshrnt_h },
7172 { .fno = gen_helper_sve2_uqrshrnt_s },
7173 { .fno = gen_helper_sve2_uqrshrnt_d },
7175 TRANS_FEAT(UQRSHRNT, aa64_sve2, do_shr_narrow, a, uqrshrnt_ops)
7177 #define DO_SVE2_ZZZ_NARROW(NAME, name) \
7178 static gen_helper_gvec_3 * const name##_fns[4] = { \
7179 NULL, gen_helper_sve2_##name##_h, \
7180 gen_helper_sve2_##name##_s, gen_helper_sve2_##name##_d, \
7181 }; \
7182 TRANS_FEAT(NAME, aa64_sve2, gen_gvec_ool_arg_zzz, \
7183 name##_fns[a->esz], a, 0)
7185 DO_SVE2_ZZZ_NARROW(ADDHNB, addhnb)
7186 DO_SVE2_ZZZ_NARROW(ADDHNT, addhnt)
7187 DO_SVE2_ZZZ_NARROW(RADDHNB, raddhnb)
7188 DO_SVE2_ZZZ_NARROW(RADDHNT, raddhnt)
7190 DO_SVE2_ZZZ_NARROW(SUBHNB, subhnb)
7191 DO_SVE2_ZZZ_NARROW(SUBHNT, subhnt)
7192 DO_SVE2_ZZZ_NARROW(RSUBHNB, rsubhnb)
7193 DO_SVE2_ZZZ_NARROW(RSUBHNT, rsubhnt)
7195 static gen_helper_gvec_flags_4 * const match_fns[4] = {
7196 gen_helper_sve2_match_ppzz_b, gen_helper_sve2_match_ppzz_h, NULL, NULL
7198 TRANS_FEAT_NONSTREAMING(MATCH, aa64_sve2, do_ppzz_flags, a, match_fns[a->esz])
7200 static gen_helper_gvec_flags_4 * const nmatch_fns[4] = {
7201 gen_helper_sve2_nmatch_ppzz_b, gen_helper_sve2_nmatch_ppzz_h, NULL, NULL
7203 TRANS_FEAT_NONSTREAMING(NMATCH, aa64_sve2, do_ppzz_flags, a, nmatch_fns[a->esz])
7205 static gen_helper_gvec_4 * const histcnt_fns[4] = {
7206 NULL, NULL, gen_helper_sve2_histcnt_s, gen_helper_sve2_histcnt_d
7208 TRANS_FEAT_NONSTREAMING(HISTCNT, aa64_sve2, gen_gvec_ool_arg_zpzz,
7209 histcnt_fns[a->esz], a, 0)
7211 TRANS_FEAT_NONSTREAMING(HISTSEG, aa64_sve2, gen_gvec_ool_arg_zzz,
7212 a->esz == 0 ? gen_helper_sve2_histseg : NULL, a, 0)
7214 DO_ZPZZ_FP(FADDP, aa64_sve2, sve2_faddp_zpzz)
7215 DO_ZPZZ_FP(FMAXNMP, aa64_sve2, sve2_fmaxnmp_zpzz)
7216 DO_ZPZZ_FP(FMINNMP, aa64_sve2, sve2_fminnmp_zpzz)
7217 DO_ZPZZ_FP(FMAXP, aa64_sve2, sve2_fmaxp_zpzz)
7218 DO_ZPZZ_FP(FMINP, aa64_sve2, sve2_fminp_zpzz)
7221  * SVE Integer Multiply-Add (unpredicated); the floating-point matrix
7222  * multiply-accumulate (FMMLA) forms are grouped here as well.
7224 TRANS_FEAT_NONSTREAMING(FMMLA_s, aa64_sve_f32mm, gen_gvec_fpst_zzzz,
7225 gen_helper_fmmla_s, a->rd, a->rn, a->rm, a->ra,
7226 0, FPST_FPCR)
7227 TRANS_FEAT_NONSTREAMING(FMMLA_d, aa64_sve_f64mm, gen_gvec_fpst_zzzz,
7228 gen_helper_fmmla_d, a->rd, a->rn, a->rm, a->ra,
7229 0, FPST_FPCR)
7231 static gen_helper_gvec_4 * const sqdmlal_zzzw_fns[] = {
7232 NULL, gen_helper_sve2_sqdmlal_zzzw_h,
7233 gen_helper_sve2_sqdmlal_zzzw_s, gen_helper_sve2_sqdmlal_zzzw_d,
7235 TRANS_FEAT(SQDMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7236 sqdmlal_zzzw_fns[a->esz], a, 0)
7237 TRANS_FEAT(SQDMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7238 sqdmlal_zzzw_fns[a->esz], a, 3)
7239 TRANS_FEAT(SQDMLALBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
7240 sqdmlal_zzzw_fns[a->esz], a, 2)
7242 static gen_helper_gvec_4 * const sqdmlsl_zzzw_fns[] = {
7243 NULL, gen_helper_sve2_sqdmlsl_zzzw_h,
7244 gen_helper_sve2_sqdmlsl_zzzw_s, gen_helper_sve2_sqdmlsl_zzzw_d,
7246 TRANS_FEAT(SQDMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7247 sqdmlsl_zzzw_fns[a->esz], a, 0)
7248 TRANS_FEAT(SQDMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7249 sqdmlsl_zzzw_fns[a->esz], a, 3)
7250 TRANS_FEAT(SQDMLSLBT, aa64_sve2, gen_gvec_ool_arg_zzzz,
7251 sqdmlsl_zzzw_fns[a->esz], a, 2)
7253 static gen_helper_gvec_4 * const sqrdmlah_fns[] = {
7254 gen_helper_sve2_sqrdmlah_b, gen_helper_sve2_sqrdmlah_h,
7255 gen_helper_sve2_sqrdmlah_s, gen_helper_sve2_sqrdmlah_d,
7257 TRANS_FEAT(SQRDMLAH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
7258 sqrdmlah_fns[a->esz], a, 0)
7260 static gen_helper_gvec_4 * const sqrdmlsh_fns[] = {
7261 gen_helper_sve2_sqrdmlsh_b, gen_helper_sve2_sqrdmlsh_h,
7262 gen_helper_sve2_sqrdmlsh_s, gen_helper_sve2_sqrdmlsh_d,
7264 TRANS_FEAT(SQRDMLSH_zzzz, aa64_sve2, gen_gvec_ool_arg_zzzz,
7265 sqrdmlsh_fns[a->esz], a, 0)
7267 static gen_helper_gvec_4 * const smlal_zzzw_fns[] = {
7268 NULL, gen_helper_sve2_smlal_zzzw_h,
7269 gen_helper_sve2_smlal_zzzw_s, gen_helper_sve2_smlal_zzzw_d,
7271 TRANS_FEAT(SMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7272 smlal_zzzw_fns[a->esz], a, 0)
7273 TRANS_FEAT(SMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7274 smlal_zzzw_fns[a->esz], a, 1)
7276 static gen_helper_gvec_4 * const umlal_zzzw_fns[] = {
7277 NULL, gen_helper_sve2_umlal_zzzw_h,
7278 gen_helper_sve2_umlal_zzzw_s, gen_helper_sve2_umlal_zzzw_d,
7280 TRANS_FEAT(UMLALB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7281 umlal_zzzw_fns[a->esz], a, 0)
7282 TRANS_FEAT(UMLALT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7283 umlal_zzzw_fns[a->esz], a, 1)
7285 static gen_helper_gvec_4 * const smlsl_zzzw_fns[] = {
7286 NULL, gen_helper_sve2_smlsl_zzzw_h,
7287 gen_helper_sve2_smlsl_zzzw_s, gen_helper_sve2_smlsl_zzzw_d,
7289 TRANS_FEAT(SMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7290 smlsl_zzzw_fns[a->esz], a, 0)
7291 TRANS_FEAT(SMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7292 smlsl_zzzw_fns[a->esz], a, 1)
7294 static gen_helper_gvec_4 * const umlsl_zzzw_fns[] = {
7295 NULL, gen_helper_sve2_umlsl_zzzw_h,
7296 gen_helper_sve2_umlsl_zzzw_s, gen_helper_sve2_umlsl_zzzw_d,
7298 TRANS_FEAT(UMLSLB_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7299 umlsl_zzzw_fns[a->esz], a, 0)
7300 TRANS_FEAT(UMLSLT_zzzw, aa64_sve2, gen_gvec_ool_arg_zzzz,
7301 umlsl_zzzw_fns[a->esz], a, 1)
7303 static gen_helper_gvec_4 * const cmla_fns[] = {
7304 gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
7305 gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
7307 TRANS_FEAT(CMLA_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7308 cmla_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7310 static gen_helper_gvec_4 * const cdot_fns[] = {
7311 NULL, NULL, gen_helper_sve2_cdot_zzzz_s, gen_helper_sve2_cdot_zzzz_d
7313 TRANS_FEAT(CDOT_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7314 cdot_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7316 static gen_helper_gvec_4 * const sqrdcmlah_fns[] = {
7317 gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
7318 gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
7320 TRANS_FEAT(SQRDCMLAH_zzzz, aa64_sve2, gen_gvec_ool_zzzz,
7321 sqrdcmlah_fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot)
7323 TRANS_FEAT(USDOT_zzzz, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7324 a->esz == 2 ? gen_helper_gvec_usdot_b : NULL, a, 0)
7326 TRANS_FEAT_NONSTREAMING(AESMC, aa64_sve2_aes, gen_gvec_ool_zz,
7327 gen_helper_crypto_aesmc, a->rd, a->rd, a->decrypt)
7329 TRANS_FEAT_NONSTREAMING(AESE, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
7330 gen_helper_crypto_aese, a, false)
7331 TRANS_FEAT_NONSTREAMING(AESD, aa64_sve2_aes, gen_gvec_ool_arg_zzz,
7332 gen_helper_crypto_aese, a, true)
7334 TRANS_FEAT_NONSTREAMING(SM4E, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
7335 gen_helper_crypto_sm4e, a, 0)
7336 TRANS_FEAT_NONSTREAMING(SM4EKEY, aa64_sve2_sm4, gen_gvec_ool_arg_zzz,
7337 gen_helper_crypto_sm4ekey, a, 0)
7339 TRANS_FEAT_NONSTREAMING(RAX1, aa64_sve2_sha3, gen_gvec_fn_arg_zzz,
7340 gen_gvec_rax1, a)
7342 TRANS_FEAT(FCVTNT_sh, aa64_sve2, gen_gvec_fpst_arg_zpz,
7343 gen_helper_sve2_fcvtnt_sh, a, 0, FPST_FPCR)
7344 TRANS_FEAT(FCVTNT_ds, aa64_sve2, gen_gvec_fpst_arg_zpz,
7345 gen_helper_sve2_fcvtnt_ds, a, 0, FPST_FPCR)
7347 TRANS_FEAT(BFCVTNT, aa64_sve_bf16, gen_gvec_fpst_arg_zpz,
7348 gen_helper_sve_bfcvtnt, a, 0, FPST_FPCR)
7350 TRANS_FEAT(FCVTLT_hs, aa64_sve2, gen_gvec_fpst_arg_zpz,
7351 gen_helper_sve2_fcvtlt_hs, a, 0, FPST_FPCR)
7352 TRANS_FEAT(FCVTLT_sd, aa64_sve2, gen_gvec_fpst_arg_zpz,
7353 gen_helper_sve2_fcvtlt_sd, a, 0, FPST_FPCR)
7355 TRANS_FEAT(FCVTX_ds, aa64_sve2, do_frint_mode, a,
7356 float_round_to_odd, gen_helper_sve_fcvt_ds)
7357 TRANS_FEAT(FCVTXNT_ds, aa64_sve2, do_frint_mode, a,
7358 float_round_to_odd, gen_helper_sve2_fcvtnt_ds)
7360 static gen_helper_gvec_3_ptr * const flogb_fns[] = {
7361 NULL, gen_helper_flogb_h,
7362 gen_helper_flogb_s, gen_helper_flogb_d
7364 TRANS_FEAT(FLOGB, aa64_sve2, gen_gvec_fpst_arg_zpz, flogb_fns[a->esz],
7365 a, 0, a->esz == MO_16 ? FPST_FPCR_F16 : FPST_FPCR)
7367 static bool do_FMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sub, bool sel)
7369 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzzw_s,
7370 a->rd, a->rn, a->rm, a->ra,
7371 (sel << 1) | sub, cpu_env);
7374 TRANS_FEAT(FMLALB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, false)
7375 TRANS_FEAT(FMLALT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, false, true)
7376 TRANS_FEAT(FMLSLB_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, false)
7377 TRANS_FEAT(FMLSLT_zzzw, aa64_sve2, do_FMLAL_zzzw, a, true, true)
7379 static bool do_FMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sub, bool sel)
7381 return gen_gvec_ptr_zzzz(s, gen_helper_sve2_fmlal_zzxw_s,
7382 a->rd, a->rn, a->rm, a->ra,
7383 (a->index << 2) | (sel << 1) | sub, cpu_env);
7386 TRANS_FEAT(FMLALB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, false)
7387 TRANS_FEAT(FMLALT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, false, true)
7388 TRANS_FEAT(FMLSLB_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, false)
7389 TRANS_FEAT(FMLSLT_zzxw, aa64_sve2, do_FMLAL_zzxw, a, true, true)
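/*
 * The data word packed above mirrors the zzzw case with the lane index
 * on top: bit 0 selects add vs subtract (sub), bit 1 selects the
 * bottom or top half (sel), and a->index occupies the bits above.
 */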
7391 TRANS_FEAT_NONSTREAMING(SMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7392 gen_helper_gvec_smmla_b, a, 0)
7393 TRANS_FEAT_NONSTREAMING(USMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7394 gen_helper_gvec_usmmla_b, a, 0)
7395 TRANS_FEAT_NONSTREAMING(UMMLA, aa64_sve_i8mm, gen_gvec_ool_arg_zzzz,
7396 gen_helper_gvec_ummla_b, a, 0)
7398 TRANS_FEAT(BFDOT_zzzz, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
7399 gen_helper_gvec_bfdot, a, 0)
7400 TRANS_FEAT(BFDOT_zzxz, aa64_sve_bf16, gen_gvec_ool_arg_zzxz,
7401 gen_helper_gvec_bfdot_idx, a)
7403 TRANS_FEAT_NONSTREAMING(BFMMLA, aa64_sve_bf16, gen_gvec_ool_arg_zzzz,
7404 gen_helper_gvec_bfmmla, a, 0)
7406 static bool do_BFMLAL_zzzw(DisasContext *s, arg_rrrr_esz *a, bool sel)
7408 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal,
7409 a->rd, a->rn, a->rm, a->ra, sel, FPST_FPCR);
7412 TRANS_FEAT(BFMLALB_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, false)
7413 TRANS_FEAT(BFMLALT_zzzw, aa64_sve_bf16, do_BFMLAL_zzzw, a, true)
7415 static bool do_BFMLAL_zzxw(DisasContext *s, arg_rrxr_esz *a, bool sel)
7417 return gen_gvec_fpst_zzzz(s, gen_helper_gvec_bfmlal_idx,
7418 a->rd, a->rn, a->rm, a->ra,
7419 (a->index << 1) | sel, FPST_FPCR);
7422 TRANS_FEAT(BFMLALB_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, false)
7423 TRANS_FEAT(BFMLALT_zzxw, aa64_sve_bf16, do_BFMLAL_zzxw, a, true)
7425 static bool trans_PSEL(DisasContext *s, arg_psel *a)
7427 int vl = vec_full_reg_size(s);
7428 int pl = pred_gvec_reg_size(s);
7429 int elements = vl >> a->esz;
7430 TCGv_i64 tmp, didx, dbit;
7431 TCGv_ptr ptr;
7433 if (!dc_isar_feature(aa64_sme, s)) {
7434 return false;
7436 if (!sve_access_check(s)) {
7437 return true;
7440 tmp = tcg_temp_new_i64();
7441 dbit = tcg_temp_new_i64();
7442 didx = tcg_temp_new_i64();
7443 ptr = tcg_temp_new_ptr();
7445 /* Compute the predicate element. */
7446 tcg_gen_addi_i64(tmp, cpu_reg(s, a->rv), a->imm);
7447 if (is_power_of_2(elements)) {
7448 tcg_gen_andi_i64(tmp, tmp, elements - 1);
7449 } else {
7450 tcg_gen_remu_i64(tmp, tmp, tcg_constant_i64(elements));
7453 /* Extract the predicate byte and bit indices. */
7454 tcg_gen_shli_i64(tmp, tmp, a->esz);
7455 tcg_gen_andi_i64(dbit, tmp, 7);
7456 tcg_gen_shri_i64(didx, tmp, 3);
7457 if (HOST_BIG_ENDIAN) {
7458 tcg_gen_xori_i64(didx, didx, 7);
7461 /* Load the predicate word. */
7462 tcg_gen_trunc_i64_ptr(ptr, didx);
7463 tcg_gen_add_ptr(ptr, ptr, cpu_env);
7464 tcg_gen_ld8u_i64(tmp, ptr, pred_full_reg_offset(s, a->pm));
7466 /* Extract the predicate bit and replicate to MO_64. */
7467 tcg_gen_shr_i64(tmp, tmp, dbit);
7468 tcg_gen_andi_i64(tmp, tmp, 1);
7469 tcg_gen_neg_i64(tmp, tmp);
7471 /* Apply to either copy the source, or write zeros. */
7472 tcg_gen_gvec_ands(MO_64, pred_full_reg_offset(s, a->pd),
7473 pred_full_reg_offset(s, a->pn), tmp, pl, pl);
7475 tcg_temp_free_i64(tmp);
7476 tcg_temp_free_i64(dbit);
7477 tcg_temp_free_i64(didx);
7478 tcg_temp_free_ptr(ptr);
7479 return true;
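/*
 * Worked example of the arithmetic above: with esz = MO_16 and a
 * 256-bit vector, vl = 32 bytes so elements = 16 and the selector
 * index is taken modulo 16.  For index 5, tmp becomes 5 << 1 = 10,
 * giving bit 2 (dbit) of predicate byte 1 (didx); that single bit is
 * then replicated via neg to all-ones or all-zeros and ANDed over Pn.
 */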
7482 static void gen_sclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
7484 tcg_gen_smax_i32(d, a, n);
7485 tcg_gen_smin_i32(d, d, m);
7488 static void gen_sclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
7490 tcg_gen_smax_i64(d, a, n);
7491 tcg_gen_smin_i64(d, d, m);
7494 static void gen_sclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
7495 TCGv_vec m, TCGv_vec a)
7497 tcg_gen_smax_vec(vece, d, a, n);
7498 tcg_gen_smin_vec(vece, d, d, m);
7501 static void gen_sclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
7502 uint32_t a, uint32_t oprsz, uint32_t maxsz)
7504 static const TCGOpcode vecop[] = {
7505 INDEX_op_smin_vec, INDEX_op_smax_vec, 0
7507 static const GVecGen4 ops[4] = {
7508 { .fniv = gen_sclamp_vec,
7509 .fno = gen_helper_gvec_sclamp_b,
7510 .opt_opc = vecop,
7511 .vece = MO_8 },
7512 { .fniv = gen_sclamp_vec,
7513 .fno = gen_helper_gvec_sclamp_h,
7514 .opt_opc = vecop,
7515 .vece = MO_16 },
7516 { .fni4 = gen_sclamp_i32,
7517 .fniv = gen_sclamp_vec,
7518 .fno = gen_helper_gvec_sclamp_s,
7519 .opt_opc = vecop,
7520 .vece = MO_32 },
7521 { .fni8 = gen_sclamp_i64,
7522 .fniv = gen_sclamp_vec,
7523 .fno = gen_helper_gvec_sclamp_d,
7524 .opt_opc = vecop,
7525 .vece = MO_64,
7526 .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
7528 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
7531 TRANS_FEAT(SCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_sclamp, a)
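/*
 * I.e. SCLAMP computes d = smin(smax(a, n), m) per element, bounding
 * the accumulator operand (ra, which the encoding aliases to the
 * destination Zd) below by Zn and above by Zm; UCLAMP below is the
 * unsigned counterpart.
 */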
7533 static void gen_uclamp_i32(TCGv_i32 d, TCGv_i32 n, TCGv_i32 m, TCGv_i32 a)
7535 tcg_gen_umax_i32(d, a, n);
7536 tcg_gen_umin_i32(d, d, m);
7539 static void gen_uclamp_i64(TCGv_i64 d, TCGv_i64 n, TCGv_i64 m, TCGv_i64 a)
7541 tcg_gen_umax_i64(d, a, n);
7542 tcg_gen_umin_i64(d, d, m);
7545 static void gen_uclamp_vec(unsigned vece, TCGv_vec d, TCGv_vec n,
7546 TCGv_vec m, TCGv_vec a)
7548 tcg_gen_umax_vec(vece, d, a, n);
7549 tcg_gen_umin_vec(vece, d, d, m);
7552 static void gen_uclamp(unsigned vece, uint32_t d, uint32_t n, uint32_t m,
7553 uint32_t a, uint32_t oprsz, uint32_t maxsz)
7555 static const TCGOpcode vecop[] = {
7556 INDEX_op_umin_vec, INDEX_op_umax_vec, 0
7558 static const GVecGen4 ops[4] = {
7559 { .fniv = gen_uclamp_vec,
7560 .fno = gen_helper_gvec_uclamp_b,
7561 .opt_opc = vecop,
7562 .vece = MO_8 },
7563 { .fniv = gen_uclamp_vec,
7564 .fno = gen_helper_gvec_uclamp_h,
7565 .opt_opc = vecop,
7566 .vece = MO_16 },
7567 { .fni4 = gen_uclamp_i32,
7568 .fniv = gen_uclamp_vec,
7569 .fno = gen_helper_gvec_uclamp_s,
7570 .opt_opc = vecop,
7571 .vece = MO_32 },
7572 { .fni8 = gen_uclamp_i64,
7573 .fniv = gen_uclamp_vec,
7574 .fno = gen_helper_gvec_uclamp_d,
7575 .opt_opc = vecop,
7576 .vece = MO_64,
7577 .prefer_i64 = TCG_TARGET_REG_BITS == 64 }
7579 tcg_gen_gvec_4(d, n, m, a, oprsz, maxsz, &ops[vece]);
7582 TRANS_FEAT(UCLAMP, aa64_sme, gen_gvec_fn_arg_zzzz, gen_uclamp, a)