target/arm/mve_helper.c
/*
 * M-profile MVE Operations
 *
 * Copyright (c) 2021 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/int128.h"
#include "cpu.h"
#include "internals.h"
#include "vec_internal.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
static uint16_t mve_element_mask(CPUARMState *env)
{
    /*
     * Return the mask of which elements in the MVE vector should be
     * updated. This is a combination of multiple things:
     *  (1) by default, we update every lane in the vector
     *  (2) VPT predication stores its state in the VPR register;
     *  (3) low-overhead-branch tail predication will mask out part
     *      of the vector on the final iteration of the loop
     *  (4) if EPSR.ECI is set then we must execute only some beats
     *      of the insn
     * We combine all these into a 16-bit result with the same semantics
     * as VPR.P0: 0 to mask the lane, 1 if it is active.
     * 8-bit vector ops will look at all bits of the result;
     * 16-bit ops will look at bits 0, 2, 4, ...;
     * 32-bit ops will look at bits 0, 4, 8 and 12.
     * Compare pseudocode GetCurInstrBeat(), though that only returns
     * the 4-bit slice of the mask corresponding to a single beat.
     */
    uint16_t mask = FIELD_EX32(env->v7m.vpr, V7M_VPR, P0);

    if (!(env->v7m.vpr & R_V7M_VPR_MASK01_MASK)) {
        mask |= 0xff;
    }
    if (!(env->v7m.vpr & R_V7M_VPR_MASK23_MASK)) {
        mask |= 0xff00;
    }

    if (env->v7m.ltpsize < 4 &&
        env->regs[14] <= (1 << (4 - env->v7m.ltpsize))) {
        /*
         * Tail predication active, and this is the last loop iteration.
         * The element size is (1 << ltpsize), and we only want to process
         * loopcount elements, so we want to retain the least significant
         * (loopcount * esize) predicate bits and zero out bits above that.
         */
        int masklen = env->regs[14] << env->v7m.ltpsize;
        assert(masklen <= 16);
        mask &= MAKE_64BIT_MASK(0, masklen);
    }

    if ((env->condexec_bits & 0xf) == 0) {
        /*
         * ECI bits indicate which beats are already executed;
         * we handle this by effectively predicating them out.
         */
        int eci = env->condexec_bits >> 4;
        switch (eci) {
        case ECI_NONE:
            break;
        case ECI_A0:
            mask &= 0xfff0;
            break;
        case ECI_A0A1:
            mask &= 0xff00;
            break;
        case ECI_A0A1A2:
        case ECI_A0A1A2B0:
            mask &= 0xf000;
            break;
        default:
            g_assert_not_reached();
        }
    }

    return mask;
}
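
/*
 * Worked example (editor's illustration, not part of the upstream file):
 * suppose VPR.P0 == 0x0f0f, both MASK01 and MASK23 are non-zero (VPT
 * predication active for both halves), there is no tail predication,
 * and ECI == ECI_A0A1 (beats 0 and 1 already executed). Then the
 * function computes mask = 0x0f0f & 0xff00 = 0x0f00. A 32-bit op
 * examines bits 0, 4, 8 and 12: only bit 8 is set, so only element 2
 * of the Qreg is updated.
 */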
static void mve_advance_vpt(CPUARMState *env)
{
    /* Advance the VPT and ECI state if necessary */
    uint32_t vpr = env->v7m.vpr;
    unsigned mask01, mask23;

    if ((env->condexec_bits & 0xf) == 0) {
        env->condexec_bits = (env->condexec_bits == (ECI_A0A1A2B0 << 4)) ?
            (ECI_A0 << 4) : (ECI_NONE << 4);
    }

    if (!(vpr & (R_V7M_VPR_MASK01_MASK | R_V7M_VPR_MASK23_MASK))) {
        /* VPT not enabled, nothing to do */
        return;
    }

    mask01 = FIELD_EX32(vpr, V7M_VPR, MASK01);
    mask23 = FIELD_EX32(vpr, V7M_VPR, MASK23);
    if (mask01 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff;
    }
    if (mask23 > 8) {
        /* high bit set, but not 0b1000: invert the relevant half of P0 */
        vpr ^= 0xff00;
    }
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK01, mask01 << 1);
    vpr = FIELD_DP32(vpr, V7M_VPR, MASK23, mask23 << 1);
    env->v7m.vpr = vpr;
}
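
/*
 * Example of the mask update above (editor's illustration, not part of
 * the upstream file): a MASK01 value of 0b0100 is not greater than 8, so
 * P0 is left alone and the field becomes 0b1000; on the next advance it
 * becomes 0b0000 and predication of that half ends. A value such as
 * 0b1100 is greater than 8, so the low byte of P0 is inverted first and
 * the field then becomes 0b1000.
 */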
#define DO_VLDR(OP, MSIZE, LDTYPE, ESIZE, TYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned b, e; \
        /* \
         * R_SXTM allows the dest reg to become UNKNOWN for abandoned \
         * beats so we don't care if we update part of the dest and \
         * then take an exception. \
         */ \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
            if (mask & (1 << b)) { \
                d[H##ESIZE(e)] = cpu_##LDTYPE##_data_ra(env, addr, GETPC()); \
            } \
            addr += MSIZE; \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VSTR(OP, MSIZE, STTYPE, ESIZE, TYPE) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, uint32_t addr) \
    { \
        TYPE *d = vd; \
        uint16_t mask = mve_element_mask(env); \
        unsigned b, e; \
        for (b = 0, e = 0; b < 16; b += ESIZE, e++) { \
            if (mask & (1 << b)) { \
                cpu_##STTYPE##_data_ra(env, addr, d[H##ESIZE(e)], GETPC()); \
            } \
            addr += MSIZE; \
        } \
        mve_advance_vpt(env); \
    }

DO_VLDR(vldrb, 1, ldub, 1, uint8_t)
DO_VLDR(vldrh, 2, lduw, 2, uint16_t)
DO_VLDR(vldrw, 4, ldl, 4, uint32_t)

DO_VSTR(vstrb, 1, stb, 1, uint8_t)
DO_VSTR(vstrh, 2, stw, 2, uint16_t)
DO_VSTR(vstrw, 4, stl, 4, uint32_t)

DO_VLDR(vldrb_sh, 1, ldsb, 2, int16_t)
DO_VLDR(vldrb_sw, 1, ldsb, 4, int32_t)
DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
DO_VLDR(vldrb_uw, 1, ldub, 4, uint32_t)
DO_VLDR(vldrh_sw, 2, ldsw, 4, int32_t)
DO_VLDR(vldrh_uw, 2, lduw, 4, uint32_t)

DO_VSTR(vstrb_h, 1, stb, 2, int16_t)
DO_VSTR(vstrb_w, 1, stb, 4, int32_t)
DO_VSTR(vstrh_w, 2, stw, 4, int32_t)

#undef DO_VLDR
#undef DO_VSTR
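
/*
 * Note on the widening/narrowing forms above (editor's illustration, not
 * part of the upstream file): DO_VLDR(vldrb_uh, 1, ldub, 2, uint16_t)
 * expands to a helper with MSIZE = 1 and ESIZE = 2, so the loop runs
 * eight times, loads one byte per iteration (ldub, zero-extended into a
 * uint16_t element) and advances addr by one byte while stepping the
 * predicate mask by two bits per element. The vstrb_h/vstrb_w/vstrh_w
 * forms are the corresponding narrowing stores.
 */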
/*
 * The mergemask(D, R, M) macro performs the operation "*D = R" but
 * storing only the bytes which correspond to 1 bits in M,
 * leaving other bytes in *D unchanged. We use _Generic
 * to select the correct implementation based on the type of D.
 */

static void mergemask_ub(uint8_t *d, uint8_t r, uint16_t mask)
{
    if (mask & 1) {
        *d = r;
    }
}

static void mergemask_sb(int8_t *d, int8_t r, uint16_t mask)
{
    mergemask_ub((uint8_t *)d, r, mask);
}

static void mergemask_uh(uint16_t *d, uint16_t r, uint16_t mask)
{
    uint16_t bmask = expand_pred_b_data[mask & 3];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sh(int16_t *d, int16_t r, uint16_t mask)
{
    mergemask_uh((uint16_t *)d, r, mask);
}

static void mergemask_uw(uint32_t *d, uint32_t r, uint16_t mask)
{
    uint32_t bmask = expand_pred_b_data[mask & 0xf];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sw(int32_t *d, int32_t r, uint16_t mask)
{
    mergemask_uw((uint32_t *)d, r, mask);
}

static void mergemask_uq(uint64_t *d, uint64_t r, uint16_t mask)
{
    uint64_t bmask = expand_pred_b_data[mask & 0xff];
    *d = (*d & ~bmask) | (r & bmask);
}

static void mergemask_sq(int64_t *d, int64_t r, uint16_t mask)
{
    mergemask_uq((uint64_t *)d, r, mask);
}

#define mergemask(D, R, M) \
    _Generic(D, \
             uint8_t *: mergemask_ub, \
             int8_t *: mergemask_sb, \
             uint16_t *: mergemask_uh, \
             int16_t *: mergemask_sh, \
             uint32_t *: mergemask_uw, \
             int32_t *: mergemask_sw, \
             uint64_t *: mergemask_uq, \
             int64_t *: mergemask_sq)(D, R, M)
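
/*
 * Example (editor's illustration, not part of the upstream file): a call
 * mergemask(&d[H2(e)], r, mask) with d of type uint16_t * dispatches to
 * mergemask_uh at compile time. There, mask & 3 picks the two predicate
 * bits covering this halfword's two bytes, and the expand_pred_b_data[]
 * lookup turns them into a byte-granular bitmask (e.g. 0b01 becomes
 * 0x00ff), so only the predicated-on bytes of *d are overwritten.
 */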
void HELPER(mve_vdup)(CPUARMState *env, void *vd, uint32_t val)
{
    /*
     * The generated code already replicated an 8 or 16 bit constant
     * into the 32-bit value, so we only need to write the 32-bit
     * value to all elements of the Qreg, allowing for predication.
     */
    uint32_t *d = vd;
    uint16_t mask = mve_element_mask(env);
    unsigned e;
    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        mergemask(&d[H4(e)], val, mask);
    }
    mve_advance_vpt(env);
}

#define DO_1OP(OP, ESIZE, TYPE, FN) \
    void HELPER(mve_##OP)(CPUARMState *env, void *vd, void *vm) \
    { \
        TYPE *d = vd, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], FN(m[H##ESIZE(e)]), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_CLS_B(N) (clrsb32(N) - 24)
#define DO_CLS_H(N) (clrsb32(N) - 16)

DO_1OP(vclsb, 1, int8_t, DO_CLS_B)
DO_1OP(vclsh, 2, int16_t, DO_CLS_H)
DO_1OP(vclsw, 4, int32_t, clrsb32)

#define DO_CLZ_B(N) (clz32(N) - 24)
#define DO_CLZ_H(N) (clz32(N) - 16)

DO_1OP(vclzb, 1, uint8_t, DO_CLZ_B)
DO_1OP(vclzh, 2, uint16_t, DO_CLZ_H)
DO_1OP(vclzw, 4, uint32_t, clz32)

DO_1OP(vrev16b, 2, uint16_t, bswap16)
DO_1OP(vrev32b, 4, uint32_t, bswap32)
DO_1OP(vrev32h, 4, uint32_t, hswap32)
DO_1OP(vrev64b, 8, uint64_t, bswap64)
DO_1OP(vrev64h, 8, uint64_t, hswap64)
DO_1OP(vrev64w, 8, uint64_t, wswap64)

#define DO_NOT(N) (~(N))

DO_1OP(vmvn, 8, uint64_t, DO_NOT)

#define DO_ABS(N) ((N) < 0 ? -(N) : (N))
#define DO_FABSH(N) ((N) & dup_const(MO_16, 0x7fff))
#define DO_FABSS(N) ((N) & dup_const(MO_32, 0x7fffffff))

DO_1OP(vabsb, 1, int8_t, DO_ABS)
DO_1OP(vabsh, 2, int16_t, DO_ABS)
DO_1OP(vabsw, 4, int32_t, DO_ABS)

/* We can do these 64 bits at a time */
DO_1OP(vfabsh, 8, uint64_t, DO_FABSH)
DO_1OP(vfabss, 8, uint64_t, DO_FABSS)

#define DO_NEG(N) (-(N))
#define DO_FNEGH(N) ((N) ^ dup_const(MO_16, 0x8000))
#define DO_FNEGS(N) ((N) ^ dup_const(MO_32, 0x80000000))

DO_1OP(vnegb, 1, int8_t, DO_NEG)
DO_1OP(vnegh, 2, int16_t, DO_NEG)
DO_1OP(vnegw, 4, int32_t, DO_NEG)

/* We can do these 64 bits at a time */
DO_1OP(vfnegh, 8, uint64_t, DO_FNEGH)
DO_1OP(vfnegs, 8, uint64_t, DO_FNEGS)
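
/*
 * Why the "64 bits at a time" trick works (editor's note, not part of the
 * upstream file): float abs/neg only clear or flip the sign bit of each
 * 16-bit or 32-bit lane, and the dup_const() masks touch exactly those
 * bits, so lanes cannot interact and one 64-bit integer op per beat gives
 * the same result as handling each lane individually.
 */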
#define DO_2OP(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, \
                                void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], \
                      FN(n[H##ESIZE(e)], m[H##ESIZE(e)]), mask); \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_U(OP, FN) \
    DO_2OP(OP##b, 1, uint8_t, FN) \
    DO_2OP(OP##h, 2, uint16_t, FN) \
    DO_2OP(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_S(OP, FN) \
    DO_2OP(OP##b, 1, int8_t, FN) \
    DO_2OP(OP##h, 2, int16_t, FN) \
    DO_2OP(OP##w, 4, int32_t, FN)
/*
 * "Long" operations where two half-sized inputs (taken from either the
 * top or the bottom of the input vector) produce a double-width result.
 * Here ESIZE, TYPE are for the input, and LESIZE, LTYPE for the output.
 */
#define DO_2OP_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], \
                         m[H##ESIZE(le * 2 + TOP)]); \
            mergemask(&d[H##LESIZE(le)], r, mask); \
        } \
        mve_advance_vpt(env); \
    }
#define DO_2OP_SAT(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            TYPE r = FN(n[H##ESIZE(e)], m[H##ESIZE(e)], &sat); \
            mergemask(&d[H##ESIZE(e)], r, mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned 2-op helpers for all sizes */
#define DO_2OP_SAT_U(OP, FN) \
    DO_2OP_SAT(OP##b, 1, uint8_t, FN) \
    DO_2OP_SAT(OP##h, 2, uint16_t, FN) \
    DO_2OP_SAT(OP##w, 4, uint32_t, FN)

/* provide signed 2-op helpers for all sizes */
#define DO_2OP_SAT_S(OP, FN) \
    DO_2OP_SAT(OP##b, 1, int8_t, FN) \
    DO_2OP_SAT(OP##h, 2, int16_t, FN) \
    DO_2OP_SAT(OP##w, 4, int32_t, FN)
#define DO_AND(N, M) ((N) & (M))
#define DO_BIC(N, M) ((N) & ~(M))
#define DO_ORR(N, M) ((N) | (M))
#define DO_ORN(N, M) ((N) | ~(M))
#define DO_EOR(N, M) ((N) ^ (M))

DO_2OP(vand, 8, uint64_t, DO_AND)
DO_2OP(vbic, 8, uint64_t, DO_BIC)
DO_2OP(vorr, 8, uint64_t, DO_ORR)
DO_2OP(vorn, 8, uint64_t, DO_ORN)
DO_2OP(veor, 8, uint64_t, DO_EOR)

#define DO_ADD(N, M) ((N) + (M))
#define DO_SUB(N, M) ((N) - (M))
#define DO_MUL(N, M) ((N) * (M))

DO_2OP_U(vadd, DO_ADD)
DO_2OP_U(vsub, DO_SUB)
DO_2OP_U(vmul, DO_MUL)

DO_2OP_L(vmullbsb, 0, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmullbsh, 0, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmullbsw, 0, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmullbub, 0, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmullbuh, 0, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmullbuw, 0, 4, uint32_t, 8, uint64_t, DO_MUL)

DO_2OP_L(vmulltsb, 1, 1, int8_t, 2, int16_t, DO_MUL)
DO_2OP_L(vmulltsh, 1, 2, int16_t, 4, int32_t, DO_MUL)
DO_2OP_L(vmulltsw, 1, 4, int32_t, 8, int64_t, DO_MUL)
DO_2OP_L(vmulltub, 1, 1, uint8_t, 2, uint16_t, DO_MUL)
DO_2OP_L(vmulltuh, 1, 2, uint16_t, 4, uint32_t, DO_MUL)
DO_2OP_L(vmulltuw, 1, 4, uint32_t, 8, uint64_t, DO_MUL)
/*
 * Because the computation type is at least twice as large as required,
 * these work for both signed and unsigned source types.
 */
static inline uint8_t do_mulh_b(int32_t n, int32_t m)
{
    return (n * m) >> 8;
}

static inline uint16_t do_mulh_h(int32_t n, int32_t m)
{
    return (n * m) >> 16;
}

static inline uint32_t do_mulh_w(int64_t n, int64_t m)
{
    return (n * m) >> 32;
}

static inline uint8_t do_rmulh_b(int32_t n, int32_t m)
{
    return (n * m + (1U << 7)) >> 8;
}

static inline uint16_t do_rmulh_h(int32_t n, int32_t m)
{
    return (n * m + (1U << 15)) >> 16;
}

static inline uint32_t do_rmulh_w(int64_t n, int64_t m)
{
    return (n * m + (1U << 31)) >> 32;
}

DO_2OP(vmulhsb, 1, int8_t, do_mulh_b)
DO_2OP(vmulhsh, 2, int16_t, do_mulh_h)
DO_2OP(vmulhsw, 4, int32_t, do_mulh_w)
DO_2OP(vmulhub, 1, uint8_t, do_mulh_b)
DO_2OP(vmulhuh, 2, uint16_t, do_mulh_h)
DO_2OP(vmulhuw, 4, uint32_t, do_mulh_w)

DO_2OP(vrmulhsb, 1, int8_t, do_rmulh_b)
DO_2OP(vrmulhsh, 2, int16_t, do_rmulh_h)
DO_2OP(vrmulhsw, 4, int32_t, do_rmulh_w)
DO_2OP(vrmulhub, 1, uint8_t, do_rmulh_b)
DO_2OP(vrmulhuh, 2, uint16_t, do_rmulh_h)
DO_2OP(vrmulhuw, 4, uint32_t, do_rmulh_w)
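
/*
 * Worked example of the rounding variants (editor's illustration, not part
 * of the upstream file): do_mulh_b(64, 3) is (192 >> 8) == 0, i.e. the
 * truncated high byte, while do_rmulh_b(64, 3) adds the rounding constant
 * first, (192 + 128) >> 8 == 1, rounding the discarded fraction to nearest.
 */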
#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))

DO_2OP_S(vmaxs, DO_MAX)
DO_2OP_U(vmaxu, DO_MAX)
DO_2OP_S(vmins, DO_MIN)
DO_2OP_U(vminu, DO_MIN)

#define DO_ABD(N, M) ((N) >= (M) ? (N) - (M) : (M) - (N))

DO_2OP_S(vabds, DO_ABD)
DO_2OP_U(vabdu, DO_ABD)

static inline uint32_t do_vhadd_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n + m) >> 1;
}

static inline int32_t do_vhadd_s(int32_t n, int32_t m)
{
    return ((int64_t)n + m) >> 1;
}

static inline uint32_t do_vhsub_u(uint32_t n, uint32_t m)
{
    return ((uint64_t)n - m) >> 1;
}

static inline int32_t do_vhsub_s(int32_t n, int32_t m)
{
    return ((int64_t)n - m) >> 1;
}

DO_2OP_S(vhadds, do_vhadd_s)
DO_2OP_U(vhaddu, do_vhadd_u)
DO_2OP_S(vhsubs, do_vhsub_s)
DO_2OP_U(vhsubu, do_vhsub_u)
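
/*
 * The halving add/sub helpers widen to 64 bits so that the carry out of
 * the 32-bit operation survives until the shift (editor's note, not part
 * of the upstream file): e.g. do_vhadd_u(0xffffffff, 1) computes
 * (0x100000000 >> 1) == 0x80000000, whereas a 32-bit addition would have
 * wrapped to 0 first.
 */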
#define DO_VSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, false, NULL)
#define DO_VRSHLS(N, M) do_sqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)
#define DO_VRSHLU(N, M) do_uqrshl_bhs(N, (int8_t)(M), sizeof(N) * 8, true, NULL)

DO_2OP_S(vshls, DO_VSHLS)
DO_2OP_U(vshlu, DO_VSHLU)
DO_2OP_S(vrshls, DO_VRSHLS)
DO_2OP_U(vrshlu, DO_VRSHLU)

#define DO_RHADD_S(N, M) (((int64_t)(N) + (M) + 1) >> 1)
#define DO_RHADD_U(N, M) (((uint64_t)(N) + (M) + 1) >> 1)

DO_2OP_S(vrhadds, DO_RHADD_S)
DO_2OP_U(vrhaddu, DO_RHADD_U)
static void do_vadc(CPUARMState *env, uint32_t *d, uint32_t *n, uint32_t *m,
                    uint32_t inv, uint32_t carry_in, bool update_flags)
{
    uint16_t mask = mve_element_mask(env);
    unsigned e;

    /* If any additions trigger, we will update flags. */
    if (mask & 0x1111) {
        update_flags = true;
    }

    for (e = 0; e < 16 / 4; e++, mask >>= 4) {
        uint64_t r = carry_in;
        r += n[H4(e)];
        r += m[H4(e)] ^ inv;
        if (mask & 1) {
            carry_in = r >> 32;
        }
        mergemask(&d[H4(e)], r, mask);
    }

    if (update_flags) {
        /* Store C, clear NZV. */
        env->vfp.xregs[ARM_VFP_FPSCR] &= ~FPCR_NZCV_MASK;
        env->vfp.xregs[ARM_VFP_FPSCR] |= carry_in * FPCR_C;
    }
    mve_advance_vpt(env);
}

void HELPER(mve_vadc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, 0, carry_in, false);
}

void HELPER(mve_vsbc)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    bool carry_in = env->vfp.xregs[ARM_VFP_FPSCR] & FPCR_C;
    do_vadc(env, vd, vn, vm, -1, carry_in, false);
}

void HELPER(mve_vadci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, 0, 0, true);
}

void HELPER(mve_vsbci)(CPUARMState *env, void *vd, void *vn, void *vm)
{
    do_vadc(env, vd, vn, vm, -1, 1, true);
}
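
/*
 * Editor's note (not part of the upstream file): VSBC/VSBCI reuse the
 * add-with-carry path above by passing inv = -1, so each m[] element is
 * complemented (m ^ 0xffffffff) and, together with an initial carry-in of
 * 1 for VSBCI (or FPSCR.C for VSBC), the addition computes n - m in two's
 * complement while producing the correct carry flag.
 */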
#define DO_VCADD(OP, ESIZE, TYPE, FN0, FN1) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE r[16 / ESIZE]; \
        /* Calculate all results first to avoid overwriting inputs */ \
        for (e = 0; e < 16 / ESIZE; e++) { \
            if (!(e & 1)) { \
                r[e] = FN0(n[H##ESIZE(e)], m[H##ESIZE(e + 1)]); \
            } else { \
                r[e] = FN1(n[H##ESIZE(e)], m[H##ESIZE(e - 1)]); \
            } \
        } \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], r[e], mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_VCADD_ALL(OP, FN0, FN1) \
    DO_VCADD(OP##b, 1, int8_t, FN0, FN1) \
    DO_VCADD(OP##h, 2, int16_t, FN0, FN1) \
    DO_VCADD(OP##w, 4, int32_t, FN0, FN1)

DO_VCADD_ALL(vcadd90, DO_SUB, DO_ADD)
DO_VCADD_ALL(vcadd270, DO_ADD, DO_SUB)
DO_VCADD_ALL(vhcadd90, do_vhsub_s, do_vhadd_s)
DO_VCADD_ALL(vhcadd270, do_vhadd_s, do_vhsub_s)
static inline int32_t do_sat_bhw(int64_t val, int64_t min, int64_t max, bool *s)
{
    if (val > max) {
        *s = true;
        return max;
    } else if (val < min) {
        *s = true;
        return min;
    }
    return val;
}

#define DO_SQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, INT8_MIN, INT8_MAX, s)
#define DO_SQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, INT16_MIN, INT16_MAX, s)
#define DO_SQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, INT32_MIN, INT32_MAX, s)

#define DO_UQADD_B(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT8_MAX, s)
#define DO_UQADD_H(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT16_MAX, s)
#define DO_UQADD_W(n, m, s) do_sat_bhw((int64_t)n + m, 0, UINT32_MAX, s)

#define DO_SQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, INT8_MIN, INT8_MAX, s)
#define DO_SQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, INT16_MIN, INT16_MAX, s)
#define DO_SQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, INT32_MIN, INT32_MAX, s)

#define DO_UQSUB_B(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT8_MAX, s)
#define DO_UQSUB_H(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT16_MAX, s)
#define DO_UQSUB_W(n, m, s) do_sat_bhw((int64_t)n - m, 0, UINT32_MAX, s)

/*
 * For QDMULH and QRDMULH we simplify "double and shift by esize" into
 * "shift by esize-1", adjusting the QRDMULH rounding constant to match.
 */
#define DO_QDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m) >> 7, \
                                        INT8_MIN, INT8_MAX, s)
#define DO_QDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m) >> 15, \
                                        INT16_MIN, INT16_MAX, s)
#define DO_QDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m) >> 31, \
                                        INT32_MIN, INT32_MAX, s)

#define DO_QRDMULH_B(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 6)) >> 7, \
                                         INT8_MIN, INT8_MAX, s)
#define DO_QRDMULH_H(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 14)) >> 15, \
                                         INT16_MIN, INT16_MAX, s)
#define DO_QRDMULH_W(n, m, s) do_sat_bhw(((int64_t)n * m + (1 << 30)) >> 31, \
                                         INT32_MIN, INT32_MAX, s)

DO_2OP_SAT(vqdmulhb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT(vqdmulhh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT(vqdmulhw, 4, int32_t, DO_QDMULH_W)

DO_2OP_SAT(vqrdmulhb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT(vqrdmulhh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT(vqrdmulhw, 4, int32_t, DO_QRDMULH_W)
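
/*
 * Worked example (editor's illustration, not part of the upstream file):
 * in Q15 terms DO_QDMULH_H(0x4000, 0x4000) computes
 * (0x4000 * 0x4000) >> 15 == 0x2000, i.e. 0.5 * 0.5 == 0.25. The one
 * case that saturates is -1.0 * -1.0: DO_QDMULH_H(-32768, -32768) yields
 * 0x40000000 >> 15 == 32768, which exceeds INT16_MAX, so the result is
 * clamped to 0x7fff and FPSCR.QC is set.
 */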
DO_2OP_SAT(vqaddub, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT(vqadduh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT(vqadduw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT(vqaddsb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT(vqaddsh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT(vqaddsw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT(vqsubub, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT(vqsubuh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT(vqsubuw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT(vqsubsb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT(vqsubsh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT(vqsubsw, 4, int32_t, DO_SQSUB_W)

/*
 * This wrapper fixes up the impedance mismatch between do_sqrshl_bhs()
 * and friends wanting a uint32_t* sat and our needing a bool*.
 */
#define WRAP_QRSHL_HELPER(FN, N, M, ROUND, satp) \
    ({ \
        uint32_t su32 = 0; \
        typeof(N) r = FN(N, (int8_t)(M), sizeof(N) * 8, ROUND, &su32); \
        if (su32) { \
            *satp = true; \
        } \
        r; \
    })

#define DO_SQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, false, satp)
#define DO_UQSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, false, satp)
#define DO_SQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_sqrshl_bhs, N, M, true, satp)
#define DO_UQRSHL_OP(N, M, satp) \
    WRAP_QRSHL_HELPER(do_uqrshl_bhs, N, M, true, satp)

DO_2OP_SAT_S(vqshls, DO_SQSHL_OP)
DO_2OP_SAT_U(vqshlu, DO_UQSHL_OP)
DO_2OP_SAT_S(vqrshls, DO_SQRSHL_OP)
DO_2OP_SAT_U(vqrshlu, DO_UQRSHL_OP)
/*
 * Multiply add dual returning high half
 * The 'FN' here takes four inputs A, B, C, D, a 0/1 indicator of
 * whether to add the rounding constant, and the pointer to the
 * saturation flag, and should do "(A * B + C * D) * 2 + rounding constant",
 * saturate to twice the input size and return the high half; or
 * (A * B - C * D) etc for VQDMLSDH.
 */
#define DO_VQDMLADH_OP(OP, ESIZE, TYPE, XCHG, ROUND, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                void *vm) \
    { \
        TYPE *d = vd, *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            if ((e & 1) == XCHG) { \
                TYPE r = FN(n[H##ESIZE(e)], \
                            m[H##ESIZE(e - XCHG)], \
                            n[H##ESIZE(e + (1 - 2 * XCHG))], \
                            m[H##ESIZE(e + (1 - XCHG))], \
                            ROUND, &sat); \
                mergemask(&d[H##ESIZE(e)], r, mask); \
                qc |= sat & mask & 1; \
            } \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }
static int8_t do_vqdmladh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmladh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b + (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmladh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /*
     * Architecturally we should do the entire add, double, round
     * and then check for saturation. We do three saturating adds,
     * but we need to be careful about the order. If the first
     * m1 + m2 saturates then it's impossible for the *2+rc to
     * bring it back into the non-saturated range. However, if
     * m1 + m2 is negative then it's possible that doing the doubling
     * would take the intermediate result below INT64_MIN and the
     * addition of the rounding constant then brings it back in range.
     * So we add half the rounding constant before doubling rather
     * than adding the rounding constant after the doubling.
     */
    if (sadd64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
static int8_t do_vqdmlsdh_b(int8_t a, int8_t b, int8_t c, int8_t d,
                            int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 7);
    return do_sat_bhw(r, INT16_MIN, INT16_MAX, sat) >> 8;
}

static int16_t do_vqdmlsdh_h(int16_t a, int16_t b, int16_t c, int16_t d,
                             int round, bool *sat)
{
    int64_t r = ((int64_t)a * b - (int64_t)c * d) * 2 + (round << 15);
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat) >> 16;
}

static int32_t do_vqdmlsdh_w(int32_t a, int32_t b, int32_t c, int32_t d,
                             int round, bool *sat)
{
    int64_t m1 = (int64_t)a * b;
    int64_t m2 = (int64_t)c * d;
    int64_t r;
    /* The same ordering issue as in do_vqdmladh_w applies here too */
    if (ssub64_overflow(m1, m2, &r) ||
        sadd64_overflow(r, (round << 30), &r) ||
        sadd64_overflow(r, r, &r)) {
        *sat = true;
        return r < 0 ? INT32_MAX : INT32_MIN;
    }
    return r >> 32;
}
DO_VQDMLADH_OP(vqdmladhb, 1, int8_t, 0, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhh, 2, int16_t, 0, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhw, 4, int32_t, 0, 0, do_vqdmladh_w)
DO_VQDMLADH_OP(vqdmladhxb, 1, int8_t, 1, 0, do_vqdmladh_b)
DO_VQDMLADH_OP(vqdmladhxh, 2, int16_t, 1, 0, do_vqdmladh_h)
DO_VQDMLADH_OP(vqdmladhxw, 4, int32_t, 1, 0, do_vqdmladh_w)

DO_VQDMLADH_OP(vqrdmladhb, 1, int8_t, 0, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhh, 2, int16_t, 0, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhw, 4, int32_t, 0, 1, do_vqdmladh_w)
DO_VQDMLADH_OP(vqrdmladhxb, 1, int8_t, 1, 1, do_vqdmladh_b)
DO_VQDMLADH_OP(vqrdmladhxh, 2, int16_t, 1, 1, do_vqdmladh_h)
DO_VQDMLADH_OP(vqrdmladhxw, 4, int32_t, 1, 1, do_vqdmladh_w)

DO_VQDMLADH_OP(vqdmlsdhb, 1, int8_t, 0, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhh, 2, int16_t, 0, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhw, 4, int32_t, 0, 0, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqdmlsdhxb, 1, int8_t, 1, 0, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqdmlsdhxh, 2, int16_t, 1, 0, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqdmlsdhxw, 4, int32_t, 1, 0, do_vqdmlsdh_w)

DO_VQDMLADH_OP(vqrdmlsdhb, 1, int8_t, 0, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhh, 2, int16_t, 0, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhw, 4, int32_t, 0, 1, do_vqdmlsdh_w)
DO_VQDMLADH_OP(vqrdmlsdhxb, 1, int8_t, 1, 1, do_vqdmlsdh_b)
DO_VQDMLADH_OP(vqrdmlsdhxh, 2, int16_t, 1, 1, do_vqdmlsdh_h)
DO_VQDMLADH_OP(vqrdmlsdhxw, 4, int32_t, 1, 1, do_vqdmlsdh_w)
#define DO_2OP_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m), mask); \
        } \
        mve_advance_vpt(env); \
    }

#define DO_2OP_SAT_SCALAR(OP, ESIZE, TYPE, FN) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        TYPE *d = vd, *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        bool qc = false; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            bool sat = false; \
            mergemask(&d[H##ESIZE(e)], FN(n[H##ESIZE(e)], m, &sat), \
                      mask); \
            qc |= sat & mask & 1; \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

/* provide unsigned 2-op scalar helpers for all sizes */
#define DO_2OP_SCALAR_U(OP, FN) \
    DO_2OP_SCALAR(OP##b, 1, uint8_t, FN) \
    DO_2OP_SCALAR(OP##h, 2, uint16_t, FN) \
    DO_2OP_SCALAR(OP##w, 4, uint32_t, FN)
#define DO_2OP_SCALAR_S(OP, FN) \
    DO_2OP_SCALAR(OP##b, 1, int8_t, FN) \
    DO_2OP_SCALAR(OP##h, 2, int16_t, FN) \
    DO_2OP_SCALAR(OP##w, 4, int32_t, FN)
DO_2OP_SCALAR_U(vadd_scalar, DO_ADD)
DO_2OP_SCALAR_U(vsub_scalar, DO_SUB)
DO_2OP_SCALAR_U(vmul_scalar, DO_MUL)
DO_2OP_SCALAR_S(vhadds_scalar, do_vhadd_s)
DO_2OP_SCALAR_U(vhaddu_scalar, do_vhadd_u)
DO_2OP_SCALAR_S(vhsubs_scalar, do_vhsub_s)
DO_2OP_SCALAR_U(vhsubu_scalar, do_vhsub_u)

DO_2OP_SAT_SCALAR(vqaddu_scalarb, 1, uint8_t, DO_UQADD_B)
DO_2OP_SAT_SCALAR(vqaddu_scalarh, 2, uint16_t, DO_UQADD_H)
DO_2OP_SAT_SCALAR(vqaddu_scalarw, 4, uint32_t, DO_UQADD_W)
DO_2OP_SAT_SCALAR(vqadds_scalarb, 1, int8_t, DO_SQADD_B)
DO_2OP_SAT_SCALAR(vqadds_scalarh, 2, int16_t, DO_SQADD_H)
DO_2OP_SAT_SCALAR(vqadds_scalarw, 4, int32_t, DO_SQADD_W)

DO_2OP_SAT_SCALAR(vqsubu_scalarb, 1, uint8_t, DO_UQSUB_B)
DO_2OP_SAT_SCALAR(vqsubu_scalarh, 2, uint16_t, DO_UQSUB_H)
DO_2OP_SAT_SCALAR(vqsubu_scalarw, 4, uint32_t, DO_UQSUB_W)
DO_2OP_SAT_SCALAR(vqsubs_scalarb, 1, int8_t, DO_SQSUB_B)
DO_2OP_SAT_SCALAR(vqsubs_scalarh, 2, int16_t, DO_SQSUB_H)
DO_2OP_SAT_SCALAR(vqsubs_scalarw, 4, int32_t, DO_SQSUB_W)

DO_2OP_SAT_SCALAR(vqdmulh_scalarb, 1, int8_t, DO_QDMULH_B)
DO_2OP_SAT_SCALAR(vqdmulh_scalarh, 2, int16_t, DO_QDMULH_H)
DO_2OP_SAT_SCALAR(vqdmulh_scalarw, 4, int32_t, DO_QDMULH_W)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarb, 1, int8_t, DO_QRDMULH_B)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarh, 2, int16_t, DO_QRDMULH_H)
DO_2OP_SAT_SCALAR(vqrdmulh_scalarw, 4, int32_t, DO_QRDMULH_W)
/*
 * Long saturating scalar ops. As with DO_2OP_L, TYPE and H are for the
 * input (smaller) type and LESIZE, LTYPE, LH for the output (long) type.
 * SATMASK specifies which bits of the predicate mask matter for determining
 * whether to propagate a saturation indication into FPSCR.QC -- for
 * the 16x16->32 case we must check only the bit corresponding to the T or B
 * half that we used, but for the 32x32->64 case we propagate if the mask
 * bit is set for either half.
 */
#define DO_2OP_SAT_SCALAR_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                uint32_t rm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn; \
        TYPE m = rm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            LTYPE r = FN((LTYPE)n[H##ESIZE(le * 2 + TOP)], m, &sat); \
            mergemask(&d[H##LESIZE(le)], r, mask); \
            qc |= sat && (mask & SATMASK); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }
static inline int32_t do_qdmullh(int16_t n, int16_t m, bool *sat)
{
    int64_t r = ((int64_t)n * m) * 2;
    return do_sat_bhw(r, INT32_MIN, INT32_MAX, sat);
}

static inline int64_t do_qdmullw(int32_t n, int32_t m, bool *sat)
{
    /* The multiply can't overflow, but the doubling might */
    int64_t r = (int64_t)n * m;
    if (r > INT64_MAX / 2) {
        *sat = true;
        return INT64_MAX;
    } else if (r < INT64_MIN / 2) {
        *sat = true;
        return INT64_MIN;
    } else {
        return r * 2;
    }
}

#define SATMASK16B 1
#define SATMASK16T (1 << 2)
#define SATMASK32 ((1 << 4) | 1)

DO_2OP_SAT_SCALAR_L(vqdmullb_scalarh, 0, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16B)
DO_2OP_SAT_SCALAR_L(vqdmullb_scalarw, 0, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarh, 1, 2, int16_t, 4, int32_t, \
                    do_qdmullh, SATMASK16T)
DO_2OP_SAT_SCALAR_L(vqdmullt_scalarw, 1, 4, int32_t, 8, int64_t, \
                    do_qdmullw, SATMASK32)
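
/*
 * Saturation example (editor's illustration, not part of the upstream
 * file): do_qdmullw(INT32_MIN, INT32_MIN) computes a product of 2^62,
 * which is greater than INT64_MAX / 2, so the doubled result is clamped
 * to INT64_MAX and *sat is set. For the 32x32->64 forms, SATMASK32
 * checks predicate bits 0 and 4, i.e. QC is set if either 32-bit half of
 * the destination element is predicated on; the 16x16->32 forms check
 * only the single bit for the T or B half actually used.
 */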
/*
 * Long saturating ops
 */
#define DO_2OP_SAT_L(OP, TOP, ESIZE, TYPE, LESIZE, LTYPE, FN, SATMASK) \
    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd, void *vn, \
                                void *vm) \
    { \
        LTYPE *d = vd; \
        TYPE *n = vn, *m = vm; \
        uint16_t mask = mve_element_mask(env); \
        unsigned le; \
        bool qc = false; \
        for (le = 0; le < 16 / LESIZE; le++, mask >>= LESIZE) { \
            bool sat = false; \
            LTYPE op1 = n[H##ESIZE(le * 2 + TOP)]; \
            LTYPE op2 = m[H##ESIZE(le * 2 + TOP)]; \
            mergemask(&d[H##LESIZE(le)], FN(op1, op2, &sat), mask); \
            qc |= sat && (mask & SATMASK); \
        } \
        if (qc) { \
            env->vfp.qc[0] = qc; \
        } \
        mve_advance_vpt(env); \
    }

DO_2OP_SAT_L(vqdmullbh, 0, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16B)
DO_2OP_SAT_L(vqdmullbw, 0, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
DO_2OP_SAT_L(vqdmullth, 1, 2, int16_t, 4, int32_t, do_qdmullh, SATMASK16T)
DO_2OP_SAT_L(vqdmulltw, 1, 4, int32_t, 8, int64_t, do_qdmullw, SATMASK32)
static inline uint32_t do_vbrsrb(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit8(n);
    if (m < 8) {
        n >>= 8 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrh(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit16(n);
    if (m < 16) {
        n >>= 16 - m;
    }
    return n;
}

static inline uint32_t do_vbrsrw(uint32_t n, uint32_t m)
{
    m &= 0xff;
    if (m == 0) {
        return 0;
    }
    n = revbit32(n);
    if (m < 32) {
        n >>= 32 - m;
    }
    return n;
}

DO_2OP_SCALAR(vbrsrb, 1, uint8_t, do_vbrsrb)
DO_2OP_SCALAR(vbrsrh, 2, uint16_t, do_vbrsrh)
DO_2OP_SCALAR(vbrsrw, 4, uint32_t, do_vbrsrw)
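
/*
 * Example (editor's illustration, not part of the upstream file):
 * do_vbrsrb(0x06, 3) reverses all eight bits (revbit8(0b00000110) ==
 * 0b01100000) and then shifts right by 8 - 3 == 5, giving 0b011: the
 * bottom three bits of the element appear in reversed order and all
 * higher bits of the result are zero.
 */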
/*
 * Multiply add long dual accumulate ops.
 */
#define DO_LDAV(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint64_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                if (e & 1) { \
                    a ODDACC \
                        (int64_t)n[H##ESIZE(e - 1 * XCHG)] * m[H##ESIZE(e)]; \
                } else { \
                    a EVENACC \
                        (int64_t)n[H##ESIZE(e + 1 * XCHG)] * m[H##ESIZE(e)]; \
                } \
            } \
        } \
        mve_advance_vpt(env); \
        return a; \
    }

DO_LDAV(vmlaldavsh, 2, int16_t, false, +=, +=)
DO_LDAV(vmlaldavxsh, 2, int16_t, true, +=, +=)
DO_LDAV(vmlaldavsw, 4, int32_t, false, +=, +=)
DO_LDAV(vmlaldavxsw, 4, int32_t, true, +=, +=)

DO_LDAV(vmlaldavuh, 2, uint16_t, false, +=, +=)
DO_LDAV(vmlaldavuw, 4, uint32_t, false, +=, +=)

DO_LDAV(vmlsldavsh, 2, int16_t, false, +=, -=)
DO_LDAV(vmlsldavxsh, 2, int16_t, true, +=, -=)
DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=)
DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=)
/*
 * Rounding multiply add long dual accumulate high: we must keep
 * a 72-bit internal accumulator value and return the top 64 bits.
 */
#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \
    uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
                                    void *vm, uint64_t a) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *n = vn, *m = vm; \
        Int128 acc = int128_lshift(TO128(a), 8); \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                if (e & 1) { \
                    acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \
                                            m[H##ESIZE(e)])); \
                } else { \
                    acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \
                                             m[H##ESIZE(e)])); \
                } \
                acc = int128_add(acc, int128_make64(1 << 7)); \
            } \
        } \
        mve_advance_vpt(env); \
        return int128_getlo(int128_rshift(acc, 8)); \
    }

DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64)
DO_LDAVH(vrmlaldavhxsw, 4, int32_t, true, int128_add, int128_add, int128_makes64)

DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64)

DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64)
DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64)
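
/*
 * Editor's note (not part of the upstream file): the incoming 64-bit
 * accumulator is shifted left by 8 to form the 72-bit value; each
 * predicated product is then added at its natural weight together with a
 * rounding constant of 1 << 7 (half of the extra low byte), and the final
 * right shift by 8 returns the rounded top 64 bits.
 */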
/* Vector add across vector */
#define DO_VADDV(OP, ESIZE, TYPE) \
    uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
                                    uint32_t ra) \
    { \
        uint16_t mask = mve_element_mask(env); \
        unsigned e; \
        TYPE *m = vm; \
        for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
            if (mask & 1) { \
                ra += m[H##ESIZE(e)]; \
            } \
        } \
        mve_advance_vpt(env); \
        return ra; \
    }

DO_VADDV(vaddvsb, 1, uint8_t)
DO_VADDV(vaddvsh, 2, uint16_t)
DO_VADDV(vaddvsw, 4, uint32_t)
DO_VADDV(vaddvub, 1, uint8_t)
DO_VADDV(vaddvuh, 2, uint16_t)
DO_VADDV(vaddvuw, 4, uint32_t)