target/s390x/translate_vx.inc.c

/*
 * QEMU TCG support -- s390x vector instruction translation functions
 *
 * Copyright (C) 2019 Red Hat Inc
 *
 * Authors:
 *   David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * For most instructions that use the same element size for reads and
 * writes, we can use real gvec vector expansion, which potentially uses
 * real host vector instructions. As they only work up to 64 bit elements,
 * 128 bit elements (vector is a single element) have to be handled
 * differently. Operations that are too complicated to encode via TCG ops
 * are handled via gvec ool (out-of-line) handlers.
 *
 * As soon as instructions use different element sizes for reads and writes
 * or access elements "out of their element scope", we expand them manually
 * in fancy loops, as gvec expansion does not deal with actual element
 * numbers and also does not support access to other elements.
 *
 * 128 bit elements:
 *  As we only have i32/i64, such elements have to be loaded into two
 *  i64 values and can then be processed e.g. by tcg_gen_add2_i64.
 *
 * Sizes:
 *  On s390x, the operand size (oprsz) and the maximum size (maxsz) are
 *  always 16 (128 bit). What gvec code calls "vece", s390x calls "es",
 *  a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only
 *  the 128 bit element size has to be treated in a special way (MO_64 + 1).
 *  We will use ES_* instead of MO_* for this reason in this file.
 *
 * CC handling:
 *  As gvec ool-helpers can currently not return values (besides via
 *  pointers like vectors or cpu_env), whenever we have to set the CC and
 *  can't conclude the value from the result vector, we will directly
 *  set it in "env->cc_op" and mark it as static via set_cc_static().
 *  Whenever this is done, the helper writes globals (cc_op).
 */

#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es))
#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es))
#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE)

#define ES_8    MO_8
#define ES_16   MO_16
#define ES_32   MO_32
#define ES_64   MO_64
#define ES_128  4

/* Floating-Point Format */
#define FPF_SHORT  2
#define FPF_LONG   3
#define FPF_EXT    4

static inline bool valid_vec_element(uint8_t enr, MemOp es)
{
    return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1));
}

static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
                                 MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_ld8u_i64(dst, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(dst, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(dst, cpu_env, offs);
        break;
    case ES_8 | MO_SIGN:
        tcg_gen_ld8s_i64(dst, cpu_env, offs);
        break;
    case ES_16 | MO_SIGN:
        tcg_gen_ld16s_i64(dst, cpu_env, offs);
        break;
    case ES_32 | MO_SIGN:
        tcg_gen_ld32s_i64(dst, cpu_env, offs);
        break;
    case ES_64:
    case ES_64 | MO_SIGN:
        tcg_gen_ld_i64(dst, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr,
                                 MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_ld8u_i32(dst, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_ld16u_i32(dst, cpu_env, offs);
        break;
    case ES_8 | MO_SIGN:
        tcg_gen_ld8s_i32(dst, cpu_env, offs);
        break;
    case ES_16 | MO_SIGN:
        tcg_gen_ld16s_i32(dst, cpu_env, offs);
        break;
    case ES_32:
    case ES_32 | MO_SIGN:
        tcg_gen_ld_i32(dst, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
                                  MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_st8_i64(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i64(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st32_i64(src, cpu_env, offs);
        break;
    case ES_64:
        tcg_gen_st_i64(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr,
                                  MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_st8_i32(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i32(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st_i32(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
                                    uint8_t es)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* mask off invalid parts from the element nr */
    tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);

    /* convert it to an element offset relative to cpu_env (vec_reg_offset()) */
    tcg_gen_shli_i64(tmp, tmp, es);
#ifndef HOST_WORDS_BIGENDIAN
    tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
#endif
    tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));

    /* generate the final ptr by adding cpu_env */
    tcg_gen_trunc_i64_ptr(ptr, tmp);
    tcg_gen_add_ptr(ptr, ptr, cpu_env);

    tcg_temp_free_i64(tmp);
}
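
/*
 * Worked example for the host-endian fixup above (an added illustration,
 * not part of the original code): element numbers follow big-endian order,
 * so byte element 0 is the most significant byte of the first doubleword.
 * On a little-endian host that byte lives at offset 7 of the host uint64_t,
 * and XOR-ing the byte offset with (8 - element_bytes) performs exactly
 * this swap within each doubleword: for ES_8, enr 0 -> offset 7 and
 * enr 8 -> offset 15; for ES_16, enr 0 -> offsets 6/7. On big-endian hosts
 * no fixup is needed.
 */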

#define gen_gvec_2(v1, v2, gen) \
    tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   16, 16, gen)
#define gen_gvec_2s(v1, v2, c, gen) \
    tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                    16, 16, c, gen)
#define gen_gvec_2_ool(v1, v2, data, fn) \
    tcg_gen_gvec_2_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       16, 16, data, fn)
#define gen_gvec_2i_ool(v1, v2, c, data, fn) \
    tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                        c, 16, 16, data, fn)
#define gen_gvec_2_ptr(v1, v2, ptr, data, fn) \
    tcg_gen_gvec_2_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       ptr, 16, 16, data, fn)
#define gen_gvec_3(v1, v2, v3, gen) \
    tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), 16, 16, gen)
#define gen_gvec_3_ool(v1, v2, v3, data, fn) \
    tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), 16, 16, data, fn)
#define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), ptr, 16, 16, data, fn)
#define gen_gvec_3i(v1, v2, v3, c, gen) \
    tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                    vec_full_reg_offset(v3), 16, 16, c, gen)
#define gen_gvec_4(v1, v2, v3, v4, gen) \
    tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                   16, 16, gen)
#define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \
    tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                       16, 16, data, fn)
#define gen_gvec_4_ptr(v1, v2, v3, v4, ptr, data, fn) \
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                       ptr, 16, 16, data, fn)
#define gen_gvec_dup_i64(es, v1, c) \
    tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_mov(v1, v2) \
    tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
                     16)
#define gen_gvec_dup_imm(es, v1, c) \
    tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_fn_2(fn, es, v1, v2) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      16, 16)
#define gen_gvec_fn_2i(fn, es, v1, v2, c) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      c, 16, 16)
#define gen_gvec_fn_2s(fn, es, v1, v2, s) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      s, 16, 16)
#define gen_gvec_fn_3(fn, es, v1, v2, v3) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      vec_full_reg_offset(v3), 16, 16)
#define gen_gvec_fn_4(fn, es, v1, v2, v3, v4) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16)

/*
 * Helper to carry out a 128 bit vector computation using 2 i64 values per
 * vector.
 */
typedef void (*gen_gvec128_3_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                                     TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a,
                              uint8_t b)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 dl = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 al = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();
    TCGv_i64 bl = tcg_temp_new_i64();

    read_vec_element_i64(ah, a, 0, ES_64);
    read_vec_element_i64(al, a, 1, ES_64);
    read_vec_element_i64(bh, b, 0, ES_64);
    read_vec_element_i64(bl, b, 1, ES_64);
    fn(dl, dh, al, ah, bl, bh);
    write_vec_element_i64(dh, d, 0, ES_64);
    write_vec_element_i64(dl, d, 1, ES_64);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(dl);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(al);
    tcg_temp_free_i64(bh);
    tcg_temp_free_i64(bl);
}

typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                                     TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh,
                                     TCGv_i64 cl, TCGv_i64 ch);
static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
                              uint8_t b, uint8_t c)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 dl = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 al = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();
    TCGv_i64 bl = tcg_temp_new_i64();
    TCGv_i64 ch = tcg_temp_new_i64();
    TCGv_i64 cl = tcg_temp_new_i64();

    read_vec_element_i64(ah, a, 0, ES_64);
    read_vec_element_i64(al, a, 1, ES_64);
    read_vec_element_i64(bh, b, 0, ES_64);
    read_vec_element_i64(bl, b, 1, ES_64);
    read_vec_element_i64(ch, c, 0, ES_64);
    read_vec_element_i64(cl, c, 1, ES_64);
    fn(dl, dh, al, ah, bl, bh, cl, ch);
    write_vec_element_i64(dh, d, 0, ES_64);
    write_vec_element_i64(dl, d, 1, ES_64);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(dl);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(al);
    tcg_temp_free_i64(bh);
    tcg_temp_free_i64(bl);
    tcg_temp_free_i64(ch);
    tcg_temp_free_i64(cl);
}

static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                          uint64_t b)
{
    TCGv_i64 bl = tcg_const_i64(b);
    TCGv_i64 bh = tcg_const_i64(0);

    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    tcg_temp_free_i64(bl);
    tcg_temp_free_i64(bh);
}

static DisasJumpType op_vge(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);

    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static uint64_t generate_byte_mask(uint8_t mask)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mask >> i) & 1) {
            r |= 0xffull << (i * 8);
        }
    }
    return r;
}
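
/*
 * Example (an added illustration): generate_byte_mask(0xa5) sets bytes
 * 0, 2, 5 and 7 (the set mask bits) to 0xff, yielding 0xff00ff0000ff00ff.
 * VGBM expands each of its 16 mask bits to a full byte this way, one call
 * per 64-bit half of the vector.
 */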

static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
{
    const uint16_t i2 = get_field(s, i2);

    if (i2 == (i2 & 0xff) * 0x0101) {
        /*
         * Masks for both 64 bit elements of the vector are the same.
         * Trust tcg to produce a good constant loading.
         */
        gen_gvec_dup_imm(ES_64, get_field(s, v1),
                         generate_byte_mask(i2 & 0xff));
    } else {
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8));
        write_vec_element_i64(t, get_field(s, v1), 0, ES_64);
        tcg_gen_movi_i64(t, generate_byte_mask(i2));
        write_vec_element_i64(t, get_field(s, v1), 1, ES_64);
        tcg_temp_free_i64(t);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t bits = NUM_VEC_ELEMENT_BITS(es);
    const uint8_t i2 = get_field(s, i2) & (bits - 1);
    const uint8_t i3 = get_field(s, i3) & (bits - 1);
    uint64_t mask = 0;
    int i;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* generate the mask - take care of wrapping */
    for (i = i2; ; i = (i + 1) % bits) {
        mask |= 1ull << (bits - i - 1);
        if (i == i3) {
            break;
        }
    }

    gen_gvec_dup_imm(es, get_field(s, v1), mask);
    return DISAS_NEXT;
}
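
/*
 * Illustration of the wrapping behaviour above (an added note): for
 * es == ES_8 (bits == 8), i2 == 6 and i3 == 1 the loop visits bit
 * positions 6, 7, 0, 1 and sets the corresponding most-significant-first
 * mask bits, producing 0xc3 (0b11000011), which is then replicated into
 * every element.
 */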

static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_vlr(DisasContext *s, DisasOps *o)
{
    gen_gvec_mov(get_field(s, v1), get_field(s, v2));
    return DISAS_NEXT;
}

static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    TCGv_i64 tmp;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    gen_gvec_dup_i64(es, get_field(s, v1), tmp);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_const_i64((int16_t)get_field(s, i2));
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_ptr ptr;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* fast path if we don't need the register content */
    if (!get_field(s, b2)) {
        uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);

        read_vec_element_i64(o->out, get_field(s, v3), enr, es);
        return DISAS_NEXT;
    }

    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s, v3), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_ld8u_i64(o->out, ptr, 0);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(o->out, ptr, 0);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(o->out, ptr, 0);
        break;
    case ES_64:
        tcg_gen_ld_i64(o->out, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);

    return DISAS_NEXT;
}

static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s, m3);
    uint8_t enr;
    TCGv_i64 t;

    switch (es) {
    /* rightmost sub-element of leftmost doubleword */
    case ES_8:
        enr = 7;
        break;
    case ES_16:
        enr = 3;
        break;
    case ES_32:
        enr = 1;
        break;
    case ES_64:
        enr = 0;
        break;
    /* leftmost sub-element of leftmost doubleword */
    case 6:
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            es = ES_32;
            enr = 0;
            break;
        }
        /* fallthrough */
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
    gen_gvec_dup_imm(es, get_field(s, v1), 0);
    write_vec_element_i64(t, get_field(s, v1), enr, es);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}

static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s, v3);
    uint8_t v1 = get_field(s, v1);
    TCGv_i64 t0, t1;

    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /*
     * Check for possible access exceptions by trying to load the last
     * element. The first element will be checked when it is actually
     * loaded first, below.
     */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
    tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);

    for (;; v1++) {
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 0, ES_64);
        if (v1 == v3) {
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 1, ES_64);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }

    /* Store the last element, loaded first */
    write_vec_element_i64(t0, v1, 1, ES_64);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));
    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
    TCGv_ptr a0;
    TCGv_i64 bytes;

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    bytes = tcg_temp_new_i64();
    a0 = tcg_temp_new_ptr();
    /* calculate the number of bytes until the next block boundary */
    tcg_gen_ori_i64(bytes, o->addr1, -block_size);
    tcg_gen_neg_i64(bytes, bytes);

    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, bytes);
    tcg_temp_free_i64(bytes);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}

static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_ptr ptr;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* fast path if we don't need the register content */
    if (!get_field(s, b2)) {
        uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);

        write_vec_element_i64(o->in2, get_field(s, v1), enr, es);
        return DISAS_NEXT;
    }

    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s, v1), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_st8_i64(o->in2, ptr, 0);
        break;
    case ES_16:
        tcg_gen_st16_i64(o->in2, ptr, 0);
        break;
    case ES_32:
        tcg_gen_st32_i64(o->in2, ptr, 0);
        break;
    case ES_64:
        tcg_gen_st_i64(o->in2, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);

    return DISAS_NEXT;
}

static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o)
{
    write_vec_element_i64(o->in1, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(o->in2, get_field(s, v1), 1, ES_64);
    return DISAS_NEXT;
}

static DisasJumpType op_vll(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();

    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}

static DisasJumpType op_vmr(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);
    const uint8_t es = get_field(s, m4);
    int dst_idx, src_idx;
    TCGv_i64 tmp;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    if (s->fields.op2 == 0x61) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) {
            src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vpk(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);
    const uint8_t es = get_field(s, m4);
    static gen_helper_gvec_3 * const vpk[3] = {
        gen_helper_gvec_vpk16,
        gen_helper_gvec_vpk32,
        gen_helper_gvec_vpk64,
    };
    static gen_helper_gvec_3 * const vpks[3] = {
        gen_helper_gvec_vpks16,
        gen_helper_gvec_vpks32,
        gen_helper_gvec_vpks64,
    };
    static gen_helper_gvec_3_ptr * const vpks_cc[3] = {
        gen_helper_gvec_vpks_cc16,
        gen_helper_gvec_vpks_cc32,
        gen_helper_gvec_vpks_cc64,
    };
    static gen_helper_gvec_3 * const vpkls[3] = {
        gen_helper_gvec_vpkls16,
        gen_helper_gvec_vpkls32,
        gen_helper_gvec_vpkls64,
    };
    static gen_helper_gvec_3_ptr * const vpkls_cc[3] = {
        gen_helper_gvec_vpkls_cc16,
        gen_helper_gvec_vpkls_cc32,
        gen_helper_gvec_vpkls_cc64,
    };

    if (es == ES_8 || es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0x97:
        if (get_field(s, m5) & 0x1) {
            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]);
            set_cc_static(s);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]);
        }
        break;
    case 0x95:
        if (get_field(s, m5) & 0x1) {
            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]);
            set_cc_static(s);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]);
        }
        break;
    case 0x94:
        /* If sources and destination don't overlap -> fast path */
        if (v1 != v2 && v1 != v3) {
            const uint8_t src_es = get_field(s, m4);
            const uint8_t dst_es = src_es - 1;
            TCGv_i64 tmp = tcg_temp_new_i64();
            int dst_idx, src_idx;

            for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
                src_idx = dst_idx;
                if (src_idx < NUM_VEC_ELEMENTS(src_es)) {
                    read_vec_element_i64(tmp, v2, src_idx, src_es);
                } else {
                    src_idx -= NUM_VEC_ELEMENTS(src_es);
                    read_vec_element_i64(tmp, v3, src_idx, src_es);
                }
                write_vec_element_i64(tmp, v1, dst_idx, dst_es);
            }
            tcg_temp_free_i64(tmp);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vperm(DisasContext *s, DisasOps *o)
{
    gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), get_field(s, v4),
                   0, gen_helper_gvec_vperm);
    return DISAS_NEXT;
}

static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o)
{
    const uint8_t i2 = extract32(get_field(s, m4), 2, 1);
    const uint8_t i3 = extract32(get_field(s, m4), 0, 1);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    read_vec_element_i64(t0, get_field(s, v2), i2, ES_64);
    read_vec_element_i64(t1, get_field(s, v3), i3, ES_64);
    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_vrep(DisasContext *s, DisasOps *o)
{
    const uint8_t enr = get_field(s, i2);
    const uint8_t es = get_field(s, m4);

    if (es > ES_64 || !valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s, v1)),
                         vec_reg_offset(get_field(s, v3), enr, es),
                         16, 16);
    return DISAS_NEXT;
}

static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
{
    const int64_t data = (int16_t)get_field(s, i2);
    const uint8_t es = get_field(s, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_dup_imm(es, get_field(s, v1), data);
    return DISAS_NEXT;
}

static DisasJumpType op_vsce(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);

    read_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vsel(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_4(bitsel, ES_8, get_field(s, v1),
                  get_field(s, v4), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    int idx1, idx2;
    TCGv_i64 tmp;

    switch (es) {
    case ES_8:
        idx1 = 7;
        idx2 = 15;
        break;
    case ES_16:
        idx1 = 3;
        idx2 = 7;
        break;
    case ES_32:
        idx1 = 1;
        idx2 = 3;
        break;
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), idx1, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
    read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_const_i64(16);

    /* Probe write access before actually modifying memory */
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);

    read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s, v3);
    uint8_t v1 = get_field(s, v1);
    TCGv_i64 tmp;

    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* Probe write access before actually modifying memory */
    tmp = tcg_const_i64((v3 - v1 + 1) * 16);
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);

    for (;; v1++) {
        read_vec_element_i64(tmp, v1, 0, ES_64);
        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        read_vec_element_i64(tmp, v1, 1, ES_64);
        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
        if (v1 == v3) {
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();

    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}

static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
{
    const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5;
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t src_es = get_field(s, m3);
    const uint8_t dst_es = src_es + 1;
    int dst_idx, src_idx;
    TCGv_i64 tmp;

    if (src_es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
            src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_va(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_fn_3(add, es, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
{
    const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1;
    TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr));
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* Calculate the carry into the MSB, ignoring the old MSBs */
    tcg_gen_andc_i64(t1, a, msb_mask);
    tcg_gen_andc_i64(t2, b, msb_mask);
    tcg_gen_add_i64(t1, t1, t2);
    /* Calculate the MSB without any carry into it */
    tcg_gen_xor_i64(t3, a, b);
    /* Calculate the carry out of the MSB in the MSB bit position */
    tcg_gen_and_i64(d, a, b);
    tcg_gen_and_i64(t1, t1, t3);
    tcg_gen_or_i64(d, d, t1);
    /* Isolate and shift the carry into position */
    tcg_gen_and_i64(d, d, msb_mask);
    tcg_gen_shri_i64(d, d, msb_bit_nr);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
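
/*
 * Rationale for the bit manipulation above (an added note): the carry out
 * of each element is majority(msb(a), msb(b), carry_into_msb), i.e.
 * (a & b) | ((a ^ b) & carry_in) evaluated at the MSB position. t1 holds
 * the carries into the MSBs (computed with the MSBs masked off, so
 * neighbouring element lanes cannot interfere), t3 = a ^ b supplies the
 * "exactly one MSB set" case and a & b the "both MSBs set" case.
 */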

static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    gen_acc(d, a, b, ES_8);
}

static void gen_acc16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    gen_acc(d, a, b, ES_16);
}

static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_add_i32(t, a, b);
    tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
    tcg_temp_free_i32(t);
}

static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_add_i64(t, a, b);
    tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
    tcg_temp_free_i64(t);
}

static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                         TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_add2_i64(tl, th, al, zero, bl, zero);
    tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
    tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(zero);
}
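
/*
 * Added note: the three chained add2 ops above compute the 128 bit sum
 * a + b one 64 bit half at a time, only to keep its carry-out. The final
 * add2 leaves that carry in dl, so the result is 0 or 1 as VACC requires;
 * dh is always zero.
 */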

static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fni8 = gen_acc8_i64, },
        { .fni8 = gen_acc16_i64, },
        { .fni4 = gen_acc_i32, },
        { .fni8 = gen_acc_i64, },
    };

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(gen_acc2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                        TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_const_i64(0);

    /* extract the carry only */
    tcg_gen_extract_i64(tl, cl, 0, 1);
    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    tcg_gen_add2_i64(dl, dh, dl, dh, tl, th);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
}

static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
{
    if (get_field(s, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_ac2_i64, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3),
                      get_field(s, v4));
    return DISAS_NEXT;
}

static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                          TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_andi_i64(tl, cl, 1);
    tcg_gen_add2_i64(tl, th, tl, zero, al, zero);
    tcg_gen_add2_i64(tl, th, tl, th, bl, zero);
    tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
    tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
{
    if (get_field(s, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_accc2_i64, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3),
                      get_field(s, v4));
    return DISAS_NEXT;
}

static DisasJumpType op_vn(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(and, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vnc(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(andc, ES_8, get_field(s, v1),
                  get_field(s, v2), get_field(s, v3));
    return DISAS_NEXT;
}

static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_add_i64(t0, t0, t1);
    tcg_gen_addi_i64(t0, t0, 1);
    tcg_gen_shri_i64(t0, t0, 1);
    tcg_gen_extrl_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();

    /* extending the sign by one bit is sufficient */
    tcg_gen_extract_i64(ah, al, 63, 1);
    tcg_gen_extract_i64(bh, bl, 63, 1);
    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    gen_addi2_i64(dl, dh, dl, dh, 1);
    tcg_gen_extract2_i64(dl, dl, dh, 1);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(bh);
}
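
/*
 * Added note: the average above is (a + b + 1) >> 1 computed with one
 * extra bit of precision, which cannot overflow. tcg_gen_extract2_i64
 * with an offset of 1 then selects bits 64..1 of the 128 bit pair, which
 * is exactly the required right shift by one.
 */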

static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vavg8, },
        { .fno = gen_helper_gvec_vavg16, },
        { .fni4 = gen_avg_i32, },
        { .fni8 = gen_avg_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_add_i64(t0, t0, t1);
    tcg_gen_addi_i64(t0, t0, 1);
    tcg_gen_shri_i64(t0, t0, 1);
    tcg_gen_extrl_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
    gen_addi2_i64(dl, dh, dl, dh, 1);
    tcg_gen_extract2_i64(dl, dl, dh, 1);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vavgl8, },
        { .fno = gen_helper_gvec_vavgl16, },
        { .fni4 = gen_avgl_i32, },
        { .fni8 = gen_avgl_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 sum = tcg_temp_new_i32();
    int i;

    read_vec_element_i32(sum, get_field(s, v3), 1, ES_32);
    for (i = 0; i < 4; i++) {
        read_vec_element_i32(tmp, get_field(s, v2), i, ES_32);
        tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
    }
    gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
    write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(sum);
    return DISAS_NEXT;
}
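
/*
 * Added note: tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp) performs the
 * 64 bit addition {sum:sum} + {tmp:tmp}; its high half is sum + tmp plus
 * the carry of (sum + tmp), i.e. an end-around-carry add, which is the
 * 32 bit checksum VCKSM specifies.
 */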

static DisasJumpType op_vec(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s, m3);
    const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (s->fields.op2 == 0xdb) {
        es |= MO_SIGN;
    }

    o->in1 = tcg_temp_new_i64();
    o->in2 = tcg_temp_new_i64();
    read_vec_element_i64(o->in1, get_field(s, v1), enr, es);
    read_vec_element_i64(o->in2, get_field(s, v2), enr, es);
    return DISAS_NEXT;
}

static DisasJumpType op_vc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGCond cond = s->insn->data;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_gvec_cmp(cond, es,
                     vec_full_reg_offset(get_field(s, v1)),
                     vec_full_reg_offset(get_field(s, v2)),
                     vec_full_reg_offset(get_field(s, v3)), 16, 16);
    if (get_field(s, m5) & 0x1) {
        TCGv_i64 low = tcg_temp_new_i64();
        TCGv_i64 high = tcg_temp_new_i64();

        read_vec_element_i64(high, get_field(s, v1), 0, ES_64);
        read_vec_element_i64(low, get_field(s, v1), 1, ES_64);
        gen_op_update2_cc_i64(s, CC_OP_VC, low, high);

        tcg_temp_free_i64(low);
        tcg_temp_free_i64(high);
    }
    return DISAS_NEXT;
}

static void gen_clz_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_clzi_i32(d, a, 32);
}

static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_clzi_i64(d, a, 64);
}

static DisasJumpType op_vclz(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vclz8, },
        { .fno = gen_helper_gvec_vclz16, },
        { .fni4 = gen_clz_i32, },
        { .fni8 = gen_clz_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
    return DISAS_NEXT;
}

static void gen_ctz_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_ctzi_i32(d, a, 32);
}

static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_ctzi_i64(d, a, 64);
}

static DisasJumpType op_vctz(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vctz8, },
        { .fno = gen_helper_gvec_vctz16, },
        { .fni4 = gen_ctz_i32, },
        { .fni8 = gen_ctz_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vx(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(xor, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vgfm8, },
        { .fno = gen_helper_gvec_vgfm16, },
        { .fno = gen_helper_gvec_vgfm32, },
        { .fno = gen_helper_gvec_vgfm64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    static const GVecGen4 g[4] = {
        { .fno = gen_helper_gvec_vgfma8, },
        { .fno = gen_helper_gvec_vgfma16, },
        { .fno = gen_helper_gvec_vgfma32, },
        { .fno = gen_helper_gvec_vgfma64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_4(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), get_field(s, v4), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vlc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_fn_2(neg, es, get_field(s, v1), get_field(s, v2));
    return DISAS_NEXT;
}

static DisasJumpType op_vlp(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_fn_2(abs, es, get_field(s, v1), get_field(s, v2));
    return DISAS_NEXT;
}

static DisasJumpType op_vmx(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);
    const uint8_t es = get_field(s, m4);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xff:
        gen_gvec_fn_3(smax, es, v1, v2, v3);
        break;
    case 0xfd:
        gen_gvec_fn_3(umax, es, v1, v2, v3);
        break;
    case 0xfe:
        gen_gvec_fn_3(smin, es, v1, v2, v3);
        break;
    case 0xfc:
        gen_gvec_fn_3(umin, es, v1, v2, v3);
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}

static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    tcg_gen_mul_i32(t0, a, b);
    tcg_gen_add_i32(d, t0, c);

    tcg_temp_free_i32(t0);
}

static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_ext_i32_i64(t2, c);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_add_i64(t0, t0, t2);
    tcg_gen_extrh_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_extu_i32_i64(t2, c);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_add_i64(t0, t0, t2);
    tcg_gen_extrh_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    static const GVecGen4 g_vmal[3] = {
        { .fno = gen_helper_gvec_vmal8, },
        { .fno = gen_helper_gvec_vmal16, },
        { .fni4 = gen_mal_i32, },
    };
    static const GVecGen4 g_vmah[3] = {
        { .fno = gen_helper_gvec_vmah8, },
        { .fno = gen_helper_gvec_vmah16, },
        { .fni4 = gen_mah_i32, },
    };
    static const GVecGen4 g_vmalh[3] = {
        { .fno = gen_helper_gvec_vmalh8, },
        { .fno = gen_helper_gvec_vmalh16, },
        { .fni4 = gen_malh_i32, },
    };
    static const GVecGen4 g_vmae[3] = {
        { .fno = gen_helper_gvec_vmae8, },
        { .fno = gen_helper_gvec_vmae16, },
        { .fno = gen_helper_gvec_vmae32, },
    };
    static const GVecGen4 g_vmale[3] = {
        { .fno = gen_helper_gvec_vmale8, },
        { .fno = gen_helper_gvec_vmale16, },
        { .fno = gen_helper_gvec_vmale32, },
    };
    static const GVecGen4 g_vmao[3] = {
        { .fno = gen_helper_gvec_vmao8, },
        { .fno = gen_helper_gvec_vmao16, },
        { .fno = gen_helper_gvec_vmao32, },
    };
    static const GVecGen4 g_vmalo[3] = {
        { .fno = gen_helper_gvec_vmalo8, },
        { .fno = gen_helper_gvec_vmalo16, },
        { .fno = gen_helper_gvec_vmalo32, },
    };
    const GVecGen4 *fn;

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xaa:
        fn = &g_vmal[es];
        break;
    case 0xab:
        fn = &g_vmah[es];
        break;
    case 0xa9:
        fn = &g_vmalh[es];
        break;
    case 0xae:
        fn = &g_vmae[es];
        break;
    case 0xac:
        fn = &g_vmale[es];
        break;
    case 0xaf:
        fn = &g_vmao[es];
        break;
    case 0xad:
        fn = &g_vmalo[es];
        break;
    default:
        g_assert_not_reached();
    }

    gen_gvec_4(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), get_field(s, v4), fn);
    return DISAS_NEXT;
}

static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_muls2_i32(t, d, a, b);
    tcg_temp_free_i32(t);
}

static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(t, d, a, b);
    tcg_temp_free_i32(t);
}

static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g_vmh[3] = {
        { .fno = gen_helper_gvec_vmh8, },
        { .fno = gen_helper_gvec_vmh16, },
        { .fni4 = gen_mh_i32, },
    };
    static const GVecGen3 g_vmlh[3] = {
        { .fno = gen_helper_gvec_vmlh8, },
        { .fno = gen_helper_gvec_vmlh16, },
        { .fni4 = gen_mlh_i32, },
    };
    static const GVecGen3 g_vme[3] = {
        { .fno = gen_helper_gvec_vme8, },
        { .fno = gen_helper_gvec_vme16, },
        { .fno = gen_helper_gvec_vme32, },
    };
    static const GVecGen3 g_vmle[3] = {
        { .fno = gen_helper_gvec_vmle8, },
        { .fno = gen_helper_gvec_vmle16, },
        { .fno = gen_helper_gvec_vmle32, },
    };
    static const GVecGen3 g_vmo[3] = {
        { .fno = gen_helper_gvec_vmo8, },
        { .fno = gen_helper_gvec_vmo16, },
        { .fno = gen_helper_gvec_vmo32, },
    };
    static const GVecGen3 g_vmlo[3] = {
        { .fno = gen_helper_gvec_vmlo8, },
        { .fno = gen_helper_gvec_vmlo16, },
        { .fno = gen_helper_gvec_vmlo32, },
    };
    const GVecGen3 *fn;

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xa2:
        gen_gvec_fn_3(mul, es, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    case 0xa3:
        fn = &g_vmh[es];
        break;
    case 0xa1:
        fn = &g_vmlh[es];
        break;
    case 0xa6:
        fn = &g_vme[es];
        break;
    case 0xa4:
        fn = &g_vmle[es];
        break;
    case 0xa7:
        fn = &g_vmo[es];
        break;
    case 0xa5:
        fn = &g_vmlo[es];
        break;
    default:
        g_assert_not_reached();
    }

    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), fn);
    return DISAS_NEXT;
}

static DisasJumpType op_vnn(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(nand, ES_8, get_field(s, v1),
                  get_field(s, v2), get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vno(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(nor, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vnx(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(eqv, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vo(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(or, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_voc(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(orc, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vpopct8, },
        { .fno = gen_helper_gvec_vpopct16, },
        { .fni4 = tcg_gen_ctpop_i32, },
        { .fni8 = tcg_gen_ctpop_i64, },
    };

    if (es > ES_64 || (es != ES_8 && !s390_has_feat(S390_FEAT_VECTOR_ENH))) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
    return DISAS_NEXT;
}

static void gen_rll_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    tcg_gen_andi_i32(t0, b, 31);
    tcg_gen_rotl_i32(d, a, t0);
    tcg_temp_free_i32(t0);
}

static void gen_rll_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t0, b, 63);
    tcg_gen_rotl_i64(d, a, t0);
    tcg_temp_free_i64(t0);
}

static DisasJumpType op_verllv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_verllv8, },
        { .fno = gen_helper_gvec_verllv16, },
        { .fni4 = gen_rll_i32, },
        { .fni8 = gen_rll_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_verll(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen2s g[4] = {
        { .fno = gen_helper_gvec_verll8, },
        { .fno = gen_helper_gvec_verll16, },
        { .fni4 = gen_rll_i32, },
        { .fni8 = gen_rll_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2s(get_field(s, v1), get_field(s, v3), o->addr1,
                &g[es]);
    return DISAS_NEXT;
}

static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_rotli_i32(t, a, c & 31);
    tcg_gen_and_i32(t, t, b);
    tcg_gen_andc_i32(d, d, b);
    tcg_gen_or_i32(d, d, t);

    tcg_temp_free_i32(t);
}

static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_rotli_i64(t, a, c & 63);
    tcg_gen_and_i64(t, t, b);
    tcg_gen_andc_i64(d, d, b);
    tcg_gen_or_i64(d, d, t);

    tcg_temp_free_i64(t);
}

static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    const uint8_t i4 = get_field(s, i4) & (NUM_VEC_ELEMENT_BITS(es) - 1);
    static const GVecGen3i g[4] = {
        { .fno = gen_helper_gvec_verim8, },
        { .fno = gen_helper_gvec_verim16, },
        { .fni4 = gen_rim_i32,
          .load_dest = true, },
        { .fni8 = gen_rim_i64,
          .load_dest = true, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3i(get_field(s, v1), get_field(s, v2),
                get_field(s, v3), i4, &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0x70:
        gen_gvec_fn_3(shlv, es, v1, v2, v3);
        break;
    case 0x7a:
        gen_gvec_fn_3(sarv, es, v1, v2, v3);
        break;
    case 0x78:
        gen_gvec_fn_3(shrv, es, v1, v2, v3);
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}

static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t d2 = get_field(s, d2) & (NUM_VEC_ELEMENT_BITS(es) - 1);
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v3 = get_field(s, v3);
    TCGv_i32 shift;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (likely(!get_field(s, b2))) {
        switch (s->fields.op2) {
        case 0x30:
            gen_gvec_fn_2i(shli, es, v1, v3, d2);
            break;
        case 0x3a:
            gen_gvec_fn_2i(sari, es, v1, v3, d2);
            break;
        case 0x38:
            gen_gvec_fn_2i(shri, es, v1, v3, d2);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        shift = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(shift, o->addr1);
        tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1);
        switch (s->fields.op2) {
        case 0x30:
            gen_gvec_fn_2s(shls, es, v1, v3, shift);
            break;
        case 0x3a:
            gen_gvec_fn_2s(sars, es, v1, v3, shift);
            break;
        case 0x38:
            gen_gvec_fn_2s(shrs, es, v1, v3, shift);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_i32(shift);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vsl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
    if (s->fields.op2 == 0x74) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
                    shift, 0, gen_helper_gvec_vsl);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}

static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o)
{
    const uint8_t i4 = get_field(s, i4) & 0xf;
    const int left_shift = (i4 & 7) * 8;
    const int right_shift = 64 - left_shift;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if ((i4 & 8) == 0) {
        read_vec_element_i64(t0, get_field(s, v2), 0, ES_64);
        read_vec_element_i64(t1, get_field(s, v2), 1, ES_64);
        read_vec_element_i64(t2, get_field(s, v3), 0, ES_64);
    } else {
        read_vec_element_i64(t0, get_field(s, v2), 1, ES_64);
        read_vec_element_i64(t1, get_field(s, v3), 0, ES_64);
        read_vec_element_i64(t2, get_field(s, v3), 1, ES_64);
    }
    tcg_gen_extract2_i64(t0, t1, t0, right_shift);
    tcg_gen_extract2_i64(t1, t2, t1, right_shift);
    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}
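
/*
 * Illustration (an added note): for i4 == 3, left_shift is 24 and
 * right_shift 40. The two extract2 ops assemble (v2:v3) << 24 one
 * doubleword at a time: tcg_gen_extract2_i64(t0, t1, t0, 40) yields
 * (t0 << 24) | (t1 >> 40). For i4 >= 8 the byte shift crosses a
 * doubleword boundary, so the inputs are simply taken one doubleword
 * later before applying the same shift.
 */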
static DisasJumpType op_vsra(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
    if (s->fields.op2 == 0x7e) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
                    shift, 0, gen_helper_gvec_vsra);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}
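
/* VECTOR SHIFT RIGHT LOGICAL (VSRL, VSRLB): analogous to VSL above. */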
static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
    if (s->fields.op2 == 0x7c) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
                    shift, 0, gen_helper_gvec_vsrl);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}
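
/*
 * VECTOR SUBTRACT: plain gvec subtraction for element sizes up to 64
 * bit; 128-bit elements are expanded via tcg_gen_sub2_i64 on i64 pairs.
 */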
static DisasJumpType op_vs(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_fn_3(sub, es, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}
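
/*
 * The s390x borrow indication is carry-style: 1 if no borrow occurred
 * (a >= b unsigned), 0 if it did - hence a plain unsigned >= setcond.
 */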
static void gen_scbi_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, d, a, b);
}

static void gen_scbi_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_setcond_i64(TCG_COND_GEU, d, a, b);
}
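
/*
 * 128-bit subtract-compute-borrow: propagate the borrow through both
 * doubleword halves. th ends up as -1 (borrow) or 0 (no borrow), so
 * adding 1 yields the 0/1 indication stored in the low doubleword.
 */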
static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                          TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_sub2_i64(tl, th, al, zero, bl, zero);
    tcg_gen_andi_i64(th, th, 1);
    tcg_gen_sub2_i64(tl, th, ah, zero, th, zero);
    tcg_gen_sub2_i64(tl, th, tl, th, bh, zero);
    /* "invert" the result: -1 -> 0; 0 -> 1 */
    tcg_gen_addi_i64(dl, th, 1);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(zero);
}
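
/* VECTOR SUBTRACT COMPUTE BORROW INDICATION */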
static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vscbi8, },
        { .fno = gen_helper_gvec_vscbi16, },
        { .fni4 = gen_scbi_i32, },
        { .fni8 = gen_scbi_i64, },
    };

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(gen_scbi2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}
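
/*
 * a - b - borrow is computed as a + ~b + c, where c is the incoming
 * carry-style borrow indication (1 = no borrow).
 */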
static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                         TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_temp_new_i64();

    tcg_gen_not_i64(tl, bl);
    tcg_gen_not_i64(th, bh);
    gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
}
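
/* VECTOR SUBTRACT WITH BORROW INDICATION, 128-bit elements only. */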
static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o)
{
    if (get_field(s, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_sbi2_i64, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3),
                      get_field(s, v4));
    return DISAS_NEXT;
}
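
/*
 * Like gen_sbi2_i64(), but computing the resulting borrow indication
 * instead of the difference (add-with-carry, compute-carry expansion).
 */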
static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                           TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();

    tcg_gen_not_i64(tl, bl);
    tcg_gen_not_i64(th, bh);
    gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
}
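
/* VECTOR SUBTRACT WITH BORROW COMPUTE BORROW INDICATION */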
static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o)
{
    if (get_field(s, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3),
                      get_field(s, v4));
    return DISAS_NEXT;
}
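
/*
 * VECTOR SUM ACROSS DOUBLEWORD: sum up each half of v2 into a
 * doubleword of v1, seeding the sum with the rightmost element of the
 * corresponding half of v3.
 */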
static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_i64 sum, tmp;
    uint8_t dst_idx;

    if (es == ES_8 || es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sum = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();
    for (dst_idx = 0; dst_idx < 2; dst_idx++) {
        uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2;
        const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1;

        read_vec_element_i64(sum, get_field(s, v3), max_idx, es);
        for (; idx <= max_idx; idx++) {
            read_vec_element_i64(tmp, get_field(s, v2), idx, es);
            tcg_gen_add_i64(sum, sum, tmp);
        }
        write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64);
    }
    tcg_temp_free_i64(sum);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
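
/*
 * VECTOR SUM ACROSS QUADWORD: accumulate all elements of v2 plus the
 * rightmost element of v3 into one 128-bit sum via add-with-carry on
 * an i64 pair.
 */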
static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1;
    TCGv_i64 sumh, suml, zero, tmpl;
    uint8_t idx;

    if (es < ES_32 || es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sumh = tcg_const_i64(0);
    suml = tcg_temp_new_i64();
    zero = tcg_const_i64(0);
    tmpl = tcg_temp_new_i64();

    read_vec_element_i64(suml, get_field(s, v3), max_idx, es);
    for (idx = 0; idx <= max_idx; idx++) {
        read_vec_element_i64(tmpl, get_field(s, v2), idx, es);
        tcg_gen_add2_i64(suml, sumh, suml, sumh, tmpl, zero);
    }
    write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(suml, get_field(s, v1), 1, ES_64);

    tcg_temp_free_i64(sumh);
    tcg_temp_free_i64(suml);
    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(tmpl);
    return DISAS_NEXT;
}
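
/*
 * VECTOR SUM ACROSS WORD: like VSUMG above, but summing each quarter
 * of v2 into one of the four words of v1.
 */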
static DisasJumpType op_vsum(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_i32 sum, tmp;
    uint8_t dst_idx;

    if (es > ES_16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sum = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();
    for (dst_idx = 0; dst_idx < 4; dst_idx++) {
        uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4;
        const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1;

        read_vec_element_i32(sum, get_field(s, v3), max_idx, es);
        for (; idx <= max_idx; idx++) {
            read_vec_element_i32(tmp, get_field(s, v2), idx, es);
            tcg_gen_add_i32(sum, sum, tmp);
        }
        write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32);
    }
    tcg_temp_free_i32(sum);
    tcg_temp_free_i32(tmp);
    return DISAS_NEXT;
}
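
/* VECTOR TEST UNDER MASK: the out-of-line helper sets the CC itself. */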
static DisasJumpType op_vtm(DisasContext *s, DisasOps *o)
{
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                   cpu_env, 0, gen_helper_gvec_vtm);
    set_cc_static(s);
    return DISAS_NEXT;
}
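
/*
 * VECTOR FIND ANY ELEMENT EQUAL: one out-of-line helper per element
 * size; the CS bit (LSB of m5) selects the CC-setting variants.
 */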
static DisasJumpType op_vfae(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfae8,
        gen_helper_gvec_vfae16,
        gen_helper_gvec_vfae32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfae_cc8,
        gen_helper_gvec_vfae_cc16,
        gen_helper_gvec_vfae_cc32,
    };
    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}
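
/* VECTOR FIND ELEMENT EQUAL: same dispatch scheme as VFAE above. */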
static DisasJumpType op_vfee(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfee8,
        gen_helper_gvec_vfee16,
        gen_helper_gvec_vfee32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfee_cc8,
        gen_helper_gvec_vfee_cc16,
        gen_helper_gvec_vfee_cc32,
    };

    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}
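
/* VECTOR FIND ELEMENT NOT EQUAL: same dispatch scheme as VFAE above. */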
static DisasJumpType op_vfene(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfene8,
        gen_helper_gvec_vfene16,
        gen_helper_gvec_vfene32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfene_cc8,
        gen_helper_gvec_vfene_cc16,
        gen_helper_gvec_vfene_cc32,
    };

    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}
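
/* VECTOR ISOLATE STRING */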
static DisasJumpType op_vistr(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_2 * const g[3] = {
        gen_helper_gvec_vistr8,
        gen_helper_gvec_vistr16,
        gen_helper_gvec_vistr32,
    };
    static gen_helper_gvec_2_ptr * const g_cc[3] = {
        gen_helper_gvec_vistr_cc8,
        gen_helper_gvec_vistr_cc16,
        gen_helper_gvec_vistr_cc32,
    };

    if (es > ES_32 || m5 & ~0x1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0,
                       g[es]);
    }
    return DISAS_NEXT;
}
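
/*
 * VECTOR STRING RANGE COMPARE: four helper variants per element size,
 * selected by the CS (CC-setting) and RT (result-type) bits of m6.
 */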
static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    const uint8_t m6 = get_field(s, m6);
    static gen_helper_gvec_4 * const g[3] = {
        gen_helper_gvec_vstrc8,
        gen_helper_gvec_vstrc16,
        gen_helper_gvec_vstrc32,
    };
    static gen_helper_gvec_4 * const g_rt[3] = {
        gen_helper_gvec_vstrc_rt8,
        gen_helper_gvec_vstrc_rt16,
        gen_helper_gvec_vstrc_rt32,
    };
    static gen_helper_gvec_4_ptr * const g_cc[3] = {
        gen_helper_gvec_vstrc_cc8,
        gen_helper_gvec_vstrc_cc16,
        gen_helper_gvec_vstrc_cc32,
    };
    static gen_helper_gvec_4_ptr * const g_cc_rt[3] = {
        gen_helper_gvec_vstrc_cc_rt8,
        gen_helper_gvec_vstrc_cc_rt16,
        gen_helper_gvec_vstrc_cc_rt32,
    };

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m6, 0, 1)) {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           cpu_env, m6, g_cc_rt[es]);
        } else {
            gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           cpu_env, m6, g_cc[es]);
        }
        set_cc_static(s);
    } else {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           m6, g_rt[es]);
        } else {
            gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           m6, g[es]);
        }
    }
    return DISAS_NEXT;
}
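
/*
 * VECTOR FP ADD/DIVIDE/MULTIPLY/SUBTRACT, dispatched on op2. Only the
 * long (64-bit) format is handled; the single-element control (bit 3
 * of m5) restricts the operation to the leftmost element.
 */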
static DisasJumpType op_vfa(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    const bool se = extract32(m5, 3, 1);
    gen_helper_gvec_3_ptr *fn;

    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xe3:
        fn = se ? gen_helper_gvec_vfa64s : gen_helper_gvec_vfa64;
        break;
    case 0xe5:
        fn = se ? gen_helper_gvec_vfd64s : gen_helper_gvec_vfd64;
        break;
    case 0xe7:
        fn = se ? gen_helper_gvec_vfm64s : gen_helper_gvec_vfm64;
        break;
    case 0xe2:
        fn = se ? gen_helper_gvec_vfs64s : gen_helper_gvec_vfs64;
        break;
    default:
        g_assert_not_reached();
    }
    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), cpu_env, 0, fn);
    return DISAS_NEXT;
}
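
/*
 * VECTOR FP COMPARE SCALAR (WFC) / COMPARE AND SIGNAL SCALAR (WFK),
 * long format only; both set the CC.
 */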
static DisasJumpType op_wfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);

    if (fpf != FPF_LONG || m4) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (s->fields.op2 == 0xcb) {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, gen_helper_gvec_wfc64);
    } else {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, gen_helper_gvec_wfk64);
    }
    set_cc_static(s);
    return DISAS_NEXT;
}
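
/*
 * VECTOR FP COMPARE (VFCE, VFCH, VFCHE): op2 selects the predicate;
 * separate CC-setting helpers are used when the CS bit is set.
 */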
static DisasJumpType op_vfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    const uint8_t m6 = get_field(s, m6);
    const bool se = extract32(m5, 3, 1);
    const bool cs = extract32(m6, 0, 1);
    gen_helper_gvec_3_ptr *fn;

    if (fpf != FPF_LONG || extract32(m5, 0, 3) || extract32(m6, 1, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (cs) {
        switch (s->fields.op2) {
        case 0xe8:
            fn = se ? gen_helper_gvec_vfce64s_cc : gen_helper_gvec_vfce64_cc;
            break;
        case 0xeb:
            fn = se ? gen_helper_gvec_vfch64s_cc : gen_helper_gvec_vfch64_cc;
            break;
        case 0xea:
            fn = se ? gen_helper_gvec_vfche64s_cc : gen_helper_gvec_vfche64_cc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (s->fields.op2) {
        case 0xe8:
            fn = se ? gen_helper_gvec_vfce64s : gen_helper_gvec_vfce64;
            break;
        case 0xeb:
            fn = se ? gen_helper_gvec_vfch64s : gen_helper_gvec_vfch64;
            break;
        case 0xea:
            fn = se ? gen_helper_gvec_vfche64s : gen_helper_gvec_vfche64;
            break;
        default:
            g_assert_not_reached();
        }
    }
    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), cpu_env, 0, fn);
    if (cs) {
        set_cc_static(s);
    }
    return DISAS_NEXT;
}
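
/*
 * FP convert/round ops (VCDG, VCDLG, VCGD, VCLGD, VFI, VFLR),
 * dispatched on op2. The effective rounding mode (erm) is passed to
 * the helper, packed above the m4 bits in the simd_data immediate.
 */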
static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    const uint8_t erm = get_field(s, m5);
    const bool se = extract32(m4, 3, 1);
    gen_helper_gvec_2_ptr *fn;

    if (fpf != FPF_LONG || extract32(m4, 0, 2) || erm > 7 || erm == 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xc3:
        fn = se ? gen_helper_gvec_vcdg64s : gen_helper_gvec_vcdg64;
        break;
    case 0xc1:
        fn = se ? gen_helper_gvec_vcdlg64s : gen_helper_gvec_vcdlg64;
        break;
    case 0xc2:
        fn = se ? gen_helper_gvec_vcgd64s : gen_helper_gvec_vcgd64;
        break;
    case 0xc0:
        fn = se ? gen_helper_gvec_vclgd64s : gen_helper_gvec_vclgd64;
        break;
    case 0xc7:
        fn = se ? gen_helper_gvec_vfi64s : gen_helper_gvec_vfi64;
        break;
    case 0xc5:
        fn = se ? gen_helper_gvec_vflr64s : gen_helper_gvec_vflr64;
        break;
    default:
        g_assert_not_reached();
    }
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
                   deposit32(m4, 4, 4, erm), fn);
    return DISAS_NEXT;
}
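
/* VECTOR FP LOAD LENGTHENED: short to long format. */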
static DisasJumpType op_vfll(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfll32;

    if (fpf != FPF_SHORT || extract32(m4, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        fn = gen_helper_gvec_vfll32s;
    }
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
                   0, fn);
    return DISAS_NEXT;
}
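
/* VECTOR FP MULTIPLY AND ADD/SUBTRACT (VFMA, VFMS), long format only. */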
static DisasJumpType op_vfma(DisasContext *s, DisasOps *o)
{
    const uint8_t m5 = get_field(s, m5);
    const uint8_t fpf = get_field(s, m6);
    const bool se = extract32(m5, 3, 1);
    gen_helper_gvec_4_ptr *fn;

    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (s->fields.op2 == 0x8f) {
        fn = se ? gen_helper_gvec_vfma64s : gen_helper_gvec_vfma64;
    } else {
        fn = se ? gen_helper_gvec_vfms64s : gen_helper_gvec_vfms64;
    }
    gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), get_field(s, v4), cpu_env,
                   0, fn);
    return DISAS_NEXT;
}
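
/*
 * VECTOR FP PERFORM SIGN OPERATION: pure sign-bit manipulation, so no
 * FP helper is needed; m5 selects complement, set or clear.
 */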
static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    TCGv_i64 tmp;

    if (fpf != FPF_LONG || extract32(m4, 0, 3) || m5 > 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        tmp = tcg_temp_new_i64();
        read_vec_element_i64(tmp, v2, 0, ES_64);
        switch (m5) {
        case 0:
            /* sign bit is inverted (complement) */
            tcg_gen_xori_i64(tmp, tmp, 1ull << 63);
            break;
        case 1:
            /* sign bit is set to one (negative) */
            tcg_gen_ori_i64(tmp, tmp, 1ull << 63);
            break;
        case 2:
            /* sign bit is set to zero (positive) */
            tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1);
            break;
        }
        write_vec_element_i64(tmp, v1, 0, ES_64);
        tcg_temp_free_i64(tmp);
    } else {
        switch (m5) {
        case 0:
            /* sign bit is inverted (complement) */
            gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63);
            break;
        case 1:
            /* sign bit is set to one (negative) */
            gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63);
            break;
        case 2:
            /* sign bit is set to zero (positive) */
            gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1);
            break;
        }
    }
    return DISAS_NEXT;
}
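
/* VECTOR FP SQUARE ROOT, long format only. */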
static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfsq64;

    if (fpf != FPF_LONG || extract32(m4, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        fn = gen_helper_gvec_vfsq64s;
    }
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
                   0, fn);
    return DISAS_NEXT;
}
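
/*
 * VECTOR FP TEST DATA CLASS IMMEDIATE: i3 is the 12-bit data-class
 * mask; sets the CC.
 */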
static DisasJumpType op_vftci(DisasContext *s, DisasOps *o)
{
    const uint16_t i3 = get_field(s, i3);
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vftci64;

    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 3, 1)) {
        fn = gen_helper_gvec_vftci64s;
    }
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, i3, fn);
    set_cc_static(s);
    return DISAS_NEXT;
}