2 * QEMU TCG support -- s390x vector instruction translation functions
4 * Copyright (C) 2019 Red Hat Inc
7 * David Hildenbrand <david@redhat.com>
9 * This work is licensed under the terms of the GNU GPL, version 2 or later.
10 * See the COPYING file in the top-level directory.
14 * For most instructions that use the same element size for reads and
15 * writes, we can use real gvec vector expansion, which potentially uses
16 * real host vector instructions. As they only work up to 64 bit elements,
17 * 128 bit elements (vector is a single element) have to be handled
18 * differently. Operations that are too complicated to encode via TCG ops
19 * are handled via gvec ool (out-of-line) handlers.
21 * As soon as instructions use different element sizes for reads and writes
22 * or access elements "out of their element scope" we expand them manually
23 * in fancy loops, as gvec expansion does not deal with actual element
24 * numbers and does also not support access to other elements.
27 * As we only have i32/i64, such elements have to be loaded into two
28 * i64 values and can then be processed e.g. by tcg_gen_add2_i64.
31 * On s390x, the operand size (oprsz) and the maximum size (maxsz) are
32 * always 16 (128 bit). What gvec code calls "vece", s390x calls "es",
33 * a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only
34 * 128 bit element size has to be treated in a special way (MO_64 + 1).
35 * We will use ES_* instead of MO_* for this reason in this file.
38 * As gvec ool-helpers can currently not return values (besides via
39 * pointers like vectors or cpu_env), whenever we have to set the CC and
40 * can't conclude the value from the result vector, we will directly
41 * set it in "env->cc_op" and mark it as static via set_cc_static()".
42 * Whenever this is done, the helper writes globals (cc_op).
45 #define NUM_VEC_ELEMENT_BYTES(es) (1 << (es))
46 #define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es))
47 #define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE)
55 /* Floating-Point Format */
60 static inline bool valid_vec_element(uint8_t enr
, MemOp es
)
62 return !(enr
& ~(NUM_VEC_ELEMENTS(es
) - 1));
65 static void read_vec_element_i64(TCGv_i64 dst
, uint8_t reg
, uint8_t enr
,
68 const int offs
= vec_reg_offset(reg
, enr
, memop
& MO_SIZE
);
72 tcg_gen_ld8u_i64(dst
, cpu_env
, offs
);
75 tcg_gen_ld16u_i64(dst
, cpu_env
, offs
);
78 tcg_gen_ld32u_i64(dst
, cpu_env
, offs
);
81 tcg_gen_ld8s_i64(dst
, cpu_env
, offs
);
84 tcg_gen_ld16s_i64(dst
, cpu_env
, offs
);
87 tcg_gen_ld32s_i64(dst
, cpu_env
, offs
);
91 tcg_gen_ld_i64(dst
, cpu_env
, offs
);
94 g_assert_not_reached();
98 static void read_vec_element_i32(TCGv_i32 dst
, uint8_t reg
, uint8_t enr
,
101 const int offs
= vec_reg_offset(reg
, enr
, memop
& MO_SIZE
);
105 tcg_gen_ld8u_i32(dst
, cpu_env
, offs
);
108 tcg_gen_ld16u_i32(dst
, cpu_env
, offs
);
111 tcg_gen_ld8s_i32(dst
, cpu_env
, offs
);
113 case ES_16
| MO_SIGN
:
114 tcg_gen_ld16s_i32(dst
, cpu_env
, offs
);
117 case ES_32
| MO_SIGN
:
118 tcg_gen_ld_i32(dst
, cpu_env
, offs
);
121 g_assert_not_reached();
125 static void write_vec_element_i64(TCGv_i64 src
, int reg
, uint8_t enr
,
128 const int offs
= vec_reg_offset(reg
, enr
, memop
& MO_SIZE
);
132 tcg_gen_st8_i64(src
, cpu_env
, offs
);
135 tcg_gen_st16_i64(src
, cpu_env
, offs
);
138 tcg_gen_st32_i64(src
, cpu_env
, offs
);
141 tcg_gen_st_i64(src
, cpu_env
, offs
);
144 g_assert_not_reached();
148 static void write_vec_element_i32(TCGv_i32 src
, int reg
, uint8_t enr
,
151 const int offs
= vec_reg_offset(reg
, enr
, memop
& MO_SIZE
);
155 tcg_gen_st8_i32(src
, cpu_env
, offs
);
158 tcg_gen_st16_i32(src
, cpu_env
, offs
);
161 tcg_gen_st_i32(src
, cpu_env
, offs
);
164 g_assert_not_reached();
168 static void get_vec_element_ptr_i64(TCGv_ptr ptr
, uint8_t reg
, TCGv_i64 enr
,
171 TCGv_i64 tmp
= tcg_temp_new_i64();
173 /* mask off invalid parts from the element nr */
174 tcg_gen_andi_i64(tmp
, enr
, NUM_VEC_ELEMENTS(es
) - 1);
176 /* convert it to an element offset relative to cpu_env (vec_reg_offset() */
177 tcg_gen_shli_i64(tmp
, tmp
, es
);
178 #ifndef HOST_WORDS_BIGENDIAN
179 tcg_gen_xori_i64(tmp
, tmp
, 8 - NUM_VEC_ELEMENT_BYTES(es
));
181 tcg_gen_addi_i64(tmp
, tmp
, vec_full_reg_offset(reg
));
183 /* generate the final ptr by adding cpu_env */
184 tcg_gen_trunc_i64_ptr(ptr
, tmp
);
185 tcg_gen_add_ptr(ptr
, ptr
, cpu_env
);
187 tcg_temp_free_i64(tmp
);
190 #define gen_gvec_2(v1, v2, gen) \
191 tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
193 #define gen_gvec_2s(v1, v2, c, gen) \
194 tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
196 #define gen_gvec_2_ool(v1, v2, data, fn) \
197 tcg_gen_gvec_2_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
199 #define gen_gvec_2i_ool(v1, v2, c, data, fn) \
200 tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
202 #define gen_gvec_2_ptr(v1, v2, ptr, data, fn) \
203 tcg_gen_gvec_2_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
204 ptr, 16, 16, data, fn)
205 #define gen_gvec_3(v1, v2, v3, gen) \
206 tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
207 vec_full_reg_offset(v3), 16, 16, gen)
208 #define gen_gvec_3_ool(v1, v2, v3, data, fn) \
209 tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
210 vec_full_reg_offset(v3), 16, 16, data, fn)
211 #define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \
212 tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
213 vec_full_reg_offset(v3), ptr, 16, 16, data, fn)
214 #define gen_gvec_3i(v1, v2, v3, c, gen) \
215 tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
216 vec_full_reg_offset(v3), 16, 16, c, gen)
217 #define gen_gvec_4(v1, v2, v3, v4, gen) \
218 tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
219 vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
221 #define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \
222 tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
223 vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
225 #define gen_gvec_4_ptr(v1, v2, v3, v4, ptr, data, fn) \
226 tcg_gen_gvec_4_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
227 vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
228 ptr, 16, 16, data, fn)
229 #define gen_gvec_dup_i64(es, v1, c) \
230 tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
231 #define gen_gvec_mov(v1, v2) \
232 tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
234 #define gen_gvec_dup64i(v1, c) \
235 tcg_gen_gvec_dup64i(vec_full_reg_offset(v1), 16, 16, c)
236 #define gen_gvec_fn_2(fn, es, v1, v2) \
237 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
239 #define gen_gvec_fn_2i(fn, es, v1, v2, c) \
240 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
242 #define gen_gvec_fn_2s(fn, es, v1, v2, s) \
243 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
245 #define gen_gvec_fn_3(fn, es, v1, v2, v3) \
246 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
247 vec_full_reg_offset(v3), 16, 16)
248 #define gen_gvec_fn_4(fn, es, v1, v2, v3, v4) \
249 tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
250 vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16)
253 * Helper to carry out a 128 bit vector computation using 2 i64 values per
256 typedef void (*gen_gvec128_3_i64_fn
)(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
,
257 TCGv_i64 ah
, TCGv_i64 bl
, TCGv_i64 bh
);
258 static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn
, uint8_t d
, uint8_t a
,
261 TCGv_i64 dh
= tcg_temp_new_i64();
262 TCGv_i64 dl
= tcg_temp_new_i64();
263 TCGv_i64 ah
= tcg_temp_new_i64();
264 TCGv_i64 al
= tcg_temp_new_i64();
265 TCGv_i64 bh
= tcg_temp_new_i64();
266 TCGv_i64 bl
= tcg_temp_new_i64();
268 read_vec_element_i64(ah
, a
, 0, ES_64
);
269 read_vec_element_i64(al
, a
, 1, ES_64
);
270 read_vec_element_i64(bh
, b
, 0, ES_64
);
271 read_vec_element_i64(bl
, b
, 1, ES_64
);
272 fn(dl
, dh
, al
, ah
, bl
, bh
);
273 write_vec_element_i64(dh
, d
, 0, ES_64
);
274 write_vec_element_i64(dl
, d
, 1, ES_64
);
276 tcg_temp_free_i64(dh
);
277 tcg_temp_free_i64(dl
);
278 tcg_temp_free_i64(ah
);
279 tcg_temp_free_i64(al
);
280 tcg_temp_free_i64(bh
);
281 tcg_temp_free_i64(bl
);
284 typedef void (*gen_gvec128_4_i64_fn
)(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
,
285 TCGv_i64 ah
, TCGv_i64 bl
, TCGv_i64 bh
,
286 TCGv_i64 cl
, TCGv_i64 ch
);
287 static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn
, uint8_t d
, uint8_t a
,
288 uint8_t b
, uint8_t c
)
290 TCGv_i64 dh
= tcg_temp_new_i64();
291 TCGv_i64 dl
= tcg_temp_new_i64();
292 TCGv_i64 ah
= tcg_temp_new_i64();
293 TCGv_i64 al
= tcg_temp_new_i64();
294 TCGv_i64 bh
= tcg_temp_new_i64();
295 TCGv_i64 bl
= tcg_temp_new_i64();
296 TCGv_i64 ch
= tcg_temp_new_i64();
297 TCGv_i64 cl
= tcg_temp_new_i64();
299 read_vec_element_i64(ah
, a
, 0, ES_64
);
300 read_vec_element_i64(al
, a
, 1, ES_64
);
301 read_vec_element_i64(bh
, b
, 0, ES_64
);
302 read_vec_element_i64(bl
, b
, 1, ES_64
);
303 read_vec_element_i64(ch
, c
, 0, ES_64
);
304 read_vec_element_i64(cl
, c
, 1, ES_64
);
305 fn(dl
, dh
, al
, ah
, bl
, bh
, cl
, ch
);
306 write_vec_element_i64(dh
, d
, 0, ES_64
);
307 write_vec_element_i64(dl
, d
, 1, ES_64
);
309 tcg_temp_free_i64(dh
);
310 tcg_temp_free_i64(dl
);
311 tcg_temp_free_i64(ah
);
312 tcg_temp_free_i64(al
);
313 tcg_temp_free_i64(bh
);
314 tcg_temp_free_i64(bl
);
315 tcg_temp_free_i64(ch
);
316 tcg_temp_free_i64(cl
);
319 static void gen_gvec_dupi(uint8_t es
, uint8_t reg
, uint64_t c
)
323 tcg_gen_gvec_dup8i(vec_full_reg_offset(reg
), 16, 16, c
);
326 tcg_gen_gvec_dup16i(vec_full_reg_offset(reg
), 16, 16, c
);
329 tcg_gen_gvec_dup32i(vec_full_reg_offset(reg
), 16, 16, c
);
332 gen_gvec_dup64i(reg
, c
);
335 g_assert_not_reached();
339 static void zero_vec(uint8_t reg
)
341 tcg_gen_gvec_dup8i(vec_full_reg_offset(reg
), 16, 16, 0);
344 static void gen_addi2_i64(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
, TCGv_i64 ah
,
347 TCGv_i64 bl
= tcg_const_i64(b
);
348 TCGv_i64 bh
= tcg_const_i64(0);
350 tcg_gen_add2_i64(dl
, dh
, al
, ah
, bl
, bh
);
351 tcg_temp_free_i64(bl
);
352 tcg_temp_free_i64(bh
);
355 static DisasJumpType
op_vge(DisasContext
*s
, DisasOps
*o
)
357 const uint8_t es
= s
->insn
->data
;
358 const uint8_t enr
= get_field(s
->fields
, m3
);
361 if (!valid_vec_element(enr
, es
)) {
362 gen_program_exception(s
, PGM_SPECIFICATION
);
363 return DISAS_NORETURN
;
366 tmp
= tcg_temp_new_i64();
367 read_vec_element_i64(tmp
, get_field(s
->fields
, v2
), enr
, es
);
368 tcg_gen_add_i64(o
->addr1
, o
->addr1
, tmp
);
369 gen_addi_and_wrap_i64(s
, o
->addr1
, o
->addr1
, 0);
371 tcg_gen_qemu_ld_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TE
| es
);
372 write_vec_element_i64(tmp
, get_field(s
->fields
, v1
), enr
, es
);
373 tcg_temp_free_i64(tmp
);
377 static uint64_t generate_byte_mask(uint8_t mask
)
382 for (i
= 0; i
< 8; i
++) {
383 if ((mask
>> i
) & 1) {
384 r
|= 0xffull
<< (i
* 8);
390 static DisasJumpType
op_vgbm(DisasContext
*s
, DisasOps
*o
)
392 const uint16_t i2
= get_field(s
->fields
, i2
);
394 if (i2
== (i2
& 0xff) * 0x0101) {
396 * Masks for both 64 bit elements of the vector are the same.
397 * Trust tcg to produce a good constant loading.
399 gen_gvec_dup64i(get_field(s
->fields
, v1
),
400 generate_byte_mask(i2
& 0xff));
402 TCGv_i64 t
= tcg_temp_new_i64();
404 tcg_gen_movi_i64(t
, generate_byte_mask(i2
>> 8));
405 write_vec_element_i64(t
, get_field(s
->fields
, v1
), 0, ES_64
);
406 tcg_gen_movi_i64(t
, generate_byte_mask(i2
));
407 write_vec_element_i64(t
, get_field(s
->fields
, v1
), 1, ES_64
);
408 tcg_temp_free_i64(t
);
413 static DisasJumpType
op_vgm(DisasContext
*s
, DisasOps
*o
)
415 const uint8_t es
= get_field(s
->fields
, m4
);
416 const uint8_t bits
= NUM_VEC_ELEMENT_BITS(es
);
417 const uint8_t i2
= get_field(s
->fields
, i2
) & (bits
- 1);
418 const uint8_t i3
= get_field(s
->fields
, i3
) & (bits
- 1);
423 gen_program_exception(s
, PGM_SPECIFICATION
);
424 return DISAS_NORETURN
;
427 /* generate the mask - take care of wrapping */
428 for (i
= i2
; ; i
= (i
+ 1) % bits
) {
429 mask
|= 1ull << (bits
- i
- 1);
435 gen_gvec_dupi(es
, get_field(s
->fields
, v1
), mask
);
439 static DisasJumpType
op_vl(DisasContext
*s
, DisasOps
*o
)
441 TCGv_i64 t0
= tcg_temp_new_i64();
442 TCGv_i64 t1
= tcg_temp_new_i64();
444 tcg_gen_qemu_ld_i64(t0
, o
->addr1
, get_mem_index(s
), MO_TEQ
);
445 gen_addi_and_wrap_i64(s
, o
->addr1
, o
->addr1
, 8);
446 tcg_gen_qemu_ld_i64(t1
, o
->addr1
, get_mem_index(s
), MO_TEQ
);
447 write_vec_element_i64(t0
, get_field(s
->fields
, v1
), 0, ES_64
);
448 write_vec_element_i64(t1
, get_field(s
->fields
, v1
), 1, ES_64
);
454 static DisasJumpType
op_vlr(DisasContext
*s
, DisasOps
*o
)
456 gen_gvec_mov(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
));
460 static DisasJumpType
op_vlrep(DisasContext
*s
, DisasOps
*o
)
462 const uint8_t es
= get_field(s
->fields
, m3
);
466 gen_program_exception(s
, PGM_SPECIFICATION
);
467 return DISAS_NORETURN
;
470 tmp
= tcg_temp_new_i64();
471 tcg_gen_qemu_ld_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TE
| es
);
472 gen_gvec_dup_i64(es
, get_field(s
->fields
, v1
), tmp
);
473 tcg_temp_free_i64(tmp
);
477 static DisasJumpType
op_vle(DisasContext
*s
, DisasOps
*o
)
479 const uint8_t es
= s
->insn
->data
;
480 const uint8_t enr
= get_field(s
->fields
, m3
);
483 if (!valid_vec_element(enr
, es
)) {
484 gen_program_exception(s
, PGM_SPECIFICATION
);
485 return DISAS_NORETURN
;
488 tmp
= tcg_temp_new_i64();
489 tcg_gen_qemu_ld_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TE
| es
);
490 write_vec_element_i64(tmp
, get_field(s
->fields
, v1
), enr
, es
);
491 tcg_temp_free_i64(tmp
);
495 static DisasJumpType
op_vlei(DisasContext
*s
, DisasOps
*o
)
497 const uint8_t es
= s
->insn
->data
;
498 const uint8_t enr
= get_field(s
->fields
, m3
);
501 if (!valid_vec_element(enr
, es
)) {
502 gen_program_exception(s
, PGM_SPECIFICATION
);
503 return DISAS_NORETURN
;
506 tmp
= tcg_const_i64((int16_t)get_field(s
->fields
, i2
));
507 write_vec_element_i64(tmp
, get_field(s
->fields
, v1
), enr
, es
);
508 tcg_temp_free_i64(tmp
);
512 static DisasJumpType
op_vlgv(DisasContext
*s
, DisasOps
*o
)
514 const uint8_t es
= get_field(s
->fields
, m4
);
518 gen_program_exception(s
, PGM_SPECIFICATION
);
519 return DISAS_NORETURN
;
522 /* fast path if we don't need the register content */
523 if (!get_field(s
->fields
, b2
)) {
524 uint8_t enr
= get_field(s
->fields
, d2
) & (NUM_VEC_ELEMENTS(es
) - 1);
526 read_vec_element_i64(o
->out
, get_field(s
->fields
, v3
), enr
, es
);
530 ptr
= tcg_temp_new_ptr();
531 get_vec_element_ptr_i64(ptr
, get_field(s
->fields
, v3
), o
->addr1
, es
);
534 tcg_gen_ld8u_i64(o
->out
, ptr
, 0);
537 tcg_gen_ld16u_i64(o
->out
, ptr
, 0);
540 tcg_gen_ld32u_i64(o
->out
, ptr
, 0);
543 tcg_gen_ld_i64(o
->out
, ptr
, 0);
546 g_assert_not_reached();
548 tcg_temp_free_ptr(ptr
);
553 static DisasJumpType
op_vllez(DisasContext
*s
, DisasOps
*o
)
555 uint8_t es
= get_field(s
->fields
, m3
);
560 /* rightmost sub-element of leftmost doubleword */
573 /* leftmost sub-element of leftmost doubleword */
575 if (s390_has_feat(S390_FEAT_VECTOR_ENH
)) {
582 gen_program_exception(s
, PGM_SPECIFICATION
);
583 return DISAS_NORETURN
;
586 t
= tcg_temp_new_i64();
587 tcg_gen_qemu_ld_i64(t
, o
->addr1
, get_mem_index(s
), MO_TE
| es
);
588 zero_vec(get_field(s
->fields
, v1
));
589 write_vec_element_i64(t
, get_field(s
->fields
, v1
), enr
, es
);
590 tcg_temp_free_i64(t
);
594 static DisasJumpType
op_vlm(DisasContext
*s
, DisasOps
*o
)
596 const uint8_t v3
= get_field(s
->fields
, v3
);
597 uint8_t v1
= get_field(s
->fields
, v1
);
600 if (v3
< v1
|| (v3
- v1
+ 1) > 16) {
601 gen_program_exception(s
, PGM_SPECIFICATION
);
602 return DISAS_NORETURN
;
606 * Check for possible access exceptions by trying to load the last
607 * element. The first element will be checked first next.
609 t0
= tcg_temp_new_i64();
610 t1
= tcg_temp_new_i64();
611 gen_addi_and_wrap_i64(s
, t0
, o
->addr1
, (v3
- v1
) * 16 + 8);
612 tcg_gen_qemu_ld_i64(t0
, t0
, get_mem_index(s
), MO_TEQ
);
615 tcg_gen_qemu_ld_i64(t1
, o
->addr1
, get_mem_index(s
), MO_TEQ
);
616 write_vec_element_i64(t1
, v1
, 0, ES_64
);
620 gen_addi_and_wrap_i64(s
, o
->addr1
, o
->addr1
, 8);
621 tcg_gen_qemu_ld_i64(t1
, o
->addr1
, get_mem_index(s
), MO_TEQ
);
622 write_vec_element_i64(t1
, v1
, 1, ES_64
);
623 gen_addi_and_wrap_i64(s
, o
->addr1
, o
->addr1
, 8);
626 /* Store the last element, loaded first */
627 write_vec_element_i64(t0
, v1
, 1, ES_64
);
629 tcg_temp_free_i64(t0
);
630 tcg_temp_free_i64(t1
);
634 static DisasJumpType
op_vlbb(DisasContext
*s
, DisasOps
*o
)
636 const int64_t block_size
= (1ull << (get_field(s
->fields
, m3
) + 6));
637 const int v1_offs
= vec_full_reg_offset(get_field(s
->fields
, v1
));
641 if (get_field(s
->fields
, m3
) > 6) {
642 gen_program_exception(s
, PGM_SPECIFICATION
);
643 return DISAS_NORETURN
;
646 bytes
= tcg_temp_new_i64();
647 a0
= tcg_temp_new_ptr();
648 /* calculate the number of bytes until the next block boundary */
649 tcg_gen_ori_i64(bytes
, o
->addr1
, -block_size
);
650 tcg_gen_neg_i64(bytes
, bytes
);
652 tcg_gen_addi_ptr(a0
, cpu_env
, v1_offs
);
653 gen_helper_vll(cpu_env
, a0
, o
->addr1
, bytes
);
654 tcg_temp_free_i64(bytes
);
655 tcg_temp_free_ptr(a0
);
659 static DisasJumpType
op_vlvg(DisasContext
*s
, DisasOps
*o
)
661 const uint8_t es
= get_field(s
->fields
, m4
);
665 gen_program_exception(s
, PGM_SPECIFICATION
);
666 return DISAS_NORETURN
;
669 /* fast path if we don't need the register content */
670 if (!get_field(s
->fields
, b2
)) {
671 uint8_t enr
= get_field(s
->fields
, d2
) & (NUM_VEC_ELEMENTS(es
) - 1);
673 write_vec_element_i64(o
->in2
, get_field(s
->fields
, v1
), enr
, es
);
677 ptr
= tcg_temp_new_ptr();
678 get_vec_element_ptr_i64(ptr
, get_field(s
->fields
, v1
), o
->addr1
, es
);
681 tcg_gen_st8_i64(o
->in2
, ptr
, 0);
684 tcg_gen_st16_i64(o
->in2
, ptr
, 0);
687 tcg_gen_st32_i64(o
->in2
, ptr
, 0);
690 tcg_gen_st_i64(o
->in2
, ptr
, 0);
693 g_assert_not_reached();
695 tcg_temp_free_ptr(ptr
);
700 static DisasJumpType
op_vlvgp(DisasContext
*s
, DisasOps
*o
)
702 write_vec_element_i64(o
->in1
, get_field(s
->fields
, v1
), 0, ES_64
);
703 write_vec_element_i64(o
->in2
, get_field(s
->fields
, v1
), 1, ES_64
);
707 static DisasJumpType
op_vll(DisasContext
*s
, DisasOps
*o
)
709 const int v1_offs
= vec_full_reg_offset(get_field(s
->fields
, v1
));
710 TCGv_ptr a0
= tcg_temp_new_ptr();
712 /* convert highest index into an actual length */
713 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
714 tcg_gen_addi_ptr(a0
, cpu_env
, v1_offs
);
715 gen_helper_vll(cpu_env
, a0
, o
->addr1
, o
->in2
);
716 tcg_temp_free_ptr(a0
);
720 static DisasJumpType
op_vmr(DisasContext
*s
, DisasOps
*o
)
722 const uint8_t v1
= get_field(s
->fields
, v1
);
723 const uint8_t v2
= get_field(s
->fields
, v2
);
724 const uint8_t v3
= get_field(s
->fields
, v3
);
725 const uint8_t es
= get_field(s
->fields
, m4
);
726 int dst_idx
, src_idx
;
730 gen_program_exception(s
, PGM_SPECIFICATION
);
731 return DISAS_NORETURN
;
734 tmp
= tcg_temp_new_i64();
735 if (s
->fields
->op2
== 0x61) {
736 /* iterate backwards to avoid overwriting data we might need later */
737 for (dst_idx
= NUM_VEC_ELEMENTS(es
) - 1; dst_idx
>= 0; dst_idx
--) {
738 src_idx
= dst_idx
/ 2;
739 if (dst_idx
% 2 == 0) {
740 read_vec_element_i64(tmp
, v2
, src_idx
, es
);
742 read_vec_element_i64(tmp
, v3
, src_idx
, es
);
744 write_vec_element_i64(tmp
, v1
, dst_idx
, es
);
747 /* iterate forward to avoid overwriting data we might need later */
748 for (dst_idx
= 0; dst_idx
< NUM_VEC_ELEMENTS(es
); dst_idx
++) {
749 src_idx
= (dst_idx
+ NUM_VEC_ELEMENTS(es
)) / 2;
750 if (dst_idx
% 2 == 0) {
751 read_vec_element_i64(tmp
, v2
, src_idx
, es
);
753 read_vec_element_i64(tmp
, v3
, src_idx
, es
);
755 write_vec_element_i64(tmp
, v1
, dst_idx
, es
);
758 tcg_temp_free_i64(tmp
);
762 static DisasJumpType
op_vpk(DisasContext
*s
, DisasOps
*o
)
764 const uint8_t v1
= get_field(s
->fields
, v1
);
765 const uint8_t v2
= get_field(s
->fields
, v2
);
766 const uint8_t v3
= get_field(s
->fields
, v3
);
767 const uint8_t es
= get_field(s
->fields
, m4
);
768 static gen_helper_gvec_3
* const vpk
[3] = {
769 gen_helper_gvec_vpk16
,
770 gen_helper_gvec_vpk32
,
771 gen_helper_gvec_vpk64
,
773 static gen_helper_gvec_3
* const vpks
[3] = {
774 gen_helper_gvec_vpks16
,
775 gen_helper_gvec_vpks32
,
776 gen_helper_gvec_vpks64
,
778 static gen_helper_gvec_3_ptr
* const vpks_cc
[3] = {
779 gen_helper_gvec_vpks_cc16
,
780 gen_helper_gvec_vpks_cc32
,
781 gen_helper_gvec_vpks_cc64
,
783 static gen_helper_gvec_3
* const vpkls
[3] = {
784 gen_helper_gvec_vpkls16
,
785 gen_helper_gvec_vpkls32
,
786 gen_helper_gvec_vpkls64
,
788 static gen_helper_gvec_3_ptr
* const vpkls_cc
[3] = {
789 gen_helper_gvec_vpkls_cc16
,
790 gen_helper_gvec_vpkls_cc32
,
791 gen_helper_gvec_vpkls_cc64
,
794 if (es
== ES_8
|| es
> ES_64
) {
795 gen_program_exception(s
, PGM_SPECIFICATION
);
796 return DISAS_NORETURN
;
799 switch (s
->fields
->op2
) {
801 if (get_field(s
->fields
, m5
) & 0x1) {
802 gen_gvec_3_ptr(v1
, v2
, v3
, cpu_env
, 0, vpks_cc
[es
- 1]);
805 gen_gvec_3_ool(v1
, v2
, v3
, 0, vpks
[es
- 1]);
809 if (get_field(s
->fields
, m5
) & 0x1) {
810 gen_gvec_3_ptr(v1
, v2
, v3
, cpu_env
, 0, vpkls_cc
[es
- 1]);
813 gen_gvec_3_ool(v1
, v2
, v3
, 0, vpkls
[es
- 1]);
817 /* If sources and destination dont't overlap -> fast path */
818 if (v1
!= v2
&& v1
!= v3
) {
819 const uint8_t src_es
= get_field(s
->fields
, m4
);
820 const uint8_t dst_es
= src_es
- 1;
821 TCGv_i64 tmp
= tcg_temp_new_i64();
822 int dst_idx
, src_idx
;
824 for (dst_idx
= 0; dst_idx
< NUM_VEC_ELEMENTS(dst_es
); dst_idx
++) {
826 if (src_idx
< NUM_VEC_ELEMENTS(src_es
)) {
827 read_vec_element_i64(tmp
, v2
, src_idx
, src_es
);
829 src_idx
-= NUM_VEC_ELEMENTS(src_es
);
830 read_vec_element_i64(tmp
, v3
, src_idx
, src_es
);
832 write_vec_element_i64(tmp
, v1
, dst_idx
, dst_es
);
834 tcg_temp_free_i64(tmp
);
836 gen_gvec_3_ool(v1
, v2
, v3
, 0, vpk
[es
- 1]);
840 g_assert_not_reached();
845 static DisasJumpType
op_vperm(DisasContext
*s
, DisasOps
*o
)
847 gen_gvec_4_ool(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
848 get_field(s
->fields
, v3
), get_field(s
->fields
, v4
),
849 0, gen_helper_gvec_vperm
);
853 static DisasJumpType
op_vpdi(DisasContext
*s
, DisasOps
*o
)
855 const uint8_t i2
= extract32(get_field(s
->fields
, m4
), 2, 1);
856 const uint8_t i3
= extract32(get_field(s
->fields
, m4
), 0, 1);
857 TCGv_i64 t0
= tcg_temp_new_i64();
858 TCGv_i64 t1
= tcg_temp_new_i64();
860 read_vec_element_i64(t0
, get_field(s
->fields
, v2
), i2
, ES_64
);
861 read_vec_element_i64(t1
, get_field(s
->fields
, v3
), i3
, ES_64
);
862 write_vec_element_i64(t0
, get_field(s
->fields
, v1
), 0, ES_64
);
863 write_vec_element_i64(t1
, get_field(s
->fields
, v1
), 1, ES_64
);
864 tcg_temp_free_i64(t0
);
865 tcg_temp_free_i64(t1
);
869 static DisasJumpType
op_vrep(DisasContext
*s
, DisasOps
*o
)
871 const uint8_t enr
= get_field(s
->fields
, i2
);
872 const uint8_t es
= get_field(s
->fields
, m4
);
874 if (es
> ES_64
|| !valid_vec_element(enr
, es
)) {
875 gen_program_exception(s
, PGM_SPECIFICATION
);
876 return DISAS_NORETURN
;
879 tcg_gen_gvec_dup_mem(es
, vec_full_reg_offset(get_field(s
->fields
, v1
)),
880 vec_reg_offset(get_field(s
->fields
, v3
), enr
, es
),
885 static DisasJumpType
op_vrepi(DisasContext
*s
, DisasOps
*o
)
887 const int64_t data
= (int16_t)get_field(s
->fields
, i2
);
888 const uint8_t es
= get_field(s
->fields
, m3
);
891 gen_program_exception(s
, PGM_SPECIFICATION
);
892 return DISAS_NORETURN
;
895 gen_gvec_dupi(es
, get_field(s
->fields
, v1
), data
);
899 static DisasJumpType
op_vsce(DisasContext
*s
, DisasOps
*o
)
901 const uint8_t es
= s
->insn
->data
;
902 const uint8_t enr
= get_field(s
->fields
, m3
);
905 if (!valid_vec_element(enr
, es
)) {
906 gen_program_exception(s
, PGM_SPECIFICATION
);
907 return DISAS_NORETURN
;
910 tmp
= tcg_temp_new_i64();
911 read_vec_element_i64(tmp
, get_field(s
->fields
, v2
), enr
, es
);
912 tcg_gen_add_i64(o
->addr1
, o
->addr1
, tmp
);
913 gen_addi_and_wrap_i64(s
, o
->addr1
, o
->addr1
, 0);
915 read_vec_element_i64(tmp
, get_field(s
->fields
, v1
), enr
, es
);
916 tcg_gen_qemu_st_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TE
| es
);
917 tcg_temp_free_i64(tmp
);
921 static DisasJumpType
op_vsel(DisasContext
*s
, DisasOps
*o
)
923 gen_gvec_fn_4(bitsel
, ES_8
, get_field(s
->fields
, v1
),
924 get_field(s
->fields
, v4
), get_field(s
->fields
, v2
),
925 get_field(s
->fields
, v3
));
929 static DisasJumpType
op_vseg(DisasContext
*s
, DisasOps
*o
)
931 const uint8_t es
= get_field(s
->fields
, m3
);
949 gen_program_exception(s
, PGM_SPECIFICATION
);
950 return DISAS_NORETURN
;
953 tmp
= tcg_temp_new_i64();
954 read_vec_element_i64(tmp
, get_field(s
->fields
, v2
), idx1
, es
| MO_SIGN
);
955 write_vec_element_i64(tmp
, get_field(s
->fields
, v1
), 0, ES_64
);
956 read_vec_element_i64(tmp
, get_field(s
->fields
, v2
), idx2
, es
| MO_SIGN
);
957 write_vec_element_i64(tmp
, get_field(s
->fields
, v1
), 1, ES_64
);
958 tcg_temp_free_i64(tmp
);
962 static DisasJumpType
op_vst(DisasContext
*s
, DisasOps
*o
)
964 TCGv_i64 tmp
= tcg_const_i64(16);
966 /* Probe write access before actually modifying memory */
967 gen_helper_probe_write_access(cpu_env
, o
->addr1
, tmp
);
969 read_vec_element_i64(tmp
, get_field(s
->fields
, v1
), 0, ES_64
);
970 tcg_gen_qemu_st_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TEQ
);
971 gen_addi_and_wrap_i64(s
, o
->addr1
, o
->addr1
, 8);
972 read_vec_element_i64(tmp
, get_field(s
->fields
, v1
), 1, ES_64
);
973 tcg_gen_qemu_st_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TEQ
);
974 tcg_temp_free_i64(tmp
);
978 static DisasJumpType
op_vste(DisasContext
*s
, DisasOps
*o
)
980 const uint8_t es
= s
->insn
->data
;
981 const uint8_t enr
= get_field(s
->fields
, m3
);
984 if (!valid_vec_element(enr
, es
)) {
985 gen_program_exception(s
, PGM_SPECIFICATION
);
986 return DISAS_NORETURN
;
989 tmp
= tcg_temp_new_i64();
990 read_vec_element_i64(tmp
, get_field(s
->fields
, v1
), enr
, es
);
991 tcg_gen_qemu_st_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TE
| es
);
992 tcg_temp_free_i64(tmp
);
996 static DisasJumpType
op_vstm(DisasContext
*s
, DisasOps
*o
)
998 const uint8_t v3
= get_field(s
->fields
, v3
);
999 uint8_t v1
= get_field(s
->fields
, v1
);
1002 while (v3
< v1
|| (v3
- v1
+ 1) > 16) {
1003 gen_program_exception(s
, PGM_SPECIFICATION
);
1004 return DISAS_NORETURN
;
1007 /* Probe write access before actually modifying memory */
1008 tmp
= tcg_const_i64((v3
- v1
+ 1) * 16);
1009 gen_helper_probe_write_access(cpu_env
, o
->addr1
, tmp
);
1012 read_vec_element_i64(tmp
, v1
, 0, ES_64
);
1013 tcg_gen_qemu_st_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TEQ
);
1014 gen_addi_and_wrap_i64(s
, o
->addr1
, o
->addr1
, 8);
1015 read_vec_element_i64(tmp
, v1
, 1, ES_64
);
1016 tcg_gen_qemu_st_i64(tmp
, o
->addr1
, get_mem_index(s
), MO_TEQ
);
1020 gen_addi_and_wrap_i64(s
, o
->addr1
, o
->addr1
, 8);
1022 tcg_temp_free_i64(tmp
);
1026 static DisasJumpType
op_vstl(DisasContext
*s
, DisasOps
*o
)
1028 const int v1_offs
= vec_full_reg_offset(get_field(s
->fields
, v1
));
1029 TCGv_ptr a0
= tcg_temp_new_ptr();
1031 /* convert highest index into an actual length */
1032 tcg_gen_addi_i64(o
->in2
, o
->in2
, 1);
1033 tcg_gen_addi_ptr(a0
, cpu_env
, v1_offs
);
1034 gen_helper_vstl(cpu_env
, a0
, o
->addr1
, o
->in2
);
1035 tcg_temp_free_ptr(a0
);
1039 static DisasJumpType
op_vup(DisasContext
*s
, DisasOps
*o
)
1041 const bool logical
= s
->fields
->op2
== 0xd4 || s
->fields
->op2
== 0xd5;
1042 const uint8_t v1
= get_field(s
->fields
, v1
);
1043 const uint8_t v2
= get_field(s
->fields
, v2
);
1044 const uint8_t src_es
= get_field(s
->fields
, m3
);
1045 const uint8_t dst_es
= src_es
+ 1;
1046 int dst_idx
, src_idx
;
1049 if (src_es
> ES_32
) {
1050 gen_program_exception(s
, PGM_SPECIFICATION
);
1051 return DISAS_NORETURN
;
1054 tmp
= tcg_temp_new_i64();
1055 if (s
->fields
->op2
== 0xd7 || s
->fields
->op2
== 0xd5) {
1056 /* iterate backwards to avoid overwriting data we might need later */
1057 for (dst_idx
= NUM_VEC_ELEMENTS(dst_es
) - 1; dst_idx
>= 0; dst_idx
--) {
1059 read_vec_element_i64(tmp
, v2
, src_idx
,
1060 src_es
| (logical
? 0 : MO_SIGN
));
1061 write_vec_element_i64(tmp
, v1
, dst_idx
, dst_es
);
1065 /* iterate forward to avoid overwriting data we might need later */
1066 for (dst_idx
= 0; dst_idx
< NUM_VEC_ELEMENTS(dst_es
); dst_idx
++) {
1067 src_idx
= dst_idx
+ NUM_VEC_ELEMENTS(src_es
) / 2;
1068 read_vec_element_i64(tmp
, v2
, src_idx
,
1069 src_es
| (logical
? 0 : MO_SIGN
));
1070 write_vec_element_i64(tmp
, v1
, dst_idx
, dst_es
);
1073 tcg_temp_free_i64(tmp
);
1077 static DisasJumpType
op_va(DisasContext
*s
, DisasOps
*o
)
1079 const uint8_t es
= get_field(s
->fields
, m4
);
1082 gen_program_exception(s
, PGM_SPECIFICATION
);
1083 return DISAS_NORETURN
;
1084 } else if (es
== ES_128
) {
1085 gen_gvec128_3_i64(tcg_gen_add2_i64
, get_field(s
->fields
, v1
),
1086 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
));
1089 gen_gvec_fn_3(add
, es
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1090 get_field(s
->fields
, v3
));
1094 static void gen_acc(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
, uint8_t es
)
1096 const uint8_t msb_bit_nr
= NUM_VEC_ELEMENT_BITS(es
) - 1;
1097 TCGv_i64 msb_mask
= tcg_const_i64(dup_const(es
, 1ull << msb_bit_nr
));
1098 TCGv_i64 t1
= tcg_temp_new_i64();
1099 TCGv_i64 t2
= tcg_temp_new_i64();
1100 TCGv_i64 t3
= tcg_temp_new_i64();
1102 /* Calculate the carry into the MSB, ignoring the old MSBs */
1103 tcg_gen_andc_i64(t1
, a
, msb_mask
);
1104 tcg_gen_andc_i64(t2
, b
, msb_mask
);
1105 tcg_gen_add_i64(t1
, t1
, t2
);
1106 /* Calculate the MSB without any carry into it */
1107 tcg_gen_xor_i64(t3
, a
, b
);
1108 /* Calculate the carry out of the MSB in the MSB bit position */
1109 tcg_gen_and_i64(d
, a
, b
);
1110 tcg_gen_and_i64(t1
, t1
, t3
);
1111 tcg_gen_or_i64(d
, d
, t1
);
1112 /* Isolate and shift the carry into position */
1113 tcg_gen_and_i64(d
, d
, msb_mask
);
1114 tcg_gen_shri_i64(d
, d
, msb_bit_nr
);
1116 tcg_temp_free_i64(t1
);
1117 tcg_temp_free_i64(t2
);
1118 tcg_temp_free_i64(t3
);
1121 static void gen_acc8_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
1123 gen_acc(d
, a
, b
, ES_8
);
1126 static void gen_acc16_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
1128 gen_acc(d
, a
, b
, ES_16
);
1131 static void gen_acc_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
1133 TCGv_i32 t
= tcg_temp_new_i32();
1135 tcg_gen_add_i32(t
, a
, b
);
1136 tcg_gen_setcond_i32(TCG_COND_LTU
, d
, t
, b
);
1137 tcg_temp_free_i32(t
);
1140 static void gen_acc_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
1142 TCGv_i64 t
= tcg_temp_new_i64();
1144 tcg_gen_add_i64(t
, a
, b
);
1145 tcg_gen_setcond_i64(TCG_COND_LTU
, d
, t
, b
);
1146 tcg_temp_free_i64(t
);
1149 static void gen_acc2_i64(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
,
1150 TCGv_i64 ah
, TCGv_i64 bl
, TCGv_i64 bh
)
1152 TCGv_i64 th
= tcg_temp_new_i64();
1153 TCGv_i64 tl
= tcg_temp_new_i64();
1154 TCGv_i64 zero
= tcg_const_i64(0);
1156 tcg_gen_add2_i64(tl
, th
, al
, zero
, bl
, zero
);
1157 tcg_gen_add2_i64(tl
, th
, th
, zero
, ah
, zero
);
1158 tcg_gen_add2_i64(tl
, dl
, tl
, th
, bh
, zero
);
1159 tcg_gen_mov_i64(dh
, zero
);
1161 tcg_temp_free_i64(th
);
1162 tcg_temp_free_i64(tl
);
1163 tcg_temp_free_i64(zero
);
1166 static DisasJumpType
op_vacc(DisasContext
*s
, DisasOps
*o
)
1168 const uint8_t es
= get_field(s
->fields
, m4
);
1169 static const GVecGen3 g
[4] = {
1170 { .fni8
= gen_acc8_i64
, },
1171 { .fni8
= gen_acc16_i64
, },
1172 { .fni4
= gen_acc_i32
, },
1173 { .fni8
= gen_acc_i64
, },
1177 gen_program_exception(s
, PGM_SPECIFICATION
);
1178 return DISAS_NORETURN
;
1179 } else if (es
== ES_128
) {
1180 gen_gvec128_3_i64(gen_acc2_i64
, get_field(s
->fields
, v1
),
1181 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
));
1184 gen_gvec_3(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1185 get_field(s
->fields
, v3
), &g
[es
]);
1189 static void gen_ac2_i64(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
, TCGv_i64 ah
,
1190 TCGv_i64 bl
, TCGv_i64 bh
, TCGv_i64 cl
, TCGv_i64 ch
)
1192 TCGv_i64 tl
= tcg_temp_new_i64();
1193 TCGv_i64 th
= tcg_const_i64(0);
1195 /* extract the carry only */
1196 tcg_gen_extract_i64(tl
, cl
, 0, 1);
1197 tcg_gen_add2_i64(dl
, dh
, al
, ah
, bl
, bh
);
1198 tcg_gen_add2_i64(dl
, dh
, dl
, dh
, tl
, th
);
1200 tcg_temp_free_i64(tl
);
1201 tcg_temp_free_i64(th
);
1204 static DisasJumpType
op_vac(DisasContext
*s
, DisasOps
*o
)
1206 if (get_field(s
->fields
, m5
) != ES_128
) {
1207 gen_program_exception(s
, PGM_SPECIFICATION
);
1208 return DISAS_NORETURN
;
1211 gen_gvec128_4_i64(gen_ac2_i64
, get_field(s
->fields
, v1
),
1212 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
),
1213 get_field(s
->fields
, v4
));
1217 static void gen_accc2_i64(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
, TCGv_i64 ah
,
1218 TCGv_i64 bl
, TCGv_i64 bh
, TCGv_i64 cl
, TCGv_i64 ch
)
1220 TCGv_i64 tl
= tcg_temp_new_i64();
1221 TCGv_i64 th
= tcg_temp_new_i64();
1222 TCGv_i64 zero
= tcg_const_i64(0);
1224 tcg_gen_andi_i64(tl
, cl
, 1);
1225 tcg_gen_add2_i64(tl
, th
, tl
, zero
, al
, zero
);
1226 tcg_gen_add2_i64(tl
, th
, tl
, th
, bl
, zero
);
1227 tcg_gen_add2_i64(tl
, th
, th
, zero
, ah
, zero
);
1228 tcg_gen_add2_i64(tl
, dl
, tl
, th
, bh
, zero
);
1229 tcg_gen_mov_i64(dh
, zero
);
1231 tcg_temp_free_i64(tl
);
1232 tcg_temp_free_i64(th
);
1233 tcg_temp_free_i64(zero
);
1236 static DisasJumpType
op_vaccc(DisasContext
*s
, DisasOps
*o
)
1238 if (get_field(s
->fields
, m5
) != ES_128
) {
1239 gen_program_exception(s
, PGM_SPECIFICATION
);
1240 return DISAS_NORETURN
;
1243 gen_gvec128_4_i64(gen_accc2_i64
, get_field(s
->fields
, v1
),
1244 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
),
1245 get_field(s
->fields
, v4
));
1249 static DisasJumpType
op_vn(DisasContext
*s
, DisasOps
*o
)
1251 gen_gvec_fn_3(and, ES_8
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1252 get_field(s
->fields
, v3
));
1256 static DisasJumpType
op_vnc(DisasContext
*s
, DisasOps
*o
)
1258 gen_gvec_fn_3(andc
, ES_8
, get_field(s
->fields
, v1
),
1259 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
));
1263 static void gen_avg_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
1265 TCGv_i64 t0
= tcg_temp_new_i64();
1266 TCGv_i64 t1
= tcg_temp_new_i64();
1268 tcg_gen_ext_i32_i64(t0
, a
);
1269 tcg_gen_ext_i32_i64(t1
, b
);
1270 tcg_gen_add_i64(t0
, t0
, t1
);
1271 tcg_gen_addi_i64(t0
, t0
, 1);
1272 tcg_gen_shri_i64(t0
, t0
, 1);
1273 tcg_gen_extrl_i64_i32(d
, t0
);
1279 static void gen_avg_i64(TCGv_i64 dl
, TCGv_i64 al
, TCGv_i64 bl
)
1281 TCGv_i64 dh
= tcg_temp_new_i64();
1282 TCGv_i64 ah
= tcg_temp_new_i64();
1283 TCGv_i64 bh
= tcg_temp_new_i64();
1285 /* extending the sign by one bit is sufficient */
1286 tcg_gen_extract_i64(ah
, al
, 63, 1);
1287 tcg_gen_extract_i64(bh
, bl
, 63, 1);
1288 tcg_gen_add2_i64(dl
, dh
, al
, ah
, bl
, bh
);
1289 gen_addi2_i64(dl
, dh
, dl
, dh
, 1);
1290 tcg_gen_extract2_i64(dl
, dl
, dh
, 1);
1292 tcg_temp_free_i64(dh
);
1293 tcg_temp_free_i64(ah
);
1294 tcg_temp_free_i64(bh
);
1297 static DisasJumpType
op_vavg(DisasContext
*s
, DisasOps
*o
)
1299 const uint8_t es
= get_field(s
->fields
, m4
);
1300 static const GVecGen3 g
[4] = {
1301 { .fno
= gen_helper_gvec_vavg8
, },
1302 { .fno
= gen_helper_gvec_vavg16
, },
1303 { .fni4
= gen_avg_i32
, },
1304 { .fni8
= gen_avg_i64
, },
1308 gen_program_exception(s
, PGM_SPECIFICATION
);
1309 return DISAS_NORETURN
;
1311 gen_gvec_3(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1312 get_field(s
->fields
, v3
), &g
[es
]);
1316 static void gen_avgl_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
1318 TCGv_i64 t0
= tcg_temp_new_i64();
1319 TCGv_i64 t1
= tcg_temp_new_i64();
1321 tcg_gen_extu_i32_i64(t0
, a
);
1322 tcg_gen_extu_i32_i64(t1
, b
);
1323 tcg_gen_add_i64(t0
, t0
, t1
);
1324 tcg_gen_addi_i64(t0
, t0
, 1);
1325 tcg_gen_shri_i64(t0
, t0
, 1);
1326 tcg_gen_extrl_i64_i32(d
, t0
);
1332 static void gen_avgl_i64(TCGv_i64 dl
, TCGv_i64 al
, TCGv_i64 bl
)
1334 TCGv_i64 dh
= tcg_temp_new_i64();
1335 TCGv_i64 zero
= tcg_const_i64(0);
1337 tcg_gen_add2_i64(dl
, dh
, al
, zero
, bl
, zero
);
1338 gen_addi2_i64(dl
, dh
, dl
, dh
, 1);
1339 tcg_gen_extract2_i64(dl
, dl
, dh
, 1);
1341 tcg_temp_free_i64(dh
);
1342 tcg_temp_free_i64(zero
);
1345 static DisasJumpType
op_vavgl(DisasContext
*s
, DisasOps
*o
)
1347 const uint8_t es
= get_field(s
->fields
, m4
);
1348 static const GVecGen3 g
[4] = {
1349 { .fno
= gen_helper_gvec_vavgl8
, },
1350 { .fno
= gen_helper_gvec_vavgl16
, },
1351 { .fni4
= gen_avgl_i32
, },
1352 { .fni8
= gen_avgl_i64
, },
1356 gen_program_exception(s
, PGM_SPECIFICATION
);
1357 return DISAS_NORETURN
;
1359 gen_gvec_3(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1360 get_field(s
->fields
, v3
), &g
[es
]);
1364 static DisasJumpType
op_vcksm(DisasContext
*s
, DisasOps
*o
)
1366 TCGv_i32 tmp
= tcg_temp_new_i32();
1367 TCGv_i32 sum
= tcg_temp_new_i32();
1370 read_vec_element_i32(sum
, get_field(s
->fields
, v3
), 1, ES_32
);
1371 for (i
= 0; i
< 4; i
++) {
1372 read_vec_element_i32(tmp
, get_field(s
->fields
, v2
), i
, ES_32
);
1373 tcg_gen_add2_i32(tmp
, sum
, sum
, sum
, tmp
, tmp
);
1375 zero_vec(get_field(s
->fields
, v1
));
1376 write_vec_element_i32(sum
, get_field(s
->fields
, v1
), 1, ES_32
);
1378 tcg_temp_free_i32(tmp
);
1379 tcg_temp_free_i32(sum
);
1383 static DisasJumpType
op_vec(DisasContext
*s
, DisasOps
*o
)
1385 uint8_t es
= get_field(s
->fields
, m3
);
1386 const uint8_t enr
= NUM_VEC_ELEMENTS(es
) / 2 - 1;
1389 gen_program_exception(s
, PGM_SPECIFICATION
);
1390 return DISAS_NORETURN
;
1392 if (s
->fields
->op2
== 0xdb) {
1396 o
->in1
= tcg_temp_new_i64();
1397 o
->in2
= tcg_temp_new_i64();
1398 read_vec_element_i64(o
->in1
, get_field(s
->fields
, v1
), enr
, es
);
1399 read_vec_element_i64(o
->in2
, get_field(s
->fields
, v2
), enr
, es
);
1403 static DisasJumpType
op_vc(DisasContext
*s
, DisasOps
*o
)
1405 const uint8_t es
= get_field(s
->fields
, m4
);
1406 TCGCond cond
= s
->insn
->data
;
1409 gen_program_exception(s
, PGM_SPECIFICATION
);
1410 return DISAS_NORETURN
;
1413 tcg_gen_gvec_cmp(cond
, es
,
1414 vec_full_reg_offset(get_field(s
->fields
, v1
)),
1415 vec_full_reg_offset(get_field(s
->fields
, v2
)),
1416 vec_full_reg_offset(get_field(s
->fields
, v3
)), 16, 16);
1417 if (get_field(s
->fields
, m5
) & 0x1) {
1418 TCGv_i64 low
= tcg_temp_new_i64();
1419 TCGv_i64 high
= tcg_temp_new_i64();
1421 read_vec_element_i64(high
, get_field(s
->fields
, v1
), 0, ES_64
);
1422 read_vec_element_i64(low
, get_field(s
->fields
, v1
), 1, ES_64
);
1423 gen_op_update2_cc_i64(s
, CC_OP_VC
, low
, high
);
1425 tcg_temp_free_i64(low
);
1426 tcg_temp_free_i64(high
);
1431 static void gen_clz_i32(TCGv_i32 d
, TCGv_i32 a
)
1433 tcg_gen_clzi_i32(d
, a
, 32);
1436 static void gen_clz_i64(TCGv_i64 d
, TCGv_i64 a
)
1438 tcg_gen_clzi_i64(d
, a
, 64);
1441 static DisasJumpType
op_vclz(DisasContext
*s
, DisasOps
*o
)
1443 const uint8_t es
= get_field(s
->fields
, m3
);
1444 static const GVecGen2 g
[4] = {
1445 { .fno
= gen_helper_gvec_vclz8
, },
1446 { .fno
= gen_helper_gvec_vclz16
, },
1447 { .fni4
= gen_clz_i32
, },
1448 { .fni8
= gen_clz_i64
, },
1452 gen_program_exception(s
, PGM_SPECIFICATION
);
1453 return DISAS_NORETURN
;
1455 gen_gvec_2(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
), &g
[es
]);
1459 static void gen_ctz_i32(TCGv_i32 d
, TCGv_i32 a
)
1461 tcg_gen_ctzi_i32(d
, a
, 32);
1464 static void gen_ctz_i64(TCGv_i64 d
, TCGv_i64 a
)
1466 tcg_gen_ctzi_i64(d
, a
, 64);
1469 static DisasJumpType
op_vctz(DisasContext
*s
, DisasOps
*o
)
1471 const uint8_t es
= get_field(s
->fields
, m3
);
1472 static const GVecGen2 g
[4] = {
1473 { .fno
= gen_helper_gvec_vctz8
, },
1474 { .fno
= gen_helper_gvec_vctz16
, },
1475 { .fni4
= gen_ctz_i32
, },
1476 { .fni8
= gen_ctz_i64
, },
1480 gen_program_exception(s
, PGM_SPECIFICATION
);
1481 return DISAS_NORETURN
;
1483 gen_gvec_2(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
), &g
[es
]);
1487 static DisasJumpType
op_vx(DisasContext
*s
, DisasOps
*o
)
1489 gen_gvec_fn_3(xor, ES_8
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1490 get_field(s
->fields
, v3
));
1494 static DisasJumpType
op_vgfm(DisasContext
*s
, DisasOps
*o
)
1496 const uint8_t es
= get_field(s
->fields
, m4
);
1497 static const GVecGen3 g
[4] = {
1498 { .fno
= gen_helper_gvec_vgfm8
, },
1499 { .fno
= gen_helper_gvec_vgfm16
, },
1500 { .fno
= gen_helper_gvec_vgfm32
, },
1501 { .fno
= gen_helper_gvec_vgfm64
, },
1505 gen_program_exception(s
, PGM_SPECIFICATION
);
1506 return DISAS_NORETURN
;
1508 gen_gvec_3(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1509 get_field(s
->fields
, v3
), &g
[es
]);
1513 static DisasJumpType
op_vgfma(DisasContext
*s
, DisasOps
*o
)
1515 const uint8_t es
= get_field(s
->fields
, m5
);
1516 static const GVecGen4 g
[4] = {
1517 { .fno
= gen_helper_gvec_vgfma8
, },
1518 { .fno
= gen_helper_gvec_vgfma16
, },
1519 { .fno
= gen_helper_gvec_vgfma32
, },
1520 { .fno
= gen_helper_gvec_vgfma64
, },
1524 gen_program_exception(s
, PGM_SPECIFICATION
);
1525 return DISAS_NORETURN
;
1527 gen_gvec_4(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1528 get_field(s
->fields
, v3
), get_field(s
->fields
, v4
), &g
[es
]);
1532 static DisasJumpType
op_vlc(DisasContext
*s
, DisasOps
*o
)
1534 const uint8_t es
= get_field(s
->fields
, m3
);
1537 gen_program_exception(s
, PGM_SPECIFICATION
);
1538 return DISAS_NORETURN
;
1541 gen_gvec_fn_2(neg
, es
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
));
1545 static DisasJumpType
op_vlp(DisasContext
*s
, DisasOps
*o
)
1547 const uint8_t es
= get_field(s
->fields
, m3
);
1550 gen_program_exception(s
, PGM_SPECIFICATION
);
1551 return DISAS_NORETURN
;
1554 gen_gvec_fn_2(abs
, es
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
));
1558 static DisasJumpType
op_vmx(DisasContext
*s
, DisasOps
*o
)
1560 const uint8_t v1
= get_field(s
->fields
, v1
);
1561 const uint8_t v2
= get_field(s
->fields
, v2
);
1562 const uint8_t v3
= get_field(s
->fields
, v3
);
1563 const uint8_t es
= get_field(s
->fields
, m4
);
1566 gen_program_exception(s
, PGM_SPECIFICATION
);
1567 return DISAS_NORETURN
;
1570 switch (s
->fields
->op2
) {
1572 gen_gvec_fn_3(smax
, es
, v1
, v2
, v3
);
1575 gen_gvec_fn_3(umax
, es
, v1
, v2
, v3
);
1578 gen_gvec_fn_3(smin
, es
, v1
, v2
, v3
);
1581 gen_gvec_fn_3(umin
, es
, v1
, v2
, v3
);
1584 g_assert_not_reached();
1589 static void gen_mal_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
, TCGv_i32 c
)
1591 TCGv_i32 t0
= tcg_temp_new_i32();
1593 tcg_gen_mul_i32(t0
, a
, b
);
1594 tcg_gen_add_i32(d
, t0
, c
);
1596 tcg_temp_free_i32(t0
);
1599 static void gen_mah_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
, TCGv_i32 c
)
1601 TCGv_i64 t0
= tcg_temp_new_i64();
1602 TCGv_i64 t1
= tcg_temp_new_i64();
1603 TCGv_i64 t2
= tcg_temp_new_i64();
1605 tcg_gen_ext_i32_i64(t0
, a
);
1606 tcg_gen_ext_i32_i64(t1
, b
);
1607 tcg_gen_ext_i32_i64(t2
, c
);
1608 tcg_gen_mul_i64(t0
, t0
, t1
);
1609 tcg_gen_add_i64(t0
, t0
, t2
);
1610 tcg_gen_extrh_i64_i32(d
, t0
);
1617 static void gen_malh_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
, TCGv_i32 c
)
1619 TCGv_i64 t0
= tcg_temp_new_i64();
1620 TCGv_i64 t1
= tcg_temp_new_i64();
1621 TCGv_i64 t2
= tcg_temp_new_i64();
1623 tcg_gen_extu_i32_i64(t0
, a
);
1624 tcg_gen_extu_i32_i64(t1
, b
);
1625 tcg_gen_extu_i32_i64(t2
, c
);
1626 tcg_gen_mul_i64(t0
, t0
, t1
);
1627 tcg_gen_add_i64(t0
, t0
, t2
);
1628 tcg_gen_extrh_i64_i32(d
, t0
);
1635 static DisasJumpType
op_vma(DisasContext
*s
, DisasOps
*o
)
1637 const uint8_t es
= get_field(s
->fields
, m5
);
1638 static const GVecGen4 g_vmal
[3] = {
1639 { .fno
= gen_helper_gvec_vmal8
, },
1640 { .fno
= gen_helper_gvec_vmal16
, },
1641 { .fni4
= gen_mal_i32
, },
1643 static const GVecGen4 g_vmah
[3] = {
1644 { .fno
= gen_helper_gvec_vmah8
, },
1645 { .fno
= gen_helper_gvec_vmah16
, },
1646 { .fni4
= gen_mah_i32
, },
1648 static const GVecGen4 g_vmalh
[3] = {
1649 { .fno
= gen_helper_gvec_vmalh8
, },
1650 { .fno
= gen_helper_gvec_vmalh16
, },
1651 { .fni4
= gen_malh_i32
, },
1653 static const GVecGen4 g_vmae
[3] = {
1654 { .fno
= gen_helper_gvec_vmae8
, },
1655 { .fno
= gen_helper_gvec_vmae16
, },
1656 { .fno
= gen_helper_gvec_vmae32
, },
1658 static const GVecGen4 g_vmale
[3] = {
1659 { .fno
= gen_helper_gvec_vmale8
, },
1660 { .fno
= gen_helper_gvec_vmale16
, },
1661 { .fno
= gen_helper_gvec_vmale32
, },
1663 static const GVecGen4 g_vmao
[3] = {
1664 { .fno
= gen_helper_gvec_vmao8
, },
1665 { .fno
= gen_helper_gvec_vmao16
, },
1666 { .fno
= gen_helper_gvec_vmao32
, },
1668 static const GVecGen4 g_vmalo
[3] = {
1669 { .fno
= gen_helper_gvec_vmalo8
, },
1670 { .fno
= gen_helper_gvec_vmalo16
, },
1671 { .fno
= gen_helper_gvec_vmalo32
, },
1676 gen_program_exception(s
, PGM_SPECIFICATION
);
1677 return DISAS_NORETURN
;
1680 switch (s
->fields
->op2
) {
1703 g_assert_not_reached();
1706 gen_gvec_4(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1707 get_field(s
->fields
, v3
), get_field(s
->fields
, v4
), fn
);
1711 static void gen_mh_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
1713 TCGv_i32 t
= tcg_temp_new_i32();
1715 tcg_gen_muls2_i32(t
, d
, a
, b
);
1716 tcg_temp_free_i32(t
);
1719 static void gen_mlh_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
1721 TCGv_i32 t
= tcg_temp_new_i32();
1723 tcg_gen_mulu2_i32(t
, d
, a
, b
);
1724 tcg_temp_free_i32(t
);
1727 static DisasJumpType
op_vm(DisasContext
*s
, DisasOps
*o
)
1729 const uint8_t es
= get_field(s
->fields
, m4
);
1730 static const GVecGen3 g_vmh
[3] = {
1731 { .fno
= gen_helper_gvec_vmh8
, },
1732 { .fno
= gen_helper_gvec_vmh16
, },
1733 { .fni4
= gen_mh_i32
, },
1735 static const GVecGen3 g_vmlh
[3] = {
1736 { .fno
= gen_helper_gvec_vmlh8
, },
1737 { .fno
= gen_helper_gvec_vmlh16
, },
1738 { .fni4
= gen_mlh_i32
, },
1740 static const GVecGen3 g_vme
[3] = {
1741 { .fno
= gen_helper_gvec_vme8
, },
1742 { .fno
= gen_helper_gvec_vme16
, },
1743 { .fno
= gen_helper_gvec_vme32
, },
1745 static const GVecGen3 g_vmle
[3] = {
1746 { .fno
= gen_helper_gvec_vmle8
, },
1747 { .fno
= gen_helper_gvec_vmle16
, },
1748 { .fno
= gen_helper_gvec_vmle32
, },
1750 static const GVecGen3 g_vmo
[3] = {
1751 { .fno
= gen_helper_gvec_vmo8
, },
1752 { .fno
= gen_helper_gvec_vmo16
, },
1753 { .fno
= gen_helper_gvec_vmo32
, },
1755 static const GVecGen3 g_vmlo
[3] = {
1756 { .fno
= gen_helper_gvec_vmlo8
, },
1757 { .fno
= gen_helper_gvec_vmlo16
, },
1758 { .fno
= gen_helper_gvec_vmlo32
, },
1763 gen_program_exception(s
, PGM_SPECIFICATION
);
1764 return DISAS_NORETURN
;
1767 switch (s
->fields
->op2
) {
1769 gen_gvec_fn_3(mul
, es
, get_field(s
->fields
, v1
),
1770 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
));
1791 g_assert_not_reached();
1794 gen_gvec_3(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1795 get_field(s
->fields
, v3
), fn
);
1799 static DisasJumpType
op_vnn(DisasContext
*s
, DisasOps
*o
)
1801 gen_gvec_fn_3(nand
, ES_8
, get_field(s
->fields
, v1
),
1802 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
));
1806 static DisasJumpType
op_vno(DisasContext
*s
, DisasOps
*o
)
1808 gen_gvec_fn_3(nor
, ES_8
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1809 get_field(s
->fields
, v3
));
1813 static DisasJumpType
op_vnx(DisasContext
*s
, DisasOps
*o
)
1815 gen_gvec_fn_3(eqv
, ES_8
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1816 get_field(s
->fields
, v3
));
1820 static DisasJumpType
op_vo(DisasContext
*s
, DisasOps
*o
)
1822 gen_gvec_fn_3(or, ES_8
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1823 get_field(s
->fields
, v3
));
1827 static DisasJumpType
op_voc(DisasContext
*s
, DisasOps
*o
)
1829 gen_gvec_fn_3(orc
, ES_8
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1830 get_field(s
->fields
, v3
));
1834 static DisasJumpType
op_vpopct(DisasContext
*s
, DisasOps
*o
)
1836 const uint8_t es
= get_field(s
->fields
, m3
);
1837 static const GVecGen2 g
[4] = {
1838 { .fno
= gen_helper_gvec_vpopct8
, },
1839 { .fno
= gen_helper_gvec_vpopct16
, },
1840 { .fni4
= tcg_gen_ctpop_i32
, },
1841 { .fni8
= tcg_gen_ctpop_i64
, },
1844 if (es
> ES_64
|| (es
!= ES_8
&& !s390_has_feat(S390_FEAT_VECTOR_ENH
))) {
1845 gen_program_exception(s
, PGM_SPECIFICATION
);
1846 return DISAS_NORETURN
;
1849 gen_gvec_2(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
), &g
[es
]);
1853 static void gen_rll_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
1855 TCGv_i32 t0
= tcg_temp_new_i32();
1857 tcg_gen_andi_i32(t0
, b
, 31);
1858 tcg_gen_rotl_i32(d
, a
, t0
);
1859 tcg_temp_free_i32(t0
);
1862 static void gen_rll_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
1864 TCGv_i64 t0
= tcg_temp_new_i64();
1866 tcg_gen_andi_i64(t0
, b
, 63);
1867 tcg_gen_rotl_i64(d
, a
, t0
);
1868 tcg_temp_free_i64(t0
);
1871 static DisasJumpType
op_verllv(DisasContext
*s
, DisasOps
*o
)
1873 const uint8_t es
= get_field(s
->fields
, m4
);
1874 static const GVecGen3 g
[4] = {
1875 { .fno
= gen_helper_gvec_verllv8
, },
1876 { .fno
= gen_helper_gvec_verllv16
, },
1877 { .fni4
= gen_rll_i32
, },
1878 { .fni8
= gen_rll_i64
, },
1882 gen_program_exception(s
, PGM_SPECIFICATION
);
1883 return DISAS_NORETURN
;
1886 gen_gvec_3(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1887 get_field(s
->fields
, v3
), &g
[es
]);
1891 static DisasJumpType
op_verll(DisasContext
*s
, DisasOps
*o
)
1893 const uint8_t es
= get_field(s
->fields
, m4
);
1894 static const GVecGen2s g
[4] = {
1895 { .fno
= gen_helper_gvec_verll8
, },
1896 { .fno
= gen_helper_gvec_verll16
, },
1897 { .fni4
= gen_rll_i32
, },
1898 { .fni8
= gen_rll_i64
, },
1902 gen_program_exception(s
, PGM_SPECIFICATION
);
1903 return DISAS_NORETURN
;
1905 gen_gvec_2s(get_field(s
->fields
, v1
), get_field(s
->fields
, v3
), o
->addr1
,
1910 static void gen_rim_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
, int32_t c
)
1912 TCGv_i32 t
= tcg_temp_new_i32();
1914 tcg_gen_rotli_i32(t
, a
, c
& 31);
1915 tcg_gen_and_i32(t
, t
, b
);
1916 tcg_gen_andc_i32(d
, d
, b
);
1917 tcg_gen_or_i32(d
, d
, t
);
1919 tcg_temp_free_i32(t
);
1922 static void gen_rim_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
, int64_t c
)
1924 TCGv_i64 t
= tcg_temp_new_i64();
1926 tcg_gen_rotli_i64(t
, a
, c
& 63);
1927 tcg_gen_and_i64(t
, t
, b
);
1928 tcg_gen_andc_i64(d
, d
, b
);
1929 tcg_gen_or_i64(d
, d
, t
);
1931 tcg_temp_free_i64(t
);
1934 static DisasJumpType
op_verim(DisasContext
*s
, DisasOps
*o
)
1936 const uint8_t es
= get_field(s
->fields
, m5
);
1937 const uint8_t i4
= get_field(s
->fields
, i4
) &
1938 (NUM_VEC_ELEMENT_BITS(es
) - 1);
1939 static const GVecGen3i g
[4] = {
1940 { .fno
= gen_helper_gvec_verim8
, },
1941 { .fno
= gen_helper_gvec_verim16
, },
1942 { .fni4
= gen_rim_i32
,
1943 .load_dest
= true, },
1944 { .fni8
= gen_rim_i64
,
1945 .load_dest
= true, },
1949 gen_program_exception(s
, PGM_SPECIFICATION
);
1950 return DISAS_NORETURN
;
1953 gen_gvec_3i(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
1954 get_field(s
->fields
, v3
), i4
, &g
[es
]);
1958 static DisasJumpType
op_vesv(DisasContext
*s
, DisasOps
*o
)
1960 const uint8_t es
= get_field(s
->fields
, m4
);
1961 const uint8_t v1
= get_field(s
->fields
, v1
);
1962 const uint8_t v2
= get_field(s
->fields
, v2
);
1963 const uint8_t v3
= get_field(s
->fields
, v3
);
1966 gen_program_exception(s
, PGM_SPECIFICATION
);
1967 return DISAS_NORETURN
;
1970 switch (s
->fields
->op2
) {
1972 gen_gvec_fn_3(shlv
, es
, v1
, v2
, v3
);
1975 gen_gvec_fn_3(sarv
, es
, v1
, v2
, v3
);
1978 gen_gvec_fn_3(shrv
, es
, v1
, v2
, v3
);
1981 g_assert_not_reached();
1986 static DisasJumpType
op_ves(DisasContext
*s
, DisasOps
*o
)
1988 const uint8_t es
= get_field(s
->fields
, m4
);
1989 const uint8_t d2
= get_field(s
->fields
, d2
) &
1990 (NUM_VEC_ELEMENT_BITS(es
) - 1);
1991 const uint8_t v1
= get_field(s
->fields
, v1
);
1992 const uint8_t v3
= get_field(s
->fields
, v3
);
1996 gen_program_exception(s
, PGM_SPECIFICATION
);
1997 return DISAS_NORETURN
;
2000 if (likely(!get_field(s
->fields
, b2
))) {
2001 switch (s
->fields
->op2
) {
2003 gen_gvec_fn_2i(shli
, es
, v1
, v3
, d2
);
2006 gen_gvec_fn_2i(sari
, es
, v1
, v3
, d2
);
2009 gen_gvec_fn_2i(shri
, es
, v1
, v3
, d2
);
2012 g_assert_not_reached();
2015 shift
= tcg_temp_new_i32();
2016 tcg_gen_extrl_i64_i32(shift
, o
->addr1
);
2017 tcg_gen_andi_i32(shift
, shift
, NUM_VEC_ELEMENT_BITS(es
) - 1);
2018 switch (s
->fields
->op2
) {
2020 gen_gvec_fn_2s(shls
, es
, v1
, v3
, shift
);
2023 gen_gvec_fn_2s(sars
, es
, v1
, v3
, shift
);
2026 gen_gvec_fn_2s(shrs
, es
, v1
, v3
, shift
);
2029 g_assert_not_reached();
2031 tcg_temp_free_i32(shift
);
2036 static DisasJumpType
op_vsl(DisasContext
*s
, DisasOps
*o
)
2038 TCGv_i64 shift
= tcg_temp_new_i64();
2040 read_vec_element_i64(shift
, get_field(s
->fields
, v3
), 7, ES_8
);
2041 if (s
->fields
->op2
== 0x74) {
2042 tcg_gen_andi_i64(shift
, shift
, 0x7);
2044 tcg_gen_andi_i64(shift
, shift
, 0x78);
2047 gen_gvec_2i_ool(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2048 shift
, 0, gen_helper_gvec_vsl
);
2049 tcg_temp_free_i64(shift
);
2053 static DisasJumpType
op_vsldb(DisasContext
*s
, DisasOps
*o
)
2055 const uint8_t i4
= get_field(s
->fields
, i4
) & 0xf;
2056 const int left_shift
= (i4
& 7) * 8;
2057 const int right_shift
= 64 - left_shift
;
2058 TCGv_i64 t0
= tcg_temp_new_i64();
2059 TCGv_i64 t1
= tcg_temp_new_i64();
2060 TCGv_i64 t2
= tcg_temp_new_i64();
2062 if ((i4
& 8) == 0) {
2063 read_vec_element_i64(t0
, get_field(s
->fields
, v2
), 0, ES_64
);
2064 read_vec_element_i64(t1
, get_field(s
->fields
, v2
), 1, ES_64
);
2065 read_vec_element_i64(t2
, get_field(s
->fields
, v3
), 0, ES_64
);
2067 read_vec_element_i64(t0
, get_field(s
->fields
, v2
), 1, ES_64
);
2068 read_vec_element_i64(t1
, get_field(s
->fields
, v3
), 0, ES_64
);
2069 read_vec_element_i64(t2
, get_field(s
->fields
, v3
), 1, ES_64
);
2071 tcg_gen_extract2_i64(t0
, t1
, t0
, right_shift
);
2072 tcg_gen_extract2_i64(t1
, t2
, t1
, right_shift
);
2073 write_vec_element_i64(t0
, get_field(s
->fields
, v1
), 0, ES_64
);
2074 write_vec_element_i64(t1
, get_field(s
->fields
, v1
), 1, ES_64
);
2082 static DisasJumpType
op_vsra(DisasContext
*s
, DisasOps
*o
)
2084 TCGv_i64 shift
= tcg_temp_new_i64();
2086 read_vec_element_i64(shift
, get_field(s
->fields
, v3
), 7, ES_8
);
2087 if (s
->fields
->op2
== 0x7e) {
2088 tcg_gen_andi_i64(shift
, shift
, 0x7);
2090 tcg_gen_andi_i64(shift
, shift
, 0x78);
2093 gen_gvec_2i_ool(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2094 shift
, 0, gen_helper_gvec_vsra
);
2095 tcg_temp_free_i64(shift
);
2099 static DisasJumpType
op_vsrl(DisasContext
*s
, DisasOps
*o
)
2101 TCGv_i64 shift
= tcg_temp_new_i64();
2103 read_vec_element_i64(shift
, get_field(s
->fields
, v3
), 7, ES_8
);
2104 if (s
->fields
->op2
== 0x7c) {
2105 tcg_gen_andi_i64(shift
, shift
, 0x7);
2107 tcg_gen_andi_i64(shift
, shift
, 0x78);
2110 gen_gvec_2i_ool(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2111 shift
, 0, gen_helper_gvec_vsrl
);
2112 tcg_temp_free_i64(shift
);
2116 static DisasJumpType
op_vs(DisasContext
*s
, DisasOps
*o
)
2118 const uint8_t es
= get_field(s
->fields
, m4
);
2121 gen_program_exception(s
, PGM_SPECIFICATION
);
2122 return DISAS_NORETURN
;
2123 } else if (es
== ES_128
) {
2124 gen_gvec128_3_i64(tcg_gen_sub2_i64
, get_field(s
->fields
, v1
),
2125 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
));
2128 gen_gvec_fn_3(sub
, es
, get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2129 get_field(s
->fields
, v3
));
2133 static void gen_scbi_i32(TCGv_i32 d
, TCGv_i32 a
, TCGv_i32 b
)
2135 tcg_gen_setcond_i32(TCG_COND_LTU
, d
, a
, b
);
2138 static void gen_scbi_i64(TCGv_i64 d
, TCGv_i64 a
, TCGv_i64 b
)
2140 tcg_gen_setcond_i64(TCG_COND_LTU
, d
, a
, b
);
2143 static void gen_scbi2_i64(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
,
2144 TCGv_i64 ah
, TCGv_i64 bl
, TCGv_i64 bh
)
2146 TCGv_i64 th
= tcg_temp_new_i64();
2147 TCGv_i64 tl
= tcg_temp_new_i64();
2148 TCGv_i64 zero
= tcg_const_i64(0);
2150 tcg_gen_sub2_i64(tl
, th
, al
, zero
, bl
, zero
);
2151 tcg_gen_andi_i64(th
, th
, 1);
2152 tcg_gen_sub2_i64(tl
, th
, ah
, zero
, th
, zero
);
2153 tcg_gen_sub2_i64(tl
, th
, tl
, th
, bh
, zero
);
2154 tcg_gen_andi_i64(dl
, th
, 1);
2155 tcg_gen_mov_i64(dh
, zero
);
2157 tcg_temp_free_i64(th
);
2158 tcg_temp_free_i64(tl
);
2159 tcg_temp_free_i64(zero
);
2162 static DisasJumpType
op_vscbi(DisasContext
*s
, DisasOps
*o
)
2164 const uint8_t es
= get_field(s
->fields
, m4
);
2165 static const GVecGen3 g
[4] = {
2166 { .fno
= gen_helper_gvec_vscbi8
, },
2167 { .fno
= gen_helper_gvec_vscbi16
, },
2168 { .fni4
= gen_scbi_i32
, },
2169 { .fni8
= gen_scbi_i64
, },
2173 gen_program_exception(s
, PGM_SPECIFICATION
);
2174 return DISAS_NORETURN
;
2175 } else if (es
== ES_128
) {
2176 gen_gvec128_3_i64(gen_scbi2_i64
, get_field(s
->fields
, v1
),
2177 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
));
2180 gen_gvec_3(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2181 get_field(s
->fields
, v3
), &g
[es
]);
2185 static void gen_sbi2_i64(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
, TCGv_i64 ah
,
2186 TCGv_i64 bl
, TCGv_i64 bh
, TCGv_i64 cl
, TCGv_i64 ch
)
2188 TCGv_i64 tl
= tcg_temp_new_i64();
2189 TCGv_i64 zero
= tcg_const_i64(0);
2191 tcg_gen_andi_i64(tl
, cl
, 1);
2192 tcg_gen_sub2_i64(dl
, dh
, al
, ah
, bl
, bh
);
2193 tcg_gen_sub2_i64(dl
, dh
, dl
, dh
, tl
, zero
);
2194 tcg_temp_free_i64(tl
);
2195 tcg_temp_free_i64(zero
);
2198 static DisasJumpType
op_vsbi(DisasContext
*s
, DisasOps
*o
)
2200 if (get_field(s
->fields
, m5
) != ES_128
) {
2201 gen_program_exception(s
, PGM_SPECIFICATION
);
2202 return DISAS_NORETURN
;
2205 gen_gvec128_4_i64(gen_sbi2_i64
, get_field(s
->fields
, v1
),
2206 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
),
2207 get_field(s
->fields
, v4
));
2211 static void gen_sbcbi2_i64(TCGv_i64 dl
, TCGv_i64 dh
, TCGv_i64 al
, TCGv_i64 ah
,
2212 TCGv_i64 bl
, TCGv_i64 bh
, TCGv_i64 cl
, TCGv_i64 ch
)
2214 TCGv_i64 th
= tcg_temp_new_i64();
2215 TCGv_i64 tl
= tcg_temp_new_i64();
2216 TCGv_i64 zero
= tcg_const_i64(0);
2218 tcg_gen_andi_i64(tl
, cl
, 1);
2219 tcg_gen_sub2_i64(tl
, th
, al
, zero
, tl
, zero
);
2220 tcg_gen_sub2_i64(tl
, th
, tl
, th
, bl
, zero
);
2221 tcg_gen_andi_i64(th
, th
, 1);
2222 tcg_gen_sub2_i64(tl
, th
, ah
, zero
, th
, zero
);
2223 tcg_gen_sub2_i64(tl
, th
, tl
, th
, bh
, zero
);
2224 tcg_gen_andi_i64(dl
, th
, 1);
2225 tcg_gen_mov_i64(dh
, zero
);
2227 tcg_temp_free_i64(tl
);
2228 tcg_temp_free_i64(th
);
2229 tcg_temp_free_i64(zero
);
2232 static DisasJumpType
op_vsbcbi(DisasContext
*s
, DisasOps
*o
)
2234 if (get_field(s
->fields
, m5
) != ES_128
) {
2235 gen_program_exception(s
, PGM_SPECIFICATION
);
2236 return DISAS_NORETURN
;
2239 gen_gvec128_4_i64(gen_sbcbi2_i64
, get_field(s
->fields
, v1
),
2240 get_field(s
->fields
, v2
), get_field(s
->fields
, v3
),
2241 get_field(s
->fields
, v4
));
2245 static DisasJumpType
op_vsumg(DisasContext
*s
, DisasOps
*o
)
2247 const uint8_t es
= get_field(s
->fields
, m4
);
2251 if (es
== ES_8
|| es
> ES_32
) {
2252 gen_program_exception(s
, PGM_SPECIFICATION
);
2253 return DISAS_NORETURN
;
2256 sum
= tcg_temp_new_i64();
2257 tmp
= tcg_temp_new_i64();
2258 for (dst_idx
= 0; dst_idx
< 2; dst_idx
++) {
2259 uint8_t idx
= dst_idx
* NUM_VEC_ELEMENTS(es
) / 2;
2260 const uint8_t max_idx
= idx
+ NUM_VEC_ELEMENTS(es
) / 2 - 1;
2262 read_vec_element_i64(sum
, get_field(s
->fields
, v3
), max_idx
, es
);
2263 for (; idx
<= max_idx
; idx
++) {
2264 read_vec_element_i64(tmp
, get_field(s
->fields
, v2
), idx
, es
);
2265 tcg_gen_add_i64(sum
, sum
, tmp
);
2267 write_vec_element_i64(sum
, get_field(s
->fields
, v1
), dst_idx
, ES_64
);
2269 tcg_temp_free_i64(sum
);
2270 tcg_temp_free_i64(tmp
);
2274 static DisasJumpType
op_vsumq(DisasContext
*s
, DisasOps
*o
)
2276 const uint8_t es
= get_field(s
->fields
, m4
);
2277 const uint8_t max_idx
= NUM_VEC_ELEMENTS(es
) - 1;
2278 TCGv_i64 sumh
, suml
, zero
, tmpl
;
2281 if (es
< ES_32
|| es
> ES_64
) {
2282 gen_program_exception(s
, PGM_SPECIFICATION
);
2283 return DISAS_NORETURN
;
2286 sumh
= tcg_const_i64(0);
2287 suml
= tcg_temp_new_i64();
2288 zero
= tcg_const_i64(0);
2289 tmpl
= tcg_temp_new_i64();
2291 read_vec_element_i64(suml
, get_field(s
->fields
, v3
), max_idx
, es
);
2292 for (idx
= 0; idx
<= max_idx
; idx
++) {
2293 read_vec_element_i64(tmpl
, get_field(s
->fields
, v2
), idx
, es
);
2294 tcg_gen_add2_i64(suml
, sumh
, suml
, sumh
, tmpl
, zero
);
2296 write_vec_element_i64(sumh
, get_field(s
->fields
, v1
), 0, ES_64
);
2297 write_vec_element_i64(suml
, get_field(s
->fields
, v1
), 1, ES_64
);
2299 tcg_temp_free_i64(sumh
);
2300 tcg_temp_free_i64(suml
);
2301 tcg_temp_free_i64(zero
);
2302 tcg_temp_free_i64(tmpl
);
2306 static DisasJumpType
op_vsum(DisasContext
*s
, DisasOps
*o
)
2308 const uint8_t es
= get_field(s
->fields
, m4
);
2313 gen_program_exception(s
, PGM_SPECIFICATION
);
2314 return DISAS_NORETURN
;
2317 sum
= tcg_temp_new_i32();
2318 tmp
= tcg_temp_new_i32();
2319 for (dst_idx
= 0; dst_idx
< 4; dst_idx
++) {
2320 uint8_t idx
= dst_idx
* NUM_VEC_ELEMENTS(es
) / 4;
2321 const uint8_t max_idx
= idx
+ NUM_VEC_ELEMENTS(es
) / 4 - 1;
2323 read_vec_element_i32(sum
, get_field(s
->fields
, v3
), max_idx
, es
);
2324 for (; idx
<= max_idx
; idx
++) {
2325 read_vec_element_i32(tmp
, get_field(s
->fields
, v2
), idx
, es
);
2326 tcg_gen_add_i32(sum
, sum
, tmp
);
2328 write_vec_element_i32(sum
, get_field(s
->fields
, v1
), dst_idx
, ES_32
);
2330 tcg_temp_free_i32(sum
);
2331 tcg_temp_free_i32(tmp
);
2335 static DisasJumpType
op_vtm(DisasContext
*s
, DisasOps
*o
)
2337 gen_gvec_2_ptr(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2338 cpu_env
, 0, gen_helper_gvec_vtm
);
2343 static DisasJumpType
op_vfae(DisasContext
*s
, DisasOps
*o
)
2345 const uint8_t es
= get_field(s
->fields
, m4
);
2346 const uint8_t m5
= get_field(s
->fields
, m5
);
2347 static gen_helper_gvec_3
* const g
[3] = {
2348 gen_helper_gvec_vfae8
,
2349 gen_helper_gvec_vfae16
,
2350 gen_helper_gvec_vfae32
,
2352 static gen_helper_gvec_3_ptr
* const g_cc
[3] = {
2353 gen_helper_gvec_vfae_cc8
,
2354 gen_helper_gvec_vfae_cc16
,
2355 gen_helper_gvec_vfae_cc32
,
2358 gen_program_exception(s
, PGM_SPECIFICATION
);
2359 return DISAS_NORETURN
;
2362 if (extract32(m5
, 0, 1)) {
2363 gen_gvec_3_ptr(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2364 get_field(s
->fields
, v3
), cpu_env
, m5
, g_cc
[es
]);
2367 gen_gvec_3_ool(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2368 get_field(s
->fields
, v3
), m5
, g
[es
]);
2373 static DisasJumpType
op_vfee(DisasContext
*s
, DisasOps
*o
)
2375 const uint8_t es
= get_field(s
->fields
, m4
);
2376 const uint8_t m5
= get_field(s
->fields
, m5
);
2377 static gen_helper_gvec_3
* const g
[3] = {
2378 gen_helper_gvec_vfee8
,
2379 gen_helper_gvec_vfee16
,
2380 gen_helper_gvec_vfee32
,
2382 static gen_helper_gvec_3_ptr
* const g_cc
[3] = {
2383 gen_helper_gvec_vfee_cc8
,
2384 gen_helper_gvec_vfee_cc16
,
2385 gen_helper_gvec_vfee_cc32
,
2388 if (es
> ES_32
|| m5
& ~0x3) {
2389 gen_program_exception(s
, PGM_SPECIFICATION
);
2390 return DISAS_NORETURN
;
2393 if (extract32(m5
, 0, 1)) {
2394 gen_gvec_3_ptr(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2395 get_field(s
->fields
, v3
), cpu_env
, m5
, g_cc
[es
]);
2398 gen_gvec_3_ool(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
),
2399 get_field(s
->fields
, v3
), m5
, g
[es
]);
/*
 * VFENE (VECTOR FIND ELEMENT NOT EQUAL): dispatch to the gvec helper
 * selected by element size.  When the CS flag (m5 bit 0) is set, the
 * CC-computing variant is used and the CC is marked static (helper
 * writes env->cc_op).
 */
static DisasJumpType op_vfene(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);    /* element size */
    const uint8_t m5 = get_field(s->fields, m5);    /* flag bits, passed to helper */
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfene8,
        gen_helper_gvec_vfene16,
        gen_helper_gvec_vfene32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfene_cc8,
        gen_helper_gvec_vfene_cc16,
        gen_helper_gvec_vfene_cc32,
    };

    /* Only 8/16/32-bit elements; only the two low m5 flag bits are valid. */
    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        /* CS set: helper computes the CC in env->cc_op. */
        gen_gvec_3_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                       get_field(s->fields, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s->fields, v1), get_field(s->fields, v2),
                       get_field(s->fields, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}
/*
 * VISTR (VECTOR ISOLATE STRING): two-operand form (v1, v2) dispatched by
 * element size.  Only m5 bit 0 (CS) is defined; when set, the CC-computing
 * helper variant is used and the CC is marked static (helper writes
 * env->cc_op).
 */
static DisasJumpType op_vistr(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);    /* element size */
    const uint8_t m5 = get_field(s->fields, m5);    /* only CS (bit 0) is valid */
    static gen_helper_gvec_2 * const g[3] = {
        gen_helper_gvec_vistr8,
        gen_helper_gvec_vistr16,
        gen_helper_gvec_vistr32,
    };
    static gen_helper_gvec_2_ptr * const g_cc[3] = {
        gen_helper_gvec_vistr_cc8,
        gen_helper_gvec_vistr_cc16,
        gen_helper_gvec_vistr_cc32,
    };

    /* Only 8/16/32-bit elements; any m5 bit other than CS is reserved. */
    if (es > ES_32 || m5 & ~0x1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        /* CS set: helper computes the CC in env->cc_op. */
        gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                       cpu_env, 0, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_2_ool(get_field(s->fields, v1), get_field(s->fields, v2), 0,
                       g[es]);
    }
    return DISAS_NEXT;
}
/*
 * VSTRC (VECTOR STRING RANGE COMPARE): four-operand (v1..v4) string
 * comparison.  Two independent m6 flags select among four helper tables:
 *   - m6 bit 2 (RT): "result type" variant of the helper,
 *   - m6 bit 0 (CS): CC-computing variant; the helper writes env->cc_op,
 *     so the CC is marked static afterwards.
 * Note the element size comes from m5 here (m6 carries the flags).
 */
static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m5);    /* element size */
    const uint8_t m6 = get_field(s->fields, m6);    /* flag bits, passed to helper */
    static gen_helper_gvec_4 * const g[3] = {
        gen_helper_gvec_vstrc8,
        gen_helper_gvec_vstrc16,
        gen_helper_gvec_vstrc32,
    };
    static gen_helper_gvec_4 * const g_rt[3] = {
        gen_helper_gvec_vstrc_rt8,
        gen_helper_gvec_vstrc_rt16,
        gen_helper_gvec_vstrc_rt32,
    };
    static gen_helper_gvec_4_ptr * const g_cc[3] = {
        gen_helper_gvec_vstrc_cc8,
        gen_helper_gvec_vstrc_cc16,
        gen_helper_gvec_vstrc_cc32,
    };
    static gen_helper_gvec_4_ptr * const g_cc_rt[3] = {
        gen_helper_gvec_vstrc_cc_rt8,
        gen_helper_gvec_vstrc_cc_rt16,
        gen_helper_gvec_vstrc_cc_rt32,
    };

    /*
     * Only 8/16/32-bit element sizes are defined.
     * NOTE(review): this condition line was lost in extraction and has been
     * reconstructed; as for VFAE, no m6 mask check appears needed since all
     * four flag bits are defined — confirm against upstream.
     */
    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m6, 0, 1)) {
        /* CS set: helper computes the CC in env->cc_op. */
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                           get_field(s->fields, v3), get_field(s->fields, v4),
                           cpu_env, m6, g_cc_rt[es]);
        } else {
            gen_gvec_4_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                           get_field(s->fields, v3), get_field(s->fields, v4),
                           cpu_env, m6, g_cc[es]);
        }
        set_cc_static(s);
    } else {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ool(get_field(s->fields, v1), get_field(s->fields, v2),
                           get_field(s->fields, v3), get_field(s->fields, v4),
                           m6, g_rt[es]);
        } else {
            gen_gvec_4_ool(get_field(s->fields, v1), get_field(s->fields, v2),
                           get_field(s->fields, v3), get_field(s->fields, v4),
                           m6, g[es]);
        }
    }
    return DISAS_NEXT;
}
/*
 * Shared translator for the BFP arithmetic ops VFA/VFD/VFM/VFS (add,
 * divide, multiply, subtract), distinguished via the opcode's op2 byte.
 * Only the 64-bit (long) format is supported; m5 bit 3 (SE, single
 * element) selects the scalar "s"-suffixed helper.
 */
static DisasJumpType op_vfa(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s->fields, m4);   /* floating-point format */
    const uint8_t m5 = get_field(s->fields, m5);
    const bool se = extract32(m5, 3, 1);            /* single-element control */
    gen_helper_gvec_3_ptr *fn;

    /* Only long (64-bit) BFP; m5 bits 0-2 are reserved. */
    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /*
     * NOTE(review): the case labels were lost in extraction and have been
     * reconstructed from the E7-prefixed opcodes (VFA=0xe3, VFD=0xe5,
     * VFM=0xe7, VFS=0xe2) — confirm against insn-data.def.
     */
    switch (s->fields->op2) {
    case 0xe3:
        fn = se ? gen_helper_gvec_vfa64s : gen_helper_gvec_vfa64;
        break;
    case 0xe5:
        fn = se ? gen_helper_gvec_vfd64s : gen_helper_gvec_vfd64;
        break;
    case 0xe7:
        fn = se ? gen_helper_gvec_vfm64s : gen_helper_gvec_vfm64;
        break;
    case 0xe2:
        fn = se ? gen_helper_gvec_vfs64s : gen_helper_gvec_vfs64;
        break;
    default:
        g_assert_not_reached();
    }
    gen_gvec_3_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                   get_field(s->fields, v3), cpu_env, 0, fn);
    return DISAS_NEXT;
}
/*
 * Shared translator for WFC and WFK (scalar BFP compare / compare and
 * signal), distinguished via op2 (0xcb is WFC).  Only the long (64-bit)
 * format with m4 == 0 is valid.  Both helpers compute the CC in
 * env->cc_op, hence set_cc_static() at the end.
 */
static DisasJumpType op_wfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s->fields, m3);   /* floating-point format */
    const uint8_t m4 = get_field(s->fields, m4);    /* must be zero */

    if (fpf != FPF_LONG || m4) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (s->fields->op2 == 0xcb) {
        /* WFC: "quiet" compare helper */
        gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                       cpu_env, 0, gen_helper_gvec_wfc64);
    } else {
        /* WFK: compare-and-signal helper */
        gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                       cpu_env, 0, gen_helper_gvec_wfk64);
    }
    set_cc_static(s);
    return DISAS_NEXT;
}
/*
 * Shared translator for the BFP compares VFCE/VFCH/VFCHE (compare equal /
 * high / high-or-equal), distinguished via op2.  Only the 64-bit (long)
 * format is supported.  m5 bit 3 (SE) selects the single-element helper;
 * m6 bit 0 (CS) selects the CC-computing "_cc" helper variant, in which
 * case the CC is marked static (helper writes env->cc_op).
 */
static DisasJumpType op_vfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s->fields, m4);   /* floating-point format */
    const uint8_t m5 = get_field(s->fields, m5);
    const uint8_t m6 = get_field(s->fields, m6);
    const bool se = extract32(m5, 3, 1);            /* single-element control */
    const bool cs = extract32(m6, 0, 1);            /* condition-code set */
    gen_helper_gvec_3_ptr *fn;

    /* Only long BFP; m5 bits 0-2 and m6 bits 1-3 are reserved. */
    if (fpf != FPF_LONG || extract32(m5, 0, 3) || extract32(m6, 1, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /*
     * NOTE(review): the case labels below were lost in extraction and have
     * been reconstructed from the E7-prefixed opcodes (VFCE=0xe8,
     * VFCH=0xeb, VFCHE=0xea) — confirm against insn-data.def.
     */
    if (cs) {
        switch (s->fields->op2) {
        case 0xe8:
            fn = se ? gen_helper_gvec_vfce64s_cc : gen_helper_gvec_vfce64_cc;
            break;
        case 0xeb:
            fn = se ? gen_helper_gvec_vfch64s_cc : gen_helper_gvec_vfch64_cc;
            break;
        case 0xea:
            fn = se ? gen_helper_gvec_vfche64s_cc : gen_helper_gvec_vfche64_cc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (s->fields->op2) {
        case 0xe8:
            fn = se ? gen_helper_gvec_vfce64s : gen_helper_gvec_vfce64;
            break;
        case 0xeb:
            fn = se ? gen_helper_gvec_vfch64s : gen_helper_gvec_vfch64;
            break;
        case 0xea:
            fn = se ? gen_helper_gvec_vfche64s : gen_helper_gvec_vfche64;
            break;
        default:
            g_assert_not_reached();
        }
    }
    gen_gvec_3_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                   get_field(s->fields, v3), cpu_env, 0, fn);
    if (cs) {
        set_cc_static(s);
    }
    return DISAS_NEXT;
}
/*
 * Shared translator for the BFP conversion/rounding ops VCDG/VCDLG/VCGD/
 * VCLGD/VFI/VFLR, distinguished via op2.  Only the 64-bit (long) format
 * is supported.  m4 bit 3 (SE) selects the single-element helper; the
 * effective rounding mode (erm, from m5) is packed into the helper's
 * immediate alongside m4 via deposit32().
 */
static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s->fields, m3);   /* floating-point format */
    const uint8_t m4 = get_field(s->fields, m4);
    const uint8_t erm = get_field(s->fields, m5);   /* effective rounding mode */
    const bool se = extract32(m4, 3, 1);            /* single-element control */
    gen_helper_gvec_2_ptr *fn;

    /* Only long BFP; m4 bits 0-1 reserved; erm 2 and >7 are invalid. */
    if (fpf != FPF_LONG || extract32(m4, 0, 2) || erm > 7 || erm == 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /*
     * NOTE(review): the case labels were lost in extraction and have been
     * reconstructed from the E7-prefixed opcodes (VCDG=0xc3, VCDLG=0xc1,
     * VCGD=0xc2, VCLGD=0xc0, VFI=0xc7, VFLR=0xc5) — confirm against
     * insn-data.def.
     */
    switch (s->fields->op2) {
    case 0xc3:
        fn = se ? gen_helper_gvec_vcdg64s : gen_helper_gvec_vcdg64;
        break;
    case 0xc1:
        fn = se ? gen_helper_gvec_vcdlg64s : gen_helper_gvec_vcdlg64;
        break;
    case 0xc2:
        fn = se ? gen_helper_gvec_vcgd64s : gen_helper_gvec_vcgd64;
        break;
    case 0xc0:
        fn = se ? gen_helper_gvec_vclgd64s : gen_helper_gvec_vclgd64;
        break;
    case 0xc7:
        fn = se ? gen_helper_gvec_vfi64s : gen_helper_gvec_vfi64;
        break;
    case 0xc5:
        fn = se ? gen_helper_gvec_vflr64s : gen_helper_gvec_vflr64;
        break;
    default:
        g_assert_not_reached();
    }
    /* Pass m4 in bits 0-3 and erm in bits 4-7 of the helper immediate. */
    gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), cpu_env,
                   deposit32(m4, 4, 4, erm), fn);
    return DISAS_NEXT;
}
/*
 * VFLL (VECTOR FP LOAD LENGTHENED): widen short (32-bit) BFP elements to
 * long.  Only the short source format is supported; m4 bit 3 (SE) selects
 * the single-element helper.
 */
static DisasJumpType op_vfll(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s->fields, m3);   /* source FP format */
    const uint8_t m4 = get_field(s->fields, m4);
    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfll32;

    /* Only short (32-bit) BFP sources; m4 bits 0-2 are reserved. */
    if (fpf != FPF_SHORT || extract32(m4, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        /* SE set: only process a single element. */
        fn = gen_helper_gvec_vfll32s;
    }
    gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), cpu_env,
                   0, fn);
    return DISAS_NEXT;
}
/*
 * Shared translator for VFMA/VFMS (fused multiply-add / multiply-subtract),
 * distinguished via op2 (0x8f is VFMA).  Only the 64-bit (long) format is
 * supported; m5 bit 3 (SE) selects the single-element helper.  Note the
 * FP format comes from m6 for this four-operand encoding.
 */
static DisasJumpType op_vfma(DisasContext *s, DisasOps *o)
{
    const uint8_t m5 = get_field(s->fields, m5);
    const uint8_t fpf = get_field(s->fields, m6);   /* floating-point format */
    const bool se = extract32(m5, 3, 1);            /* single-element control */
    gen_helper_gvec_4_ptr *fn;

    /* Only long BFP; m5 bits 0-2 are reserved. */
    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (s->fields->op2 == 0x8f) {
        fn = se ? gen_helper_gvec_vfma64s : gen_helper_gvec_vfma64;
    } else {
        fn = se ? gen_helper_gvec_vfms64s : gen_helper_gvec_vfms64;
    }
    gen_gvec_4_ptr(get_field(s->fields, v1), get_field(s->fields, v2),
                   get_field(s->fields, v3), get_field(s->fields, v4), cpu_env,
                   0, fn);
    return DISAS_NEXT;
}
/*
 * VFPSO (VECTOR FP PERFORM SIGN OPERATION): manipulate the sign bit of
 * 64-bit elements directly with logical ops (no FP helper needed):
 *   m5 == 0: complement, m5 == 1: force negative, m5 == 2: force positive.
 * With m4 bit 3 (SE) set only element 0 is processed via an i64 temp;
 * otherwise the whole vector is processed with gvec immediates.
 */
static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t fpf = get_field(s->fields, m3);   /* floating-point format */
    const uint8_t m4 = get_field(s->fields, m4);
    const uint8_t m5 = get_field(s->fields, m5);    /* sign operation selector */
    TCGv_i64 tmp;

    /* Only long BFP; m4 bits 0-2 reserved; only sign ops 0..2 defined. */
    if (fpf != FPF_LONG || extract32(m4, 0, 3) || m5 > 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        /* SE set: operate on element 0 only. */
        tmp = tcg_temp_new_i64();
        read_vec_element_i64(tmp, v2, 0, ES_64);
        switch (m5) {
        case 0:
            /* sign bit is inverted (complement) */
            tcg_gen_xori_i64(tmp, tmp, 1ull << 63);
            break;
        case 1:
            /* sign bit is set to one (negative) */
            tcg_gen_ori_i64(tmp, tmp, 1ull << 63);
            break;
        case 2:
            /* sign bit is set to zero (positive) */
            tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1);
            break;
        }
        write_vec_element_i64(tmp, v1, 0, ES_64);
        tcg_temp_free_i64(tmp);
    } else {
        switch (m5) {
        case 0:
            /* sign bit is inverted (complement) */
            gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63);
            break;
        case 1:
            /* sign bit is set to one (negative) */
            gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63);
            break;
        case 2:
            /* sign bit is set to zero (positive) */
            gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1);
            break;
        }
    }
    return DISAS_NEXT;
}
/*
 * VFSQ (VECTOR FP SQUARE ROOT): only the 64-bit (long) format is
 * supported; m4 bit 3 (SE) selects the single-element helper.
 */
static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s->fields, m3);   /* floating-point format */
    const uint8_t m4 = get_field(s->fields, m4);
    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfsq64;

    /* Only long BFP; m4 bits 0-2 are reserved. */
    if (fpf != FPF_LONG || extract32(m4, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        /* SE set: only process a single element. */
        fn = gen_helper_gvec_vfsq64s;
    }
    gen_gvec_2_ptr(get_field(s->fields, v1), get_field(s->fields, v2), cpu_env,
                   0, fn);
    return DISAS_NEXT;
}
2779 static DisasJumpType
op_vftci(DisasContext
*s
, DisasOps
*o
)
2781 const uint16_t i3
= get_field(s
->fields
, i3
);
2782 const uint8_t fpf
= get_field(s
->fields
, m4
);
2783 const uint8_t m5
= get_field(s
->fields
, m5
);
2784 gen_helper_gvec_2_ptr
*fn
= gen_helper_gvec_vftci64
;
2786 if (fpf
!= FPF_LONG
|| extract32(m5
, 0, 3)) {
2787 gen_program_exception(s
, PGM_SPECIFICATION
);
2788 return DISAS_NORETURN
;
2791 if (extract32(m5
, 3, 1)) {
2792 fn
= gen_helper_gvec_vftci64s
;
2794 gen_gvec_2_ptr(get_field(s
->fields
, v1
), get_field(s
->fields
, v2
), cpu_env
,