/*
 * QEMU TCG support -- s390x vector instruction translation functions
 *
 * Copyright (C) 2019 Red Hat Inc
 *
 * Authors:
 *   David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * For most instructions that use the same element size for reads and
 * writes, we can use real gvec vector expansion, which potentially uses
 * real host vector instructions. As they only work up to 64 bit elements,
 * 128 bit elements (vector is a single element) have to be handled
 * differently. Operations that are too complicated to encode via TCG ops
 * are handled via gvec ool (out-of-line) handlers.
 *
 * As soon as instructions use different element sizes for reads and writes
 * or access elements "out of their element scope", we expand them manually
 * in fancy loops, as gvec expansion does not deal with actual element
 * numbers and also does not support access to other elements.
 *
 * 128 bit elements:
 *  As we only have i32/i64, such elements have to be loaded into two
 *  i64 values and can then be processed e.g. by tcg_gen_add2_i64.
 *
 * Sizes:
 *  On s390x, the operand size (oprsz) and the maximum size (maxsz) are
 *  always 16 (128 bit). What gvec code calls "vece", s390x calls "es",
 *  a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only
 *  128 bit element size has to be treated in a special way (MO_64 + 1).
 *  We will use ES_* instead of MO_* for this reason in this file.
 *
 * CC handling:
 *  As gvec ool-helpers can currently not return values (besides via
 *  pointers like vectors or cpu_env), whenever we have to set the CC and
 *  can't conclude the value from the result vector, we will directly
 *  set it in "env->cc_op" and mark it as static via set_cc_static().
 *  Whenever this is done, the helper writes globals (cc_op).
 */
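/*
 * As an illustration of the 128 bit handling described above (see
 * gen_gvec128_3_i64() and op_va() below), a 128 bit addition V1 = V2 + V3
 * is expanded along the lines of:
 *
 *    read_vec_element_i64(ah, v2, 0, ES_64);    high doubleword of v2
 *    read_vec_element_i64(al, v2, 1, ES_64);    low doubleword of v2
 *    read_vec_element_i64(bh, v3, 0, ES_64);
 *    read_vec_element_i64(bl, v3, 1, ES_64);
 *    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);  128 bit add via add2
 *    write_vec_element_i64(dh, v1, 0, ES_64);
 *    write_vec_element_i64(dl, v1, 1, ES_64);
 */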
#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es))
#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es))
#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE)
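/*
 * Element sizes as used throughout this file; as described above, they map
 * directly to MO_8 ... MO_64, with the 128 bit element size expressed as
 * MO_64 + 1.
 */
#define ES_8    MO_8
#define ES_16   MO_16
#define ES_32   MO_32
#define ES_64   MO_64
#define ES_128  (MO_64 + 1)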
static inline bool valid_vec_element(uint8_t enr, TCGMemOp es)
{
    return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1));
}
static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
                                 TCGMemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_ld8u_i64(dst, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(dst, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(dst, cpu_env, offs);
        break;
    case ES_8 | MO_SIGN:
        tcg_gen_ld8s_i64(dst, cpu_env, offs);
        break;
    case ES_16 | MO_SIGN:
        tcg_gen_ld16s_i64(dst, cpu_env, offs);
        break;
    case ES_32 | MO_SIGN:
        tcg_gen_ld32s_i64(dst, cpu_env, offs);
        break;
    case ES_64:
    case ES_64 | MO_SIGN:
        tcg_gen_ld_i64(dst, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr,
                                 TCGMemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_ld8u_i32(dst, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_ld16u_i32(dst, cpu_env, offs);
        break;
    case ES_8 | MO_SIGN:
        tcg_gen_ld8s_i32(dst, cpu_env, offs);
        break;
    case ES_16 | MO_SIGN:
        tcg_gen_ld16s_i32(dst, cpu_env, offs);
        break;
    case ES_32:
    case ES_32 | MO_SIGN:
        tcg_gen_ld_i32(dst, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
                                  TCGMemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_st8_i64(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i64(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st32_i64(src, cpu_env, offs);
        break;
    case ES_64:
        tcg_gen_st_i64(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr,
                                  TCGMemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_st8_i32(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i32(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st_i32(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}
static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
                                    uint8_t es)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* mask off invalid parts from the element nr */
    tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);

    /* convert it to an element offset relative to cpu_env (vec_reg_offset()) */
    tcg_gen_shli_i64(tmp, tmp, es);
#ifndef HOST_WORDS_BIGENDIAN
    tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
#endif
    tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));

    /* generate the final ptr by adding cpu_env */
    tcg_gen_trunc_i64_ptr(ptr, tmp);
    tcg_gen_add_ptr(ptr, ptr, cpu_env);

    tcg_temp_free_i64(tmp);
}
#define gen_gvec_2(v1, v2, gen) \
    tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   16, 16, gen)
#define gen_gvec_2s(v1, v2, c, gen) \
    tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                    16, 16, c, gen)
#define gen_gvec_2i_ool(v1, v2, c, data, fn) \
    tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                        c, 16, 16, data, fn)
#define gen_gvec_3(v1, v2, v3, gen) \
    tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), 16, 16, gen)
#define gen_gvec_3_ool(v1, v2, v3, data, fn) \
    tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), 16, 16, data, fn)
#define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), ptr, 16, 16, data, fn)
#define gen_gvec_3i(v1, v2, v3, c, gen) \
    tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                    vec_full_reg_offset(v3), c, 16, 16, gen)
#define gen_gvec_4(v1, v2, v3, v4, gen) \
    tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                   16, 16, gen)
#define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \
    tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                       16, 16, data, fn)
#define gen_gvec_dup_i64(es, v1, c) \
    tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_mov(v1, v2) \
    tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
                     16)
#define gen_gvec_dup64i(v1, c) \
    tcg_gen_gvec_dup64i(vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_fn_2(fn, es, v1, v2) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      16, 16)
#define gen_gvec_fn_2i(fn, es, v1, v2, c) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      c, 16, 16)
#define gen_gvec_fn_2s(fn, es, v1, v2, s) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      s, 16, 16)
#define gen_gvec_fn_3(fn, es, v1, v2, v3) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      vec_full_reg_offset(v3), 16, 16)
/*
 * Helper to carry out a 128 bit vector computation using 2 i64 values per
 * vector.
 */
typedef void (*gen_gvec128_3_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                                     TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a,
                              uint8_t b)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 dl = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 al = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();
    TCGv_i64 bl = tcg_temp_new_i64();

    read_vec_element_i64(ah, a, 0, ES_64);
    read_vec_element_i64(al, a, 1, ES_64);
    read_vec_element_i64(bh, b, 0, ES_64);
    read_vec_element_i64(bl, b, 1, ES_64);
    fn(dl, dh, al, ah, bl, bh);
    write_vec_element_i64(dh, d, 0, ES_64);
    write_vec_element_i64(dl, d, 1, ES_64);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(dl);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(al);
    tcg_temp_free_i64(bh);
    tcg_temp_free_i64(bl);
}

typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                                     TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh,
                                     TCGv_i64 cl, TCGv_i64 ch);
static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
                              uint8_t b, uint8_t c)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 dl = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 al = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();
    TCGv_i64 bl = tcg_temp_new_i64();
    TCGv_i64 ch = tcg_temp_new_i64();
    TCGv_i64 cl = tcg_temp_new_i64();

    read_vec_element_i64(ah, a, 0, ES_64);
    read_vec_element_i64(al, a, 1, ES_64);
    read_vec_element_i64(bh, b, 0, ES_64);
    read_vec_element_i64(bl, b, 1, ES_64);
    read_vec_element_i64(ch, c, 0, ES_64);
    read_vec_element_i64(cl, c, 1, ES_64);
    fn(dl, dh, al, ah, bl, bh, cl, ch);
    write_vec_element_i64(dh, d, 0, ES_64);
    write_vec_element_i64(dl, d, 1, ES_64);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(dl);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(al);
    tcg_temp_free_i64(bh);
    tcg_temp_free_i64(bl);
    tcg_temp_free_i64(ch);
    tcg_temp_free_i64(cl);
}
static void gen_gvec_dupi(uint8_t es, uint8_t reg, uint64_t c)
{
    switch (es) {
    case ES_8:
        tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, c);
        break;
    case ES_16:
        tcg_gen_gvec_dup16i(vec_full_reg_offset(reg), 16, 16, c);
        break;
    case ES_32:
        tcg_gen_gvec_dup32i(vec_full_reg_offset(reg), 16, 16, c);
        break;
    case ES_64:
        gen_gvec_dup64i(reg, c);
        break;
    default:
        g_assert_not_reached();
    }
}

static void zero_vec(uint8_t reg)
{
    tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, 0);
}

static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                          uint64_t b)
{
    TCGv_i64 bl = tcg_const_i64(b);
    TCGv_i64 bh = tcg_const_i64(0);

    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    tcg_temp_free_i64(bl);
    tcg_temp_free_i64(bh);
}
static DisasJumpType op_vge(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s->fields, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);

    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static uint64_t generate_byte_mask(uint8_t mask)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mask >> i) & 1) {
            r |= 0xffull << (i * 8);
        }
    }
    return r;
}
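/*
 * For example, generate_byte_mask(0x81) sets byte 0 and byte 7 and thus
 * returns 0xff000000000000ffull; 0xff yields all ones, 0x00 yields zero.
 */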
static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
{
    const uint16_t i2 = get_field(s->fields, i2);

    if (i2 == (i2 & 0xff) * 0x0101) {
        /*
         * Masks for both 64 bit elements of the vector are the same.
         * Trust tcg to produce a good constant loading.
         */
        gen_gvec_dup64i(get_field(s->fields, v1),
                        generate_byte_mask(i2 & 0xff));
    } else {
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8));
        write_vec_element_i64(t, get_field(s->fields, v1), 0, ES_64);
        tcg_gen_movi_i64(t, generate_byte_mask(i2));
        write_vec_element_i64(t, get_field(s->fields, v1), 1, ES_64);
        tcg_temp_free_i64(t);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    const uint8_t bits = NUM_VEC_ELEMENT_BITS(es);
    const uint8_t i2 = get_field(s->fields, i2) & (bits - 1);
    const uint8_t i3 = get_field(s->fields, i3) & (bits - 1);
    uint64_t mask = 0;
    int i;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* generate the mask - take care of wrapping */
    for (i = i2; ; i = (i + 1) % bits) {
        mask |= 1ull << (bits - i - 1);
        if (i == i3) {
            break;
        }
    }

    gen_gvec_dupi(es, get_field(s->fields, v1), mask);
    return DISAS_NEXT;
}
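/*
 * Worked example: for es == ES_8 (bits == 8), i2 == 6 and i3 == 1 the loop
 * wraps around and sets bits 6, 7, 0 and 1 counted from the leftmost bit,
 * i.e. the per-element mask 0b11000011, which gen_gvec_dupi() then
 * replicates into every byte element of v1.
 */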
static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
    write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_vlr(DisasContext *s, DisasOps *o)
{
    gen_gvec_mov(get_field(s->fields, v1), get_field(s->fields, v2));
    return DISAS_NEXT;
}

static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);
    TCGv_i64 tmp;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    gen_gvec_dup_i64(es, get_field(s->fields, v1), tmp);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_const_i64((int16_t)get_field(s->fields, i2));
    write_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    TCGv_ptr ptr;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* fast path if we don't need the register content */
    if (!get_field(s->fields, b2)) {
        uint8_t enr = get_field(s->fields, d2) & (NUM_VEC_ELEMENTS(es) - 1);

        read_vec_element_i64(o->out, get_field(s->fields, v3), enr, es);
        return DISAS_NEXT;
    }

    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s->fields, v3), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_ld8u_i64(o->out, ptr, 0);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(o->out, ptr, 0);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(o->out, ptr, 0);
        break;
    case ES_64:
        tcg_gen_ld_i64(o->out, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);

    return DISAS_NEXT;
}
static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s->fields, m3);
    uint8_t enr;
    TCGv_i64 t;

    switch (es) {
    /* rightmost sub-element of leftmost doubleword */
    case ES_8:
        enr = 7;
        break;
    case ES_16:
        enr = 3;
        break;
    case ES_32:
        enr = 1;
        break;
    case ES_64:
        enr = 0;
        break;
    /* leftmost sub-element of leftmost doubleword */
    case ES_32 | MO_SIGN:
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            es = ES_32;
            enr = 0;
            break;
        }
        /* fallthrough */
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
    zero_vec(get_field(s->fields, v1));
    write_vec_element_i64(t, get_field(s->fields, v1), enr, es);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}
static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s->fields, v3);
    uint8_t v1 = get_field(s->fields, v1);
    TCGv_i64 t0, t1;

    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /*
     * Check for possible access exceptions by trying to load the last
     * element. The first element will be checked first next.
     */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
    tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);

    for (;; v1++) {
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 0, ES_64);
        if (v1 == v3) {
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 1, ES_64);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }

    /* Store the last element, loaded first */
    write_vec_element_i64(t0, v1, 1, ES_64);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}
static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s->fields, m3) + 6));
    const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1));
    TCGv_ptr a0;
    TCGv_i64 bytes;

    if (get_field(s->fields, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    bytes = tcg_temp_new_i64();
    a0 = tcg_temp_new_ptr();
    /* calculate the number of bytes until the next block boundary */
    tcg_gen_ori_i64(bytes, o->addr1, -block_size);
    tcg_gen_neg_i64(bytes, bytes);

    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, bytes);
    tcg_temp_free_i64(bytes);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}
static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    TCGv_ptr ptr;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* fast path if we don't need the register content */
    if (!get_field(s->fields, b2)) {
        uint8_t enr = get_field(s->fields, d2) & (NUM_VEC_ELEMENTS(es) - 1);

        write_vec_element_i64(o->in2, get_field(s->fields, v1), enr, es);
        return DISAS_NEXT;
    }

    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s->fields, v1), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_st8_i64(o->in2, ptr, 0);
        break;
    case ES_16:
        tcg_gen_st16_i64(o->in2, ptr, 0);
        break;
    case ES_32:
        tcg_gen_st32_i64(o->in2, ptr, 0);
        break;
    case ES_64:
        tcg_gen_st_i64(o->in2, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);

    return DISAS_NEXT;
}
static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o)
{
    write_vec_element_i64(o->in1, get_field(s->fields, v1), 0, ES_64);
    write_vec_element_i64(o->in2, get_field(s->fields, v1), 1, ES_64);
    return DISAS_NEXT;
}

static DisasJumpType op_vll(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();

    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}
static DisasJumpType op_vmr(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t v3 = get_field(s->fields, v3);
    const uint8_t es = get_field(s->fields, m4);
    int dst_idx, src_idx;
    TCGv_i64 tmp;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    if (s->fields->op2 == 0x61) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) {
            src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
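/*
 * E.g. for ES_64, VECTOR MERGE HIGH (op2 == 0x61) produces
 * v1 = { v2[0], v3[0] }, while VECTOR MERGE LOW produces
 * v1 = { v2[1], v3[1] }.
 */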
static DisasJumpType op_vpk(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t v3 = get_field(s->fields, v3);
    const uint8_t es = get_field(s->fields, m4);
    static gen_helper_gvec_3 * const vpk[3] = {
        gen_helper_gvec_vpk16,
        gen_helper_gvec_vpk32,
        gen_helper_gvec_vpk64,
    };
    static gen_helper_gvec_3 * const vpks[3] = {
        gen_helper_gvec_vpks16,
        gen_helper_gvec_vpks32,
        gen_helper_gvec_vpks64,
    };
    static gen_helper_gvec_3_ptr * const vpks_cc[3] = {
        gen_helper_gvec_vpks_cc16,
        gen_helper_gvec_vpks_cc32,
        gen_helper_gvec_vpks_cc64,
    };
    static gen_helper_gvec_3 * const vpkls[3] = {
        gen_helper_gvec_vpkls16,
        gen_helper_gvec_vpkls32,
        gen_helper_gvec_vpkls64,
    };
    static gen_helper_gvec_3_ptr * const vpkls_cc[3] = {
        gen_helper_gvec_vpkls_cc16,
        gen_helper_gvec_vpkls_cc32,
        gen_helper_gvec_vpkls_cc64,
    };

    if (es == ES_8 || es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields->op2) {
    case 0x97:
        if (get_field(s->fields, m5) & 0x1) {
            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]);
            set_cc_static(s);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]);
        }
        break;
    case 0x95:
        if (get_field(s->fields, m5) & 0x1) {
            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]);
            set_cc_static(s);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]);
        }
        break;
    case 0x94:
        /* If sources and destination don't overlap -> fast path */
        if (v1 != v2 && v1 != v3) {
            const uint8_t src_es = get_field(s->fields, m4);
            const uint8_t dst_es = src_es - 1;
            TCGv_i64 tmp = tcg_temp_new_i64();
            int dst_idx, src_idx;

            for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
                src_idx = dst_idx;
                if (src_idx < NUM_VEC_ELEMENTS(src_es)) {
                    read_vec_element_i64(tmp, v2, src_idx, src_es);
                } else {
                    src_idx -= NUM_VEC_ELEMENTS(src_es);
                    read_vec_element_i64(tmp, v3, src_idx, src_es);
                }
                write_vec_element_i64(tmp, v1, dst_idx, dst_es);
            }
            tcg_temp_free_i64(tmp);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}
static DisasJumpType op_vperm(DisasContext *s, DisasOps *o)
{
    gen_gvec_4_ool(get_field(s->fields, v1), get_field(s->fields, v2),
                   get_field(s->fields, v3), get_field(s->fields, v4),
                   0, gen_helper_gvec_vperm);
    return DISAS_NEXT;
}

static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o)
{
    const uint8_t i2 = extract32(get_field(s->fields, m4), 2, 1);
    const uint8_t i3 = extract32(get_field(s->fields, m4), 0, 1);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    read_vec_element_i64(t0, get_field(s->fields, v2), i2, ES_64);
    read_vec_element_i64(t1, get_field(s->fields, v3), i3, ES_64);
    write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_vrep(DisasContext *s, DisasOps *o)
{
    const uint8_t enr = get_field(s->fields, i2);
    const uint8_t es = get_field(s->fields, m4);

    if (es > ES_64 || !valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s->fields, v1)),
                         vec_reg_offset(get_field(s->fields, v3), enr, es),
                         16, 16);
    return DISAS_NEXT;
}

static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
{
    const int64_t data = (int16_t)get_field(s->fields, i2);
    const uint8_t es = get_field(s->fields, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_dupi(es, get_field(s->fields, v1), data);
    return DISAS_NEXT;
}

static DisasJumpType op_vsce(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s->fields, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);

    read_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static void gen_sel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
{
    TCGv_i64 t = tcg_temp_new_i64();

    /* bit in c not set -> copy bit from b */
    tcg_gen_andc_i64(t, b, c);
    /* bit in c set -> copy bit from a */
    tcg_gen_and_i64(d, a, c);
    /* merge the results */
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}
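/*
 * This computes d = (a & c) | (b & ~c); e.g. with a == 0xf0, b == 0x0f and
 * c == 0xaa the result is (0xf0 & 0xaa) | (0x0f & 0x55) == 0xa0 | 0x05 ==
 * 0xa5.
 */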
static void gen_sel_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b,
                        TCGv_vec c)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_andc_vec(vece, t, b, c);
    tcg_gen_and_vec(vece, d, a, c);
    tcg_gen_or_vec(vece, d, d, t);
    tcg_temp_free_vec(t);
}
static DisasJumpType op_vsel(DisasContext *s, DisasOps *o)
{
    static const GVecGen4 gvec_op = {
        .fni8 = gen_sel_i64,
        .fniv = gen_sel_vec,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    gen_gvec_4(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), get_field(s->fields, v4), &gvec_op);
    return DISAS_NEXT;
}
static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);
    int idx1, idx2;
    TCGv_i64 tmp;

    switch (es) {
    case ES_8:
        idx1 = 7;
        idx2 = 15;
        break;
    case ES_16:
        idx1 = 3;
        idx2 = 7;
        break;
    case ES_32:
        idx1 = 1;
        idx2 = 3;
        break;
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s->fields, v2), idx1, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s->fields, v1), 0, ES_64);
    read_vec_element_i64(tmp, get_field(s->fields, v2), idx2, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s->fields, v1), 1, ES_64);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_const_i64(16);

    /* Probe write access before actually modifying memory */
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);

    read_vec_element_i64(tmp, get_field(s->fields, v1), 0, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    read_vec_element_i64(tmp, get_field(s->fields, v1), 1, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s->fields, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s->fields, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s->fields, v3);
    uint8_t v1 = get_field(s->fields, v1);
    TCGv_i64 tmp;

    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* Probe write access before actually modifying memory */
    tmp = tcg_const_i64((v3 - v1 + 1) * 16);
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);

    for (;; v1++) {
        read_vec_element_i64(tmp, v1, 0, ES_64);
        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        read_vec_element_i64(tmp, v1, 1, ES_64);
        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
        if (v1 == v3) {
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s->fields, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();

    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}
static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
{
    const bool logical = s->fields->op2 == 0xd4 || s->fields->op2 == 0xd5;
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t src_es = get_field(s->fields, m3);
    const uint8_t dst_es = src_es + 1;
    int dst_idx, src_idx;
    TCGv_i64 tmp;

    if (src_es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    if (s->fields->op2 == 0xd7 || s->fields->op2 == 0xd5) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
            src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}
static DisasJumpType op_va(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s->fields, v1),
                          get_field(s->fields, v2), get_field(s->fields, v3));
        return DISAS_NEXT;
    }
    gen_gvec_fn_3(add, es, get_field(s->fields, v1), get_field(s->fields, v2),
                  get_field(s->fields, v3));
    return DISAS_NEXT;
}
static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
{
    const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1;
    TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr));
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* Calculate the carry into the MSB, ignoring the old MSBs */
    tcg_gen_andc_i64(t1, a, msb_mask);
    tcg_gen_andc_i64(t2, b, msb_mask);
    tcg_gen_add_i64(t1, t1, t2);
    /* Calculate the MSB without any carry into it */
    tcg_gen_xor_i64(t3, a, b);
    /* Calculate the carry out of the MSB in the MSB bit position */
    tcg_gen_and_i64(d, a, b);
    tcg_gen_and_i64(t1, t1, t3);
    tcg_gen_or_i64(d, d, t1);
    /* Isolate and shift the carry into position */
    tcg_gen_and_i64(d, d, msb_mask);
    tcg_gen_shri_i64(d, d, msb_bit_nr);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
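/*
 * Per element, this evaluates the carry out of the MSB as
 * (a & b) | ((a ^ b) & carry_into_msb) in the MSB position and shifts it
 * down to bit 0: e.g. for ES_8, 0x80 + 0x80 yields a per-byte carry of 1,
 * while 0x80 + 0x7f yields 0.
 */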
static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    gen_acc(d, a, b, ES_8);
}

static void gen_acc16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    gen_acc(d, a, b, ES_16);
}

static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_add_i32(t, a, b);
    tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
    tcg_temp_free_i32(t);
}

static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_add_i64(t, a, b);
    tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
    tcg_temp_free_i64(t);
}

static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                         TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_add2_i64(tl, th, al, zero, bl, zero);
    tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
    tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    static const GVecGen3 g[4] = {
        { .fni8 = gen_acc8_i64, },
        { .fni8 = gen_acc16_i64, },
        { .fni4 = gen_acc_i32, },
        { .fni8 = gen_acc_i64, },
    };

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(gen_acc2_i64, get_field(s->fields, v1),
                          get_field(s->fields, v2), get_field(s->fields, v3));
        return DISAS_NEXT;
    }
    gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), &g[es]);
    return DISAS_NEXT;
}

static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                        TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_const_i64(0);

    /* extract the carry only */
    tcg_gen_extract_i64(tl, cl, 0, 1);
    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    tcg_gen_add2_i64(dl, dh, dl, dh, tl, th);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
}

static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
{
    if (get_field(s->fields, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_ac2_i64, get_field(s->fields, v1),
                      get_field(s->fields, v2), get_field(s->fields, v3),
                      get_field(s->fields, v4));
    return DISAS_NEXT;
}

static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                          TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_andi_i64(tl, cl, 1);
    tcg_gen_add2_i64(tl, th, tl, zero, al, zero);
    tcg_gen_add2_i64(tl, th, tl, th, bl, zero);
    tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
    tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
{
    if (get_field(s->fields, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_accc2_i64, get_field(s->fields, v1),
                      get_field(s->fields, v2), get_field(s->fields, v3),
                      get_field(s->fields, v4));
    return DISAS_NEXT;
}

static DisasJumpType op_vn(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(and, ES_8, get_field(s->fields, v1), get_field(s->fields, v2),
                  get_field(s->fields, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vnc(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(andc, ES_8, get_field(s->fields, v1),
                  get_field(s->fields, v2), get_field(s->fields, v3));
    return DISAS_NEXT;
}
static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_add_i64(t0, t0, t1);
    tcg_gen_addi_i64(t0, t0, 1);
    tcg_gen_shri_i64(t0, t0, 1);
    tcg_gen_extrl_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();

    /* extending the sign by one bit is sufficient */
    tcg_gen_extract_i64(ah, al, 63, 1);
    tcg_gen_extract_i64(bh, bl, 63, 1);
    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    gen_addi2_i64(dl, dh, dl, dh, 1);
    tcg_gen_extract2_i64(dl, dl, dh, 1);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(bh);
}
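/*
 * I.e. the signed average is (a + b + 1) >> 1 computed with 65 bit
 * precision: the extra bit lives in dh and tcg_gen_extract2_i64() shifts it
 * back into bit 63 of the result.
 */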
static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vavg8, },
        { .fno = gen_helper_gvec_vavg16, },
        { .fni4 = gen_avg_i32, },
        { .fni8 = gen_avg_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), &g[es]);
    return DISAS_NEXT;
}

static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_add_i64(t0, t0, t1);
    tcg_gen_addi_i64(t0, t0, 1);
    tcg_gen_shri_i64(t0, t0, 1);
    tcg_gen_extrl_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
    gen_addi2_i64(dl, dh, dl, dh, 1);
    tcg_gen_extract2_i64(dl, dl, dh, 1);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vavgl8, },
        { .fno = gen_helper_gvec_vavgl16, },
        { .fni4 = gen_avgl_i32, },
        { .fni8 = gen_avgl_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), &g[es]);
    return DISAS_NEXT;
}
static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 sum = tcg_temp_new_i32();
    int i;

    read_vec_element_i32(sum, get_field(s->fields, v3), 1, ES_32);
    for (i = 0; i < 4; i++) {
        read_vec_element_i32(tmp, get_field(s->fields, v2), i, ES_32);
        tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
    }
    zero_vec(get_field(s->fields, v1));
    write_vec_element_i32(sum, get_field(s->fields, v1), 1, ES_32);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(sum);
    return DISAS_NEXT;
}
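/*
 * The tcg_gen_add2_i32() above folds the carry out of bit 31 back into the
 * accumulated sum: the new sum is sum + element + carry(sum + element).
 */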
static DisasJumpType op_vec(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s->fields, m3);
    const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (s->fields->op2 == 0xdb) {
        es |= MO_SIGN;
    }

    o->in1 = tcg_temp_new_i64();
    o->in2 = tcg_temp_new_i64();
    read_vec_element_i64(o->in1, get_field(s->fields, v1), enr, es);
    read_vec_element_i64(o->in2, get_field(s->fields, v2), enr, es);
    return DISAS_NEXT;
}
static DisasJumpType op_vc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    TCGCond cond = s->insn->data;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_gvec_cmp(cond, es,
                     vec_full_reg_offset(get_field(s->fields, v1)),
                     vec_full_reg_offset(get_field(s->fields, v2)),
                     vec_full_reg_offset(get_field(s->fields, v3)), 16, 16);
    if (get_field(s->fields, m5) & 0x1) {
        TCGv_i64 low = tcg_temp_new_i64();
        TCGv_i64 high = tcg_temp_new_i64();

        read_vec_element_i64(high, get_field(s->fields, v1), 0, ES_64);
        read_vec_element_i64(low, get_field(s->fields, v1), 1, ES_64);
        gen_op_update2_cc_i64(s, CC_OP_VC, low, high);

        tcg_temp_free_i64(low);
        tcg_temp_free_i64(high);
    }
    return DISAS_NEXT;
}

static void gen_clz_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_clzi_i32(d, a, 32);
}

static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_clzi_i64(d, a, 64);
}

static DisasJumpType op_vclz(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vclz8, },
        { .fno = gen_helper_gvec_vclz16, },
        { .fni4 = gen_clz_i32, },
        { .fni8 = gen_clz_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2(get_field(s->fields, v1), get_field(s->fields, v2), &g[es]);
    return DISAS_NEXT;
}

static void gen_ctz_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_ctzi_i32(d, a, 32);
}

static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_ctzi_i64(d, a, 64);
}

static DisasJumpType op_vctz(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vctz8, },
        { .fno = gen_helper_gvec_vctz16, },
        { .fni4 = gen_ctz_i32, },
        { .fni8 = gen_ctz_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2(get_field(s->fields, v1), get_field(s->fields, v2), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vx(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(xor, ES_8, get_field(s->fields, v1), get_field(s->fields, v2),
                  get_field(s->fields, v3));
    return DISAS_NEXT;
}
static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vgfm8, },
        { .fno = gen_helper_gvec_vgfm16, },
        { .fno = gen_helper_gvec_vgfm32, },
        { .fno = gen_helper_gvec_vgfm64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m5);
    static const GVecGen4 g[4] = {
        { .fno = gen_helper_gvec_vgfma8, },
        { .fno = gen_helper_gvec_vgfma16, },
        { .fno = gen_helper_gvec_vgfma32, },
        { .fno = gen_helper_gvec_vgfma64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_4(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), get_field(s->fields, v4), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vlc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_fn_2(neg, es, get_field(s->fields, v1), get_field(s->fields, v2));
    return DISAS_NEXT;
}

static DisasJumpType op_vlp(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_fn_2(abs, es, get_field(s->fields, v1), get_field(s->fields, v2));
    return DISAS_NEXT;
}
static DisasJumpType op_vmx(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t v3 = get_field(s->fields, v3);
    const uint8_t es = get_field(s->fields, m4);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields->op2) {
    case 0xff:
        gen_gvec_fn_3(smax, es, v1, v2, v3);
        break;
    case 0xfd:
        gen_gvec_fn_3(umax, es, v1, v2, v3);
        break;
    case 0xfe:
        gen_gvec_fn_3(smin, es, v1, v2, v3);
        break;
    case 0xfc:
        gen_gvec_fn_3(umin, es, v1, v2, v3);
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}
static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    tcg_gen_mul_i32(t0, a, b);
    tcg_gen_add_i32(d, t0, c);

    tcg_temp_free_i32(t0);
}

static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_ext_i32_i64(t2, c);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_add_i64(t0, t0, t2);
    tcg_gen_extrh_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_extu_i32_i64(t2, c);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_add_i64(t0, t0, t2);
    tcg_gen_extrh_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m5);
    static const GVecGen4 g_vmal[3] = {
        { .fno = gen_helper_gvec_vmal8, },
        { .fno = gen_helper_gvec_vmal16, },
        { .fni4 = gen_mal_i32, },
    };
    static const GVecGen4 g_vmah[3] = {
        { .fno = gen_helper_gvec_vmah8, },
        { .fno = gen_helper_gvec_vmah16, },
        { .fni4 = gen_mah_i32, },
    };
    static const GVecGen4 g_vmalh[3] = {
        { .fno = gen_helper_gvec_vmalh8, },
        { .fno = gen_helper_gvec_vmalh16, },
        { .fni4 = gen_malh_i32, },
    };
    static const GVecGen4 g_vmae[3] = {
        { .fno = gen_helper_gvec_vmae8, },
        { .fno = gen_helper_gvec_vmae16, },
        { .fno = gen_helper_gvec_vmae32, },
    };
    static const GVecGen4 g_vmale[3] = {
        { .fno = gen_helper_gvec_vmale8, },
        { .fno = gen_helper_gvec_vmale16, },
        { .fno = gen_helper_gvec_vmale32, },
    };
    static const GVecGen4 g_vmao[3] = {
        { .fno = gen_helper_gvec_vmao8, },
        { .fno = gen_helper_gvec_vmao16, },
        { .fno = gen_helper_gvec_vmao32, },
    };
    static const GVecGen4 g_vmalo[3] = {
        { .fno = gen_helper_gvec_vmalo8, },
        { .fno = gen_helper_gvec_vmalo16, },
        { .fno = gen_helper_gvec_vmalo32, },
    };
    const GVecGen4 *fn;

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields->op2) {
    case 0xaa:
        fn = &g_vmal[es];
        break;
    case 0xab:
        fn = &g_vmah[es];
        break;
    case 0xa9:
        fn = &g_vmalh[es];
        break;
    case 0xae:
        fn = &g_vmae[es];
        break;
    case 0xac:
        fn = &g_vmale[es];
        break;
    case 0xaf:
        fn = &g_vmao[es];
        break;
    case 0xad:
        fn = &g_vmalo[es];
        break;
    default:
        g_assert_not_reached();
    }

    gen_gvec_4(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), get_field(s->fields, v4), fn);
    return DISAS_NEXT;
}
static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_muls2_i32(t, d, a, b);
    tcg_temp_free_i32(t);
}

static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(t, d, a, b);
    tcg_temp_free_i32(t);
}
static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    static const GVecGen3 g_vmh[3] = {
        { .fno = gen_helper_gvec_vmh8, },
        { .fno = gen_helper_gvec_vmh16, },
        { .fni4 = gen_mh_i32, },
    };
    static const GVecGen3 g_vmlh[3] = {
        { .fno = gen_helper_gvec_vmlh8, },
        { .fno = gen_helper_gvec_vmlh16, },
        { .fni4 = gen_mlh_i32, },
    };
    static const GVecGen3 g_vme[3] = {
        { .fno = gen_helper_gvec_vme8, },
        { .fno = gen_helper_gvec_vme16, },
        { .fno = gen_helper_gvec_vme32, },
    };
    static const GVecGen3 g_vmle[3] = {
        { .fno = gen_helper_gvec_vmle8, },
        { .fno = gen_helper_gvec_vmle16, },
        { .fno = gen_helper_gvec_vmle32, },
    };
    static const GVecGen3 g_vmo[3] = {
        { .fno = gen_helper_gvec_vmo8, },
        { .fno = gen_helper_gvec_vmo16, },
        { .fno = gen_helper_gvec_vmo32, },
    };
    static const GVecGen3 g_vmlo[3] = {
        { .fno = gen_helper_gvec_vmlo8, },
        { .fno = gen_helper_gvec_vmlo16, },
        { .fno = gen_helper_gvec_vmlo32, },
    };
    const GVecGen3 *fn;

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields->op2) {
    case 0xa2:
        gen_gvec_fn_3(mul, es, get_field(s->fields, v1),
                      get_field(s->fields, v2), get_field(s->fields, v3));
        return DISAS_NEXT;
    case 0xa3:
        fn = &g_vmh[es];
        break;
    case 0xa1:
        fn = &g_vmlh[es];
        break;
    case 0xa6:
        fn = &g_vme[es];
        break;
    case 0xa4:
        fn = &g_vmle[es];
        break;
    case 0xa7:
        fn = &g_vmo[es];
        break;
    case 0xa5:
        fn = &g_vmlo[es];
        break;
    default:
        g_assert_not_reached();
    }

    gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), fn);
    return DISAS_NEXT;
}
static DisasJumpType op_vnn(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(nand, ES_8, get_field(s->fields, v1),
                  get_field(s->fields, v2), get_field(s->fields, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vno(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(nor, ES_8, get_field(s->fields, v1), get_field(s->fields, v2),
                  get_field(s->fields, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vnx(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(eqv, ES_8, get_field(s->fields, v1), get_field(s->fields, v2),
                  get_field(s->fields, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vo(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(or, ES_8, get_field(s->fields, v1), get_field(s->fields, v2),
                  get_field(s->fields, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_voc(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(orc, ES_8, get_field(s->fields, v1), get_field(s->fields, v2),
                  get_field(s->fields, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vpopct8, },
        { .fno = gen_helper_gvec_vpopct16, },
        { .fni4 = tcg_gen_ctpop_i32, },
        { .fni8 = tcg_gen_ctpop_i64, },
    };

    if (es > ES_64 || (es != ES_8 && !s390_has_feat(S390_FEAT_VECTOR_ENH))) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2(get_field(s->fields, v1), get_field(s->fields, v2), &g[es]);
    return DISAS_NEXT;
}
static void gen_rll_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    tcg_gen_andi_i32(t0, b, 31);
    tcg_gen_rotl_i32(d, a, t0);
    tcg_temp_free_i32(t0);
}

static void gen_rll_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t0, b, 63);
    tcg_gen_rotl_i64(d, a, t0);
    tcg_temp_free_i64(t0);
}

static DisasJumpType op_verllv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_verllv8, },
        { .fno = gen_helper_gvec_verllv16, },
        { .fni4 = gen_rll_i32, },
        { .fni8 = gen_rll_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3(get_field(s->fields, v1), get_field(s->fields, v2),
               get_field(s->fields, v3), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_verll(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    static const GVecGen2s g[4] = {
        { .fno = gen_helper_gvec_verll8, },
        { .fno = gen_helper_gvec_verll16, },
        { .fni4 = gen_rll_i32, },
        { .fni8 = gen_rll_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2s(get_field(s->fields, v1), get_field(s->fields, v3), o->addr1,
                &g[es]);
    return DISAS_NEXT;
}
static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_rotli_i32(t, a, c & 31);
    tcg_gen_and_i32(t, t, b);
    tcg_gen_andc_i32(d, d, b);
    tcg_gen_or_i32(d, d, t);

    tcg_temp_free_i32(t);
}

static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_rotli_i64(t, a, c & 63);
    tcg_gen_and_i64(t, t, b);
    tcg_gen_andc_i64(d, d, b);
    tcg_gen_or_i64(d, d, t);

    tcg_temp_free_i64(t);
}
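/*
 * Both helpers compute d = (rol(a, c) & b) | (d & ~b): the source element is
 * rotated left by the immediate and merged into the destination under the
 * mask provided by the third operand.
 */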
static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m5);
    const uint8_t i4 = get_field(s->fields, i4) &
                       (NUM_VEC_ELEMENT_BITS(es) - 1);
    static const GVecGen3i g[4] = {
        { .fno = gen_helper_gvec_verim8, },
        { .fno = gen_helper_gvec_verim16, },
        { .fni4 = gen_rim_i32,
          .load_dest = true, },
        { .fni8 = gen_rim_i64,
          .load_dest = true, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3i(get_field(s->fields, v1), get_field(s->fields, v2),
                get_field(s->fields, v3), i4, &g[es]);
    return DISAS_NEXT;
}
static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v2 = get_field(s->fields, v2);
    const uint8_t v3 = get_field(s->fields, v3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields->op2) {
    case 0x70:
        gen_gvec_fn_3(shlv, es, v1, v2, v3);
        break;
    case 0x7a:
        gen_gvec_fn_3(sarv, es, v1, v2, v3);
        break;
    case 0x78:
        gen_gvec_fn_3(shrv, es, v1, v2, v3);
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}

static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);
    const uint8_t d2 = get_field(s->fields, d2) &
                       (NUM_VEC_ELEMENT_BITS(es) - 1);
    const uint8_t v1 = get_field(s->fields, v1);
    const uint8_t v3 = get_field(s->fields, v3);
    TCGv_i32 shift;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (likely(!get_field(s->fields, b2))) {
        switch (s->fields->op2) {
        case 0x30:
            gen_gvec_fn_2i(shli, es, v1, v3, d2);
            break;
        case 0x3a:
            gen_gvec_fn_2i(sari, es, v1, v3, d2);
            break;
        case 0x38:
            gen_gvec_fn_2i(shri, es, v1, v3, d2);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        shift = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(shift, o->addr1);
        tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1);
        switch (s->fields->op2) {
        case 0x30:
            gen_gvec_fn_2s(shls, es, v1, v3, shift);
            break;
        case 0x3a:
            gen_gvec_fn_2s(sars, es, v1, v3, shift);
            break;
        case 0x38:
            gen_gvec_fn_2s(shrs, es, v1, v3, shift);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_i32(shift);
    }
    return DISAS_NEXT;
}
static DisasJumpType op_vsl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s->fields, v3), 7, ES_8);
    if (s->fields->op2 == 0x74) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s->fields, v1), get_field(s->fields, v2),
                    shift, 0, gen_helper_gvec_vsl);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}

static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o)
{
    const uint8_t i4 = get_field(s->fields, i4) & 0xf;
    const int left_shift = (i4 & 7) * 8;
    const int right_shift = 64 - left_shift;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if ((i4 & 8) == 0) {
        read_vec_element_i64(t0, get_field(s->fields, v2), 0, ES_64);
        read_vec_element_i64(t1, get_field(s->fields, v2), 1, ES_64);
        read_vec_element_i64(t2, get_field(s->fields, v3), 0, ES_64);
    } else {
        read_vec_element_i64(t0, get_field(s->fields, v2), 1, ES_64);
        read_vec_element_i64(t1, get_field(s->fields, v3), 0, ES_64);
        read_vec_element_i64(t2, get_field(s->fields, v3), 1, ES_64);
    }
    tcg_gen_extract2_i64(t0, t1, t0, right_shift);
    tcg_gen_extract2_i64(t1, t2, t1, right_shift);
    write_vec_element_i64(t0, get_field(s->fields, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s->fields, v1), 1, ES_64);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}

static DisasJumpType op_vsra(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s->fields, v3), 7, ES_8);
    if (s->fields->op2 == 0x7e) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s->fields, v1), get_field(s->fields, v2),
                    shift, 0, gen_helper_gvec_vsra);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}

static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s->fields, v3), 7, ES_8);
    if (s->fields->op2 == 0x7c) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s->fields, v1), get_field(s->fields, v2),
                    shift, 0, gen_helper_gvec_vsrl);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}
static DisasJumpType op_vs(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s->fields, m4);

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s->fields, v1),
                          get_field(s->fields, v2), get_field(s->fields, v3));
        return DISAS_NEXT;
    }
    gen_gvec_fn_3(sub, es, get_field(s->fields, v1), get_field(s->fields, v2),
                  get_field(s->fields, v3));
    return DISAS_NEXT;
}