/*
 * QEMU TCG support -- s390x vector instruction translation functions
 *
 * Copyright (C) 2019 Red Hat Inc
 *
 * Authors:
 *   David Hildenbrand <david@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

/*
 * For most instructions that use the same element size for reads and
 * writes, we can use real gvec vector expansion, which potentially uses
 * real host vector instructions. As they only work up to 64 bit elements,
 * 128 bit elements (vector is a single element) have to be handled
 * differently. Operations that are too complicated to encode via TCG ops
 * are handled via gvec ool (out-of-line) handlers.
 *
 * As soon as instructions use different element sizes for reads and writes
 * or access elements "out of their element scope" we expand them manually
 * in fancy loops, as gvec expansion does not deal with actual element
 * numbers and does also not support access to other elements.
 *
 * 128 bit elements:
 *  As we only have i32/i64, such elements have to be loaded into two
 *  i64 values and can then be processed e.g. by tcg_gen_add2_i64.
 *
 * Sizes:
 *  On s390x, the operand size (oprsz) and the maximum size (maxsz) are
 *  always 16 (128 bit). What gvec code calls "vece", s390x calls "es",
 *  a.k.a. "element size". These values nicely map to MO_8 ... MO_64. Only
 *  the 128 bit element size has to be treated in a special way (MO_64 + 1).
 *  We will use ES_* instead of MO_* for this reason in this file.
 *
 * CC handling:
 *  As gvec ool-helpers can currently not return values (besides via
 *  pointers like vectors or cpu_env), whenever we have to set the CC and
 *  can't conclude the value from the result vector, we will directly
 *  set it in "env->cc_op" and mark it as static via set_cc_static().
 *  Whenever this is done, the helper writes globals (cc_op).
 */

#define NUM_VEC_ELEMENT_BYTES(es) (1 << (es))
#define NUM_VEC_ELEMENTS(es) (16 / NUM_VEC_ELEMENT_BYTES(es))
#define NUM_VEC_ELEMENT_BITS(es) (NUM_VEC_ELEMENT_BYTES(es) * BITS_PER_BYTE)

#define ES_8    MO_8
#define ES_16   MO_16
#define ES_32   MO_32
#define ES_64   MO_64
#define ES_128  4

/* Floating-Point Format */
#define FPF_SHORT  2
#define FPF_LONG   3
#define FPF_EXT    4

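/*
 * A quick worked example of the element-size macros above, assuming
 * es = ES_16: NUM_VEC_ELEMENT_BYTES(ES_16) is 1 << 1 = 2, so the 16 byte
 * vector holds 16 / 2 = 8 elements of 2 * 8 = 16 bits each.
 */
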
static inline bool valid_vec_element(uint8_t enr, MemOp es)
{
    return !(enr & ~(NUM_VEC_ELEMENTS(es) - 1));
}

static void read_vec_element_i64(TCGv_i64 dst, uint8_t reg, uint8_t enr,
                                 MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_ld8u_i64(dst, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(dst, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(dst, cpu_env, offs);
        break;
    case ES_8 | MO_SIGN:
        tcg_gen_ld8s_i64(dst, cpu_env, offs);
        break;
    case ES_16 | MO_SIGN:
        tcg_gen_ld16s_i64(dst, cpu_env, offs);
        break;
    case ES_32 | MO_SIGN:
        tcg_gen_ld32s_i64(dst, cpu_env, offs);
        break;
    case ES_64:
    case ES_64 | MO_SIGN:
        tcg_gen_ld_i64(dst, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void read_vec_element_i32(TCGv_i32 dst, uint8_t reg, uint8_t enr,
                                 MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_ld8u_i32(dst, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_ld16u_i32(dst, cpu_env, offs);
        break;
    case ES_8 | MO_SIGN:
        tcg_gen_ld8s_i32(dst, cpu_env, offs);
        break;
    case ES_16 | MO_SIGN:
        tcg_gen_ld16s_i32(dst, cpu_env, offs);
        break;
    case ES_32:
    case ES_32 | MO_SIGN:
        tcg_gen_ld_i32(dst, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i64(TCGv_i64 src, int reg, uint8_t enr,
                                  MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_st8_i64(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i64(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st32_i64(src, cpu_env, offs);
        break;
    case ES_64:
        tcg_gen_st_i64(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void write_vec_element_i32(TCGv_i32 src, int reg, uint8_t enr,
                                  MemOp memop)
{
    const int offs = vec_reg_offset(reg, enr, memop & MO_SIZE);

    switch (memop) {
    case ES_8:
        tcg_gen_st8_i32(src, cpu_env, offs);
        break;
    case ES_16:
        tcg_gen_st16_i32(src, cpu_env, offs);
        break;
    case ES_32:
        tcg_gen_st_i32(src, cpu_env, offs);
        break;
    default:
        g_assert_not_reached();
    }
}

static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
                                    uint8_t es)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    /* mask off invalid parts from the element nr */
    tcg_gen_andi_i64(tmp, enr, NUM_VEC_ELEMENTS(es) - 1);

    /* convert it to an element offset relative to cpu_env (see vec_reg_offset()) */
    tcg_gen_shli_i64(tmp, tmp, es);
#ifndef HOST_WORDS_BIGENDIAN
    tcg_gen_xori_i64(tmp, tmp, 8 - NUM_VEC_ELEMENT_BYTES(es));
#endif
    tcg_gen_addi_i64(tmp, tmp, vec_full_reg_offset(reg));

    /* generate the final ptr by adding cpu_env */
    tcg_gen_trunc_i64_ptr(ptr, tmp);
    tcg_gen_add_ptr(ptr, ptr, cpu_env);

    tcg_temp_free_i64(tmp);
}

#define gen_gvec_2(v1, v2, gen) \
    tcg_gen_gvec_2(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   16, 16, gen)
#define gen_gvec_2s(v1, v2, c, gen) \
    tcg_gen_gvec_2s(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                    16, 16, c, gen)
#define gen_gvec_2_ool(v1, v2, data, fn) \
    tcg_gen_gvec_2_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       16, 16, data, fn)
#define gen_gvec_2i_ool(v1, v2, c, data, fn) \
    tcg_gen_gvec_2i_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                        c, 16, 16, data, fn)
#define gen_gvec_2_ptr(v1, v2, ptr, data, fn) \
    tcg_gen_gvec_2_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       ptr, 16, 16, data, fn)
#define gen_gvec_3(v1, v2, v3, gen) \
    tcg_gen_gvec_3(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), 16, 16, gen)
#define gen_gvec_3_ool(v1, v2, v3, data, fn) \
    tcg_gen_gvec_3_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), 16, 16, data, fn)
#define gen_gvec_3_ptr(v1, v2, v3, ptr, data, fn) \
    tcg_gen_gvec_3_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), ptr, 16, 16, data, fn)
#define gen_gvec_3i(v1, v2, v3, c, gen) \
    tcg_gen_gvec_3i(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                    vec_full_reg_offset(v3), 16, 16, c, gen)
#define gen_gvec_4(v1, v2, v3, v4, gen) \
    tcg_gen_gvec_4(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                   vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                   16, 16, gen)
#define gen_gvec_4_ool(v1, v2, v3, v4, data, fn) \
    tcg_gen_gvec_4_ool(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                       16, 16, data, fn)
#define gen_gvec_4_ptr(v1, v2, v3, v4, ptr, data, fn) \
    tcg_gen_gvec_4_ptr(vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                       vec_full_reg_offset(v3), vec_full_reg_offset(v4), \
                       ptr, 16, 16, data, fn)
#define gen_gvec_dup_i64(es, v1, c) \
    tcg_gen_gvec_dup_i64(es, vec_full_reg_offset(v1), 16, 16, c)
#define gen_gvec_mov(v1, v2) \
    tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
                     16)
#define gen_gvec_dup_imm(es, v1, c) \
    tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c);
#define gen_gvec_fn_2(fn, es, v1, v2) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      16, 16)
#define gen_gvec_fn_2i(fn, es, v1, v2, c) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      c, 16, 16)
#define gen_gvec_fn_2s(fn, es, v1, v2, s) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      s, 16, 16)
#define gen_gvec_fn_3(fn, es, v1, v2, v3) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      vec_full_reg_offset(v3), 16, 16)
#define gen_gvec_fn_4(fn, es, v1, v2, v3, v4) \
    tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
                      vec_full_reg_offset(v3), vec_full_reg_offset(v4), 16, 16)

/*
 * Helper to carry out a 128 bit vector computation using 2 i64 values per
 * vector.
 */
typedef void (*gen_gvec128_3_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                                     TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh);
static void gen_gvec128_3_i64(gen_gvec128_3_i64_fn fn, uint8_t d, uint8_t a,
                              uint8_t b)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 dl = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 al = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();
    TCGv_i64 bl = tcg_temp_new_i64();

    read_vec_element_i64(ah, a, 0, ES_64);
    read_vec_element_i64(al, a, 1, ES_64);
    read_vec_element_i64(bh, b, 0, ES_64);
    read_vec_element_i64(bl, b, 1, ES_64);
    fn(dl, dh, al, ah, bl, bh);
    write_vec_element_i64(dh, d, 0, ES_64);
    write_vec_element_i64(dl, d, 1, ES_64);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(dl);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(al);
    tcg_temp_free_i64(bh);
    tcg_temp_free_i64(bl);
}

typedef void (*gen_gvec128_4_i64_fn)(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                                     TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh,
                                     TCGv_i64 cl, TCGv_i64 ch);
static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
                              uint8_t b, uint8_t c)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 dl = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 al = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();
    TCGv_i64 bl = tcg_temp_new_i64();
    TCGv_i64 ch = tcg_temp_new_i64();
    TCGv_i64 cl = tcg_temp_new_i64();

    read_vec_element_i64(ah, a, 0, ES_64);
    read_vec_element_i64(al, a, 1, ES_64);
    read_vec_element_i64(bh, b, 0, ES_64);
    read_vec_element_i64(bl, b, 1, ES_64);
    read_vec_element_i64(ch, c, 0, ES_64);
    read_vec_element_i64(cl, c, 1, ES_64);
    fn(dl, dh, al, ah, bl, bh, cl, ch);
    write_vec_element_i64(dh, d, 0, ES_64);
    write_vec_element_i64(dl, d, 1, ES_64);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(dl);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(al);
    tcg_temp_free_i64(bh);
    tcg_temp_free_i64(bl);
    tcg_temp_free_i64(ch);
    tcg_temp_free_i64(cl);
}

static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                          uint64_t b)
{
    TCGv_i64 bl = tcg_const_i64(b);
    TCGv_i64 bh = tcg_const_i64(0);

    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    tcg_temp_free_i64(bl);
    tcg_temp_free_i64(bh);
}

static DisasJumpType op_vge(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);

    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static uint64_t generate_byte_mask(uint8_t mask)
{
    uint64_t r = 0;
    int i;

    for (i = 0; i < 8; i++) {
        if ((mask >> i) & 1) {
            r |= 0xffull << (i * 8);
        }
    }
    return r;
}

static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
{
    const uint16_t i2 = get_field(s, i2);

    if (i2 == (i2 & 0xff) * 0x0101) {
        /*
         * Masks for both 64 bit elements of the vector are the same.
         * Trust tcg to produce a good constant loading.
         */
        gen_gvec_dup_imm(ES_64, get_field(s, v1),
                         generate_byte_mask(i2 & 0xff));
    } else {
        TCGv_i64 t = tcg_temp_new_i64();

        tcg_gen_movi_i64(t, generate_byte_mask(i2 >> 8));
        write_vec_element_i64(t, get_field(s, v1), 0, ES_64);
        tcg_gen_movi_i64(t, generate_byte_mask(i2));
        write_vec_element_i64(t, get_field(s, v1), 1, ES_64);
        tcg_temp_free_i64(t);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t bits = NUM_VEC_ELEMENT_BITS(es);
    const uint8_t i2 = get_field(s, i2) & (bits - 1);
    const uint8_t i3 = get_field(s, i3) & (bits - 1);
    uint64_t mask = 0;
    int i;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* generate the mask - take care of wrapping */
    for (i = i2; ; i = (i + 1) % bits) {
        mask |= 1ull << (bits - i - 1);
        if (i == i3) {
            break;
        }
    }

    gen_gvec_dup_imm(es, get_field(s, v1), mask);
    return DISAS_NEXT;
}

static DisasJumpType op_vl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_qemu_ld_i64(t0, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_vlr(DisasContext *s, DisasOps *o)
{
    gen_gvec_mov(get_field(s, v1), get_field(s, v2));
    return DISAS_NEXT;
}

static DisasJumpType op_vlrep(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    TCGv_i64 tmp;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    gen_gvec_dup_i64(es, get_field(s, v1), tmp);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vle(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vlei(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_const_i64((int16_t)get_field(s, i2));
    write_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vlgv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_ptr ptr;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* fast path if we don't need the register content */
    if (!get_field(s, b2)) {
        uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);

        read_vec_element_i64(o->out, get_field(s, v3), enr, es);
        return DISAS_NEXT;
    }

    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s, v3), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_ld8u_i64(o->out, ptr, 0);
        break;
    case ES_16:
        tcg_gen_ld16u_i64(o->out, ptr, 0);
        break;
    case ES_32:
        tcg_gen_ld32u_i64(o->out, ptr, 0);
        break;
    case ES_64:
        tcg_gen_ld_i64(o->out, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);

    return DISAS_NEXT;
}

static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s, m3);
    uint8_t enr;
    TCGv_i64 t;

    switch (es) {
    /* rightmost sub-element of leftmost doubleword */
    case ES_8:
        enr = 7;
        break;
    case ES_16:
        enr = 3;
        break;
    case ES_32:
        enr = 1;
        break;
    case ES_64:
        enr = 0;
        break;
    /* leftmost sub-element of leftmost doubleword */
    case 6:
        if (s390_has_feat(S390_FEAT_VECTOR_ENH)) {
            es = ES_32;
            enr = 0;
            break;
        }
        /* fallthrough */
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    t = tcg_temp_new_i64();
    tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
    gen_gvec_dup_imm(es, get_field(s, v1), 0);
    write_vec_element_i64(t, get_field(s, v1), enr, es);
    tcg_temp_free_i64(t);
    return DISAS_NEXT;
}

static DisasJumpType op_vlm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s, v3);
    uint8_t v1 = get_field(s, v1);
    TCGv_i64 t0, t1;

    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /*
     * Check for possible access exceptions by trying to load the last
     * element. The first element will be checked next, when the loop
     * below starts loading.
     */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    gen_addi_and_wrap_i64(s, t0, o->addr1, (v3 - v1) * 16 + 8);
    tcg_gen_qemu_ld_i64(t0, t0, get_mem_index(s), MO_TEQ);

    for (;; v1++) {
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 0, ES_64);
        if (v1 == v3) {
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        tcg_gen_qemu_ld_i64(t1, o->addr1, get_mem_index(s), MO_TEQ);
        write_vec_element_i64(t1, v1, 1, ES_64);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }

    /* Store the last element, loaded first */
    write_vec_element_i64(t0, v1, 1, ES_64);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_vlbb(DisasContext *s, DisasOps *o)
{
    const int64_t block_size = (1ull << (get_field(s, m3) + 6));
    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
    TCGv_ptr a0;
    TCGv_i64 bytes;

    if (get_field(s, m3) > 6) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    bytes = tcg_temp_new_i64();
    a0 = tcg_temp_new_ptr();
    /* calculate the number of bytes until the next block boundary */
    tcg_gen_ori_i64(bytes, o->addr1, -block_size);
    tcg_gen_neg_i64(bytes, bytes);

    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, bytes);
    tcg_temp_free_i64(bytes);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}

static DisasJumpType op_vlvg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_ptr ptr;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* fast path if we don't need the register content */
    if (!get_field(s, b2)) {
        uint8_t enr = get_field(s, d2) & (NUM_VEC_ELEMENTS(es) - 1);

        write_vec_element_i64(o->in2, get_field(s, v1), enr, es);
        return DISAS_NEXT;
    }

    ptr = tcg_temp_new_ptr();
    get_vec_element_ptr_i64(ptr, get_field(s, v1), o->addr1, es);
    switch (es) {
    case ES_8:
        tcg_gen_st8_i64(o->in2, ptr, 0);
        break;
    case ES_16:
        tcg_gen_st16_i64(o->in2, ptr, 0);
        break;
    case ES_32:
        tcg_gen_st32_i64(o->in2, ptr, 0);
        break;
    case ES_64:
        tcg_gen_st_i64(o->in2, ptr, 0);
        break;
    default:
        g_assert_not_reached();
    }
    tcg_temp_free_ptr(ptr);

    return DISAS_NEXT;
}

static DisasJumpType op_vlvgp(DisasContext *s, DisasOps *o)
{
    write_vec_element_i64(o->in1, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(o->in2, get_field(s, v1), 1, ES_64);
    return DISAS_NEXT;
}

static DisasJumpType op_vll(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();

    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vll(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}

static DisasJumpType op_vmr(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);
    const uint8_t es = get_field(s, m4);
    int dst_idx, src_idx;
    TCGv_i64 tmp;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    if (s->fields.op2 == 0x61) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(es); dst_idx++) {
            src_idx = (dst_idx + NUM_VEC_ELEMENTS(es)) / 2;
            if (dst_idx % 2 == 0) {
                read_vec_element_i64(tmp, v2, src_idx, es);
            } else {
                read_vec_element_i64(tmp, v3, src_idx, es);
            }
            write_vec_element_i64(tmp, v1, dst_idx, es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vpk(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);
    const uint8_t es = get_field(s, m4);
    static gen_helper_gvec_3 * const vpk[3] = {
        gen_helper_gvec_vpk16,
        gen_helper_gvec_vpk32,
        gen_helper_gvec_vpk64,
    };
    static gen_helper_gvec_3 * const vpks[3] = {
        gen_helper_gvec_vpks16,
        gen_helper_gvec_vpks32,
        gen_helper_gvec_vpks64,
    };
    static gen_helper_gvec_3_ptr * const vpks_cc[3] = {
        gen_helper_gvec_vpks_cc16,
        gen_helper_gvec_vpks_cc32,
        gen_helper_gvec_vpks_cc64,
    };
    static gen_helper_gvec_3 * const vpkls[3] = {
        gen_helper_gvec_vpkls16,
        gen_helper_gvec_vpkls32,
        gen_helper_gvec_vpkls64,
    };
    static gen_helper_gvec_3_ptr * const vpkls_cc[3] = {
        gen_helper_gvec_vpkls_cc16,
        gen_helper_gvec_vpkls_cc32,
        gen_helper_gvec_vpkls_cc64,
    };

    if (es == ES_8 || es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0x97:
        if (get_field(s, m5) & 0x1) {
            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpks_cc[es - 1]);
            set_cc_static(s);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpks[es - 1]);
        }
        break;
    case 0x95:
        if (get_field(s, m5) & 0x1) {
            gen_gvec_3_ptr(v1, v2, v3, cpu_env, 0, vpkls_cc[es - 1]);
            set_cc_static(s);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpkls[es - 1]);
        }
        break;
    case 0x94:
        /* If sources and destination don't overlap -> fast path */
        if (v1 != v2 && v1 != v3) {
            const uint8_t src_es = get_field(s, m4);
            const uint8_t dst_es = src_es - 1;
            TCGv_i64 tmp = tcg_temp_new_i64();
            int dst_idx, src_idx;

            for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
                src_idx = dst_idx;
                if (src_idx < NUM_VEC_ELEMENTS(src_es)) {
                    read_vec_element_i64(tmp, v2, src_idx, src_es);
                } else {
                    src_idx -= NUM_VEC_ELEMENTS(src_es);
                    read_vec_element_i64(tmp, v3, src_idx, src_es);
                }
                write_vec_element_i64(tmp, v1, dst_idx, dst_es);
            }
            tcg_temp_free_i64(tmp);
        } else {
            gen_gvec_3_ool(v1, v2, v3, 0, vpk[es - 1]);
        }
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vperm(DisasContext *s, DisasOps *o)
{
    gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), get_field(s, v4),
                   0, gen_helper_gvec_vperm);
    return DISAS_NEXT;
}

static DisasJumpType op_vpdi(DisasContext *s, DisasOps *o)
{
    const uint8_t i2 = extract32(get_field(s, m4), 2, 1);
    const uint8_t i3 = extract32(get_field(s, m4), 0, 1);
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    read_vec_element_i64(t0, get_field(s, v2), i2, ES_64);
    read_vec_element_i64(t1, get_field(s, v3), i3, ES_64);
    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    return DISAS_NEXT;
}

static DisasJumpType op_vrep(DisasContext *s, DisasOps *o)
{
    const uint8_t enr = get_field(s, i2);
    const uint8_t es = get_field(s, m4);

    if (es > ES_64 || !valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_gvec_dup_mem(es, vec_full_reg_offset(get_field(s, v1)),
                         vec_reg_offset(get_field(s, v3), enr, es),
                         16, 16);
    return DISAS_NEXT;
}

static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
{
    const int64_t data = (int16_t)get_field(s, i2);
    const uint8_t es = get_field(s, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_dup_imm(es, get_field(s, v1), data);
    return DISAS_NEXT;
}

static DisasJumpType op_vsce(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), enr, es);
    tcg_gen_add_i64(o->addr1, o->addr1, tmp);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 0);

    read_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vsel(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_4(bitsel, ES_8, get_field(s, v1),
                  get_field(s, v4), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vseg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    int idx1, idx2;
    TCGv_i64 tmp;

    switch (es) {
    case ES_8:
        idx1 = 7;
        idx2 = 15;
        break;
    case ES_16:
        idx1 = 3;
        idx2 = 7;
        break;
    case ES_32:
        idx1 = 1;
        idx2 = 3;
        break;
    default:
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v2), idx1, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
    read_vec_element_i64(tmp, get_field(s, v2), idx2, es | MO_SIGN);
    write_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vst(DisasContext *s, DisasOps *o)
{
    TCGv_i64 tmp = tcg_const_i64(16);

    /* Probe write access before actually modifying memory */
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);

    read_vec_element_i64(tmp, get_field(s, v1), 0, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    read_vec_element_i64(tmp, get_field(s, v1), 1, ES_64);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vste(DisasContext *s, DisasOps *o)
{
    const uint8_t es = s->insn->data;
    const uint8_t enr = get_field(s, m3);
    TCGv_i64 tmp;

    if (!valid_vec_element(enr, es)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    read_vec_element_i64(tmp, get_field(s, v1), enr, es);
    tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TE | es);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vstm(DisasContext *s, DisasOps *o)
{
    const uint8_t v3 = get_field(s, v3);
    uint8_t v1 = get_field(s, v1);
    TCGv_i64 tmp;

    if (v3 < v1 || (v3 - v1 + 1) > 16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    /* Probe write access before actually modifying memory */
    tmp = tcg_const_i64((v3 - v1 + 1) * 16);
    gen_helper_probe_write_access(cpu_env, o->addr1, tmp);

    for (;; v1++) {
        read_vec_element_i64(tmp, v1, 0, ES_64);
        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
        read_vec_element_i64(tmp, v1, 1, ES_64);
        tcg_gen_qemu_st_i64(tmp, o->addr1, get_mem_index(s), MO_TEQ);
        if (v1 == v3) {
            break;
        }
        gen_addi_and_wrap_i64(s, o->addr1, o->addr1, 8);
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vstl(DisasContext *s, DisasOps *o)
{
    const int v1_offs = vec_full_reg_offset(get_field(s, v1));
    TCGv_ptr a0 = tcg_temp_new_ptr();

    /* convert highest index into an actual length */
    tcg_gen_addi_i64(o->in2, o->in2, 1);
    tcg_gen_addi_ptr(a0, cpu_env, v1_offs);
    gen_helper_vstl(cpu_env, a0, o->addr1, o->in2);
    tcg_temp_free_ptr(a0);
    return DISAS_NEXT;
}

static DisasJumpType op_vup(DisasContext *s, DisasOps *o)
{
    const bool logical = s->fields.op2 == 0xd4 || s->fields.op2 == 0xd5;
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t src_es = get_field(s, m3);
    const uint8_t dst_es = src_es + 1;
    int dst_idx, src_idx;
    TCGv_i64 tmp;

    if (src_es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tmp = tcg_temp_new_i64();
    if (s->fields.op2 == 0xd7 || s->fields.op2 == 0xd5) {
        /* iterate backwards to avoid overwriting data we might need later */
        for (dst_idx = NUM_VEC_ELEMENTS(dst_es) - 1; dst_idx >= 0; dst_idx--) {
            src_idx = dst_idx;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    } else {
        /* iterate forward to avoid overwriting data we might need later */
        for (dst_idx = 0; dst_idx < NUM_VEC_ELEMENTS(dst_es); dst_idx++) {
            src_idx = dst_idx + NUM_VEC_ELEMENTS(src_es) / 2;
            read_vec_element_i64(tmp, v2, src_idx,
                                 src_es | (logical ? 0 : MO_SIGN));
            write_vec_element_i64(tmp, v1, dst_idx, dst_es);
        }
    }
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_va(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(tcg_gen_add2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_fn_3(add, es, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static void gen_acc(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, uint8_t es)
{
    const uint8_t msb_bit_nr = NUM_VEC_ELEMENT_BITS(es) - 1;
    TCGv_i64 msb_mask = tcg_const_i64(dup_const(es, 1ull << msb_bit_nr));
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    /* Calculate the carry into the MSB, ignoring the old MSBs */
    tcg_gen_andc_i64(t1, a, msb_mask);
    tcg_gen_andc_i64(t2, b, msb_mask);
    tcg_gen_add_i64(t1, t1, t2);
    /* Calculate the MSB without any carry into it */
    tcg_gen_xor_i64(t3, a, b);
    /* Calculate the carry out of the MSB in the MSB bit position */
    tcg_gen_and_i64(d, a, b);
    tcg_gen_and_i64(t1, t1, t3);
    tcg_gen_or_i64(d, d, t1);
    /* Isolate and shift the carry into position */
    tcg_gen_and_i64(d, d, msb_mask);
    tcg_gen_shri_i64(d, d, msb_bit_nr);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}

static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    gen_acc(d, a, b, ES_8);
}

static void gen_acc16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    gen_acc(d, a, b, ES_16);
}

static void gen_acc_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_add_i32(t, a, b);
    tcg_gen_setcond_i32(TCG_COND_LTU, d, t, b);
    tcg_temp_free_i32(t);
}

static void gen_acc_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_add_i64(t, a, b);
    tcg_gen_setcond_i64(TCG_COND_LTU, d, t, b);
    tcg_temp_free_i64(t);
}

static void gen_acc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                         TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_add2_i64(tl, th, al, zero, bl, zero);
    tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
    tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vacc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fni8 = gen_acc8_i64, },
        { .fni8 = gen_acc16_i64, },
        { .fni4 = gen_acc_i32, },
        { .fni8 = gen_acc_i64, },
    };

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(gen_acc2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static void gen_ac2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                        TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_const_i64(0);

    /* extract the carry only */
    tcg_gen_extract_i64(tl, cl, 0, 1);
    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    tcg_gen_add2_i64(dl, dh, dl, dh, tl, th);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
}

static DisasJumpType op_vac(DisasContext *s, DisasOps *o)
{
    if (get_field(s, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_ac2_i64, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3),
                      get_field(s, v4));
    return DISAS_NEXT;
}

static void gen_accc2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                          TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_andi_i64(tl, cl, 1);
    tcg_gen_add2_i64(tl, th, tl, zero, al, zero);
    tcg_gen_add2_i64(tl, th, tl, th, bl, zero);
    tcg_gen_add2_i64(tl, th, th, zero, ah, zero);
    tcg_gen_add2_i64(tl, dl, tl, th, bh, zero);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vaccc(DisasContext *s, DisasOps *o)
{
    if (get_field(s, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_accc2_i64, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3),
                      get_field(s, v4));
    return DISAS_NEXT;
}

static DisasJumpType op_vn(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(and, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vnc(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(andc, ES_8, get_field(s, v1),
                  get_field(s, v2), get_field(s, v3));
    return DISAS_NEXT;
}

static void gen_avg_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_add_i64(t0, t0, t1);
    tcg_gen_addi_i64(t0, t0, 1);
    tcg_gen_shri_i64(t0, t0, 1);
    tcg_gen_extrl_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void gen_avg_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 ah = tcg_temp_new_i64();
    TCGv_i64 bh = tcg_temp_new_i64();

    /* extending the sign by one bit is sufficient */
    tcg_gen_extract_i64(ah, al, 63, 1);
    tcg_gen_extract_i64(bh, bl, 63, 1);
    tcg_gen_add2_i64(dl, dh, al, ah, bl, bh);
    gen_addi2_i64(dl, dh, dl, dh, 1);
    tcg_gen_extract2_i64(dl, dl, dh, 1);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(ah);
    tcg_temp_free_i64(bh);
}

static DisasJumpType op_vavg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vavg8, },
        { .fno = gen_helper_gvec_vavg16, },
        { .fni4 = gen_avg_i32, },
        { .fni8 = gen_avg_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static void gen_avgl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_add_i64(t0, t0, t1);
    tcg_gen_addi_i64(t0, t0, 1);
    tcg_gen_shri_i64(t0, t0, 1);
    tcg_gen_extrl_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void gen_avgl_i64(TCGv_i64 dl, TCGv_i64 al, TCGv_i64 bl)
{
    TCGv_i64 dh = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_add2_i64(dl, dh, al, zero, bl, zero);
    gen_addi2_i64(dl, dh, dl, dh, 1);
    tcg_gen_extract2_i64(dl, dl, dh, 1);

    tcg_temp_free_i64(dh);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vavgl(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vavgl8, },
        { .fno = gen_helper_gvec_vavgl16, },
        { .fni4 = gen_avgl_i32, },
        { .fni8 = gen_avgl_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    TCGv_i32 sum = tcg_temp_new_i32();
    int i;

    read_vec_element_i32(sum, get_field(s, v3), 1, ES_32);
    for (i = 0; i < 4; i++) {
        read_vec_element_i32(tmp, get_field(s, v2), i, ES_32);
        tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
    }
    gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
    write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(sum);
    return DISAS_NEXT;
}

static DisasJumpType op_vec(DisasContext *s, DisasOps *o)
{
    uint8_t es = get_field(s, m3);
    const uint8_t enr = NUM_VEC_ELEMENTS(es) / 2 - 1;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    if (s->fields.op2 == 0xdb) {
        /* signed comparison */
        es |= MO_SIGN;
    }

    o->in1 = tcg_temp_new_i64();
    o->in2 = tcg_temp_new_i64();
    read_vec_element_i64(o->in1, get_field(s, v1), enr, es);
    read_vec_element_i64(o->in2, get_field(s, v2), enr, es);
    return DISAS_NEXT;
}

static DisasJumpType op_vc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGCond cond = s->insn->data;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    tcg_gen_gvec_cmp(cond, es,
                     vec_full_reg_offset(get_field(s, v1)),
                     vec_full_reg_offset(get_field(s, v2)),
                     vec_full_reg_offset(get_field(s, v3)), 16, 16);
    if (get_field(s, m5) & 0x1) {
        TCGv_i64 low = tcg_temp_new_i64();
        TCGv_i64 high = tcg_temp_new_i64();

        read_vec_element_i64(high, get_field(s, v1), 0, ES_64);
        read_vec_element_i64(low, get_field(s, v1), 1, ES_64);
        gen_op_update2_cc_i64(s, CC_OP_VC, low, high);

        tcg_temp_free_i64(low);
        tcg_temp_free_i64(high);
    }
    return DISAS_NEXT;
}

static void gen_clz_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_clzi_i32(d, a, 32);
}

static void gen_clz_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_clzi_i64(d, a, 64);
}

static DisasJumpType op_vclz(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vclz8, },
        { .fno = gen_helper_gvec_vclz16, },
        { .fni4 = gen_clz_i32, },
        { .fni8 = gen_clz_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
    return DISAS_NEXT;
}

static void gen_ctz_i32(TCGv_i32 d, TCGv_i32 a)
{
    tcg_gen_ctzi_i32(d, a, 32);
}

static void gen_ctz_i64(TCGv_i64 d, TCGv_i64 a)
{
    tcg_gen_ctzi_i64(d, a, 64);
}

static DisasJumpType op_vctz(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vctz8, },
        { .fno = gen_helper_gvec_vctz16, },
        { .fni4 = gen_ctz_i32, },
        { .fni8 = gen_ctz_i64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vx(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(xor, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vgfm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vgfm8, },
        { .fno = gen_helper_gvec_vgfm16, },
        { .fno = gen_helper_gvec_vgfm32, },
        { .fno = gen_helper_gvec_vgfm64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vgfma(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    static const GVecGen4 g[4] = {
        { .fno = gen_helper_gvec_vgfma8, },
        { .fno = gen_helper_gvec_vgfma16, },
        { .fno = gen_helper_gvec_vgfma32, },
        { .fno = gen_helper_gvec_vgfma64, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }
    gen_gvec_4(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), get_field(s, v4), &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vlc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_fn_2(neg, es, get_field(s, v1), get_field(s, v2));
    return DISAS_NEXT;
}

static DisasJumpType op_vlp(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_fn_2(abs, es, get_field(s, v1), get_field(s, v2));
    return DISAS_NEXT;
}

static DisasJumpType op_vmx(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);
    const uint8_t es = get_field(s, m4);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xff:
        gen_gvec_fn_3(smax, es, v1, v2, v3);
        break;
    case 0xfd:
        gen_gvec_fn_3(umax, es, v1, v2, v3);
        break;
    case 0xfe:
        gen_gvec_fn_3(smin, es, v1, v2, v3);
        break;
    case 0xfc:
        gen_gvec_fn_3(umin, es, v1, v2, v3);
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}

static void gen_mal_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i32 t0 = tcg_temp_new_i32();

    tcg_gen_mul_i32(t0, a, b);
    tcg_gen_add_i32(d, t0, c);

    tcg_temp_free_i32(t0);
}

static void gen_mah_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(t0, a);
    tcg_gen_ext_i32_i64(t1, b);
    tcg_gen_ext_i32_i64(t2, c);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_add_i64(t0, t0, t2);
    tcg_gen_extrh_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static void gen_malh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, TCGv_i32 c)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(t0, a);
    tcg_gen_extu_i32_i64(t1, b);
    tcg_gen_extu_i32_i64(t2, c);
    tcg_gen_mul_i64(t0, t0, t1);
    tcg_gen_add_i64(t0, t0, t2);
    tcg_gen_extrh_i64_i32(d, t0);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static DisasJumpType op_vma(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    static const GVecGen4 g_vmal[3] = {
        { .fno = gen_helper_gvec_vmal8, },
        { .fno = gen_helper_gvec_vmal16, },
        { .fni4 = gen_mal_i32, },
    };
    static const GVecGen4 g_vmah[3] = {
        { .fno = gen_helper_gvec_vmah8, },
        { .fno = gen_helper_gvec_vmah16, },
        { .fni4 = gen_mah_i32, },
    };
    static const GVecGen4 g_vmalh[3] = {
        { .fno = gen_helper_gvec_vmalh8, },
        { .fno = gen_helper_gvec_vmalh16, },
        { .fni4 = gen_malh_i32, },
    };
    static const GVecGen4 g_vmae[3] = {
        { .fno = gen_helper_gvec_vmae8, },
        { .fno = gen_helper_gvec_vmae16, },
        { .fno = gen_helper_gvec_vmae32, },
    };
    static const GVecGen4 g_vmale[3] = {
        { .fno = gen_helper_gvec_vmale8, },
        { .fno = gen_helper_gvec_vmale16, },
        { .fno = gen_helper_gvec_vmale32, },
    };
    static const GVecGen4 g_vmao[3] = {
        { .fno = gen_helper_gvec_vmao8, },
        { .fno = gen_helper_gvec_vmao16, },
        { .fno = gen_helper_gvec_vmao32, },
    };
    static const GVecGen4 g_vmalo[3] = {
        { .fno = gen_helper_gvec_vmalo8, },
        { .fno = gen_helper_gvec_vmalo16, },
        { .fno = gen_helper_gvec_vmalo32, },
    };
    const GVecGen4 *fn;

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xaa:
        fn = &g_vmal[es];
        break;
    case 0xab:
        fn = &g_vmah[es];
        break;
    case 0xa9:
        fn = &g_vmalh[es];
        break;
    case 0xae:
        fn = &g_vmae[es];
        break;
    case 0xac:
        fn = &g_vmale[es];
        break;
    case 0xaf:
        fn = &g_vmao[es];
        break;
    case 0xad:
        fn = &g_vmalo[es];
        break;
    default:
        g_assert_not_reached();
    }
    gen_gvec_4(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), get_field(s, v4), fn);
    return DISAS_NEXT;
}

static void gen_mh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_muls2_i32(t, d, a, b);
    tcg_temp_free_i32(t);
}

static void gen_mlh_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_mulu2_i32(t, d, a, b);
    tcg_temp_free_i32(t);
}

static DisasJumpType op_vm(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g_vmh[3] = {
        { .fno = gen_helper_gvec_vmh8, },
        { .fno = gen_helper_gvec_vmh16, },
        { .fni4 = gen_mh_i32, },
    };
    static const GVecGen3 g_vmlh[3] = {
        { .fno = gen_helper_gvec_vmlh8, },
        { .fno = gen_helper_gvec_vmlh16, },
        { .fni4 = gen_mlh_i32, },
    };
    static const GVecGen3 g_vme[3] = {
        { .fno = gen_helper_gvec_vme8, },
        { .fno = gen_helper_gvec_vme16, },
        { .fno = gen_helper_gvec_vme32, },
    };
    static const GVecGen3 g_vmle[3] = {
        { .fno = gen_helper_gvec_vmle8, },
        { .fno = gen_helper_gvec_vmle16, },
        { .fno = gen_helper_gvec_vmle32, },
    };
    static const GVecGen3 g_vmo[3] = {
        { .fno = gen_helper_gvec_vmo8, },
        { .fno = gen_helper_gvec_vmo16, },
        { .fno = gen_helper_gvec_vmo32, },
    };
    static const GVecGen3 g_vmlo[3] = {
        { .fno = gen_helper_gvec_vmlo8, },
        { .fno = gen_helper_gvec_vmlo16, },
        { .fno = gen_helper_gvec_vmlo32, },
    };
    const GVecGen3 *fn;

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xa2:
        gen_gvec_fn_3(mul, es, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    case 0xa3:
        fn = &g_vmh[es];
        break;
    case 0xa1:
        fn = &g_vmlh[es];
        break;
    case 0xa6:
        fn = &g_vme[es];
        break;
    case 0xa4:
        fn = &g_vmle[es];
        break;
    case 0xa7:
        fn = &g_vmo[es];
        break;
    case 0xa5:
        fn = &g_vmlo[es];
        break;
    default:
        g_assert_not_reached();
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), fn);
    return DISAS_NEXT;
}

static DisasJumpType op_vnn(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(nand, ES_8, get_field(s, v1),
                  get_field(s, v2), get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vno(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(nor, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vnx(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(eqv, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vo(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(or, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_voc(DisasContext *s, DisasOps *o)
{
    gen_gvec_fn_3(orc, ES_8, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static DisasJumpType op_vpopct(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m3);
    static const GVecGen2 g[4] = {
        { .fno = gen_helper_gvec_vpopct8, },
        { .fno = gen_helper_gvec_vpopct16, },
        { .fni4 = tcg_gen_ctpop_i32, },
        { .fni8 = tcg_gen_ctpop_i64, },
    };

    if (es > ES_64 || (es != ES_8 && !s390_has_feat(S390_FEAT_VECTOR_ENH))) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_2(get_field(s, v1), get_field(s, v2), &g[es]);
    return DISAS_NEXT;
}

static void gen_rim_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b, int32_t c)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_rotli_i32(t, a, c & 31);
    tcg_gen_and_i32(t, t, b);
    tcg_gen_andc_i32(d, d, b);
    tcg_gen_or_i32(d, d, t);

    tcg_temp_free_i32(t);
}

static void gen_rim_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, int64_t c)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_rotli_i64(t, a, c & 63);
    tcg_gen_and_i64(t, t, b);
    tcg_gen_andc_i64(d, d, b);
    tcg_gen_or_i64(d, d, t);

    tcg_temp_free_i64(t);
}

static DisasJumpType op_verim(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    const uint8_t i4 = get_field(s, i4) &
                       (NUM_VEC_ELEMENT_BITS(es) - 1);
    static const GVecGen3i g[4] = {
        { .fno = gen_helper_gvec_verim8, },
        { .fno = gen_helper_gvec_verim16, },
        { .fni4 = gen_rim_i32,
          .load_dest = true, },
        { .fni8 = gen_rim_i64,
          .load_dest = true, },
    };

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec_3i(get_field(s, v1), get_field(s, v2),
                get_field(s, v3), i4, &g[es]);
    return DISAS_NEXT;
}

static DisasJumpType op_vesv(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t v3 = get_field(s, v3);

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0x70:
        gen_gvec_fn_3(shlv, es, v1, v2, v3);
        break;
    case 0x73:
        gen_gvec_fn_3(rotlv, es, v1, v2, v3);
        break;
    case 0x7a:
        gen_gvec_fn_3(sarv, es, v1, v2, v3);
        break;
    case 0x78:
        gen_gvec_fn_3(shrv, es, v1, v2, v3);
        break;
    default:
        g_assert_not_reached();
    }
    return DISAS_NEXT;
}

static DisasJumpType op_ves(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t d2 = get_field(s, d2) &
                       (NUM_VEC_ELEMENT_BITS(es) - 1);
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v3 = get_field(s, v3);
    TCGv_i32 shift;

    if (es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (likely(!get_field(s, b2))) {
        switch (s->fields.op2) {
        case 0x30:
            gen_gvec_fn_2i(shli, es, v1, v3, d2);
            break;
        case 0x33:
            gen_gvec_fn_2i(rotli, es, v1, v3, d2);
            break;
        case 0x3a:
            gen_gvec_fn_2i(sari, es, v1, v3, d2);
            break;
        case 0x38:
            gen_gvec_fn_2i(shri, es, v1, v3, d2);
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        shift = tcg_temp_new_i32();
        tcg_gen_extrl_i64_i32(shift, o->addr1);
        tcg_gen_andi_i32(shift, shift, NUM_VEC_ELEMENT_BITS(es) - 1);
        switch (s->fields.op2) {
        case 0x30:
            gen_gvec_fn_2s(shls, es, v1, v3, shift);
            break;
        case 0x33:
            gen_gvec_fn_2s(rotls, es, v1, v3, shift);
            break;
        case 0x3a:
            gen_gvec_fn_2s(sars, es, v1, v3, shift);
            break;
        case 0x38:
            gen_gvec_fn_2s(shrs, es, v1, v3, shift);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_i32(shift);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vsl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
    if (s->fields.op2 == 0x74) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
                    shift, 0, gen_helper_gvec_vsl);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}

static DisasJumpType op_vsldb(DisasContext *s, DisasOps *o)
{
    const uint8_t i4 = get_field(s, i4) & 0xf;
    const int left_shift = (i4 & 7) * 8;
    const int right_shift = 64 - left_shift;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    if ((i4 & 8) == 0) {
        read_vec_element_i64(t0, get_field(s, v2), 0, ES_64);
        read_vec_element_i64(t1, get_field(s, v2), 1, ES_64);
        read_vec_element_i64(t2, get_field(s, v3), 0, ES_64);
    } else {
        read_vec_element_i64(t0, get_field(s, v2), 1, ES_64);
        read_vec_element_i64(t1, get_field(s, v3), 0, ES_64);
        read_vec_element_i64(t2, get_field(s, v3), 1, ES_64);
    }
    tcg_gen_extract2_i64(t0, t1, t0, right_shift);
    tcg_gen_extract2_i64(t1, t2, t1, right_shift);
    write_vec_element_i64(t0, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(t1, get_field(s, v1), 1, ES_64);

    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    return DISAS_NEXT;
}

static DisasJumpType op_vsra(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
    if (s->fields.op2 == 0x7e) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
                    shift, 0, gen_helper_gvec_vsra);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}

static DisasJumpType op_vsrl(DisasContext *s, DisasOps *o)
{
    TCGv_i64 shift = tcg_temp_new_i64();

    read_vec_element_i64(shift, get_field(s, v3), 7, ES_8);
    if (s->fields.op2 == 0x7c) {
        tcg_gen_andi_i64(shift, shift, 0x7);
    } else {
        tcg_gen_andi_i64(shift, shift, 0x78);
    }

    gen_gvec_2i_ool(get_field(s, v1), get_field(s, v2),
                    shift, 0, gen_helper_gvec_vsrl);
    tcg_temp_free_i64(shift);
    return DISAS_NEXT;
}

static DisasJumpType op_vs(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(tcg_gen_sub2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_fn_3(sub, es, get_field(s, v1), get_field(s, v2),
                  get_field(s, v3));
    return DISAS_NEXT;
}

static void gen_scbi_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    tcg_gen_setcond_i32(TCG_COND_GEU, d, a, b);
}

static void gen_scbi_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    tcg_gen_setcond_i64(TCG_COND_GEU, d, a, b);
}

static void gen_scbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al,
                          TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 zero = tcg_const_i64(0);

    tcg_gen_sub2_i64(tl, th, al, zero, bl, zero);
    tcg_gen_andi_i64(th, th, 1);
    tcg_gen_sub2_i64(tl, th, ah, zero, th, zero);
    tcg_gen_sub2_i64(tl, th, tl, th, bh, zero);
    /* "invert" the result: -1 -> 0; 0 -> 1 */
    tcg_gen_addi_i64(dl, th, 1);
    tcg_gen_mov_i64(dh, zero);

    tcg_temp_free_i64(th);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(zero);
}

static DisasJumpType op_vscbi(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    static const GVecGen3 g[4] = {
        { .fno = gen_helper_gvec_vscbi8, },
        { .fno = gen_helper_gvec_vscbi16, },
        { .fni4 = gen_scbi_i32, },
        { .fni8 = gen_scbi_i64, },
    };

    if (es > ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    } else if (es == ES_128) {
        gen_gvec128_3_i64(gen_scbi2_i64, get_field(s, v1),
                          get_field(s, v2), get_field(s, v3));
        return DISAS_NEXT;
    }
    gen_gvec_3(get_field(s, v1), get_field(s, v2),
               get_field(s, v3), &g[es]);
    return DISAS_NEXT;
}

static void gen_sbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                         TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 tl = tcg_temp_new_i64();
    TCGv_i64 th = tcg_temp_new_i64();

    tcg_gen_not_i64(tl, bl);
    tcg_gen_not_i64(th, bh);
    gen_ac2_i64(dl, dh, al, ah, tl, th, cl, ch);
    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
}

static DisasJumpType op_vsbi(DisasContext *s, DisasOps *o)
{
    if (get_field(s, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_sbi2_i64, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3),
                      get_field(s, v4));
    return DISAS_NEXT;
}

static void gen_sbcbi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
                           TCGv_i64 bl, TCGv_i64 bh, TCGv_i64 cl, TCGv_i64 ch)
{
    TCGv_i64 th = tcg_temp_new_i64();
    TCGv_i64 tl = tcg_temp_new_i64();

    tcg_gen_not_i64(tl, bl);
    tcg_gen_not_i64(th, bh);
    gen_accc2_i64(dl, dh, al, ah, tl, th, cl, ch);

    tcg_temp_free_i64(tl);
    tcg_temp_free_i64(th);
}

static DisasJumpType op_vsbcbi(DisasContext *s, DisasOps *o)
{
    if (get_field(s, m5) != ES_128) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    gen_gvec128_4_i64(gen_sbcbi2_i64, get_field(s, v1),
                      get_field(s, v2), get_field(s, v3),
                      get_field(s, v4));
    return DISAS_NEXT;
}

static DisasJumpType op_vsumg(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_i64 sum, tmp;
    uint8_t dst_idx;

    if (es == ES_8 || es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sum = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();
    for (dst_idx = 0; dst_idx < 2; dst_idx++) {
        uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 2;
        const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 2 - 1;

        read_vec_element_i64(sum, get_field(s, v3), max_idx, es);
        for (; idx <= max_idx; idx++) {
            read_vec_element_i64(tmp, get_field(s, v2), idx, es);
            tcg_gen_add_i64(sum, sum, tmp);
        }
        write_vec_element_i64(sum, get_field(s, v1), dst_idx, ES_64);
    }
    tcg_temp_free_i64(sum);
    tcg_temp_free_i64(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vsumq(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t max_idx = NUM_VEC_ELEMENTS(es) - 1;
    TCGv_i64 sumh, suml, zero, tmpl;
    uint8_t idx;

    if (es < ES_32 || es > ES_64) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sumh = tcg_const_i64(0);
    suml = tcg_temp_new_i64();
    zero = tcg_const_i64(0);
    tmpl = tcg_temp_new_i64();

    read_vec_element_i64(suml, get_field(s, v3), max_idx, es);
    for (idx = 0; idx <= max_idx; idx++) {
        read_vec_element_i64(tmpl, get_field(s, v2), idx, es);
        tcg_gen_add2_i64(suml, sumh, suml, sumh, tmpl, zero);
    }
    write_vec_element_i64(sumh, get_field(s, v1), 0, ES_64);
    write_vec_element_i64(suml, get_field(s, v1), 1, ES_64);

    tcg_temp_free_i64(sumh);
    tcg_temp_free_i64(suml);
    tcg_temp_free_i64(zero);
    tcg_temp_free_i64(tmpl);
    return DISAS_NEXT;
}

static DisasJumpType op_vsum(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    TCGv_i32 sum, tmp;
    uint8_t dst_idx;

    if (es > ES_16) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    sum = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();
    for (dst_idx = 0; dst_idx < 4; dst_idx++) {
        uint8_t idx = dst_idx * NUM_VEC_ELEMENTS(es) / 4;
        const uint8_t max_idx = idx + NUM_VEC_ELEMENTS(es) / 4 - 1;

        read_vec_element_i32(sum, get_field(s, v3), max_idx, es);
        for (; idx <= max_idx; idx++) {
            read_vec_element_i32(tmp, get_field(s, v2), idx, es);
            tcg_gen_add_i32(sum, sum, tmp);
        }
        write_vec_element_i32(sum, get_field(s, v1), dst_idx, ES_32);
    }
    tcg_temp_free_i32(sum);
    tcg_temp_free_i32(tmp);
    return DISAS_NEXT;
}

static DisasJumpType op_vtm(DisasContext *s, DisasOps *o)
{
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                   cpu_env, 0, gen_helper_gvec_vtm);
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_vfae(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfae8,
        gen_helper_gvec_vfae16,
        gen_helper_gvec_vfae32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfae_cc8,
        gen_helper_gvec_vfae_cc16,
        gen_helper_gvec_vfae_cc32,
    };

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}

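/*
 * In op_vfae above, the least-significant bit of m5 is the CS
 * (condition-code set) flag and selects the CC-setting helper variant;
 * the remaining m5 flags are forwarded unmodified to the helper via the
 * simd_data parameter.
 */
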
static DisasJumpType op_vfee(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfee8,
        gen_helper_gvec_vfee16,
        gen_helper_gvec_vfee32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfee_cc8,
        gen_helper_gvec_vfee_cc16,
        gen_helper_gvec_vfee_cc32,
    };

    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vfene(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_3 * const g[3] = {
        gen_helper_gvec_vfene8,
        gen_helper_gvec_vfene16,
        gen_helper_gvec_vfene32,
    };
    static gen_helper_gvec_3_ptr * const g_cc[3] = {
        gen_helper_gvec_vfene_cc8,
        gen_helper_gvec_vfene_cc16,
        gen_helper_gvec_vfene_cc32,
    };

    if (es > ES_32 || m5 & ~0x3) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), cpu_env, m5, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_3_ool(get_field(s, v1), get_field(s, v2),
                       get_field(s, v3), m5, g[es]);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vistr(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    static gen_helper_gvec_2 * const g[3] = {
        gen_helper_gvec_vistr8,
        gen_helper_gvec_vistr16,
        gen_helper_gvec_vistr32,
    };
    static gen_helper_gvec_2_ptr * const g_cc[3] = {
        gen_helper_gvec_vistr_cc8,
        gen_helper_gvec_vistr_cc16,
        gen_helper_gvec_vistr_cc32,
    };

    if (es > ES_32 || m5 & ~0x1) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 0, 1)) {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, g_cc[es]);
        set_cc_static(s);
    } else {
        gen_gvec_2_ool(get_field(s, v1), get_field(s, v2), 0, g[es]);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vstrc(DisasContext *s, DisasOps *o)
{
    const uint8_t es = get_field(s, m5);
    const uint8_t m6 = get_field(s, m6);
    static gen_helper_gvec_4 * const g[3] = {
        gen_helper_gvec_vstrc8,
        gen_helper_gvec_vstrc16,
        gen_helper_gvec_vstrc32,
    };
    static gen_helper_gvec_4 * const g_rt[3] = {
        gen_helper_gvec_vstrc_rt8,
        gen_helper_gvec_vstrc_rt16,
        gen_helper_gvec_vstrc_rt32,
    };
    static gen_helper_gvec_4_ptr * const g_cc[3] = {
        gen_helper_gvec_vstrc_cc8,
        gen_helper_gvec_vstrc_cc16,
        gen_helper_gvec_vstrc_cc32,
    };
    static gen_helper_gvec_4_ptr * const g_cc_rt[3] = {
        gen_helper_gvec_vstrc_cc_rt8,
        gen_helper_gvec_vstrc_cc_rt16,
        gen_helper_gvec_vstrc_cc_rt32,
    };

    if (es > ES_32) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m6, 0, 1)) {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           cpu_env, m6, g_cc_rt[es]);
        } else {
            gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           cpu_env, m6, g_cc[es]);
        }
        set_cc_static(s);
    } else {
        if (extract32(m6, 2, 1)) {
            gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           m6, g_rt[es]);
        } else {
            gen_gvec_4_ool(get_field(s, v1), get_field(s, v2),
                           get_field(s, v3), get_field(s, v4),
                           m6, g[es]);
        }
    }
    return DISAS_NEXT;
}

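/*
 * op_vstrc above selects among four helper variants via two m6 flags:
 * the least-significant bit (CS) picks the CC-setting helpers, and bit 2
 * (RT, result type) picks the "_rt" helpers, which produce a per-element
 * match mask rather than the index of the first match.
 */
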
static DisasJumpType op_vfa(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    const bool se = extract32(m5, 3, 1);
    gen_helper_gvec_3_ptr *fn;

    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xe3:
        fn = se ? gen_helper_gvec_vfa64s : gen_helper_gvec_vfa64;
        break;
    case 0xe5:
        fn = se ? gen_helper_gvec_vfd64s : gen_helper_gvec_vfd64;
        break;
    case 0xe7:
        fn = se ? gen_helper_gvec_vfm64s : gen_helper_gvec_vfm64;
        break;
    case 0xe2:
        fn = se ? gen_helper_gvec_vfs64s : gen_helper_gvec_vfs64;
        break;
    default:
        g_assert_not_reached();
    }
    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), cpu_env, 0, fn);
    return DISAS_NEXT;
}

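/*
 * op_vfa above dispatches on the opcode to cover the four arithmetic
 * instructions (VFA, VFD, VFM, VFS) that share this handler; se (the
 * most-significant bit of the 4-bit m5 field) requests the
 * single-element "s" helper variant, which operates on element 0 only.
 */
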
static DisasJumpType op_wfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);

    if (fpf != FPF_LONG || m4) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (s->fields.op2 == 0xcb) {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, gen_helper_gvec_wfc64);
    } else {
        gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2),
                       cpu_env, 0, gen_helper_gvec_wfk64);
    }
    set_cc_static(s);
    return DISAS_NEXT;
}

static DisasJumpType op_vfc(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    const uint8_t m6 = get_field(s, m6);
    const bool se = extract32(m5, 3, 1);
    const bool cs = extract32(m6, 0, 1);
    gen_helper_gvec_3_ptr *fn;

    if (fpf != FPF_LONG || extract32(m5, 0, 3) || extract32(m6, 1, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (cs) {
        switch (s->fields.op2) {
        case 0xe8:
            fn = se ? gen_helper_gvec_vfce64s_cc : gen_helper_gvec_vfce64_cc;
            break;
        case 0xeb:
            fn = se ? gen_helper_gvec_vfch64s_cc : gen_helper_gvec_vfch64_cc;
            break;
        case 0xea:
            fn = se ? gen_helper_gvec_vfche64s_cc : gen_helper_gvec_vfche64_cc;
            break;
        default:
            g_assert_not_reached();
        }
    } else {
        switch (s->fields.op2) {
        case 0xe8:
            fn = se ? gen_helper_gvec_vfce64s : gen_helper_gvec_vfce64;
            break;
        case 0xeb:
            fn = se ? gen_helper_gvec_vfch64s : gen_helper_gvec_vfch64;
            break;
        case 0xea:
            fn = se ? gen_helper_gvec_vfche64s : gen_helper_gvec_vfche64;
            break;
        default:
            g_assert_not_reached();
        }
    }
    gen_gvec_3_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), cpu_env, 0, fn);
    if (cs) {
        set_cc_static(s);
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vcdg(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    const uint8_t erm = get_field(s, m5);
    const bool se = extract32(m4, 3, 1);
    gen_helper_gvec_2_ptr *fn;

    if (fpf != FPF_LONG || extract32(m4, 0, 2) || erm > 7 || erm == 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    switch (s->fields.op2) {
    case 0xc3:
        fn = se ? gen_helper_gvec_vcdg64s : gen_helper_gvec_vcdg64;
        break;
    case 0xc1:
        fn = se ? gen_helper_gvec_vcdlg64s : gen_helper_gvec_vcdlg64;
        break;
    case 0xc2:
        fn = se ? gen_helper_gvec_vcgd64s : gen_helper_gvec_vcgd64;
        break;
    case 0xc0:
        fn = se ? gen_helper_gvec_vclgd64s : gen_helper_gvec_vclgd64;
        break;
    case 0xc7:
        fn = se ? gen_helper_gvec_vfi64s : gen_helper_gvec_vfi64;
        break;
    case 0xc5:
        fn = se ? gen_helper_gvec_vflr64s : gen_helper_gvec_vflr64;
        break;
    default:
        g_assert_not_reached();
    }
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env,
                   deposit32(m4, 4, 4, erm), fn);
    return DISAS_NEXT;
}

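/*
 * deposit32() above packs the effective rounding mode into bits 4..7 of
 * the simd_data value, next to the m4 flags in bits 0..3, so a single
 * helper parameter carries both controls.
 */
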
static DisasJumpType op_vfll(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfll32;

    if (fpf != FPF_SHORT || extract32(m4, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        fn = gen_helper_gvec_vfll32s;
    }
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn);
    return DISAS_NEXT;
}

static DisasJumpType op_vfma(DisasContext *s, DisasOps *o)
{
    const uint8_t m5 = get_field(s, m5);
    const uint8_t fpf = get_field(s, m6);
    const bool se = extract32(m5, 3, 1);
    gen_helper_gvec_4_ptr *fn;

    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (s->fields.op2 == 0x8f) {
        fn = se ? gen_helper_gvec_vfma64s : gen_helper_gvec_vfma64;
    } else {
        fn = se ? gen_helper_gvec_vfms64s : gen_helper_gvec_vfms64;
    }
    gen_gvec_4_ptr(get_field(s, v1), get_field(s, v2),
                   get_field(s, v3), get_field(s, v4), cpu_env, 0, fn);
    return DISAS_NEXT;
}

static DisasJumpType op_vfpso(DisasContext *s, DisasOps *o)
{
    const uint8_t v1 = get_field(s, v1);
    const uint8_t v2 = get_field(s, v2);
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    TCGv_i64 tmp;

    if (fpf != FPF_LONG || extract32(m4, 0, 3) || m5 > 2) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        tmp = tcg_temp_new_i64();
        read_vec_element_i64(tmp, v2, 0, ES_64);
        switch (m5) {
        case 0:
            /* sign bit is inverted (complement) */
            tcg_gen_xori_i64(tmp, tmp, 1ull << 63);
            break;
        case 1:
            /* sign bit is set to one (negative) */
            tcg_gen_ori_i64(tmp, tmp, 1ull << 63);
            break;
        case 2:
            /* sign bit is set to zero (positive) */
            tcg_gen_andi_i64(tmp, tmp, (1ull << 63) - 1);
            break;
        }
        write_vec_element_i64(tmp, v1, 0, ES_64);
        tcg_temp_free_i64(tmp);
    } else {
        switch (m5) {
        case 0:
            /* sign bit is inverted (complement) */
            gen_gvec_fn_2i(xori, ES_64, v1, v2, 1ull << 63);
            break;
        case 1:
            /* sign bit is set to one (negative) */
            gen_gvec_fn_2i(ori, ES_64, v1, v2, 1ull << 63);
            break;
        case 2:
            /* sign bit is set to zero (positive) */
            gen_gvec_fn_2i(andi, ES_64, v1, v2, (1ull << 63) - 1);
            break;
        }
    }
    return DISAS_NEXT;
}

static DisasJumpType op_vfsq(DisasContext *s, DisasOps *o)
{
    const uint8_t fpf = get_field(s, m3);
    const uint8_t m4 = get_field(s, m4);
    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vfsq64;

    if (fpf != FPF_LONG || extract32(m4, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m4, 3, 1)) {
        fn = gen_helper_gvec_vfsq64s;
    }
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, 0, fn);
    return DISAS_NEXT;
}

static DisasJumpType op_vftci(DisasContext *s, DisasOps *o)
{
    const uint16_t i3 = get_field(s, i3);
    const uint8_t fpf = get_field(s, m4);
    const uint8_t m5 = get_field(s, m5);
    gen_helper_gvec_2_ptr *fn = gen_helper_gvec_vftci64;

    if (fpf != FPF_LONG || extract32(m5, 0, 3)) {
        gen_program_exception(s, PGM_SPECIFICATION);
        return DISAS_NORETURN;
    }

    if (extract32(m5, 3, 1)) {
        fn = gen_helper_gvec_vftci64s;
    }
    gen_gvec_2_ptr(get_field(s, v1), get_field(s, v2), cpu_env, i3, fn);
    set_cc_static(s);
    return DISAS_NEXT;
}