/*
 * Tiny Code Generator for QEMU
 *
 * Copyright (c) 2018 Linaro, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-mo.h"

/* Reduce the number of ifdefs below.  This assumes that all uses of
   TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
   the compiler can eliminate.  */
#if TCG_TARGET_REG_BITS == 64
extern TCGv_i32 TCGV_LOW_link_error(TCGv_i64);
extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64);
#define TCGV_LOW  TCGV_LOW_link_error
#define TCGV_HIGH TCGV_HIGH_link_error
#endif

/*
 * Vector optional opcode tracking.
 * Except for the basic logical operations (and, or, xor), and
 * data movement (mov, ld, st, dupi), many vector opcodes are
 * optional and may not be supported on the host.  Thank Intel
 * for the irregularity in their instruction set.
 *
 * The gvec expanders allow custom vector operations to be composed,
 * generally via the .fniv callback in the GVecGen* structures.  At
 * the same time, in deciding whether to use this hook we need to
 * know if the host supports the required operations.  This is
 * presented as an array of opcodes, terminated by 0.  Each opcode
 * is assumed to be expanded with the given VECE.
 *
 * For debugging, we want to validate this array.  Therefore, when
 * tcg_ctx->vecop_list is non-NULL, the tcg_gen_*_vec expanders
 * will validate that their opcode is present in the list.
 */

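/*
 * Illustrative sketch (not from this file): a front end whose .fniv
 * callback emits tcg_gen_neg_vec and tcg_gen_smax_vec might declare
 *
 *     static const TCGOpcode vecop_list[] = {
 *         INDEX_op_neg_vec, INDEX_op_smax_vec, 0
 *     };
 *
 * and pass it via the .opt_opc member of its GVecGen* structure, so
 * that the expanders below can check each opcode against the list.
 */
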
#ifdef CONFIG_DEBUG_TCG
void tcg_assert_listed_vecop(TCGOpcode op)
{
    const TCGOpcode *p = tcg_ctx->vecop_list;
    if (p) {
        for (; *p; ++p) {
            if (*p == op) {
                return;
            }
        }
        g_assert_not_reached();
    }
}
#endif

bool tcg_can_emit_vecop_list(const TCGOpcode *list,
                             TCGType type, unsigned vece)
{
    if (list == NULL) {
        return true;
    }

    for (; *list; ++list) {
        TCGOpcode opc = *list;

#ifdef CONFIG_DEBUG_TCG
        switch (opc) {
        case INDEX_op_and_vec:
        case INDEX_op_or_vec:
        case INDEX_op_xor_vec:
        case INDEX_op_mov_vec:
        case INDEX_op_dup_vec:
        case INDEX_op_dupi_vec:
        case INDEX_op_dup2_vec:
        case INDEX_op_ld_vec:
        case INDEX_op_st_vec:
            /* These opcodes are mandatory and should not be listed.  */
            g_assert_not_reached();
        default:
            break;
        }
#endif

        if (tcg_can_emit_vec_op(opc, type, vece)) {
            continue;
        }

        /*
         * The opcode list is created by front ends based on what they
         * actually invoke.  We must mirror the logic in the routines
         * below for generic expansions using other opcodes.
         */
        switch (opc) {
        case INDEX_op_neg_vec:
            if (tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece)) {
                continue;
            }
            break;
        case INDEX_op_abs_vec:
            if (tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece)
                && (tcg_can_emit_vec_op(INDEX_op_smax_vec, type, vece) > 0
                    || tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0
                    || tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece))) {
                continue;
            }
            break;
        default:
            break;
        }
        return false;
    }
    return true;
}

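/*
 * Hypothetical usage sketch: a gvec expander deciding whether its
 * .fniv callback is usable for 128-bit vectors of 32-bit lanes
 * would ask
 *
 *     tcg_can_emit_vecop_list(vecop_list, TCG_TYPE_V128, MO_32)
 *
 * and fall back to .fni4/.fni8 or an out-of-line helper when this
 * returns false.
 */
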
void vec_gen_2(TCGOpcode opc, TCGType type, unsigned vece, TCGArg r, TCGArg a)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
}

void vec_gen_3(TCGOpcode opc, TCGType type, unsigned vece,
               TCGArg r, TCGArg a, TCGArg b)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
}

void vec_gen_4(TCGOpcode opc, TCGType type, unsigned vece,
               TCGArg r, TCGArg a, TCGArg b, TCGArg c)
{
    TCGOp *op = tcg_emit_op(opc);
    TCGOP_VECL(op) = type - TCG_TYPE_V64;
    TCGOP_VECE(op) = vece;
    op->args[0] = r;
    op->args[1] = a;
    op->args[2] = b;
    op->args[3] = c;
}

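/*
 * The vec_gen_* helpers above append one raw opcode to the stream;
 * the operand vector length (relative to TCG_TYPE_V64) and the
 * element size are carried in the op's VECL and VECE fields.
 */
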
static void vec_gen_op2(TCGOpcode opc, unsigned vece, TCGv_vec r, TCGv_vec a)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGType type = rt->base_type;

    /* Must have enough inputs for the output.  */
    tcg_debug_assert(at->base_type >= type);
    vec_gen_2(opc, type, vece, temp_arg(rt), temp_arg(at));
}

static void vec_gen_op3(TCGOpcode opc, unsigned vece,
                        TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGType type = rt->base_type;

    /* Must have enough inputs for the output.  */
    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    vec_gen_3(opc, type, vece, temp_arg(rt), temp_arg(at), temp_arg(bt));
}

void tcg_gen_mov_vec(TCGv_vec r, TCGv_vec a)
{
    if (r != a) {
        vec_gen_op2(INDEX_op_mov_vec, 0, r, a);
    }
}

#define MO_REG  (TCG_TARGET_REG_BITS == 64 ? MO_64 : MO_32)

static void do_dupi_vec(TCGv_vec r, unsigned vece, TCGArg a)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    vec_gen_2(INDEX_op_dupi_vec, rt->base_type, vece, temp_arg(rt), a);
}

TCGv_vec tcg_const_zeros_vec(TCGType type)
{
    TCGv_vec ret = tcg_temp_new_vec(type);
    do_dupi_vec(ret, MO_REG, 0);
    return ret;
}

TCGv_vec tcg_const_ones_vec(TCGType type)
{
    TCGv_vec ret = tcg_temp_new_vec(type);
    do_dupi_vec(ret, MO_REG, -1);
    return ret;
}

TCGv_vec tcg_const_zeros_vec_matching(TCGv_vec m)
{
    TCGTemp *t = tcgv_vec_temp(m);
    return tcg_const_zeros_vec(t->base_type);
}

TCGv_vec tcg_const_ones_vec_matching(TCGv_vec m)
{
    TCGTemp *t = tcgv_vec_temp(m);
    return tcg_const_ones_vec(t->base_type);
}

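/*
 * Note on the strategy below: on a 32-bit host, a 64-bit constant
 * whose two halves are equal can be emitted as a 32-bit dupi, and one
 * that sign-extends from 32 bits as a 64-bit dupi; any other value is
 * first materialized in an i64 temp and broadcast with
 * tcg_gen_dup_i64_vec.
 */
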
void tcg_gen_dup64i_vec(TCGv_vec r, uint64_t a)
{
    if (TCG_TARGET_REG_BITS == 32 && a == deposit64(a, 32, 32, a)) {
        do_dupi_vec(r, MO_32, a);
    } else if (TCG_TARGET_REG_BITS == 64 || a == (uint64_t)(int32_t)a) {
        do_dupi_vec(r, MO_64, a);
    } else {
        TCGv_i64 c = tcg_const_i64(a);
        tcg_gen_dup_i64_vec(MO_64, r, c);
        tcg_temp_free_i64(c);
    }
}

void tcg_gen_dup32i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_32, a));
}

void tcg_gen_dup16i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_16, a));
}

void tcg_gen_dup8i_vec(TCGv_vec r, uint32_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(MO_8, a));
}

void tcg_gen_dupi_vec(unsigned vece, TCGv_vec r, uint64_t a)
{
    do_dupi_vec(r, MO_REG, dup_const(vece, a));
}

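/*
 * Worked example of the dup_const expansion used above: dup_const
 * replicates the low 8 << vece bits of its argument across 64 bits,
 * so dup_const(MO_8, 0x7f) yields 0x7f7f7f7f7f7f7f7f and a single
 * dupi immediate seeds every lane.
 */
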
void tcg_gen_dup_i64_vec(unsigned vece, TCGv_vec r, TCGv_i64 a)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    if (TCG_TARGET_REG_BITS == 64) {
        TCGArg ai = tcgv_i64_arg(a);
        vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
    } else if (vece == MO_64) {
        TCGArg al = tcgv_i32_arg(TCGV_LOW(a));
        TCGArg ah = tcgv_i32_arg(TCGV_HIGH(a));
        vec_gen_3(INDEX_op_dup2_vec, type, MO_64, ri, al, ah);
    } else {
        TCGArg ai = tcgv_i32_arg(TCGV_LOW(a));
        vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
    }
}

void tcg_gen_dup_i32_vec(unsigned vece, TCGv_vec r, TCGv_i32 a)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg ai = tcgv_i32_arg(a);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_2(INDEX_op_dup_vec, type, vece, ri, ai);
}

void tcg_gen_dup_mem_vec(unsigned vece, TCGv_vec r, TCGv_ptr b,
                         tcg_target_long ofs)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_3(INDEX_op_dupm_vec, type, vece, ri, bi, ofs);
}

static void vec_gen_ldst(TCGOpcode opc, TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    vec_gen_3(opc, type, 0, ri, bi, o);
}

void tcg_gen_ld_vec(TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    vec_gen_ldst(INDEX_op_ld_vec, r, b, o);
}

void tcg_gen_st_vec(TCGv_vec r, TCGv_ptr b, TCGArg o)
{
    vec_gen_ldst(INDEX_op_st_vec, r, b, o);
}

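/*
 * tcg_gen_stl_vec below stores only the low part of the vector: the
 * store is emitted at low_type rather than at the full base type of r.
 */
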
void tcg_gen_stl_vec(TCGv_vec r, TCGv_ptr b, TCGArg o, TCGType low_type)
{
    TCGArg ri = tcgv_vec_arg(r);
    TCGArg bi = tcgv_ptr_arg(b);
    TCGTemp *rt = arg_temp(ri);
    TCGType type = rt->base_type;

    tcg_debug_assert(low_type >= TCG_TYPE_V64);
    tcg_debug_assert(low_type <= type);
    vec_gen_3(INDEX_op_st_vec, low_type, 0, ri, bi, o);
}

void tcg_gen_and_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_and_vec, 0, r, a, b);
}

void tcg_gen_or_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_or_vec, 0, r, a, b);
}

void tcg_gen_xor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    vec_gen_op3(INDEX_op_xor_vec, 0, r, a, b);
}

void tcg_gen_andc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_andc_vec) {
        vec_gen_op3(INDEX_op_andc_vec, 0, r, a, b);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(r);
        tcg_gen_not_vec(0, t, b);
        tcg_gen_and_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_orc_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    if (TCG_TARGET_HAS_orc_vec) {
        vec_gen_op3(INDEX_op_orc_vec, 0, r, a, b);
    } else {
        TCGv_vec t = tcg_temp_new_vec_matching(r);
        tcg_gen_not_vec(0, t, b);
        tcg_gen_or_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_nand_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_nand_vec when adding a backend that
       supports it.  */
    tcg_gen_and_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

void tcg_gen_nor_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_nor_vec when adding a backend that
       supports it.  */
    tcg_gen_or_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

void tcg_gen_eqv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    /* TODO: Add TCG_TARGET_HAS_eqv_vec when adding a backend that
       supports it.  */
    tcg_gen_xor_vec(0, r, a, b);
    tcg_gen_not_vec(0, r, r);
}

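/*
 * Note on the tri-state value used below: tcg_can_emit_vec_op returns
 * positive if the host supports the opcode directly, negative if the
 * backend can expand it via tcg_expand_vec_op, and zero if the caller
 * must provide its own fallback.
 */
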
static bool do_op2(unsigned vece, TCGv_vec r, TCGv_vec a, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_assert_listed_vecop(opc);
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_2(opc, type, vece, ri, ai);
    } else if (can < 0) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_expand_vec_op(opc, type, vece, ri, ai);
        tcg_swap_vecop_list(hold_list);
    } else {
        return false;
    }
    return true;
}

void tcg_gen_not_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    if (!TCG_TARGET_HAS_not_vec || !do_op2(vece, r, a, INDEX_op_not_vec)) {
        TCGv_vec t = tcg_const_ones_vec_matching(r);
        tcg_gen_xor_vec(0, r, a, t);
        tcg_temp_free_vec(t);
    }
}

void tcg_gen_neg_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    const TCGOpcode *hold_list;

    tcg_assert_listed_vecop(INDEX_op_neg_vec);
    hold_list = tcg_swap_vecop_list(NULL);

    if (!TCG_TARGET_HAS_neg_vec || !do_op2(vece, r, a, INDEX_op_neg_vec)) {
        TCGv_vec t = tcg_const_zeros_vec_matching(r);
        tcg_gen_sub_vec(vece, r, t, a);
        tcg_temp_free_vec(t);
    }
    tcg_swap_vecop_list(hold_list);
}

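/*
 * Fallbacks used below when abs is not supported directly: with smax,
 * abs(a) = smax(a, -a); otherwise build a sign mask t (all ones in
 * negative lanes, via an arithmetic shift or a less-than-zero
 * compare) and use the identity abs(a) = (a ^ t) - t.
 */
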
void tcg_gen_abs_vec(unsigned vece, TCGv_vec r, TCGv_vec a)
{
    const TCGOpcode *hold_list;

    tcg_assert_listed_vecop(INDEX_op_abs_vec);
    hold_list = tcg_swap_vecop_list(NULL);

    if (!do_op2(vece, r, a, INDEX_op_abs_vec)) {
        TCGType type = tcgv_vec_temp(r)->base_type;
        TCGv_vec t = tcg_temp_new_vec(type);

        tcg_debug_assert(tcg_can_emit_vec_op(INDEX_op_sub_vec, type, vece));
        if (tcg_can_emit_vec_op(INDEX_op_smax_vec, type, vece) > 0) {
            tcg_gen_neg_vec(vece, t, a);
            tcg_gen_smax_vec(vece, r, a, t);
        } else {
            if (tcg_can_emit_vec_op(INDEX_op_sari_vec, type, vece) > 0) {
                tcg_gen_sari_vec(vece, t, a, (8 << vece) - 1);
            } else {
                do_dupi_vec(t, MO_REG, 0);
                tcg_gen_cmp_vec(TCG_COND_LT, vece, t, a, t);
            }
            tcg_gen_xor_vec(vece, r, a, t);
            tcg_gen_sub_vec(vece, r, r, t);
        }

        tcg_temp_free_vec(t);
    }
    tcg_swap_vecop_list(hold_list);
}

static void do_shifti(TCGOpcode opc, unsigned vece,
                      TCGv_vec r, TCGv_vec a, int64_t i)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type == type);
    tcg_debug_assert(i >= 0 && i < (8 << vece));
    tcg_assert_listed_vecop(opc);

    if (i == 0) {
        tcg_gen_mov_vec(r, a);
        return;
    }

    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, i);
    } else {
        /* We leave the choice of expansion via scalar or vector shift
           to the target.  Often, but not always, dupi can feed a vector
           shift more easily than a scalar.  */
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(opc, type, vece, ri, ai, i);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_shli_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_shli_vec, vece, r, a, i);
}

void tcg_gen_shri_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_shri_vec, vece, r, a, i);
}

void tcg_gen_sari_vec(unsigned vece, TCGv_vec r, TCGv_vec a, int64_t i)
{
    do_shifti(INDEX_op_sari_vec, vece, r, a, i);
}

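/*
 * Usage sketch (hypothetical values): to shift every 16-bit lane of
 * a left by 3,
 *
 *     tcg_gen_shli_vec(MO_16, r, a, 3);
 *
 * the immediate must satisfy 0 <= i < 16, as asserted in do_shifti.
 */
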
void tcg_gen_cmp_vec(TCGCond cond, unsigned vece,
                     TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_assert_listed_vecop(INDEX_op_cmp_vec);
    can = tcg_can_emit_vec_op(INDEX_op_cmp_vec, type, vece);
    if (can > 0) {
        vec_gen_4(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
    } else {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(INDEX_op_cmp_vec, type, vece, ri, ai, bi, cond);
        tcg_swap_vecop_list(hold_list);
    }
}

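/*
 * Note: each output lane of tcg_gen_cmp_vec is set to all ones when
 * the comparison holds and all zeros when it does not, which is what
 * lets the abs expansion above use the result as a sign mask.
 */
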
static void do_op3(unsigned vece, TCGv_vec r, TCGv_vec a,
                   TCGv_vec b, TCGOpcode opc)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *bt = tcgv_vec_temp(b);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg bi = temp_arg(bt);
    TCGType type = rt->base_type;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_debug_assert(bt->base_type >= type);
    tcg_assert_listed_vecop(opc);
    can = tcg_can_emit_vec_op(opc, type, vece);
    if (can > 0) {
        vec_gen_3(opc, type, vece, ri, ai, bi);
    } else {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        tcg_debug_assert(can < 0);
        tcg_expand_vec_op(opc, type, vece, ri, ai, bi);
        tcg_swap_vecop_list(hold_list);
    }
}

void tcg_gen_add_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_add_vec);
}

void tcg_gen_sub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_sub_vec);
}

void tcg_gen_mul_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_mul_vec);
}

void tcg_gen_ssadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_ssadd_vec);
}

void tcg_gen_usadd_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_usadd_vec);
}

void tcg_gen_sssub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_sssub_vec);
}

void tcg_gen_ussub_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_ussub_vec);
}

void tcg_gen_smin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_smin_vec);
}

void tcg_gen_umin_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_umin_vec);
}

void tcg_gen_smax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_smax_vec);
}

void tcg_gen_umax_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_umax_vec);
}

void tcg_gen_shlv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_shlv_vec);
}

void tcg_gen_shrv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_shrv_vec);
}

void tcg_gen_sarv_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_vec b)
{
    do_op3(vece, r, a, b, INDEX_op_sarv_vec);
}

static void do_shifts(unsigned vece, TCGv_vec r, TCGv_vec a,
                      TCGv_i32 s, TCGOpcode opc_s, TCGOpcode opc_v)
{
    TCGTemp *rt = tcgv_vec_temp(r);
    TCGTemp *at = tcgv_vec_temp(a);
    TCGTemp *st = tcgv_i32_temp(s);
    TCGArg ri = temp_arg(rt);
    TCGArg ai = temp_arg(at);
    TCGArg si = temp_arg(st);
    TCGType type = rt->base_type;
    const TCGOpcode *hold_list;
    int can;

    tcg_debug_assert(at->base_type >= type);
    tcg_assert_listed_vecop(opc_s);
    hold_list = tcg_swap_vecop_list(NULL);

    can = tcg_can_emit_vec_op(opc_s, type, vece);
    if (can > 0) {
        vec_gen_3(opc_s, type, vece, ri, ai, si);
    } else if (can < 0) {
        tcg_expand_vec_op(opc_s, type, vece, ri, ai, si);
    } else {
        TCGv_vec vec_s = tcg_temp_new_vec(type);

        if (vece == MO_64) {
            TCGv_i64 s64 = tcg_temp_new_i64();
            tcg_gen_extu_i32_i64(s64, s);
            tcg_gen_dup_i64_vec(MO_64, vec_s, s64);
            tcg_temp_free_i64(s64);
        } else {
            tcg_gen_dup_i32_vec(vece, vec_s, s);
        }
        do_op3(vece, r, a, vec_s, opc_v);
        tcg_temp_free_vec(vec_s);
    }
    tcg_swap_vecop_list(hold_list);
}

void tcg_gen_shls_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
{
    do_shifts(vece, r, a, b, INDEX_op_shls_vec, INDEX_op_shlv_vec);
}

void tcg_gen_shrs_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
{
    do_shifts(vece, r, a, b, INDEX_op_shrs_vec, INDEX_op_shrv_vec);
}

void tcg_gen_sars_vec(unsigned vece, TCGv_vec r, TCGv_vec a, TCGv_i32 b)
{
    do_shifts(vece, r, a, b, INDEX_op_sars_vec, INDEX_op_sarv_vec);
}
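
/*
 * Usage sketch (hypothetical caller): to shift every 32-bit lane of v
 * left by the run-time amount held in the i32 temp n,
 *
 *     tcg_gen_shls_vec(MO_32, v, v, n);
 *
 * when the host lacks shls, do_shifts broadcasts the count and reuses
 * the shlv variant or its expansion instead.
 */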