/*
 * Generic vector operation expansion
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "qemu/main-loop.h"
#include "tcg/tcg-gvec-desc.h"

#define MAX_UNROLL  4

#ifdef CONFIG_DEBUG_TCG
static const TCGOpcode vecop_list_empty[1] = { 0 };
#else
#define vecop_list_empty NULL
#endif

/* Verify vector size and alignment rules.  OFS should be the OR of all
   of the operand offsets so that we can check them all at once.  */
static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
{
    uint32_t max_align;

    switch (oprsz) {
    case 8:
    case 16:
    case 32:
        tcg_debug_assert(oprsz <= maxsz);
        break;
    default:
        tcg_debug_assert(oprsz == maxsz);
        break;
    }
    tcg_debug_assert(maxsz <= (8 << SIMD_MAXSZ_BITS));

    max_align = maxsz >= 16 ? 15 : 7;
    tcg_debug_assert((maxsz & max_align) == 0);
    tcg_debug_assert((ofs & max_align) == 0);
}

/* Verify vector overlap rules for two operands.  */
static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
{
    tcg_debug_assert(d == a || d + s <= a || a + s <= d);
}

/* Verify vector overlap rules for three operands.  */
static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(a, b, s);
}

/* Verify vector overlap rules for four operands.  */
static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
                            uint32_t c, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(d, c, s);
    check_overlap_2(a, b, s);
    check_overlap_2(a, c, s);
    check_overlap_2(b, c, s);
}

/* Create a descriptor from components.  */
uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
{
    uint32_t desc = 0;

    check_size_align(oprsz, maxsz, 0);
    tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS));

    oprsz = (oprsz / 8) - 1;
    maxsz = (maxsz / 8) - 1;

    /*
     * We have just asserted in check_size_align that either
     * oprsz is {8, 16, 32} or matches maxsz.  Encode the final
     * case with '2', as that would otherwise map to 24.
     */
    if (oprsz == maxsz) {
        oprsz = 2;
    }

    desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz);
    desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz);
    desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data);

    return desc;
}

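/*
 * Worked example (illustrative): simd_desc(16, 64, 5) stores
 * 16 / 8 - 1 = 1 in the OPRSZ field, 64 / 8 - 1 = 7 in the MAXSZ
 * field, and 5 in the DATA field.  A helper recovers the values with
 * the accessors from tcg-gvec-desc.h:
 *
 *     intptr_t opr_sz = simd_oprsz(desc);   // 16
 *     intptr_t max_sz = simd_maxsz(desc);   // 64
 *     int32_t data = simd_data(desc);       // 5
 */
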
/* Generate a call to a gvec-style helper with two vector operands.  */
void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_2 *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

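/*
 * For reference, a matching out-of-line helper has roughly this shape.
 * The name and operation are hypothetical, not part of this file; the
 * two pointers are the cpu_env + dofs and cpu_env + aofs computed
 * above, and a real helper must also clear the tail from simd_oprsz()
 * up to simd_maxsz():
 *
 *     void HELPER(gvec_negate)(void *d, void *a, uint32_t desc)
 *     {
 *         intptr_t i, opr_sz = simd_oprsz(desc);
 *         for (i = 0; i < opr_sz; i += 8) {
 *             *(uint64_t *)(d + i) = -*(uint64_t *)(a + i);
 *         }
 *     }
 */
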
/* Generate a call to a gvec-style helper with two vector operands
   and one scalar operand.  */
void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
                         uint32_t oprsz, uint32_t maxsz, int32_t data,
                         gen_helper_gvec_2i *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, c, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with three vector operands.  */
void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_3 *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);

    fn(a0, a1, a2, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with four vector operands.  */
void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_4 *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);

    fn(a0, a1, a2, a3, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with five vector operands.  */
void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t xofs, uint32_t oprsz,
                        uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn)
{
    TCGv_ptr a0, a1, a2, a3, a4;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();
    a4 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);
    tcg_gen_addi_ptr(a4, cpu_env, xofs);

    fn(a0, a1, a2, a3, a4, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_ptr(a4);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with two vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_2_ptr *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with three vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);

    fn(a0, a1, a2, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with four vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
                        uint32_t maxsz, int32_t data,
                        gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);

    fn(a0, a1, a2, a3, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with five vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t eofs, TCGv_ptr ptr,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_5_ptr *fn)
{
    TCGv_ptr a0, a1, a2, a3, a4;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();
    a4 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);
    tcg_gen_addi_ptr(a4, cpu_env, eofs);

    fn(a0, a1, a2, a3, a4, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_ptr(a4);
    tcg_temp_free_i32(desc);
}

/* Return true if we want to implement something of OPRSZ bytes
   in units of LNSZ.  This limits the expansion of inline code.  */
static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
{
    uint32_t q, r;

    if (oprsz < lnsz) {
        return false;
    }

    q = oprsz / lnsz;
    r = oprsz % lnsz;
    tcg_debug_assert((r & 7) == 0);

    if (lnsz < 16) {
        /* For sizes below 16, accept no remainder.  */
        if (r != 0) {
            return false;
        }
    } else {
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         * In addition, expand_clr needs to handle a multiple of 8.
         * Thus we can handle the tail with one more operation per
         * diminishing power of 2.
         */
        q += ctpop32(r);
    }

    return q <= MAX_UNROLL;
}

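/*
 * Worked example: oprsz == 80 with lnsz == 32 gives q == 2, r == 16;
 * ctpop32(16) == 1 accounts for the single 16-byte tail operation, so
 * q becomes 3, which is within MAX_UNROLL and the size is accepted.
 */
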
static void expand_clr(uint32_t dofs, uint32_t maxsz);

/* Duplicate C as per VECE.  */
uint64_t (dup_const)(unsigned vece, uint64_t c)
{
    switch (vece) {
    case MO_8:
        return 0x0101010101010101ull * (uint8_t)c;
    case MO_16:
        return 0x0001000100010001ull * (uint16_t)c;
    case MO_32:
        return 0x0000000100000001ull * (uint32_t)c;
    case MO_64:
        return c;
    default:
        g_assert_not_reached();
    }
}

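/*
 * For example, dup_const(MO_16, 0x12345678) keeps only the low 16
 * bits and replicates them: 0x5678567856785678ull.  The parenthesized
 * function name lets this out-of-line version coexist with a
 * dup_const() macro that folds constant arguments at compile time.
 */
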
/* Duplicate IN into OUT as per VECE.  */
static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i32(out, in);
        tcg_gen_muli_i32(out, out, 0x01010101);
        break;
    case MO_16:
        tcg_gen_deposit_i32(out, in, in, 16, 16);
        break;
    case MO_32:
        tcg_gen_mov_i32(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0101010101010101ull);
        break;
    case MO_16:
        tcg_gen_ext16u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0001000100010001ull);
        break;
    case MO_32:
        tcg_gen_deposit_i64(out, in, in, 32, 32);
        break;
    case MO_64:
        tcg_gen_mov_i64(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Select a supported vector type for implementing an operation on SIZE
 * bytes.  If OP is 0, assume that the real operation to be performed is
 * required by all backends.  Otherwise, make sure that OP can be performed
 * on elements of size VECE in the selected type.  Do not select V64 if
 * PREFER_I64 is true.  Return 0 if no vector type is selected.
 */
static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
                                  uint32_t size, bool prefer_i64)
{
    /*
     * Recall that ARM SVE allows vector sizes that are not a
     * power of 2, but always a multiple of 16.  The intent is
     * that e.g. size == 80 would be expanded with 2x32 + 1x16.
     * It is hard to imagine a case in which v256 is supported
     * but v128 is not, but check anyway.
     * In addition, expand_clr needs to handle a multiple of 8.
     */
    if (TCG_TARGET_HAS_v256 &&
        check_size_impl(size, 32) &&
        tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece) &&
        (!(size & 16) ||
         (TCG_TARGET_HAS_v128 &&
          tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) &&
        (!(size & 8) ||
         (TCG_TARGET_HAS_v64 &&
          tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
        return TCG_TYPE_V256;
    }
    if (TCG_TARGET_HAS_v128 &&
        check_size_impl(size, 16) &&
        tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece) &&
        (!(size & 8) ||
         (TCG_TARGET_HAS_v64 &&
          tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
        return TCG_TYPE_V128;
    }
    if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
        && tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)) {
        return TCG_TYPE_V64;
    }
    return 0;
}

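/*
 * Illustration: for size == 80 on a host providing v256 and v128, the
 * first test succeeds (80 bytes = 2x32 + 1x16, and the 16-byte tail
 * is covered by v128), so TCG_TYPE_V256 is returned and the expanders
 * below emit two 32-byte operations plus one 16-byte operation.
 */
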
static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, TCGv_vec t_vec)
{
    uint32_t i = 0;

    tcg_debug_assert(oprsz >= 8);

    /*
     * This may be expand_clr for the tail of an operation, e.g.
     * oprsz == 8 && maxsz == 64.  The first 8 bytes of this store
     * are misaligned wrt the maximum vector size, so do that first.
     */
    if (dofs & 8) {
        tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
        i += 8;
    }

    switch (type) {
    case TCG_TYPE_V256:
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        for (; i + 32 <= oprsz; i += 32) {
            tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
        }
        /* fallthru */
    case TCG_TYPE_V128:
        for (; i + 16 <= oprsz; i += 16) {
            tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
        }
        break;
    case TCG_TYPE_V64:
        for (; i < oprsz; i += 8) {
            tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
 * Only one of IN_32 or IN_64 may be set;
 * IN_C is used if IN_32 and IN_64 are unset.
 */
static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
                   uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
                   uint64_t in_c)
{
    TCGType type;
    TCGv_i64 t_64;
    TCGv_i32 t_32, t_desc;
    TCGv_ptr t_ptr;
    uint32_t i;

    assert(vece <= (in_32 ? MO_32 : MO_64));
    assert(in_32 == NULL || in_64 == NULL);

    /* If we're storing 0, expand oprsz to maxsz.  */
    if (in_32 == NULL && in_64 == NULL) {
        in_c = dup_const(vece, in_c);
        if (in_c == 0) {
            oprsz = maxsz;
        } else if (in_c == dup_const(MO_8, in_c)) {
            vece = MO_8;
        }
    }

    /* Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and no variable dup.
     */
    type = choose_vector_type(NULL, vece, oprsz,
                              (TCG_TARGET_REG_BITS == 64 && in_32 == NULL
                               && (in_64 == NULL || vece == MO_64)));
    if (type != 0) {
        TCGv_vec t_vec = tcg_temp_new_vec(type);

        if (in_32) {
            tcg_gen_dup_i32_vec(vece, t_vec, in_32);
        } else if (in_64) {
            tcg_gen_dup_i64_vec(vece, t_vec, in_64);
        } else {
            tcg_gen_dupi_vec(vece, t_vec, in_c);
        }
        do_dup_store(type, dofs, oprsz, maxsz, t_vec);
        tcg_temp_free_vec(t_vec);
        return;
    }

    /* Otherwise, inline with an integer type, unless "large".  */
    if (check_size_impl(oprsz, TCG_TARGET_REG_BITS / 8)) {
        t_64 = NULL;
        t_32 = NULL;

        if (in_32) {
            /* We are given a 32-bit variable input.  For a 64-bit host,
               use a 64-bit operation unless the 32-bit operation would
               be simple enough.  */
            if (TCG_TARGET_REG_BITS == 64
                && (vece != MO_32 || !check_size_impl(oprsz, 4))) {
                t_64 = tcg_temp_new_i64();
                tcg_gen_extu_i32_i64(t_64, in_32);
                gen_dup_i64(vece, t_64, t_64);
            } else {
                t_32 = tcg_temp_new_i32();
                gen_dup_i32(vece, t_32, in_32);
            }
        } else if (in_64) {
            /* We are given a 64-bit variable input.  */
            t_64 = tcg_temp_new_i64();
            gen_dup_i64(vece, t_64, in_64);
        } else {
            /* We are given a constant input.  */
            /* For 64-bit hosts, use 64-bit constants for "simple" constants
               or when we'd need too many 32-bit stores, or when a 64-bit
               constant is really required.  */
            if (vece == MO_64
                || (TCG_TARGET_REG_BITS == 64
                    && (in_c == 0 || in_c == -1
                        || !check_size_impl(oprsz, 4)))) {
                t_64 = tcg_const_i64(in_c);
            } else {
                t_32 = tcg_const_i32(in_c);
            }
        }

        /* Implement inline if we picked an implementation size above.  */
        if (t_32) {
            for (i = 0; i < oprsz; i += 4) {
                tcg_gen_st_i32(t_32, cpu_env, dofs + i);
            }
            tcg_temp_free_i32(t_32);
            goto done;
        }
        if (t_64) {
            for (i = 0; i < oprsz; i += 8) {
                tcg_gen_st_i64(t_64, cpu_env, dofs + i);
            }
            tcg_temp_free_i64(t_64);
            goto done;
        }
    }

    /* Otherwise implement out of line.  */
    t_ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_ptr, cpu_env, dofs);

    /*
     * This may be expand_clr for the tail of an operation, e.g.
     * oprsz == 8 && maxsz == 64.  The size of the clear is misaligned
     * wrt simd_desc and will assert.  Simply pass all replicated byte
     * stores through to memset.
     */
    if (oprsz == maxsz && vece == MO_8) {
        TCGv_ptr t_size = tcg_const_ptr(oprsz);
        TCGv_i32 t_val;

        if (in_32) {
            t_val = in_32;
        } else if (in_64) {
            t_val = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t_val, in_64);
        } else {
            t_val = tcg_const_i32(in_c);
        }
        gen_helper_memset(t_ptr, t_ptr, t_val, t_size);

        if (in_32 == NULL) {
            tcg_temp_free_i32(t_val);
        }
        tcg_temp_free_ptr(t_size);
        tcg_temp_free_ptr(t_ptr);
        return;
    }

    t_desc = tcg_const_i32(simd_desc(oprsz, maxsz, 0));

    if (vece == MO_64) {
        if (in_64) {
            gen_helper_gvec_dup64(t_ptr, t_desc, in_64);
        } else {
            t_64 = tcg_const_i64(in_c);
            gen_helper_gvec_dup64(t_ptr, t_desc, t_64);
            tcg_temp_free_i64(t_64);
        }
    } else {
        typedef void dup_fn(TCGv_ptr, TCGv_i32, TCGv_i32);
        static dup_fn * const fns[3] = {
            gen_helper_gvec_dup8,
            gen_helper_gvec_dup16,
            gen_helper_gvec_dup32
        };

        if (in_32) {
            fns[vece](t_ptr, t_desc, in_32);
        } else {
            t_32 = tcg_temp_new_i32();
            if (in_64) {
                tcg_gen_extrl_i64_i32(t_32, in_64);
            } else if (vece == MO_8) {
                tcg_gen_movi_i32(t_32, in_c & 0xff);
            } else if (vece == MO_16) {
                tcg_gen_movi_i32(t_32, in_c & 0xffff);
            } else {
                tcg_gen_movi_i32(t_32, in_c);
            }
            fns[vece](t_ptr, t_desc, t_32);
            tcg_temp_free_i32(t_32);
        }
    }

    tcg_temp_free_ptr(t_ptr);
    tcg_temp_free_i32(t_desc);
    return;

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

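/*
 * Typical flow: tcg_gen_gvec_dup_imm(MO_8, dofs, 16, 16, 0) arrives
 * here with in_32 == in_64 == NULL and in_c == 0, so oprsz widens to
 * maxsz; the store then uses a host vector type, inline integer
 * stores, or the out-of-line path, in that order of preference.
 */
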
/* Likewise, but with zero.  */
static void expand_clr(uint32_t dofs, uint32_t maxsz)
{
    do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
}

/* Expand OPRSZ bytes worth of two-operand operations using i32 elements.  */
static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t1, cpu_env, dofs + i);
        }
        fni(t1, t0);
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t1, cpu_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i32 c, bool scalar_first,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i32 elements.  */
static void expand_3_i32(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i32(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_3i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i32(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i32 elements.  */
static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t1, cpu_env, aofs + i);
        tcg_gen_ld_i32(t2, cpu_env, bofs + i);
        tcg_gen_ld_i32(t3, cpu_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i32(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

/* Expand OPRSZ bytes worth of two-operand operations using i64 elements.  */
static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t1, cpu_env, dofs + i);
        }
        fni(t1, t0);
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t1, cpu_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i64 c, bool scalar_first,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i64 elements.  */
static void expand_3_i64(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i64(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_3i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i64(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i64 elements.  */
static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t1, cpu_env, aofs + i);
        tcg_gen_ld_i64(t2, cpu_env, bofs + i);
        tcg_gen_ld_i64(t3, cpu_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i64(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

/* Expand OPRSZ bytes worth of two-operand operations using host vectors.  */
static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t oprsz, uint32_t tysz, TCGType type,
                         bool load_dest,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t1, cpu_env, dofs + i);
        }
        fni(vece, t1, t0);
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

/* Expand OPRSZ bytes worth of two-vector operands and an immediate operand
   using host vectors.  */
static void expand_2i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, int64_t))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t1, cpu_env, dofs + i);
        }
        fni(vece, t1, t0, c);
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          TCGv_vec c, bool scalar_first,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(vece, t1, c, t0);
        } else {
            fni(vece, t1, t0, c);
        }
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using host vectors.  */
static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool load_dest,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, cpu_env, dofs + i);
        }
        fni(vece, t2, t0, t1);
        tcg_gen_st_vec(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t2);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}

/*
 * Expand OPRSZ bytes worth of three-vector operands and an immediate operand
 * using host vectors.
 */
static void expand_3i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                          TCGType type, int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec,
                                      int64_t))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, cpu_env, dofs + i);
        }
        fni(vece, t2, t0, t1, c);
        tcg_gen_st_vec(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using host vectors.  */
static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool write_aofs,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec,
                                     TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec t3 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t1, cpu_env, aofs + i);
        tcg_gen_ld_vec(t2, cpu_env, bofs + i);
        tcg_gen_ld_vec(t3, cpu_env, cofs + i);
        fni(vece, t0, t1, t2, t3);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_vec(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_vec(t3);
    tcg_temp_free_vec(t2);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}

/* Expand a vector two-operand operation.  */
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                     g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                     g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                     g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

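/*
 * Usage sketch with hypothetical callbacks: a front end supplies as
 * many implementations as it has, and this expander picks the widest
 * one the host supports, falling back to the out-of-line helper:
 *
 *     static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
 *     static const GVecGen2 g = {
 *         .fni8 = gen_neg16x4_i64,       // 64-bit integer fallback
 *         .fniv = gen_neg16_vec,         // host-vector implementation
 *         .fno = gen_helper_gvec_neg16,  // out-of-line fallback
 *         .opt_opc = vecop_list,         // opcodes .fniv may emit
 *         .vece = MO_16,
 *     };
 *     tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
 *
 * The three gen_* callbacks above are stand-in names, not part of
 * this file.
 */
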
/* Expand a vector operation with two vectors and an immediate.  */
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                     uint32_t maxsz, int64_t c, const GVecGen2i *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2i_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                      c, g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2i_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                      c, g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_2i_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                      c, g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
        } else {
            if (g->fno) {
                tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
            } else {
                TCGv_i64 tcg_c = tcg_const_i64(c);
                tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz,
                                    maxsz, c, g->fnoi);
                tcg_temp_free_i64(tcg_c);
            }
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector operation with two vectors and a scalar.  */
void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                     uint32_t maxsz, TCGv_i64 c, const GVecGen2s *g)
{
    TCGType type;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    if (type != 0) {
        const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
        const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
        TCGv_vec t_vec = tcg_temp_new_vec(type);
        uint32_t some;

        tcg_gen_dup_i64_vec(g->vece, t_vec, c);

        switch (type) {
        case TCG_TYPE_V256:
            /* Recall that ARM SVE allows vector sizes that are not a
             * power of 2, but always a multiple of 16.  The intent is
             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
             */
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                          t_vec, g->scalar_first, g->fniv);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */
        case TCG_TYPE_V128:
            expand_2s_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                          t_vec, g->scalar_first, g->fniv);
            break;
        case TCG_TYPE_V64:
            expand_2s_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                          t_vec, g->scalar_first, g->fniv);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_vec(t_vec);
        tcg_swap_vecop_list(hold_list);
    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_dup_i64(g->vece, t64, c);
        expand_2s_i64(dofs, aofs, oprsz, t64, g->scalar_first, g->fni8);
        tcg_temp_free_i64(t64);
    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
        TCGv_i32 t32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(t32, c);
        gen_dup_i32(g->vece, t32, t32);
        expand_2s_i32(dofs, aofs, oprsz, t32, g->scalar_first, g->fni4);
        tcg_temp_free_i32(t32);
    } else {
        tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, 0, g->fno);
        return;
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector three-operand operation.  */
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                     g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                     g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                     g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
                               maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector operation with three vectors and an immediate.  */
void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                     uint32_t oprsz, uint32_t maxsz, int64_t c,
                     const GVecGen3i *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3i_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                      c, g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                      c, g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                      c, g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3i_i64(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3i_i32(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, c, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/* Expand a vector four-operand operation.  */
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
    check_overlap_4(dofs, aofs, bofs, cofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, some,
                     32, TCG_TYPE_V256, g->write_aofs, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        cofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     16, TCG_TYPE_V128, g->write_aofs, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     8, TCG_TYPE_V64, g->write_aofs, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_4_i64(dofs, aofs, bofs, cofs, oprsz,
                         g->write_aofs, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_4_i32(dofs, aofs, bofs, cofs, oprsz,
                         g->write_aofs, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
                               oprsz, maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

/*
 * Expand specific vector operations.
 */

static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mov_vec(a, b);
}

void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g = {
        .fni8 = tcg_gen_mov_i64,
        .fniv = vec_mov2,
        .fno = gen_helper_gvec_mov,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (dofs != aofs) {
        tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
    } else {
        check_size_align(oprsz, maxsz, dofs);
        if (oprsz < maxsz) {
            expand_clr(dofs + oprsz, maxsz - oprsz);
        }
    }
}

void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, TCGv_i32 in)
{
    check_size_align(oprsz, maxsz, dofs);
    tcg_debug_assert(vece <= MO_32);
    do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
}

void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, TCGv_i64 in)
{
    check_size_align(oprsz, maxsz, dofs);
    tcg_debug_assert(vece <= MO_64);
    do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
}

void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t maxsz)
{
    check_size_align(oprsz, maxsz, dofs);
    if (vece <= MO_64) {
        TCGType type = choose_vector_type(NULL, vece, oprsz, 0);
        if (type != 0) {
            TCGv_vec t_vec = tcg_temp_new_vec(type);
            tcg_gen_dup_mem_vec(vece, t_vec, cpu_env, aofs);
            do_dup_store(type, dofs, oprsz, maxsz, t_vec);
            tcg_temp_free_vec(t_vec);
        } else if (vece <= MO_32) {
            TCGv_i32 in = tcg_temp_new_i32();
            switch (vece) {
            case MO_8:
                tcg_gen_ld8u_i32(in, cpu_env, aofs);
                break;
            case MO_16:
                tcg_gen_ld16u_i32(in, cpu_env, aofs);
                break;
            default:
                tcg_gen_ld_i32(in, cpu_env, aofs);
                break;
            }
            do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
            tcg_temp_free_i32(in);
        } else {
            TCGv_i64 in = tcg_temp_new_i64();
            tcg_gen_ld_i64(in, cpu_env, aofs);
            do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
            tcg_temp_free_i64(in);
        }
    } else if (vece == 4) {
        /* 128-bit duplicate.  */
        int i;

        tcg_debug_assert(oprsz >= 16);
        if (TCG_TARGET_HAS_v128) {
            TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);

            tcg_gen_ld_vec(in, cpu_env, aofs);
            for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
                tcg_gen_st_vec(in, cpu_env, dofs + i);
            }
            tcg_temp_free_vec(in);
        } else {
            TCGv_i64 in0 = tcg_temp_new_i64();
            TCGv_i64 in1 = tcg_temp_new_i64();

            tcg_gen_ld_i64(in0, cpu_env, aofs);
            tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
            for (i = (aofs == dofs) * 16; i < oprsz; i += 16) {
                tcg_gen_st_i64(in0, cpu_env, dofs + i);
                tcg_gen_st_i64(in1, cpu_env, dofs + i + 8);
            }
            tcg_temp_free_i64(in0);
            tcg_temp_free_i64(in1);
        }
        if (oprsz < maxsz) {
            expand_clr(dofs + oprsz, maxsz - oprsz);
        }
    } else if (vece == 5) {
        /* 256-bit duplicate.  */
        int i;

        tcg_debug_assert(oprsz >= 32);
        tcg_debug_assert(oprsz % 32 == 0);
        if (TCG_TARGET_HAS_v256) {
            TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V256);

            tcg_gen_ld_vec(in, cpu_env, aofs);
            for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
                tcg_gen_st_vec(in, cpu_env, dofs + i);
            }
            tcg_temp_free_vec(in);
        } else if (TCG_TARGET_HAS_v128) {
            TCGv_vec in0 = tcg_temp_new_vec(TCG_TYPE_V128);
            TCGv_vec in1 = tcg_temp_new_vec(TCG_TYPE_V128);

            tcg_gen_ld_vec(in0, cpu_env, aofs);
            tcg_gen_ld_vec(in1, cpu_env, aofs + 16);
            for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
                tcg_gen_st_vec(in0, cpu_env, dofs + i);
                tcg_gen_st_vec(in1, cpu_env, dofs + i + 16);
            }
            tcg_temp_free_vec(in0);
            tcg_temp_free_vec(in1);
        } else {
            TCGv_i64 in[4];
            int j;

            for (j = 0; j < 4; ++j) {
                in[j] = tcg_temp_new_i64();
                tcg_gen_ld_i64(in[j], cpu_env, aofs + j * 8);
            }
            for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
                for (j = 0; j < 4; ++j) {
                    tcg_gen_st_i64(in[j], cpu_env, dofs + i + j * 8);
                }
            }
            for (j = 0; j < 4; ++j) {
                tcg_temp_free_i64(in[j]);
            }
        }
        if (oprsz < maxsz) {
            expand_clr(dofs + oprsz, maxsz - oprsz);
        }
    } else {
        g_assert_not_reached();
    }
}

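/*
 * Note that vece == 4 and vece == 5 request 128-bit and 256-bit
 * duplicates; e.g. replicating the low 16 bytes of a vector register
 * across its full width can be written, with aofs == dofs, as
 * tcg_gen_gvec_dup_mem(4, dofs, aofs, oprsz, maxsz).
 */
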
void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, uint64_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g = {
        .fni8 = tcg_gen_not_i64,
        .fniv = tcg_gen_not_vec,
        .fno = gen_helper_gvec_not,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
}

/* Perform a vector addition using normal addition and a mask.  The mask
   should be the sign bit of each lane.  This 6-operation form is more
   efficient than separate additions when there are 4 or more lanes in
   the 64-bit operation.  */
static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_andc_i64(t1, a, m);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_xor_i64(t3, a, b);
    tcg_gen_add_i64(d, t1, t2);
    tcg_gen_and_i64(t3, t3, m);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}

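/*
 * Why the mask trick works: with each lane's sign bit cleared in both
 * inputs, the full 64-bit add can never carry across a lane boundary.
 * After the add, the top bit of each lane holds only the carry into
 * that position; xor-ing in (a ^ b) & m supplies the missing a ^ b
 * term, completing a ^ b ^ carry-in for the lane's top bit.
 */
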
void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_addv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_addv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, a, ~0xffffffffull);
    tcg_gen_add_i64(t2, a, b);
    tcg_gen_add_i64(t1, t1, b);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };

void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fni8 = tcg_gen_vec_add8_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add8,
          .opt_opc = vecop_list_add,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_add16_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add16,
          .opt_opc = vecop_list_add,
          .vece = MO_16 },
        { .fni4 = tcg_gen_add_i32,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add32,
          .opt_opc = vecop_list_add,
          .vece = MO_32 },
        { .fni8 = tcg_gen_add_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add64,
          .opt_opc = vecop_list_add,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

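/*
 * A front-end call is then a single line; e.g. a 16-byte vector
 * addition of 32-bit lanes, with oprsz == maxsz so there is no tail
 * to clear:
 *
 *     tcg_gen_gvec_add(MO_32, dofs, aofs, bofs, 16, 16);
 *
 * When maxsz exceeds oprsz, the bytes between the two are zeroed.
 */
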
void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fni8 = tcg_gen_vec_add8_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds8,
          .opt_opc = vecop_list_add,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_add16_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds16,
          .opt_opc = vecop_list_add,
          .vece = MO_16 },
        { .fni4 = tcg_gen_add_i32,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds32,
          .opt_opc = vecop_list_add,
          .vece = MO_32 },
        { .fni8 = tcg_gen_add_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds64,
          .opt_opc = vecop_list_add,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_adds(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}

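/*
 * For example, tcg_gen_gvec_addi(MO_8, dofs, aofs, 1, oprsz, maxsz)
 * increments every byte of the vector; the immediate is widened to a
 * TCGv_i64 here and replicated per lane by the expansion above.
 */
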
static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 };

void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs8,
          .opt_opc = vecop_list_sub,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs16,
          .opt_opc = vecop_list_sub,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs32,
          .opt_opc = vecop_list_sub,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs64,
          .opt_opc = vecop_list_sub,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

/* Perform a vector subtraction using normal subtraction and a mask.
   Compare gen_addv_mask above.  */
static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_or_i64(t1, a, m);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_eqv_i64(t3, a, b);
    tcg_gen_sub_i64(d, t1, t2);
    tcg_gen_and_i64(t3, t3, m);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}

void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_subv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_subv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, b, ~0xffffffffull);
    tcg_gen_sub_i64(t2, a, b);
    tcg_gen_sub_i64(t1, a, t1);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub8,
          .opt_opc = vecop_list_sub,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub16,
          .opt_opc = vecop_list_sub,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub32,
          .opt_opc = vecop_list_sub,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub64,
          .opt_opc = vecop_list_sub,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };

void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul8,
          .opt_opc = vecop_list_mul,
          .vece = MO_8 },
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul16,
          .opt_opc = vecop_list_mul,
          .vece = MO_16 },
        { .fni4 = tcg_gen_mul_i32,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul32,
          .opt_opc = vecop_list_mul,
          .vece = MO_32 },
        { .fni8 = tcg_gen_mul_i64,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul64,
          .opt_opc = vecop_list_mul,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls8,
          .opt_opc = vecop_list_mul,
          .vece = MO_8 },
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls16,
          .opt_opc = vecop_list_mul,
          .vece = MO_16 },
        { .fni4 = tcg_gen_mul_i32,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls32,
          .opt_opc = vecop_list_mul,
          .vece = MO_32 },
        { .fni8 = tcg_gen_mul_i64,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls64,
          .opt_opc = vecop_list_mul,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_muls(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_ssadd(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_ssadd_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fniv = tcg_gen_ssadd_vec,
          .fno = gen_helper_gvec_ssadd64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_sssub(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sssub_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fniv = tcg_gen_sssub_vec,
          .fno = gen_helper_gvec_sssub64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

static void tcg_gen_usadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 max = tcg_const_i32(-1);
    tcg_gen_add_i32(d, a, b);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, d, a, max, d);
    tcg_temp_free_i32(max);
}

static void tcg_gen_usadd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 max = tcg_const_i64(-1);
    tcg_gen_add_i64(d, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, d, a, max, d);
    tcg_temp_free_i64(max);
}

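/*
 * The movcond supplies the saturation: unsigned overflow occurred
 * exactly when the sum wrapped below an addend, so (d < a) unsigned
 * selects the all-ones maximum.  E.g. in 32 bits, 0xfffffff0 + 0x20
 * wraps to 0x10, which is below 0xfffffff0, and d becomes 0xffffffff.
 */
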
void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_usadd_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_usadd_i32,
          .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_usadd_i64,
          .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

static void tcg_gen_ussub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 min = tcg_const_i32(0);
    tcg_gen_sub_i32(d, a, b);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, min, d);
    tcg_temp_free_i32(min);
}

static void tcg_gen_ussub_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 min = tcg_const_i64(0);
    tcg_gen_sub_i64(d, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, min, d);
    tcg_temp_free_i64(min);
}

void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_ussub_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_ussub_i32,
          .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_ussub_i64,
          .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_smin_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_smin_i32,
          .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_smin_i64,
          .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_umin_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_umin_i32,
          .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_umin_i64,
          .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_smax_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_smax_i32,
          .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_smax_i64,
          .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_umax_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_umax_i32,
          .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_umax_i64,
          .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
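
/*
 * Illustrative usage sketch (editorial addition): the four min/max
 * expansions share one pattern, so e.g. an unsigned byte maximum over
 * a hypothetical 32-byte vector register would be
 *
 *     tcg_gen_gvec_umax(MO_8, dofs, aofs, bofs, 32, 32);
 */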
/* Perform a vector negation using normal negation and a mask.
   Compare gen_subv_mask above.  */
static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_andc_i64(t3, m, b);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_sub_i64(d, m, t2);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
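
/*
 * Worked example (editorial note): with m the per-element sign-bit
 * mask, e.g. dup_const(MO_8, 0x80), each byte of -b is produced as
 * (m - (b & ~m)) ^ (m & ~b).  The subtraction of the low seven bits
 * from 0x80 can never borrow across an element boundary, and the xor
 * then patches the sign bit back in: for b = 0x01 per byte this gives
 * (0x80 - 0x01) ^ 0x80 = 0xff = -1.
 */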
void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_negv_mask(d, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_negv_mask(d, b, m);
    tcg_temp_free_i64(m);
}
void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, b, ~0xffffffffull);
    tcg_gen_neg_i64(t2, b);
    tcg_gen_neg_i64(t1, t1);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
    static const GVecGen2 g[4] = {
        { .fni8 = tcg_gen_vec_neg8_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_neg16_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_neg_i32,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_neg_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
}
static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
{
    TCGv_i64 t = tcg_temp_new_i64();
    int nbit = 8 << vece;

    /* Create -1 for each negative element.  */
    tcg_gen_shri_i64(t, b, nbit - 1);
    tcg_gen_andi_i64(t, t, dup_const(vece, 1));
    tcg_gen_muli_i64(t, t, (1 << nbit) - 1);

    /*
     * Invert (via xor -1) and add one.
     * Because of the ordering the msb is cleared,
     * so we never have carry into the next element.
     */
    tcg_gen_xor_i64(d, b, t);
    tcg_gen_andi_i64(t, t, dup_const(vece, 1));
    tcg_gen_add_i64(d, d, t);

    tcg_temp_free_i64(t);
}
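
/*
 * Worked example (editorial note): for vece = MO_8 and a lane value of
 * 0xfe (-2), the shift/and/mul sequence yields t = 0xff in that lane
 * (0x00 in non-negative lanes).  Then d = b ^ t = 0x01, and adding the
 * low bit of t gives 0x02 = abs(-2); non-negative lanes pass through
 * unchanged.
 */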
static void tcg_gen_vec_abs8_i64(TCGv_i64 d, TCGv_i64 b)
{
    gen_absv_mask(d, b, MO_8);
}

static void tcg_gen_vec_abs16_i64(TCGv_i64 d, TCGv_i64 b)
{
    gen_absv_mask(d, b, MO_16);
}
void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_abs_vec, 0 };
    static const GVecGen2 g[4] = {
        { .fni8 = tcg_gen_vec_abs8_i64,
          .fniv = tcg_gen_abs_vec,
          .fno = gen_helper_gvec_abs8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_abs16_i64,
          .fniv = tcg_gen_abs_vec,
          .fno = gen_helper_gvec_abs16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_abs_i32,
          .fniv = tcg_gen_abs_vec,
          .fno = gen_helper_gvec_abs32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_abs_i64,
          .fniv = tcg_gen_abs_vec,
          .fno = gen_helper_gvec_abs64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
}
void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_and_i64,
        .fniv = tcg_gen_and_vec,
        .fno = gen_helper_gvec_and,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
                     uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_or_i64,
        .fniv = tcg_gen_or_vec,
        .fno = gen_helper_gvec_or,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_xor_i64,
        .fniv = tcg_gen_xor_vec,
        .fno = gen_helper_gvec_xor,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_andc_i64,
        .fniv = tcg_gen_andc_vec,
        .fno = gen_helper_gvec_andc,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_orc_i64,
        .fniv = tcg_gen_orc_vec,
        .fno = gen_helper_gvec_orc,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_nand_i64,
        .fniv = tcg_gen_nand_vec,
        .fno = gen_helper_gvec_nand,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_nor_i64,
        .fniv = tcg_gen_nor_vec,
        .fno = gen_helper_gvec_nor,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_eqv_i64,
        .fniv = tcg_gen_eqv_vec,
        .fno = gen_helper_gvec_eqv,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}
static const GVecGen2s gop_ands = {
    .fni8 = tcg_gen_and_i64,
    .fniv = tcg_gen_and_vec,
    .fno = gen_helper_gvec_ands,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
    tcg_temp_free_i64(tmp);
}
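
/*
 * Editorial note: dup_const replicates an immediate into every element
 * of a 64-bit word, e.g. dup_const(MO_8, 0x80) == 0x8080808080808080ull,
 * so tcg_gen_gvec_andi folds the per-element immediate into one 64-bit
 * constant; tcg_gen_gvec_ands does the same for a run-time value via
 * gen_dup_i64.
 */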
static const GVecGen2s gop_xors = {
    .fni8 = tcg_gen_xor_i64,
    .fniv = tcg_gen_xor_vec,
    .fno = gen_helper_gvec_xors,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
    tcg_temp_free_i64(tmp);
}

static const GVecGen2s gop_ors = {
    .fni8 = tcg_gen_or_i64,
    .fniv = tcg_gen_or_vec,
    .fno = gen_helper_gvec_ors,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
                      TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
                      int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
    tcg_temp_free_i64(tmp);
}
void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff << c);
    tcg_gen_shli_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff << c);
    tcg_gen_shli_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}
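
/*
 * Worked example (editorial note): the element-wise shift is performed
 * on the whole 64-bit word, then bits that crossed a lane boundary are
 * masked off.  For tcg_gen_vec_shl8i_i64 with c = 1 the mask is
 * dup_const(MO_8, 0xfe) == 0xfefefefefefefefeull, dropping the bit
 * each byte would otherwise leak into its neighbour.
 */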
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_shl8i_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_shl16i_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shli_i32,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shli_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff >> c);
    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> c);
    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_shr8i_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_shr16i_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shri_i32,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shri_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t s_mask = dup_const(MO_8, 0x80 >> c);
    uint64_t c_mask = dup_const(MO_8, 0xff >> c);
    TCGv_i64 s = tcg_temp_new_i64();

    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(s, d, s_mask);  /* isolate (shifted) sign bit */
    tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
    tcg_gen_andi_i64(d, d, c_mask);  /* clear out bits above sign  */
    tcg_gen_or_i64(d, d, s);         /* include sign extension */
    tcg_temp_free_i64(s);
}

void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
    uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
    TCGv_i64 s = tcg_temp_new_i64();

    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(s, d, s_mask);  /* isolate (shifted) sign bit */
    tcg_gen_andi_i64(d, d, c_mask);  /* clear out bits above sign  */
    tcg_gen_muli_i64(s, s, (2 << c) - 2); /* replicate isolated signs */
    tcg_gen_or_i64(d, d, s);         /* include sign extension */
    tcg_temp_free_i64(s);
}
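
/*
 * Worked example (editorial note): for an 8-bit arithmetic right shift
 * by c = 2, s_mask = dup_const(MO_8, 0x20) isolates each shifted-in
 * sign bit, and multiplying by (2 << 2) - 2 = 6 smears it across the
 * vacated positions: 0x20 * 6 = 0xc0, exactly the two high bits that
 * sign extension must fill.
 */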
void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sari_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_sar8i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sar16i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sari_i32,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sari_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
void tcg_gen_vec_rotl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff << c);

    tcg_gen_shli_i64(d, a, c);
    tcg_gen_shri_i64(a, a, 8 - c);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(a, a, ~mask);
    tcg_gen_or_i64(d, d, a);
}

void tcg_gen_vec_rotl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff << c);

    tcg_gen_shli_i64(d, a, c);
    tcg_gen_shri_i64(a, a, 16 - c);
    tcg_gen_andi_i64(d, d, mask);
    tcg_gen_andi_i64(a, a, ~mask);
    tcg_gen_or_i64(d, d, a);
}
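
/*
 * Editorial note: the rotate is assembled from two partial shifts;
 * mask keeps the bits that stayed within each lane after the left
 * shift, while ~mask keeps the bits wrapped around by the right shift.
 * These helpers clobber their input a, which is safe in the gvec
 * expansion because the source is reloaded from cpu_env into a fresh
 * temporary for each element group.
 */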
void tcg_gen_gvec_rotli(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotli_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_rotl8i_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_rotl16i_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_rotli_i32,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_rotli_i64,
          .fniv = tcg_gen_rotli_vec,
          .fno = gen_helper_gvec_rotl64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}
void tcg_gen_gvec_rotri(unsigned vece, uint32_t dofs, uint32_t aofs,
                        int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    tcg_gen_gvec_rotli(vece, dofs, aofs, -shift & ((8 << vece) - 1),
                       oprsz, maxsz);
}
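
/*
 * Editorial note: a right rotate is expressed as a left rotate by the
 * complementary amount, e.g. rotri by 3 on 8-bit lanes becomes rotli
 * by (-3) & 7 == 5; shift == 0 maps to 0, which still satisfies the
 * assertion inside tcg_gen_gvec_rotli.
 */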
/*
 * Specialized generation of vector shifts by a non-constant scalar.
 */

typedef struct {
    void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
    void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
    void (*fniv_s)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32);
    void (*fniv_v)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
    gen_helper_gvec_2 *fno[4];
    TCGOpcode s_list[2];
    TCGOpcode v_list[2];
} GVecGen2sh;
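
/*
 * Editorial note: do_gvec_shifts below tries these strategies in
 * order: a native vector-by-scalar opcode (s_list/fniv_s), a native
 * vector-by-vector opcode with the scalar duplicated into a vector
 * (v_list/fniv_v), an integer expansion (fni4/fni8), and finally the
 * out-of-line helpers in fno[], indexed by vece, with the shift count
 * passed through the descriptor's data field.
 */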
static void expand_2sh_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t oprsz, uint32_t tysz, TCGType type,
                           TCGv_i32 shift,
                           void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        fni(vece, t0, t0, shift);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
}
static void
do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
               uint32_t oprsz, uint32_t maxsz, const GVecGen2sh *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    /* If the backend has a scalar expansion, great.  */
    type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64);
    if (type) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        switch (type) {
        case TCG_TYPE_V256:
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2sh_vec(vece, dofs, aofs, some, 32,
                           TCG_TYPE_V256, shift, g->fniv_s);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */
        case TCG_TYPE_V128:
            expand_2sh_vec(vece, dofs, aofs, oprsz, 16,
                           TCG_TYPE_V128, shift, g->fniv_s);
            break;
        case TCG_TYPE_V64:
            expand_2sh_vec(vece, dofs, aofs, oprsz, 8,
                           TCG_TYPE_V64, shift, g->fniv_s);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_swap_vecop_list(hold_list);
        goto clear_tail;
    }

    /* If the backend supports variable vector shifts, also cool.  */
    type = choose_vector_type(g->v_list, vece, oprsz, vece == MO_64);
    if (type) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        TCGv_vec v_shift = tcg_temp_new_vec(type);

        if (vece == MO_64) {
            TCGv_i64 sh64 = tcg_temp_new_i64();
            tcg_gen_extu_i32_i64(sh64, shift);
            tcg_gen_dup_i64_vec(MO_64, v_shift, sh64);
            tcg_temp_free_i64(sh64);
        } else {
            tcg_gen_dup_i32_vec(vece, v_shift, shift);
        }

        switch (type) {
        case TCG_TYPE_V256:
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2s_vec(vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                          v_shift, false, g->fniv_v);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */
        case TCG_TYPE_V128:
            expand_2s_vec(vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                          v_shift, false, g->fniv_v);
            break;
        case TCG_TYPE_V64:
            expand_2s_vec(vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                          v_shift, false, g->fniv_v);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_vec(v_shift);
        tcg_swap_vecop_list(hold_list);
        goto clear_tail;
    }

    /* Otherwise fall back to integral... */
    if (vece == MO_32 && check_size_impl(oprsz, 4)) {
        expand_2s_i32(dofs, aofs, oprsz, shift, false, g->fni4);
    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
        TCGv_i64 sh64 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(sh64, shift);
        expand_2s_i64(dofs, aofs, oprsz, sh64, false, g->fni8);
        tcg_temp_free_i64(sh64);
    } else {
        TCGv_ptr a0 = tcg_temp_new_ptr();
        TCGv_ptr a1 = tcg_temp_new_ptr();
        TCGv_i32 desc = tcg_temp_new_i32();

        tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT);
        tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0));
        tcg_gen_addi_ptr(a0, cpu_env, dofs);
        tcg_gen_addi_ptr(a1, cpu_env, aofs);

        g->fno[vece](a0, a1, desc);

        tcg_temp_free_ptr(a0);
        tcg_temp_free_ptr(a1);
        tcg_temp_free_i32(desc);
        return;
    }

 clear_tail:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_shl_i32,
        .fni8 = tcg_gen_shl_i64,
        .fniv_s = tcg_gen_shls_vec,
        .fniv_v = tcg_gen_shlv_vec,
        .fno = {
            gen_helper_gvec_shl8i,
            gen_helper_gvec_shl16i,
            gen_helper_gvec_shl32i,
            gen_helper_gvec_shl64i,
        },
        .s_list = { INDEX_op_shls_vec, 0 },
        .v_list = { INDEX_op_shlv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_shr_i32,
        .fni8 = tcg_gen_shr_i64,
        .fniv_s = tcg_gen_shrs_vec,
        .fniv_v = tcg_gen_shrv_vec,
        .fno = {
            gen_helper_gvec_shr8i,
            gen_helper_gvec_shr16i,
            gen_helper_gvec_shr32i,
            gen_helper_gvec_shr64i,
        },
        .s_list = { INDEX_op_shrs_vec, 0 },
        .v_list = { INDEX_op_shrv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_sar_i32,
        .fni8 = tcg_gen_sar_i64,
        .fniv_s = tcg_gen_sars_vec,
        .fniv_v = tcg_gen_sarv_vec,
        .fno = {
            gen_helper_gvec_sar8i,
            gen_helper_gvec_sar16i,
            gen_helper_gvec_sar32i,
            gen_helper_gvec_sar64i,
        },
        .s_list = { INDEX_op_sars_vec, 0 },
        .v_list = { INDEX_op_sarv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_rotls(unsigned vece, uint32_t dofs, uint32_t aofs,
                        TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_rotl_i32,
        .fni8 = tcg_gen_rotl_i64,
        .fniv_s = tcg_gen_rotls_vec,
        .fniv_v = tcg_gen_rotlv_vec,
        .fno = {
            gen_helper_gvec_rotl8i,
            gen_helper_gvec_rotl16i,
            gen_helper_gvec_rotl32i,
            gen_helper_gvec_rotl64i,
        },
        .s_list = { INDEX_op_rotls_vec, 0 },
        .v_list = { INDEX_op_rotlv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}
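
/*
 * Illustrative usage sketch (editorial addition): shifting every
 * 32-bit lane of a hypothetical 16-byte register by a run-time amount
 * held in a TCGv_i32 would be written
 *
 *     tcg_gen_gvec_shls(MO_32, dofs, aofs, shift, 16, 16);
 */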
/*
 * Expand D = A << (B % element bits)
 *
 * Unlike scalar shifts, it is not easy for the target front end
 * to include the modulo as part of the expansion.  If the target
 * naturally includes the modulo as part of the operation, great!
 * If the target has some other behaviour for out-of-range shifts,
 * then it could not use this function anyway, and would need to
 * do its own expansion with custom functions.
 */
static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_shlv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}
static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_shl_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_shl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_shl_i64(d, a, t);
    tcg_temp_free_i64(t);
}
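
/*
 * Editorial note: these "mod" wrappers give well-defined semantics to
 * out-of-range counts by reducing them modulo the element width, e.g.
 * a per-lane count of 9 on MO_8 lanes shifts by 9 & 7 == 1.
 */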
void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shlv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shl_mod_i32,
          .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shl_mod_i64,
          .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
/*
 * Similarly for logical right shifts.
 */

static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_shrv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_shr_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_shr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_shr_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shrv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shr_mod_i32,
          .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shr_mod_i64,
          .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
/*
 * Similarly for arithmetic right shifts.
 */

static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_sarv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_sar_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_sar_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_sar_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sarv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sar_mod_i32,
          .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sar_mod_i64,
          .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
/*
 * Similarly for rotates.
 */

static void tcg_gen_rotlv_mod_vec(unsigned vece, TCGv_vec d,
                                  TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_rotlv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_rotl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_rotl_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_rotl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_rotl_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_rotlv(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotlv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_rotlv_mod_vec,
          .fno = gen_helper_gvec_rotl8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_rotlv_mod_vec,
          .fno = gen_helper_gvec_rotl16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_rotl_mod_i32,
          .fniv = tcg_gen_rotlv_mod_vec,
          .fno = gen_helper_gvec_rotl32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_rotl_mod_i64,
          .fniv = tcg_gen_rotlv_mod_vec,
          .fno = gen_helper_gvec_rotl64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
static void tcg_gen_rotrv_mod_vec(unsigned vece, TCGv_vec d,
                                  TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_rotrv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_rotr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_rotr_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_rotr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_rotr_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_rotrv(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_rotrv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_rotrv_mod_vec,
          .fno = gen_helper_gvec_rotr8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_rotrv_mod_vec,
          .fno = gen_helper_gvec_rotr16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_rotr_mod_i32,
          .fniv = tcg_gen_rotrv_mod_vec,
          .fno = gen_helper_gvec_rotr32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_rotr_mod_i64,
          .fniv = tcg_gen_rotrv_mod_vec,
          .fno = gen_helper_gvec_rotr64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
/* Expand OPSZ bytes worth of three-operand operations using i32 elements.  */
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i32(cond, t0, t0, t1);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}
static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i64(cond, t0, t0, t1);
        tcg_gen_neg_i64(t0, t0);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}
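
/*
 * Editorial note: setcond produces 0 or 1, and the following negation
 * turns that into 0 or -1, i.e. an all-zeros or all-ones lane, matching
 * the mask convention of tcg_gen_cmp_vec used in expand_cmp_vec below.
 */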
static void expand_cmp_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                           TCGType type, TCGCond cond)
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        tcg_gen_cmp_vec(cond, vece, t0, t0, t1);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}
void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                      uint32_t aofs, uint32_t bofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
    static gen_helper_gvec_3 * const eq_fn[4] = {
        gen_helper_gvec_eq8, gen_helper_gvec_eq16,
        gen_helper_gvec_eq32, gen_helper_gvec_eq64
    };
    static gen_helper_gvec_3 * const ne_fn[4] = {
        gen_helper_gvec_ne8, gen_helper_gvec_ne16,
        gen_helper_gvec_ne32, gen_helper_gvec_ne64
    };
    static gen_helper_gvec_3 * const lt_fn[4] = {
        gen_helper_gvec_lt8, gen_helper_gvec_lt16,
        gen_helper_gvec_lt32, gen_helper_gvec_lt64
    };
    static gen_helper_gvec_3 * const le_fn[4] = {
        gen_helper_gvec_le8, gen_helper_gvec_le16,
        gen_helper_gvec_le32, gen_helper_gvec_le64
    };
    static gen_helper_gvec_3 * const ltu_fn[4] = {
        gen_helper_gvec_ltu8, gen_helper_gvec_ltu16,
        gen_helper_gvec_ltu32, gen_helper_gvec_ltu64
    };
    static gen_helper_gvec_3 * const leu_fn[4] = {
        gen_helper_gvec_leu8, gen_helper_gvec_leu16,
        gen_helper_gvec_leu32, gen_helper_gvec_leu64
    };
    static gen_helper_gvec_3 * const * const fns[16] = {
        [TCG_COND_EQ] = eq_fn,
        [TCG_COND_NE] = ne_fn,
        [TCG_COND_LT] = lt_fn,
        [TCG_COND_LE] = le_fn,
        [TCG_COND_LTU] = ltu_fn,
        [TCG_COND_LEU] = leu_fn,
    };

    const TCGOpcode *hold_list;
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
        do_dup(MO_8, dofs, oprsz, maxsz,
               NULL, NULL, -(cond == TCG_COND_ALWAYS));
        return;
    }

    /*
     * Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and 64-bit comparison.
     */
    hold_list = tcg_swap_vecop_list(cmp_list);
    type = choose_vector_type(cmp_list, vece, oprsz,
                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
        break;
    case TCG_TYPE_V64:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
        break;

    case 0:
        if (vece == MO_64 && check_size_impl(oprsz, 8)) {
            expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
        } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
            expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
        } else {
            gen_helper_gvec_3 * const *fn = fns[cond];

            if (fn == NULL) {
                uint32_t tmp;
                tmp = aofs, aofs = bofs, bofs = tmp;
                cond = tcg_swap_cond(cond);
                fn = fns[cond];
                assert(fn != NULL);
            }
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_and_i64(t, b, a);
    tcg_gen_andc_i64(d, c, a);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}
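
/*
 * Worked example (editorial note): tcg_gen_bitsel_i64 computes
 * d = (b & a) | (c & ~a), a bitwise select in which each set bit of a
 * picks the corresponding bit of b and each clear bit picks c; e.g.
 * a = 0xff00, b = 0x1234, c = 0x5678 gives d = 0x1278.
 */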
void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs,
                         uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 g = {
        .fni8 = tcg_gen_bitsel_i64,
        .fniv = tcg_gen_bitsel_vec,
        .fno = gen_helper_gvec_bitsel,
    };

    tcg_gen_gvec_4(dofs, aofs, bofs, cofs, oprsz, maxsz, &g);
}