/*
 * Generic vector operation expansion
 *
 * Copyright (c) 2018 Linaro
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg.h"
#include "tcg-op.h"
#include "tcg-op-gvec.h"
#include "qemu/main-loop.h"
#include "tcg-gvec-desc.h"

#define MAX_UNROLL  4

#ifdef CONFIG_DEBUG_TCG
static const TCGOpcode vecop_list_empty[1] = { 0 };
#else
#define vecop_list_empty NULL
#endif
/* Verify vector size and alignment rules.  OFS should be the OR of all
   of the operand offsets so that we can check them all at once.  */
static void check_size_align(uint32_t oprsz, uint32_t maxsz, uint32_t ofs)
{
    uint32_t opr_align = oprsz >= 16 ? 15 : 7;
    uint32_t max_align = maxsz >= 16 || oprsz >= 16 ? 15 : 7;
    tcg_debug_assert(oprsz > 0);
    tcg_debug_assert(oprsz <= maxsz);
    tcg_debug_assert((oprsz & opr_align) == 0);
    tcg_debug_assert((maxsz & max_align) == 0);
    tcg_debug_assert((ofs & max_align) == 0);
}

/* Verify vector overlap rules for two operands.  */
static void check_overlap_2(uint32_t d, uint32_t a, uint32_t s)
{
    tcg_debug_assert(d == a || d + s <= a || a + s <= d);
}

/* Verify vector overlap rules for three operands.  */
static void check_overlap_3(uint32_t d, uint32_t a, uint32_t b, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(a, b, s);
}

/* Verify vector overlap rules for four operands.  */
static void check_overlap_4(uint32_t d, uint32_t a, uint32_t b,
                            uint32_t c, uint32_t s)
{
    check_overlap_2(d, a, s);
    check_overlap_2(d, b, s);
    check_overlap_2(d, c, s);
    check_overlap_2(a, b, s);
    check_overlap_2(a, c, s);
    check_overlap_2(b, c, s);
}
/* Create a descriptor from components.  */
uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data)
{
    uint32_t desc = 0;

    assert(oprsz % 8 == 0 && oprsz <= (8 << SIMD_OPRSZ_BITS));
    assert(maxsz % 8 == 0 && maxsz <= (8 << SIMD_MAXSZ_BITS));
    assert(data == sextract32(data, 0, SIMD_DATA_BITS));

    oprsz = (oprsz / 8) - 1;
    maxsz = (maxsz / 8) - 1;
    desc = deposit32(desc, SIMD_OPRSZ_SHIFT, SIMD_OPRSZ_BITS, oprsz);
    desc = deposit32(desc, SIMD_MAXSZ_SHIFT, SIMD_MAXSZ_BITS, maxsz);
    desc = deposit32(desc, SIMD_DATA_SHIFT, SIMD_DATA_BITS, data);

    return desc;
}
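
/*
 * On the consumer side, out-of-line helpers unpack this descriptor with
 * the accessors from "tcg-gvec-desc.h" (simd_oprsz, simd_maxsz, simd_data).
 * A minimal sketch of a helper body; "gvec_foo" is a hypothetical name,
 * not a helper defined in this file:
 *
 *     void HELPER(gvec_foo)(void *d, void *a, uint32_t desc)
 *     {
 *         intptr_t oprsz = simd_oprsz(desc);
 *         int32_t data = simd_data(desc);
 *         ... operate on oprsz bytes of d and a ...
 *     }
 */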
/* Generate a call to a gvec-style helper with two vector operands.  */
void tcg_gen_gvec_2_ool(uint32_t dofs, uint32_t aofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_2 *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}
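
/*
 * A hypothetical use (dofs/aofs are byte offsets of the vectors from
 * cpu_env; gen_helper_gvec_foo is assumed to have been declared with the
 * usual DEF_HELPER_FLAGS_3(gvec_foo, TCG_CALL_NO_RWG, void, ptr, ptr, i32)
 * pattern, and is not a helper defined here):
 *
 *     tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, 0, gen_helper_gvec_foo);
 */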
/* Generate a call to a gvec-style helper with two vector operands
   and one scalar operand.  */
void tcg_gen_gvec_2i_ool(uint32_t dofs, uint32_t aofs, TCGv_i64 c,
                         uint32_t oprsz, uint32_t maxsz, int32_t data,
                         gen_helper_gvec_2i *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, c, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with three vector operands.  */
void tcg_gen_gvec_3_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t oprsz, uint32_t maxsz, int32_t data,
                        gen_helper_gvec_3 *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);

    fn(a0, a1, a2, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with four vector operands.  */
void tcg_gen_gvec_4_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_4 *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);

    fn(a0, a1, a2, a3, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_i32(desc);
}
/* Generate a call to a gvec-style helper with five vector operands.  */
void tcg_gen_gvec_5_ool(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, uint32_t xofs, uint32_t oprsz,
                        uint32_t maxsz, int32_t data, gen_helper_gvec_5 *fn)
{
    TCGv_ptr a0, a1, a2, a3, a4;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();
    a4 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);
    tcg_gen_addi_ptr(a4, cpu_env, xofs);

    fn(a0, a1, a2, a3, a4, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_ptr(a4);
    tcg_temp_free_i32(desc);
}
/* Generate a call to a gvec-style helper with two vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_2_ptr(uint32_t dofs, uint32_t aofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_2_ptr *fn)
{
    TCGv_ptr a0, a1;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);

    fn(a0, a1, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with three vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_3_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        TCGv_ptr ptr, uint32_t oprsz, uint32_t maxsz,
                        int32_t data, gen_helper_gvec_3_ptr *fn)
{
    TCGv_ptr a0, a1, a2;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);

    fn(a0, a1, a2, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_i32(desc);
}

/* Generate a call to a gvec-style helper with four vector operands
   and an extra pointer operand.  */
void tcg_gen_gvec_4_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                        uint32_t cofs, TCGv_ptr ptr, uint32_t oprsz,
                        uint32_t maxsz, int32_t data,
                        gen_helper_gvec_4_ptr *fn)
{
    TCGv_ptr a0, a1, a2, a3;
    TCGv_i32 desc = tcg_const_i32(simd_desc(oprsz, maxsz, data));

    a0 = tcg_temp_new_ptr();
    a1 = tcg_temp_new_ptr();
    a2 = tcg_temp_new_ptr();
    a3 = tcg_temp_new_ptr();

    tcg_gen_addi_ptr(a0, cpu_env, dofs);
    tcg_gen_addi_ptr(a1, cpu_env, aofs);
    tcg_gen_addi_ptr(a2, cpu_env, bofs);
    tcg_gen_addi_ptr(a3, cpu_env, cofs);

    fn(a0, a1, a2, a3, ptr, desc);

    tcg_temp_free_ptr(a0);
    tcg_temp_free_ptr(a1);
    tcg_temp_free_ptr(a2);
    tcg_temp_free_ptr(a3);
    tcg_temp_free_i32(desc);
}
/* Return true if we want to implement something of OPRSZ bytes
   in units of LNSZ.  This limits the expansion of inline code.  */
static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
{
    if (oprsz % lnsz == 0) {
        uint32_t lnct = oprsz / lnsz;
        return lnct >= 1 && lnct <= MAX_UNROLL;
    }
    return false;
}
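
/*
 * For example (worked instances of the rule above, with MAX_UNROLL == 4):
 * check_size_impl(32, 8) sees lnct == 4 and allows inline expansion;
 * check_size_impl(80, 32) fails because 80 is not a multiple of 32; and
 * check_size_impl(64, 8) fails because 8 stores would exceed the unroll
 * limit.
 */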
static void expand_clr(uint32_t dofs, uint32_t maxsz);

/* Duplicate C as per VECE.  */
uint64_t (dup_const)(unsigned vece, uint64_t c)
{
    switch (vece) {
    case MO_8:
        return 0x0101010101010101ull * (uint8_t)c;
    case MO_16:
        return 0x0001000100010001ull * (uint16_t)c;
    case MO_32:
        return 0x0000000100000001ull * (uint32_t)c;
    case MO_64:
        return c;
    default:
        g_assert_not_reached();
    }
}
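
/*
 * For example, dup_const(MO_16, 0x1234) yields 0x1234123412341234ull, and
 * dup_const(MO_8, 0xff) yields all-ones; only the low 8/16/32 bits of C
 * participate for the sub-64-bit element sizes.
 */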
/* Duplicate IN into OUT as per VECE.  */
static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i32(out, in);
        tcg_gen_muli_i32(out, out, 0x01010101);
        break;
    case MO_16:
        tcg_gen_deposit_i32(out, in, in, 16, 16);
        break;
    case MO_32:
        tcg_gen_mov_i32(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}

static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
{
    switch (vece) {
    case MO_8:
        tcg_gen_ext8u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0101010101010101ull);
        break;
    case MO_16:
        tcg_gen_ext16u_i64(out, in);
        tcg_gen_muli_i64(out, out, 0x0001000100010001ull);
        break;
    case MO_32:
        tcg_gen_deposit_i64(out, in, in, 32, 32);
        break;
    case MO_64:
        tcg_gen_mov_i64(out, in);
        break;
    default:
        g_assert_not_reached();
    }
}
/* Select a supported vector type for implementing an operation on SIZE
 * bytes.  If OP is 0, assume that the real operation to be performed is
 * required by all backends.  Otherwise, make sure that OP can be performed
 * on elements of size VECE in the selected type.  Do not select V64 if
 * PREFER_I64 is true.  Return 0 if no vector type is selected.
 */
static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
                                  uint32_t size, bool prefer_i64)
{
    if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         * It is hard to imagine a case in which v256 is supported
         * but v128 is not, but check anyway.
         */
        if (tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece)
            && (size % 32 == 0
                || tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) {
            return TCG_TYPE_V256;
        }
    }
    if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
        && tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece)) {
        return TCG_TYPE_V128;
    }
    if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
        && tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)) {
        return TCG_TYPE_V64;
    }
    return 0;
}
static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, TCGv_vec t_vec)
{
    uint32_t i = 0;

    switch (type) {
    case TCG_TYPE_V256:
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        for (; i + 32 <= oprsz; i += 32) {
            tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V256);
        }
        /* fallthru */
    case TCG_TYPE_V128:
        for (; i + 16 <= oprsz; i += 16) {
            tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V128);
        }
        break;
    case TCG_TYPE_V64:
        for (; i < oprsz; i += 8) {
            tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
        }
        break;
    default:
        g_assert_not_reached();
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
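
/*
 * To make the SVE remark above concrete: for type == TCG_TYPE_V256 and
 * oprsz == 80, the first loop emits two 32-byte stores (i == 0 and 32),
 * then falls through to emit one 16-byte store at i == 64, covering all
 * 80 bytes with 2x32 + 1x16.
 */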
/* Set OPRSZ bytes at DOFS to replications of IN_32, IN_64 or IN_C.
 * Only one of IN_32 or IN_64 may be set;
 * IN_C is used if IN_32 and IN_64 are unset.
 */
static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
                   uint32_t maxsz, TCGv_i32 in_32, TCGv_i64 in_64,
                   uint64_t in_c)
{
    TCGType type;
    TCGv_i64 t_64;
    TCGv_i32 t_32, t_desc;
    TCGv_ptr t_ptr;
    uint32_t i;

    assert(vece <= (in_32 ? MO_32 : MO_64));
    assert(in_32 == NULL || in_64 == NULL);

    /* If we're storing 0, expand oprsz to maxsz.  */
    if (in_32 == NULL && in_64 == NULL) {
        in_c = dup_const(vece, in_c);
        if (in_c == 0) {
            oprsz = maxsz;
        }
    }

    /* Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and no variable dup.
     */
    type = choose_vector_type(NULL, vece, oprsz,
                              (TCG_TARGET_REG_BITS == 64 && in_32 == NULL
                               && (in_64 == NULL || vece == MO_64)));
    if (type != 0) {
        TCGv_vec t_vec = tcg_temp_new_vec(type);

        if (in_32) {
            tcg_gen_dup_i32_vec(vece, t_vec, in_32);
        } else if (in_64) {
            tcg_gen_dup_i64_vec(vece, t_vec, in_64);
        } else {
            tcg_gen_dupi_vec(vece, t_vec, in_c);
        }
        do_dup_store(type, dofs, oprsz, maxsz, t_vec);
        tcg_temp_free_vec(t_vec);
        return;
    }

    /* Otherwise, inline with an integer type, unless "large".  */
    if (check_size_impl(oprsz, TCG_TARGET_REG_BITS / 8)) {
        t_64 = NULL;
        t_32 = NULL;

        if (in_32) {
            /* We are given a 32-bit variable input.  For a 64-bit host,
               use a 64-bit operation unless the 32-bit operation would
               be simple enough.  */
            if (TCG_TARGET_REG_BITS == 64
                && (vece != MO_32 || !check_size_impl(oprsz, 4))) {
                t_64 = tcg_temp_new_i64();
                tcg_gen_extu_i32_i64(t_64, in_32);
                gen_dup_i64(vece, t_64, t_64);
            } else {
                t_32 = tcg_temp_new_i32();
                gen_dup_i32(vece, t_32, in_32);
            }
        } else if (in_64) {
            /* We are given a 64-bit variable input.  */
            t_64 = tcg_temp_new_i64();
            gen_dup_i64(vece, t_64, in_64);
        } else {
            /* We are given a constant input.  */
            /* For 64-bit hosts, use 64-bit constants for "simple" constants
               or when we'd need too many 32-bit stores, or when a 64-bit
               constant is really required.  */
            if (vece == MO_64
                || (TCG_TARGET_REG_BITS == 64
                    && (in_c == 0 || in_c == -1
                        || !check_size_impl(oprsz, 4)))) {
                t_64 = tcg_const_i64(in_c);
            } else {
                t_32 = tcg_const_i32(in_c);
            }
        }

        /* Implement inline if we picked an implementation size above.  */
        if (t_32) {
            for (i = 0; i < oprsz; i += 4) {
                tcg_gen_st_i32(t_32, cpu_env, dofs + i);
            }
            tcg_temp_free_i32(t_32);
            goto done;
        }
        if (t_64) {
            for (i = 0; i < oprsz; i += 8) {
                tcg_gen_st_i64(t_64, cpu_env, dofs + i);
            }
            tcg_temp_free_i64(t_64);
            goto done;
        }
    }

    /* Otherwise implement out of line.  */
    t_ptr = tcg_temp_new_ptr();
    tcg_gen_addi_ptr(t_ptr, cpu_env, dofs);
    t_desc = tcg_const_i32(simd_desc(oprsz, maxsz, 0));

    if (vece == MO_64) {
        if (in_64) {
            gen_helper_gvec_dup64(t_ptr, t_desc, in_64);
        } else {
            t_64 = tcg_const_i64(in_c);
            gen_helper_gvec_dup64(t_ptr, t_desc, t_64);
            tcg_temp_free_i64(t_64);
        }
    } else {
        typedef void dup_fn(TCGv_ptr, TCGv_i32, TCGv_i32);
        static dup_fn * const fns[3] = {
            gen_helper_gvec_dup8,
            gen_helper_gvec_dup16,
            gen_helper_gvec_dup32
        };

        if (in_32) {
            fns[vece](t_ptr, t_desc, in_32);
        } else {
            t_32 = tcg_temp_new_i32();
            if (in_64) {
                tcg_gen_extrl_i64_i32(t_32, in_64);
            } else if (vece == MO_8) {
                tcg_gen_movi_i32(t_32, in_c & 0xff);
            } else if (vece == MO_16) {
                tcg_gen_movi_i32(t_32, in_c & 0xffff);
            } else {
                tcg_gen_movi_i32(t_32, in_c);
            }
            fns[vece](t_ptr, t_desc, t_32);
            tcg_temp_free_i32(t_32);
        }
    }

    tcg_temp_free_ptr(t_ptr);
    tcg_temp_free_i32(t_desc);
    return;

 done:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Likewise, but with zero.  */
static void expand_clr(uint32_t dofs, uint32_t maxsz)
{
    do_dup(MO_8, dofs, maxsz, maxsz, NULL, NULL, 0);
}
/* Expand OPRSZ bytes worth of two-operand operations using i32 elements.  */
static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         void (*fni)(TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        fni(t0, t0);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
}

static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t1, cpu_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

static void expand_2s_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i32 c, bool scalar_first,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i32(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i32 elements.  */
static void expand_3_i32(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i32(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

static void expand_3i_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int32_t c, bool load_dest,
                          void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, int32_t))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i32(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i32(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i32 elements.  */
static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_i32))
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    TCGv_i32 t2 = tcg_temp_new_i32();
    TCGv_i32 t3 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t1, cpu_env, aofs + i);
        tcg_gen_ld_i32(t2, cpu_env, bofs + i);
        tcg_gen_ld_i32(t3, cpu_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i32(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i32(t3);
    tcg_temp_free_i32(t2);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}
/* Expand OPRSZ bytes worth of two-operand operations using i64 elements.  */
static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                         void (*fni)(TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        fni(t0, t0);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
}

static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t1, cpu_env, dofs + i);
        }
        fni(t1, t0, c);
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

static void expand_2s_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                          TCGv_i64 c, bool scalar_first,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(t1, c, t0);
        } else {
            fni(t1, t0, c);
        }
        tcg_gen_st_i64(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using i64 elements.  */
static void expand_3_i64(uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz, bool load_dest,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1);
        tcg_gen_st_i64(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_3i_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                          uint32_t oprsz, int64_t c, bool load_dest,
                          void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_i64(t2, cpu_env, dofs + i);
        }
        fni(t2, t0, t1, c);
        tcg_gen_st_i64(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t0);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using i64 elements.  */
static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                         uint32_t cofs, uint32_t oprsz, bool write_aofs,
                         void (*fni)(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t1, cpu_env, aofs + i);
        tcg_gen_ld_i64(t2, cpu_env, bofs + i);
        tcg_gen_ld_i64(t3, cpu_env, cofs + i);
        fni(t0, t1, t2, t3);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_i64(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_i64(t3);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}
/* Expand OPRSZ bytes worth of two-operand operations using host vectors.  */
static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t oprsz, uint32_t tysz, TCGType type,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        fni(vece, t0, t0);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
}

/* Expand OPRSZ bytes worth of two-vector operands and an immediate operand
   using host vectors.  */
static void expand_2i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, int64_t))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t1, cpu_env, dofs + i);
        }
        fni(vece, t1, t0, c);
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

static void expand_2s_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t tysz, TCGType type,
                          TCGv_vec c, bool scalar_first,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        if (scalar_first) {
            fni(vece, t1, c, t0);
        } else {
            fni(vece, t1, t0, c);
        }
        tcg_gen_st_vec(t1, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
}

/* Expand OPRSZ bytes worth of three-operand operations using host vectors.  */
static void expand_3_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool load_dest,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, cpu_env, dofs + i);
        }
        fni(vece, t2, t0, t1);
        tcg_gen_st_vec(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t2);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}

/*
 * Expand OPRSZ bytes worth of three-vector operands and an immediate operand
 * using host vectors.
 */
static void expand_3i_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                          TCGType type, int64_t c, bool load_dest,
                          void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec,
                                      int64_t))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        if (load_dest) {
            tcg_gen_ld_vec(t2, cpu_env, dofs + i);
        }
        fni(vece, t2, t0, t1, c);
        tcg_gen_st_vec(t2, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t2);
}

/* Expand OPRSZ bytes worth of four-operand operations using host vectors.  */
static void expand_4_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs, uint32_t oprsz,
                         uint32_t tysz, TCGType type, bool write_aofs,
                         void (*fni)(unsigned, TCGv_vec, TCGv_vec,
                                     TCGv_vec, TCGv_vec))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    TCGv_vec t2 = tcg_temp_new_vec(type);
    TCGv_vec t3 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t1, cpu_env, aofs + i);
        tcg_gen_ld_vec(t2, cpu_env, bofs + i);
        tcg_gen_ld_vec(t3, cpu_env, cofs + i);
        fni(vece, t0, t1, t2, t3);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
        if (write_aofs) {
            tcg_gen_st_vec(t1, cpu_env, aofs + i);
        }
    }
    tcg_temp_free_vec(t3);
    tcg_temp_free_vec(t2);
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}
/* Expand a vector two-operand operation.  */
void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen2 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2_i64(dofs, aofs, oprsz, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2_i32(dofs, aofs, oprsz, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Expand a vector operation with two vectors and an immediate.  */
void tcg_gen_gvec_2i(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                     uint32_t maxsz, int64_t c, const GVecGen2i *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_2i_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                      c, g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_2i_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                      c, g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_2i_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                      c, g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_2i_i64(dofs, aofs, oprsz, c, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_2i_i32(dofs, aofs, oprsz, c, g->load_dest, g->fni4);
        } else {
            if (g->fno) {
                tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, c, g->fno);
            } else {
                TCGv_i64 tcg_c = tcg_const_i64(c);
                tcg_gen_gvec_2i_ool(dofs, aofs, tcg_c, oprsz,
                                    maxsz, c, g->fnoi);
                tcg_temp_free_i64(tcg_c);
            }
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Expand a vector operation with two vectors and a scalar.  */
void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
                     uint32_t maxsz, TCGv_i64 c, const GVecGen2s *g)
{
    TCGType type;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    if (type != 0) {
        const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
        const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
        TCGv_vec t_vec = tcg_temp_new_vec(type);
        uint32_t some;

        tcg_gen_dup_i64_vec(g->vece, t_vec, c);

        switch (type) {
        case TCG_TYPE_V256:
            /* Recall that ARM SVE allows vector sizes that are not a
             * power of 2, but always a multiple of 16.  The intent is
             * that e.g. size == 80 would be expanded with 2x32 + 1x16.
             */
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2s_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                          t_vec, g->scalar_first, g->fniv);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */

        case TCG_TYPE_V128:
            expand_2s_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                          t_vec, g->scalar_first, g->fniv);
            break;

        case TCG_TYPE_V64:
            expand_2s_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                          t_vec, g->scalar_first, g->fniv);
            break;

        default:
            g_assert_not_reached();
        }
        tcg_temp_free_vec(t_vec);
        tcg_swap_vecop_list(hold_list);
    } else if (g->fni8 && check_size_impl(oprsz, 8)) {
        TCGv_i64 t64 = tcg_temp_new_i64();

        gen_dup_i64(g->vece, t64, c);
        expand_2s_i64(dofs, aofs, oprsz, t64, g->scalar_first, g->fni8);
        tcg_temp_free_i64(t64);
    } else if (g->fni4 && check_size_impl(oprsz, 4)) {
        TCGv_i32 t32 = tcg_temp_new_i32();

        tcg_gen_extrl_i64_i32(t32, c);
        gen_dup_i32(g->vece, t32, t32);
        expand_2s_i32(dofs, aofs, oprsz, t32, g->scalar_first, g->fni4);
        tcg_temp_free_i32(t32);
    } else {
        tcg_gen_gvec_2i_ool(dofs, aofs, c, oprsz, maxsz, 0, g->fno);
        return;
    }

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Expand a vector three-operand operation.  */
void tcg_gen_gvec_3(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen3 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                     g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                     g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_3_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                     g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3_i64(dofs, aofs, bofs, oprsz, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3_i32(dofs, aofs, bofs, oprsz, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz,
                               maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Expand a vector operation with three vectors and an immediate.  */
void tcg_gen_gvec_3i(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                     uint32_t oprsz, uint32_t maxsz, int64_t c,
                     const GVecGen3i *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /*
         * Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_3i_vec(g->vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256,
                      c, g->load_dest, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128,
                      c, g->load_dest, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_3i_vec(g->vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64,
                      c, g->load_dest, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_3i_i64(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_3i_i32(dofs, aofs, bofs, oprsz, c, g->load_dest, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, c, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/* Expand a vector four-operand operation.  */
void tcg_gen_gvec_4(uint32_t dofs, uint32_t aofs, uint32_t bofs, uint32_t cofs,
                    uint32_t oprsz, uint32_t maxsz, const GVecGen4 *g)
{
    const TCGOpcode *this_list = g->opt_opc ? : vecop_list_empty;
    const TCGOpcode *hold_list = tcg_swap_vecop_list(this_list);
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs | cofs);
    check_overlap_4(dofs, aofs, bofs, cofs, maxsz);

    type = 0;
    if (g->fniv) {
        type = choose_vector_type(g->opt_opc, g->vece, oprsz, g->prefer_i64);
    }
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, some,
                     32, TCG_TYPE_V256, g->write_aofs, g->fniv);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        cofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     16, TCG_TYPE_V128, g->write_aofs, g->fniv);
        break;
    case TCG_TYPE_V64:
        expand_4_vec(g->vece, dofs, aofs, bofs, cofs, oprsz,
                     8, TCG_TYPE_V64, g->write_aofs, g->fniv);
        break;

    case 0:
        if (g->fni8 && check_size_impl(oprsz, 8)) {
            expand_4_i64(dofs, aofs, bofs, cofs, oprsz,
                         g->write_aofs, g->fni8);
        } else if (g->fni4 && check_size_impl(oprsz, 4)) {
            expand_4_i32(dofs, aofs, bofs, cofs, oprsz,
                         g->write_aofs, g->fni4);
        } else {
            assert(g->fno != NULL);
            tcg_gen_gvec_4_ool(dofs, aofs, bofs, cofs,
                               oprsz, maxsz, g->data, g->fno);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}
/*
 * Expand specific vector operations.
 */

static void vec_mov2(unsigned vece, TCGv_vec a, TCGv_vec b)
{
    tcg_gen_mov_vec(a, b);
}

void tcg_gen_gvec_mov(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g = {
        .fni8 = tcg_gen_mov_i64,
        .fniv = vec_mov2,
        .fno = gen_helper_gvec_mov,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    if (dofs != aofs) {
        tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
    } else {
        check_size_align(oprsz, maxsz, dofs);
        if (oprsz < maxsz) {
            expand_clr(dofs + oprsz, maxsz - oprsz);
        }
    }
}
void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, TCGv_i32 in)
{
    check_size_align(oprsz, maxsz, dofs);
    tcg_debug_assert(vece <= MO_32);
    do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
}

void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t oprsz,
                          uint32_t maxsz, TCGv_i64 in)
{
    check_size_align(oprsz, maxsz, dofs);
    tcg_debug_assert(vece <= MO_64);
    do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
}
void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
                          uint32_t oprsz, uint32_t maxsz)
{
    check_size_align(oprsz, maxsz, dofs);
    if (vece <= MO_64) {
        TCGType type = choose_vector_type(NULL, vece, oprsz, 0);
        if (type != 0) {
            TCGv_vec t_vec = tcg_temp_new_vec(type);
            tcg_gen_dup_mem_vec(vece, t_vec, cpu_env, aofs);
            do_dup_store(type, dofs, oprsz, maxsz, t_vec);
            tcg_temp_free_vec(t_vec);
        } else if (vece <= MO_32) {
            TCGv_i32 in = tcg_temp_new_i32();
            switch (vece) {
            case MO_8:
                tcg_gen_ld8u_i32(in, cpu_env, aofs);
                break;
            case MO_16:
                tcg_gen_ld16u_i32(in, cpu_env, aofs);
                break;
            default:
                tcg_gen_ld_i32(in, cpu_env, aofs);
                break;
            }
            do_dup(vece, dofs, oprsz, maxsz, in, NULL, 0);
            tcg_temp_free_i32(in);
        } else {
            TCGv_i64 in = tcg_temp_new_i64();
            tcg_gen_ld_i64(in, cpu_env, aofs);
            do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
            tcg_temp_free_i64(in);
        }
    } else {
        /* 128-bit duplicate.  */
        /* ??? Dup to 256-bit vector.  */
        int i;

        tcg_debug_assert(vece == 4);
        tcg_debug_assert(oprsz >= 16);
        if (TCG_TARGET_HAS_v128) {
            TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);

            tcg_gen_ld_vec(in, cpu_env, aofs);
            for (i = 0; i < oprsz; i += 16) {
                tcg_gen_st_vec(in, cpu_env, dofs + i);
            }
            tcg_temp_free_vec(in);
        } else {
            TCGv_i64 in0 = tcg_temp_new_i64();
            TCGv_i64 in1 = tcg_temp_new_i64();

            tcg_gen_ld_i64(in0, cpu_env, aofs);
            tcg_gen_ld_i64(in1, cpu_env, aofs + 8);
            for (i = 0; i < oprsz; i += 16) {
                tcg_gen_st_i64(in0, cpu_env, dofs + i);
                tcg_gen_st_i64(in1, cpu_env, dofs + i + 8);
            }
            tcg_temp_free_i64(in0);
            tcg_temp_free_i64(in1);
        }
        if (oprsz < maxsz) {
            expand_clr(dofs + oprsz, maxsz - oprsz);
        }
    }
}
void tcg_gen_gvec_dup64i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint64_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_64, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup32i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint32_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_32, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup16i(uint32_t dofs, uint32_t oprsz,
                         uint32_t maxsz, uint16_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_16, dofs, oprsz, maxsz, NULL, NULL, x);
}

void tcg_gen_gvec_dup8i(uint32_t dofs, uint32_t oprsz,
                        uint32_t maxsz, uint8_t x)
{
    check_size_align(oprsz, maxsz, dofs);
    do_dup(MO_8, dofs, oprsz, maxsz, NULL, NULL, x);
}
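
/*
 * Note that a constant zero is special here: do_dup expands oprsz to
 * maxsz up front when storing 0, so e.g. tcg_gen_gvec_dup8i(dofs, oprsz,
 * maxsz, 0) clears the entire register in one pass instead of
 * duplicating into oprsz bytes and then clearing the tail.
 */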
void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2 g = {
        .fni8 = tcg_gen_not_i64,
        .fniv = tcg_gen_not_vec,
        .fno = gen_helper_gvec_not,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g);
}
/* Perform a vector addition using normal addition and a mask.  The mask
   should be the sign bit of each lane.  This 6-operation form is more
   efficient than separate additions when there are 4 or more lanes in
   the 64-bit operation.  */
static void gen_addv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_andc_i64(t1, a, m);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_xor_i64(t3, a, b);
    tcg_gen_add_i64(d, t1, t2);
    tcg_gen_and_i64(t3, t3, m);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
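
/*
 * The same trick written out on plain integers (an illustrative sketch,
 * not code used by the translator): for MO_8 lanes the mask is
 * m == 0x8080808080808080, and the generated sequence computes
 *
 *     uint64_t add8x8(uint64_t a, uint64_t b)
 *     {
 *         uint64_t m = 0x8080808080808080ull;
 *         return ((a & ~m) + (b & ~m)) ^ ((a ^ b) & m);
 *     }
 *
 * Masking out the sign bits keeps the add from carrying across lane
 * boundaries; the final xor recomputes each lane's sign bit from the
 * operands' sign bits and the carry-in the masked add produced.
 */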
void tcg_gen_vec_add8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_addv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_add16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_addv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_add32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, a, ~0xffffffffull);
    tcg_gen_add_i64(t2, a, b);
    tcg_gen_add_i64(t1, t1, b);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

static const TCGOpcode vecop_list_add[] = { INDEX_op_add_vec, 0 };
void tcg_gen_gvec_add(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fni8 = tcg_gen_vec_add8_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add8,
          .opt_opc = vecop_list_add,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_add16_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add16,
          .opt_opc = vecop_list_add,
          .vece = MO_16 },
        { .fni4 = tcg_gen_add_i32,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add32,
          .opt_opc = vecop_list_add,
          .vece = MO_32 },
        { .fni8 = tcg_gen_add_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_add64,
          .opt_opc = vecop_list_add,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
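
/*
 * Usage example (hypothetical offsets): to add two 16-byte vectors of
 * 32-bit lanes, a target front end would emit
 *
 *     tcg_gen_gvec_add(MO_32, dofs, aofs, bofs, 16, 16);
 *
 * and the expansion picks host vectors, inline i32/i64 code, or the
 * out-of-line helper according to what the backend supports.
 */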
void tcg_gen_gvec_adds(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fni8 = tcg_gen_vec_add8_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds8,
          .opt_opc = vecop_list_add,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_add16_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds16,
          .opt_opc = vecop_list_add,
          .vece = MO_16 },
        { .fni4 = tcg_gen_add_i32,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds32,
          .opt_opc = vecop_list_add,
          .vece = MO_32 },
        { .fni8 = tcg_gen_add_i64,
          .fniv = tcg_gen_add_vec,
          .fno = gen_helper_gvec_adds64,
          .opt_opc = vecop_list_add,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

void tcg_gen_gvec_addi(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_adds(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}

static const TCGOpcode vecop_list_sub[] = { INDEX_op_sub_vec, 0 };
void tcg_gen_gvec_subs(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs8,
          .opt_opc = vecop_list_sub,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs16,
          .opt_opc = vecop_list_sub,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs32,
          .opt_opc = vecop_list_sub,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_subs64,
          .opt_opc = vecop_list_sub,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}
/* Perform a vector subtraction using normal subtraction and a mask.
   Compare gen_addv_mask above.  */
static void gen_subv_mask(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_or_i64(t1, a, m);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_eqv_i64(t3, a, b);
    tcg_gen_sub_i64(d, t1, t2);
    tcg_gen_and_i64(t3, t3, m);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}
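
/*
 * As with gen_addv_mask, the integer form of this trick is (a sketch
 * for MO_8 lanes with m == 0x8080808080808080, not code used here):
 *
 *     uint64_t sub8x8(uint64_t a, uint64_t b)
 *     {
 *         uint64_t m = 0x8080808080808080ull;
 *         return ((a | m) - (b & ~m)) ^ (~(a ^ b) & m);
 *     }
 *
 * Forcing the sign bit of each minuend lane to 1 and each subtrahend
 * lane to 0 guarantees that no borrow crosses a lane boundary; the xor
 * with the eqv of the operands then restores the correct sign bits.
 */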
void tcg_gen_vec_sub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_subv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_sub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_subv_mask(d, a, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_sub32_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, b, ~0xffffffffull);
    tcg_gen_sub_i64(t2, a, b);
    tcg_gen_sub_i64(t1, a, t1);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}
void tcg_gen_gvec_sub(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fni8 = tcg_gen_vec_sub8_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub8,
          .opt_opc = vecop_list_sub,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sub16_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub16,
          .opt_opc = vecop_list_sub,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sub_i32,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub32,
          .opt_opc = vecop_list_sub,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sub_i64,
          .fniv = tcg_gen_sub_vec,
          .fno = gen_helper_gvec_sub64,
          .opt_opc = vecop_list_sub,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

static const TCGOpcode vecop_list_mul[] = { INDEX_op_mul_vec, 0 };
void tcg_gen_gvec_mul(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul8,
          .opt_opc = vecop_list_mul,
          .vece = MO_8 },
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul16,
          .opt_opc = vecop_list_mul,
          .vece = MO_16 },
        { .fni4 = tcg_gen_mul_i32,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul32,
          .opt_opc = vecop_list_mul,
          .vece = MO_32 },
        { .fni8 = tcg_gen_mul_i64,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_mul64,
          .opt_opc = vecop_list_mul,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_muls(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2s g[4] = {
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls8,
          .opt_opc = vecop_list_mul,
          .vece = MO_8 },
        { .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls16,
          .opt_opc = vecop_list_mul,
          .vece = MO_16 },
        { .fni4 = tcg_gen_mul_i32,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls32,
          .opt_opc = vecop_list_mul,
          .vece = MO_32 },
        { .fni8 = tcg_gen_mul_i64,
          .fniv = tcg_gen_mul_vec,
          .fno = gen_helper_gvec_muls64,
          .opt_opc = vecop_list_mul,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, c, &g[vece]);
}

void tcg_gen_gvec_muli(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(c);
    tcg_gen_gvec_muls(vece, dofs, aofs, tmp, oprsz, maxsz);
    tcg_temp_free_i64(tmp);
}
);
1854 void tcg_gen_gvec_ssadd(unsigned vece
, uint32_t dofs
, uint32_t aofs
,
1855 uint32_t bofs
, uint32_t oprsz
, uint32_t maxsz
)
1857 static const TCGOpcode vecop_list
[] = { INDEX_op_ssadd_vec
, 0 };
1858 static const GVecGen3 g
[4] = {
1859 { .fniv
= tcg_gen_ssadd_vec
,
1860 .fno
= gen_helper_gvec_ssadd8
,
1861 .opt_opc
= vecop_list
,
1863 { .fniv
= tcg_gen_ssadd_vec
,
1864 .fno
= gen_helper_gvec_ssadd16
,
1865 .opt_opc
= vecop_list
,
1867 { .fniv
= tcg_gen_ssadd_vec
,
1868 .fno
= gen_helper_gvec_ssadd32
,
1869 .opt_opc
= vecop_list
,
1871 { .fniv
= tcg_gen_ssadd_vec
,
1872 .fno
= gen_helper_gvec_ssadd64
,
1873 .opt_opc
= vecop_list
,
1876 tcg_debug_assert(vece
<= MO_64
);
1877 tcg_gen_gvec_3(dofs
, aofs
, bofs
, oprsz
, maxsz
, &g
[vece
]);
1880 void tcg_gen_gvec_sssub(unsigned vece
, uint32_t dofs
, uint32_t aofs
,
1881 uint32_t bofs
, uint32_t oprsz
, uint32_t maxsz
)
1883 static const TCGOpcode vecop_list
[] = { INDEX_op_sssub_vec
, 0 };
1884 static const GVecGen3 g
[4] = {
1885 { .fniv
= tcg_gen_sssub_vec
,
1886 .fno
= gen_helper_gvec_sssub8
,
1887 .opt_opc
= vecop_list
,
1889 { .fniv
= tcg_gen_sssub_vec
,
1890 .fno
= gen_helper_gvec_sssub16
,
1891 .opt_opc
= vecop_list
,
1893 { .fniv
= tcg_gen_sssub_vec
,
1894 .fno
= gen_helper_gvec_sssub32
,
1895 .opt_opc
= vecop_list
,
1897 { .fniv
= tcg_gen_sssub_vec
,
1898 .fno
= gen_helper_gvec_sssub64
,
1899 .opt_opc
= vecop_list
,
1902 tcg_debug_assert(vece
<= MO_64
);
1903 tcg_gen_gvec_3(dofs
, aofs
, bofs
, oprsz
, maxsz
, &g
[vece
]);
static void tcg_gen_usadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 max = tcg_const_i32(-1);
    tcg_gen_add_i32(d, a, b);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, d, a, max, d);
    tcg_temp_free_i32(max);
}

static void tcg_gen_usadd_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 max = tcg_const_i64(-1);
    tcg_gen_add_i64(d, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, d, a, max, d);
    tcg_temp_free_i64(max);
}

void tcg_gen_gvec_usadd(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_usadd_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_usadd_i32,
          .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_usadd_i64,
          .fniv = tcg_gen_usadd_vec,
          .fno = gen_helper_gvec_usadd64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
static void tcg_gen_ussub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 min = tcg_const_i32(0);
    tcg_gen_sub_i32(d, a, b);
    tcg_gen_movcond_i32(TCG_COND_LTU, d, a, b, min, d);
    tcg_temp_free_i32(min);
}

static void tcg_gen_ussub_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 min = tcg_const_i64(0);
    tcg_gen_sub_i64(d, a, b);
    tcg_gen_movcond_i64(TCG_COND_LTU, d, a, b, min, d);
    tcg_temp_free_i64(min);
}

void tcg_gen_gvec_ussub(unsigned vece, uint32_t dofs, uint32_t aofs,
                        uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_ussub_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_ussub_i32,
          .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_ussub_i64,
          .fniv = tcg_gen_ussub_vec,
          .fno = gen_helper_gvec_ussub64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
void tcg_gen_gvec_smin(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_smin_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_smin_i32,
          .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_smin_i64,
          .fniv = tcg_gen_smin_vec,
          .fno = gen_helper_gvec_smin64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_umin(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_umin_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_umin_i32,
          .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_umin_i64,
          .fniv = tcg_gen_umin_vec,
          .fno = gen_helper_gvec_umin64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_smax(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_smax_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_smax_i32,
          .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_smax_i64,
          .fniv = tcg_gen_smax_vec,
          .fno = gen_helper_gvec_smax64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_umax(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_umax_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_umax_i32,
          .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_umax_i64,
          .fniv = tcg_gen_umax_vec,
          .fno = gen_helper_gvec_umax64,
          .opt_opc = vecop_list,
          .vece = MO_64 },
    };
    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}
/* Perform a vector negation using normal negation and a mask.
   Compare gen_subv_mask above.  */
static void gen_negv_mask(TCGv_i64 d, TCGv_i64 b, TCGv_i64 m)
{
    TCGv_i64 t2 = tcg_temp_new_i64();
    TCGv_i64 t3 = tcg_temp_new_i64();

    tcg_gen_andc_i64(t3, m, b);
    tcg_gen_andc_i64(t2, b, m);
    tcg_gen_sub_i64(d, m, t2);
    tcg_gen_xor_i64(d, d, t3);

    tcg_temp_free_i64(t2);
    tcg_temp_free_i64(t3);
}

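/*
 * Illustrative host-integer sketch (not part of the library, name
 * hypothetical): the masked negation above for MO_8 lanes.  With
 * m = 0x80 per lane, t2 = b & ~m has every lane msb clear, so each
 * lane of m - t2 stays in [0x01, 0x80] and no borrow crosses a lane
 * boundary; the final xor patches the sign bit back in.  E.g. for a
 * lane b = 0x03: (0x80 - 0x03) ^ 0x80 = 0x7d ^ 0x80 = 0xfd = -3.
 */
static inline uint64_t example_negv8(uint64_t b)
{
    uint64_t m = dup_const(MO_8, 0x80);
    uint64_t t3 = m & ~b;       /* msb positions where b is non-negative */
    uint64_t t2 = b & ~m;       /* b with per-lane msb cleared */
    return (m - t2) ^ t3;       /* per-lane two's-complement negation */
}
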
void tcg_gen_vec_neg8_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_8, 0x80));
    gen_negv_mask(d, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_neg16_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 m = tcg_const_i64(dup_const(MO_16, 0x8000));
    gen_negv_mask(d, b, m);
    tcg_temp_free_i64(m);
}

void tcg_gen_vec_neg32_i64(TCGv_i64 d, TCGv_i64 b)
{
    TCGv_i64 t1 = tcg_temp_new_i64();
    TCGv_i64 t2 = tcg_temp_new_i64();

    tcg_gen_andi_i64(t1, b, ~0xffffffffull);
    tcg_gen_neg_i64(t2, b);
    tcg_gen_neg_i64(t1, t1);
    tcg_gen_deposit_i64(d, t1, t2, 0, 32);

    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t2);
}

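/*
 * Illustrative host-integer sketch (not part of the library, name
 * hypothetical) of the two-lane negation above: -(b & ~0xffffffff)
 * has exact high-lane bits because its low lane is zero, while -b
 * has exact low-lane bits; the deposit then combines the two halves.
 */
static inline uint64_t example_neg32x2(uint64_t b)
{
    uint64_t t1 = -(b & ~0xffffffffull);   /* exact in bits 63:32 */
    uint64_t t2 = -b;                      /* exact in bits 31:0 */
    return (t1 & ~0xffffffffull) | (t2 & 0xffffffffull);
}
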
void tcg_gen_gvec_neg(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_neg_vec, 0 };
    static const GVecGen2 g[4] = {
        { .fni8 = tcg_gen_vec_neg8_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_neg16_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_neg_i32,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_neg_i64,
          .fniv = tcg_gen_neg_vec,
          .fno = gen_helper_gvec_neg64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
}

static void gen_absv_mask(TCGv_i64 d, TCGv_i64 b, unsigned vece)
{
    TCGv_i64 t = tcg_temp_new_i64();
    int nbit = 8 << vece;

    /* Create -1 for each negative element.  */
    tcg_gen_shri_i64(t, b, nbit - 1);
    tcg_gen_andi_i64(t, t, dup_const(vece, 1));
    tcg_gen_muli_i64(t, t, (1 << nbit) - 1);

    /*
     * Invert (via xor -1) and add one (via sub -1).
     * Because of the ordering the msb is cleared,
     * so we never have carry into the next element.
     */
    tcg_gen_xor_i64(d, b, t);
    tcg_gen_sub_i64(d, d, t);

    tcg_temp_free_i64(t);
}

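/*
 * Illustrative host-integer sketch (not part of the library, name
 * hypothetical): the first three ops above, which build -1 in each
 * negative MO_16 lane.  Each sign bit is shifted down to its lane's
 * bit 0, isolated, then widened to 0xffff by the multiply; the
 * per-lane products cannot overlap, so the multiply stays lane-exact.
 */
static inline uint64_t example_neg_lane_mask16(uint64_t b)
{
    uint64_t t = (b >> 15) & dup_const(MO_16, 1);
    return t * 0xffff;
}
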
static void tcg_gen_vec_abs8_i64(TCGv_i64 d, TCGv_i64 b)
{
    gen_absv_mask(d, b, MO_8);
}

static void tcg_gen_vec_abs16_i64(TCGv_i64 d, TCGv_i64 b)
{
    gen_absv_mask(d, b, MO_16);
}

void tcg_gen_gvec_abs(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_abs_vec, 0 };
    static const GVecGen2 g[4] = {
        { .fni8 = tcg_gen_vec_abs8_i64,
          .fniv = tcg_gen_abs_vec,
          .fno = gen_helper_gvec_abs8,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_abs16_i64,
          .fniv = tcg_gen_abs_vec,
          .fno = gen_helper_gvec_abs16,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_abs_i32,
          .fniv = tcg_gen_abs_vec,
          .fno = gen_helper_gvec_abs32,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_abs_i64,
          .fniv = tcg_gen_abs_vec,
          .fno = gen_helper_gvec_abs64,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &g[vece]);
}

void tcg_gen_gvec_and(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_and_i64,
        .fniv = tcg_gen_and_vec,
        .fno = gen_helper_gvec_and,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_or(unsigned vece, uint32_t dofs, uint32_t aofs,
                     uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_or_i64,
        .fniv = tcg_gen_or_vec,
        .fno = gen_helper_gvec_or,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_xor_i64,
        .fniv = tcg_gen_xor_vec,
        .fno = gen_helper_gvec_xor,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_andc_i64,
        .fniv = tcg_gen_andc_vec,
        .fno = gen_helper_gvec_andc,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_orc_i64,
        .fniv = tcg_gen_orc_vec,
        .fno = gen_helper_gvec_orc,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_nand(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_nand_i64,
        .fniv = tcg_gen_nand_vec,
        .fno = gen_helper_gvec_nand,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_nor(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_nor_i64,
        .fniv = tcg_gen_nor_vec,
        .fno = gen_helper_gvec_nor,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_not(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
                      uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen3 g = {
        .fni8 = tcg_gen_eqv_i64,
        .fniv = tcg_gen_eqv_vec,
        .fno = gen_helper_gvec_eqv,
        .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    };

    if (aofs == bofs) {
        tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
    } else {
        tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
    }
}

static const GVecGen2s gop_ands = {
    .fni8 = tcg_gen_and_i64,
    .fniv = tcg_gen_and_vec,
    .fno = gen_helper_gvec_ands,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_andi(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
    tcg_temp_free_i64(tmp);
}

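/*
 * Usage sketch (illustrative only, not part of the library; the
 * function name and offset are hypothetical): clear the sign bit of
 * every 16-bit element of a 16-byte vector, in place.  The scalar
 * constant is replicated across lanes by dup_const above.
 */
static void example_clear_sign16(uint32_t vofs)
{
    tcg_gen_gvec_andi(MO_16, vofs, vofs, 0x7fff, 16, 16);
}
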
static const GVecGen2s gop_xors = {
    .fni8 = tcg_gen_xor_i64,
    .fniv = tcg_gen_xor_vec,
    .fno = gen_helper_gvec_xors,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_xori(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
    tcg_temp_free_i64(tmp);
}

static const GVecGen2s gop_ors = {
    .fni8 = tcg_gen_or_i64,
    .fniv = tcg_gen_or_vec,
    .fno = gen_helper_gvec_ors,
    .prefer_i64 = TCG_TARGET_REG_BITS == 64,
    .vece = MO_64
};

void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
                      TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    gen_dup_i64(vece, tmp, c);
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_gvec_ori(unsigned vece, uint32_t dofs, uint32_t aofs,
                      int64_t c, uint32_t oprsz, uint32_t maxsz)
{
    TCGv_i64 tmp = tcg_const_i64(dup_const(vece, c));
    tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
    tcg_temp_free_i64(tmp);
}

void tcg_gen_vec_shl8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff << c);
    tcg_gen_shli_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

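/*
 * Illustrative host-integer sketch (not part of the library, name
 * hypothetical): the masked shift above for MO_8 lanes and c = 3.
 * The wide shift moves each lane's top bits into its neighbour's
 * low bits; the replicated mask 0xf8 clears exactly those bits.
 */
static inline uint64_t example_shl8i_c3(uint64_t a)
{
    return (a << 3) & dup_const(MO_8, 0xff << 3);
}
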
void tcg_gen_vec_shl16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff << c);
    tcg_gen_shli_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shli_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_shl8i_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_shl16i_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shli_i32,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shli_i64,
          .fniv = tcg_gen_shli_vec,
          .fno = gen_helper_gvec_shl64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}

void tcg_gen_vec_shr8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_8, 0xff >> c);
    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_vec_shr16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t mask = dup_const(MO_16, 0xffff >> c);
    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(d, d, mask);
}

void tcg_gen_gvec_shri(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shri_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_shr8i_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_shr16i_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shri_i32,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shri_i64,
          .fniv = tcg_gen_shri_vec,
          .fno = gen_helper_gvec_shr64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}

void tcg_gen_vec_sar8i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t s_mask = dup_const(MO_8, 0x80 >> c);
    uint64_t c_mask = dup_const(MO_8, 0xff >> c);
    TCGv_i64 s = tcg_temp_new_i64();

    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(s, d, s_mask);        /* isolate (shifted) sign bit */
    tcg_gen_muli_i64(s, s, (2 << c) - 2);  /* replicate isolated signs */
    tcg_gen_andi_i64(d, d, c_mask);        /* clear out bits above sign */
    tcg_gen_or_i64(d, d, s);               /* include sign extension */
    tcg_temp_free_i64(s);
}

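/*
 * Illustrative host-integer sketch (not part of the library, name
 * hypothetical): the sign replication above for MO_8 lanes, c = 2.
 * The isolated sign bit sits at lane bit 5 (0x20); multiplying by
 * (2 << 2) - 2 = 6 turns it into 0xc0, i.e. the two sign copies an
 * arithmetic shift would have produced in bits 7:6.
 */
static inline uint64_t example_sar8i_c2(uint64_t a)
{
    uint64_t d = a >> 2;
    uint64_t s = d & dup_const(MO_8, 0x80 >> 2);  /* shifted sign bits */
    s *= (2 << 2) - 2;                            /* replicate signs */
    d &= dup_const(MO_8, 0xff >> 2);              /* drop cross-lane bits */
    return d | s;
}
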
void tcg_gen_vec_sar16i_i64(TCGv_i64 d, TCGv_i64 a, int64_t c)
{
    uint64_t s_mask = dup_const(MO_16, 0x8000 >> c);
    uint64_t c_mask = dup_const(MO_16, 0xffff >> c);
    TCGv_i64 s = tcg_temp_new_i64();

    tcg_gen_shri_i64(d, a, c);
    tcg_gen_andi_i64(s, d, s_mask);        /* isolate (shifted) sign bit */
    tcg_gen_andi_i64(d, d, c_mask);        /* clear out bits above sign */
    tcg_gen_muli_i64(s, s, (2 << c) - 2);  /* replicate isolated signs */
    tcg_gen_or_i64(d, d, s);               /* include sign extension */
    tcg_temp_free_i64(s);
}

void tcg_gen_gvec_sari(unsigned vece, uint32_t dofs, uint32_t aofs,
                       int64_t shift, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sari_vec, 0 };
    static const GVecGen2i g[4] = {
        { .fni8 = tcg_gen_vec_sar8i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar8i,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fni8 = tcg_gen_vec_sar16i_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar16i,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sari_i32,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar32i,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sari_i64,
          .fniv = tcg_gen_sari_vec,
          .fno = gen_helper_gvec_sar64i,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_debug_assert(shift >= 0 && shift < (8 << vece));
    if (shift == 0) {
        tcg_gen_gvec_mov(vece, dofs, aofs, oprsz, maxsz);
    } else {
        tcg_gen_gvec_2i(dofs, aofs, oprsz, maxsz, shift, &g[vece]);
    }
}

/*
 * Specialized generation of vector shifts by a non-constant scalar.
 */

typedef struct {
    void (*fni4)(TCGv_i32, TCGv_i32, TCGv_i32);
    void (*fni8)(TCGv_i64, TCGv_i64, TCGv_i64);
    void (*fniv_s)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32);
    void (*fniv_v)(unsigned, TCGv_vec, TCGv_vec, TCGv_vec);
    gen_helper_gvec_2 *fno[4];
    TCGOpcode s_list[2];
    TCGOpcode v_list[2];
} GVecGen2sh;

static void expand_2sh_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t oprsz, uint32_t tysz, TCGType type,
                           TCGv_i32 shift,
                           void (*fni)(unsigned, TCGv_vec, TCGv_vec, TCGv_i32))
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        fni(vece, t0, t0, shift);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t0);
}

static void
do_gvec_shifts(unsigned vece, uint32_t dofs, uint32_t aofs, TCGv_i32 shift,
               uint32_t oprsz, uint32_t maxsz, const GVecGen2sh *g)
{
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs);
    check_overlap_2(dofs, aofs, maxsz);

    /* If the backend has a scalar expansion, great.  */
    type = choose_vector_type(g->s_list, vece, oprsz, vece == MO_64);
    if (type) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        switch (type) {
        case TCG_TYPE_V256:
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2sh_vec(vece, dofs, aofs, some, 32,
                           TCG_TYPE_V256, shift, g->fniv_s);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */
        case TCG_TYPE_V128:
            expand_2sh_vec(vece, dofs, aofs, oprsz, 16,
                           TCG_TYPE_V128, shift, g->fniv_s);
            break;
        case TCG_TYPE_V64:
            expand_2sh_vec(vece, dofs, aofs, oprsz, 8,
                           TCG_TYPE_V64, shift, g->fniv_s);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_swap_vecop_list(hold_list);
        goto clear_tail;
    }

    /* If the backend supports variable vector shifts, also cool.  */
    type = choose_vector_type(g->v_list, vece, oprsz, vece == MO_64);
    if (type) {
        const TCGOpcode *hold_list = tcg_swap_vecop_list(NULL);
        TCGv_vec v_shift = tcg_temp_new_vec(type);

        if (vece == MO_64) {
            TCGv_i64 sh64 = tcg_temp_new_i64();
            tcg_gen_extu_i32_i64(sh64, shift);
            tcg_gen_dup_i64_vec(MO_64, v_shift, sh64);
            tcg_temp_free_i64(sh64);
        } else {
            tcg_gen_dup_i32_vec(vece, v_shift, shift);
        }

        switch (type) {
        case TCG_TYPE_V256:
            some = QEMU_ALIGN_DOWN(oprsz, 32);
            expand_2s_vec(vece, dofs, aofs, some, 32, TCG_TYPE_V256,
                          v_shift, false, g->fniv_v);
            if (some == oprsz) {
                break;
            }
            dofs += some;
            aofs += some;
            oprsz -= some;
            maxsz -= some;
            /* fallthru */
        case TCG_TYPE_V128:
            expand_2s_vec(vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
                          v_shift, false, g->fniv_v);
            break;
        case TCG_TYPE_V64:
            expand_2s_vec(vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
                          v_shift, false, g->fniv_v);
            break;
        default:
            g_assert_not_reached();
        }
        tcg_temp_free_vec(v_shift);
        tcg_swap_vecop_list(hold_list);
        goto clear_tail;
    }

    /* Otherwise fall back to integral... */
    if (vece == MO_32 && check_size_impl(oprsz, 4)) {
        expand_2s_i32(dofs, aofs, oprsz, shift, false, g->fni4);
    } else if (vece == MO_64 && check_size_impl(oprsz, 8)) {
        TCGv_i64 sh64 = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(sh64, shift);
        expand_2s_i64(dofs, aofs, oprsz, sh64, false, g->fni8);
        tcg_temp_free_i64(sh64);
    } else {
        TCGv_ptr a0 = tcg_temp_new_ptr();
        TCGv_ptr a1 = tcg_temp_new_ptr();
        TCGv_i32 desc = tcg_temp_new_i32();

        tcg_gen_shli_i32(desc, shift, SIMD_DATA_SHIFT);
        tcg_gen_ori_i32(desc, desc, simd_desc(oprsz, maxsz, 0));
        tcg_gen_addi_ptr(a0, cpu_env, dofs);
        tcg_gen_addi_ptr(a1, cpu_env, aofs);

        g->fno[vece](a0, a1, desc);

        tcg_temp_free_ptr(a0);
        tcg_temp_free_ptr(a1);
        tcg_temp_free_i32(desc);
        return;
    }

 clear_tail:
    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

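/*
 * Usage sketch (illustrative only, not part of the library; the
 * function name and offsets are hypothetical): shift every 32-bit
 * element of a 16-byte vector left by a runtime amount.  As the
 * expander above shows, the scalar-shift vector op is preferred,
 * then the variable vector shift, then the integral and out-of-line
 * fallbacks.
 */
static void example_shls_q(uint32_t dofs, uint32_t aofs, TCGv_i32 amount)
{
    tcg_gen_gvec_shls(MO_32, dofs, aofs, amount, 16, 16);
}
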
void tcg_gen_gvec_shls(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_shl_i32,
        .fni8 = tcg_gen_shl_i64,
        .fniv_s = tcg_gen_shls_vec,
        .fniv_v = tcg_gen_shlv_vec,
        .fno = {
            gen_helper_gvec_shl8i,
            gen_helper_gvec_shl16i,
            gen_helper_gvec_shl32i,
            gen_helper_gvec_shl64i,
        },
        .s_list = { INDEX_op_shls_vec, 0 },
        .v_list = { INDEX_op_shlv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_shrs(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_shr_i32,
        .fni8 = tcg_gen_shr_i64,
        .fniv_s = tcg_gen_shrs_vec,
        .fniv_v = tcg_gen_shrv_vec,
        .fno = {
            gen_helper_gvec_shr8i,
            gen_helper_gvec_shr16i,
            gen_helper_gvec_shr32i,
            gen_helper_gvec_shr64i,
        },
        .s_list = { INDEX_op_shrs_vec, 0 },
        .v_list = { INDEX_op_shrv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

void tcg_gen_gvec_sars(unsigned vece, uint32_t dofs, uint32_t aofs,
                       TCGv_i32 shift, uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen2sh g = {
        .fni4 = tcg_gen_sar_i32,
        .fni8 = tcg_gen_sar_i64,
        .fniv_s = tcg_gen_sars_vec,
        .fniv_v = tcg_gen_sarv_vec,
        .fno = {
            gen_helper_gvec_sar8i,
            gen_helper_gvec_sar16i,
            gen_helper_gvec_sar32i,
            gen_helper_gvec_sar64i,
        },
        .s_list = { INDEX_op_sars_vec, 0 },
        .v_list = { INDEX_op_sarv_vec, 0 },
    };

    tcg_debug_assert(vece <= MO_64);
    do_gvec_shifts(vece, dofs, aofs, shift, oprsz, maxsz, &g);
}

/*
 * Expand D = A << (B % element bits)
 *
 * Unlike scalar shifts, it is not easy for the target front end
 * to include the modulo as part of the expansion.  If the target
 * naturally includes the modulo as part of the operation, great!
 * If the target has some other behaviour for out-of-range shifts,
 * then it could not use this function anyway, and would need to
 * do its own expansion with custom functions.
 */
static void tcg_gen_shlv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_shlv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_shl_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_shl_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_shl_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_shl_i64(d, a, t);
    tcg_temp_free_i64(t);
}

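/*
 * Illustrative host-integer sketch (not part of the library, name
 * hypothetical): the modulo semantics of the scalar fallbacks above,
 * matching D = A << (B % 32) for 32-bit elements.
 */
static inline uint32_t example_shl_mod32(uint32_t a, uint32_t b)
{
    return a << (b & 31);
}
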
void tcg_gen_gvec_shlv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shlv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shl_mod_i32,
          .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shl_mod_i64,
          .fniv = tcg_gen_shlv_mod_vec,
          .fno = gen_helper_gvec_shl64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

/*
 * Similarly for logical right shifts.
 */
static void tcg_gen_shrv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_shrv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_shr_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_shr_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_shr_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_shr_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_shrv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_shrv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_shr_mod_i32,
          .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_shr_mod_i64,
          .fniv = tcg_gen_shrv_mod_vec,
          .fno = gen_helper_gvec_shr64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

/*
 * Similarly for arithmetic right shifts.
 */
static void tcg_gen_sarv_mod_vec(unsigned vece, TCGv_vec d,
                                 TCGv_vec a, TCGv_vec b)
{
    TCGv_vec t = tcg_temp_new_vec_matching(d);

    tcg_gen_dupi_vec(vece, t, (8 << vece) - 1);
    tcg_gen_and_vec(vece, t, t, b);
    tcg_gen_sarv_vec(vece, d, a, t);
    tcg_temp_free_vec(t);
}

static void tcg_gen_sar_mod_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
{
    TCGv_i32 t = tcg_temp_new_i32();

    tcg_gen_andi_i32(t, b, 31);
    tcg_gen_sar_i32(d, a, t);
    tcg_temp_free_i32(t);
}

static void tcg_gen_sar_mod_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_andi_i64(t, b, 63);
    tcg_gen_sar_i64(d, a, t);
    tcg_temp_free_i64(t);
}

void tcg_gen_gvec_sarv(unsigned vece, uint32_t dofs, uint32_t aofs,
                       uint32_t bofs, uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode vecop_list[] = { INDEX_op_sarv_vec, 0 };
    static const GVecGen3 g[4] = {
        { .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar8v,
          .opt_opc = vecop_list,
          .vece = MO_8 },
        { .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar16v,
          .opt_opc = vecop_list,
          .vece = MO_16 },
        { .fni4 = tcg_gen_sar_mod_i32,
          .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar32v,
          .opt_opc = vecop_list,
          .vece = MO_32 },
        { .fni8 = tcg_gen_sar_mod_i64,
          .fniv = tcg_gen_sarv_mod_vec,
          .fno = gen_helper_gvec_sar64v,
          .opt_opc = vecop_list,
          .prefer_i64 = TCG_TARGET_REG_BITS == 64,
          .vece = MO_64 },
    };

    tcg_debug_assert(vece <= MO_64);
    tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g[vece]);
}

/* Expand OPRSZ bytes worth of three-operand operations using i32 elements. */
static void expand_cmp_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGv_i32 t1 = tcg_temp_new_i32();
    uint32_t i;

    for (i = 0; i < oprsz; i += 4) {
        tcg_gen_ld_i32(t0, cpu_env, aofs + i);
        tcg_gen_ld_i32(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i32(cond, t0, t0, t1);
        tcg_gen_neg_i32(t0, t0);
        tcg_gen_st_i32(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t0);
}

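/*
 * Illustrative host-integer sketch (not part of the library, name
 * hypothetical): why setcond followed by neg above yields the same
 * all-ones/all-zeros element that a vector compare produces.
 */
static inline uint32_t example_cmp_mask32(uint32_t a, uint32_t b)
{
    uint32_t t = (a == b);      /* setcond: 1 or 0 */
    return -t;                  /* neg: 0xffffffff or 0x00000000 */
}
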
static void expand_cmp_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
                           uint32_t oprsz, TCGCond cond)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();
    uint32_t i;

    for (i = 0; i < oprsz; i += 8) {
        tcg_gen_ld_i64(t0, cpu_env, aofs + i);
        tcg_gen_ld_i64(t1, cpu_env, bofs + i);
        tcg_gen_setcond_i64(cond, t0, t0, t1);
        tcg_gen_neg_i64(t0, t0);
        tcg_gen_st_i64(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_i64(t1);
    tcg_temp_free_i64(t0);
}

static void expand_cmp_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
                           uint32_t bofs, uint32_t oprsz, uint32_t tysz,
                           TCGType type, TCGCond cond)
{
    TCGv_vec t0 = tcg_temp_new_vec(type);
    TCGv_vec t1 = tcg_temp_new_vec(type);
    uint32_t i;

    for (i = 0; i < oprsz; i += tysz) {
        tcg_gen_ld_vec(t0, cpu_env, aofs + i);
        tcg_gen_ld_vec(t1, cpu_env, bofs + i);
        tcg_gen_cmp_vec(cond, vece, t0, t0, t1);
        tcg_gen_st_vec(t0, cpu_env, dofs + i);
    }
    tcg_temp_free_vec(t1);
    tcg_temp_free_vec(t0);
}

void tcg_gen_gvec_cmp(TCGCond cond, unsigned vece, uint32_t dofs,
                      uint32_t aofs, uint32_t bofs,
                      uint32_t oprsz, uint32_t maxsz)
{
    static const TCGOpcode cmp_list[] = { INDEX_op_cmp_vec, 0 };
    static gen_helper_gvec_3 * const eq_fn[4] = {
        gen_helper_gvec_eq8, gen_helper_gvec_eq16,
        gen_helper_gvec_eq32, gen_helper_gvec_eq64
    };
    static gen_helper_gvec_3 * const ne_fn[4] = {
        gen_helper_gvec_ne8, gen_helper_gvec_ne16,
        gen_helper_gvec_ne32, gen_helper_gvec_ne64
    };
    static gen_helper_gvec_3 * const lt_fn[4] = {
        gen_helper_gvec_lt8, gen_helper_gvec_lt16,
        gen_helper_gvec_lt32, gen_helper_gvec_lt64
    };
    static gen_helper_gvec_3 * const le_fn[4] = {
        gen_helper_gvec_le8, gen_helper_gvec_le16,
        gen_helper_gvec_le32, gen_helper_gvec_le64
    };
    static gen_helper_gvec_3 * const ltu_fn[4] = {
        gen_helper_gvec_ltu8, gen_helper_gvec_ltu16,
        gen_helper_gvec_ltu32, gen_helper_gvec_ltu64
    };
    static gen_helper_gvec_3 * const leu_fn[4] = {
        gen_helper_gvec_leu8, gen_helper_gvec_leu16,
        gen_helper_gvec_leu32, gen_helper_gvec_leu64
    };
    static gen_helper_gvec_3 * const * const fns[16] = {
        [TCG_COND_EQ] = eq_fn,
        [TCG_COND_NE] = ne_fn,
        [TCG_COND_LT] = lt_fn,
        [TCG_COND_LE] = le_fn,
        [TCG_COND_LTU] = ltu_fn,
        [TCG_COND_LEU] = leu_fn,
    };

    const TCGOpcode *hold_list;
    TCGType type;
    uint32_t some;

    check_size_align(oprsz, maxsz, dofs | aofs | bofs);
    check_overlap_3(dofs, aofs, bofs, maxsz);

    if (cond == TCG_COND_NEVER || cond == TCG_COND_ALWAYS) {
        do_dup(MO_8, dofs, oprsz, maxsz,
               NULL, NULL, -(cond == TCG_COND_ALWAYS));
        return;
    }

    /*
     * Implement inline with a vector type, if possible.
     * Prefer integer when 64-bit host and 64-bit comparison.
     */
    hold_list = tcg_swap_vecop_list(cmp_list);
    type = choose_vector_type(cmp_list, vece, oprsz,
                              TCG_TARGET_REG_BITS == 64 && vece == MO_64);
    switch (type) {
    case TCG_TYPE_V256:
        /* Recall that ARM SVE allows vector sizes that are not a
         * power of 2, but always a multiple of 16.  The intent is
         * that e.g. size == 80 would be expanded with 2x32 + 1x16.
         */
        some = QEMU_ALIGN_DOWN(oprsz, 32);
        expand_cmp_vec(vece, dofs, aofs, bofs, some, 32, TCG_TYPE_V256, cond);
        if (some == oprsz) {
            break;
        }
        dofs += some;
        aofs += some;
        bofs += some;
        oprsz -= some;
        maxsz -= some;
        /* fallthru */
    case TCG_TYPE_V128:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 16, TCG_TYPE_V128, cond);
        break;
    case TCG_TYPE_V64:
        expand_cmp_vec(vece, dofs, aofs, bofs, oprsz, 8, TCG_TYPE_V64, cond);
        break;

    case 0:
        if (vece == MO_64 && check_size_impl(oprsz, 8)) {
            expand_cmp_i64(dofs, aofs, bofs, oprsz, cond);
        } else if (vece == MO_32 && check_size_impl(oprsz, 4)) {
            expand_cmp_i32(dofs, aofs, bofs, oprsz, cond);
        } else {
            gen_helper_gvec_3 * const *fn = fns[cond];

            if (fn == NULL) {
                uint32_t tmp;
                tmp = aofs, aofs = bofs, bofs = tmp;
                cond = tcg_swap_cond(cond);
                fn = fns[cond];
                assert(fn != NULL);
            }
            tcg_gen_gvec_3_ool(dofs, aofs, bofs, oprsz, maxsz, 0, fn[vece]);
            oprsz = maxsz;
        }
        break;

    default:
        g_assert_not_reached();
    }
    tcg_swap_vecop_list(hold_list);

    if (oprsz < maxsz) {
        expand_clr(dofs + oprsz, maxsz - oprsz);
    }
}

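/*
 * Usage sketch (illustrative only, not part of the library; the
 * function name and offsets are hypothetical): byte-wise unsigned
 * a < b over a 16-byte vector, leaving 0xff or 0x00 in each result
 * byte.
 */
static void example_cmp_ltu8(uint32_t dofs, uint32_t aofs, uint32_t bofs)
{
    tcg_gen_gvec_cmp(TCG_COND_LTU, MO_8, dofs, aofs, bofs, 16, 16);
}
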
static void tcg_gen_bitsel_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b, TCGv_i64 c)
{
    TCGv_i64 t = tcg_temp_new_i64();

    tcg_gen_and_i64(t, b, a);
    tcg_gen_andc_i64(d, c, a);
    tcg_gen_or_i64(d, d, t);
    tcg_temp_free_i64(t);
}

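/*
 * Illustrative host-integer sketch (not part of the library, name
 * hypothetical): bitsel computes (b & a) | (c & ~a), taking bits
 * from b where the selector a is 1 and from c where it is 0.
 */
static inline uint64_t example_bitsel64(uint64_t a, uint64_t b, uint64_t c)
{
    return (b & a) | (c & ~a);
}
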
void tcg_gen_gvec_bitsel(unsigned vece, uint32_t dofs, uint32_t aofs,
                         uint32_t bofs, uint32_t cofs,
                         uint32_t oprsz, uint32_t maxsz)
{
    static const GVecGen4 g = {
        .fni8 = tcg_gen_bitsel_i64,
        .fniv = tcg_gen_bitsel_vec,
        .fno = gen_helper_gvec_bitsel,
    };

    tcg_gen_gvec_4(dofs, aofs, bofs, cofs, oprsz, maxsz, &g);
}