1 ;; Machine Description for shared bits common to IWMMXT and Neon.
2 ;; Copyright (C) 2006-2023 Free Software Foundation, Inc.
3 ;; Written by CodeSourcery.
5 ;; This file is part of GCC.
7 ;; GCC is free software; you can redistribute it and/or modify it
8 ;; under the terms of the GNU General Public License as published by
9 ;; the Free Software Foundation; either version 3, or (at your option)
12 ;; GCC is distributed in the hope that it will be useful, but
13 ;; WITHOUT ANY WARRANTY; without even the implied warranty of
14 ;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 ;; General Public License for more details.
17 ;; You should have received a copy of the GNU General Public License
18 ;; along with GCC; see the file COPYING3. If not see
19 ;; <http://www.gnu.org/licenses/>.
;; Vector move expander for modes valid on IWMMXT and/or MVE (VNIM1).
;; Asserts both operands are aligned, forces operand 1 into a register
;; when the destination is not a register, and converts vector
;; constants via neon_make_constant on Neon/MVE targets.
;; NOTE(review): this extract skips some source lines (the condition's
;; first arm and brace structure are missing) -- verify upstream.
23 (define_expand "mov<mode>"
24 [(set (match_operand:VNIM1 0 "nonimmediate_operand")
25 (match_operand:VNIM1 1 "general_operand"))]
27 || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))
28 || (TARGET_HAVE_MVE && VALID_MVE_SI_MODE (<MODE>mode))
29 || (TARGET_HAVE_MVE_FLOAT && VALID_MVE_SF_MODE (<MODE>mode))"
31 gcc_checking_assert (aligned_operand (operands[0], <MODE>mode));
32 gcc_checking_assert (aligned_operand (operands[1], <MODE>mode));
33 if (can_create_pseudo_p ())
35 if (!REG_P (operands[0]))
36 operands[1] = force_reg (<MODE>mode, operands[1]);
37 else if ((TARGET_NEON || TARGET_HAVE_MVE || TARGET_HAVE_MVE_FLOAT)
38 && (CONSTANT_P (operands[1])))
40 operands[1] = neon_make_constant (operands[1]);
41 gcc_assert (operands[1] != NULL_RTX);
;; Vector move expander for modes valid on IWMMXT but not MVE
;; (VNINOTM1).  Same shape as the VNIM1 expander above, but constants
;; are only legitimized through neon_make_constant on Neon targets.
46 (define_expand "mov<mode>"
47 [(set (match_operand:VNINOTM1 0 "nonimmediate_operand")
48 (match_operand:VNINOTM1 1 "general_operand"))]
50 || (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (<MODE>mode))"
52 gcc_checking_assert (aligned_operand (operands[0], <MODE>mode));
53 gcc_checking_assert (aligned_operand (operands[1], <MODE>mode));
54 if (can_create_pseudo_p ())
56 if (!REG_P (operands[0]))
57 operands[1] = force_reg (<MODE>mode, operands[1]);
58 else if (TARGET_NEON && CONSTANT_P (operands[1]))
60 operands[1] = neon_make_constant (operands[1]);
61 gcc_assert (operands[1] != NULL_RTX);
;; Dedicated V8HF move expander (Neon or MVE with FP).  Constants are
;; legitimized with neon_make_constant only on MVE-float targets here.
66 (define_expand "movv8hf"
67 [(set (match_operand:V8HF 0 "s_register_operand")
68 (match_operand:V8HF 1 "s_register_operand"))]
69 "TARGET_NEON || TARGET_HAVE_MVE_FLOAT"
71 gcc_checking_assert (aligned_operand (operands[0], E_V8HFmode));
72 gcc_checking_assert (aligned_operand (operands[1], E_V8HFmode));
73 if (can_create_pseudo_p ())
75 if (!REG_P (operands[0]))
76 operands[1] = force_reg (E_V8HFmode, operands[1]);
77 else if (TARGET_HAVE_MVE_FLOAT && CONSTANT_P (operands[1]))
79 operands[1] = neon_make_constant (operands[1]);
80 gcc_assert (operands[1] != NULL_RTX);
85 ;; Vector arithmetic. Expanders are blank, then unnamed insns implement
86 ;; patterns separately for Neon, IWMMXT and MVE.

;; Vector addition: empty expander, gated on the per-mode arithmetic
;; capability macro; matching is left to the backend-specific insns.
88 (define_expand "add<mode>3"
89 [(set (match_operand:VDQ 0 "s_register_operand")
90 (plus:VDQ (match_operand:VDQ 1 "s_register_operand")
91 (match_operand:VDQ 2 "s_register_operand")))]
92 "ARM_HAVE_<MODE>_ARITH"
;; Vector subtraction: empty expander; Neon/IWMMXT/MVE insns match it.
95 (define_expand "sub<mode>3"
96 [(set (match_operand:VDQ 0 "s_register_operand")
97 (minus:VDQ (match_operand:VDQ 1 "s_register_operand")
98 (match_operand:VDQ 2 "s_register_operand")))]
99 "ARM_HAVE_<MODE>_ARITH"
;; Vector multiplication: empty expander.  On IWMMXT only V4HI and
;; V2SI multiplies are available, hence the extra condition.
102 (define_expand "mul<mode>3"
103 [(set (match_operand:VDQWH 0 "s_register_operand")
104 (mult:VDQWH (match_operand:VDQWH 1 "s_register_operand")
105 (match_operand:VDQWH 2 "s_register_operand")))]
106 "ARM_HAVE_<MODE>_ARITH
107 && (!TARGET_REALLY_IWMMXT
108 || <MODE>mode == V4HImode
109 || <MODE>mode == V2SImode)"
;; Signed vector minimum: empty expander.
112 (define_expand "smin<mode>3"
113 [(set (match_operand:VALLW 0 "s_register_operand")
114 (smin:VALLW (match_operand:VALLW 1 "s_register_operand")
115 (match_operand:VALLW 2 "s_register_operand")))]
116 "ARM_HAVE_<MODE>_ARITH"
;; Unsigned vector minimum (integer modes only): empty expander.
119 (define_expand "umin<mode>3"
120 [(set (match_operand:VINTW 0 "s_register_operand")
121 (umin:VINTW (match_operand:VINTW 1 "s_register_operand")
122 (match_operand:VINTW 2 "s_register_operand")))]
123 "ARM_HAVE_<MODE>_ARITH"
;; Signed vector maximum: empty expander.
126 (define_expand "smax<mode>3"
127 [(set (match_operand:VALLW 0 "s_register_operand")
128 (smax:VALLW (match_operand:VALLW 1 "s_register_operand")
129 (match_operand:VALLW 2 "s_register_operand")))]
130 "ARM_HAVE_<MODE>_ARITH"
;; Unsigned vector maximum (integer modes only): empty expander.
133 (define_expand "umax<mode>3"
134 [(set (match_operand:VINTW 0 "s_register_operand")
135 (umax:VINTW (match_operand:VINTW 1 "s_register_operand")
136 (match_operand:VINTW 2 "s_register_operand")))]
137 "ARM_HAVE_<MODE>_ARITH"
;; General two-input permute with a variable selector (operand 3).
;; Neon only, and little-endian only; the real work is done in
;; arm_expand_vec_perm.
140 (define_expand "vec_perm<mode>"
141 [(match_operand:VE 0 "s_register_operand")
142 (match_operand:VE 1 "s_register_operand")
143 (match_operand:VE 2 "s_register_operand")
144 (match_operand:VE 3 "s_register_operand")]
145 "TARGET_NEON && !BYTES_BIG_ENDIAN"
147 arm_expand_vec_perm (operands[0], operands[1], operands[2], operands[3]);
;; Extract one element (immediate lane, operand 2) from a quad vector,
;; dispatching to the Neon or MVE extract pattern.
;; NOTE(review): the TARGET_NEON test preceding the first emit appears
;; to have been dropped from this extract -- verify upstream.
151 (define_expand "vec_extract<mode><V_elem_l>"
152 [(match_operand:<V_elem> 0 "nonimmediate_operand")
153 (match_operand:VQX_NOBF 1 "s_register_operand")
154 (match_operand:SI 2 "immediate_operand")]
155 "TARGET_NEON || TARGET_HAVE_MVE"
158 emit_insn (gen_neon_vec_extract<mode><V_elem_l> (operands[0], operands[1],
160 else if (TARGET_HAVE_MVE)
161 emit_insn (gen_mve_vec_extract<mode><V_elem_l> (operands[0], operands[1],
;; Insert element operand 1 into lane INTVAL(operand 2) of vector
;; operand 0.  The lane is encoded as a one-hot mask (1 << lane) for
;; the internal set patterns.
;; NOTE(review): the Neon/MVE dispatch conditions between the two
;; emits are missing from this extract -- verify upstream.
168 (define_expand "vec_set<mode>"
169 [(match_operand:VQX_NOBF 0 "s_register_operand" "")
170 (match_operand:<V_elem> 1 "s_register_operand" "")
171 (match_operand:SI 2 "immediate_operand" "")]
172 "TARGET_NEON || TARGET_HAVE_MVE"
174 HOST_WIDE_INT elem = HOST_WIDE_INT_1 << INTVAL (operands[2]);
176 emit_insn (gen_vec_set<mode>_internal (operands[0], operands[1],
177 GEN_INT (elem), operands[0]));
179 emit_insn (gen_mve_vec_set<mode>_internal (operands[0], operands[1],
180 GEN_INT (elem), operands[0]));
;; Vector AND: empty expander.  Operand 2 accepts immediates usable by
;; AND or inverted for BIC (neon_inv_logic_op2 predicate).
184 (define_expand "and<mode>3"
185 [(set (match_operand:VDQ 0 "s_register_operand" "")
186 (and:VDQ (match_operand:VDQ 1 "s_register_operand" "")
187 (match_operand:VDQ 2 "neon_inv_logic_op2" "")))]
188 "ARM_HAVE_<MODE>_ARITH"
;; Vector inclusive OR: empty expander; operand 2 uses the
;; neon_logic_op2 predicate for ORR-compatible immediates.
191 (define_expand "ior<mode>3"
192 [(set (match_operand:VDQ 0 "s_register_operand" "")
193 (ior:VDQ (match_operand:VDQ 1 "s_register_operand" "")
194 (match_operand:VDQ 2 "neon_logic_op2" "")))]
195 "ARM_HAVE_<MODE>_ARITH"
;; Vector exclusive OR: empty expander; register operands only.
198 (define_expand "xor<mode>3"
199 [(set (match_operand:VDQ 0 "s_register_operand" "")
200 (xor:VDQ (match_operand:VDQ 1 "s_register_operand" "")
201 (match_operand:VDQ 2 "s_register_operand" "")))]
202 "ARM_HAVE_<MODE>_ARITH"
;; Vector bitwise NOT: empty expander; not available on IWMMXT.
205 (define_expand "one_cmpl<mode>2"
206 [(set (match_operand:VDQ 0 "s_register_operand")
207 (not:VDQ (match_operand:VDQ 1 "s_register_operand")))]
208 "ARM_HAVE_<MODE>_ARITH && !TARGET_REALLY_IWMMXT"
;; Vector absolute value / negation (ABSNEG code iterator): empty
;; expander; not available on IWMMXT.
211 (define_expand "<absneg_str><mode>2"
212 [(set (match_operand:VDQWH 0 "s_register_operand" "")
213 (ABSNEG:VDQWH (match_operand:VDQWH 1 "s_register_operand" "")))]
214 "ARM_HAVE_<MODE>_ARITH && !TARGET_REALLY_IWMMXT"
;; Complex addition with rotation (vcadd): available with the Armv8.3-A
;; complex-number extension (TARGET_COMPLEX) or MVE with FP;
;; little-endian only.
217 (define_expand "cadd<rot><mode>3"
218 [(set (match_operand:VF 0 "register_operand")
219 (unspec:VF [(match_operand:VF 1 "register_operand")
220 (match_operand:VF 2 "register_operand")]
222 "(TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT
223 && ARM_HAVE_<MODE>_ARITH)) && !BYTES_BIG_ENDIAN"
226 ;; The complex mul operations always need to expand to two instructions.
227 ;; The first operation does half the computation and the second does the
228 ;; remainder. Because of this, expand early.
;; First vcmla accumulates onto a zero vector, second completes the
;; product into operand 0.
;; NOTE(review): the branch condition selecting between the
;; force_reg-zero path and the bare CONST0_RTX path is missing from
;; this extract (presumably an MVE vs Neon split) -- verify upstream.
229 (define_expand "cmul<conj_op><mode>3"
230 [(set (match_operand:VQ_HSF 0 "register_operand")
231 (unspec:VQ_HSF [(match_operand:VQ_HSF 1 "register_operand")
232 (match_operand:VQ_HSF 2 "register_operand")]
234 "(TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT))
235 && !BYTES_BIG_ENDIAN"
237 rtx res1 = gen_reg_rtx (<MODE>mode);
240 rtx tmp = force_reg (<MODE>mode, CONST0_RTX (<MODE>mode));
241 emit_insn (gen_arm_vcmla<rotsplit1><mode> (res1, tmp,
242 operands[2], operands[1]));
245 emit_insn (gen_arm_vcmla<rotsplit1><mode> (res1, CONST0_RTX (<MODE>mode),
246 operands[2], operands[1]));
248 emit_insn (gen_arm_vcmla<rotsplit2><mode> (operands[0], res1,
249 operands[2], operands[1]));
;; vcmla: accumulate (operand 1) plus the rotated complex product of
;; operands 2 and 3.  Used as the building block of cmul/cml above and
;; below.
253 (define_expand "arm_vcmla<rot><mode>"
254 [(set (match_operand:VF 0 "register_operand")
255 (plus:VF (match_operand:VF 1 "register_operand")
256 (unspec:VF [(match_operand:VF 2 "register_operand")
257 (match_operand:VF 3 "register_operand")]
259 "(TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT
260 && ARM_HAVE_<MODE>_ARITH)) && !BYTES_BIG_ENDIAN"
263 ;; The complex mla/mls operations always need to expand to two instructions.
264 ;; The first operation does half the computation and the second does the
265 ;; remainder. Because of this, expand early.
;; Accumulator is operand 3; the two vcmla halves chain through tmp.
266 (define_expand "cml<fcmac1><conj_op><mode>4"
267 [(set (match_operand:VF 0 "register_operand")
268 (plus:VF (unspec:VF [(match_operand:VF 1 "register_operand")
269 (match_operand:VF 2 "register_operand")]
271 (match_operand:VF 3 "register_operand")))]
272 "(TARGET_COMPLEX || (TARGET_HAVE_MVE && TARGET_HAVE_MVE_FLOAT
273 && ARM_HAVE_<MODE>_ARITH)) && !BYTES_BIG_ENDIAN"
275 rtx tmp = gen_reg_rtx (<MODE>mode);
276 emit_insn (gen_arm_vcmla<rotsplit1><mode> (tmp, operands[3],
277 operands[2], operands[1]));
278 emit_insn (gen_arm_vcmla<rotsplit2><mode> (operands[0], tmp,
279 operands[2], operands[1]));
;; Misaligned vector load/store.  Must not fail during expansion: if
;; neither operand is a register, operand 1 is forced into one.  The
;; memory side is tracked via *memloc; for MVE, accesses aligned below
;; the element size are re-expressed in a narrower-element vector mode
;; (recursing through gen_movmisalign), then the address is
;; legitimized for the target's vector addressing constraints.
;; NOTE(review): this extract is missing several lines (the for_store
;; else-branch, the switch cases mapping MEM_ALIGN to new_smode, and
;; parts of the address-legitimization tail) -- verify upstream before
;; relying on the exact control flow.
283 (define_expand "@movmisalign<mode>"
284 [(set (match_operand:VDQ 0 "nonimmediate_operand")
285 (unspec:VDQ [(match_operand:VDQ 1 "general_operand")]
286 UNSPEC_MISALIGNED_ACCESS))]
287 "ARM_HAVE_<MODE>_LDST && !BYTES_BIG_ENDIAN
288 && unaligned_access && !TARGET_REALLY_IWMMXT"
291 bool for_store = false;
292 /* This pattern is not permitted to fail during expansion: if both arguments
293 are non-registers (e.g. memory := constant, which can be created by the
294 auto-vectorizer), force operand 1 into a register. */
295 if (!s_register_operand (operands[0], <MODE>mode)
296 && !s_register_operand (operands[1], <MODE>mode))
297 operands[1] = force_reg (<MODE>mode, operands[1]);
299 if (s_register_operand (operands[0], <MODE>mode))
300 memloc = &operands[1];
303 memloc = &operands[0];
307 /* For MVE, vector loads/stores must be aligned to the element size. If the
308 alignment is less than that convert the load/store to a suitable mode. */
310 && (MEM_ALIGN (*memloc)
311 < GET_MODE_ALIGNMENT (GET_MODE_INNER (<MODE>mode))))
313 scalar_mode new_smode;
314 switch (MEM_ALIGN (*memloc))
327 machine_mode new_mode
328 = mode_for_vector (new_smode,
329 GET_MODE_SIZE (<MODE>mode)
330 / GET_MODE_SIZE (new_smode)).require ();
331 rtx new_mem = adjust_address (*memloc, new_mode, 0);
335 rtx reg = gen_reg_rtx (new_mode);
336 emit_insn (gen_movmisalign (new_mode, reg, new_mem));
337 emit_move_insn (operands[0], gen_lowpart (<MODE>mode, reg));
340 emit_insn (gen_movmisalign (new_mode, new_mem,
341 gen_lowpart (new_mode, operands[1])));
345 /* Legitimize address. */
347 && !mve_vector_mem_operand (<MODE>mode, XEXP (*memloc, 0), false))
349 && !neon_vector_mem_operand (*memloc, 2, false)))
352 = replace_equiv_address (*memloc,
353 force_reg (Pmode, XEXP (*memloc, 0)),
355 gcc_assert (MEM_ALIGN (new_mem) == MEM_ALIGN (*memloc));
;; Left shift insn shared by MVE and Neon.  Two alternatives:
;; register shift amount (vshl.<supf>) and immediate shift amount
;; (rendered via neon_output_shift_immediate).
360 (define_insn "mve_vshlq_<supf><mode>"
361 [(set (match_operand:VDQIW 0 "s_register_operand" "=w,w")
362 (unspec:VDQIW [(match_operand:VDQIW 1 "s_register_operand" "w,w")
363 (match_operand:VDQIW 2 "imm_lshift_or_reg_neon" "w,Ds")]
365 "ARM_HAVE_<MODE>_ARITH && !TARGET_REALLY_IWMMXT"
367 vshl.<supf>%#<V_sz_elem>\t%<V_reg>0, %<V_reg>1, %<V_reg>2
368 * return neon_output_shift_immediate (\"vshl\", 'i', &operands[2], <MODE>mode, VALID_NEON_QREG_MODE (<MODE>mode), true);"
369 [(set_attr "type" "neon_shift_reg<q>, neon_shift_imm<q>")]
;; Vector shift left: logical/arithmetic left shifts are identical, so
;; unconditionally route through the unsigned mve_vshlq pattern.
372 (define_expand "vashl<mode>3"
373 [(set (match_operand:VDQIW 0 "s_register_operand" "")
374 (ashift:VDQIW (match_operand:VDQIW 1 "s_register_operand" "")
375 (match_operand:VDQIW 2 "imm_lshift_or_reg_neon" "")))]
376 "ARM_HAVE_<MODE>_ARITH && !TARGET_REALLY_IWMMXT"
378 emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], operands[2]));
382 ;; When operand 2 is an immediate, use the normal expansion to match
383 ;; gen_vashr<mode>3_imm for Neon and gen_mve_vshrq_n_s<mode>_imm for
;; MVE.  For a register shift amount there is no right-shift insn, so
;; emit a signed left shift by the negated amount instead.
385 (define_expand "vashr<mode>3"
386 [(set (match_operand:VDQIW 0 "s_register_operand")
387 (ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
388 (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
389 "ARM_HAVE_<MODE>_ARITH && !TARGET_REALLY_IWMMXT"
391 if (s_register_operand (operands[2], <MODE>mode))
393 rtx neg = gen_reg_rtx (<MODE>mode);
394 emit_insn (gen_neg<mode>2 (neg, operands[2]));
395 emit_insn (gen_mve_vshlq_s<mode> (operands[0], operands[1], neg));
400 ;; When operand 2 is an immediate, use the normal expansion to match
;; gen_vlshr<mode>3_imm for Neon and gen_mve_vshrq_n_u<mode>_imm for
;; MVE.  (The extract read "gen_vashr<mode>3_imm" here, which belongs
;; to the signed pattern above; the unsigned vshrq reference confirms
;; this comment describes the logical shift.)  For a register shift
;; amount, emit an unsigned left shift by the negated amount.
403 (define_expand "vlshr<mode>3"
404 [(set (match_operand:VDQIW 0 "s_register_operand")
405 (lshiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
406 (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
407 "ARM_HAVE_<MODE>_ARITH && !TARGET_REALLY_IWMMXT"
409 if (s_register_operand (operands[2], <MODE>mode))
411 rtx neg = gen_reg_rtx (<MODE>mode);
412 emit_insn (gen_neg<mode>2 (neg, operands[2]));
413 emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], neg));
418 ;; Conditional instructions. These are comparisons with conditional moves for
419 ;; vectors. They perform the assignment:
421 ;; Vop0 = (Vop4 <op3> Vop5) ? Vop1 : Vop2;
423 ;; where op3 is <, <=, ==, !=, >= or >. Operations are performed
;; Same-mode vcond: comparison operands share the result's mode.
;; Float modes require -funsafe-math-optimizations (the comparisons
;; are not IEEE-exact otherwise).
426 (define_expand "vcond<mode><mode>"
427 [(set (match_operand:VDQWH 0 "s_register_operand")
429 (match_operator 3 "comparison_operator"
430 [(match_operand:VDQWH 4 "s_register_operand")
431 (match_operand:VDQWH 5 "reg_or_zero_operand")])
432 (match_operand:VDQWH 1 "s_register_operand")
433 (match_operand:VDQWH 2 "s_register_operand")))]
434 "ARM_HAVE_<MODE>_ARITH
435 && !TARGET_REALLY_IWMMXT
436 && (!<Is_float_mode> || flag_unsafe_math_optimizations)"
438 arm_expand_vcond (operands, <V_cmp_result>mode);
;; Mixed-mode vcond for 32-bit-element modes: comparison in V32,
;; selected values in the converted-to mode <V_CVTTO>.
442 (define_expand "vcond<V_cvtto><mode>"
443 [(set (match_operand:<V_CVTTO> 0 "s_register_operand")
444 (if_then_else:<V_CVTTO>
445 (match_operator 3 "comparison_operator"
446 [(match_operand:V32 4 "s_register_operand")
447 (match_operand:V32 5 "reg_or_zero_operand")])
448 (match_operand:<V_CVTTO> 1 "s_register_operand")
449 (match_operand:<V_CVTTO> 2 "s_register_operand")))]
450 "ARM_HAVE_<MODE>_ARITH
451 && !TARGET_REALLY_IWMMXT
452 && (!<Is_float_mode> || flag_unsafe_math_optimizations)"
454 arm_expand_vcond (operands, <V_cmp_result>mode);
;; Mixed-mode vcond for 16-bit-element modes: comparison in V16,
;; selected values in <VH_CVTTO>.
458 (define_expand "vcond<VH_cvtto><mode>"
459 [(set (match_operand:<VH_CVTTO> 0 "s_register_operand")
460 (if_then_else:<VH_CVTTO>
461 (match_operator 3 "comparison_operator"
462 [(match_operand:V16 4 "s_register_operand")
463 (match_operand:V16 5 "reg_or_zero_operand")])
464 (match_operand:<VH_CVTTO> 1 "s_register_operand")
465 (match_operand:<VH_CVTTO> 2 "s_register_operand")))]
466 "ARM_HAVE_<MODE>_ARITH
467 && !TARGET_REALLY_IWMMXT
468 && (!<Is_float_mode> || flag_unsafe_math_optimizations)"
470 arm_expand_vcond (operands, <V_cmp_result>mode);
;; Unsigned vcond: comparison operands are in the integer
;; comparison-result mode; no float condition needed here.
474 (define_expand "vcondu<mode><v_cmp_result>"
475 [(set (match_operand:VDQW 0 "s_register_operand")
477 (match_operator 3 "arm_comparison_operator"
478 [(match_operand:<V_cmp_result> 4 "s_register_operand")
479 (match_operand:<V_cmp_result> 5 "reg_or_zero_operand")])
480 (match_operand:VDQW 1 "s_register_operand")
481 (match_operand:VDQW 2 "s_register_operand")))]
482 "ARM_HAVE_<MODE>_ARITH
483 && !TARGET_REALLY_IWMMXT"
485 arm_expand_vcond (operands, <V_cmp_result>mode);
;; Two-register (OI mode) interleaved load: Neon vld2 or MVE vld2q.
489 (define_expand "vec_load_lanesoi<mode>"
490 [(set (match_operand:OI 0 "s_register_operand")
491 (unspec:OI [(match_operand:OI 1 "neon_struct_operand")
492 (unspec:VQ2 [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
494 "TARGET_NEON || TARGET_HAVE_MVE"
497 emit_insn (gen_neon_vld2<mode> (operands[0], operands[1]));
499 emit_insn (gen_mve_vld2q<mode> (operands[0], operands[1]));
;; Two-register (OI mode) interleaved store: Neon vst2 or MVE vst2q.
503 (define_expand "vec_store_lanesoi<mode>"
504 [(set (match_operand:OI 0 "neon_struct_operand")
505 (unspec:OI [(match_operand:OI 1 "s_register_operand")
506 (unspec:VQ2 [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
508 "TARGET_NEON || TARGET_HAVE_MVE"
511 emit_insn (gen_neon_vst2<mode> (operands[0], operands[1]));
513 emit_insn (gen_mve_vst2q<mode> (operands[0], operands[1]));
;; Four-register (XI mode) interleaved load: Neon vld4 or MVE vld4q.
517 (define_expand "vec_load_lanesxi<mode>"
518 [(match_operand:XI 0 "s_register_operand")
519 (match_operand:XI 1 "neon_struct_operand")
520 (unspec:VQ2 [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
521 "TARGET_NEON || TARGET_HAVE_MVE"
524 emit_insn (gen_neon_vld4<mode> (operands[0], operands[1]));
526 emit_insn (gen_mve_vld4q<mode> (operands[0], operands[1]));
;; Four-register (XI mode) interleaved store: Neon vst4 or MVE vst4q.
530 (define_expand "vec_store_lanesxi<mode>"
531 [(match_operand:XI 0 "neon_struct_operand")
532 (match_operand:XI 1 "s_register_operand")
533 (unspec:VQ2 [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
534 "TARGET_NEON || TARGET_HAVE_MVE"
537 emit_insn (gen_neon_vst4<mode> (operands[0], operands[1]));
539 emit_insn (gen_mve_vst4q<mode> (operands[0], operands[1]));
;; Sum-reduction of a quad vector to a scalar.  Neon path: fold the
;; quad into halves and recurse via the half-mode reduction.  MVE
;; path: vaddv produces a 32-bit accumulator in SImode, then the low
;; part is moved into the element-mode result.
;; NOTE(review): the Neon/MVE branch condition between the two paths
;; is missing from this extract -- verify upstream.
543 (define_expand "reduc_plus_scal_<mode>"
544 [(match_operand:<V_elem> 0 "nonimmediate_operand")
545 (match_operand:VQ 1 "s_register_operand")]
546 "ARM_HAVE_<MODE>_ARITH
547 && !(TARGET_HAVE_MVE && FLOAT_MODE_P (<MODE>mode))
548 && !BYTES_BIG_ENDIAN"
552 rtx step1 = gen_reg_rtx (<V_HALF>mode);
554 emit_insn (gen_quad_halves_plus<mode> (step1, operands[1]));
555 emit_insn (gen_reduc_plus_scal_<V_half> (operands[0], step1));
559 /* vaddv generates a 32 bits accumulator. */
560 rtx op0 = gen_reg_rtx (SImode);
562 emit_insn (gen_mve_vaddvq (VADDVQ_S, <MODE>mode, op0, operands[1]));
563 emit_move_insn (operands[0], gen_lowpart (<V_elem>mode, op0));
;; Signed halving add, rounding toward negative infinity:
;; MVE vhaddq or Neon vhadd (signed variants).
569 (define_expand "avg<mode>3_floor"
570 [(match_operand:MVE_2 0 "s_register_operand")
571 (match_operand:MVE_2 1 "s_register_operand")
572 (match_operand:MVE_2 2 "s_register_operand")]
573 "ARM_HAVE_<MODE>_ARITH"
576 emit_insn (gen_mve_vhaddq (VHADDQ_S, <MODE>mode,
577 operands[0], operands[1], operands[2]));
579 emit_insn (gen_neon_vhadd (UNSPEC_VHADD_S, UNSPEC_VHADD_S, <MODE>mode,
580 operands[0], operands[1], operands[2]));
;; Unsigned halving add, rounding toward negative infinity:
;; MVE vhaddq or Neon vhadd (unsigned variants).
584 (define_expand "uavg<mode>3_floor"
585 [(match_operand:MVE_2 0 "s_register_operand")
586 (match_operand:MVE_2 1 "s_register_operand")
587 (match_operand:MVE_2 2 "s_register_operand")]
588 "ARM_HAVE_<MODE>_ARITH"
591 emit_insn (gen_mve_vhaddq (VHADDQ_U, <MODE>mode,
592 operands[0], operands[1], operands[2]));
594 emit_insn (gen_neon_vhadd (UNSPEC_VHADD_U, UNSPEC_VHADD_U, <MODE>mode,
595 operands[0], operands[1], operands[2]));
;; Signed rounding halving add (rounds toward positive infinity):
;; MVE vrhaddq or Neon vrhadd (signed variants).
599 (define_expand "avg<mode>3_ceil"
600 [(match_operand:MVE_2 0 "s_register_operand")
601 (match_operand:MVE_2 1 "s_register_operand")
602 (match_operand:MVE_2 2 "s_register_operand")]
603 "ARM_HAVE_<MODE>_ARITH"
606 emit_insn (gen_mve_vrhaddq (VRHADDQ_S, <MODE>mode,
607 operands[0], operands[1], operands[2]));
609 emit_insn (gen_neon_vhadd (UNSPEC_VRHADD_S, UNSPEC_VRHADD_S, <MODE>mode,
610 operands[0], operands[1], operands[2]));
;; Unsigned rounding halving add (rounds toward positive infinity):
;; MVE vrhaddq or Neon vrhadd (unsigned variants).
614 (define_expand "uavg<mode>3_ceil"
615 [(match_operand:MVE_2 0 "s_register_operand")
616 (match_operand:MVE_2 1 "s_register_operand")
617 (match_operand:MVE_2 2 "s_register_operand")]
618 "ARM_HAVE_<MODE>_ARITH"
621 emit_insn (gen_mve_vrhaddq (VRHADDQ_U, <MODE>mode,
622 operands[0], operands[1], operands[2]));
624 emit_insn (gen_neon_vhadd (UNSPEC_VRHADD_U, UNSPEC_VRHADD_U, <MODE>mode,
625 operands[0], operands[1], operands[2]));
;; Vector count-leading-zeros: empty expander; not on IWMMXT.
629 (define_expand "clz<mode>2"
630 [(set (match_operand:VDQIW 0 "s_register_operand")
631 (clz:VDQIW (match_operand:VDQIW 1 "s_register_operand")))]
632 "ARM_HAVE_<MODE>_ARITH
633 && !TARGET_REALLY_IWMMXT"
;; Initialize a vector from a PARALLEL of elements (operand 1);
;; delegates entirely to neon_expand_vector_init.
635 (define_expand "vec_init<mode><V_elem_l>"
636 [(match_operand:VDQX 0 "s_register_operand")
637 (match_operand 1 "" "")]
638 "TARGET_NEON || (TARGET_HAVE_MVE && VALID_MVE_MODE (<MODE>mode))"
640 neon_expand_vector_init (operands[0], operands[1]);