Add config file so b4 uses inbox.sourceware.org automatically
[official-gcc.git] / gcc / config / riscv / riscv-vector-builtins-bases.cc
blob193392fbcc2a8d027d43ca2f007a7f5be40a469e
1 /* function_base implementation for RISC-V 'V' Extension for GNU compiler.
2 Copyright (C) 2022-2024 Free Software Foundation, Inc.
3 Contributed by Ju-Zhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "memmodel.h"
29 #include "insn-codes.h"
30 #include "optabs.h"
31 #include "recog.h"
32 #include "expr.h"
33 #include "basic-block.h"
34 #include "function.h"
35 #include "fold-const.h"
36 #include "gimple.h"
37 #include "gimple-iterator.h"
38 #include "gimplify.h"
39 #include "explow.h"
40 #include "emit-rtl.h"
41 #include "tree-vector-builder.h"
42 #include "rtx-vector-builder.h"
43 #include "riscv-vector-builtins.h"
44 #include "riscv-vector-builtins-shapes.h"
45 #include "riscv-vector-builtins-bases.h"
47 using namespace riscv_vector;
49 namespace riscv_vector {
/* Kinds of load/store addressing used by the 'V' extension builtins.
   Only used in this file, so deliberately not declared in
   riscv-vector-builtins-bases.h.  */
enum lst_type
{
  LST_UNIT_STRIDE,
  LST_STRIDED,
  LST_INDEXED,
};

/* Whether a builtin carries an explicit floating-point rounding mode
   (frm) operand.  */
enum frm_op_type
{
  NO_FRM,
  HAS_FRM,
};
67 /* Helper function to fold vleff and vlsegff. */
68 static gimple *
69 fold_fault_load (gimple_folder &f)
71 /* fold fault_load (const *base, size_t *new_vl, size_t vl)
73 ====> fault_load (const *base, size_t vl)
74 new_vl = MEM_REF[read_vl ()]. */
76 auto_vec<tree> vargs (gimple_call_num_args (f.call) - 1);
78 for (unsigned i = 0; i < gimple_call_num_args (f.call); i++)
80 /* Exclude size_t *new_vl argument. */
81 if (i == gimple_call_num_args (f.call) - 2)
82 continue;
84 vargs.quick_push (gimple_call_arg (f.call, i));
87 gimple *repl = gimple_build_call_vec (gimple_call_fn (f.call), vargs);
88 gimple_call_set_lhs (repl, f.lhs);
90 /* Handle size_t *new_vl by read_vl. */
91 tree new_vl = gimple_call_arg (f.call, gimple_call_num_args (f.call) - 2);
92 if (integer_zerop (new_vl))
94 /* This case happens when user passes the nullptr to new_vl argument.
95 In this case, we just need to ignore the new_vl argument and return
96 fault_load instruction directly. */
97 return repl;
100 tree tmp_var = create_tmp_var (size_type_node, "new_vl");
101 tree decl = get_read_vl_decl ();
102 gimple *g = gimple_build_call (decl, 0);
103 gimple_call_set_lhs (g, tmp_var);
104 tree indirect
105 = fold_build2 (MEM_REF, size_type_node,
106 gimple_call_arg (f.call, gimple_call_num_args (f.call) - 2),
107 build_int_cst (build_pointer_type (size_type_node), 0));
108 gassign *assign = gimple_build_assign (indirect, tmp_var);
110 gsi_insert_after (f.gsi, assign, GSI_SAME_STMT);
111 gsi_insert_after (f.gsi, g, GSI_SAME_STMT);
112 return repl;
115 /* Implements vsetvl<mode> && vsetvlmax<mode>. */
116 template<bool VLMAX_P>
117 class vsetvl : public function_base
119 public:
120 bool apply_vl_p () const override
122 return false;
125 rtx expand (function_expander &e) const override
127 if (VLMAX_P)
128 e.add_input_operand (Pmode, gen_rtx_REG (Pmode, 0));
129 else
130 e.add_input_operand (0);
132 tree type = builtin_types[e.type.index].vector;
133 machine_mode mode = TYPE_MODE (type);
135 if (TARGET_XTHEADVECTOR)
137 machine_mode inner_mode = GET_MODE_INNER (mode);
138 /* SEW. */
139 e.add_input_operand (Pmode,
140 gen_int_mode (GET_MODE_BITSIZE (inner_mode), Pmode));
141 /* LMUL. */
142 e.add_input_operand (Pmode,
143 gen_int_mode (get_vlmul (mode), Pmode));
145 else
147 /* Normalize same RATO (SEW/LMUL) into same vsetvl instruction.
149 - e8,mf8/e16,mf4/e32,mf2/e64,m1 --> e8mf8
150 - e8,mf4/e16,mf2/e32,m1/e64,m2 --> e8mf4
151 - e8,mf2/e16,m1/e32,m2/e64,m4 --> e8mf2
152 - e8,m1/e16,m2/e32,m4/e64,m8 --> e8m1
153 - e8,m2/e16,m4/e32,m8 --> e8m2
154 - e8,m4/e16,m8 --> e8m4
155 - e8,m8 --> e8m8
157 /* SEW. */
158 e.add_input_operand (Pmode, gen_int_mode (8, Pmode));
160 /* LMUL. */
161 machine_mode e8_mode
162 = get_vector_mode (QImode, GET_MODE_NUNITS (mode)).require ();
163 e.add_input_operand (Pmode, gen_int_mode (get_vlmul (e8_mode), Pmode));
166 /* TAIL_ANY. */
167 e.add_input_operand (Pmode,
168 gen_int_mode (get_prefer_tail_policy (), Pmode));
170 /* MASK_ANY. */
171 e.add_input_operand (Pmode,
172 gen_int_mode (get_prefer_mask_policy (), Pmode));
173 return e.generate_insn (code_for_vsetvl_no_side_effects (Pmode));
177 /* Implements
178 * vle.v/vse.v/vlm.v/vsm.v/vlse.v/vsse.v/vluxei.v/vloxei.v/vsuxei.v/vsoxei.v
179 * codegen. */
180 template<bool STORE_P, lst_type LST_TYPE, bool ORDERED_P>
181 class loadstore : public function_base
183 public:
184 bool apply_tail_policy_p () const override { return !STORE_P; }
185 bool apply_mask_policy_p () const override { return !STORE_P; }
187 unsigned int call_properties (const function_instance &) const override
189 if (STORE_P)
190 return CP_WRITE_MEMORY;
191 else
192 return CP_READ_MEMORY;
195 bool can_be_overloaded_p (enum predication_type_index pred) const override
197 if (STORE_P || LST_TYPE == LST_INDEXED)
198 return true;
199 return pred != PRED_TYPE_none;
202 rtx expand (function_expander &e) const override
204 if (LST_TYPE == LST_INDEXED)
206 int unspec = ORDERED_P ? UNSPEC_ORDERED : UNSPEC_UNORDERED;
207 if (STORE_P)
208 return e.use_exact_insn (
209 code_for_pred_indexed_store (unspec, e.vector_mode (),
210 e.index_mode ()));
211 else
213 unsigned src_eew_bitsize
214 = GET_MODE_BITSIZE (GET_MODE_INNER (e.index_mode ()));
215 unsigned dst_eew_bitsize
216 = GET_MODE_BITSIZE (GET_MODE_INNER (e.vector_mode ()));
217 if (dst_eew_bitsize == src_eew_bitsize)
218 return e.use_exact_insn (
219 code_for_pred_indexed_load_same_eew (unspec, e.vector_mode ()));
220 else if (dst_eew_bitsize > src_eew_bitsize)
222 unsigned factor = dst_eew_bitsize / src_eew_bitsize;
223 switch (factor)
225 case 2:
226 return e.use_exact_insn (
227 code_for_pred_indexed_load_x2_greater_eew (
228 unspec, e.vector_mode ()));
229 case 4:
230 return e.use_exact_insn (
231 code_for_pred_indexed_load_x4_greater_eew (
232 unspec, e.vector_mode ()));
233 case 8:
234 return e.use_exact_insn (
235 code_for_pred_indexed_load_x8_greater_eew (
236 unspec, e.vector_mode ()));
237 default:
238 gcc_unreachable ();
241 else
243 unsigned factor = src_eew_bitsize / dst_eew_bitsize;
244 switch (factor)
246 case 2:
247 return e.use_exact_insn (
248 code_for_pred_indexed_load_x2_smaller_eew (
249 unspec, e.vector_mode ()));
250 case 4:
251 return e.use_exact_insn (
252 code_for_pred_indexed_load_x4_smaller_eew (
253 unspec, e.vector_mode ()));
254 case 8:
255 return e.use_exact_insn (
256 code_for_pred_indexed_load_x8_smaller_eew (
257 unspec, e.vector_mode ()));
258 default:
259 gcc_unreachable ();
264 else if (LST_TYPE == LST_STRIDED)
266 if (STORE_P)
267 return e.use_contiguous_store_insn (
268 code_for_pred_strided_store (e.vector_mode ()));
269 else
270 return e.use_contiguous_load_insn (
271 code_for_pred_strided_load (e.vector_mode ()));
273 else
275 if (STORE_P)
276 return e.use_contiguous_store_insn (
277 code_for_pred_store (e.vector_mode ()));
278 else
279 return e.use_contiguous_load_insn (
280 code_for_pred_mov (e.vector_mode ()));
285 /* Implements
286 vadd/vsub/vand/vor/vxor/vsll/vsra/vsrl/
287 vmin/vmax/vminu/vmaxu/vdiv/vrem/vdivu/
288 vremu/vsadd/vsaddu/vssub/vssubu
289 vfadd/vfsub/
291 template <rtx_code CODE, bool MAY_REQUIRE_FRM = false,
292 enum frm_op_type FRM_OP = NO_FRM>
293 class binop : public function_base
295 public:
296 bool has_rounding_mode_operand_p () const override
298 return FRM_OP == HAS_FRM;
301 bool may_require_frm_p () const override { return MAY_REQUIRE_FRM; }
303 rtx expand (function_expander &e) const override
305 switch (e.op_info->op)
307 case OP_TYPE_vx:
308 gcc_assert (FRM_OP == NO_FRM);
309 case OP_TYPE_vf:
310 return e.use_exact_insn (code_for_pred_scalar (CODE, e.vector_mode ()));
311 case OP_TYPE_vv:
312 return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
313 default:
314 gcc_unreachable ();
319 /* Implements vrsub. */
320 class vrsub : public function_base
322 public:
323 rtx expand (function_expander &e) const override
325 return e.use_exact_insn (
326 code_for_pred_sub_reverse_scalar (e.vector_mode ()));
330 /* Implements vneg/vnot. */
331 template<rtx_code CODE, enum frm_op_type FRM_OP = NO_FRM>
332 class unop : public function_base
334 public:
335 bool has_rounding_mode_operand_p () const override
337 return FRM_OP == HAS_FRM;
340 bool may_require_frm_p () const override { return true; }
342 rtx expand (function_expander &e) const override
344 return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
348 /* Implements vsext.vf2/vsext.vf4/vsext.vf8/vzext.vf2/vzext.vf4/vzext.vf8. */
349 template<rtx_code CODE>
350 class ext : public function_base
352 public:
353 rtx expand (function_expander &e) const override
355 switch (e.op_info->op)
357 case OP_TYPE_vf2:
358 return e.use_exact_insn (code_for_pred_vf2 (CODE, e.vector_mode ()));
359 case OP_TYPE_vf4:
360 return e.use_exact_insn (code_for_pred_vf4 (CODE, e.vector_mode ()));
361 case OP_TYPE_vf8:
362 return e.use_exact_insn (code_for_pred_vf8 (CODE, e.vector_mode ()));
363 default:
364 gcc_unreachable ();
369 /* Implements vmulh/vmulhu/vmulhsu. */
370 template<int UNSPEC>
371 class vmulh : public function_base
373 public:
374 rtx expand (function_expander &e) const override
376 switch (e.op_info->op)
378 case OP_TYPE_vx:
379 return e.use_exact_insn (
380 code_for_pred_mulh_scalar (UNSPEC, e.vector_mode ()));
381 case OP_TYPE_vv:
382 return e.use_exact_insn (
383 code_for_pred_mulh (UNSPEC, e.vector_mode ()));
384 default:
385 gcc_unreachable ();
390 /* Implements vwadd/vwsub/vwmul. */
391 template<rtx_code CODE1, rtx_code CODE2 = FLOAT_EXTEND>
392 class widen_binop : public function_base
394 public:
395 rtx expand (function_expander &e) const override
397 switch (e.op_info->op)
399 case OP_TYPE_vv:
400 return e.use_exact_insn (
401 code_for_pred_dual_widen (CODE1, CODE2, e.vector_mode ()));
402 case OP_TYPE_vx:
403 return e.use_exact_insn (
404 code_for_pred_dual_widen_scalar (CODE1, CODE2, e.vector_mode ()));
405 case OP_TYPE_wv:
406 if (CODE1 == PLUS)
407 return e.use_exact_insn (
408 code_for_pred_single_widen_add (CODE2, e.vector_mode ()));
409 else
410 return e.use_exact_insn (
411 code_for_pred_single_widen_sub (CODE2, e.vector_mode ()));
412 case OP_TYPE_wx:
413 return e.use_exact_insn (
414 code_for_pred_single_widen_scalar (CODE1, CODE2, e.vector_mode ()));
415 default:
416 gcc_unreachable ();
421 /* Implement vfwadd/vfwsub/vfwmul. */
422 template<rtx_code CODE, enum frm_op_type FRM_OP = NO_FRM>
423 class widen_binop_fp : public function_base
425 public:
426 bool has_rounding_mode_operand_p () const override
428 return FRM_OP == HAS_FRM;
431 bool may_require_frm_p () const override { return true; }
433 rtx expand (function_expander &e) const override
435 switch (e.op_info->op)
437 case OP_TYPE_vv:
438 return e.use_exact_insn (
439 code_for_pred_dual_widen (CODE, e.vector_mode ()));
440 case OP_TYPE_vf:
441 return e.use_exact_insn (
442 code_for_pred_dual_widen_scalar (CODE, e.vector_mode ()));
443 case OP_TYPE_wv:
444 if (CODE == PLUS)
445 return e.use_exact_insn (
446 code_for_pred_single_widen_add (e.vector_mode ()));
447 else
448 return e.use_exact_insn (
449 code_for_pred_single_widen_sub (e.vector_mode ()));
450 case OP_TYPE_wf:
451 return e.use_exact_insn (
452 code_for_pred_single_widen_scalar (CODE, e.vector_mode ()));
453 default:
454 gcc_unreachable ();
459 /* Implements vwmulsu. */
460 class vwmulsu : public function_base
462 public:
463 rtx expand (function_expander &e) const override
465 switch (e.op_info->op)
467 case OP_TYPE_vv:
468 return e.use_exact_insn (code_for_pred_widen_mulsu (e.vector_mode ()));
469 case OP_TYPE_vx:
470 return e.use_exact_insn (
471 code_for_pred_widen_mulsu_scalar (e.vector_mode ()));
472 default:
473 gcc_unreachable ();
478 /* Implements vwcvt. */
479 template<rtx_code CODE>
480 class vwcvt : public function_base
482 public:
483 rtx expand (function_expander &e) const override
485 return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
489 /* Implements vadc. */
490 class vadc : public function_base
492 public:
493 bool apply_mask_policy_p () const override { return false; }
494 bool use_mask_predication_p () const override { return false; }
496 rtx expand (function_expander &e) const override
498 switch (e.op_info->op)
500 case OP_TYPE_vvm:
501 return e.use_exact_insn (code_for_pred_adc (e.vector_mode ()));
502 case OP_TYPE_vxm:
503 return e.use_exact_insn (code_for_pred_adc_scalar (e.vector_mode ()));
504 default:
505 gcc_unreachable ();
510 /* Implements vsbc. */
511 class vsbc : public function_base
513 public:
514 bool apply_mask_policy_p () const override { return false; }
515 bool use_mask_predication_p () const override { return false; }
517 rtx expand (function_expander &e) const override
519 switch (e.op_info->op)
521 case OP_TYPE_vvm:
522 return e.use_exact_insn (code_for_pred_sbc (e.vector_mode ()));
523 case OP_TYPE_vxm:
524 return e.use_exact_insn (code_for_pred_sbc_scalar (e.vector_mode ()));
525 default:
526 gcc_unreachable ();
531 /* Implements vmadc. */
532 class vmadc : public function_base
534 public:
535 bool apply_tail_policy_p () const override { return false; }
536 bool apply_mask_policy_p () const override { return false; }
537 bool use_mask_predication_p () const override { return false; }
538 bool has_merge_operand_p () const override { return false; }
540 rtx expand (function_expander &e) const override
542 switch (e.op_info->op)
544 case OP_TYPE_vvm:
545 return e.use_exact_insn (code_for_pred_madc (e.vector_mode ()));
546 case OP_TYPE_vxm:
547 return e.use_exact_insn (code_for_pred_madc_scalar (e.vector_mode ()));
548 case OP_TYPE_vv:
549 return e.use_exact_insn (
550 code_for_pred_madc_overflow (e.vector_mode ()));
551 case OP_TYPE_vx:
552 return e.use_exact_insn (
553 code_for_pred_madc_overflow_scalar (e.vector_mode ()));
554 default:
555 gcc_unreachable ();
560 /* Implements vmsbc. */
561 class vmsbc : public function_base
563 public:
564 bool apply_tail_policy_p () const override { return false; }
565 bool apply_mask_policy_p () const override { return false; }
566 bool use_mask_predication_p () const override { return false; }
567 bool has_merge_operand_p () const override { return false; }
569 rtx expand (function_expander &e) const override
571 switch (e.op_info->op)
573 case OP_TYPE_vvm:
574 return e.use_exact_insn (code_for_pred_msbc (e.vector_mode ()));
575 case OP_TYPE_vxm:
576 return e.use_exact_insn (code_for_pred_msbc_scalar (e.vector_mode ()));
577 case OP_TYPE_vv:
578 return e.use_exact_insn (
579 code_for_pred_msbc_overflow (e.vector_mode ()));
580 case OP_TYPE_vx:
581 return e.use_exact_insn (
582 code_for_pred_msbc_overflow_scalar (e.vector_mode ()));
583 default:
584 gcc_unreachable ();
589 /* Implements vnsrl/vnsra. */
590 template<rtx_code CODE>
591 class vnshift : public function_base
593 public:
594 rtx expand (function_expander &e) const override
596 switch (e.op_info->op)
598 case OP_TYPE_wx:
599 return e.use_exact_insn (
600 code_for_pred_narrow_scalar (CODE, e.vector_mode ()));
601 case OP_TYPE_wv:
602 return e.use_exact_insn (code_for_pred_narrow (CODE, e.vector_mode ()));
603 default:
604 gcc_unreachable ();
609 /* Implements vncvt. */
610 class vncvt_x : public function_base
612 public:
613 rtx expand (function_expander &e) const override
615 return e.use_exact_insn (code_for_pred_trunc (e.vector_mode ()));
619 /* Implements vmerge/vfmerge. */
620 class vmerge : public function_base
622 public:
623 bool apply_mask_policy_p () const override { return false; }
624 bool use_mask_predication_p () const override { return false; }
625 rtx expand (function_expander &e) const override
627 switch (e.op_info->op)
629 case OP_TYPE_vvm:
630 return e.use_exact_insn (code_for_pred_merge (e.vector_mode ()));
631 case OP_TYPE_vxm:
632 case OP_TYPE_vfm:
633 return e.use_exact_insn (code_for_pred_merge_scalar (e.vector_mode ()));
634 default:
635 gcc_unreachable ();
640 /* Implements vmv.v.x/vmv.v.v/vfmv.v.f. */
641 class vmv_v : public function_base
643 public:
644 rtx expand (function_expander &e) const override
646 switch (e.op_info->op)
648 case OP_TYPE_v:
649 return e.use_exact_insn (code_for_pred_mov (e.vector_mode ()));
650 case OP_TYPE_x:
651 case OP_TYPE_f:
652 return e.use_exact_insn (code_for_pred_broadcast (e.vector_mode ()));
653 default:
654 gcc_unreachable ();
659 /* Implements vaadd/vasub/vsmul/vssra/vssrl. */
660 template<int UNSPEC>
661 class sat_op : public function_base
663 public:
664 bool has_rounding_mode_operand_p () const override { return true; }
666 bool may_require_vxrm_p () const override { return true; }
668 rtx expand (function_expander &e) const override
670 switch (e.op_info->op)
672 case OP_TYPE_vx:
673 return e.use_exact_insn (
674 code_for_pred_scalar (UNSPEC, e.vector_mode ()));
675 case OP_TYPE_vv:
676 return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
677 default:
678 gcc_unreachable ();
683 /* Implements vnclip/vnclipu. */
684 template<int UNSPEC>
685 class vnclip : public function_base
687 public:
688 bool has_rounding_mode_operand_p () const override { return true; }
690 bool may_require_vxrm_p () const override { return true; }
692 rtx expand (function_expander &e) const override
694 switch (e.op_info->op)
696 case OP_TYPE_wx:
697 return e.use_exact_insn (
698 code_for_pred_narrow_clip_scalar (UNSPEC, e.vector_mode ()));
699 case OP_TYPE_wv:
700 return e.use_exact_insn (
701 code_for_pred_narrow_clip (UNSPEC, e.vector_mode ()));
702 default:
703 gcc_unreachable ();
708 /* Implements vmseq/vmsne/vmslt/vmsgt/vmsle/vmsge. */
709 template<rtx_code CODE>
710 class icmp : public function_base
712 public:
713 rtx expand (function_expander &e) const override
715 switch (e.op_info->op)
717 case OP_TYPE_vx: {
718 if (CODE == GE || CODE == GEU)
719 return e.use_compare_insn (CODE, code_for_pred_ge_scalar (
720 e.vector_mode ()));
721 else
722 return e.use_compare_insn (CODE, code_for_pred_cmp_scalar (
723 e.vector_mode ()));
725 case OP_TYPE_vv: {
726 if (CODE == LT || CODE == LTU || CODE == GE || CODE == GEU)
727 return e.use_compare_insn (CODE,
728 code_for_pred_ltge (e.vector_mode ()));
729 else
730 return e.use_compare_insn (CODE,
731 code_for_pred_cmp (e.vector_mode ()));
733 default:
734 gcc_unreachable ();
739 /* Implements vmacc/vnmsac/vmadd/vnmsub. */
740 class vmacc : public function_base
742 public:
743 bool has_merge_operand_p () const override { return false; }
745 rtx expand (function_expander &e) const override
747 if (e.op_info->op == OP_TYPE_vx)
748 return e.use_ternop_insn (true, code_for_pred_mul_plus_scalar (
749 e.vector_mode ()));
750 if (e.op_info->op == OP_TYPE_vv)
751 return e.use_ternop_insn (true,
752 code_for_pred_mul_plus (e.vector_mode ()));
753 gcc_unreachable ();
757 class vnmsac : public function_base
759 public:
760 bool has_merge_operand_p () const override { return false; }
762 rtx expand (function_expander &e) const override
764 if (e.op_info->op == OP_TYPE_vx)
765 return e.use_ternop_insn (true, code_for_pred_minus_mul_scalar (
766 e.vector_mode ()));
767 if (e.op_info->op == OP_TYPE_vv)
768 return e.use_ternop_insn (true,
769 code_for_pred_minus_mul (e.vector_mode ()));
770 gcc_unreachable ();
774 class vmadd : public function_base
776 public:
777 bool has_merge_operand_p () const override { return false; }
779 rtx expand (function_expander &e) const override
781 if (e.op_info->op == OP_TYPE_vx)
782 return e.use_ternop_insn (false, code_for_pred_mul_plus_scalar (
783 e.vector_mode ()));
784 if (e.op_info->op == OP_TYPE_vv)
785 return e.use_ternop_insn (false,
786 code_for_pred_mul_plus (e.vector_mode ()));
787 gcc_unreachable ();
791 class vnmsub : public function_base
793 public:
794 bool has_merge_operand_p () const override { return false; }
796 rtx expand (function_expander &e) const override
798 if (e.op_info->op == OP_TYPE_vx)
799 return e.use_ternop_insn (false, code_for_pred_minus_mul_scalar (
800 e.vector_mode ()));
801 if (e.op_info->op == OP_TYPE_vv)
802 return e.use_ternop_insn (false,
803 code_for_pred_minus_mul (e.vector_mode ()));
804 gcc_unreachable ();
808 /* Implements vwmacc<su><su>. */
809 class vwmacc : public function_base
811 public:
812 bool has_merge_operand_p () const override { return false; }
814 rtx expand (function_expander &e) const override
816 if (e.op_info->op == OP_TYPE_vx)
817 return e.use_widen_ternop_insn (
818 code_for_pred_widen_mul_plus_scalar (SIGN_EXTEND, e.vector_mode ()));
819 if (e.op_info->op == OP_TYPE_vv)
820 return e.use_widen_ternop_insn (
821 code_for_pred_widen_mul_plus (SIGN_EXTEND, e.vector_mode ()));
822 gcc_unreachable ();
826 class vwmaccu : public function_base
828 public:
829 bool has_merge_operand_p () const override { return false; }
831 rtx expand (function_expander &e) const override
833 if (e.op_info->op == OP_TYPE_vx)
834 return e.use_widen_ternop_insn (
835 code_for_pred_widen_mul_plus_scalar (ZERO_EXTEND, e.vector_mode ()));
836 if (e.op_info->op == OP_TYPE_vv)
837 return e.use_widen_ternop_insn (
838 code_for_pred_widen_mul_plus (ZERO_EXTEND, e.vector_mode ()));
839 gcc_unreachable ();
843 class vwmaccsu : public function_base
845 public:
846 bool has_merge_operand_p () const override { return false; }
848 rtx expand (function_expander &e) const override
850 if (e.op_info->op == OP_TYPE_vx)
851 return e.use_widen_ternop_insn (
852 code_for_pred_widen_mul_plussu_scalar (e.vector_mode ()));
853 if (e.op_info->op == OP_TYPE_vv)
854 return e.use_widen_ternop_insn (
855 code_for_pred_widen_mul_plussu (e.vector_mode ()));
856 gcc_unreachable ();
860 class vwmaccus : public function_base
862 public:
863 bool has_merge_operand_p () const override { return false; }
865 rtx expand (function_expander &e) const override
867 return e.use_widen_ternop_insn (
868 code_for_pred_widen_mul_plusus_scalar (e.vector_mode ()));
872 /* Implements vmand/vmnand/vmandn/vmxor/vmor/vmnor/vmorn/vmxnor */
873 template<rtx_code CODE>
874 class mask_logic : public function_base
876 public:
877 bool apply_tail_policy_p () const override { return false; }
878 bool apply_mask_policy_p () const override { return false; }
880 rtx expand (function_expander &e) const override
882 return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
885 template<rtx_code CODE>
886 class mask_nlogic : public function_base
888 public:
889 bool apply_tail_policy_p () const override { return false; }
890 bool apply_mask_policy_p () const override { return false; }
892 rtx expand (function_expander &e) const override
894 return e.use_exact_insn (code_for_pred_n (CODE, e.vector_mode ()));
897 template<rtx_code CODE>
898 class mask_notlogic : public function_base
900 public:
901 bool apply_tail_policy_p () const override { return false; }
902 bool apply_mask_policy_p () const override { return false; }
904 rtx expand (function_expander &e) const override
906 return e.use_exact_insn (code_for_pred_not (CODE, e.vector_mode ()));
910 /* Implements vmmv. */
911 class vmmv : public function_base
913 public:
914 bool apply_tail_policy_p () const override { return false; }
915 bool apply_mask_policy_p () const override { return false; }
917 rtx expand (function_expander &e) const override
919 return e.use_exact_insn (code_for_pred_mov (e.vector_mode ()));
923 /* Implements vmclr. */
924 class vmclr : public function_base
926 public:
927 bool can_be_overloaded_p (enum predication_type_index) const override
929 return false;
932 rtx expand (function_expander &e) const override
934 machine_mode mode = TYPE_MODE (TREE_TYPE (e.exp));
935 e.add_all_one_mask_operand (mode);
936 e.add_vundef_operand (mode);
937 e.add_input_operand (mode, CONST0_RTX (mode));
938 e.add_input_operand (call_expr_nargs (e.exp) - 1);
939 e.add_input_operand (Pmode, get_avl_type_rtx (avl_type::NONVLMAX));
940 return e.generate_insn (code_for_pred_mov (e.vector_mode ()));
944 /* Implements vmset. */
945 class vmset : public function_base
947 public:
948 bool can_be_overloaded_p (enum predication_type_index) const override
950 return false;
953 rtx expand (function_expander &e) const override
955 machine_mode mode = TYPE_MODE (TREE_TYPE (e.exp));
956 e.add_all_one_mask_operand (mode);
957 e.add_vundef_operand (mode);
958 e.add_input_operand (mode, CONSTM1_RTX (mode));
959 e.add_input_operand (call_expr_nargs (e.exp) - 1);
960 e.add_input_operand (Pmode, get_avl_type_rtx (avl_type::NONVLMAX));
961 return e.generate_insn (code_for_pred_mov (e.vector_mode ()));
965 /* Implements vmnot. */
966 class vmnot : public function_base
968 public:
969 bool apply_tail_policy_p () const override { return false; }
970 bool apply_mask_policy_p () const override { return false; }
972 rtx expand (function_expander &e) const override
974 return e.use_exact_insn (code_for_pred_not (e.vector_mode ()));
978 /* Implements vcpop. */
979 class vcpop : public function_base
981 public:
982 bool apply_tail_policy_p () const override { return false; }
983 bool apply_mask_policy_p () const override { return false; }
984 bool has_merge_operand_p () const override { return false; }
986 rtx expand (function_expander &e) const override
988 return e.use_exact_insn (code_for_pred_popcount (e.vector_mode (), Pmode));
992 /* Implements vfirst. */
993 class vfirst : public function_base
995 public:
996 bool apply_tail_policy_p () const override { return false; }
997 bool apply_mask_policy_p () const override { return false; }
998 bool has_merge_operand_p () const override { return false; }
1000 rtx expand (function_expander &e) const override
1002 return e.use_exact_insn (code_for_pred_ffs (e.vector_mode (), Pmode));
1006 /* Implements vmsbf/vmsif/vmsof. */
1007 template<int UNSPEC>
1008 class mask_misc : public function_base
1010 public:
1011 bool apply_tail_policy_p () const override { return false; }
1013 rtx expand (function_expander &e) const override
1015 return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
1019 /* Implements viota. */
1020 class viota : public function_base
1022 public:
1023 bool can_be_overloaded_p (enum predication_type_index pred) const override
1025 return pred == PRED_TYPE_tu || pred == PRED_TYPE_tum
1026 || pred == PRED_TYPE_tumu || pred == PRED_TYPE_mu;
1029 rtx expand (function_expander &e) const override
1031 return e.use_exact_insn (code_for_pred_iota (e.vector_mode ()));
1035 /* Implements vid. */
1036 class vid : public function_base
1038 public:
1039 bool can_be_overloaded_p (enum predication_type_index pred) const override
1041 return pred == PRED_TYPE_tu || pred == PRED_TYPE_tum
1042 || pred == PRED_TYPE_tumu || pred == PRED_TYPE_mu;
1045 rtx expand (function_expander &e) const override
1047 return e.use_exact_insn (code_for_pred_series (e.vector_mode ()));
1051 /* Implements vfrsub/vfrdiv. */
1052 template<rtx_code CODE, enum frm_op_type FRM_OP = NO_FRM>
1053 class reverse_binop : public function_base
1055 public:
1056 bool has_rounding_mode_operand_p () const override
1058 return FRM_OP == HAS_FRM;
1061 bool may_require_frm_p () const override { return true; }
1063 rtx expand (function_expander &e) const override
1065 return e.use_exact_insn (
1066 code_for_pred_reverse_scalar (CODE, e.vector_mode ()));
1070 template<enum frm_op_type FRM_OP = NO_FRM>
1071 class vfmacc : public function_base
1073 public:
1074 bool has_rounding_mode_operand_p () const override
1076 return FRM_OP == HAS_FRM;
1079 bool may_require_frm_p () const override { return true; }
1081 bool has_merge_operand_p () const override { return false; }
1083 rtx expand (function_expander &e) const override
1085 if (e.op_info->op == OP_TYPE_vf)
1086 return e.use_ternop_insn (true,
1087 code_for_pred_mul_scalar (PLUS,
1088 e.vector_mode ()));
1089 if (e.op_info->op == OP_TYPE_vv)
1090 return e.use_ternop_insn (true,
1091 code_for_pred_mul (PLUS, e.vector_mode ()));
1092 gcc_unreachable ();
1096 template<enum frm_op_type FRM_OP = NO_FRM>
1097 class vfnmsac : public function_base
1099 public:
1100 bool has_rounding_mode_operand_p () const override
1102 return FRM_OP == HAS_FRM;
1105 bool may_require_frm_p () const override { return true; }
1107 bool has_merge_operand_p () const override { return false; }
1109 rtx expand (function_expander &e) const override
1111 if (e.op_info->op == OP_TYPE_vf)
1112 return e.use_ternop_insn (
1113 true, code_for_pred_mul_neg_scalar (PLUS, e.vector_mode ()));
1114 if (e.op_info->op == OP_TYPE_vv)
1115 return e.use_ternop_insn (true,
1116 code_for_pred_mul_neg (PLUS, e.vector_mode ()));
1117 gcc_unreachable ();
1121 template<enum frm_op_type FRM_OP = NO_FRM>
1122 class vfmadd : public function_base
1124 public:
1125 bool has_rounding_mode_operand_p () const override
1127 return FRM_OP == HAS_FRM;
1130 bool may_require_frm_p () const override { return true; }
1132 bool has_merge_operand_p () const override { return false; }
1134 rtx expand (function_expander &e) const override
1136 if (e.op_info->op == OP_TYPE_vf)
1137 return e.use_ternop_insn (false,
1138 code_for_pred_mul_scalar (PLUS,
1139 e.vector_mode ()));
1140 if (e.op_info->op == OP_TYPE_vv)
1141 return e.use_ternop_insn (false,
1142 code_for_pred_mul (PLUS, e.vector_mode ()));
1143 gcc_unreachable ();
1147 template<enum frm_op_type FRM_OP = NO_FRM>
1148 class vfnmsub : public function_base
1150 public:
1151 bool has_rounding_mode_operand_p () const override
1153 return FRM_OP == HAS_FRM;
1156 bool may_require_frm_p () const override { return true; }
1158 bool has_merge_operand_p () const override { return false; }
1160 rtx expand (function_expander &e) const override
1162 if (e.op_info->op == OP_TYPE_vf)
1163 return e.use_ternop_insn (
1164 false, code_for_pred_mul_neg_scalar (PLUS, e.vector_mode ()));
1165 if (e.op_info->op == OP_TYPE_vv)
1166 return e.use_ternop_insn (false,
1167 code_for_pred_mul_neg (PLUS, e.vector_mode ()));
1168 gcc_unreachable ();
1172 template<enum frm_op_type FRM_OP = NO_FRM>
1173 class vfnmacc : public function_base
1175 public:
1176 bool has_rounding_mode_operand_p () const override
1178 return FRM_OP == HAS_FRM;
1181 bool may_require_frm_p () const override { return true; }
1183 bool has_merge_operand_p () const override { return false; }
1185 rtx expand (function_expander &e) const override
1187 if (e.op_info->op == OP_TYPE_vf)
1188 return e.use_ternop_insn (
1189 true, code_for_pred_mul_neg_scalar (MINUS, e.vector_mode ()));
1190 if (e.op_info->op == OP_TYPE_vv)
1191 return e.use_ternop_insn (true,
1192 code_for_pred_mul_neg (MINUS, e.vector_mode ()));
1193 gcc_unreachable ();
1197 template<enum frm_op_type FRM_OP = NO_FRM>
1198 class vfmsac : public function_base
1200 public:
1201 bool has_rounding_mode_operand_p () const override
1203 return FRM_OP == HAS_FRM;
1206 bool may_require_frm_p () const override { return true; }
1208 bool has_merge_operand_p () const override { return false; }
1210 rtx expand (function_expander &e) const override
1212 if (e.op_info->op == OP_TYPE_vf)
1213 return e.use_ternop_insn (true,
1214 code_for_pred_mul_scalar (MINUS,
1215 e.vector_mode ()));
1216 if (e.op_info->op == OP_TYPE_vv)
1217 return e.use_ternop_insn (true,
1218 code_for_pred_mul (MINUS, e.vector_mode ()));
1219 gcc_unreachable ();
1223 template<enum frm_op_type FRM_OP = NO_FRM>
1224 class vfnmadd : public function_base
1226 public:
1227 bool has_rounding_mode_operand_p () const override
1229 return FRM_OP == HAS_FRM;
1232 bool may_require_frm_p () const override { return true; }
1234 bool has_merge_operand_p () const override { return false; }
1236 rtx expand (function_expander &e) const override
1238 if (e.op_info->op == OP_TYPE_vf)
1239 return e.use_ternop_insn (
1240 false, code_for_pred_mul_neg_scalar (MINUS, e.vector_mode ()));
1241 if (e.op_info->op == OP_TYPE_vv)
1242 return e.use_ternop_insn (false,
1243 code_for_pred_mul_neg (MINUS, e.vector_mode ()));
1244 gcc_unreachable ();
1248 template<enum frm_op_type FRM_OP = NO_FRM>
1249 class vfmsub : public function_base
1251 public:
1252 bool has_rounding_mode_operand_p () const override
1254 return FRM_OP == HAS_FRM;
1257 bool may_require_frm_p () const override { return true; }
1259 bool has_merge_operand_p () const override { return false; }
1261 rtx expand (function_expander &e) const override
1263 if (e.op_info->op == OP_TYPE_vf)
1264 return e.use_ternop_insn (false,
1265 code_for_pred_mul_scalar (MINUS,
1266 e.vector_mode ()));
1267 if (e.op_info->op == OP_TYPE_vv)
1268 return e.use_ternop_insn (false,
1269 code_for_pred_mul (MINUS, e.vector_mode ()));
1270 gcc_unreachable ();
1274 template<enum frm_op_type FRM_OP = NO_FRM>
1275 class vfwmacc : public function_base
1277 public:
1278 bool has_rounding_mode_operand_p () const override
1280 return FRM_OP == HAS_FRM;
1283 bool may_require_frm_p () const override { return true; }
1285 bool has_merge_operand_p () const override { return false; }
1287 rtx expand (function_expander &e) const override
1289 if (e.op_info->op == OP_TYPE_vf)
1290 return e.use_widen_ternop_insn (
1291 code_for_pred_widen_mul_scalar (PLUS, e.vector_mode ()));
1292 if (e.op_info->op == OP_TYPE_vv)
1293 return e.use_widen_ternop_insn (
1294 code_for_pred_widen_mul (PLUS, e.vector_mode ()));
1295 gcc_unreachable ();
1299 template<enum frm_op_type FRM_OP = NO_FRM>
1300 class vfwnmacc : public function_base
1302 public:
1303 bool has_rounding_mode_operand_p () const override
1305 return FRM_OP == HAS_FRM;
1308 bool may_require_frm_p () const override { return true; }
1310 bool has_merge_operand_p () const override { return false; }
1312 rtx expand (function_expander &e) const override
1314 if (e.op_info->op == OP_TYPE_vf)
1315 return e.use_widen_ternop_insn (
1316 code_for_pred_widen_mul_neg_scalar (MINUS, e.vector_mode ()));
1317 if (e.op_info->op == OP_TYPE_vv)
1318 return e.use_widen_ternop_insn (
1319 code_for_pred_widen_mul_neg (MINUS, e.vector_mode ()));
1320 gcc_unreachable ();
1324 template<enum frm_op_type FRM_OP = NO_FRM>
1325 class vfwmsac : public function_base
1327 public:
1328 bool has_rounding_mode_operand_p () const override
1330 return FRM_OP == HAS_FRM;
1333 bool may_require_frm_p () const override { return true; }
1335 bool has_merge_operand_p () const override { return false; }
1337 rtx expand (function_expander &e) const override
1339 if (e.op_info->op == OP_TYPE_vf)
1340 return e.use_widen_ternop_insn (
1341 code_for_pred_widen_mul_scalar (MINUS, e.vector_mode ()));
1342 if (e.op_info->op == OP_TYPE_vv)
1343 return e.use_widen_ternop_insn (
1344 code_for_pred_widen_mul (MINUS, e.vector_mode ()));
1345 gcc_unreachable ();
1349 template<enum frm_op_type FRM_OP = NO_FRM>
1350 class vfwnmsac : public function_base
1352 public:
1353 bool has_rounding_mode_operand_p () const override
1355 return FRM_OP == HAS_FRM;
1358 bool may_require_frm_p () const override { return true; }
1360 bool has_merge_operand_p () const override { return false; }
1362 rtx expand (function_expander &e) const override
1364 if (e.op_info->op == OP_TYPE_vf)
1365 return e.use_widen_ternop_insn (
1366 code_for_pred_widen_mul_neg_scalar (PLUS, e.vector_mode ()));
1367 if (e.op_info->op == OP_TYPE_vv)
1368 return e.use_widen_ternop_insn (
1369 code_for_pred_widen_mul_neg (PLUS, e.vector_mode ()));
1370 gcc_unreachable ();
1374 /* Implements vfsqrt7/vfrec7/vfclass/vfsgnj/vfsgnjx. */
1375 template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
1376 class float_misc : public function_base
1378 public:
1379 bool has_rounding_mode_operand_p () const override
1381 return FRM_OP == HAS_FRM;
1384 bool may_require_frm_p () const override { return true; }
1386 rtx expand (function_expander &e) const override
1388 if (e.op_info->op == OP_TYPE_vf)
1389 return e.use_exact_insn (code_for_pred_scalar (UNSPEC, e.vector_mode ()));
1390 if (e.op_info->op == OP_TYPE_vv || e.op_info->op == OP_TYPE_v)
1391 return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
1392 gcc_unreachable ();
1396 /* Implements vfsgnjn. */
1397 class vfsgnjn : public function_base
1399 public:
1400 rtx expand (function_expander &e) const override
1402 if (e.op_info->op == OP_TYPE_vf)
1403 return e.use_exact_insn (code_for_pred_ncopysign_scalar (e.vector_mode ()));
1404 if (e.op_info->op == OP_TYPE_vv)
1405 return e.use_exact_insn (code_for_pred_ncopysign (e.vector_mode ()));
1406 gcc_unreachable ();
1410 /* Implements vmfeq/vmfne/vmflt/vmfgt/vmfle/vmfge. */
1411 template<rtx_code CODE>
1412 class fcmp : public function_base
1414 public:
1415 rtx expand (function_expander &e) const override
1417 switch (e.op_info->op)
1419 case OP_TYPE_vf: {
1420 return e.use_compare_insn (CODE, code_for_pred_cmp_scalar (
1421 e.vector_mode ()));
1423 case OP_TYPE_vv: {
1424 return e.use_compare_insn (CODE,
1425 code_for_pred_cmp (e.vector_mode ()));
1427 default:
1428 gcc_unreachable ();
1433 /* Implements vfclass. */
1434 class vfclass : public function_base
1436 public:
1437 rtx expand (function_expander &e) const override
1439 return e.use_exact_insn (code_for_pred_class (e.arg_mode (0)));
1443 /* Implements vfcvt.x. */
1444 template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
1445 class vfcvt_x : public function_base
1447 public:
1448 bool has_rounding_mode_operand_p () const override
1450 return FRM_OP == HAS_FRM;
1453 bool may_require_frm_p () const override { return true; }
1455 rtx expand (function_expander &e) const override
1457 return e.use_exact_insn (code_for_pred_fcvt_x_f (UNSPEC, e.arg_mode (0)));
1461 /* Implements vfcvt.rtz.x. */
1462 template<rtx_code CODE>
1463 class vfcvt_rtz_x : public function_base
1465 public:
1466 rtx expand (function_expander &e) const override
1468 return e.use_exact_insn (code_for_pred (CODE, e.arg_mode (0)));
1472 template<enum frm_op_type FRM_OP = NO_FRM>
1473 class vfcvt_f : public function_base
1475 public:
1476 bool has_rounding_mode_operand_p () const override
1478 return FRM_OP == HAS_FRM;
1481 bool may_require_frm_p () const override { return true; }
1483 rtx expand (function_expander &e) const override
1485 if (e.op_info->op == OP_TYPE_x_v)
1486 return e.use_exact_insn (code_for_pred (FLOAT, e.vector_mode ()));
1487 if (e.op_info->op == OP_TYPE_xu_v)
1488 return e.use_exact_insn (
1489 code_for_pred (UNSIGNED_FLOAT, e.vector_mode ()));
1490 gcc_unreachable ();
1494 /* Implements vfwcvt.x. */
1495 template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
1496 class vfwcvt_x : public function_base
1498 public:
1499 bool has_rounding_mode_operand_p () const override
1501 return FRM_OP == HAS_FRM;
1504 bool may_require_frm_p () const override { return true; }
1506 rtx expand (function_expander &e) const override
1508 return e.use_exact_insn (
1509 code_for_pred_widen_fcvt_x_f (UNSPEC, e.vector_mode ()));
1513 /* Implements vfwcvt.rtz.x. */
1514 template<rtx_code CODE>
1515 class vfwcvt_rtz_x : public function_base
1517 public:
1518 rtx expand (function_expander &e) const override
1520 return e.use_exact_insn (code_for_pred_widen (CODE, e.vector_mode ()));
1524 class vfwcvt_f : public function_base
1526 public:
1527 rtx expand (function_expander &e) const override
1529 if (e.op_info->op == OP_TYPE_f_v)
1530 return e.use_exact_insn (code_for_pred_extend (e.vector_mode ()));
1531 if (e.op_info->op == OP_TYPE_x_v)
1532 return e.use_exact_insn (code_for_pred_widen (FLOAT, e.vector_mode ()));
1533 if (e.op_info->op == OP_TYPE_xu_v)
1534 return e.use_exact_insn (
1535 code_for_pred_widen (UNSIGNED_FLOAT, e.vector_mode ()));
1536 gcc_unreachable ();
1540 /* Implements vfncvt.x. */
1541 template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
1542 class vfncvt_x : public function_base
1544 public:
1545 bool has_rounding_mode_operand_p () const override
1547 return FRM_OP == HAS_FRM;
1550 bool may_require_frm_p () const override { return true; }
1552 rtx expand (function_expander &e) const override
1554 return e.use_exact_insn (
1555 code_for_pred_narrow_fcvt_x_f (UNSPEC, e.arg_mode (0)));
1559 /* Implements vfncvt.rtz.x. */
1560 template<rtx_code CODE>
1561 class vfncvt_rtz_x : public function_base
1563 public:
1564 rtx expand (function_expander &e) const override
1566 return e.use_exact_insn (code_for_pred_narrow (CODE, e.vector_mode ()));
1570 template<enum frm_op_type FRM_OP = NO_FRM>
1571 class vfncvt_f : public function_base
1573 public:
1574 bool has_rounding_mode_operand_p () const override
1576 return FRM_OP == HAS_FRM;
1579 bool may_require_frm_p () const override { return true; }
1581 rtx expand (function_expander &e) const override
1583 if (e.op_info->op == OP_TYPE_f_w)
1584 return e.use_exact_insn (code_for_pred_trunc (e.vector_mode ()));
1585 if (e.op_info->op == OP_TYPE_x_w)
1586 return e.use_exact_insn (code_for_pred_narrow (FLOAT, e.arg_mode (0)));
1587 if (e.op_info->op == OP_TYPE_xu_w)
1588 return e.use_exact_insn (
1589 code_for_pred_narrow (UNSIGNED_FLOAT, e.arg_mode (0)));
1590 gcc_unreachable ();
1594 class vfncvt_rod_f : public function_base
1596 public:
1597 rtx expand (function_expander &e) const override
1599 return e.use_exact_insn (code_for_pred_rod_trunc (e.vector_mode ()));
1603 /* Implements reduction instructions. */
1604 template<unsigned UNSPEC>
1605 class reducop : public function_base
1607 public:
1608 bool apply_mask_policy_p () const override { return false; }
1610 rtx expand (function_expander &e) const override
1612 return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
1616 /* Implements floating-point reduction instructions. */
1617 template<unsigned UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
1618 class freducop : public function_base
1620 public:
1621 bool has_rounding_mode_operand_p () const override
1623 return FRM_OP == HAS_FRM;
1626 bool may_require_frm_p () const override { return true; }
1628 bool apply_mask_policy_p () const override { return false; }
1630 rtx expand (function_expander &e) const override
1632 return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
1636 /* Implements vmv/vfmv instructions. */
1637 class vmv : public function_base
1639 public:
1640 bool apply_vl_p () const override { return false; }
1641 bool apply_tail_policy_p () const override { return false; }
1642 bool apply_mask_policy_p () const override { return false; }
1643 bool use_mask_predication_p () const override { return false; }
1644 bool has_merge_operand_p () const override { return false; }
1646 rtx expand (function_expander &e) const override
1648 return e.use_exact_insn (code_for_pred_extract_first (e.vector_mode ()));
1652 /* Implements vmv.s.x/vfmv.s.f. */
1653 class vmv_s : public function_base
1655 public:
1656 rtx expand (function_expander &e) const override
1658 return e.use_scalar_move_insn (code_for_pred_broadcast (e.vector_mode ()));
1662 template<int UNSPEC>
1663 class slideop : public function_base
1665 public:
1666 bool has_merge_operand_p () const override
1668 if (UNSPEC == UNSPEC_VSLIDEUP)
1669 return false;
1670 return true;
1673 rtx expand (function_expander &e) const override
1675 return e.use_exact_insn (code_for_pred_slide (UNSPEC, e.vector_mode ()));
1679 class vrgather : public function_base
1681 public:
1682 rtx expand (function_expander &e) const override
1684 switch (e.op_info->op)
1686 case OP_TYPE_vx:
1687 return e.use_exact_insn (
1688 code_for_pred_gather_scalar (e.vector_mode ()));
1689 case OP_TYPE_vv:
1690 return e.use_exact_insn (code_for_pred_gather (e.vector_mode ()));
1691 default:
1692 gcc_unreachable ();
1697 class vrgatherei16 : public function_base
1699 public:
1700 rtx expand (function_expander &e) const override
1702 return e.use_exact_insn (code_for_pred_gatherei16 (e.vector_mode ()));
1706 class vcompress : public function_base
1708 public:
1709 bool apply_mask_policy_p () const override { return false; }
1710 bool use_mask_predication_p () const override { return false; }
1711 rtx expand (function_expander &e) const override
1713 return e.use_exact_insn (code_for_pred_compress (e.vector_mode ()));
1717 class vundefined : public function_base
1719 public:
1720 bool apply_vl_p () const override
1722 return false;
1725 rtx expand (function_expander &e) const override
1727 return e.generate_insn (code_for_vundefined (e.vector_mode ()));
1731 class vreinterpret : public function_base
1733 public:
1734 bool apply_vl_p () const override
1736 return false;
1739 rtx expand (function_expander &e) const override
1741 e.add_input_operand (0);
1742 return e.generate_insn (code_for_vreinterpret (e.ret_mode ()));
1746 class vlmul_ext : public function_base
1748 public:
1749 bool apply_vl_p () const override
1751 return false;
1754 rtx expand (function_expander &e) const override
1756 tree arg = CALL_EXPR_ARG (e.exp, 0);
1757 rtx src = expand_normal (arg);
1758 emit_move_insn (gen_lowpart (e.vector_mode (), e.target), src);
1759 return e.target;
1763 class vlmul_trunc : public function_base
1765 public:
1766 bool apply_vl_p () const override { return false; }
1768 rtx expand (function_expander &e) const override
1770 rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 0));
1771 emit_move_insn (e.target, gen_lowpart (GET_MODE (e.target), src));
1772 return e.target;
1776 class vset : public function_base
1778 public:
1779 bool apply_vl_p () const override { return false; }
1781 gimple *fold (gimple_folder &f) const override
1783 tree rhs_tuple = gimple_call_arg (f.call, 0);
1784 /* LMUL > 1 non-tuple vector types are not structure,
1785 we can't use __val[index] to set the subpart. */
1786 if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
1787 return NULL;
1788 tree index = gimple_call_arg (f.call, 1);
1789 tree rhs_vector = gimple_call_arg (f.call, 2);
1791 /* Replace the call with two statements: a copy of the full tuple
1792 to the call result, followed by an update of the individual vector.
1794 The fold routines expect the replacement statement to have the
1795 same lhs as the original call, so return the copy statement
1796 rather than the field update. */
1797 gassign *copy = gimple_build_assign (unshare_expr (f.lhs), rhs_tuple);
1799 /* Get a reference to the individual vector. */
1800 tree field = tuple_type_field (TREE_TYPE (f.lhs));
1801 tree lhs_array
1802 = build3 (COMPONENT_REF, TREE_TYPE (field), f.lhs, field, NULL_TREE);
1803 tree lhs_vector = build4 (ARRAY_REF, TREE_TYPE (rhs_vector), lhs_array,
1804 index, NULL_TREE, NULL_TREE);
1805 gassign *update = gimple_build_assign (lhs_vector, rhs_vector);
1806 gsi_insert_after (f.gsi, update, GSI_SAME_STMT);
1808 return copy;
1811 rtx expand (function_expander &e) const override
1813 if (!e.target)
1814 return NULL_RTX;
1815 rtx dest = expand_normal (CALL_EXPR_ARG (e.exp, 0));
1816 gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (dest)));
1817 rtx index = expand_normal (CALL_EXPR_ARG (e.exp, 1));
1818 rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 2));
1819 poly_int64 offset = INTVAL (index) * GET_MODE_SIZE (GET_MODE (src));
1820 emit_move_insn (e.target, dest);
1821 rtx subreg = simplify_gen_subreg (GET_MODE (src), e.target,
1822 GET_MODE (e.target), offset);
1823 emit_move_insn (subreg, src);
1824 return e.target;
1828 class vget : public function_base
1830 public:
1831 bool apply_vl_p () const override { return false; }
1833 gimple *fold (gimple_folder &f) const override
1835 /* Fold into a normal gimple component access. */
1836 tree rhs_tuple = gimple_call_arg (f.call, 0);
1837 /* LMUL > 1 non-tuple vector types are not structure,
1838 we can't use __val[index] to get the subpart. */
1839 if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
1840 return NULL;
1841 tree index = gimple_call_arg (f.call, 1);
1842 tree field = tuple_type_field (TREE_TYPE (rhs_tuple));
1843 tree rhs_array
1844 = build3 (COMPONENT_REF, TREE_TYPE (field), rhs_tuple, field, NULL_TREE);
1845 tree rhs_vector = build4 (ARRAY_REF, TREE_TYPE (f.lhs), rhs_array, index,
1846 NULL_TREE, NULL_TREE);
1847 return gimple_build_assign (f.lhs, rhs_vector);
1850 rtx expand (function_expander &e) const override
1852 if (!e.target)
1853 return NULL_RTX;
1854 rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 0));
1855 gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (src)));
1856 rtx index = expand_normal (CALL_EXPR_ARG (e.exp, 1));
1857 poly_int64 offset = INTVAL (index) * GET_MODE_SIZE (GET_MODE (e.target));
1858 rtx subreg
1859 = simplify_gen_subreg (GET_MODE (e.target), src, GET_MODE (src), offset);
1860 return subreg;
1864 class vcreate : public function_base
1866 public:
1867 gimple *fold (gimple_folder &f) const override
1869 unsigned int nargs = gimple_call_num_args (f.call);
1870 tree lhs_type = TREE_TYPE (f.lhs);
1871 /* LMUL > 1 non-tuple vector types are not structure,
1872 we can't use __val[index] to set the subpart. */
1873 if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (lhs_type)))
1874 return NULL;
1876 /* Replace the call with a clobber of the result (to prevent it from
1877 becoming upwards exposed) followed by stores into each individual
1878 vector of tuple.
1880 The fold routines expect the replacement statement to have the
1881 same lhs as the original call, so return the clobber statement
1882 rather than the final vector store. */
1883 gassign *clobber = gimple_build_assign (f.lhs, build_clobber (lhs_type));
1885 for (unsigned int i = nargs; i-- > 0; )
1887 tree rhs_vector = gimple_call_arg (f.call, i);
1888 tree field = tuple_type_field (TREE_TYPE (f.lhs));
1889 tree lhs_array = build3 (COMPONENT_REF, TREE_TYPE (field),
1890 unshare_expr (f.lhs), field, NULL_TREE);
1891 tree lhs_vector = build4 (ARRAY_REF, TREE_TYPE (rhs_vector),
1892 lhs_array, size_int (i),
1893 NULL_TREE, NULL_TREE);
1894 gassign *assign = gimple_build_assign (lhs_vector, rhs_vector);
1895 gsi_insert_after (f.gsi, assign, GSI_SAME_STMT);
1897 return clobber;
1900 rtx expand (function_expander &e) const override
1902 if (!e.target)
1903 return NULL_RTX;
1904 gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (e.target)));
1905 unsigned int nargs = call_expr_nargs (e.exp);
1906 for (unsigned int i = 0; i < nargs; i++)
1908 rtx src = expand_normal (CALL_EXPR_ARG (e.exp, i));
1909 poly_int64 offset = i * GET_MODE_SIZE (GET_MODE (src));
1910 rtx subreg = simplify_gen_subreg (GET_MODE (src), e.target,
1911 GET_MODE (e.target), offset);
1912 emit_move_insn (subreg, src);
1915 return e.target;
1919 class read_vl : public function_base
1921 public:
1922 unsigned int call_properties (const function_instance &) const override
1924 return CP_READ_CSR;
1927 rtx expand (function_expander &e) const override
1929 if (Pmode == SImode)
1930 emit_insn (gen_read_vlsi (e.target));
1931 else
1932 emit_insn (gen_read_vldi_zero_extend (e.target));
1933 return e.target;
1937 class vleff : public function_base
1939 public:
1940 unsigned int call_properties (const function_instance &) const override
1942 return CP_READ_MEMORY | CP_WRITE_CSR;
1945 bool can_be_overloaded_p (enum predication_type_index pred) const override
1947 return pred != PRED_TYPE_none;
1950 gimple *fold (gimple_folder &f) const override
1952 return fold_fault_load (f);
1955 rtx expand (function_expander &e) const override
1957 return e.use_contiguous_load_insn (
1958 code_for_pred_fault_load (e.vector_mode ()));
1962 /* Implements vlenb. */
1963 class vlenb : public function_base
1965 public:
1966 bool apply_vl_p () const override { return false; }
1968 rtx expand (function_expander &e) const override
1970 machine_mode mode = GET_MODE (e.target);
1971 rtx vlenb = gen_int_mode (BYTES_PER_RISCV_VECTOR, mode);
1972 emit_move_insn (e.target, vlenb);
1973 return e.target;
1977 /* Implements vlseg.v. */
1978 class vlseg : public function_base
1980 public:
1981 unsigned int call_properties (const function_instance &) const override
1983 return CP_READ_MEMORY;
1986 bool can_be_overloaded_p (enum predication_type_index pred) const override
1988 return pred != PRED_TYPE_none;
1991 rtx expand (function_expander &e) const override
1993 return e.use_exact_insn (
1994 code_for_pred_unit_strided_load (e.vector_mode ()));
1998 /* Implements vsseg.v. */
1999 class vsseg : public function_base
2001 public:
2002 bool apply_tail_policy_p () const override { return false; }
2003 bool apply_mask_policy_p () const override { return false; }
2005 unsigned int call_properties (const function_instance &) const override
2007 return CP_WRITE_MEMORY;
2010 bool can_be_overloaded_p (enum predication_type_index) const override
2012 return true;
2015 rtx expand (function_expander &e) const override
2017 return e.use_exact_insn (
2018 code_for_pred_unit_strided_store (e.vector_mode ()));
2022 /* Implements vlsseg.v. */
2023 class vlsseg : public function_base
2025 public:
2026 unsigned int call_properties (const function_instance &) const override
2028 return CP_READ_MEMORY;
2031 bool can_be_overloaded_p (enum predication_type_index pred) const override
2033 return pred != PRED_TYPE_none;
2036 rtx expand (function_expander &e) const override
2038 return e.use_exact_insn (
2039 code_for_pred_strided_load (e.vector_mode ()));
2043 /* Implements vssseg.v. */
2044 class vssseg : public function_base
2046 public:
2047 bool apply_tail_policy_p () const override { return false; }
2048 bool apply_mask_policy_p () const override { return false; }
2050 unsigned int call_properties (const function_instance &) const override
2052 return CP_WRITE_MEMORY;
2055 bool can_be_overloaded_p (enum predication_type_index) const override
2057 return true;
2060 rtx expand (function_expander &e) const override
2062 return e.use_exact_insn (
2063 code_for_pred_strided_store (e.vector_mode ()));
2067 template<int UNSPEC>
2068 class seg_indexed_load : public function_base
2070 public:
2071 unsigned int call_properties (const function_instance &) const override
2073 return CP_READ_MEMORY;
2076 bool can_be_overloaded_p (enum predication_type_index) const override
2078 return true;
2081 rtx expand (function_expander &e) const override
2083 return e.use_exact_insn (
2084 code_for_pred_indexed_load (UNSPEC, e.vector_mode (), e.index_mode ()));
2088 template<int UNSPEC>
2089 class seg_indexed_store : public function_base
2091 public:
2092 bool apply_tail_policy_p () const override { return false; }
2093 bool apply_mask_policy_p () const override { return false; }
2095 unsigned int call_properties (const function_instance &) const override
2097 return CP_WRITE_MEMORY;
2100 bool can_be_overloaded_p (enum predication_type_index) const override
2102 return true;
2105 rtx expand (function_expander &e) const override
2107 return e.use_exact_insn (
2108 code_for_pred_indexed_store (UNSPEC, e.vector_mode (), e.index_mode ()));
2112 /* Implements vlsegff.v. */
2113 class vlsegff : public function_base
2115 public:
2116 unsigned int call_properties (const function_instance &) const override
2118 return CP_READ_MEMORY | CP_WRITE_CSR;
2121 bool can_be_overloaded_p (enum predication_type_index pred) const override
2123 return pred != PRED_TYPE_none;
2126 gimple *fold (gimple_folder &f) const override
2128 return fold_fault_load (f);
2131 rtx expand (function_expander &e) const override
2133 return e.use_exact_insn (code_for_pred_fault_load (e.vector_mode ()));
2137 /* Implements
2138 * th.vl(b/h/w)[u].v/th.vs(b/h/w)[u].v/th.vls(b/h/w)[u].v/th.vss(b/h/w)[u].v/
2139 * th.vlx(b/h/w)[u].v/th.vs[u]x(b/h/w).v
2140 * codegen. */
2141 template<bool STORE_P, lst_type LST_TYPE, int UNSPEC>
2142 class th_loadstore_width : public function_base
2144 public:
2145 bool apply_tail_policy_p () const override { return !STORE_P; }
2146 bool apply_mask_policy_p () const override { return !STORE_P; }
2148 unsigned int call_properties (const function_instance &) const override
2150 if (STORE_P)
2151 return CP_WRITE_MEMORY;
2152 else
2153 return CP_READ_MEMORY;
2156 bool can_be_overloaded_p (enum predication_type_index pred) const override
2158 if (STORE_P || LST_TYPE == LST_INDEXED)
2159 return true;
2160 return pred != PRED_TYPE_none;
2163 rtx expand (function_expander &e) const override
2165 gcc_assert (TARGET_XTHEADVECTOR);
2166 if (LST_TYPE == LST_INDEXED)
2168 if (STORE_P)
2169 return e.use_exact_insn (
2170 code_for_pred_indexed_store_width (UNSPEC, UNSPEC,
2171 e.vector_mode ()));
2172 else
2173 return e.use_exact_insn (
2174 code_for_pred_indexed_load_width (UNSPEC, e.vector_mode ()));
2176 else if (LST_TYPE == LST_STRIDED)
2178 if (STORE_P)
2179 return e.use_contiguous_store_insn (
2180 code_for_pred_strided_store_width (UNSPEC, e.vector_mode ()));
2181 else
2182 return e.use_contiguous_load_insn (
2183 code_for_pred_strided_load_width (UNSPEC, e.vector_mode ()));
2185 else
2187 if (STORE_P)
2188 return e.use_contiguous_store_insn (
2189 code_for_pred_store_width (UNSPEC, e.vector_mode ()));
2190 else
2191 return e.use_contiguous_load_insn (
2192 code_for_pred_mov_width (UNSPEC, e.vector_mode ()));
2197 /* Implements vext.x.v. */
2198 class th_extract : public function_base
2200 public:
2201 bool apply_vl_p () const override { return false; }
2202 bool apply_tail_policy_p () const override { return false; }
2203 bool apply_mask_policy_p () const override { return false; }
2204 bool use_mask_predication_p () const override { return false; }
2205 bool has_merge_operand_p () const override { return false; }
2207 rtx expand (function_expander &e) const override
2209 gcc_assert (TARGET_XTHEADVECTOR);
2210 return e.use_exact_insn (code_for_pred_th_extract (e.vector_mode ()));
2214 /* Below implements are vector crypto */
2215 /* Implements vandn.[vv,vx] */
2216 class vandn : public function_base
2218 public:
2219 rtx expand (function_expander &e) const override
2221 switch (e.op_info->op)
2223 case OP_TYPE_vv:
2224 return e.use_exact_insn (code_for_pred_vandn (e.vector_mode ()));
2225 case OP_TYPE_vx:
2226 return e.use_exact_insn (code_for_pred_vandn_scalar (e.vector_mode ()));
2227 default:
2228 gcc_unreachable ();
2233 /* Implements vrol/vror/clz/ctz. */
2234 template<rtx_code CODE>
2235 class bitmanip : public function_base
2237 public:
2238 bool apply_tail_policy_p () const override
2240 return (CODE == CLZ || CODE == CTZ) ? false : true;
2242 bool apply_mask_policy_p () const override
2244 return (CODE == CLZ || CODE == CTZ) ? false : true;
2246 bool has_merge_operand_p () const override
2248 return (CODE == CLZ || CODE == CTZ) ? false : true;
2251 rtx expand (function_expander &e) const override
2253 switch (e.op_info->op)
2255 case OP_TYPE_v:
2256 case OP_TYPE_vv:
2257 return e.use_exact_insn (code_for_pred_v (CODE, e.vector_mode ()));
2258 case OP_TYPE_vx:
2259 return e.use_exact_insn (code_for_pred_v_scalar (CODE, e.vector_mode ()));
2260 default:
2261 gcc_unreachable ();
2266 /* Implements vbrev/vbrev8/vrev8. */
2267 template<int UNSPEC>
2268 class b_reverse : public function_base
2270 public:
2271 rtx expand (function_expander &e) const override
2273 return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
2277 class vwsll : public function_base
2279 public:
2280 rtx expand (function_expander &e) const override
2282 switch (e.op_info->op)
2284 case OP_TYPE_vv:
2285 return e.use_exact_insn (code_for_pred_vwsll (e.vector_mode ()));
2286 case OP_TYPE_vx:
2287 return e.use_exact_insn (code_for_pred_vwsll_scalar (e.vector_mode ()));
2288 default:
2289 gcc_unreachable ();
2294 /* Implements clmul */
2295 template<int UNSPEC>
2296 class clmul : public function_base
2298 public:
2299 rtx expand (function_expander &e) const override
2301 switch (e.op_info->op)
2303 case OP_TYPE_vv:
2304 return e.use_exact_insn (
2305 code_for_pred_vclmul (UNSPEC, e.vector_mode ()));
2306 case OP_TYPE_vx:
2307 return e.use_exact_insn
2308 (code_for_pred_vclmul_scalar (UNSPEC, e.vector_mode ()));
2309 default:
2310 gcc_unreachable ();
2315 /* Implements vghsh/vsh2ms/vsha2c[hl]. */
2316 template<int UNSPEC>
2317 class vg_nhab : public function_base
2319 public:
2320 bool apply_mask_policy_p () const override { return false; }
2321 bool use_mask_predication_p () const override { return false; }
2322 bool has_merge_operand_p () const override { return false; }
2324 rtx expand (function_expander &e) const override
2326 return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
2330 /* Implements vgmul/vaes*. */
2331 template<int UNSPEC>
2332 class crypto_vv : public function_base
2334 public:
2335 bool apply_mask_policy_p () const override { return false; }
2336 bool use_mask_predication_p () const override { return false; }
2337 bool has_merge_operand_p () const override { return false; }
2339 rtx expand (function_expander &e) const override
2341 poly_uint64 nunits = 0U;
2342 switch (e.op_info->op)
2344 case OP_TYPE_vv:
2345 if (UNSPEC == UNSPEC_VGMUL)
2346 return e.use_exact_insn
2347 (code_for_pred_crypto_vv (UNSPEC, UNSPEC, e.vector_mode ()));
2348 else
2349 return e.use_exact_insn
2350 (code_for_pred_crypto_vv (UNSPEC + 1, UNSPEC + 1, e.vector_mode ()));
2351 case OP_TYPE_vs:
2352 /* Calculate the ratio between arg0 and arg1*/
2353 gcc_assert (multiple_p (GET_MODE_BITSIZE (e.arg_mode (0)),
2354 GET_MODE_BITSIZE (e.arg_mode (1)), &nunits));
2355 if (maybe_eq (nunits, 1U))
2356 return e.use_exact_insn (code_for_pred_crypto_vvx1_scalar
2357 (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
2358 else if (maybe_eq (nunits, 2U))
2359 return e.use_exact_insn (code_for_pred_crypto_vvx2_scalar
2360 (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
2361 else if (maybe_eq (nunits, 4U))
2362 return e.use_exact_insn (code_for_pred_crypto_vvx4_scalar
2363 (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
2364 else if (maybe_eq (nunits, 8U))
2365 return e.use_exact_insn (code_for_pred_crypto_vvx8_scalar
2366 (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
2367 else
2368 return e.use_exact_insn (code_for_pred_crypto_vvx16_scalar
2369 (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
2370 default:
2371 gcc_unreachable ();
2376 /* Implements vaeskf1/vsm4k. */
2377 template<int UNSPEC>
2378 class crypto_vi : public function_base
2380 public:
2381 bool apply_mask_policy_p () const override { return false; }
2382 bool use_mask_predication_p () const override { return false; }
2384 rtx expand (function_expander &e) const override
2386 return e.use_exact_insn
2387 (code_for_pred_crypto_vi_scalar (UNSPEC, e.vector_mode ()));
2391 /* Implements vaeskf2/vsm3c. */
2392 template<int UNSPEC>
2393 class vaeskf2_vsm3c : public function_base
2395 public:
2396 bool apply_mask_policy_p () const override { return false; }
2397 bool use_mask_predication_p () const override { return false; }
2398 bool has_merge_operand_p () const override { return false; }
2400 rtx expand (function_expander &e) const override
2402 return e.use_exact_insn
2403 (code_for_pred_vi_nomaskedoff_scalar (UNSPEC, e.vector_mode ()));
2407 /* Implements vsm3me. */
2408 class vsm3me : public function_base
2410 public:
2411 bool apply_mask_policy_p () const override { return false; }
2412 bool use_mask_predication_p () const override { return false; }
2414 rtx expand (function_expander &e) const override
2416 return e.use_exact_insn (code_for_pred_vsm3me (e.vector_mode ()));
2420 /* Implements vfncvtbf16_f. */
2421 template <enum frm_op_type FRM_OP = NO_FRM>
2422 class vfncvtbf16_f : public function_base
2424 public:
2425 bool has_rounding_mode_operand_p () const override
2427 return FRM_OP == HAS_FRM;
2430 bool may_require_frm_p () const override { return true; }
2432 rtx expand (function_expander &e) const override
2434 return e.use_exact_insn (code_for_pred_trunc_to_bf16 (e.vector_mode ()));
2438 /* Implements vfwcvtbf16_f. */
2439 class vfwcvtbf16_f : public function_base
2441 public:
2442 rtx expand (function_expander &e) const override
2444 return e.use_exact_insn (code_for_pred_extend_bf16_to (e.vector_mode ()));
2448 /* Implements vfwmaccbf16. */
2449 template <enum frm_op_type FRM_OP = NO_FRM>
2450 class vfwmaccbf16 : public function_base
2452 public:
2453 bool has_rounding_mode_operand_p () const override
2455 return FRM_OP == HAS_FRM;
2458 bool may_require_frm_p () const override { return true; }
2460 bool has_merge_operand_p () const override { return false; }
2462 rtx expand (function_expander &e) const override
2464 if (e.op_info->op == OP_TYPE_vf)
2465 return e.use_widen_ternop_insn (
2466 code_for_pred_widen_bf16_mul_scalar (e.vector_mode ()));
2467 if (e.op_info->op == OP_TYPE_vv)
2468 return e.use_widen_ternop_insn (
2469 code_for_pred_widen_bf16_mul (e.vector_mode ()));
2470 gcc_unreachable ();
/* Singleton function_base instances; the BASE macro further below
   exports a pointer to each under its intrinsic name.  */

/* Configuration-setting (vsetvl/vsetvlmax).  */
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
/* Unit-stride, strided and indexed loads/stores.  */
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
static CONSTEXPR const loadstore<true, LST_UNIT_STRIDE, false> vse_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vlm_obj;
static CONSTEXPR const loadstore<true, LST_UNIT_STRIDE, false> vsm_obj;
static CONSTEXPR const loadstore<false, LST_STRIDED, false> vlse_obj;
static CONSTEXPR const loadstore<true, LST_STRIDED, false> vsse_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei8_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei16_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei32_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei64_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei8_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei16_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei32_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei64_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei8_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei16_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei32_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei64_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei8_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei16_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei32_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei64_obj;
/* Integer arithmetic and logical operations.  */
static CONSTEXPR const binop<PLUS> vadd_obj;
static CONSTEXPR const binop<MINUS> vsub_obj;
static CONSTEXPR const vrsub vrsub_obj;
static CONSTEXPR const binop<AND> vand_obj;
static CONSTEXPR const binop<IOR> vor_obj;
static CONSTEXPR const binop<XOR> vxor_obj;
static CONSTEXPR const binop<ASHIFT> vsll_obj;
static CONSTEXPR const binop<ASHIFTRT> vsra_obj;
static CONSTEXPR const binop<LSHIFTRT> vsrl_obj;
static CONSTEXPR const binop<SMIN> vmin_obj;
static CONSTEXPR const binop<SMAX> vmax_obj;
static CONSTEXPR const binop<UMIN> vminu_obj;
static CONSTEXPR const binop<UMAX> vmaxu_obj;
static CONSTEXPR const binop<MULT> vmul_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHS> vmulh_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHU> vmulhu_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHSU> vmulhsu_obj;
static CONSTEXPR const binop<DIV> vdiv_obj;
static CONSTEXPR const binop<MOD> vrem_obj;
static CONSTEXPR const binop<UDIV> vdivu_obj;
static CONSTEXPR const binop<UMOD> vremu_obj;
static CONSTEXPR const unop<NEG> vneg_obj;
static CONSTEXPR const unop<NOT> vnot_obj;
static CONSTEXPR const ext<SIGN_EXTEND> vsext_obj;
static CONSTEXPR const ext<ZERO_EXTEND> vzext_obj;
/* Widening integer operations.  */
static CONSTEXPR const widen_binop<PLUS, SIGN_EXTEND>vwadd_obj;
static CONSTEXPR const widen_binop<MINUS, SIGN_EXTEND>vwsub_obj;
static CONSTEXPR const widen_binop<MULT, SIGN_EXTEND>vwmul_obj;
static CONSTEXPR const widen_binop<PLUS, ZERO_EXTEND>vwaddu_obj;
static CONSTEXPR const widen_binop<MINUS, ZERO_EXTEND>vwsubu_obj;
static CONSTEXPR const widen_binop<MULT, ZERO_EXTEND>vwmulu_obj;
static CONSTEXPR const vwmulsu vwmulsu_obj;
static CONSTEXPR const vwcvt<SIGN_EXTEND> vwcvt_x_obj;
static CONSTEXPR const vwcvt<ZERO_EXTEND> vwcvtu_x_obj;
/* Carry/borrow and narrowing operations.  */
static CONSTEXPR const vadc vadc_obj;
static CONSTEXPR const vsbc vsbc_obj;
static CONSTEXPR const vmadc vmadc_obj;
static CONSTEXPR const vmsbc vmsbc_obj;
static CONSTEXPR const vnshift<LSHIFTRT> vnsrl_obj;
static CONSTEXPR const vnshift<ASHIFTRT> vnsra_obj;
static CONSTEXPR const vncvt_x vncvt_x_obj;
static CONSTEXPR const vmerge vmerge_obj;
static CONSTEXPR const vmv_v vmv_v_obj;
/* Integer comparisons.  */
static CONSTEXPR const icmp<EQ> vmseq_obj;
static CONSTEXPR const icmp<NE> vmsne_obj;
static CONSTEXPR const icmp<LT> vmslt_obj;
static CONSTEXPR const icmp<GT> vmsgt_obj;
static CONSTEXPR const icmp<LE> vmsle_obj;
static CONSTEXPR const icmp<GE> vmsge_obj;
static CONSTEXPR const icmp<LTU> vmsltu_obj;
static CONSTEXPR const icmp<GTU> vmsgtu_obj;
static CONSTEXPR const icmp<LEU> vmsleu_obj;
static CONSTEXPR const icmp<GEU> vmsgeu_obj;
/* Integer multiply-accumulate.  */
static CONSTEXPR const vmacc vmacc_obj;
static CONSTEXPR const vnmsac vnmsac_obj;
static CONSTEXPR const vmadd vmadd_obj;
static CONSTEXPR const vnmsub vnmsub_obj;
static CONSTEXPR const vwmacc vwmacc_obj;
static CONSTEXPR const vwmaccu vwmaccu_obj;
static CONSTEXPR const vwmaccsu vwmaccsu_obj;
static CONSTEXPR const vwmaccus vwmaccus_obj;
/* Saturating / fixed-point arithmetic.  */
static CONSTEXPR const binop<SS_PLUS> vsadd_obj;
static CONSTEXPR const binop<SS_MINUS> vssub_obj;
static CONSTEXPR const binop<US_PLUS> vsaddu_obj;
static CONSTEXPR const binop<US_MINUS> vssubu_obj;
static CONSTEXPR const sat_op<UNSPEC_VAADDU> vaaddu_obj;
static CONSTEXPR const sat_op<UNSPEC_VAADD> vaadd_obj;
static CONSTEXPR const sat_op<UNSPEC_VASUBU> vasubu_obj;
static CONSTEXPR const sat_op<UNSPEC_VASUB> vasub_obj;
static CONSTEXPR const sat_op<UNSPEC_VSMUL> vsmul_obj;
static CONSTEXPR const sat_op<UNSPEC_VSSRL> vssrl_obj;
static CONSTEXPR const sat_op<UNSPEC_VSSRA> vssra_obj;
static CONSTEXPR const vnclip<UNSPEC_VNCLIP> vnclip_obj;
static CONSTEXPR const vnclip<UNSPEC_VNCLIPU> vnclipu_obj;
/* Mask operations.  */
static CONSTEXPR const mask_logic<AND> vmand_obj;
static CONSTEXPR const mask_nlogic<AND> vmnand_obj;
static CONSTEXPR const mask_notlogic<AND> vmandn_obj;
static CONSTEXPR const mask_logic<XOR> vmxor_obj;
static CONSTEXPR const mask_logic<IOR> vmor_obj;
static CONSTEXPR const mask_nlogic<IOR> vmnor_obj;
static CONSTEXPR const mask_notlogic<IOR> vmorn_obj;
static CONSTEXPR const mask_nlogic<XOR> vmxnor_obj;
static CONSTEXPR const vmmv vmmv_obj;
static CONSTEXPR const vmclr vmclr_obj;
static CONSTEXPR const vmset vmset_obj;
static CONSTEXPR const vmnot vmnot_obj;
static CONSTEXPR const vcpop vcpop_obj;
static CONSTEXPR const vfirst vfirst_obj;
static CONSTEXPR const mask_misc<UNSPEC_VMSBF> vmsbf_obj;
static CONSTEXPR const mask_misc<UNSPEC_VMSIF> vmsif_obj;
static CONSTEXPR const mask_misc<UNSPEC_VMSOF> vmsof_obj;
static CONSTEXPR const viota viota_obj;
static CONSTEXPR const vid vid_obj;
/* Floating-point arithmetic; _frm variants take an explicit
   rounding-mode operand (HAS_FRM).  */
static CONSTEXPR const binop<PLUS, true> vfadd_obj;
static CONSTEXPR const binop<MINUS, true> vfsub_obj;
static CONSTEXPR const binop<PLUS, true, HAS_FRM> vfadd_frm_obj;
static CONSTEXPR const binop<MINUS, true, HAS_FRM> vfsub_frm_obj;
static CONSTEXPR const reverse_binop<MINUS> vfrsub_obj;
static CONSTEXPR const reverse_binop<MINUS, HAS_FRM> vfrsub_frm_obj;
static CONSTEXPR const widen_binop_fp<PLUS> vfwadd_obj;
static CONSTEXPR const widen_binop_fp<PLUS, HAS_FRM> vfwadd_frm_obj;
static CONSTEXPR const widen_binop_fp<MINUS> vfwsub_obj;
static CONSTEXPR const widen_binop_fp<MINUS, HAS_FRM> vfwsub_frm_obj;
static CONSTEXPR const binop<MULT, true> vfmul_obj;
static CONSTEXPR const binop<MULT, true, HAS_FRM> vfmul_frm_obj;
static CONSTEXPR const binop<DIV, true> vfdiv_obj;
static CONSTEXPR const binop<DIV, true, HAS_FRM> vfdiv_frm_obj;
static CONSTEXPR const reverse_binop<DIV> vfrdiv_obj;
static CONSTEXPR const reverse_binop<DIV, HAS_FRM> vfrdiv_frm_obj;
static CONSTEXPR const widen_binop_fp<MULT> vfwmul_obj;
static CONSTEXPR const widen_binop_fp<MULT, HAS_FRM> vfwmul_frm_obj;
/* Floating-point fused multiply-add family.  */
static CONSTEXPR const vfmacc<NO_FRM> vfmacc_obj;
static CONSTEXPR const vfmacc<HAS_FRM> vfmacc_frm_obj;
static CONSTEXPR const vfnmsac<NO_FRM> vfnmsac_obj;
static CONSTEXPR const vfnmsac<HAS_FRM> vfnmsac_frm_obj;
static CONSTEXPR const vfmadd<NO_FRM> vfmadd_obj;
static CONSTEXPR const vfmadd<HAS_FRM> vfmadd_frm_obj;
static CONSTEXPR const vfnmsub<NO_FRM> vfnmsub_obj;
static CONSTEXPR const vfnmsub<HAS_FRM> vfnmsub_frm_obj;
static CONSTEXPR const vfnmacc<NO_FRM> vfnmacc_obj;
static CONSTEXPR const vfnmacc<HAS_FRM> vfnmacc_frm_obj;
static CONSTEXPR const vfmsac<NO_FRM> vfmsac_obj;
static CONSTEXPR const vfmsac<HAS_FRM> vfmsac_frm_obj;
static CONSTEXPR const vfnmadd<NO_FRM> vfnmadd_obj;
static CONSTEXPR const vfnmadd<HAS_FRM> vfnmadd_frm_obj;
static CONSTEXPR const vfmsub<NO_FRM> vfmsub_obj;
static CONSTEXPR const vfmsub<HAS_FRM> vfmsub_frm_obj;
static CONSTEXPR const vfwmacc<NO_FRM> vfwmacc_obj;
static CONSTEXPR const vfwmacc<HAS_FRM> vfwmacc_frm_obj;
static CONSTEXPR const vfwnmacc<NO_FRM> vfwnmacc_obj;
static CONSTEXPR const vfwnmacc<HAS_FRM> vfwnmacc_frm_obj;
static CONSTEXPR const vfwmsac<NO_FRM> vfwmsac_obj;
static CONSTEXPR const vfwmsac<HAS_FRM> vfwmsac_frm_obj;
static CONSTEXPR const vfwnmsac<NO_FRM> vfwnmsac_obj;
static CONSTEXPR const vfwnmsac<HAS_FRM> vfwnmsac_frm_obj;
/* Floating-point unary, sign and comparison operations.  */
static CONSTEXPR const unop<SQRT> vfsqrt_obj;
static CONSTEXPR const unop<SQRT, HAS_FRM> vfsqrt_frm_obj;
static CONSTEXPR const float_misc<UNSPEC_VFRSQRT7> vfrsqrt7_obj;
static CONSTEXPR const float_misc<UNSPEC_VFREC7> vfrec7_obj;
static CONSTEXPR const float_misc<UNSPEC_VFREC7, HAS_FRM> vfrec7_frm_obj;
static CONSTEXPR const binop<SMIN> vfmin_obj;
static CONSTEXPR const binop<SMAX> vfmax_obj;
static CONSTEXPR const float_misc<UNSPEC_VCOPYSIGN> vfsgnj_obj;
static CONSTEXPR const vfsgnjn vfsgnjn_obj;
static CONSTEXPR const float_misc<UNSPEC_VXORSIGN> vfsgnjx_obj;
static CONSTEXPR const unop<NEG> vfneg_obj;
static CONSTEXPR const unop<ABS> vfabs_obj;
static CONSTEXPR const fcmp<EQ> vmfeq_obj;
static CONSTEXPR const fcmp<NE> vmfne_obj;
static CONSTEXPR const fcmp<LT> vmflt_obj;
static CONSTEXPR const fcmp<GT> vmfgt_obj;
static CONSTEXPR const fcmp<LE> vmfle_obj;
static CONSTEXPR const fcmp<GE> vmfge_obj;
static CONSTEXPR const vfclass vfclass_obj;
static CONSTEXPR const vmerge vfmerge_obj;
static CONSTEXPR const vmv_v vfmv_v_obj;
/* Floating-point/integer conversions.  */
static CONSTEXPR const vfcvt_x<UNSPEC_VFCVT> vfcvt_x_obj;
static CONSTEXPR const vfcvt_x<UNSPEC_VFCVT, HAS_FRM> vfcvt_x_frm_obj;
static CONSTEXPR const vfcvt_x<UNSPEC_UNSIGNED_VFCVT> vfcvt_xu_obj;
static CONSTEXPR const vfcvt_x<UNSPEC_UNSIGNED_VFCVT, HAS_FRM> vfcvt_xu_frm_obj;
static CONSTEXPR const vfcvt_rtz_x<FIX> vfcvt_rtz_x_obj;
static CONSTEXPR const vfcvt_rtz_x<UNSIGNED_FIX> vfcvt_rtz_xu_obj;
static CONSTEXPR const vfcvt_f<NO_FRM> vfcvt_f_obj;
static CONSTEXPR const vfcvt_f<HAS_FRM> vfcvt_f_frm_obj;
static CONSTEXPR const vfwcvt_x<UNSPEC_VFCVT> vfwcvt_x_obj;
static CONSTEXPR const vfwcvt_x<UNSPEC_VFCVT, HAS_FRM> vfwcvt_x_frm_obj;
static CONSTEXPR const vfwcvt_x<UNSPEC_UNSIGNED_VFCVT> vfwcvt_xu_obj;
static CONSTEXPR const vfwcvt_x<UNSPEC_UNSIGNED_VFCVT, HAS_FRM> vfwcvt_xu_frm_obj;
static CONSTEXPR const vfwcvt_rtz_x<FIX> vfwcvt_rtz_x_obj;
static CONSTEXPR const vfwcvt_rtz_x<UNSIGNED_FIX> vfwcvt_rtz_xu_obj;
static CONSTEXPR const vfwcvt_f vfwcvt_f_obj;
static CONSTEXPR const vfncvt_x<UNSPEC_VFCVT> vfncvt_x_obj;
static CONSTEXPR const vfncvt_x<UNSPEC_VFCVT, HAS_FRM> vfncvt_x_frm_obj;
static CONSTEXPR const vfncvt_x<UNSPEC_UNSIGNED_VFCVT> vfncvt_xu_obj;
static CONSTEXPR const vfncvt_x<UNSPEC_UNSIGNED_VFCVT, HAS_FRM> vfncvt_xu_frm_obj;
static CONSTEXPR const vfncvt_rtz_x<FIX> vfncvt_rtz_x_obj;
static CONSTEXPR const vfncvt_rtz_x<UNSIGNED_FIX> vfncvt_rtz_xu_obj;
static CONSTEXPR const vfncvt_f<NO_FRM> vfncvt_f_obj;
static CONSTEXPR const vfncvt_f<HAS_FRM> vfncvt_f_frm_obj;
static CONSTEXPR const vfncvt_rod_f vfncvt_rod_f_obj;
/* Reductions.  */
static CONSTEXPR const reducop<UNSPEC_REDUC_SUM> vredsum_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MAXU> vredmaxu_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MAX> vredmax_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MINU> vredminu_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MIN> vredmin_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_AND> vredand_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_OR> vredor_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_XOR> vredxor_obj;
static CONSTEXPR const reducop<UNSPEC_WREDUC_SUM> vwredsum_obj;
static CONSTEXPR const reducop<UNSPEC_WREDUC_SUMU> vwredsumu_obj;
static CONSTEXPR const freducop<UNSPEC_REDUC_SUM_UNORDERED> vfredusum_obj;
static CONSTEXPR const freducop<UNSPEC_REDUC_SUM_UNORDERED, HAS_FRM> vfredusum_frm_obj;
static CONSTEXPR const freducop<UNSPEC_REDUC_SUM_ORDERED> vfredosum_obj;
static CONSTEXPR const freducop<UNSPEC_REDUC_SUM_ORDERED, HAS_FRM> vfredosum_frm_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MAX> vfredmax_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MIN> vfredmin_obj;
static CONSTEXPR const freducop<UNSPEC_WREDUC_SUM_UNORDERED> vfwredusum_obj;
static CONSTEXPR const freducop<UNSPEC_WREDUC_SUM_UNORDERED, HAS_FRM> vfwredusum_frm_obj;
static CONSTEXPR const freducop<UNSPEC_WREDUC_SUM_ORDERED> vfwredosum_obj;
static CONSTEXPR const freducop<UNSPEC_WREDUC_SUM_ORDERED, HAS_FRM> vfwredosum_frm_obj;
/* Scalar moves, slides and permutations.  */
static CONSTEXPR const vmv vmv_x_obj;
static CONSTEXPR const vmv_s vmv_s_obj;
static CONSTEXPR const vmv vfmv_f_obj;
static CONSTEXPR const vmv_s vfmv_s_obj;
static CONSTEXPR const slideop<UNSPEC_VSLIDEUP> vslideup_obj;
static CONSTEXPR const slideop<UNSPEC_VSLIDEDOWN> vslidedown_obj;
static CONSTEXPR const slideop<UNSPEC_VSLIDE1UP> vslide1up_obj;
static CONSTEXPR const slideop<UNSPEC_VSLIDE1DOWN> vslide1down_obj;
static CONSTEXPR const slideop<UNSPEC_VFSLIDE1UP> vfslide1up_obj;
static CONSTEXPR const slideop<UNSPEC_VFSLIDE1DOWN> vfslide1down_obj;
static CONSTEXPR const vrgather vrgather_obj;
static CONSTEXPR const vrgatherei16 vrgatherei16_obj;
static CONSTEXPR const vcompress vcompress_obj;
/* Type/tuple manipulation pseudo-intrinsics.  */
static CONSTEXPR const vundefined vundefined_obj;
static CONSTEXPR const vreinterpret vreinterpret_obj;
static CONSTEXPR const vlmul_ext vlmul_ext_obj;
static CONSTEXPR const vlmul_trunc vlmul_trunc_obj;
static CONSTEXPR const vset vset_obj;
static CONSTEXPR const vget vget_obj;
static CONSTEXPR const vcreate vcreate_obj;
static CONSTEXPR const read_vl read_vl_obj;
static CONSTEXPR const vleff vleff_obj;
static CONSTEXPR const vlenb vlenb_obj;
/* Segment loads/stores.  */
static CONSTEXPR const vlseg vlseg_obj;
static CONSTEXPR const vsseg vsseg_obj;
static CONSTEXPR const vlsseg vlsseg_obj;
static CONSTEXPR const vssseg vssseg_obj;
static CONSTEXPR const seg_indexed_load<UNSPEC_UNORDERED> vluxseg_obj;
static CONSTEXPR const seg_indexed_load<UNSPEC_ORDERED> vloxseg_obj;
static CONSTEXPR const seg_indexed_store<UNSPEC_UNORDERED> vsuxseg_obj;
static CONSTEXPR const seg_indexed_store<UNSPEC_ORDERED> vsoxseg_obj;
static CONSTEXPR const vlsegff vlsegff_obj;
/* XTheadVector fixed-width loads/stores.  */
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLB> vlb_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLBU> vlbu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLH> vlh_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLHU> vlhu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLW> vlw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLWU> vlwu_obj;
static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLB> vsb_obj;
static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLH> vsh_obj;
static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLW> vsw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSB> vlsb_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSBU> vlsbu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSH> vlsh_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSHU> vlshu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSW> vlsw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSWU> vlswu_obj;
static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSB> vssb_obj;
static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSH> vssh_obj;
static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSW> vssw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXB> vlxb_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXBU> vlxbu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXH> vlxh_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXHU> vlxhu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXW> vlxw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXWU> vlxwu_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXB> vsxb_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXH> vsxh_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXW> vsxw_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXB> vsuxb_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXH> vsuxh_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXW> vsuxw_obj;
static CONSTEXPR const th_extract vext_x_v_obj;
/* Crypto vector intrinsic objects.  */
static CONSTEXPR const vandn vandn_obj;
static CONSTEXPR const bitmanip<ROTATE> vrol_obj;
static CONSTEXPR const bitmanip<ROTATERT> vror_obj;
static CONSTEXPR const b_reverse<UNSPEC_VBREV> vbrev_obj;
static CONSTEXPR const b_reverse<UNSPEC_VBREV8> vbrev8_obj;
static CONSTEXPR const b_reverse<UNSPEC_VREV8> vrev8_obj;
static CONSTEXPR const bitmanip<CLZ> vclz_obj;
static CONSTEXPR const bitmanip<CTZ> vctz_obj;
static CONSTEXPR const vwsll vwsll_obj;
static CONSTEXPR const clmul<UNSPEC_VCLMUL> vclmul_obj;
static CONSTEXPR const clmul<UNSPEC_VCLMULH> vclmulh_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VGHSH> vghsh_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VGMUL> vgmul_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESEF> vaesef_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESEM> vaesem_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESDF> vaesdf_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESDM> vaesdm_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESZ> vaesz_obj;
static CONSTEXPR const crypto_vi<UNSPEC_VAESKF1> vaeskf1_obj;
static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VAESKF2> vaeskf2_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2MS> vsha2ms_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CH> vsha2ch_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CL> vsha2cl_obj;
static CONSTEXPR const crypto_vi<UNSPEC_VSM4K> vsm4k_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VSM4R> vsm4r_obj;
static CONSTEXPR const vsm3me vsm3me_obj;
static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VSM3C> vsm3c_obj;
/* Zvfbfmin (bfloat16 conversion) intrinsic objects.  */
static CONSTEXPR const vfncvtbf16_f<NO_FRM> vfncvtbf16_f_obj;
static CONSTEXPR const vfncvtbf16_f<HAS_FRM> vfncvtbf16_f_frm_obj;
static CONSTEXPR const vfwcvtbf16_f vfwcvtbf16_f_obj;
/* Zvfbfwma (bfloat16 widening multiply-accumulate) intrinsic objects.  */
static CONSTEXPR const vfwmaccbf16<NO_FRM> vfwmaccbf16_obj;
static CONSTEXPR const vfwmaccbf16<HAS_FRM> vfwmaccbf16_frm_obj;
/* Declare the function base NAME, pointing it to an instance
   of class <NAME>_obj above.  Each expands to a pointer constant
   in namespace bases.  */
#define BASE(NAME) \
  namespace bases { const function_base *const NAME = &NAME##_obj; }
/* One BASE invocation per intrinsic; each exports the corresponding
   *_obj singleton defined above.  */
BASE (vsetvl)
BASE (vsetvlmax)
BASE (vle)
BASE (vse)
BASE (vlm)
BASE (vsm)
BASE (vlse)
BASE (vsse)
BASE (vluxei8)
BASE (vluxei16)
BASE (vluxei32)
BASE (vluxei64)
BASE (vloxei8)
BASE (vloxei16)
BASE (vloxei32)
BASE (vloxei64)
BASE (vsuxei8)
BASE (vsuxei16)
BASE (vsuxei32)
BASE (vsuxei64)
BASE (vsoxei8)
BASE (vsoxei16)
BASE (vsoxei32)
BASE (vsoxei64)
BASE (vadd)
BASE (vsub)
BASE (vrsub)
BASE (vand)
BASE (vor)
BASE (vxor)
BASE (vsll)
BASE (vsra)
BASE (vsrl)
BASE (vmin)
BASE (vmax)
BASE (vminu)
BASE (vmaxu)
BASE (vmul)
BASE (vmulh)
BASE (vmulhu)
BASE (vmulhsu)
BASE (vdiv)
BASE (vrem)
BASE (vdivu)
BASE (vremu)
BASE (vneg)
BASE (vnot)
BASE (vsext)
BASE (vzext)
BASE (vwadd)
BASE (vwsub)
BASE (vwmul)
BASE (vwaddu)
BASE (vwsubu)
BASE (vwmulu)
BASE (vwmulsu)
BASE (vwcvt_x)
BASE (vwcvtu_x)
BASE (vadc)
BASE (vsbc)
BASE (vmadc)
BASE (vmsbc)
BASE (vnsrl)
BASE (vnsra)
BASE (vncvt_x)
BASE (vmerge)
BASE (vmv_v)
BASE (vmseq)
BASE (vmsne)
BASE (vmslt)
BASE (vmsgt)
BASE (vmsle)
BASE (vmsge)
BASE (vmsltu)
BASE (vmsgtu)
BASE (vmsleu)
BASE (vmsgeu)
BASE (vmacc)
BASE (vnmsac)
BASE (vmadd)
BASE (vnmsub)
BASE (vwmacc)
BASE (vwmaccu)
BASE (vwmaccsu)
BASE (vwmaccus)
BASE (vsadd)
BASE (vssub)
BASE (vsaddu)
BASE (vssubu)
BASE (vaadd)
BASE (vasub)
BASE (vaaddu)
BASE (vasubu)
BASE (vsmul)
BASE (vssra)
BASE (vssrl)
BASE (vnclip)
BASE (vnclipu)
BASE (vmand)
BASE (vmnand)
BASE (vmandn)
BASE (vmxor)
BASE (vmor)
BASE (vmnor)
BASE (vmorn)
BASE (vmxnor)
BASE (vmmv)
BASE (vmclr)
BASE (vmset)
BASE (vmnot)
BASE (vcpop)
BASE (vfirst)
BASE (vmsbf)
BASE (vmsif)
BASE (vmsof)
BASE (viota)
BASE (vid)
BASE (vfadd)
BASE (vfadd_frm)
BASE (vfsub)
BASE (vfsub_frm)
BASE (vfrsub)
BASE (vfrsub_frm)
BASE (vfwadd)
BASE (vfwadd_frm)
BASE (vfwsub)
BASE (vfwsub_frm)
BASE (vfmul)
BASE (vfmul_frm)
BASE (vfdiv)
BASE (vfdiv_frm)
BASE (vfrdiv)
BASE (vfrdiv_frm)
BASE (vfwmul)
BASE (vfwmul_frm)
BASE (vfmacc)
BASE (vfmacc_frm)
BASE (vfnmsac)
BASE (vfnmsac_frm)
BASE (vfmadd)
BASE (vfmadd_frm)
BASE (vfnmsub)
BASE (vfnmsub_frm)
BASE (vfnmacc)
BASE (vfnmacc_frm)
BASE (vfmsac)
BASE (vfmsac_frm)
BASE (vfnmadd)
BASE (vfnmadd_frm)
BASE (vfmsub)
BASE (vfmsub_frm)
BASE (vfwmacc)
BASE (vfwmacc_frm)
BASE (vfwnmacc)
BASE (vfwnmacc_frm)
BASE (vfwmsac)
BASE (vfwmsac_frm)
BASE (vfwnmsac)
BASE (vfwnmsac_frm)
BASE (vfsqrt)
BASE (vfsqrt_frm)
BASE (vfrsqrt7)
BASE (vfrec7)
BASE (vfrec7_frm)
BASE (vfmin)
BASE (vfmax)
BASE (vfsgnj)
BASE (vfsgnjn)
BASE (vfsgnjx)
BASE (vfneg)
BASE (vfabs)
BASE (vmfeq)
BASE (vmfne)
BASE (vmflt)
BASE (vmfgt)
BASE (vmfle)
BASE (vmfge)
BASE (vfclass)
BASE (vfmerge)
BASE (vfmv_v)
BASE (vfcvt_x)
BASE (vfcvt_x_frm)
BASE (vfcvt_xu)
BASE (vfcvt_xu_frm)
BASE (vfcvt_rtz_x)
BASE (vfcvt_rtz_xu)
BASE (vfcvt_f)
BASE (vfcvt_f_frm)
BASE (vfwcvt_x)
BASE (vfwcvt_x_frm)
BASE (vfwcvt_xu)
BASE (vfwcvt_xu_frm)
BASE (vfwcvt_rtz_x)
BASE (vfwcvt_rtz_xu)
BASE (vfwcvt_f)
BASE (vfncvt_x)
BASE (vfncvt_x_frm)
BASE (vfncvt_xu)
BASE (vfncvt_xu_frm)
BASE (vfncvt_rtz_x)
BASE (vfncvt_rtz_xu)
BASE (vfncvt_f)
BASE (vfncvt_f_frm)
BASE (vfncvt_rod_f)
BASE (vredsum)
BASE (vredmaxu)
BASE (vredmax)
BASE (vredminu)
BASE (vredmin)
BASE (vredand)
BASE (vredor)
BASE (vredxor)
BASE (vwredsum)
BASE (vwredsumu)
BASE (vfredusum)
BASE (vfredusum_frm)
BASE (vfredosum)
BASE (vfredosum_frm)
BASE (vfredmax)
BASE (vfredmin)
BASE (vfwredosum)
BASE (vfwredosum_frm)
BASE (vfwredusum)
BASE (vfwredusum_frm)
BASE (vmv_x)
BASE (vmv_s)
BASE (vfmv_f)
BASE (vfmv_s)
BASE (vslideup)
BASE (vslidedown)
BASE (vslide1up)
BASE (vslide1down)
BASE (vfslide1up)
BASE (vfslide1down)
BASE (vrgather)
BASE (vrgatherei16)
BASE (vcompress)
BASE (vundefined)
BASE (vreinterpret)
BASE (vlmul_ext)
BASE (vlmul_trunc)
BASE (vset)
BASE (vget)
BASE (vcreate)
BASE (read_vl)
BASE (vleff)
BASE (vlenb)
BASE (vlseg)
BASE (vsseg)
BASE (vlsseg)
BASE (vssseg)
BASE (vluxseg)
BASE (vloxseg)
BASE (vsuxseg)
BASE (vsoxseg)
BASE (vlsegff)
/* XTheadVector.  */
BASE (vlb)
BASE (vlh)
BASE (vlw)
BASE (vlbu)
BASE (vlhu)
BASE (vlwu)
BASE (vsb)
BASE (vsh)
BASE (vsw)
BASE (vlsb)
BASE (vlsh)
BASE (vlsw)
BASE (vlsbu)
BASE (vlshu)
BASE (vlswu)
BASE (vssb)
BASE (vssh)
BASE (vssw)
BASE (vlxb)
BASE (vlxh)
BASE (vlxw)
BASE (vlxbu)
BASE (vlxhu)
BASE (vlxwu)
BASE (vsxb)
BASE (vsxh)
BASE (vsxw)
BASE (vsuxb)
BASE (vsuxh)
BASE (vsuxw)
BASE (vext_x_v)
/* Crypto vector */
BASE (vandn)
BASE (vbrev)
BASE (vbrev8)
BASE (vrev8)
BASE (vclz)
BASE (vctz)
BASE (vrol)
BASE (vror)
BASE (vwsll)
BASE (vclmul)
BASE (vclmulh)
BASE (vghsh)
BASE (vgmul)
BASE (vaesef)
BASE (vaesem)
BASE (vaesdf)
BASE (vaesdm)
BASE (vaesz)
BASE (vaeskf1)
BASE (vaeskf2)
BASE (vsha2ms)
BASE (vsha2ch)
BASE (vsha2cl)
BASE (vsm4k)
BASE (vsm4r)
BASE (vsm3me)
BASE (vsm3c)
/* Zvfbfmin */
BASE (vfncvtbf16_f)
BASE (vfncvtbf16_f_frm)
BASE (vfwcvtbf16_f)
/* Zvfbfwma */
BASE (vfwmaccbf16)
BASE (vfwmaccbf16_frm)
3126 } // end namespace riscv_vector