[committed] Fix RISC-V missing stack tie
[official-gcc.git] / gcc / config / riscv / riscv-vector-builtins-bases.cc
blobb6f6e4ff37e787f3144faccd178fb3bc1ee537db
1 /* function_base implementation for RISC-V 'V' Extension for GNU compiler.
2 Copyright (C) 2022-2024 Free Software Foundation, Inc.
3 Contributed by Ju-Zhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
10 any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
21 #include "config.h"
22 #include "system.h"
23 #include "coretypes.h"
24 #include "tm.h"
25 #include "tree.h"
26 #include "rtl.h"
27 #include "tm_p.h"
28 #include "memmodel.h"
29 #include "insn-codes.h"
30 #include "optabs.h"
31 #include "recog.h"
32 #include "expr.h"
33 #include "basic-block.h"
34 #include "function.h"
35 #include "fold-const.h"
36 #include "gimple.h"
37 #include "gimple-iterator.h"
38 #include "gimplify.h"
39 #include "explow.h"
40 #include "emit-rtl.h"
41 #include "tree-vector-builder.h"
42 #include "rtx-vector-builder.h"
43 #include "riscv-vector-builtins.h"
44 #include "riscv-vector-builtins-shapes.h"
45 #include "riscv-vector-builtins-bases.h"
47 using namespace riscv_vector;
49 namespace riscv_vector {
/* Enumerates types of loads/stores operations.
   It's only used in here so we don't define it
   in riscv-vector-builtins-bases.h.  */
enum lst_type
{
  LST_UNIT_STRIDE,
  LST_STRIDED,
  LST_INDEXED,
};

/* Whether an FP builtin carries an explicit frm (rounding mode) operand.  */
enum frm_op_type
{
  NO_FRM,
  HAS_FRM,
};
67 /* Helper function to fold vleff and vlsegff. */
68 static gimple *
69 fold_fault_load (gimple_folder &f)
71 /* fold fault_load (const *base, size_t *new_vl, size_t vl)
73 ====> fault_load (const *base, size_t vl)
74 new_vl = MEM_REF[read_vl ()]. */
76 auto_vec<tree> vargs (gimple_call_num_args (f.call) - 1);
78 for (unsigned i = 0; i < gimple_call_num_args (f.call); i++)
80 /* Exclude size_t *new_vl argument. */
81 if (i == gimple_call_num_args (f.call) - 2)
82 continue;
84 vargs.quick_push (gimple_call_arg (f.call, i));
87 gimple *repl = gimple_build_call_vec (gimple_call_fn (f.call), vargs);
88 gimple_call_set_lhs (repl, f.lhs);
90 /* Handle size_t *new_vl by read_vl. */
91 tree new_vl = gimple_call_arg (f.call, gimple_call_num_args (f.call) - 2);
92 if (integer_zerop (new_vl))
94 /* This case happens when user passes the nullptr to new_vl argument.
95 In this case, we just need to ignore the new_vl argument and return
96 fault_load instruction directly. */
97 return repl;
100 tree tmp_var = create_tmp_var (size_type_node, "new_vl");
101 tree decl = get_read_vl_decl ();
102 gimple *g = gimple_build_call (decl, 0);
103 gimple_call_set_lhs (g, tmp_var);
104 tree indirect
105 = fold_build2 (MEM_REF, size_type_node,
106 gimple_call_arg (f.call, gimple_call_num_args (f.call) - 2),
107 build_int_cst (build_pointer_type (size_type_node), 0));
108 gassign *assign = gimple_build_assign (indirect, tmp_var);
110 gsi_insert_after (f.gsi, assign, GSI_SAME_STMT);
111 gsi_insert_after (f.gsi, g, GSI_SAME_STMT);
112 return repl;
115 /* Implements vsetvl<mode> && vsetvlmax<mode>. */
116 template<bool VLMAX_P>
117 class vsetvl : public function_base
119 public:
120 bool apply_vl_p () const override
122 return false;
125 rtx expand (function_expander &e) const override
127 if (VLMAX_P)
128 e.add_input_operand (Pmode, gen_rtx_REG (Pmode, 0));
129 else
130 e.add_input_operand (0);
132 tree type = builtin_types[e.type.index].vector;
133 machine_mode mode = TYPE_MODE (type);
135 if (TARGET_XTHEADVECTOR)
137 machine_mode inner_mode = GET_MODE_INNER (mode);
138 /* SEW. */
139 e.add_input_operand (Pmode,
140 gen_int_mode (GET_MODE_BITSIZE (inner_mode), Pmode));
141 /* LMUL. */
142 e.add_input_operand (Pmode,
143 gen_int_mode (get_vlmul (mode), Pmode));
145 else
147 /* Normalize same RATO (SEW/LMUL) into same vsetvl instruction.
149 - e8,mf8/e16,mf4/e32,mf2/e64,m1 --> e8mf8
150 - e8,mf4/e16,mf2/e32,m1/e64,m2 --> e8mf4
151 - e8,mf2/e16,m1/e32,m2/e64,m4 --> e8mf2
152 - e8,m1/e16,m2/e32,m4/e64,m8 --> e8m1
153 - e8,m2/e16,m4/e32,m8 --> e8m2
154 - e8,m4/e16,m8 --> e8m4
155 - e8,m8 --> e8m8
157 /* SEW. */
158 e.add_input_operand (Pmode, gen_int_mode (8, Pmode));
160 /* LMUL. */
161 machine_mode e8_mode
162 = get_vector_mode (QImode, GET_MODE_NUNITS (mode)).require ();
163 e.add_input_operand (Pmode, gen_int_mode (get_vlmul (e8_mode), Pmode));
166 /* TAIL_ANY. */
167 e.add_input_operand (Pmode,
168 gen_int_mode (get_prefer_tail_policy (), Pmode));
170 /* MASK_ANY. */
171 e.add_input_operand (Pmode,
172 gen_int_mode (get_prefer_mask_policy (), Pmode));
173 return e.generate_insn (code_for_vsetvl_no_side_effects (Pmode));
177 /* Implements
178 * vle.v/vse.v/vlm.v/vsm.v/vlse.v/vsse.v/vluxei.v/vloxei.v/vsuxei.v/vsoxei.v
179 * codegen. */
180 template<bool STORE_P, lst_type LST_TYPE, bool ORDERED_P>
181 class loadstore : public function_base
183 public:
184 bool apply_tail_policy_p () const override { return !STORE_P; }
185 bool apply_mask_policy_p () const override { return !STORE_P; }
187 unsigned int call_properties (const function_instance &) const override
189 if (STORE_P)
190 return CP_WRITE_MEMORY;
191 else
192 return CP_READ_MEMORY;
195 bool can_be_overloaded_p (enum predication_type_index pred) const override
197 if (STORE_P || LST_TYPE == LST_INDEXED)
198 return true;
199 return pred != PRED_TYPE_none;
202 rtx expand (function_expander &e) const override
204 if (LST_TYPE == LST_INDEXED)
206 int unspec = ORDERED_P ? UNSPEC_ORDERED : UNSPEC_UNORDERED;
207 if (STORE_P)
208 return e.use_exact_insn (
209 code_for_pred_indexed_store (unspec, e.vector_mode (),
210 e.index_mode ()));
211 else
213 unsigned src_eew_bitsize
214 = GET_MODE_BITSIZE (GET_MODE_INNER (e.index_mode ()));
215 unsigned dst_eew_bitsize
216 = GET_MODE_BITSIZE (GET_MODE_INNER (e.vector_mode ()));
217 if (dst_eew_bitsize == src_eew_bitsize)
218 return e.use_exact_insn (
219 code_for_pred_indexed_load_same_eew (unspec, e.vector_mode ()));
220 else if (dst_eew_bitsize > src_eew_bitsize)
222 unsigned factor = dst_eew_bitsize / src_eew_bitsize;
223 switch (factor)
225 case 2:
226 return e.use_exact_insn (
227 code_for_pred_indexed_load_x2_greater_eew (
228 unspec, e.vector_mode ()));
229 case 4:
230 return e.use_exact_insn (
231 code_for_pred_indexed_load_x4_greater_eew (
232 unspec, e.vector_mode ()));
233 case 8:
234 return e.use_exact_insn (
235 code_for_pred_indexed_load_x8_greater_eew (
236 unspec, e.vector_mode ()));
237 default:
238 gcc_unreachable ();
241 else
243 unsigned factor = src_eew_bitsize / dst_eew_bitsize;
244 switch (factor)
246 case 2:
247 return e.use_exact_insn (
248 code_for_pred_indexed_load_x2_smaller_eew (
249 unspec, e.vector_mode ()));
250 case 4:
251 return e.use_exact_insn (
252 code_for_pred_indexed_load_x4_smaller_eew (
253 unspec, e.vector_mode ()));
254 case 8:
255 return e.use_exact_insn (
256 code_for_pred_indexed_load_x8_smaller_eew (
257 unspec, e.vector_mode ()));
258 default:
259 gcc_unreachable ();
264 else if (LST_TYPE == LST_STRIDED)
266 if (STORE_P)
267 return e.use_contiguous_store_insn (
268 code_for_pred_strided_store (e.vector_mode ()));
269 else
270 return e.use_contiguous_load_insn (
271 code_for_pred_strided_load (e.vector_mode ()));
273 else
275 if (STORE_P)
276 return e.use_contiguous_store_insn (
277 code_for_pred_store (e.vector_mode ()));
278 else
279 return e.use_contiguous_load_insn (
280 code_for_pred_mov (e.vector_mode ()));
285 /* Implements
286 vadd/vsub/vand/vor/vxor/vsll/vsra/vsrl/
287 vmin/vmax/vminu/vmaxu/vdiv/vrem/vdivu/
288 vremu/vsadd/vsaddu/vssub/vssubu
289 vfadd/vfsub/
291 template <rtx_code CODE, bool MAY_REQUIRE_FRM = false,
292 enum frm_op_type FRM_OP = NO_FRM>
293 class binop : public function_base
295 public:
296 bool has_rounding_mode_operand_p () const override
298 return FRM_OP == HAS_FRM;
301 bool may_require_frm_p () const override { return MAY_REQUIRE_FRM; }
303 rtx expand (function_expander &e) const override
305 switch (e.op_info->op)
307 case OP_TYPE_vx:
308 gcc_assert (FRM_OP == NO_FRM);
309 case OP_TYPE_vf:
310 return e.use_exact_insn (code_for_pred_scalar (CODE, e.vector_mode ()));
311 case OP_TYPE_vv:
312 return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
313 default:
314 gcc_unreachable ();
319 /* Implements vrsub. */
320 class vrsub : public function_base
322 public:
323 rtx expand (function_expander &e) const override
325 return e.use_exact_insn (
326 code_for_pred_sub_reverse_scalar (e.vector_mode ()));
330 /* Implements vneg/vnot. */
331 template<rtx_code CODE, enum frm_op_type FRM_OP = NO_FRM>
332 class unop : public function_base
334 public:
335 bool has_rounding_mode_operand_p () const override
337 return FRM_OP == HAS_FRM;
340 bool may_require_frm_p () const override { return true; }
342 rtx expand (function_expander &e) const override
344 return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
348 /* Implements vsext.vf2/vsext.vf4/vsext.vf8/vzext.vf2/vzext.vf4/vzext.vf8. */
349 template<rtx_code CODE>
350 class ext : public function_base
352 public:
353 rtx expand (function_expander &e) const override
355 switch (e.op_info->op)
357 case OP_TYPE_vf2:
358 return e.use_exact_insn (code_for_pred_vf2 (CODE, e.vector_mode ()));
359 case OP_TYPE_vf4:
360 return e.use_exact_insn (code_for_pred_vf4 (CODE, e.vector_mode ()));
361 case OP_TYPE_vf8:
362 return e.use_exact_insn (code_for_pred_vf8 (CODE, e.vector_mode ()));
363 default:
364 gcc_unreachable ();
369 /* Implements vmulh/vmulhu/vmulhsu. */
370 template<int UNSPEC>
371 class vmulh : public function_base
373 public:
374 rtx expand (function_expander &e) const override
376 switch (e.op_info->op)
378 case OP_TYPE_vx:
379 return e.use_exact_insn (
380 code_for_pred_mulh_scalar (UNSPEC, e.vector_mode ()));
381 case OP_TYPE_vv:
382 return e.use_exact_insn (
383 code_for_pred_mulh (UNSPEC, e.vector_mode ()));
384 default:
385 gcc_unreachable ();
390 /* Implements vwadd/vwsub/vwmul. */
391 template<rtx_code CODE1, rtx_code CODE2 = FLOAT_EXTEND>
392 class widen_binop : public function_base
394 public:
395 rtx expand (function_expander &e) const override
397 switch (e.op_info->op)
399 case OP_TYPE_vv:
400 return e.use_exact_insn (
401 code_for_pred_dual_widen (CODE1, CODE2, e.vector_mode ()));
402 case OP_TYPE_vx:
403 return e.use_exact_insn (
404 code_for_pred_dual_widen_scalar (CODE1, CODE2, e.vector_mode ()));
405 case OP_TYPE_wv:
406 if (CODE1 == PLUS)
407 return e.use_exact_insn (
408 code_for_pred_single_widen_add (CODE2, e.vector_mode ()));
409 else
410 return e.use_exact_insn (
411 code_for_pred_single_widen_sub (CODE2, e.vector_mode ()));
412 case OP_TYPE_wx:
413 return e.use_exact_insn (
414 code_for_pred_single_widen_scalar (CODE1, CODE2, e.vector_mode ()));
415 default:
416 gcc_unreachable ();
421 /* Implement vfwadd/vfwsub/vfwmul. */
422 template<rtx_code CODE, enum frm_op_type FRM_OP = NO_FRM>
423 class widen_binop_fp : public function_base
425 public:
426 bool has_rounding_mode_operand_p () const override
428 return FRM_OP == HAS_FRM;
431 bool may_require_frm_p () const override { return true; }
433 rtx expand (function_expander &e) const override
435 switch (e.op_info->op)
437 case OP_TYPE_vv:
438 return e.use_exact_insn (
439 code_for_pred_dual_widen (CODE, e.vector_mode ()));
440 case OP_TYPE_vf:
441 return e.use_exact_insn (
442 code_for_pred_dual_widen_scalar (CODE, e.vector_mode ()));
443 case OP_TYPE_wv:
444 if (CODE == PLUS)
445 return e.use_exact_insn (
446 code_for_pred_single_widen_add (e.vector_mode ()));
447 else
448 return e.use_exact_insn (
449 code_for_pred_single_widen_sub (e.vector_mode ()));
450 case OP_TYPE_wf:
451 return e.use_exact_insn (
452 code_for_pred_single_widen_scalar (CODE, e.vector_mode ()));
453 default:
454 gcc_unreachable ();
459 /* Implements vwmulsu. */
460 class vwmulsu : public function_base
462 public:
463 rtx expand (function_expander &e) const override
465 switch (e.op_info->op)
467 case OP_TYPE_vv:
468 return e.use_exact_insn (code_for_pred_widen_mulsu (e.vector_mode ()));
469 case OP_TYPE_vx:
470 return e.use_exact_insn (
471 code_for_pred_widen_mulsu_scalar (e.vector_mode ()));
472 default:
473 gcc_unreachable ();
478 /* Implements vwcvt. */
479 template<rtx_code CODE>
480 class vwcvt : public function_base
482 public:
483 rtx expand (function_expander &e) const override
485 return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
489 /* Implements vadc. */
490 class vadc : public function_base
492 public:
493 bool apply_mask_policy_p () const override { return false; }
494 bool use_mask_predication_p () const override { return false; }
496 rtx expand (function_expander &e) const override
498 switch (e.op_info->op)
500 case OP_TYPE_vvm:
501 return e.use_exact_insn (code_for_pred_adc (e.vector_mode ()));
502 case OP_TYPE_vxm:
503 return e.use_exact_insn (code_for_pred_adc_scalar (e.vector_mode ()));
504 default:
505 gcc_unreachable ();
510 /* Implements vsbc. */
511 class vsbc : public function_base
513 public:
514 bool apply_mask_policy_p () const override { return false; }
515 bool use_mask_predication_p () const override { return false; }
517 rtx expand (function_expander &e) const override
519 switch (e.op_info->op)
521 case OP_TYPE_vvm:
522 return e.use_exact_insn (code_for_pred_sbc (e.vector_mode ()));
523 case OP_TYPE_vxm:
524 return e.use_exact_insn (code_for_pred_sbc_scalar (e.vector_mode ()));
525 default:
526 gcc_unreachable ();
531 /* Implements vmadc. */
532 class vmadc : public function_base
534 public:
535 bool apply_tail_policy_p () const override { return false; }
536 bool apply_mask_policy_p () const override { return false; }
537 bool use_mask_predication_p () const override { return false; }
538 bool has_merge_operand_p () const override { return false; }
540 rtx expand (function_expander &e) const override
542 switch (e.op_info->op)
544 case OP_TYPE_vvm:
545 return e.use_exact_insn (code_for_pred_madc (e.vector_mode ()));
546 case OP_TYPE_vxm:
547 return e.use_exact_insn (code_for_pred_madc_scalar (e.vector_mode ()));
548 case OP_TYPE_vv:
549 return e.use_exact_insn (
550 code_for_pred_madc_overflow (e.vector_mode ()));
551 case OP_TYPE_vx:
552 return e.use_exact_insn (
553 code_for_pred_madc_overflow_scalar (e.vector_mode ()));
554 default:
555 gcc_unreachable ();
560 /* Implements vmsbc. */
561 class vmsbc : public function_base
563 public:
564 bool apply_tail_policy_p () const override { return false; }
565 bool apply_mask_policy_p () const override { return false; }
566 bool use_mask_predication_p () const override { return false; }
567 bool has_merge_operand_p () const override { return false; }
569 rtx expand (function_expander &e) const override
571 switch (e.op_info->op)
573 case OP_TYPE_vvm:
574 return e.use_exact_insn (code_for_pred_msbc (e.vector_mode ()));
575 case OP_TYPE_vxm:
576 return e.use_exact_insn (code_for_pred_msbc_scalar (e.vector_mode ()));
577 case OP_TYPE_vv:
578 return e.use_exact_insn (
579 code_for_pred_msbc_overflow (e.vector_mode ()));
580 case OP_TYPE_vx:
581 return e.use_exact_insn (
582 code_for_pred_msbc_overflow_scalar (e.vector_mode ()));
583 default:
584 gcc_unreachable ();
589 /* Implements vnsrl/vnsra. */
590 template<rtx_code CODE>
591 class vnshift : public function_base
593 public:
594 rtx expand (function_expander &e) const override
596 switch (e.op_info->op)
598 case OP_TYPE_wx:
599 return e.use_exact_insn (
600 code_for_pred_narrow_scalar (CODE, e.vector_mode ()));
601 case OP_TYPE_wv:
602 return e.use_exact_insn (code_for_pred_narrow (CODE, e.vector_mode ()));
603 default:
604 gcc_unreachable ();
609 /* Implements vncvt. */
610 class vncvt_x : public function_base
612 public:
613 rtx expand (function_expander &e) const override
615 return e.use_exact_insn (code_for_pred_trunc (e.vector_mode ()));
619 /* Implements vmerge/vfmerge. */
620 class vmerge : public function_base
622 public:
623 bool apply_mask_policy_p () const override { return false; }
624 bool use_mask_predication_p () const override { return false; }
625 rtx expand (function_expander &e) const override
627 switch (e.op_info->op)
629 case OP_TYPE_vvm:
630 return e.use_exact_insn (code_for_pred_merge (e.vector_mode ()));
631 case OP_TYPE_vxm:
632 case OP_TYPE_vfm:
633 return e.use_exact_insn (code_for_pred_merge_scalar (e.vector_mode ()));
634 default:
635 gcc_unreachable ();
640 /* Implements vmv.v.x/vmv.v.v/vfmv.v.f. */
641 class vmv_v : public function_base
643 public:
644 rtx expand (function_expander &e) const override
646 switch (e.op_info->op)
648 case OP_TYPE_v:
649 return e.use_exact_insn (code_for_pred_mov (e.vector_mode ()));
650 case OP_TYPE_x:
651 case OP_TYPE_f:
652 return e.use_exact_insn (code_for_pred_broadcast (e.vector_mode ()));
653 default:
654 gcc_unreachable ();
659 /* Implements vaadd/vasub/vsmul/vssra/vssrl. */
660 template<int UNSPEC>
661 class sat_op : public function_base
663 public:
664 bool has_rounding_mode_operand_p () const override { return true; }
666 bool may_require_vxrm_p () const override { return true; }
668 rtx expand (function_expander &e) const override
670 switch (e.op_info->op)
672 case OP_TYPE_vx:
673 return e.use_exact_insn (
674 code_for_pred_scalar (UNSPEC, e.vector_mode ()));
675 case OP_TYPE_vv:
676 return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
677 default:
678 gcc_unreachable ();
683 /* Implements vnclip/vnclipu. */
684 template<int UNSPEC>
685 class vnclip : public function_base
687 public:
688 bool has_rounding_mode_operand_p () const override { return true; }
690 bool may_require_vxrm_p () const override { return true; }
692 rtx expand (function_expander &e) const override
694 switch (e.op_info->op)
696 case OP_TYPE_wx:
697 return e.use_exact_insn (
698 code_for_pred_narrow_clip_scalar (UNSPEC, e.vector_mode ()));
699 case OP_TYPE_wv:
700 return e.use_exact_insn (
701 code_for_pred_narrow_clip (UNSPEC, e.vector_mode ()));
702 default:
703 gcc_unreachable ();
708 /* Implements vmseq/vmsne/vmslt/vmsgt/vmsle/vmsge. */
709 template<rtx_code CODE>
710 class icmp : public function_base
712 public:
713 rtx expand (function_expander &e) const override
715 switch (e.op_info->op)
717 case OP_TYPE_vx: {
718 if (CODE == GE || CODE == GEU)
719 return e.use_compare_insn (CODE, code_for_pred_ge_scalar (
720 e.vector_mode ()));
721 else if (CODE == EQ || CODE == NE)
722 return e.use_compare_insn (CODE, code_for_pred_eqne_scalar (
723 e.vector_mode ()));
724 else
725 return e.use_compare_insn (CODE, code_for_pred_cmp_scalar (
726 e.vector_mode ()));
728 case OP_TYPE_vv: {
729 if (CODE == LT || CODE == LTU || CODE == GE || CODE == GEU)
730 return e.use_compare_insn (CODE,
731 code_for_pred_ltge (e.vector_mode ()));
732 else
733 return e.use_compare_insn (CODE,
734 code_for_pred_cmp (e.vector_mode ()));
736 default:
737 gcc_unreachable ();
742 /* Implements vmacc/vnmsac/vmadd/vnmsub. */
743 class vmacc : public function_base
745 public:
746 bool has_merge_operand_p () const override { return false; }
748 rtx expand (function_expander &e) const override
750 if (e.op_info->op == OP_TYPE_vx)
751 return e.use_ternop_insn (true, code_for_pred_mul_plus_scalar (
752 e.vector_mode ()));
753 if (e.op_info->op == OP_TYPE_vv)
754 return e.use_ternop_insn (true,
755 code_for_pred_mul_plus (e.vector_mode ()));
756 gcc_unreachable ();
760 class vnmsac : public function_base
762 public:
763 bool has_merge_operand_p () const override { return false; }
765 rtx expand (function_expander &e) const override
767 if (e.op_info->op == OP_TYPE_vx)
768 return e.use_ternop_insn (true, code_for_pred_minus_mul_scalar (
769 e.vector_mode ()));
770 if (e.op_info->op == OP_TYPE_vv)
771 return e.use_ternop_insn (true,
772 code_for_pred_minus_mul (e.vector_mode ()));
773 gcc_unreachable ();
777 class vmadd : public function_base
779 public:
780 bool has_merge_operand_p () const override { return false; }
782 rtx expand (function_expander &e) const override
784 if (e.op_info->op == OP_TYPE_vx)
785 return e.use_ternop_insn (false, code_for_pred_mul_plus_scalar (
786 e.vector_mode ()));
787 if (e.op_info->op == OP_TYPE_vv)
788 return e.use_ternop_insn (false,
789 code_for_pred_mul_plus (e.vector_mode ()));
790 gcc_unreachable ();
794 class vnmsub : public function_base
796 public:
797 bool has_merge_operand_p () const override { return false; }
799 rtx expand (function_expander &e) const override
801 if (e.op_info->op == OP_TYPE_vx)
802 return e.use_ternop_insn (false, code_for_pred_minus_mul_scalar (
803 e.vector_mode ()));
804 if (e.op_info->op == OP_TYPE_vv)
805 return e.use_ternop_insn (false,
806 code_for_pred_minus_mul (e.vector_mode ()));
807 gcc_unreachable ();
811 /* Implements vwmacc<su><su>. */
812 class vwmacc : public function_base
814 public:
815 bool has_merge_operand_p () const override { return false; }
817 rtx expand (function_expander &e) const override
819 if (e.op_info->op == OP_TYPE_vx)
820 return e.use_widen_ternop_insn (
821 code_for_pred_widen_mul_plus_scalar (SIGN_EXTEND, e.vector_mode ()));
822 if (e.op_info->op == OP_TYPE_vv)
823 return e.use_widen_ternop_insn (
824 code_for_pred_widen_mul_plus (SIGN_EXTEND, e.vector_mode ()));
825 gcc_unreachable ();
829 class vwmaccu : public function_base
831 public:
832 bool has_merge_operand_p () const override { return false; }
834 rtx expand (function_expander &e) const override
836 if (e.op_info->op == OP_TYPE_vx)
837 return e.use_widen_ternop_insn (
838 code_for_pred_widen_mul_plus_scalar (ZERO_EXTEND, e.vector_mode ()));
839 if (e.op_info->op == OP_TYPE_vv)
840 return e.use_widen_ternop_insn (
841 code_for_pred_widen_mul_plus (ZERO_EXTEND, e.vector_mode ()));
842 gcc_unreachable ();
846 class vwmaccsu : public function_base
848 public:
849 bool has_merge_operand_p () const override { return false; }
851 rtx expand (function_expander &e) const override
853 if (e.op_info->op == OP_TYPE_vx)
854 return e.use_widen_ternop_insn (
855 code_for_pred_widen_mul_plussu_scalar (e.vector_mode ()));
856 if (e.op_info->op == OP_TYPE_vv)
857 return e.use_widen_ternop_insn (
858 code_for_pred_widen_mul_plussu (e.vector_mode ()));
859 gcc_unreachable ();
863 class vwmaccus : public function_base
865 public:
866 bool has_merge_operand_p () const override { return false; }
868 rtx expand (function_expander &e) const override
870 return e.use_widen_ternop_insn (
871 code_for_pred_widen_mul_plusus_scalar (e.vector_mode ()));
875 /* Implements vmand/vmnand/vmandn/vmxor/vmor/vmnor/vmorn/vmxnor */
876 template<rtx_code CODE>
877 class mask_logic : public function_base
879 public:
880 bool apply_tail_policy_p () const override { return false; }
881 bool apply_mask_policy_p () const override { return false; }
883 rtx expand (function_expander &e) const override
885 return e.use_exact_insn (code_for_pred (CODE, e.vector_mode ()));
888 template<rtx_code CODE>
889 class mask_nlogic : public function_base
891 public:
892 bool apply_tail_policy_p () const override { return false; }
893 bool apply_mask_policy_p () const override { return false; }
895 rtx expand (function_expander &e) const override
897 return e.use_exact_insn (code_for_pred_n (CODE, e.vector_mode ()));
900 template<rtx_code CODE>
901 class mask_notlogic : public function_base
903 public:
904 bool apply_tail_policy_p () const override { return false; }
905 bool apply_mask_policy_p () const override { return false; }
907 rtx expand (function_expander &e) const override
909 return e.use_exact_insn (code_for_pred_not (CODE, e.vector_mode ()));
913 /* Implements vmmv. */
914 class vmmv : public function_base
916 public:
917 bool apply_tail_policy_p () const override { return false; }
918 bool apply_mask_policy_p () const override { return false; }
920 rtx expand (function_expander &e) const override
922 return e.use_exact_insn (code_for_pred_mov (e.vector_mode ()));
926 /* Implements vmclr. */
927 class vmclr : public function_base
929 public:
930 bool can_be_overloaded_p (enum predication_type_index) const override
932 return false;
935 rtx expand (function_expander &e) const override
937 machine_mode mode = TYPE_MODE (TREE_TYPE (e.exp));
938 e.add_all_one_mask_operand (mode);
939 e.add_vundef_operand (mode);
940 e.add_input_operand (mode, CONST0_RTX (mode));
941 e.add_input_operand (call_expr_nargs (e.exp) - 1);
942 e.add_input_operand (Pmode, get_avl_type_rtx (avl_type::NONVLMAX));
943 return e.generate_insn (code_for_pred_mov (e.vector_mode ()));
947 /* Implements vmset. */
948 class vmset : public function_base
950 public:
951 bool can_be_overloaded_p (enum predication_type_index) const override
953 return false;
956 rtx expand (function_expander &e) const override
958 machine_mode mode = TYPE_MODE (TREE_TYPE (e.exp));
959 e.add_all_one_mask_operand (mode);
960 e.add_vundef_operand (mode);
961 e.add_input_operand (mode, CONSTM1_RTX (mode));
962 e.add_input_operand (call_expr_nargs (e.exp) - 1);
963 e.add_input_operand (Pmode, get_avl_type_rtx (avl_type::NONVLMAX));
964 return e.generate_insn (code_for_pred_mov (e.vector_mode ()));
968 /* Implements vmnot. */
969 class vmnot : public function_base
971 public:
972 bool apply_tail_policy_p () const override { return false; }
973 bool apply_mask_policy_p () const override { return false; }
975 rtx expand (function_expander &e) const override
977 return e.use_exact_insn (code_for_pred_not (e.vector_mode ()));
981 /* Implements vcpop. */
982 class vcpop : public function_base
984 public:
985 bool apply_tail_policy_p () const override { return false; }
986 bool apply_mask_policy_p () const override { return false; }
987 bool has_merge_operand_p () const override { return false; }
989 rtx expand (function_expander &e) const override
991 return e.use_exact_insn (code_for_pred_popcount (e.vector_mode (), Pmode));
995 /* Implements vfirst. */
996 class vfirst : public function_base
998 public:
999 bool apply_tail_policy_p () const override { return false; }
1000 bool apply_mask_policy_p () const override { return false; }
1001 bool has_merge_operand_p () const override { return false; }
1003 rtx expand (function_expander &e) const override
1005 return e.use_exact_insn (code_for_pred_ffs (e.vector_mode (), Pmode));
1009 /* Implements vmsbf/vmsif/vmsof. */
1010 template<int UNSPEC>
1011 class mask_misc : public function_base
1013 public:
1014 bool apply_tail_policy_p () const override { return false; }
1016 rtx expand (function_expander &e) const override
1018 return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
1022 /* Implements viota. */
1023 class viota : public function_base
1025 public:
1026 bool can_be_overloaded_p (enum predication_type_index pred) const override
1028 return pred == PRED_TYPE_tu || pred == PRED_TYPE_tum
1029 || pred == PRED_TYPE_tumu || pred == PRED_TYPE_mu;
1032 rtx expand (function_expander &e) const override
1034 return e.use_exact_insn (code_for_pred_iota (e.vector_mode ()));
1038 /* Implements vid. */
1039 class vid : public function_base
1041 public:
1042 bool can_be_overloaded_p (enum predication_type_index pred) const override
1044 return pred == PRED_TYPE_tu || pred == PRED_TYPE_tum
1045 || pred == PRED_TYPE_tumu || pred == PRED_TYPE_mu;
1048 rtx expand (function_expander &e) const override
1050 return e.use_exact_insn (code_for_pred_series (e.vector_mode ()));
1054 /* Implements vfrsub/vfrdiv. */
1055 template<rtx_code CODE, enum frm_op_type FRM_OP = NO_FRM>
1056 class reverse_binop : public function_base
1058 public:
1059 bool has_rounding_mode_operand_p () const override
1061 return FRM_OP == HAS_FRM;
1064 bool may_require_frm_p () const override { return true; }
1066 rtx expand (function_expander &e) const override
1068 return e.use_exact_insn (
1069 code_for_pred_reverse_scalar (CODE, e.vector_mode ()));
1073 template<enum frm_op_type FRM_OP = NO_FRM>
1074 class vfmacc : public function_base
1076 public:
1077 bool has_rounding_mode_operand_p () const override
1079 return FRM_OP == HAS_FRM;
1082 bool may_require_frm_p () const override { return true; }
1084 bool has_merge_operand_p () const override { return false; }
1086 rtx expand (function_expander &e) const override
1088 if (e.op_info->op == OP_TYPE_vf)
1089 return e.use_ternop_insn (true,
1090 code_for_pred_mul_scalar (PLUS,
1091 e.vector_mode ()));
1092 if (e.op_info->op == OP_TYPE_vv)
1093 return e.use_ternop_insn (true,
1094 code_for_pred_mul (PLUS, e.vector_mode ()));
1095 gcc_unreachable ();
1099 template<enum frm_op_type FRM_OP = NO_FRM>
1100 class vfnmsac : public function_base
1102 public:
1103 bool has_rounding_mode_operand_p () const override
1105 return FRM_OP == HAS_FRM;
1108 bool may_require_frm_p () const override { return true; }
1110 bool has_merge_operand_p () const override { return false; }
1112 rtx expand (function_expander &e) const override
1114 if (e.op_info->op == OP_TYPE_vf)
1115 return e.use_ternop_insn (
1116 true, code_for_pred_mul_neg_scalar (PLUS, e.vector_mode ()));
1117 if (e.op_info->op == OP_TYPE_vv)
1118 return e.use_ternop_insn (true,
1119 code_for_pred_mul_neg (PLUS, e.vector_mode ()));
1120 gcc_unreachable ();
1124 template<enum frm_op_type FRM_OP = NO_FRM>
1125 class vfmadd : public function_base
1127 public:
1128 bool has_rounding_mode_operand_p () const override
1130 return FRM_OP == HAS_FRM;
1133 bool may_require_frm_p () const override { return true; }
1135 bool has_merge_operand_p () const override { return false; }
1137 rtx expand (function_expander &e) const override
1139 if (e.op_info->op == OP_TYPE_vf)
1140 return e.use_ternop_insn (false,
1141 code_for_pred_mul_scalar (PLUS,
1142 e.vector_mode ()));
1143 if (e.op_info->op == OP_TYPE_vv)
1144 return e.use_ternop_insn (false,
1145 code_for_pred_mul (PLUS, e.vector_mode ()));
1146 gcc_unreachable ();
1150 template<enum frm_op_type FRM_OP = NO_FRM>
1151 class vfnmsub : public function_base
1153 public:
1154 bool has_rounding_mode_operand_p () const override
1156 return FRM_OP == HAS_FRM;
1159 bool may_require_frm_p () const override { return true; }
1161 bool has_merge_operand_p () const override { return false; }
1163 rtx expand (function_expander &e) const override
1165 if (e.op_info->op == OP_TYPE_vf)
1166 return e.use_ternop_insn (
1167 false, code_for_pred_mul_neg_scalar (PLUS, e.vector_mode ()));
1168 if (e.op_info->op == OP_TYPE_vv)
1169 return e.use_ternop_insn (false,
1170 code_for_pred_mul_neg (PLUS, e.vector_mode ()));
1171 gcc_unreachable ();
1175 template<enum frm_op_type FRM_OP = NO_FRM>
1176 class vfnmacc : public function_base
1178 public:
1179 bool has_rounding_mode_operand_p () const override
1181 return FRM_OP == HAS_FRM;
1184 bool may_require_frm_p () const override { return true; }
1186 bool has_merge_operand_p () const override { return false; }
1188 rtx expand (function_expander &e) const override
1190 if (e.op_info->op == OP_TYPE_vf)
1191 return e.use_ternop_insn (
1192 true, code_for_pred_mul_neg_scalar (MINUS, e.vector_mode ()));
1193 if (e.op_info->op == OP_TYPE_vv)
1194 return e.use_ternop_insn (true,
1195 code_for_pred_mul_neg (MINUS, e.vector_mode ()));
1196 gcc_unreachable ();
/* Implements vfmsac: vd = vs1 * vs2 - vd (vd is the accumulator).  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfmsac : public function_base
{
public:
  /* Only the _rm intrinsic variants expose a rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  /* Floating-point arithmetic may read the dynamic FRM CSR.  */
  bool may_require_frm_p () const override { return true; }

  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_ternop_insn (true,
				code_for_pred_mul_scalar (MINUS,
							  e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_ternop_insn (true,
				code_for_pred_mul (MINUS, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfnmadd: vd = -(vd * vs1) - vs2 (vd is the multiplicand).  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfnmadd : public function_base
{
public:
  /* Only the _rm intrinsic variants expose a rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  /* Floating-point arithmetic may read the dynamic FRM CSR.  */
  bool may_require_frm_p () const override { return true; }

  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_ternop_insn (
	false, code_for_pred_mul_neg_scalar (MINUS, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_ternop_insn (false,
				code_for_pred_mul_neg (MINUS, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfmsub: vd = vd * vs1 - vs2 (vd is the multiplicand).  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfmsub : public function_base
{
public:
  /* Only the _rm intrinsic variants expose a rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  /* Floating-point arithmetic may read the dynamic FRM CSR.  */
  bool may_require_frm_p () const override { return true; }

  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_ternop_insn (false,
				code_for_pred_mul_scalar (MINUS,
							  e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_ternop_insn (false,
				code_for_pred_mul (MINUS, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfwmacc: widening multiply-accumulate,
   vd (2*SEW) = vs1 * vs2 + vd.  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfwmacc : public function_base
{
public:
  /* Only the _rm intrinsic variants expose a rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  /* Floating-point arithmetic may read the dynamic FRM CSR.  */
  bool may_require_frm_p () const override { return true; }

  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul_scalar (PLUS, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul (PLUS, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfwnmacc: widening negate-multiply-accumulate,
   vd (2*SEW) = -(vs1 * vs2) - vd.  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfwnmacc : public function_base
{
public:
  /* Only the _rm intrinsic variants expose a rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  /* Floating-point arithmetic may read the dynamic FRM CSR.  */
  bool may_require_frm_p () const override { return true; }

  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul_neg_scalar (MINUS, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul_neg (MINUS, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfwmsac: widening multiply-subtract-accumulate,
   vd (2*SEW) = vs1 * vs2 - vd.  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfwmsac : public function_base
{
public:
  /* Only the _rm intrinsic variants expose a rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  /* Floating-point arithmetic may read the dynamic FRM CSR.  */
  bool may_require_frm_p () const override { return true; }

  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul_scalar (MINUS, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul (MINUS, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfwnmsac: widening negate-multiply-add,
   vd (2*SEW) = -(vs1 * vs2) + vd.  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfwnmsac : public function_base
{
public:
  /* Only the _rm intrinsic variants expose a rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  /* Floating-point arithmetic may read the dynamic FRM CSR.  */
  bool may_require_frm_p () const override { return true; }

  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul_neg_scalar (PLUS, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul_neg (PLUS, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfsqrt7/vfrec7/vfclass/vfsgnj/vfsgnjx.
   UNSPEC identifies the concrete operation; FRM_OP selects whether the
   intrinsic variant carries an explicit rounding-mode operand.  */
template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class float_misc : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_exact_insn (code_for_pred_scalar (UNSPEC, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv || e.op_info->op == OP_TYPE_v)
      return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfsgnjn (copy the negated sign of operand 2 into operand 1).  */
class vfsgnjn : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_exact_insn (code_for_pred_ncopysign_scalar (e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_exact_insn (code_for_pred_ncopysign (e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vmfeq/vmfne/vmflt/vmfgt/vmfle/vmfge.
   CODE is the RTL comparison being performed.  */
template<rtx_code CODE>
class fcmp : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
	case OP_TYPE_vf: {
	  /* EQ/NE against a scalar use a dedicated pattern; the ordered
	     comparisons share the generic scalar-compare pattern.  */
	  if (CODE == EQ || CODE == NE)
	    return e.use_compare_insn (CODE, code_for_pred_eqne_scalar (
					       e.vector_mode ()));
	  else
	    return e.use_compare_insn (CODE, code_for_pred_cmp_scalar (
					       e.vector_mode ()));
	}
	case OP_TYPE_vv: {
	  return e.use_compare_insn (CODE,
				     code_for_pred_cmp (e.vector_mode ()));
	}
	default:
	  gcc_unreachable ();
      }
  }
};
/* Implements vfclass (classify each FP element into a 10-bit mask).  */
class vfclass : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_class (e.arg_mode (0)));
  }
};
/* Implements vfcvt.x (FP -> integer conversion honoring the rounding mode).
   UNSPEC selects signed/unsigned; FRM_OP selects the _rm variant.  */
template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class vfcvt_x : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_fcvt_x_f (UNSPEC, e.arg_mode (0)));
  }
};
/* Implements vfcvt.rtz.x (FP -> integer, always round towards zero).
   CODE is FIX or UNSIGNED_FIX.  */
template<rtx_code CODE>
class vfcvt_rtz_x : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred (CODE, e.arg_mode (0)));
  }
};
/* Implements vfcvt.f (integer -> FP conversion).  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfcvt_f : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_x_v)
      return e.use_exact_insn (code_for_pred (FLOAT, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_xu_v)
      return e.use_exact_insn (
	code_for_pred (UNSIGNED_FLOAT, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfwcvt.x (widening FP -> integer conversion).  */
template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class vfwcvt_x : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_widen_fcvt_x_f (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vfwcvt.rtz.x (widening FP -> integer, round towards zero).
   CODE is FIX or UNSIGNED_FIX.  */
template<rtx_code CODE>
class vfwcvt_rtz_x : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_widen (CODE, e.vector_mode ()));
  }
};
/* Implements vfwcvt.f (widening conversion to FP, from narrower FP or
   from integer).  */
class vfwcvt_f : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_f_v)
      return e.use_exact_insn (code_for_pred_extend (e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_x_v)
      return e.use_exact_insn (code_for_pred_widen (FLOAT, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_xu_v)
      return e.use_exact_insn (
	code_for_pred_widen (UNSIGNED_FLOAT, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfncvt.x (narrowing FP -> integer conversion).  */
template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class vfncvt_x : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_narrow_fcvt_x_f (UNSPEC, e.arg_mode (0)));
  }
};
/* Implements vfncvt.rtz.x (narrowing FP -> integer, round towards zero).
   CODE is FIX or UNSIGNED_FIX.  */
template<rtx_code CODE>
class vfncvt_rtz_x : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_narrow (CODE, e.vector_mode ()));
  }
};
/* Implements vfncvt.f (narrowing conversion to FP, from wider FP or
   from integer).  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfncvt_f : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_f_w)
      return e.use_exact_insn (code_for_pred_trunc (e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_x_w)
      return e.use_exact_insn (code_for_pred_narrow (FLOAT, e.arg_mode (0)));
    if (e.op_info->op == OP_TYPE_xu_w)
      return e.use_exact_insn (
	code_for_pred_narrow (UNSIGNED_FLOAT, e.arg_mode (0)));
    gcc_unreachable ();
  }
};
/* Implements vfncvt.rod.f (narrowing FP conversion, round towards odd).  */
class vfncvt_rod_f : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_rod_trunc (e.vector_mode ()));
  }
};
/* Implements reduction instructions.  UNSPEC identifies the reduction op.  */
template<unsigned UNSPEC>
class reducop : public function_base
{
public:
  /* Reductions produce a scalar result, so the mask policy does not apply.  */
  bool apply_mask_policy_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
  }
};
/* Implements floating-point reduction instructions.  */
template<unsigned UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class freducop : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  /* Reductions produce a scalar result, so the mask policy does not apply.  */
  bool apply_mask_policy_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vmv/vfmv instructions (extract the first element to a
   scalar); these take no vl, policies, mask, or merge operand.  */
class vmv : public function_base
{
public:
  bool apply_vl_p () const override { return false; }
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_extract_first (e.vector_mode ()));
  }
};
/* Implements vmv.s.x/vfmv.s.f (move a scalar into element 0).  */
class vmv_s : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_scalar_move_insn (code_for_pred_broadcast (e.vector_mode ()));
  }
};
/* Implements vslideup/vslidedown (UNSPEC selects the direction).  */
template<int UNSPEC>
class slideop : public function_base
{
public:
  bool has_merge_operand_p () const override
  {
    /* vslideup leaves the low destination elements untouched, so it has
       no separate merge operand.  */
    if (UNSPEC == UNSPEC_VSLIDEUP)
      return false;
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_slide (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vrgather (permute elements by an index vector or scalar).  */
class vrgather : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
	case OP_TYPE_vx:
	  return e.use_exact_insn (
	    code_for_pred_gather_scalar (e.vector_mode ()));
	case OP_TYPE_vv:
	  return e.use_exact_insn (code_for_pred_gather (e.vector_mode ()));
	default:
	  gcc_unreachable ();
      }
  }
};
/* Implements vrgatherei16 (gather with 16-bit element indices).  */
class vrgatherei16 : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_gatherei16 (e.vector_mode ()));
  }
};
/* Implements vcompress.  The mask is a data operand here, not a
   predicate, hence no mask policy/predication.  */
class vcompress : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_compress (e.vector_mode ()));
  }
};
/* Implements vundefined (produce an uninitialized vector value).  */
class vundefined : public function_base
{
public:
  bool apply_vl_p () const override
  {
    return false;
  }

  rtx expand (function_expander &e) const override
  {
    return e.generate_insn (code_for_vundefined (e.vector_mode ()));
  }
};
/* Implements vreinterpret (bit-cast between vector types; no vl).  */
class vreinterpret : public function_base
{
public:
  bool apply_vl_p () const override
  {
    return false;
  }

  rtx expand (function_expander &e) const override
  {
    e.add_input_operand (0);
    return e.generate_insn (code_for_vreinterpret (e.ret_mode ()));
  }
};
/* Implements vlmul_ext (reinterpret a vector as one with larger LMUL).  */
class vlmul_ext : public function_base
{
public:
  bool apply_vl_p () const override
  {
    return false;
  }

  rtx expand (function_expander &e) const override
  {
    tree arg = CALL_EXPR_ARG (e.exp, 0);
    rtx src = expand_normal (arg);
    /* Copy the source into the low part of the wider destination.  */
    emit_move_insn (gen_lowpart (e.vector_mode (), e.target), src);
    return e.target;
  }
};
/* Implements vlmul_trunc (reinterpret a vector as one with smaller LMUL).  */
class vlmul_trunc : public function_base
{
public:
  bool apply_vl_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 0));
    /* The result is the low part of the wider source.  */
    emit_move_insn (e.target, gen_lowpart (GET_MODE (e.target), src));
    return e.target;
  }
};
/* Implements vset (insert one vector into a slot of a tuple / grouped
   vector value).  */
class vset : public function_base
{
public:
  bool apply_vl_p () const override { return false; }

  gimple *fold (gimple_folder &f) const override
  {
    tree rhs_tuple = gimple_call_arg (f.call, 0);
    /* LMUL > 1 non-tuple vector types are not structure,
       we can't use __val[index] to set the subpart.  */
    if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
      return NULL;
    tree index = gimple_call_arg (f.call, 1);
    tree rhs_vector = gimple_call_arg (f.call, 2);

    /* Replace the call with two statements: a copy of the full tuple
       to the call result, followed by an update of the individual vector.

       The fold routines expect the replacement statement to have the
       same lhs as the original call, so return the copy statement
       rather than the field update.  */
    gassign *copy = gimple_build_assign (unshare_expr (f.lhs), rhs_tuple);

    /* Get a reference to the individual vector.  */
    tree field = tuple_type_field (TREE_TYPE (f.lhs));
    tree lhs_array
      = build3 (COMPONENT_REF, TREE_TYPE (field), f.lhs, field, NULL_TREE);
    tree lhs_vector = build4 (ARRAY_REF, TREE_TYPE (rhs_vector), lhs_array,
			      index, NULL_TREE, NULL_TREE);
    gassign *update = gimple_build_assign (lhs_vector, rhs_vector);
    gsi_insert_after (f.gsi, update, GSI_SAME_STMT);

    return copy;
  }

  rtx expand (function_expander &e) const override
  {
    if (!e.target)
      return NULL_RTX;
    rtx dest = expand_normal (CALL_EXPR_ARG (e.exp, 0));
    gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (dest)));
    rtx index = expand_normal (CALL_EXPR_ARG (e.exp, 1));
    rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 2));
    /* Copy the whole tuple, then overwrite the selected subvector at
       its byte offset within the destination.  */
    poly_int64 offset = INTVAL (index) * GET_MODE_SIZE (GET_MODE (src));
    emit_move_insn (e.target, dest);
    rtx subreg = simplify_gen_subreg (GET_MODE (src), e.target,
				      GET_MODE (e.target), offset);
    emit_move_insn (subreg, src);
    return e.target;
  }
};
/* Implements vget (extract one vector from a slot of a tuple / grouped
   vector value).  */
class vget : public function_base
{
public:
  bool apply_vl_p () const override { return false; }

  gimple *fold (gimple_folder &f) const override
  {
    /* Fold into a normal gimple component access.  */
    tree rhs_tuple = gimple_call_arg (f.call, 0);
    /* LMUL > 1 non-tuple vector types are not structure,
       we can't use __val[index] to get the subpart.  */
    if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
      return NULL;
    tree index = gimple_call_arg (f.call, 1);
    tree field = tuple_type_field (TREE_TYPE (rhs_tuple));
    tree rhs_array
      = build3 (COMPONENT_REF, TREE_TYPE (field), rhs_tuple, field, NULL_TREE);
    tree rhs_vector = build4 (ARRAY_REF, TREE_TYPE (f.lhs), rhs_array, index,
			      NULL_TREE, NULL_TREE);
    return gimple_build_assign (f.lhs, rhs_vector);
  }

  rtx expand (function_expander &e) const override
  {
    if (!e.target)
      return NULL_RTX;
    rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 0));
    gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (src)));
    rtx index = expand_normal (CALL_EXPR_ARG (e.exp, 1));
    /* Read the selected subvector from its byte offset in the source.  */
    poly_int64 offset = INTVAL (index) * GET_MODE_SIZE (GET_MODE (e.target));
    rtx subreg
      = simplify_gen_subreg (GET_MODE (e.target), src, GET_MODE (src), offset);
    return subreg;
  }
};
/* Implements vcreate (build a tuple / grouped vector from individual
   vector arguments).  */
class vcreate : public function_base
{
public:
  gimple *fold (gimple_folder &f) const override
  {
    unsigned int nargs = gimple_call_num_args (f.call);
    tree lhs_type = TREE_TYPE (f.lhs);
    /* LMUL > 1 non-tuple vector types are not structure,
       we can't use __val[index] to set the subpart.  */
    if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (lhs_type)))
      return NULL;

    /* Replace the call with a clobber of the result (to prevent it from
       becoming upwards exposed) followed by stores into each individual
       vector of tuple.

       The fold routines expect the replacement statement to have the
       same lhs as the original call, so return the clobber statement
       rather than the final vector store.  */
    gassign *clobber = gimple_build_assign (f.lhs, build_clobber (lhs_type));
    for (unsigned int i = nargs; i-- > 0; )
      {
	tree rhs_vector = gimple_call_arg (f.call, i);
	tree field = tuple_type_field (TREE_TYPE (f.lhs));
	tree lhs_array = build3 (COMPONENT_REF, TREE_TYPE (field),
				 unshare_expr (f.lhs), field, NULL_TREE);
	tree lhs_vector = build4 (ARRAY_REF, TREE_TYPE (rhs_vector),
				  lhs_array, size_int (i),
				  NULL_TREE, NULL_TREE);
	gassign *assign = gimple_build_assign (lhs_vector, rhs_vector);
	gsi_insert_after (f.gsi, assign, GSI_SAME_STMT);
      }
    return clobber;
  }

  rtx expand (function_expander &e) const override
  {
    if (!e.target)
      return NULL_RTX;
    gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (e.target)));
    unsigned int nargs = call_expr_nargs (e.exp);
    /* Store each argument into its slot of the destination tuple.  */
    for (unsigned int i = 0; i < nargs; i++)
      {
	rtx src = expand_normal (CALL_EXPR_ARG (e.exp, i));
	poly_int64 offset = i * GET_MODE_SIZE (GET_MODE (src));
	rtx subreg = simplify_gen_subreg (GET_MODE (src), e.target,
					  GET_MODE (e.target), offset);
	emit_move_insn (subreg, src);
      }

    return e.target;
  }
};
/* Implements read_vl (read the vl CSR left by a fault-only-first load).  */
class read_vl : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_CSR;
  }

  rtx expand (function_expander &e) const override
  {
    /* On RV64, vl is zero-extended into the wider result register.  */
    if (Pmode == SImode)
      emit_insn (gen_read_vlsi (e.target));
    else
      emit_insn (gen_read_vldi_zero_extend (e.target));
    return e.target;
  }
};
/* Implements vleff (fault-only-first unit-stride load; writes vl).  */
class vleff : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY | CP_WRITE_CSR;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    return pred != PRED_TYPE_none;
  }

  gimple *fold (gimple_folder &f) const override
  {
    return fold_fault_load (f);
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_contiguous_load_insn (
      code_for_pred_fault_load (e.vector_mode ()));
  }
};
/* Implements vlenb (vector register length in bytes, a poly-int
   compile-time constant).  */
class vlenb : public function_base
{
public:
  bool apply_vl_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    machine_mode mode = GET_MODE (e.target);
    rtx vlenb = gen_int_mode (BYTES_PER_RISCV_VECTOR, mode);
    emit_move_insn (e.target, vlenb);
    return e.target;
  }
};
/* Implements vlseg.v (segment unit-stride load).  */
class vlseg : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    return pred != PRED_TYPE_none;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_unit_strided_load (e.vector_mode ()));
  }
};
/* Implements vsseg.v (segment unit-stride store).  Stores produce no
   vector result, so tail/mask policies do not apply.  */
class vsseg : public function_base
{
public:
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }

  unsigned int call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index) const override
  {
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_unit_strided_store (e.vector_mode ()));
  }
};
/* Implements vlsseg.v (segment strided load).  */
class vlsseg : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    return pred != PRED_TYPE_none;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_strided_load (e.vector_mode ()));
  }
};
/* Implements vssseg.v (segment strided store).  */
class vssseg : public function_base
{
public:
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }

  unsigned int call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index) const override
  {
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_strided_store (e.vector_mode ()));
  }
};
/* Implements segment indexed loads (vluxseg/vloxseg); UNSPEC selects
   ordered vs unordered.  */
template<int UNSPEC>
class seg_indexed_load : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index) const override
  {
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_indexed_load (UNSPEC, e.vector_mode (), e.index_mode ()));
  }
};
/* Implements segment indexed stores (vsuxseg/vsoxseg); UNSPEC selects
   ordered vs unordered.  */
template<int UNSPEC>
class seg_indexed_store : public function_base
{
public:
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }

  unsigned int call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index) const override
  {
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_indexed_store (UNSPEC, e.vector_mode (), e.index_mode ()));
  }
};
/* Implements vlsegff.v (segment fault-only-first load; writes vl).  */
class vlsegff : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY | CP_WRITE_CSR;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    return pred != PRED_TYPE_none;
  }

  gimple *fold (gimple_folder &f) const override
  {
    return fold_fault_load (f);
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_fault_load (e.vector_mode ()));
  }
};
/* Implements
 * th.vl(b/h/w)[u].v/th.vs(b/h/w)[u].v/th.vls(b/h/w)[u].v/th.vss(b/h/w)[u].v/
 * th.vlx(b/h/w)[u].v/th.vs[u]x(b/h/w).v
 * codegen.  STORE_P selects load vs store, LST_TYPE the addressing kind,
 * and UNSPEC the element width/signedness variant.  */
template<bool STORE_P, lst_type LST_TYPE, int UNSPEC>
class th_loadstore_width : public function_base
{
public:
  /* Tail/mask policies only apply to loads (stores produce no result).  */
  bool apply_tail_policy_p () const override { return !STORE_P; }
  bool apply_mask_policy_p () const override { return !STORE_P; }

  unsigned int call_properties (const function_instance &) const override
  {
    if (STORE_P)
      return CP_WRITE_MEMORY;
    else
      return CP_READ_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    if (STORE_P || LST_TYPE == LST_INDEXED)
      return true;
    return pred != PRED_TYPE_none;
  }

  rtx expand (function_expander &e) const override
  {
    /* These are XTheadVector-only instructions.  */
    gcc_assert (TARGET_XTHEADVECTOR);
    if (LST_TYPE == LST_INDEXED)
      {
	if (STORE_P)
	  return e.use_exact_insn (
	    code_for_pred_indexed_store_width (UNSPEC, UNSPEC,
					       e.vector_mode ()));
	else
	  return e.use_exact_insn (
	    code_for_pred_indexed_load_width (UNSPEC, e.vector_mode ()));
      }
    else if (LST_TYPE == LST_STRIDED)
      {
	if (STORE_P)
	  return e.use_contiguous_store_insn (
	    code_for_pred_strided_store_width (UNSPEC, e.vector_mode ()));
	else
	  return e.use_contiguous_load_insn (
	    code_for_pred_strided_load_width (UNSPEC, e.vector_mode ()));
      }
    else
      {
	if (STORE_P)
	  return e.use_contiguous_store_insn (
	    code_for_pred_store_width (UNSPEC, e.vector_mode ()));
	else
	  return e.use_contiguous_load_insn (
	    code_for_pred_mov_width (UNSPEC, e.vector_mode ()));
      }
  }
};
/* Implements vext.x.v (XTheadVector scalar element extract).  */
class th_extract : public function_base
{
public:
  bool apply_vl_p () const override { return false; }
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    gcc_assert (TARGET_XTHEADVECTOR);
    return e.use_exact_insn (code_for_pred_th_extract (e.vector_mode ()));
  }
};
/* Below implements are vector crypto.  */
/* Implements vandn.[vv,vx] (and-not, from Zvbb).  */
class vandn : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
	case OP_TYPE_vv:
	  return e.use_exact_insn (code_for_pred_vandn (e.vector_mode ()));
	case OP_TYPE_vx:
	  return e.use_exact_insn (code_for_pred_vandn_scalar (e.vector_mode ()));
	default:
	  gcc_unreachable ();
      }
  }
};
/* Implements vrol/vror/clz/ctz.  CODE is the RTL code of the operation.  */
template<rtx_code CODE>
class bitmanip : public function_base
{
public:
  /* CLZ/CTZ have no merge operand, so the tail/mask policies and merge
     operand are disabled for them.  */
  bool apply_tail_policy_p () const override
  {
    return (CODE == CLZ || CODE == CTZ) ? false : true;
  }

  bool apply_mask_policy_p () const override
  {
    return (CODE == CLZ || CODE == CTZ) ? false : true;
  }

  bool has_merge_operand_p () const override
  {
    return (CODE == CLZ || CODE == CTZ) ? false : true;
  }

  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
	case OP_TYPE_v:
	case OP_TYPE_vv:
	  return e.use_exact_insn (code_for_pred_v (CODE, e.vector_mode ()));
	case OP_TYPE_vx:
	  return e.use_exact_insn (code_for_pred_v_scalar (CODE, e.vector_mode ()));
	default:
	  gcc_unreachable ();
      }
  }
};
/* Implements vbrev/vbrev8/vrev8 (bit/byte reversal, from Zvbb).  */
template<int UNSPEC>
class b_reverse : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vwsll (widening shift left logical, from Zvbb).  */
class vwsll : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
	case OP_TYPE_vv:
	  return e.use_exact_insn (code_for_pred_vwsll (e.vector_mode ()));
	case OP_TYPE_vx:
	  return e.use_exact_insn (code_for_pred_vwsll_scalar (e.vector_mode ()));
	default:
	  gcc_unreachable ();
      }
  }
};
/* Implements clmul (carry-less multiply, from Zvbc); UNSPEC selects
   the low/high half variant.  */
template<int UNSPEC>
class clmul : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
	case OP_TYPE_vv:
	  return e.use_exact_insn (
	    code_for_pred_vclmul (UNSPEC, e.vector_mode ()));
	case OP_TYPE_vx:
	  return e.use_exact_insn
	    (code_for_pred_vclmul_scalar (UNSPEC, e.vector_mode ()));
	default:
	  gcc_unreachable ();
      }
  }
};
/* Implements vghsh/vsh2ms/vsha2c[hl] (vector crypto hash ops).  */
template<int UNSPEC>
class vg_nhab : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vgmul/vaes* (vector crypto .vv and .vs forms).  The
   consecutive UNSPEC+1/UNSPEC+2 values select the related pattern
   variants for this base UNSPEC.  */
template<int UNSPEC>
class crypto_vv : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    poly_uint64 nunits = 0U;
    switch (e.op_info->op)
      {
	case OP_TYPE_vv:
	  if (UNSPEC == UNSPEC_VGMUL)
	    return e.use_exact_insn
	      (code_for_pred_crypto_vv (UNSPEC, UNSPEC, e.vector_mode ()));
	  else
	    return e.use_exact_insn
	      (code_for_pred_crypto_vv (UNSPEC + 1, UNSPEC + 1, e.vector_mode ()));
	case OP_TYPE_vs:
	  /* Calculate the ratio between arg0 and arg1 to pick the
	     matching LMUL-ratio .vs pattern.  */
	  gcc_assert (multiple_p (GET_MODE_BITSIZE (e.arg_mode (0)),
				  GET_MODE_BITSIZE (e.arg_mode (1)), &nunits));
	  if (maybe_eq (nunits, 1U))
	    return e.use_exact_insn (code_for_pred_crypto_vvx1_scalar
				     (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	  else if (maybe_eq (nunits, 2U))
	    return e.use_exact_insn (code_for_pred_crypto_vvx2_scalar
				     (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	  else if (maybe_eq (nunits, 4U))
	    return e.use_exact_insn (code_for_pred_crypto_vvx4_scalar
				     (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	  else if (maybe_eq (nunits, 8U))
	    return e.use_exact_insn (code_for_pred_crypto_vvx8_scalar
				     (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	  else
	    return e.use_exact_insn (code_for_pred_crypto_vvx16_scalar
				     (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	default:
	  gcc_unreachable ();
      }
  }
};
/* Implements vaeskf1/vsm4k (crypto ops taking an immediate round number).  */
template<int UNSPEC>
class crypto_vi : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn
      (code_for_pred_crypto_vi_scalar (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vaeskf2/vsm3c (crypto ops with immediate, no maskedoff).  */
template<int UNSPEC>
class vaeskf2_vsm3c : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn
      (code_for_pred_vi_nomaskedoff_scalar (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vsm3me (SM3 message expansion).  */
class vsm3me : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_vsm3me (e.vector_mode ()));
  }
};
/* Singleton instances of the function_base classes above, one per RVV
   builtin base name.  The BASE macro below exposes each *_obj through a
   pointer in namespace "bases".  */

/* Configuration setting.  */
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
/* Unit-stride, strided and indexed (unordered/ordered) loads/stores.  */
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
static CONSTEXPR const loadstore<true, LST_UNIT_STRIDE, false> vse_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vlm_obj;
static CONSTEXPR const loadstore<true, LST_UNIT_STRIDE, false> vsm_obj;
static CONSTEXPR const loadstore<false, LST_STRIDED, false> vlse_obj;
static CONSTEXPR const loadstore<true, LST_STRIDED, false> vsse_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei8_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei16_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei32_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei64_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei8_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei16_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei32_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei64_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei8_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei16_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei32_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei64_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei8_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei16_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei32_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei64_obj;
/* Integer arithmetic, logical and shift operations.  */
static CONSTEXPR const binop<PLUS> vadd_obj;
static CONSTEXPR const binop<MINUS> vsub_obj;
static CONSTEXPR const vrsub vrsub_obj;
static CONSTEXPR const binop<AND> vand_obj;
static CONSTEXPR const binop<IOR> vor_obj;
static CONSTEXPR const binop<XOR> vxor_obj;
static CONSTEXPR const binop<ASHIFT> vsll_obj;
static CONSTEXPR const binop<ASHIFTRT> vsra_obj;
static CONSTEXPR const binop<LSHIFTRT> vsrl_obj;
static CONSTEXPR const binop<SMIN> vmin_obj;
static CONSTEXPR const binop<SMAX> vmax_obj;
static CONSTEXPR const binop<UMIN> vminu_obj;
static CONSTEXPR const binop<UMAX> vmaxu_obj;
static CONSTEXPR const binop<MULT> vmul_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHS> vmulh_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHU> vmulhu_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHSU> vmulhsu_obj;
static CONSTEXPR const binop<DIV> vdiv_obj;
static CONSTEXPR const binop<MOD> vrem_obj;
static CONSTEXPR const binop<UDIV> vdivu_obj;
static CONSTEXPR const binop<UMOD> vremu_obj;
static CONSTEXPR const unop<NEG> vneg_obj;
static CONSTEXPR const unop<NOT> vnot_obj;
/* Sign/zero extension and widening operations.  */
static CONSTEXPR const ext<SIGN_EXTEND> vsext_obj;
static CONSTEXPR const ext<ZERO_EXTEND> vzext_obj;
static CONSTEXPR const widen_binop<PLUS, SIGN_EXTEND>vwadd_obj;
static CONSTEXPR const widen_binop<MINUS, SIGN_EXTEND>vwsub_obj;
static CONSTEXPR const widen_binop<MULT, SIGN_EXTEND>vwmul_obj;
static CONSTEXPR const widen_binop<PLUS, ZERO_EXTEND>vwaddu_obj;
static CONSTEXPR const widen_binop<MINUS, ZERO_EXTEND>vwsubu_obj;
static CONSTEXPR const widen_binop<MULT, ZERO_EXTEND>vwmulu_obj;
static CONSTEXPR const vwmulsu vwmulsu_obj;
static CONSTEXPR const vwcvt<SIGN_EXTEND> vwcvt_x_obj;
static CONSTEXPR const vwcvt<ZERO_EXTEND> vwcvtu_x_obj;
/* Carry/borrow and narrowing operations.  */
static CONSTEXPR const vadc vadc_obj;
static CONSTEXPR const vsbc vsbc_obj;
static CONSTEXPR const vmadc vmadc_obj;
static CONSTEXPR const vmsbc vmsbc_obj;
static CONSTEXPR const vnshift<LSHIFTRT> vnsrl_obj;
static CONSTEXPR const vnshift<ASHIFTRT> vnsra_obj;
static CONSTEXPR const vncvt_x vncvt_x_obj;
static CONSTEXPR const vmerge vmerge_obj;
static CONSTEXPR const vmv_v vmv_v_obj;
/* Integer comparisons.  */
static CONSTEXPR const icmp<EQ> vmseq_obj;
static CONSTEXPR const icmp<NE> vmsne_obj;
static CONSTEXPR const icmp<LT> vmslt_obj;
static CONSTEXPR const icmp<GT> vmsgt_obj;
static CONSTEXPR const icmp<LE> vmsle_obj;
static CONSTEXPR const icmp<GE> vmsge_obj;
static CONSTEXPR const icmp<LTU> vmsltu_obj;
static CONSTEXPR const icmp<GTU> vmsgtu_obj;
static CONSTEXPR const icmp<LEU> vmsleu_obj;
static CONSTEXPR const icmp<GEU> vmsgeu_obj;
/* Integer multiply-add/sub (widening and non-widening).  */
static CONSTEXPR const vmacc vmacc_obj;
static CONSTEXPR const vnmsac vnmsac_obj;
static CONSTEXPR const vmadd vmadd_obj;
static CONSTEXPR const vnmsub vnmsub_obj;
static CONSTEXPR const vwmacc vwmacc_obj;
static CONSTEXPR const vwmaccu vwmaccu_obj;
static CONSTEXPR const vwmaccsu vwmaccsu_obj;
static CONSTEXPR const vwmaccus vwmaccus_obj;
/* Saturating and fixed-point arithmetic.  */
static CONSTEXPR const binop<SS_PLUS> vsadd_obj;
static CONSTEXPR const binop<SS_MINUS> vssub_obj;
static CONSTEXPR const binop<US_PLUS> vsaddu_obj;
static CONSTEXPR const binop<US_MINUS> vssubu_obj;
static CONSTEXPR const sat_op<UNSPEC_VAADDU> vaaddu_obj;
static CONSTEXPR const sat_op<UNSPEC_VAADD> vaadd_obj;
static CONSTEXPR const sat_op<UNSPEC_VASUBU> vasubu_obj;
static CONSTEXPR const sat_op<UNSPEC_VASUB> vasub_obj;
static CONSTEXPR const sat_op<UNSPEC_VSMUL> vsmul_obj;
static CONSTEXPR const sat_op<UNSPEC_VSSRL> vssrl_obj;
static CONSTEXPR const sat_op<UNSPEC_VSSRA> vssra_obj;
static CONSTEXPR const vnclip<UNSPEC_VNCLIP> vnclip_obj;
static CONSTEXPR const vnclip<UNSPEC_VNCLIPU> vnclipu_obj;
/* Mask-register logical and miscellaneous mask operations.  */
static CONSTEXPR const mask_logic<AND> vmand_obj;
static CONSTEXPR const mask_nlogic<AND> vmnand_obj;
static CONSTEXPR const mask_notlogic<AND> vmandn_obj;
static CONSTEXPR const mask_logic<XOR> vmxor_obj;
static CONSTEXPR const mask_logic<IOR> vmor_obj;
static CONSTEXPR const mask_nlogic<IOR> vmnor_obj;
static CONSTEXPR const mask_notlogic<IOR> vmorn_obj;
static CONSTEXPR const mask_nlogic<XOR> vmxnor_obj;
static CONSTEXPR const vmmv vmmv_obj;
static CONSTEXPR const vmclr vmclr_obj;
static CONSTEXPR const vmset vmset_obj;
static CONSTEXPR const vmnot vmnot_obj;
static CONSTEXPR const vcpop vcpop_obj;
static CONSTEXPR const vfirst vfirst_obj;
static CONSTEXPR const mask_misc<UNSPEC_VMSBF> vmsbf_obj;
static CONSTEXPR const mask_misc<UNSPEC_VMSIF> vmsif_obj;
static CONSTEXPR const mask_misc<UNSPEC_VMSOF> vmsof_obj;
static CONSTEXPR const viota viota_obj;
static CONSTEXPR const vid vid_obj;
/* Floating-point arithmetic; *_frm variants take an explicit rounding
   mode (HAS_FRM).  */
static CONSTEXPR const binop<PLUS, true> vfadd_obj;
static CONSTEXPR const binop<MINUS, true> vfsub_obj;
static CONSTEXPR const binop<PLUS, true, HAS_FRM> vfadd_frm_obj;
static CONSTEXPR const binop<MINUS, true, HAS_FRM> vfsub_frm_obj;
static CONSTEXPR const reverse_binop<MINUS> vfrsub_obj;
static CONSTEXPR const reverse_binop<MINUS, HAS_FRM> vfrsub_frm_obj;
static CONSTEXPR const widen_binop_fp<PLUS> vfwadd_obj;
static CONSTEXPR const widen_binop_fp<PLUS, HAS_FRM> vfwadd_frm_obj;
static CONSTEXPR const widen_binop_fp<MINUS> vfwsub_obj;
static CONSTEXPR const widen_binop_fp<MINUS, HAS_FRM> vfwsub_frm_obj;
static CONSTEXPR const binop<MULT, true> vfmul_obj;
static CONSTEXPR const binop<MULT, true, HAS_FRM> vfmul_frm_obj;
static CONSTEXPR const binop<DIV, true> vfdiv_obj;
static CONSTEXPR const binop<DIV, true, HAS_FRM> vfdiv_frm_obj;
static CONSTEXPR const reverse_binop<DIV> vfrdiv_obj;
static CONSTEXPR const reverse_binop<DIV, HAS_FRM> vfrdiv_frm_obj;
static CONSTEXPR const widen_binop_fp<MULT> vfwmul_obj;
static CONSTEXPR const widen_binop_fp<MULT, HAS_FRM> vfwmul_frm_obj;
/* Floating-point fused multiply-add family.  */
static CONSTEXPR const vfmacc<NO_FRM> vfmacc_obj;
static CONSTEXPR const vfmacc<HAS_FRM> vfmacc_frm_obj;
static CONSTEXPR const vfnmsac<NO_FRM> vfnmsac_obj;
static CONSTEXPR const vfnmsac<HAS_FRM> vfnmsac_frm_obj;
static CONSTEXPR const vfmadd<NO_FRM> vfmadd_obj;
static CONSTEXPR const vfmadd<HAS_FRM> vfmadd_frm_obj;
static CONSTEXPR const vfnmsub<NO_FRM> vfnmsub_obj;
static CONSTEXPR const vfnmsub<HAS_FRM> vfnmsub_frm_obj;
static CONSTEXPR const vfnmacc<NO_FRM> vfnmacc_obj;
static CONSTEXPR const vfnmacc<HAS_FRM> vfnmacc_frm_obj;
static CONSTEXPR const vfmsac<NO_FRM> vfmsac_obj;
static CONSTEXPR const vfmsac<HAS_FRM> vfmsac_frm_obj;
static CONSTEXPR const vfnmadd<NO_FRM> vfnmadd_obj;
static CONSTEXPR const vfnmadd<HAS_FRM> vfnmadd_frm_obj;
static CONSTEXPR const vfmsub<NO_FRM> vfmsub_obj;
static CONSTEXPR const vfmsub<HAS_FRM> vfmsub_frm_obj;
static CONSTEXPR const vfwmacc<NO_FRM> vfwmacc_obj;
static CONSTEXPR const vfwmacc<HAS_FRM> vfwmacc_frm_obj;
static CONSTEXPR const vfwnmacc<NO_FRM> vfwnmacc_obj;
static CONSTEXPR const vfwnmacc<HAS_FRM> vfwnmacc_frm_obj;
static CONSTEXPR const vfwmsac<NO_FRM> vfwmsac_obj;
static CONSTEXPR const vfwmsac<HAS_FRM> vfwmsac_frm_obj;
static CONSTEXPR const vfwnmsac<NO_FRM> vfwnmsac_obj;
static CONSTEXPR const vfwnmsac<HAS_FRM> vfwnmsac_frm_obj;
/* Floating-point unary, sign-manipulation and classification.  */
static CONSTEXPR const unop<SQRT> vfsqrt_obj;
static CONSTEXPR const unop<SQRT, HAS_FRM> vfsqrt_frm_obj;
static CONSTEXPR const float_misc<UNSPEC_VFRSQRT7> vfrsqrt7_obj;
static CONSTEXPR const float_misc<UNSPEC_VFREC7> vfrec7_obj;
static CONSTEXPR const float_misc<UNSPEC_VFREC7, HAS_FRM> vfrec7_frm_obj;
static CONSTEXPR const binop<SMIN> vfmin_obj;
static CONSTEXPR const binop<SMAX> vfmax_obj;
static CONSTEXPR const float_misc<UNSPEC_VCOPYSIGN> vfsgnj_obj;
static CONSTEXPR const vfsgnjn vfsgnjn_obj;
static CONSTEXPR const float_misc<UNSPEC_VXORSIGN> vfsgnjx_obj;
static CONSTEXPR const unop<NEG> vfneg_obj;
static CONSTEXPR const unop<ABS> vfabs_obj;
/* Floating-point comparisons.  */
static CONSTEXPR const fcmp<EQ> vmfeq_obj;
static CONSTEXPR const fcmp<NE> vmfne_obj;
static CONSTEXPR const fcmp<LT> vmflt_obj;
static CONSTEXPR const fcmp<GT> vmfgt_obj;
static CONSTEXPR const fcmp<LE> vmfle_obj;
static CONSTEXPR const fcmp<GE> vmfge_obj;
static CONSTEXPR const vfclass vfclass_obj;
static CONSTEXPR const vmerge vfmerge_obj;
static CONSTEXPR const vmv_v vfmv_v_obj;
/* Single-width, widening and narrowing FP<->integer conversions.  */
static CONSTEXPR const vfcvt_x<UNSPEC_VFCVT> vfcvt_x_obj;
static CONSTEXPR const vfcvt_x<UNSPEC_VFCVT, HAS_FRM> vfcvt_x_frm_obj;
static CONSTEXPR const vfcvt_x<UNSPEC_UNSIGNED_VFCVT> vfcvt_xu_obj;
static CONSTEXPR const vfcvt_x<UNSPEC_UNSIGNED_VFCVT, HAS_FRM> vfcvt_xu_frm_obj;
static CONSTEXPR const vfcvt_rtz_x<FIX> vfcvt_rtz_x_obj;
static CONSTEXPR const vfcvt_rtz_x<UNSIGNED_FIX> vfcvt_rtz_xu_obj;
static CONSTEXPR const vfcvt_f<NO_FRM> vfcvt_f_obj;
static CONSTEXPR const vfcvt_f<HAS_FRM> vfcvt_f_frm_obj;
static CONSTEXPR const vfwcvt_x<UNSPEC_VFCVT> vfwcvt_x_obj;
static CONSTEXPR const vfwcvt_x<UNSPEC_VFCVT, HAS_FRM> vfwcvt_x_frm_obj;
static CONSTEXPR const vfwcvt_x<UNSPEC_UNSIGNED_VFCVT> vfwcvt_xu_obj;
static CONSTEXPR const vfwcvt_x<UNSPEC_UNSIGNED_VFCVT, HAS_FRM> vfwcvt_xu_frm_obj;
static CONSTEXPR const vfwcvt_rtz_x<FIX> vfwcvt_rtz_x_obj;
static CONSTEXPR const vfwcvt_rtz_x<UNSIGNED_FIX> vfwcvt_rtz_xu_obj;
static CONSTEXPR const vfwcvt_f vfwcvt_f_obj;
static CONSTEXPR const vfncvt_x<UNSPEC_VFCVT> vfncvt_x_obj;
static CONSTEXPR const vfncvt_x<UNSPEC_VFCVT, HAS_FRM> vfncvt_x_frm_obj;
static CONSTEXPR const vfncvt_x<UNSPEC_UNSIGNED_VFCVT> vfncvt_xu_obj;
static CONSTEXPR const vfncvt_x<UNSPEC_UNSIGNED_VFCVT, HAS_FRM> vfncvt_xu_frm_obj;
static CONSTEXPR const vfncvt_rtz_x<FIX> vfncvt_rtz_x_obj;
static CONSTEXPR const vfncvt_rtz_x<UNSIGNED_FIX> vfncvt_rtz_xu_obj;
static CONSTEXPR const vfncvt_f<NO_FRM> vfncvt_f_obj;
static CONSTEXPR const vfncvt_f<HAS_FRM> vfncvt_f_frm_obj;
static CONSTEXPR const vfncvt_rod_f vfncvt_rod_f_obj;
/* Integer and floating-point reductions (ordered/unordered).  */
static CONSTEXPR const reducop<UNSPEC_REDUC_SUM> vredsum_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MAXU> vredmaxu_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MAX> vredmax_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MINU> vredminu_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MIN> vredmin_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_AND> vredand_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_OR> vredor_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_XOR> vredxor_obj;
static CONSTEXPR const reducop<UNSPEC_WREDUC_SUM> vwredsum_obj;
static CONSTEXPR const reducop<UNSPEC_WREDUC_SUMU> vwredsumu_obj;
static CONSTEXPR const freducop<UNSPEC_REDUC_SUM_UNORDERED> vfredusum_obj;
static CONSTEXPR const freducop<UNSPEC_REDUC_SUM_UNORDERED, HAS_FRM> vfredusum_frm_obj;
static CONSTEXPR const freducop<UNSPEC_REDUC_SUM_ORDERED> vfredosum_obj;
static CONSTEXPR const freducop<UNSPEC_REDUC_SUM_ORDERED, HAS_FRM> vfredosum_frm_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MAX> vfredmax_obj;
static CONSTEXPR const reducop<UNSPEC_REDUC_MIN> vfredmin_obj;
static CONSTEXPR const freducop<UNSPEC_WREDUC_SUM_UNORDERED> vfwredusum_obj;
static CONSTEXPR const freducop<UNSPEC_WREDUC_SUM_UNORDERED, HAS_FRM> vfwredusum_frm_obj;
static CONSTEXPR const freducop<UNSPEC_WREDUC_SUM_ORDERED> vfwredosum_obj;
static CONSTEXPR const freducop<UNSPEC_WREDUC_SUM_ORDERED, HAS_FRM> vfwredosum_frm_obj;
/* Scalar moves, slides and permutations.  */
static CONSTEXPR const vmv vmv_x_obj;
static CONSTEXPR const vmv_s vmv_s_obj;
static CONSTEXPR const vmv vfmv_f_obj;
static CONSTEXPR const vmv_s vfmv_s_obj;
static CONSTEXPR const slideop<UNSPEC_VSLIDEUP> vslideup_obj;
static CONSTEXPR const slideop<UNSPEC_VSLIDEDOWN> vslidedown_obj;
static CONSTEXPR const slideop<UNSPEC_VSLIDE1UP> vslide1up_obj;
static CONSTEXPR const slideop<UNSPEC_VSLIDE1DOWN> vslide1down_obj;
static CONSTEXPR const slideop<UNSPEC_VFSLIDE1UP> vfslide1up_obj;
static CONSTEXPR const slideop<UNSPEC_VFSLIDE1DOWN> vfslide1down_obj;
static CONSTEXPR const vrgather vrgather_obj;
static CONSTEXPR const vrgatherei16 vrgatherei16_obj;
static CONSTEXPR const vcompress vcompress_obj;
/* Pseudo operations: type reinterpretation, LMUL extension/truncation,
   tuple set/get/create, and miscellaneous queries.  */
static CONSTEXPR const vundefined vundefined_obj;
static CONSTEXPR const vreinterpret vreinterpret_obj;
static CONSTEXPR const vlmul_ext vlmul_ext_obj;
static CONSTEXPR const vlmul_trunc vlmul_trunc_obj;
static CONSTEXPR const vset vset_obj;
static CONSTEXPR const vget vget_obj;
static CONSTEXPR const vcreate vcreate_obj;
static CONSTEXPR const read_vl read_vl_obj;
static CONSTEXPR const vleff vleff_obj;
static CONSTEXPR const vlenb vlenb_obj;
/* Segment (tuple) loads/stores.  */
static CONSTEXPR const vlseg vlseg_obj;
static CONSTEXPR const vsseg vsseg_obj;
static CONSTEXPR const vlsseg vlsseg_obj;
static CONSTEXPR const vssseg vssseg_obj;
static CONSTEXPR const seg_indexed_load<UNSPEC_UNORDERED> vluxseg_obj;
static CONSTEXPR const seg_indexed_load<UNSPEC_ORDERED> vloxseg_obj;
static CONSTEXPR const seg_indexed_store<UNSPEC_UNORDERED> vsuxseg_obj;
static CONSTEXPR const seg_indexed_store<UNSPEC_ORDERED> vsoxseg_obj;
static CONSTEXPR const vlsegff vlsegff_obj;
/* T-Head (XTheadVector) width-specific loads/stores.  */
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLB> vlb_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLBU> vlbu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLH> vlh_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLHU> vlhu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLW> vlw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_UNIT_STRIDE, UNSPEC_TH_VLWU> vlwu_obj;
static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLB> vsb_obj;
static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLH> vsh_obj;
static CONSTEXPR const th_loadstore_width<true, LST_UNIT_STRIDE, UNSPEC_TH_VLW> vsw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSB> vlsb_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSBU> vlsbu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSH> vlsh_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSHU> vlshu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSW> vlsw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_STRIDED, UNSPEC_TH_VLSWU> vlswu_obj;
static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSB> vssb_obj;
static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSH> vssh_obj;
static CONSTEXPR const th_loadstore_width<true, LST_STRIDED, UNSPEC_TH_VLSW> vssw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXB> vlxb_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXBU> vlxbu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXH> vlxh_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXHU> vlxhu_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXW> vlxw_obj;
static CONSTEXPR const th_loadstore_width<false, LST_INDEXED, UNSPEC_TH_VLXWU> vlxwu_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXB> vsxb_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXH> vsxh_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VLXW> vsxw_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXB> vsuxb_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXH> vsuxh_obj;
static CONSTEXPR const th_loadstore_width<true, LST_INDEXED, UNSPEC_TH_VSUXW> vsuxw_obj;
static CONSTEXPR const th_extract vext_x_v_obj;
/* Crypto Vector */
static CONSTEXPR const vandn vandn_obj;
static CONSTEXPR const bitmanip<ROTATE> vrol_obj;
static CONSTEXPR const bitmanip<ROTATERT> vror_obj;
static CONSTEXPR const b_reverse<UNSPEC_VBREV> vbrev_obj;
static CONSTEXPR const b_reverse<UNSPEC_VBREV8> vbrev8_obj;
static CONSTEXPR const b_reverse<UNSPEC_VREV8> vrev8_obj;
static CONSTEXPR const bitmanip<CLZ> vclz_obj;
static CONSTEXPR const bitmanip<CTZ> vctz_obj;
static CONSTEXPR const vwsll vwsll_obj;
static CONSTEXPR const clmul<UNSPEC_VCLMUL> vclmul_obj;
static CONSTEXPR const clmul<UNSPEC_VCLMULH> vclmulh_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VGHSH> vghsh_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VGMUL> vgmul_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESEF> vaesef_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESEM> vaesem_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESDF> vaesdf_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESDM> vaesdm_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VAESZ> vaesz_obj;
static CONSTEXPR const crypto_vi<UNSPEC_VAESKF1> vaeskf1_obj;
static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VAESKF2> vaeskf2_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2MS> vsha2ms_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CH> vsha2ch_obj;
static CONSTEXPR const vg_nhab<UNSPEC_VSHA2CL> vsha2cl_obj;
static CONSTEXPR const crypto_vi<UNSPEC_VSM4K> vsm4k_obj;
static CONSTEXPR const crypto_vv<UNSPEC_VSM4R> vsm4r_obj;
static CONSTEXPR const vsm3me vsm3me_obj;
static CONSTEXPR const vaeskf2_vsm3c<UNSPEC_VSM3C> vsm3c_obj;
/* Declare the function base NAME, pointing it to an instance
   of class <NAME>_obj.  Each invocation below defines one entry in
   namespace "bases"; the matching extern declarations live in
   riscv-vector-builtins-bases.h.  */
#define BASE(NAME) \
  namespace bases { const function_base *const NAME = &NAME##_obj; }

BASE (vsetvl)
BASE (vsetvlmax)
BASE (vle)
BASE (vse)
BASE (vlm)
BASE (vsm)
BASE (vlse)
BASE (vsse)
BASE (vluxei8)
BASE (vluxei16)
BASE (vluxei32)
BASE (vluxei64)
BASE (vloxei8)
BASE (vloxei16)
BASE (vloxei32)
BASE (vloxei64)
BASE (vsuxei8)
BASE (vsuxei16)
BASE (vsuxei32)
BASE (vsuxei64)
BASE (vsoxei8)
BASE (vsoxei16)
BASE (vsoxei32)
BASE (vsoxei64)
BASE (vadd)
BASE (vsub)
BASE (vrsub)
BASE (vand)
BASE (vor)
BASE (vxor)
BASE (vsll)
BASE (vsra)
BASE (vsrl)
BASE (vmin)
BASE (vmax)
BASE (vminu)
BASE (vmaxu)
BASE (vmul)
BASE (vmulh)
BASE (vmulhu)
BASE (vmulhsu)
BASE (vdiv)
BASE (vrem)
BASE (vdivu)
BASE (vremu)
BASE (vneg)
BASE (vnot)
BASE (vsext)
BASE (vzext)
BASE (vwadd)
BASE (vwsub)
BASE (vwmul)
BASE (vwaddu)
BASE (vwsubu)
BASE (vwmulu)
BASE (vwmulsu)
BASE (vwcvt_x)
BASE (vwcvtu_x)
BASE (vadc)
BASE (vsbc)
BASE (vmadc)
BASE (vmsbc)
BASE (vnsrl)
BASE (vnsra)
BASE (vncvt_x)
BASE (vmerge)
BASE (vmv_v)
BASE (vmseq)
BASE (vmsne)
BASE (vmslt)
BASE (vmsgt)
BASE (vmsle)
BASE (vmsge)
BASE (vmsltu)
BASE (vmsgtu)
BASE (vmsleu)
BASE (vmsgeu)
BASE (vmacc)
BASE (vnmsac)
BASE (vmadd)
BASE (vnmsub)
BASE (vwmacc)
BASE (vwmaccu)
BASE (vwmaccsu)
BASE (vwmaccus)
BASE (vsadd)
BASE (vssub)
BASE (vsaddu)
BASE (vssubu)
BASE (vaadd)
BASE (vasub)
BASE (vaaddu)
BASE (vasubu)
BASE (vsmul)
BASE (vssra)
BASE (vssrl)
BASE (vnclip)
BASE (vnclipu)
BASE (vmand)
BASE (vmnand)
BASE (vmandn)
BASE (vmxor)
BASE (vmor)
BASE (vmnor)
BASE (vmorn)
BASE (vmxnor)
BASE (vmmv)
BASE (vmclr)
BASE (vmset)
BASE (vmnot)
BASE (vcpop)
BASE (vfirst)
BASE (vmsbf)
BASE (vmsif)
BASE (vmsof)
BASE (viota)
BASE (vid)
BASE (vfadd)
BASE (vfadd_frm)
BASE (vfsub)
BASE (vfsub_frm)
BASE (vfrsub)
BASE (vfrsub_frm)
BASE (vfwadd)
BASE (vfwadd_frm)
BASE (vfwsub)
BASE (vfwsub_frm)
BASE (vfmul)
BASE (vfmul_frm)
BASE (vfdiv)
BASE (vfdiv_frm)
BASE (vfrdiv)
BASE (vfrdiv_frm)
BASE (vfwmul)
BASE (vfwmul_frm)
BASE (vfmacc)
BASE (vfmacc_frm)
BASE (vfnmsac)
BASE (vfnmsac_frm)
BASE (vfmadd)
BASE (vfmadd_frm)
BASE (vfnmsub)
BASE (vfnmsub_frm)
BASE (vfnmacc)
BASE (vfnmacc_frm)
BASE (vfmsac)
BASE (vfmsac_frm)
BASE (vfnmadd)
BASE (vfnmadd_frm)
BASE (vfmsub)
BASE (vfmsub_frm)
BASE (vfwmacc)
BASE (vfwmacc_frm)
BASE (vfwnmacc)
BASE (vfwnmacc_frm)
BASE (vfwmsac)
BASE (vfwmsac_frm)
BASE (vfwnmsac)
BASE (vfwnmsac_frm)
BASE (vfsqrt)
BASE (vfsqrt_frm)
BASE (vfrsqrt7)
BASE (vfrec7)
BASE (vfrec7_frm)
BASE (vfmin)
BASE (vfmax)
BASE (vfsgnj)
BASE (vfsgnjn)
BASE (vfsgnjx)
BASE (vfneg)
BASE (vfabs)
BASE (vmfeq)
BASE (vmfne)
BASE (vmflt)
BASE (vmfgt)
BASE (vmfle)
BASE (vmfge)
BASE (vfclass)
BASE (vfmerge)
BASE (vfmv_v)
BASE (vfcvt_x)
BASE (vfcvt_x_frm)
BASE (vfcvt_xu)
BASE (vfcvt_xu_frm)
BASE (vfcvt_rtz_x)
BASE (vfcvt_rtz_xu)
BASE (vfcvt_f)
BASE (vfcvt_f_frm)
BASE (vfwcvt_x)
BASE (vfwcvt_x_frm)
BASE (vfwcvt_xu)
BASE (vfwcvt_xu_frm)
BASE (vfwcvt_rtz_x)
BASE (vfwcvt_rtz_xu)
BASE (vfwcvt_f)
BASE (vfncvt_x)
BASE (vfncvt_x_frm)
BASE (vfncvt_xu)
BASE (vfncvt_xu_frm)
BASE (vfncvt_rtz_x)
BASE (vfncvt_rtz_xu)
BASE (vfncvt_f)
BASE (vfncvt_f_frm)
BASE (vfncvt_rod_f)
BASE (vredsum)
BASE (vredmaxu)
BASE (vredmax)
BASE (vredminu)
BASE (vredmin)
BASE (vredand)
BASE (vredor)
BASE (vredxor)
BASE (vwredsum)
BASE (vwredsumu)
BASE (vfredusum)
BASE (vfredusum_frm)
BASE (vfredosum)
BASE (vfredosum_frm)
BASE (vfredmax)
BASE (vfredmin)
BASE (vfwredosum)
BASE (vfwredosum_frm)
BASE (vfwredusum)
BASE (vfwredusum_frm)
BASE (vmv_x)
BASE (vmv_s)
BASE (vfmv_f)
BASE (vfmv_s)
BASE (vslideup)
BASE (vslidedown)
BASE (vslide1up)
BASE (vslide1down)
BASE (vfslide1up)
BASE (vfslide1down)
BASE (vrgather)
BASE (vrgatherei16)
BASE (vcompress)
BASE (vundefined)
BASE (vreinterpret)
BASE (vlmul_ext)
BASE (vlmul_trunc)
BASE (vset)
BASE (vget)
BASE (vcreate)
BASE (read_vl)
BASE (vleff)
BASE (vlenb)
BASE (vlseg)
BASE (vsseg)
BASE (vlsseg)
BASE (vssseg)
BASE (vluxseg)
BASE (vloxseg)
BASE (vsuxseg)
BASE (vsoxseg)
BASE (vlsegff)
/* T-Head (XTheadVector) bases.  */
BASE (vlb)
BASE (vlh)
BASE (vlw)
BASE (vlbu)
BASE (vlhu)
BASE (vlwu)
BASE (vsb)
BASE (vsh)
BASE (vsw)
BASE (vlsb)
BASE (vlsh)
BASE (vlsw)
BASE (vlsbu)
BASE (vlshu)
BASE (vlswu)
BASE (vssb)
BASE (vssh)
BASE (vssw)
BASE (vlxb)
BASE (vlxh)
BASE (vlxw)
BASE (vlxbu)
BASE (vlxhu)
BASE (vlxwu)
BASE (vsxb)
BASE (vsxh)
BASE (vsxw)
BASE (vsuxb)
BASE (vsuxh)
BASE (vsuxw)
BASE (vext_x_v)
/* Crypto vector */
BASE (vandn)
BASE (vbrev)
BASE (vbrev8)
BASE (vrev8)
BASE (vclz)
BASE (vctz)
BASE (vrol)
BASE (vror)
BASE (vwsll)
BASE (vclmul)
BASE (vclmulh)
BASE (vghsh)
BASE (vgmul)
BASE (vaesef)
BASE (vaesem)
BASE (vaesdf)
BASE (vaesdm)
BASE (vaesz)
BASE (vaeskf1)
BASE (vaeskf2)
BASE (vsha2ms)
BASE (vsha2ch)
BASE (vsha2cl)
BASE (vsm4k)
BASE (vsm4r)
BASE (vsm3me)
BASE (vsm3c)
3064 } // end namespace riscv_vector