1 /* function_base implementation for RISC-V 'V' Extension for GNU compiler.
2 Copyright (C) 2022-2024 Free Software Foundation, Inc.
3 Contributed by Ju-Zhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
29 #include "insn-codes.h"
33 #include "basic-block.h"
35 #include "fold-const.h"
37 #include "gimple-iterator.h"
41 #include "tree-vector-builder.h"
42 #include "rtx-vector-builder.h"
43 #include "riscv-vector-builtins.h"
44 #include "riscv-vector-builtins-shapes.h"
45 #include "riscv-vector-builtins-bases.h"
47 using namespace riscv_vector
;
49 namespace riscv_vector
{
/* Enumerates types of loads/stores operations.
   It's only used in here so we don't define it
   in riscv-vector-builtins-bases.h.  */
enum lst_type
{
  LST_UNIT_STRIDE,	/* vle/vse: consecutive elements.  */
  LST_STRIDED,		/* vlse/vsse: constant byte stride.  */
  LST_INDEXED,		/* vluxei/vloxei/vsuxei/vsoxei: per-element offsets.  */
};
67 /* Helper function to fold vleff and vlsegff. */
69 fold_fault_load (gimple_folder
&f
)
71 /* fold fault_load (const *base, size_t *new_vl, size_t vl)
73 ====> fault_load (const *base, size_t vl)
74 new_vl = MEM_REF[read_vl ()]. */
76 auto_vec
<tree
> vargs (gimple_call_num_args (f
.call
) - 1);
78 for (unsigned i
= 0; i
< gimple_call_num_args (f
.call
); i
++)
80 /* Exclude size_t *new_vl argument. */
81 if (i
== gimple_call_num_args (f
.call
) - 2)
84 vargs
.quick_push (gimple_call_arg (f
.call
, i
));
87 gimple
*repl
= gimple_build_call_vec (gimple_call_fn (f
.call
), vargs
);
88 gimple_call_set_lhs (repl
, f
.lhs
);
90 /* Handle size_t *new_vl by read_vl. */
91 tree new_vl
= gimple_call_arg (f
.call
, gimple_call_num_args (f
.call
) - 2);
92 if (integer_zerop (new_vl
))
94 /* This case happens when user passes the nullptr to new_vl argument.
95 In this case, we just need to ignore the new_vl argument and return
96 fault_load instruction directly. */
100 tree tmp_var
= create_tmp_var (size_type_node
, "new_vl");
101 tree decl
= get_read_vl_decl ();
102 gimple
*g
= gimple_build_call (decl
, 0);
103 gimple_call_set_lhs (g
, tmp_var
);
105 = fold_build2 (MEM_REF
, size_type_node
,
106 gimple_call_arg (f
.call
, gimple_call_num_args (f
.call
) - 2),
107 build_int_cst (build_pointer_type (size_type_node
), 0));
108 gassign
*assign
= gimple_build_assign (indirect
, tmp_var
);
110 gsi_insert_after (f
.gsi
, assign
, GSI_SAME_STMT
);
111 gsi_insert_after (f
.gsi
, g
, GSI_SAME_STMT
);
115 /* Implements vsetvl<mode> && vsetvlmax<mode>. */
116 template<bool VLMAX_P
>
117 class vsetvl
: public function_base
120 bool apply_vl_p () const override
125 rtx
expand (function_expander
&e
) const override
128 e
.add_input_operand (Pmode
, gen_rtx_REG (Pmode
, 0));
130 e
.add_input_operand (0);
132 tree type
= builtin_types
[e
.type
.index
].vector
;
133 machine_mode mode
= TYPE_MODE (type
);
135 if (TARGET_XTHEADVECTOR
)
137 machine_mode inner_mode
= GET_MODE_INNER (mode
);
139 e
.add_input_operand (Pmode
,
140 gen_int_mode (GET_MODE_BITSIZE (inner_mode
), Pmode
));
142 e
.add_input_operand (Pmode
,
143 gen_int_mode (get_vlmul (mode
), Pmode
));
147 /* Normalize same RATO (SEW/LMUL) into same vsetvl instruction.
149 - e8,mf8/e16,mf4/e32,mf2/e64,m1 --> e8mf8
150 - e8,mf4/e16,mf2/e32,m1/e64,m2 --> e8mf4
151 - e8,mf2/e16,m1/e32,m2/e64,m4 --> e8mf2
152 - e8,m1/e16,m2/e32,m4/e64,m8 --> e8m1
153 - e8,m2/e16,m4/e32,m8 --> e8m2
154 - e8,m4/e16,m8 --> e8m4
158 e
.add_input_operand (Pmode
, gen_int_mode (8, Pmode
));
162 = get_vector_mode (QImode
, GET_MODE_NUNITS (mode
)).require ();
163 e
.add_input_operand (Pmode
, gen_int_mode (get_vlmul (e8_mode
), Pmode
));
167 e
.add_input_operand (Pmode
,
168 gen_int_mode (get_prefer_tail_policy (), Pmode
));
171 e
.add_input_operand (Pmode
,
172 gen_int_mode (get_prefer_mask_policy (), Pmode
));
173 return e
.generate_insn (code_for_vsetvl_no_side_effects (Pmode
));
178 * vle.v/vse.v/vlm.v/vsm.v/vlse.v/vsse.v/vluxei.v/vloxei.v/vsuxei.v/vsoxei.v
180 template<bool STORE_P
, lst_type LST_TYPE
, bool ORDERED_P
>
181 class loadstore
: public function_base
184 bool apply_tail_policy_p () const override
{ return !STORE_P
; }
185 bool apply_mask_policy_p () const override
{ return !STORE_P
; }
187 unsigned int call_properties (const function_instance
&) const override
190 return CP_WRITE_MEMORY
;
192 return CP_READ_MEMORY
;
195 bool can_be_overloaded_p (enum predication_type_index pred
) const override
197 if (STORE_P
|| LST_TYPE
== LST_INDEXED
)
199 return pred
!= PRED_TYPE_none
;
202 rtx
expand (function_expander
&e
) const override
204 if (LST_TYPE
== LST_INDEXED
)
206 int unspec
= ORDERED_P
? UNSPEC_ORDERED
: UNSPEC_UNORDERED
;
208 return e
.use_exact_insn (
209 code_for_pred_indexed_store (unspec
, e
.vector_mode (),
213 unsigned src_eew_bitsize
214 = GET_MODE_BITSIZE (GET_MODE_INNER (e
.index_mode ()));
215 unsigned dst_eew_bitsize
216 = GET_MODE_BITSIZE (GET_MODE_INNER (e
.vector_mode ()));
217 if (dst_eew_bitsize
== src_eew_bitsize
)
218 return e
.use_exact_insn (
219 code_for_pred_indexed_load_same_eew (unspec
, e
.vector_mode ()));
220 else if (dst_eew_bitsize
> src_eew_bitsize
)
222 unsigned factor
= dst_eew_bitsize
/ src_eew_bitsize
;
226 return e
.use_exact_insn (
227 code_for_pred_indexed_load_x2_greater_eew (
228 unspec
, e
.vector_mode ()));
230 return e
.use_exact_insn (
231 code_for_pred_indexed_load_x4_greater_eew (
232 unspec
, e
.vector_mode ()));
234 return e
.use_exact_insn (
235 code_for_pred_indexed_load_x8_greater_eew (
236 unspec
, e
.vector_mode ()));
243 unsigned factor
= src_eew_bitsize
/ dst_eew_bitsize
;
247 return e
.use_exact_insn (
248 code_for_pred_indexed_load_x2_smaller_eew (
249 unspec
, e
.vector_mode ()));
251 return e
.use_exact_insn (
252 code_for_pred_indexed_load_x4_smaller_eew (
253 unspec
, e
.vector_mode ()));
255 return e
.use_exact_insn (
256 code_for_pred_indexed_load_x8_smaller_eew (
257 unspec
, e
.vector_mode ()));
264 else if (LST_TYPE
== LST_STRIDED
)
267 return e
.use_contiguous_store_insn (
268 code_for_pred_strided_store (e
.vector_mode ()));
270 return e
.use_contiguous_load_insn (
271 code_for_pred_strided_load (e
.vector_mode ()));
276 return e
.use_contiguous_store_insn (
277 code_for_pred_store (e
.vector_mode ()));
279 return e
.use_contiguous_load_insn (
280 code_for_pred_mov (e
.vector_mode ()));
286 vadd/vsub/vand/vor/vxor/vsll/vsra/vsrl/
287 vmin/vmax/vminu/vmaxu/vdiv/vrem/vdivu/
288 vremu/vsadd/vsaddu/vssub/vssubu
291 template <rtx_code CODE
, bool MAY_REQUIRE_FRM
= false,
292 enum frm_op_type FRM_OP
= NO_FRM
>
293 class binop
: public function_base
296 bool has_rounding_mode_operand_p () const override
298 return FRM_OP
== HAS_FRM
;
301 bool may_require_frm_p () const override
{ return MAY_REQUIRE_FRM
; }
303 rtx
expand (function_expander
&e
) const override
305 switch (e
.op_info
->op
)
308 gcc_assert (FRM_OP
== NO_FRM
);
310 return e
.use_exact_insn (code_for_pred_scalar (CODE
, e
.vector_mode ()));
312 return e
.use_exact_insn (code_for_pred (CODE
, e
.vector_mode ()));
319 /* Implements vrsub. */
320 class vrsub
: public function_base
323 rtx
expand (function_expander
&e
) const override
325 return e
.use_exact_insn (
326 code_for_pred_sub_reverse_scalar (e
.vector_mode ()));
330 /* Implements vneg/vnot. */
331 template<rtx_code CODE
, enum frm_op_type FRM_OP
= NO_FRM
>
332 class unop
: public function_base
335 bool has_rounding_mode_operand_p () const override
337 return FRM_OP
== HAS_FRM
;
340 bool may_require_frm_p () const override
{ return true; }
342 rtx
expand (function_expander
&e
) const override
344 return e
.use_exact_insn (code_for_pred (CODE
, e
.vector_mode ()));
348 /* Implements vsext.vf2/vsext.vf4/vsext.vf8/vzext.vf2/vzext.vf4/vzext.vf8. */
349 template<rtx_code CODE
>
350 class ext
: public function_base
353 rtx
expand (function_expander
&e
) const override
355 switch (e
.op_info
->op
)
358 return e
.use_exact_insn (code_for_pred_vf2 (CODE
, e
.vector_mode ()));
360 return e
.use_exact_insn (code_for_pred_vf4 (CODE
, e
.vector_mode ()));
362 return e
.use_exact_insn (code_for_pred_vf8 (CODE
, e
.vector_mode ()));
369 /* Implements vmulh/vmulhu/vmulhsu. */
371 class vmulh
: public function_base
374 rtx
expand (function_expander
&e
) const override
376 switch (e
.op_info
->op
)
379 return e
.use_exact_insn (
380 code_for_pred_mulh_scalar (UNSPEC
, e
.vector_mode ()));
382 return e
.use_exact_insn (
383 code_for_pred_mulh (UNSPEC
, e
.vector_mode ()));
390 /* Implements vwadd/vwsub/vwmul. */
391 template<rtx_code CODE1
, rtx_code CODE2
= FLOAT_EXTEND
>
392 class widen_binop
: public function_base
395 rtx
expand (function_expander
&e
) const override
397 switch (e
.op_info
->op
)
400 return e
.use_exact_insn (
401 code_for_pred_dual_widen (CODE1
, CODE2
, e
.vector_mode ()));
403 return e
.use_exact_insn (
404 code_for_pred_dual_widen_scalar (CODE1
, CODE2
, e
.vector_mode ()));
407 return e
.use_exact_insn (
408 code_for_pred_single_widen_add (CODE2
, e
.vector_mode ()));
410 return e
.use_exact_insn (
411 code_for_pred_single_widen_sub (CODE2
, e
.vector_mode ()));
413 return e
.use_exact_insn (
414 code_for_pred_single_widen_scalar (CODE1
, CODE2
, e
.vector_mode ()));
421 /* Implement vfwadd/vfwsub/vfwmul. */
422 template<rtx_code CODE
, enum frm_op_type FRM_OP
= NO_FRM
>
423 class widen_binop_fp
: public function_base
426 bool has_rounding_mode_operand_p () const override
428 return FRM_OP
== HAS_FRM
;
431 bool may_require_frm_p () const override
{ return true; }
433 rtx
expand (function_expander
&e
) const override
435 switch (e
.op_info
->op
)
438 return e
.use_exact_insn (
439 code_for_pred_dual_widen (CODE
, e
.vector_mode ()));
441 return e
.use_exact_insn (
442 code_for_pred_dual_widen_scalar (CODE
, e
.vector_mode ()));
445 return e
.use_exact_insn (
446 code_for_pred_single_widen_add (e
.vector_mode ()));
448 return e
.use_exact_insn (
449 code_for_pred_single_widen_sub (e
.vector_mode ()));
451 return e
.use_exact_insn (
452 code_for_pred_single_widen_scalar (CODE
, e
.vector_mode ()));
459 /* Implements vwmulsu. */
460 class vwmulsu
: public function_base
463 rtx
expand (function_expander
&e
) const override
465 switch (e
.op_info
->op
)
468 return e
.use_exact_insn (code_for_pred_widen_mulsu (e
.vector_mode ()));
470 return e
.use_exact_insn (
471 code_for_pred_widen_mulsu_scalar (e
.vector_mode ()));
478 /* Implements vwcvt. */
479 template<rtx_code CODE
>
480 class vwcvt
: public function_base
483 rtx
expand (function_expander
&e
) const override
485 return e
.use_exact_insn (code_for_pred (CODE
, e
.vector_mode ()));
489 /* Implements vadc. */
490 class vadc
: public function_base
493 bool apply_mask_policy_p () const override
{ return false; }
494 bool use_mask_predication_p () const override
{ return false; }
496 rtx
expand (function_expander
&e
) const override
498 switch (e
.op_info
->op
)
501 return e
.use_exact_insn (code_for_pred_adc (e
.vector_mode ()));
503 return e
.use_exact_insn (code_for_pred_adc_scalar (e
.vector_mode ()));
510 /* Implements vsbc. */
511 class vsbc
: public function_base
514 bool apply_mask_policy_p () const override
{ return false; }
515 bool use_mask_predication_p () const override
{ return false; }
517 rtx
expand (function_expander
&e
) const override
519 switch (e
.op_info
->op
)
522 return e
.use_exact_insn (code_for_pred_sbc (e
.vector_mode ()));
524 return e
.use_exact_insn (code_for_pred_sbc_scalar (e
.vector_mode ()));
531 /* Implements vmadc. */
532 class vmadc
: public function_base
535 bool apply_tail_policy_p () const override
{ return false; }
536 bool apply_mask_policy_p () const override
{ return false; }
537 bool use_mask_predication_p () const override
{ return false; }
538 bool has_merge_operand_p () const override
{ return false; }
540 rtx
expand (function_expander
&e
) const override
542 switch (e
.op_info
->op
)
545 return e
.use_exact_insn (code_for_pred_madc (e
.vector_mode ()));
547 return e
.use_exact_insn (code_for_pred_madc_scalar (e
.vector_mode ()));
549 return e
.use_exact_insn (
550 code_for_pred_madc_overflow (e
.vector_mode ()));
552 return e
.use_exact_insn (
553 code_for_pred_madc_overflow_scalar (e
.vector_mode ()));
560 /* Implements vmsbc. */
561 class vmsbc
: public function_base
564 bool apply_tail_policy_p () const override
{ return false; }
565 bool apply_mask_policy_p () const override
{ return false; }
566 bool use_mask_predication_p () const override
{ return false; }
567 bool has_merge_operand_p () const override
{ return false; }
569 rtx
expand (function_expander
&e
) const override
571 switch (e
.op_info
->op
)
574 return e
.use_exact_insn (code_for_pred_msbc (e
.vector_mode ()));
576 return e
.use_exact_insn (code_for_pred_msbc_scalar (e
.vector_mode ()));
578 return e
.use_exact_insn (
579 code_for_pred_msbc_overflow (e
.vector_mode ()));
581 return e
.use_exact_insn (
582 code_for_pred_msbc_overflow_scalar (e
.vector_mode ()));
589 /* Implements vnsrl/vnsra. */
590 template<rtx_code CODE
>
591 class vnshift
: public function_base
594 rtx
expand (function_expander
&e
) const override
596 switch (e
.op_info
->op
)
599 return e
.use_exact_insn (
600 code_for_pred_narrow_scalar (CODE
, e
.vector_mode ()));
602 return e
.use_exact_insn (code_for_pred_narrow (CODE
, e
.vector_mode ()));
609 /* Implements vncvt. */
610 class vncvt_x
: public function_base
613 rtx
expand (function_expander
&e
) const override
615 return e
.use_exact_insn (code_for_pred_trunc (e
.vector_mode ()));
619 /* Implements vmerge/vfmerge. */
620 class vmerge
: public function_base
623 bool apply_mask_policy_p () const override
{ return false; }
624 bool use_mask_predication_p () const override
{ return false; }
625 rtx
expand (function_expander
&e
) const override
627 switch (e
.op_info
->op
)
630 return e
.use_exact_insn (code_for_pred_merge (e
.vector_mode ()));
633 return e
.use_exact_insn (code_for_pred_merge_scalar (e
.vector_mode ()));
640 /* Implements vmv.v.x/vmv.v.v/vfmv.v.f. */
641 class vmv_v
: public function_base
644 rtx
expand (function_expander
&e
) const override
646 switch (e
.op_info
->op
)
649 return e
.use_exact_insn (code_for_pred_mov (e
.vector_mode ()));
652 return e
.use_exact_insn (code_for_pred_broadcast (e
.vector_mode ()));
659 /* Implements vaadd/vasub/vsmul/vssra/vssrl. */
661 class sat_op
: public function_base
664 bool has_rounding_mode_operand_p () const override
{ return true; }
666 bool may_require_vxrm_p () const override
{ return true; }
668 rtx
expand (function_expander
&e
) const override
670 switch (e
.op_info
->op
)
673 return e
.use_exact_insn (
674 code_for_pred_scalar (UNSPEC
, e
.vector_mode ()));
676 return e
.use_exact_insn (code_for_pred (UNSPEC
, e
.vector_mode ()));
683 /* Implements vnclip/vnclipu. */
685 class vnclip
: public function_base
688 bool has_rounding_mode_operand_p () const override
{ return true; }
690 bool may_require_vxrm_p () const override
{ return true; }
692 rtx
expand (function_expander
&e
) const override
694 switch (e
.op_info
->op
)
697 return e
.use_exact_insn (
698 code_for_pred_narrow_clip_scalar (UNSPEC
, e
.vector_mode ()));
700 return e
.use_exact_insn (
701 code_for_pred_narrow_clip (UNSPEC
, e
.vector_mode ()));
708 /* Implements vmseq/vmsne/vmslt/vmsgt/vmsle/vmsge. */
709 template<rtx_code CODE
>
710 class icmp
: public function_base
713 rtx
expand (function_expander
&e
) const override
715 switch (e
.op_info
->op
)
718 if (CODE
== GE
|| CODE
== GEU
)
719 return e
.use_compare_insn (CODE
, code_for_pred_ge_scalar (
721 else if (CODE
== EQ
|| CODE
== NE
)
722 return e
.use_compare_insn (CODE
, code_for_pred_eqne_scalar (
725 return e
.use_compare_insn (CODE
, code_for_pred_cmp_scalar (
729 if (CODE
== LT
|| CODE
== LTU
|| CODE
== GE
|| CODE
== GEU
)
730 return e
.use_compare_insn (CODE
,
731 code_for_pred_ltge (e
.vector_mode ()));
733 return e
.use_compare_insn (CODE
,
734 code_for_pred_cmp (e
.vector_mode ()));
742 /* Implements vmacc/vnmsac/vmadd/vnmsub. */
743 class vmacc
: public function_base
746 bool has_merge_operand_p () const override
{ return false; }
748 rtx
expand (function_expander
&e
) const override
750 if (e
.op_info
->op
== OP_TYPE_vx
)
751 return e
.use_ternop_insn (true, code_for_pred_mul_plus_scalar (
753 if (e
.op_info
->op
== OP_TYPE_vv
)
754 return e
.use_ternop_insn (true,
755 code_for_pred_mul_plus (e
.vector_mode ()));
760 class vnmsac
: public function_base
763 bool has_merge_operand_p () const override
{ return false; }
765 rtx
expand (function_expander
&e
) const override
767 if (e
.op_info
->op
== OP_TYPE_vx
)
768 return e
.use_ternop_insn (true, code_for_pred_minus_mul_scalar (
770 if (e
.op_info
->op
== OP_TYPE_vv
)
771 return e
.use_ternop_insn (true,
772 code_for_pred_minus_mul (e
.vector_mode ()));
777 class vmadd
: public function_base
780 bool has_merge_operand_p () const override
{ return false; }
782 rtx
expand (function_expander
&e
) const override
784 if (e
.op_info
->op
== OP_TYPE_vx
)
785 return e
.use_ternop_insn (false, code_for_pred_mul_plus_scalar (
787 if (e
.op_info
->op
== OP_TYPE_vv
)
788 return e
.use_ternop_insn (false,
789 code_for_pred_mul_plus (e
.vector_mode ()));
794 class vnmsub
: public function_base
797 bool has_merge_operand_p () const override
{ return false; }
799 rtx
expand (function_expander
&e
) const override
801 if (e
.op_info
->op
== OP_TYPE_vx
)
802 return e
.use_ternop_insn (false, code_for_pred_minus_mul_scalar (
804 if (e
.op_info
->op
== OP_TYPE_vv
)
805 return e
.use_ternop_insn (false,
806 code_for_pred_minus_mul (e
.vector_mode ()));
811 /* Implements vwmacc<su><su>. */
812 class vwmacc
: public function_base
815 bool has_merge_operand_p () const override
{ return false; }
817 rtx
expand (function_expander
&e
) const override
819 if (e
.op_info
->op
== OP_TYPE_vx
)
820 return e
.use_widen_ternop_insn (
821 code_for_pred_widen_mul_plus_scalar (SIGN_EXTEND
, e
.vector_mode ()));
822 if (e
.op_info
->op
== OP_TYPE_vv
)
823 return e
.use_widen_ternop_insn (
824 code_for_pred_widen_mul_plus (SIGN_EXTEND
, e
.vector_mode ()));
829 class vwmaccu
: public function_base
832 bool has_merge_operand_p () const override
{ return false; }
834 rtx
expand (function_expander
&e
) const override
836 if (e
.op_info
->op
== OP_TYPE_vx
)
837 return e
.use_widen_ternop_insn (
838 code_for_pred_widen_mul_plus_scalar (ZERO_EXTEND
, e
.vector_mode ()));
839 if (e
.op_info
->op
== OP_TYPE_vv
)
840 return e
.use_widen_ternop_insn (
841 code_for_pred_widen_mul_plus (ZERO_EXTEND
, e
.vector_mode ()));
846 class vwmaccsu
: public function_base
849 bool has_merge_operand_p () const override
{ return false; }
851 rtx
expand (function_expander
&e
) const override
853 if (e
.op_info
->op
== OP_TYPE_vx
)
854 return e
.use_widen_ternop_insn (
855 code_for_pred_widen_mul_plussu_scalar (e
.vector_mode ()));
856 if (e
.op_info
->op
== OP_TYPE_vv
)
857 return e
.use_widen_ternop_insn (
858 code_for_pred_widen_mul_plussu (e
.vector_mode ()));
863 class vwmaccus
: public function_base
866 bool has_merge_operand_p () const override
{ return false; }
868 rtx
expand (function_expander
&e
) const override
870 return e
.use_widen_ternop_insn (
871 code_for_pred_widen_mul_plusus_scalar (e
.vector_mode ()));
875 /* Implements vmand/vmnand/vmandn/vmxor/vmor/vmnor/vmorn/vmxnor */
876 template<rtx_code CODE
>
877 class mask_logic
: public function_base
880 bool apply_tail_policy_p () const override
{ return false; }
881 bool apply_mask_policy_p () const override
{ return false; }
883 rtx
expand (function_expander
&e
) const override
885 return e
.use_exact_insn (code_for_pred (CODE
, e
.vector_mode ()));
888 template<rtx_code CODE
>
889 class mask_nlogic
: public function_base
892 bool apply_tail_policy_p () const override
{ return false; }
893 bool apply_mask_policy_p () const override
{ return false; }
895 rtx
expand (function_expander
&e
) const override
897 return e
.use_exact_insn (code_for_pred_n (CODE
, e
.vector_mode ()));
900 template<rtx_code CODE
>
901 class mask_notlogic
: public function_base
904 bool apply_tail_policy_p () const override
{ return false; }
905 bool apply_mask_policy_p () const override
{ return false; }
907 rtx
expand (function_expander
&e
) const override
909 return e
.use_exact_insn (code_for_pred_not (CODE
, e
.vector_mode ()));
913 /* Implements vmmv. */
914 class vmmv
: public function_base
917 bool apply_tail_policy_p () const override
{ return false; }
918 bool apply_mask_policy_p () const override
{ return false; }
920 rtx
expand (function_expander
&e
) const override
922 return e
.use_exact_insn (code_for_pred_mov (e
.vector_mode ()));
926 /* Implements vmclr. */
927 class vmclr
: public function_base
930 bool can_be_overloaded_p (enum predication_type_index
) const override
935 rtx
expand (function_expander
&e
) const override
937 machine_mode mode
= TYPE_MODE (TREE_TYPE (e
.exp
));
938 e
.add_all_one_mask_operand (mode
);
939 e
.add_vundef_operand (mode
);
940 e
.add_input_operand (mode
, CONST0_RTX (mode
));
941 e
.add_input_operand (call_expr_nargs (e
.exp
) - 1);
942 e
.add_input_operand (Pmode
, get_avl_type_rtx (avl_type::NONVLMAX
));
943 return e
.generate_insn (code_for_pred_mov (e
.vector_mode ()));
947 /* Implements vmset. */
948 class vmset
: public function_base
951 bool can_be_overloaded_p (enum predication_type_index
) const override
956 rtx
expand (function_expander
&e
) const override
958 machine_mode mode
= TYPE_MODE (TREE_TYPE (e
.exp
));
959 e
.add_all_one_mask_operand (mode
);
960 e
.add_vundef_operand (mode
);
961 e
.add_input_operand (mode
, CONSTM1_RTX (mode
));
962 e
.add_input_operand (call_expr_nargs (e
.exp
) - 1);
963 e
.add_input_operand (Pmode
, get_avl_type_rtx (avl_type::NONVLMAX
));
964 return e
.generate_insn (code_for_pred_mov (e
.vector_mode ()));
968 /* Implements vmnot. */
969 class vmnot
: public function_base
972 bool apply_tail_policy_p () const override
{ return false; }
973 bool apply_mask_policy_p () const override
{ return false; }
975 rtx
expand (function_expander
&e
) const override
977 return e
.use_exact_insn (code_for_pred_not (e
.vector_mode ()));
981 /* Implements vcpop. */
982 class vcpop
: public function_base
985 bool apply_tail_policy_p () const override
{ return false; }
986 bool apply_mask_policy_p () const override
{ return false; }
987 bool has_merge_operand_p () const override
{ return false; }
989 rtx
expand (function_expander
&e
) const override
991 return e
.use_exact_insn (code_for_pred_popcount (e
.vector_mode (), Pmode
));
995 /* Implements vfirst. */
996 class vfirst
: public function_base
999 bool apply_tail_policy_p () const override
{ return false; }
1000 bool apply_mask_policy_p () const override
{ return false; }
1001 bool has_merge_operand_p () const override
{ return false; }
1003 rtx
expand (function_expander
&e
) const override
1005 return e
.use_exact_insn (code_for_pred_ffs (e
.vector_mode (), Pmode
));
1009 /* Implements vmsbf/vmsif/vmsof. */
1010 template<int UNSPEC
>
1011 class mask_misc
: public function_base
1014 bool apply_tail_policy_p () const override
{ return false; }
1016 rtx
expand (function_expander
&e
) const override
1018 return e
.use_exact_insn (code_for_pred (UNSPEC
, e
.vector_mode ()));
1022 /* Implements viota. */
1023 class viota
: public function_base
1026 bool can_be_overloaded_p (enum predication_type_index pred
) const override
1028 return pred
== PRED_TYPE_tu
|| pred
== PRED_TYPE_tum
1029 || pred
== PRED_TYPE_tumu
|| pred
== PRED_TYPE_mu
;
1032 rtx
expand (function_expander
&e
) const override
1034 return e
.use_exact_insn (code_for_pred_iota (e
.vector_mode ()));
1038 /* Implements vid. */
1039 class vid
: public function_base
1042 bool can_be_overloaded_p (enum predication_type_index pred
) const override
1044 return pred
== PRED_TYPE_tu
|| pred
== PRED_TYPE_tum
1045 || pred
== PRED_TYPE_tumu
|| pred
== PRED_TYPE_mu
;
1048 rtx
expand (function_expander
&e
) const override
1050 return e
.use_exact_insn (code_for_pred_series (e
.vector_mode ()));
1054 /* Implements vfrsub/vfrdiv. */
1055 template<rtx_code CODE
, enum frm_op_type FRM_OP
= NO_FRM
>
1056 class reverse_binop
: public function_base
1059 bool has_rounding_mode_operand_p () const override
1061 return FRM_OP
== HAS_FRM
;
1064 bool may_require_frm_p () const override
{ return true; }
1066 rtx
expand (function_expander
&e
) const override
1068 return e
.use_exact_insn (
1069 code_for_pred_reverse_scalar (CODE
, e
.vector_mode ()));
1073 template<enum frm_op_type FRM_OP
= NO_FRM
>
1074 class vfmacc
: public function_base
1077 bool has_rounding_mode_operand_p () const override
1079 return FRM_OP
== HAS_FRM
;
1082 bool may_require_frm_p () const override
{ return true; }
1084 bool has_merge_operand_p () const override
{ return false; }
1086 rtx
expand (function_expander
&e
) const override
1088 if (e
.op_info
->op
== OP_TYPE_vf
)
1089 return e
.use_ternop_insn (true,
1090 code_for_pred_mul_scalar (PLUS
,
1092 if (e
.op_info
->op
== OP_TYPE_vv
)
1093 return e
.use_ternop_insn (true,
1094 code_for_pred_mul (PLUS
, e
.vector_mode ()));
1099 template<enum frm_op_type FRM_OP
= NO_FRM
>
1100 class vfnmsac
: public function_base
1103 bool has_rounding_mode_operand_p () const override
1105 return FRM_OP
== HAS_FRM
;
1108 bool may_require_frm_p () const override
{ return true; }
1110 bool has_merge_operand_p () const override
{ return false; }
1112 rtx
expand (function_expander
&e
) const override
1114 if (e
.op_info
->op
== OP_TYPE_vf
)
1115 return e
.use_ternop_insn (
1116 true, code_for_pred_mul_neg_scalar (PLUS
, e
.vector_mode ()));
1117 if (e
.op_info
->op
== OP_TYPE_vv
)
1118 return e
.use_ternop_insn (true,
1119 code_for_pred_mul_neg (PLUS
, e
.vector_mode ()));
1124 template<enum frm_op_type FRM_OP
= NO_FRM
>
1125 class vfmadd
: public function_base
1128 bool has_rounding_mode_operand_p () const override
1130 return FRM_OP
== HAS_FRM
;
1133 bool may_require_frm_p () const override
{ return true; }
1135 bool has_merge_operand_p () const override
{ return false; }
1137 rtx
expand (function_expander
&e
) const override
1139 if (e
.op_info
->op
== OP_TYPE_vf
)
1140 return e
.use_ternop_insn (false,
1141 code_for_pred_mul_scalar (PLUS
,
1143 if (e
.op_info
->op
== OP_TYPE_vv
)
1144 return e
.use_ternop_insn (false,
1145 code_for_pred_mul (PLUS
, e
.vector_mode ()));
1150 template<enum frm_op_type FRM_OP
= NO_FRM
>
1151 class vfnmsub
: public function_base
1154 bool has_rounding_mode_operand_p () const override
1156 return FRM_OP
== HAS_FRM
;
1159 bool may_require_frm_p () const override
{ return true; }
1161 bool has_merge_operand_p () const override
{ return false; }
1163 rtx
expand (function_expander
&e
) const override
1165 if (e
.op_info
->op
== OP_TYPE_vf
)
1166 return e
.use_ternop_insn (
1167 false, code_for_pred_mul_neg_scalar (PLUS
, e
.vector_mode ()));
1168 if (e
.op_info
->op
== OP_TYPE_vv
)
1169 return e
.use_ternop_insn (false,
1170 code_for_pred_mul_neg (PLUS
, e
.vector_mode ()));
1175 template<enum frm_op_type FRM_OP
= NO_FRM
>
1176 class vfnmacc
: public function_base
1179 bool has_rounding_mode_operand_p () const override
1181 return FRM_OP
== HAS_FRM
;
1184 bool may_require_frm_p () const override
{ return true; }
1186 bool has_merge_operand_p () const override
{ return false; }
1188 rtx
expand (function_expander
&e
) const override
1190 if (e
.op_info
->op
== OP_TYPE_vf
)
1191 return e
.use_ternop_insn (
1192 true, code_for_pred_mul_neg_scalar (MINUS
, e
.vector_mode ()));
1193 if (e
.op_info
->op
== OP_TYPE_vv
)
1194 return e
.use_ternop_insn (true,
1195 code_for_pred_mul_neg (MINUS
, e
.vector_mode ()));
1200 template<enum frm_op_type FRM_OP
= NO_FRM
>
1201 class vfmsac
: public function_base
1204 bool has_rounding_mode_operand_p () const override
1206 return FRM_OP
== HAS_FRM
;
1209 bool may_require_frm_p () const override
{ return true; }
1211 bool has_merge_operand_p () const override
{ return false; }
1213 rtx
expand (function_expander
&e
) const override
1215 if (e
.op_info
->op
== OP_TYPE_vf
)
1216 return e
.use_ternop_insn (true,
1217 code_for_pred_mul_scalar (MINUS
,
1219 if (e
.op_info
->op
== OP_TYPE_vv
)
1220 return e
.use_ternop_insn (true,
1221 code_for_pred_mul (MINUS
, e
.vector_mode ()));
1226 template<enum frm_op_type FRM_OP
= NO_FRM
>
1227 class vfnmadd
: public function_base
1230 bool has_rounding_mode_operand_p () const override
1232 return FRM_OP
== HAS_FRM
;
1235 bool may_require_frm_p () const override
{ return true; }
1237 bool has_merge_operand_p () const override
{ return false; }
1239 rtx
expand (function_expander
&e
) const override
1241 if (e
.op_info
->op
== OP_TYPE_vf
)
1242 return e
.use_ternop_insn (
1243 false, code_for_pred_mul_neg_scalar (MINUS
, e
.vector_mode ()));
1244 if (e
.op_info
->op
== OP_TYPE_vv
)
1245 return e
.use_ternop_insn (false,
1246 code_for_pred_mul_neg (MINUS
, e
.vector_mode ()));
1251 template<enum frm_op_type FRM_OP
= NO_FRM
>
1252 class vfmsub
: public function_base
1255 bool has_rounding_mode_operand_p () const override
1257 return FRM_OP
== HAS_FRM
;
1260 bool may_require_frm_p () const override
{ return true; }
1262 bool has_merge_operand_p () const override
{ return false; }
1264 rtx
expand (function_expander
&e
) const override
1266 if (e
.op_info
->op
== OP_TYPE_vf
)
1267 return e
.use_ternop_insn (false,
1268 code_for_pred_mul_scalar (MINUS
,
1270 if (e
.op_info
->op
== OP_TYPE_vv
)
1271 return e
.use_ternop_insn (false,
1272 code_for_pred_mul (MINUS
, e
.vector_mode ()));
1277 template<enum frm_op_type FRM_OP
= NO_FRM
>
1278 class vfwmacc
: public function_base
1281 bool has_rounding_mode_operand_p () const override
1283 return FRM_OP
== HAS_FRM
;
1286 bool may_require_frm_p () const override
{ return true; }
1288 bool has_merge_operand_p () const override
{ return false; }
1290 rtx
expand (function_expander
&e
) const override
1292 if (e
.op_info
->op
== OP_TYPE_vf
)
1293 return e
.use_widen_ternop_insn (
1294 code_for_pred_widen_mul_scalar (PLUS
, e
.vector_mode ()));
1295 if (e
.op_info
->op
== OP_TYPE_vv
)
1296 return e
.use_widen_ternop_insn (
1297 code_for_pred_widen_mul (PLUS
, e
.vector_mode ()));
1302 template<enum frm_op_type FRM_OP
= NO_FRM
>
1303 class vfwnmacc
: public function_base
1306 bool has_rounding_mode_operand_p () const override
1308 return FRM_OP
== HAS_FRM
;
1311 bool may_require_frm_p () const override
{ return true; }
1313 bool has_merge_operand_p () const override
{ return false; }
1315 rtx
expand (function_expander
&e
) const override
1317 if (e
.op_info
->op
== OP_TYPE_vf
)
1318 return e
.use_widen_ternop_insn (
1319 code_for_pred_widen_mul_neg_scalar (MINUS
, e
.vector_mode ()));
1320 if (e
.op_info
->op
== OP_TYPE_vv
)
1321 return e
.use_widen_ternop_insn (
1322 code_for_pred_widen_mul_neg (MINUS
, e
.vector_mode ()));
1327 template<enum frm_op_type FRM_OP
= NO_FRM
>
1328 class vfwmsac
: public function_base
1331 bool has_rounding_mode_operand_p () const override
1333 return FRM_OP
== HAS_FRM
;
1336 bool may_require_frm_p () const override
{ return true; }
1338 bool has_merge_operand_p () const override
{ return false; }
1340 rtx
expand (function_expander
&e
) const override
1342 if (e
.op_info
->op
== OP_TYPE_vf
)
1343 return e
.use_widen_ternop_insn (
1344 code_for_pred_widen_mul_scalar (MINUS
, e
.vector_mode ()));
1345 if (e
.op_info
->op
== OP_TYPE_vv
)
1346 return e
.use_widen_ternop_insn (
1347 code_for_pred_widen_mul (MINUS
, e
.vector_mode ()));
1352 template<enum frm_op_type FRM_OP
= NO_FRM
>
1353 class vfwnmsac
: public function_base
1356 bool has_rounding_mode_operand_p () const override
1358 return FRM_OP
== HAS_FRM
;
1361 bool may_require_frm_p () const override
{ return true; }
1363 bool has_merge_operand_p () const override
{ return false; }
1365 rtx
expand (function_expander
&e
) const override
1367 if (e
.op_info
->op
== OP_TYPE_vf
)
1368 return e
.use_widen_ternop_insn (
1369 code_for_pred_widen_mul_neg_scalar (PLUS
, e
.vector_mode ()));
1370 if (e
.op_info
->op
== OP_TYPE_vv
)
1371 return e
.use_widen_ternop_insn (
1372 code_for_pred_widen_mul_neg (PLUS
, e
.vector_mode ()));
1377 /* Implements vfsqrt7/vfrec7/vfclass/vfsgnj/vfsgnjx. */
1378 template<int UNSPEC
, enum frm_op_type FRM_OP
= NO_FRM
>
1379 class float_misc
: public function_base
1382 bool has_rounding_mode_operand_p () const override
1384 return FRM_OP
== HAS_FRM
;
1387 bool may_require_frm_p () const override
{ return true; }
1389 rtx
expand (function_expander
&e
) const override
1391 if (e
.op_info
->op
== OP_TYPE_vf
)
1392 return e
.use_exact_insn (code_for_pred_scalar (UNSPEC
, e
.vector_mode ()));
1393 if (e
.op_info
->op
== OP_TYPE_vv
|| e
.op_info
->op
== OP_TYPE_v
)
1394 return e
.use_exact_insn (code_for_pred (UNSPEC
, e
.vector_mode ()));
1399 /* Implements vfsgnjn. */
1400 class vfsgnjn
: public function_base
1403 rtx
expand (function_expander
&e
) const override
1405 if (e
.op_info
->op
== OP_TYPE_vf
)
1406 return e
.use_exact_insn (code_for_pred_ncopysign_scalar (e
.vector_mode ()));
1407 if (e
.op_info
->op
== OP_TYPE_vv
)
1408 return e
.use_exact_insn (code_for_pred_ncopysign (e
.vector_mode ()));
1413 /* Implements vmfeq/vmfne/vmflt/vmfgt/vmfle/vmfge. */
1414 template<rtx_code CODE
>
1415 class fcmp
: public function_base
1418 rtx
expand (function_expander
&e
) const override
1420 switch (e
.op_info
->op
)
1423 if (CODE
== EQ
|| CODE
== NE
)
1424 return e
.use_compare_insn (CODE
, code_for_pred_eqne_scalar (
1427 return e
.use_compare_insn (CODE
, code_for_pred_cmp_scalar (
1431 return e
.use_compare_insn (CODE
,
1432 code_for_pred_cmp (e
.vector_mode ()));
1440 /* Implements vfclass. */
1441 class vfclass
: public function_base
1444 rtx
expand (function_expander
&e
) const override
1446 return e
.use_exact_insn (code_for_pred_class (e
.arg_mode (0)));
1450 /* Implements vfcvt.x. */
1451 template<int UNSPEC
, enum frm_op_type FRM_OP
= NO_FRM
>
1452 class vfcvt_x
: public function_base
1455 bool has_rounding_mode_operand_p () const override
1457 return FRM_OP
== HAS_FRM
;
1460 bool may_require_frm_p () const override
{ return true; }
1462 rtx
expand (function_expander
&e
) const override
1464 return e
.use_exact_insn (code_for_pred_fcvt_x_f (UNSPEC
, e
.arg_mode (0)));
1468 /* Implements vfcvt.rtz.x. */
1469 template<rtx_code CODE
>
1470 class vfcvt_rtz_x
: public function_base
1473 rtx
expand (function_expander
&e
) const override
1475 return e
.use_exact_insn (code_for_pred (CODE
, e
.arg_mode (0)));
1479 template<enum frm_op_type FRM_OP
= NO_FRM
>
1480 class vfcvt_f
: public function_base
1483 bool has_rounding_mode_operand_p () const override
1485 return FRM_OP
== HAS_FRM
;
1488 bool may_require_frm_p () const override
{ return true; }
1490 rtx
expand (function_expander
&e
) const override
1492 if (e
.op_info
->op
== OP_TYPE_x_v
)
1493 return e
.use_exact_insn (code_for_pred (FLOAT
, e
.vector_mode ()));
1494 if (e
.op_info
->op
== OP_TYPE_xu_v
)
1495 return e
.use_exact_insn (
1496 code_for_pred (UNSIGNED_FLOAT
, e
.vector_mode ()));
1501 /* Implements vfwcvt.x. */
1502 template<int UNSPEC
, enum frm_op_type FRM_OP
= NO_FRM
>
1503 class vfwcvt_x
: public function_base
1506 bool has_rounding_mode_operand_p () const override
1508 return FRM_OP
== HAS_FRM
;
1511 bool may_require_frm_p () const override
{ return true; }
1513 rtx
expand (function_expander
&e
) const override
1515 return e
.use_exact_insn (
1516 code_for_pred_widen_fcvt_x_f (UNSPEC
, e
.vector_mode ()));
1520 /* Implements vfwcvt.rtz.x. */
1521 template<rtx_code CODE
>
1522 class vfwcvt_rtz_x
: public function_base
1525 rtx
expand (function_expander
&e
) const override
1527 return e
.use_exact_insn (code_for_pred_widen (CODE
, e
.vector_mode ()));
1531 class vfwcvt_f
: public function_base
1534 rtx
expand (function_expander
&e
) const override
1536 if (e
.op_info
->op
== OP_TYPE_f_v
)
1537 return e
.use_exact_insn (code_for_pred_extend (e
.vector_mode ()));
1538 if (e
.op_info
->op
== OP_TYPE_x_v
)
1539 return e
.use_exact_insn (code_for_pred_widen (FLOAT
, e
.vector_mode ()));
1540 if (e
.op_info
->op
== OP_TYPE_xu_v
)
1541 return e
.use_exact_insn (
1542 code_for_pred_widen (UNSIGNED_FLOAT
, e
.vector_mode ()));
1547 /* Implements vfncvt.x. */
1548 template<int UNSPEC
, enum frm_op_type FRM_OP
= NO_FRM
>
1549 class vfncvt_x
: public function_base
1552 bool has_rounding_mode_operand_p () const override
1554 return FRM_OP
== HAS_FRM
;
1557 bool may_require_frm_p () const override
{ return true; }
1559 rtx
expand (function_expander
&e
) const override
1561 return e
.use_exact_insn (
1562 code_for_pred_narrow_fcvt_x_f (UNSPEC
, e
.arg_mode (0)));
1566 /* Implements vfncvt.rtz.x. */
1567 template<rtx_code CODE
>
1568 class vfncvt_rtz_x
: public function_base
1571 rtx
expand (function_expander
&e
) const override
1573 return e
.use_exact_insn (code_for_pred_narrow (CODE
, e
.vector_mode ()));
1577 template<enum frm_op_type FRM_OP
= NO_FRM
>
1578 class vfncvt_f
: public function_base
1581 bool has_rounding_mode_operand_p () const override
1583 return FRM_OP
== HAS_FRM
;
1586 bool may_require_frm_p () const override
{ return true; }
1588 rtx
expand (function_expander
&e
) const override
1590 if (e
.op_info
->op
== OP_TYPE_f_w
)
1591 return e
.use_exact_insn (code_for_pred_trunc (e
.vector_mode ()));
1592 if (e
.op_info
->op
== OP_TYPE_x_w
)
1593 return e
.use_exact_insn (code_for_pred_narrow (FLOAT
, e
.arg_mode (0)));
1594 if (e
.op_info
->op
== OP_TYPE_xu_w
)
1595 return e
.use_exact_insn (
1596 code_for_pred_narrow (UNSIGNED_FLOAT
, e
.arg_mode (0)));
1601 class vfncvt_rod_f
: public function_base
1604 rtx
expand (function_expander
&e
) const override
1606 return e
.use_exact_insn (code_for_pred_rod_trunc (e
.vector_mode ()));
1610 /* Implements reduction instructions. */
1611 template<unsigned UNSPEC
>
1612 class reducop
: public function_base
1615 bool apply_mask_policy_p () const override
{ return false; }
1617 rtx
expand (function_expander
&e
) const override
1619 return e
.use_exact_insn (code_for_pred (UNSPEC
, e
.vector_mode ()));
1623 /* Implements floating-point reduction instructions. */
1624 template<unsigned UNSPEC
, enum frm_op_type FRM_OP
= NO_FRM
>
1625 class freducop
: public function_base
1628 bool has_rounding_mode_operand_p () const override
1630 return FRM_OP
== HAS_FRM
;
1633 bool may_require_frm_p () const override
{ return true; }
1635 bool apply_mask_policy_p () const override
{ return false; }
1637 rtx
expand (function_expander
&e
) const override
1639 return e
.use_exact_insn (code_for_pred (UNSPEC
, e
.vector_mode ()));
1643 /* Implements vmv/vfmv instructions. */
1644 class vmv
: public function_base
1647 bool apply_vl_p () const override
{ return false; }
1648 bool apply_tail_policy_p () const override
{ return false; }
1649 bool apply_mask_policy_p () const override
{ return false; }
1650 bool use_mask_predication_p () const override
{ return false; }
1651 bool has_merge_operand_p () const override
{ return false; }
1653 rtx
expand (function_expander
&e
) const override
1655 return e
.use_exact_insn (code_for_pred_extract_first (e
.vector_mode ()));
1659 /* Implements vmv.s.x/vfmv.s.f. */
1660 class vmv_s
: public function_base
1663 rtx
expand (function_expander
&e
) const override
1665 return e
.use_scalar_move_insn (code_for_pred_broadcast (e
.vector_mode ()));
1669 template<int UNSPEC
>
1670 class slideop
: public function_base
1673 bool has_merge_operand_p () const override
1675 if (UNSPEC
== UNSPEC_VSLIDEUP
)
1680 rtx
expand (function_expander
&e
) const override
1682 return e
.use_exact_insn (code_for_pred_slide (UNSPEC
, e
.vector_mode ()));
1686 class vrgather
: public function_base
1689 rtx
expand (function_expander
&e
) const override
1691 switch (e
.op_info
->op
)
1694 return e
.use_exact_insn (
1695 code_for_pred_gather_scalar (e
.vector_mode ()));
1697 return e
.use_exact_insn (code_for_pred_gather (e
.vector_mode ()));
1704 class vrgatherei16
: public function_base
1707 rtx
expand (function_expander
&e
) const override
1709 return e
.use_exact_insn (code_for_pred_gatherei16 (e
.vector_mode ()));
1713 class vcompress
: public function_base
1716 bool apply_mask_policy_p () const override
{ return false; }
1717 bool use_mask_predication_p () const override
{ return false; }
1718 rtx
expand (function_expander
&e
) const override
1720 return e
.use_exact_insn (code_for_pred_compress (e
.vector_mode ()));
1724 class vundefined
: public function_base
1727 bool apply_vl_p () const override
1732 rtx
expand (function_expander
&e
) const override
1734 return e
.generate_insn (code_for_vundefined (e
.vector_mode ()));
1738 class vreinterpret
: public function_base
1741 bool apply_vl_p () const override
1746 rtx
expand (function_expander
&e
) const override
1748 e
.add_input_operand (0);
1749 return e
.generate_insn (code_for_vreinterpret (e
.ret_mode ()));
1753 class vlmul_ext
: public function_base
1756 bool apply_vl_p () const override
1761 rtx
expand (function_expander
&e
) const override
1763 tree arg
= CALL_EXPR_ARG (e
.exp
, 0);
1764 rtx src
= expand_normal (arg
);
1765 emit_move_insn (gen_lowpart (e
.vector_mode (), e
.target
), src
);
1770 class vlmul_trunc
: public function_base
1773 bool apply_vl_p () const override
{ return false; }
1775 rtx
expand (function_expander
&e
) const override
1777 rtx src
= expand_normal (CALL_EXPR_ARG (e
.exp
, 0));
1778 emit_move_insn (e
.target
, gen_lowpart (GET_MODE (e
.target
), src
));
1783 class vset
: public function_base
1786 bool apply_vl_p () const override
{ return false; }
1788 gimple
*fold (gimple_folder
&f
) const override
1790 tree rhs_tuple
= gimple_call_arg (f
.call
, 0);
1791 /* LMUL > 1 non-tuple vector types are not structure,
1792 we can't use __val[index] to set the subpart. */
1793 if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple
))))
1795 tree index
= gimple_call_arg (f
.call
, 1);
1796 tree rhs_vector
= gimple_call_arg (f
.call
, 2);
1798 /* Replace the call with two statements: a copy of the full tuple
1799 to the call result, followed by an update of the individual vector.
1801 The fold routines expect the replacement statement to have the
1802 same lhs as the original call, so return the copy statement
1803 rather than the field update. */
1804 gassign
*copy
= gimple_build_assign (unshare_expr (f
.lhs
), rhs_tuple
);
1806 /* Get a reference to the individual vector. */
1807 tree field
= tuple_type_field (TREE_TYPE (f
.lhs
));
1809 = build3 (COMPONENT_REF
, TREE_TYPE (field
), f
.lhs
, field
, NULL_TREE
);
1810 tree lhs_vector
= build4 (ARRAY_REF
, TREE_TYPE (rhs_vector
), lhs_array
,
1811 index
, NULL_TREE
, NULL_TREE
);
1812 gassign
*update
= gimple_build_assign (lhs_vector
, rhs_vector
);
1813 gsi_insert_after (f
.gsi
, update
, GSI_SAME_STMT
);
1818 rtx
expand (function_expander
&e
) const override
1822 rtx dest
= expand_normal (CALL_EXPR_ARG (e
.exp
, 0));
1823 gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (dest
)));
1824 rtx index
= expand_normal (CALL_EXPR_ARG (e
.exp
, 1));
1825 rtx src
= expand_normal (CALL_EXPR_ARG (e
.exp
, 2));
1826 poly_int64 offset
= INTVAL (index
) * GET_MODE_SIZE (GET_MODE (src
));
1827 emit_move_insn (e
.target
, dest
);
1828 rtx subreg
= simplify_gen_subreg (GET_MODE (src
), e
.target
,
1829 GET_MODE (e
.target
), offset
);
1830 emit_move_insn (subreg
, src
);
1835 class vget
: public function_base
1838 bool apply_vl_p () const override
{ return false; }
1840 gimple
*fold (gimple_folder
&f
) const override
1842 /* Fold into a normal gimple component access. */
1843 tree rhs_tuple
= gimple_call_arg (f
.call
, 0);
1844 /* LMUL > 1 non-tuple vector types are not structure,
1845 we can't use __val[index] to get the subpart. */
1846 if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple
))))
1848 tree index
= gimple_call_arg (f
.call
, 1);
1849 tree field
= tuple_type_field (TREE_TYPE (rhs_tuple
));
1851 = build3 (COMPONENT_REF
, TREE_TYPE (field
), rhs_tuple
, field
, NULL_TREE
);
1852 tree rhs_vector
= build4 (ARRAY_REF
, TREE_TYPE (f
.lhs
), rhs_array
, index
,
1853 NULL_TREE
, NULL_TREE
);
1854 return gimple_build_assign (f
.lhs
, rhs_vector
);
1857 rtx
expand (function_expander
&e
) const override
1861 rtx src
= expand_normal (CALL_EXPR_ARG (e
.exp
, 0));
1862 gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (src
)));
1863 rtx index
= expand_normal (CALL_EXPR_ARG (e
.exp
, 1));
1864 poly_int64 offset
= INTVAL (index
) * GET_MODE_SIZE (GET_MODE (e
.target
));
1866 = simplify_gen_subreg (GET_MODE (e
.target
), src
, GET_MODE (src
), offset
);
1871 class vcreate
: public function_base
1874 gimple
*fold (gimple_folder
&f
) const override
1876 unsigned int nargs
= gimple_call_num_args (f
.call
);
1877 tree lhs_type
= TREE_TYPE (f
.lhs
);
1878 /* LMUL > 1 non-tuple vector types are not structure,
1879 we can't use __val[index] to set the subpart. */
1880 if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (lhs_type
)))
1883 /* Replace the call with a clobber of the result (to prevent it from
1884 becoming upwards exposed) followed by stores into each individual
1887 The fold routines expect the replacement statement to have the
1888 same lhs as the original call, so return the clobber statement
1889 rather than the final vector store. */
1890 gassign
*clobber
= gimple_build_assign (f
.lhs
, build_clobber (lhs_type
));
1892 for (unsigned int i
= nargs
; i
-- > 0; )
1894 tree rhs_vector
= gimple_call_arg (f
.call
, i
);
1895 tree field
= tuple_type_field (TREE_TYPE (f
.lhs
));
1896 tree lhs_array
= build3 (COMPONENT_REF
, TREE_TYPE (field
),
1897 unshare_expr (f
.lhs
), field
, NULL_TREE
);
1898 tree lhs_vector
= build4 (ARRAY_REF
, TREE_TYPE (rhs_vector
),
1899 lhs_array
, size_int (i
),
1900 NULL_TREE
, NULL_TREE
);
1901 gassign
*assign
= gimple_build_assign (lhs_vector
, rhs_vector
);
1902 gsi_insert_after (f
.gsi
, assign
, GSI_SAME_STMT
);
1907 rtx
expand (function_expander
&e
) const override
1911 gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (e
.target
)));
1912 unsigned int nargs
= call_expr_nargs (e
.exp
);
1913 for (unsigned int i
= 0; i
< nargs
; i
++)
1915 rtx src
= expand_normal (CALL_EXPR_ARG (e
.exp
, i
));
1916 poly_int64 offset
= i
* GET_MODE_SIZE (GET_MODE (src
));
1917 rtx subreg
= simplify_gen_subreg (GET_MODE (src
), e
.target
,
1918 GET_MODE (e
.target
), offset
);
1919 emit_move_insn (subreg
, src
);
1926 class read_vl
: public function_base
1929 unsigned int call_properties (const function_instance
&) const override
1934 rtx
expand (function_expander
&e
) const override
1936 if (Pmode
== SImode
)
1937 emit_insn (gen_read_vlsi (e
.target
));
1939 emit_insn (gen_read_vldi_zero_extend (e
.target
));
1944 class vleff
: public function_base
1947 unsigned int call_properties (const function_instance
&) const override
1949 return CP_READ_MEMORY
| CP_WRITE_CSR
;
1952 bool can_be_overloaded_p (enum predication_type_index pred
) const override
1954 return pred
!= PRED_TYPE_none
;
1957 gimple
*fold (gimple_folder
&f
) const override
1959 return fold_fault_load (f
);
1962 rtx
expand (function_expander
&e
) const override
1964 return e
.use_contiguous_load_insn (
1965 code_for_pred_fault_load (e
.vector_mode ()));
1969 /* Implements vlenb. */
1970 class vlenb
: public function_base
1973 bool apply_vl_p () const override
{ return false; }
1975 rtx
expand (function_expander
&e
) const override
1977 machine_mode mode
= GET_MODE (e
.target
);
1978 rtx vlenb
= gen_int_mode (BYTES_PER_RISCV_VECTOR
, mode
);
1979 emit_move_insn (e
.target
, vlenb
);
1984 /* Implements vlseg.v. */
1985 class vlseg
: public function_base
1988 unsigned int call_properties (const function_instance
&) const override
1990 return CP_READ_MEMORY
;
1993 bool can_be_overloaded_p (enum predication_type_index pred
) const override
1995 return pred
!= PRED_TYPE_none
;
1998 rtx
expand (function_expander
&e
) const override
2000 return e
.use_exact_insn (
2001 code_for_pred_unit_strided_load (e
.vector_mode ()));
2005 /* Implements vsseg.v. */
2006 class vsseg
: public function_base
2009 bool apply_tail_policy_p () const override
{ return false; }
2010 bool apply_mask_policy_p () const override
{ return false; }
2012 unsigned int call_properties (const function_instance
&) const override
2014 return CP_WRITE_MEMORY
;
2017 bool can_be_overloaded_p (enum predication_type_index
) const override
2022 rtx
expand (function_expander
&e
) const override
2024 return e
.use_exact_insn (
2025 code_for_pred_unit_strided_store (e
.vector_mode ()));
2029 /* Implements vlsseg.v. */
2030 class vlsseg
: public function_base
2033 unsigned int call_properties (const function_instance
&) const override
2035 return CP_READ_MEMORY
;
2038 bool can_be_overloaded_p (enum predication_type_index pred
) const override
2040 return pred
!= PRED_TYPE_none
;
2043 rtx
expand (function_expander
&e
) const override
2045 return e
.use_exact_insn (
2046 code_for_pred_strided_load (e
.vector_mode ()));
2050 /* Implements vssseg.v. */
2051 class vssseg
: public function_base
2054 bool apply_tail_policy_p () const override
{ return false; }
2055 bool apply_mask_policy_p () const override
{ return false; }
2057 unsigned int call_properties (const function_instance
&) const override
2059 return CP_WRITE_MEMORY
;
2062 bool can_be_overloaded_p (enum predication_type_index
) const override
2067 rtx
expand (function_expander
&e
) const override
2069 return e
.use_exact_insn (
2070 code_for_pred_strided_store (e
.vector_mode ()));
2074 template<int UNSPEC
>
2075 class seg_indexed_load
: public function_base
2078 unsigned int call_properties (const function_instance
&) const override
2080 return CP_READ_MEMORY
;
2083 bool can_be_overloaded_p (enum predication_type_index
) const override
2088 rtx
expand (function_expander
&e
) const override
2090 return e
.use_exact_insn (
2091 code_for_pred_indexed_load (UNSPEC
, e
.vector_mode (), e
.index_mode ()));
2095 template<int UNSPEC
>
2096 class seg_indexed_store
: public function_base
2099 bool apply_tail_policy_p () const override
{ return false; }
2100 bool apply_mask_policy_p () const override
{ return false; }
2102 unsigned int call_properties (const function_instance
&) const override
2104 return CP_WRITE_MEMORY
;
2107 bool can_be_overloaded_p (enum predication_type_index
) const override
2112 rtx
expand (function_expander
&e
) const override
2114 return e
.use_exact_insn (
2115 code_for_pred_indexed_store (UNSPEC
, e
.vector_mode (), e
.index_mode ()));
2119 /* Implements vlsegff.v. */
2120 class vlsegff
: public function_base
2123 unsigned int call_properties (const function_instance
&) const override
2125 return CP_READ_MEMORY
| CP_WRITE_CSR
;
2128 bool can_be_overloaded_p (enum predication_type_index pred
) const override
2130 return pred
!= PRED_TYPE_none
;
2133 gimple
*fold (gimple_folder
&f
) const override
2135 return fold_fault_load (f
);
2138 rtx
expand (function_expander
&e
) const override
2140 return e
.use_exact_insn (code_for_pred_fault_load (e
.vector_mode ()));
2145 * th.vl(b/h/w)[u].v/th.vs(b/h/w)[u].v/th.vls(b/h/w)[u].v/th.vss(b/h/w)[u].v/
2146 * th.vlx(b/h/w)[u].v/th.vs[u]x(b/h/w).v
2148 template<bool STORE_P
, lst_type LST_TYPE
, int UNSPEC
>
2149 class th_loadstore_width
: public function_base
2152 bool apply_tail_policy_p () const override
{ return !STORE_P
; }
2153 bool apply_mask_policy_p () const override
{ return !STORE_P
; }
2155 unsigned int call_properties (const function_instance
&) const override
2158 return CP_WRITE_MEMORY
;
2160 return CP_READ_MEMORY
;
2163 bool can_be_overloaded_p (enum predication_type_index pred
) const override
2165 if (STORE_P
|| LST_TYPE
== LST_INDEXED
)
2167 return pred
!= PRED_TYPE_none
;
2170 rtx
expand (function_expander
&e
) const override
2172 gcc_assert (TARGET_XTHEADVECTOR
);
2173 if (LST_TYPE
== LST_INDEXED
)
2176 return e
.use_exact_insn (
2177 code_for_pred_indexed_store_width (UNSPEC
, UNSPEC
,
2180 return e
.use_exact_insn (
2181 code_for_pred_indexed_load_width (UNSPEC
, e
.vector_mode ()));
2183 else if (LST_TYPE
== LST_STRIDED
)
2186 return e
.use_contiguous_store_insn (
2187 code_for_pred_strided_store_width (UNSPEC
, e
.vector_mode ()));
2189 return e
.use_contiguous_load_insn (
2190 code_for_pred_strided_load_width (UNSPEC
, e
.vector_mode ()));
2195 return e
.use_contiguous_store_insn (
2196 code_for_pred_store_width (UNSPEC
, e
.vector_mode ()));
2198 return e
.use_contiguous_load_insn (
2199 code_for_pred_mov_width (UNSPEC
, e
.vector_mode ()));
2204 /* Implements vext.x.v. */
2205 class th_extract
: public function_base
2208 bool apply_vl_p () const override
{ return false; }
2209 bool apply_tail_policy_p () const override
{ return false; }
2210 bool apply_mask_policy_p () const override
{ return false; }
2211 bool use_mask_predication_p () const override
{ return false; }
2212 bool has_merge_operand_p () const override
{ return false; }
2214 rtx
expand (function_expander
&e
) const override
2216 gcc_assert (TARGET_XTHEADVECTOR
);
2217 return e
.use_exact_insn (code_for_pred_th_extract (e
.vector_mode ()));
2221 /* Below implements are vector crypto */
2222 /* Implements vandn.[vv,vx] */
2223 class vandn
: public function_base
2226 rtx
expand (function_expander
&e
) const override
2228 switch (e
.op_info
->op
)
2231 return e
.use_exact_insn (code_for_pred_vandn (e
.vector_mode ()));
2233 return e
.use_exact_insn (code_for_pred_vandn_scalar (e
.vector_mode ()));
2240 /* Implements vrol/vror/clz/ctz. */
2241 template<rtx_code CODE
>
2242 class bitmanip
: public function_base
2245 bool apply_tail_policy_p () const override
2247 return (CODE
== CLZ
|| CODE
== CTZ
) ? false : true;
2249 bool apply_mask_policy_p () const override
2251 return (CODE
== CLZ
|| CODE
== CTZ
) ? false : true;
2253 bool has_merge_operand_p () const override
2255 return (CODE
== CLZ
|| CODE
== CTZ
) ? false : true;
2258 rtx
expand (function_expander
&e
) const override
2260 switch (e
.op_info
->op
)
2264 return e
.use_exact_insn (code_for_pred_v (CODE
, e
.vector_mode ()));
2266 return e
.use_exact_insn (code_for_pred_v_scalar (CODE
, e
.vector_mode ()));
2273 /* Implements vbrev/vbrev8/vrev8. */
2274 template<int UNSPEC
>
2275 class b_reverse
: public function_base
2278 rtx
expand (function_expander
&e
) const override
2280 return e
.use_exact_insn (code_for_pred_v (UNSPEC
, e
.vector_mode ()));
2284 class vwsll
: public function_base
2287 rtx
expand (function_expander
&e
) const override
2289 switch (e
.op_info
->op
)
2292 return e
.use_exact_insn (code_for_pred_vwsll (e
.vector_mode ()));
2294 return e
.use_exact_insn (code_for_pred_vwsll_scalar (e
.vector_mode ()));
2301 /* Implements clmul */
2302 template<int UNSPEC
>
2303 class clmul
: public function_base
2306 rtx
expand (function_expander
&e
) const override
2308 switch (e
.op_info
->op
)
2311 return e
.use_exact_insn (
2312 code_for_pred_vclmul (UNSPEC
, e
.vector_mode ()));
2314 return e
.use_exact_insn
2315 (code_for_pred_vclmul_scalar (UNSPEC
, e
.vector_mode ()));
2322 /* Implements vghsh/vsh2ms/vsha2c[hl]. */
2323 template<int UNSPEC
>
2324 class vg_nhab
: public function_base
2327 bool apply_mask_policy_p () const override
{ return false; }
2328 bool use_mask_predication_p () const override
{ return false; }
2329 bool has_merge_operand_p () const override
{ return false; }
2331 rtx
expand (function_expander
&e
) const override
2333 return e
.use_exact_insn (code_for_pred_v (UNSPEC
, e
.vector_mode ()));
2337 /* Implements vgmul/vaes*. */
2338 template<int UNSPEC
>
2339 class crypto_vv
: public function_base
2342 bool apply_mask_policy_p () const override
{ return false; }
2343 bool use_mask_predication_p () const override
{ return false; }
2344 bool has_merge_operand_p () const override
{ return false; }
2346 rtx
expand (function_expander
&e
) const override
2348 poly_uint64 nunits
= 0U;
2349 switch (e
.op_info
->op
)
2352 if (UNSPEC
== UNSPEC_VGMUL
)
2353 return e
.use_exact_insn
2354 (code_for_pred_crypto_vv (UNSPEC
, UNSPEC
, e
.vector_mode ()));
2356 return e
.use_exact_insn
2357 (code_for_pred_crypto_vv (UNSPEC
+ 1, UNSPEC
+ 1, e
.vector_mode ()));
2359 /* Calculate the ratio between arg0 and arg1*/
2360 gcc_assert (multiple_p (GET_MODE_BITSIZE (e
.arg_mode (0)),
2361 GET_MODE_BITSIZE (e
.arg_mode (1)), &nunits
));
2362 if (maybe_eq (nunits
, 1U))
2363 return e
.use_exact_insn (code_for_pred_crypto_vvx1_scalar
2364 (UNSPEC
+ 2, UNSPEC
+ 2, e
.vector_mode ()));
2365 else if (maybe_eq (nunits
, 2U))
2366 return e
.use_exact_insn (code_for_pred_crypto_vvx2_scalar
2367 (UNSPEC
+ 2, UNSPEC
+ 2, e
.vector_mode ()));
2368 else if (maybe_eq (nunits
, 4U))
2369 return e
.use_exact_insn (code_for_pred_crypto_vvx4_scalar
2370 (UNSPEC
+ 2, UNSPEC
+ 2, e
.vector_mode ()));
2371 else if (maybe_eq (nunits
, 8U))
2372 return e
.use_exact_insn (code_for_pred_crypto_vvx8_scalar
2373 (UNSPEC
+ 2, UNSPEC
+ 2, e
.vector_mode ()));
2375 return e
.use_exact_insn (code_for_pred_crypto_vvx16_scalar
2376 (UNSPEC
+ 2, UNSPEC
+ 2, e
.vector_mode ()));
2383 /* Implements vaeskf1/vsm4k. */
2384 template<int UNSPEC
>
2385 class crypto_vi
: public function_base
2388 bool apply_mask_policy_p () const override
{ return false; }
2389 bool use_mask_predication_p () const override
{ return false; }
2391 rtx
expand (function_expander
&e
) const override
2393 return e
.use_exact_insn
2394 (code_for_pred_crypto_vi_scalar (UNSPEC
, e
.vector_mode ()));
2398 /* Implements vaeskf2/vsm3c. */
2399 template<int UNSPEC
>
2400 class vaeskf2_vsm3c
: public function_base
2403 bool apply_mask_policy_p () const override
{ return false; }
2404 bool use_mask_predication_p () const override
{ return false; }
2405 bool has_merge_operand_p () const override
{ return false; }
2407 rtx
expand (function_expander
&e
) const override
2409 return e
.use_exact_insn
2410 (code_for_pred_vi_nomaskedoff_scalar (UNSPEC
, e
.vector_mode ()));
2414 /* Implements vsm3me. */
2415 class vsm3me
: public function_base
2418 bool apply_mask_policy_p () const override
{ return false; }
2419 bool use_mask_predication_p () const override
{ return false; }
2421 rtx
expand (function_expander
&e
) const override
2423 return e
.use_exact_insn (code_for_pred_vsm3me (e
.vector_mode ()));
2427 static CONSTEXPR
const vsetvl
<false> vsetvl_obj
;
2428 static CONSTEXPR
const vsetvl
<true> vsetvlmax_obj
;
2429 static CONSTEXPR
const loadstore
<false, LST_UNIT_STRIDE
, false> vle_obj
;
2430 static CONSTEXPR
const loadstore
<true, LST_UNIT_STRIDE
, false> vse_obj
;
2431 static CONSTEXPR
const loadstore
<false, LST_UNIT_STRIDE
, false> vlm_obj
;
2432 static CONSTEXPR
const loadstore
<true, LST_UNIT_STRIDE
, false> vsm_obj
;
2433 static CONSTEXPR
const loadstore
<false, LST_STRIDED
, false> vlse_obj
;
2434 static CONSTEXPR
const loadstore
<true, LST_STRIDED
, false> vsse_obj
;
2435 static CONSTEXPR
const loadstore
<false, LST_INDEXED
, false> vluxei8_obj
;
2436 static CONSTEXPR
const loadstore
<false, LST_INDEXED
, false> vluxei16_obj
;
2437 static CONSTEXPR
const loadstore
<false, LST_INDEXED
, false> vluxei32_obj
;
2438 static CONSTEXPR
const loadstore
<false, LST_INDEXED
, false> vluxei64_obj
;
2439 static CONSTEXPR
const loadstore
<false, LST_INDEXED
, true> vloxei8_obj
;
2440 static CONSTEXPR
const loadstore
<false, LST_INDEXED
, true> vloxei16_obj
;
2441 static CONSTEXPR
const loadstore
<false, LST_INDEXED
, true> vloxei32_obj
;
2442 static CONSTEXPR
const loadstore
<false, LST_INDEXED
, true> vloxei64_obj
;
2443 static CONSTEXPR
const loadstore
<true, LST_INDEXED
, false> vsuxei8_obj
;
2444 static CONSTEXPR
const loadstore
<true, LST_INDEXED
, false> vsuxei16_obj
;
2445 static CONSTEXPR
const loadstore
<true, LST_INDEXED
, false> vsuxei32_obj
;
2446 static CONSTEXPR
const loadstore
<true, LST_INDEXED
, false> vsuxei64_obj
;
2447 static CONSTEXPR
const loadstore
<true, LST_INDEXED
, true> vsoxei8_obj
;
2448 static CONSTEXPR
const loadstore
<true, LST_INDEXED
, true> vsoxei16_obj
;
2449 static CONSTEXPR
const loadstore
<true, LST_INDEXED
, true> vsoxei32_obj
;
2450 static CONSTEXPR
const loadstore
<true, LST_INDEXED
, true> vsoxei64_obj
;
2451 static CONSTEXPR
const binop
<PLUS
> vadd_obj
;
2452 static CONSTEXPR
const binop
<MINUS
> vsub_obj
;
2453 static CONSTEXPR
const vrsub vrsub_obj
;
2454 static CONSTEXPR
const binop
<AND
> vand_obj
;
2455 static CONSTEXPR
const binop
<IOR
> vor_obj
;
2456 static CONSTEXPR
const binop
<XOR
> vxor_obj
;
2457 static CONSTEXPR
const binop
<ASHIFT
> vsll_obj
;
2458 static CONSTEXPR
const binop
<ASHIFTRT
> vsra_obj
;
2459 static CONSTEXPR
const binop
<LSHIFTRT
> vsrl_obj
;
2460 static CONSTEXPR
const binop
<SMIN
> vmin_obj
;
2461 static CONSTEXPR
const binop
<SMAX
> vmax_obj
;
2462 static CONSTEXPR
const binop
<UMIN
> vminu_obj
;
2463 static CONSTEXPR
const binop
<UMAX
> vmaxu_obj
;
2464 static CONSTEXPR
const binop
<MULT
> vmul_obj
;
2465 static CONSTEXPR
const vmulh
<UNSPEC_VMULHS
> vmulh_obj
;
2466 static CONSTEXPR
const vmulh
<UNSPEC_VMULHU
> vmulhu_obj
;
2467 static CONSTEXPR
const vmulh
<UNSPEC_VMULHSU
> vmulhsu_obj
;
2468 static CONSTEXPR
const binop
<DIV
> vdiv_obj
;
2469 static CONSTEXPR
const binop
<MOD
> vrem_obj
;
2470 static CONSTEXPR
const binop
<UDIV
> vdivu_obj
;
2471 static CONSTEXPR
const binop
<UMOD
> vremu_obj
;
2472 static CONSTEXPR
const unop
<NEG
> vneg_obj
;
2473 static CONSTEXPR
const unop
<NOT
> vnot_obj
;
2474 static CONSTEXPR
const ext
<SIGN_EXTEND
> vsext_obj
;
2475 static CONSTEXPR
const ext
<ZERO_EXTEND
> vzext_obj
;
2476 static CONSTEXPR
const widen_binop
<PLUS
, SIGN_EXTEND
>vwadd_obj
;
2477 static CONSTEXPR
const widen_binop
<MINUS
, SIGN_EXTEND
>vwsub_obj
;
2478 static CONSTEXPR
const widen_binop
<MULT
, SIGN_EXTEND
>vwmul_obj
;
2479 static CONSTEXPR
const widen_binop
<PLUS
, ZERO_EXTEND
>vwaddu_obj
;
2480 static CONSTEXPR
const widen_binop
<MINUS
, ZERO_EXTEND
>vwsubu_obj
;
2481 static CONSTEXPR
const widen_binop
<MULT
, ZERO_EXTEND
>vwmulu_obj
;
2482 static CONSTEXPR
const vwmulsu vwmulsu_obj
;
2483 static CONSTEXPR
const vwcvt
<SIGN_EXTEND
> vwcvt_x_obj
;
2484 static CONSTEXPR
const vwcvt
<ZERO_EXTEND
> vwcvtu_x_obj
;
2485 static CONSTEXPR
const vadc vadc_obj
;
2486 static CONSTEXPR
const vsbc vsbc_obj
;
2487 static CONSTEXPR
const vmadc vmadc_obj
;
2488 static CONSTEXPR
const vmsbc vmsbc_obj
;
2489 static CONSTEXPR
const vnshift
<LSHIFTRT
> vnsrl_obj
;
2490 static CONSTEXPR
const vnshift
<ASHIFTRT
> vnsra_obj
;
2491 static CONSTEXPR
const vncvt_x vncvt_x_obj
;
2492 static CONSTEXPR
const vmerge vmerge_obj
;
2493 static CONSTEXPR
const vmv_v vmv_v_obj
;
2494 static CONSTEXPR
const icmp
<EQ
> vmseq_obj
;
2495 static CONSTEXPR
const icmp
<NE
> vmsne_obj
;
2496 static CONSTEXPR
const icmp
<LT
> vmslt_obj
;
2497 static CONSTEXPR
const icmp
<GT
> vmsgt_obj
;
2498 static CONSTEXPR
const icmp
<LE
> vmsle_obj
;
2499 static CONSTEXPR
const icmp
<GE
> vmsge_obj
;
2500 static CONSTEXPR
const icmp
<LTU
> vmsltu_obj
;
2501 static CONSTEXPR
const icmp
<GTU
> vmsgtu_obj
;
2502 static CONSTEXPR
const icmp
<LEU
> vmsleu_obj
;
2503 static CONSTEXPR
const icmp
<GEU
> vmsgeu_obj
;
2504 static CONSTEXPR
const vmacc vmacc_obj
;
2505 static CONSTEXPR
const vnmsac vnmsac_obj
;
2506 static CONSTEXPR
const vmadd vmadd_obj
;
2507 static CONSTEXPR
const vnmsub vnmsub_obj
;
2508 static CONSTEXPR
const vwmacc vwmacc_obj
;
2509 static CONSTEXPR
const vwmaccu vwmaccu_obj
;
2510 static CONSTEXPR
const vwmaccsu vwmaccsu_obj
;
2511 static CONSTEXPR
const vwmaccus vwmaccus_obj
;
2512 static CONSTEXPR
const binop
<SS_PLUS
> vsadd_obj
;
2513 static CONSTEXPR
const binop
<SS_MINUS
> vssub_obj
;
2514 static CONSTEXPR
const binop
<US_PLUS
> vsaddu_obj
;
2515 static CONSTEXPR
const binop
<US_MINUS
> vssubu_obj
;
2516 static CONSTEXPR
const sat_op
<UNSPEC_VAADDU
> vaaddu_obj
;
2517 static CONSTEXPR
const sat_op
<UNSPEC_VAADD
> vaadd_obj
;
2518 static CONSTEXPR
const sat_op
<UNSPEC_VASUBU
> vasubu_obj
;
2519 static CONSTEXPR
const sat_op
<UNSPEC_VASUB
> vasub_obj
;
2520 static CONSTEXPR
const sat_op
<UNSPEC_VSMUL
> vsmul_obj
;
2521 static CONSTEXPR
const sat_op
<UNSPEC_VSSRL
> vssrl_obj
;
2522 static CONSTEXPR
const sat_op
<UNSPEC_VSSRA
> vssra_obj
;
2523 static CONSTEXPR
const vnclip
<UNSPEC_VNCLIP
> vnclip_obj
;
2524 static CONSTEXPR
const vnclip
<UNSPEC_VNCLIPU
> vnclipu_obj
;
2525 static CONSTEXPR
const mask_logic
<AND
> vmand_obj
;
2526 static CONSTEXPR
const mask_nlogic
<AND
> vmnand_obj
;
2527 static CONSTEXPR
const mask_notlogic
<AND
> vmandn_obj
;
2528 static CONSTEXPR
const mask_logic
<XOR
> vmxor_obj
;
2529 static CONSTEXPR
const mask_logic
<IOR
> vmor_obj
;
2530 static CONSTEXPR
const mask_nlogic
<IOR
> vmnor_obj
;
2531 static CONSTEXPR
const mask_notlogic
<IOR
> vmorn_obj
;
2532 static CONSTEXPR
const mask_nlogic
<XOR
> vmxnor_obj
;
2533 static CONSTEXPR
const vmmv vmmv_obj
;
2534 static CONSTEXPR
const vmclr vmclr_obj
;
2535 static CONSTEXPR
const vmset vmset_obj
;
2536 static CONSTEXPR
const vmnot vmnot_obj
;
2537 static CONSTEXPR
const vcpop vcpop_obj
;
2538 static CONSTEXPR
const vfirst vfirst_obj
;
2539 static CONSTEXPR
const mask_misc
<UNSPEC_VMSBF
> vmsbf_obj
;
2540 static CONSTEXPR
const mask_misc
<UNSPEC_VMSIF
> vmsif_obj
;
2541 static CONSTEXPR
const mask_misc
<UNSPEC_VMSOF
> vmsof_obj
;
2542 static CONSTEXPR
const viota viota_obj
;
2543 static CONSTEXPR
const vid vid_obj
;
2544 static CONSTEXPR
const binop
<PLUS
, true> vfadd_obj
;
2545 static CONSTEXPR
const binop
<MINUS
, true> vfsub_obj
;
2546 static CONSTEXPR
const binop
<PLUS
, true, HAS_FRM
> vfadd_frm_obj
;
2547 static CONSTEXPR
const binop
<MINUS
, true, HAS_FRM
> vfsub_frm_obj
;
2548 static CONSTEXPR
const reverse_binop
<MINUS
> vfrsub_obj
;
2549 static CONSTEXPR
const reverse_binop
<MINUS
, HAS_FRM
> vfrsub_frm_obj
;
2550 static CONSTEXPR
const widen_binop_fp
<PLUS
> vfwadd_obj
;
2551 static CONSTEXPR
const widen_binop_fp
<PLUS
, HAS_FRM
> vfwadd_frm_obj
;
2552 static CONSTEXPR
const widen_binop_fp
<MINUS
> vfwsub_obj
;
2553 static CONSTEXPR
const widen_binop_fp
<MINUS
, HAS_FRM
> vfwsub_frm_obj
;
2554 static CONSTEXPR
const binop
<MULT
, true> vfmul_obj
;
2555 static CONSTEXPR
const binop
<MULT
, true, HAS_FRM
> vfmul_frm_obj
;
2556 static CONSTEXPR
const binop
<DIV
, true> vfdiv_obj
;
2557 static CONSTEXPR
const binop
<DIV
, true, HAS_FRM
> vfdiv_frm_obj
;
2558 static CONSTEXPR
const reverse_binop
<DIV
> vfrdiv_obj
;
2559 static CONSTEXPR
const reverse_binop
<DIV
, HAS_FRM
> vfrdiv_frm_obj
;
2560 static CONSTEXPR
const widen_binop_fp
<MULT
> vfwmul_obj
;
2561 static CONSTEXPR
const widen_binop_fp
<MULT
, HAS_FRM
> vfwmul_frm_obj
;
2562 static CONSTEXPR
const vfmacc
<NO_FRM
> vfmacc_obj
;
2563 static CONSTEXPR
const vfmacc
<HAS_FRM
> vfmacc_frm_obj
;
2564 static CONSTEXPR
const vfnmsac
<NO_FRM
> vfnmsac_obj
;
2565 static CONSTEXPR
const vfnmsac
<HAS_FRM
> vfnmsac_frm_obj
;
2566 static CONSTEXPR
const vfmadd
<NO_FRM
> vfmadd_obj
;
2567 static CONSTEXPR
const vfmadd
<HAS_FRM
> vfmadd_frm_obj
;
2568 static CONSTEXPR
const vfnmsub
<NO_FRM
> vfnmsub_obj
;
2569 static CONSTEXPR
const vfnmsub
<HAS_FRM
> vfnmsub_frm_obj
;
2570 static CONSTEXPR
const vfnmacc
<NO_FRM
> vfnmacc_obj
;
2571 static CONSTEXPR
const vfnmacc
<HAS_FRM
> vfnmacc_frm_obj
;
2572 static CONSTEXPR
const vfmsac
<NO_FRM
> vfmsac_obj
;
2573 static CONSTEXPR
const vfmsac
<HAS_FRM
> vfmsac_frm_obj
;
2574 static CONSTEXPR
const vfnmadd
<NO_FRM
> vfnmadd_obj
;
2575 static CONSTEXPR
const vfnmadd
<HAS_FRM
> vfnmadd_frm_obj
;
2576 static CONSTEXPR
const vfmsub
<NO_FRM
> vfmsub_obj
;
2577 static CONSTEXPR
const vfmsub
<HAS_FRM
> vfmsub_frm_obj
;
2578 static CONSTEXPR
const vfwmacc
<NO_FRM
> vfwmacc_obj
;
2579 static CONSTEXPR
const vfwmacc
<HAS_FRM
> vfwmacc_frm_obj
;
2580 static CONSTEXPR
const vfwnmacc
<NO_FRM
> vfwnmacc_obj
;
2581 static CONSTEXPR
const vfwnmacc
<HAS_FRM
> vfwnmacc_frm_obj
;
2582 static CONSTEXPR
const vfwmsac
<NO_FRM
> vfwmsac_obj
;
2583 static CONSTEXPR
const vfwmsac
<HAS_FRM
> vfwmsac_frm_obj
;
2584 static CONSTEXPR
const vfwnmsac
<NO_FRM
> vfwnmsac_obj
;
2585 static CONSTEXPR
const vfwnmsac
<HAS_FRM
> vfwnmsac_frm_obj
;
2586 static CONSTEXPR
const unop
<SQRT
> vfsqrt_obj
;
2587 static CONSTEXPR
const unop
<SQRT
, HAS_FRM
> vfsqrt_frm_obj
;
2588 static CONSTEXPR
const float_misc
<UNSPEC_VFRSQRT7
> vfrsqrt7_obj
;
2589 static CONSTEXPR
const float_misc
<UNSPEC_VFREC7
> vfrec7_obj
;
2590 static CONSTEXPR
const float_misc
<UNSPEC_VFREC7
, HAS_FRM
> vfrec7_frm_obj
;
2591 static CONSTEXPR
const binop
<SMIN
> vfmin_obj
;
2592 static CONSTEXPR
const binop
<SMAX
> vfmax_obj
;
2593 static CONSTEXPR
const float_misc
<UNSPEC_VCOPYSIGN
> vfsgnj_obj
;
2594 static CONSTEXPR
const vfsgnjn vfsgnjn_obj
;
2595 static CONSTEXPR
const float_misc
<UNSPEC_VXORSIGN
> vfsgnjx_obj
;
2596 static CONSTEXPR
const unop
<NEG
> vfneg_obj
;
2597 static CONSTEXPR
const unop
<ABS
> vfabs_obj
;
2598 static CONSTEXPR
const fcmp
<EQ
> vmfeq_obj
;
2599 static CONSTEXPR
const fcmp
<NE
> vmfne_obj
;
2600 static CONSTEXPR
const fcmp
<LT
> vmflt_obj
;
2601 static CONSTEXPR
const fcmp
<GT
> vmfgt_obj
;
2602 static CONSTEXPR
const fcmp
<LE
> vmfle_obj
;
2603 static CONSTEXPR
const fcmp
<GE
> vmfge_obj
;
2604 static CONSTEXPR
const vfclass vfclass_obj
;
2605 static CONSTEXPR
const vmerge vfmerge_obj
;
2606 static CONSTEXPR
const vmv_v vfmv_v_obj
;
2607 static CONSTEXPR
const vfcvt_x
<UNSPEC_VFCVT
> vfcvt_x_obj
;
2608 static CONSTEXPR
const vfcvt_x
<UNSPEC_VFCVT
, HAS_FRM
> vfcvt_x_frm_obj
;
2609 static CONSTEXPR
const vfcvt_x
<UNSPEC_UNSIGNED_VFCVT
> vfcvt_xu_obj
;
2610 static CONSTEXPR
const vfcvt_x
<UNSPEC_UNSIGNED_VFCVT
, HAS_FRM
> vfcvt_xu_frm_obj
;
2611 static CONSTEXPR
const vfcvt_rtz_x
<FIX
> vfcvt_rtz_x_obj
;
2612 static CONSTEXPR
const vfcvt_rtz_x
<UNSIGNED_FIX
> vfcvt_rtz_xu_obj
;
2613 static CONSTEXPR
const vfcvt_f
<NO_FRM
> vfcvt_f_obj
;
2614 static CONSTEXPR
const vfcvt_f
<HAS_FRM
> vfcvt_f_frm_obj
;
2615 static CONSTEXPR
const vfwcvt_x
<UNSPEC_VFCVT
> vfwcvt_x_obj
;
2616 static CONSTEXPR
const vfwcvt_x
<UNSPEC_VFCVT
, HAS_FRM
> vfwcvt_x_frm_obj
;
2617 static CONSTEXPR
const vfwcvt_x
<UNSPEC_UNSIGNED_VFCVT
> vfwcvt_xu_obj
;
2618 static CONSTEXPR
const vfwcvt_x
<UNSPEC_UNSIGNED_VFCVT
, HAS_FRM
> vfwcvt_xu_frm_obj
;
2619 static CONSTEXPR
const vfwcvt_rtz_x
<FIX
> vfwcvt_rtz_x_obj
;
2620 static CONSTEXPR
const vfwcvt_rtz_x
<UNSIGNED_FIX
> vfwcvt_rtz_xu_obj
;
2621 static CONSTEXPR
const vfwcvt_f vfwcvt_f_obj
;
2622 static CONSTEXPR
const vfncvt_x
<UNSPEC_VFCVT
> vfncvt_x_obj
;
2623 static CONSTEXPR
const vfncvt_x
<UNSPEC_VFCVT
, HAS_FRM
> vfncvt_x_frm_obj
;
2624 static CONSTEXPR
const vfncvt_x
<UNSPEC_UNSIGNED_VFCVT
> vfncvt_xu_obj
;
2625 static CONSTEXPR
const vfncvt_x
<UNSPEC_UNSIGNED_VFCVT
, HAS_FRM
> vfncvt_xu_frm_obj
;
2626 static CONSTEXPR
const vfncvt_rtz_x
<FIX
> vfncvt_rtz_x_obj
;
2627 static CONSTEXPR
const vfncvt_rtz_x
<UNSIGNED_FIX
> vfncvt_rtz_xu_obj
;
2628 static CONSTEXPR
const vfncvt_f
<NO_FRM
> vfncvt_f_obj
;
2629 static CONSTEXPR
const vfncvt_f
<HAS_FRM
> vfncvt_f_frm_obj
;
2630 static CONSTEXPR
const vfncvt_rod_f vfncvt_rod_f_obj
;
2631 static CONSTEXPR
const reducop
<UNSPEC_REDUC_SUM
> vredsum_obj
;
2632 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MAXU
> vredmaxu_obj
;
2633 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MAX
> vredmax_obj
;
2634 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MINU
> vredminu_obj
;
2635 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MIN
> vredmin_obj
;
2636 static CONSTEXPR
const reducop
<UNSPEC_REDUC_AND
> vredand_obj
;
2637 static CONSTEXPR
const reducop
<UNSPEC_REDUC_OR
> vredor_obj
;
2638 static CONSTEXPR
const reducop
<UNSPEC_REDUC_XOR
> vredxor_obj
;
2639 static CONSTEXPR
const reducop
<UNSPEC_WREDUC_SUM
> vwredsum_obj
;
2640 static CONSTEXPR
const reducop
<UNSPEC_WREDUC_SUMU
> vwredsumu_obj
;
2641 static CONSTEXPR
const freducop
<UNSPEC_REDUC_SUM_UNORDERED
> vfredusum_obj
;
2642 static CONSTEXPR
const freducop
<UNSPEC_REDUC_SUM_UNORDERED
, HAS_FRM
> vfredusum_frm_obj
;
2643 static CONSTEXPR
const freducop
<UNSPEC_REDUC_SUM_ORDERED
> vfredosum_obj
;
2644 static CONSTEXPR
const freducop
<UNSPEC_REDUC_SUM_ORDERED
, HAS_FRM
> vfredosum_frm_obj
;
2645 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MAX
> vfredmax_obj
;
2646 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MIN
> vfredmin_obj
;
2647 static CONSTEXPR
const freducop
<UNSPEC_WREDUC_SUM_UNORDERED
> vfwredusum_obj
;
2648 static CONSTEXPR
const freducop
<UNSPEC_WREDUC_SUM_UNORDERED
, HAS_FRM
> vfwredusum_frm_obj
;
2649 static CONSTEXPR
const freducop
<UNSPEC_WREDUC_SUM_ORDERED
> vfwredosum_obj
;
2650 static CONSTEXPR
const freducop
<UNSPEC_WREDUC_SUM_ORDERED
, HAS_FRM
> vfwredosum_frm_obj
;
2651 static CONSTEXPR
const vmv vmv_x_obj
;
2652 static CONSTEXPR
const vmv_s vmv_s_obj
;
2653 static CONSTEXPR
const vmv vfmv_f_obj
;
2654 static CONSTEXPR
const vmv_s vfmv_s_obj
;
2655 static CONSTEXPR
const slideop
<UNSPEC_VSLIDEUP
> vslideup_obj
;
2656 static CONSTEXPR
const slideop
<UNSPEC_VSLIDEDOWN
> vslidedown_obj
;
2657 static CONSTEXPR
const slideop
<UNSPEC_VSLIDE1UP
> vslide1up_obj
;
2658 static CONSTEXPR
const slideop
<UNSPEC_VSLIDE1DOWN
> vslide1down_obj
;
2659 static CONSTEXPR
const slideop
<UNSPEC_VFSLIDE1UP
> vfslide1up_obj
;
2660 static CONSTEXPR
const slideop
<UNSPEC_VFSLIDE1DOWN
> vfslide1down_obj
;
2661 static CONSTEXPR
const vrgather vrgather_obj
;
2662 static CONSTEXPR
const vrgatherei16 vrgatherei16_obj
;
2663 static CONSTEXPR
const vcompress vcompress_obj
;
2664 static CONSTEXPR
const vundefined vundefined_obj
;
2665 static CONSTEXPR
const vreinterpret vreinterpret_obj
;
2666 static CONSTEXPR
const vlmul_ext vlmul_ext_obj
;
2667 static CONSTEXPR
const vlmul_trunc vlmul_trunc_obj
;
2668 static CONSTEXPR
const vset vset_obj
;
2669 static CONSTEXPR
const vget vget_obj
;
2670 static CONSTEXPR
const vcreate vcreate_obj
;
2671 static CONSTEXPR
const read_vl read_vl_obj
;
2672 static CONSTEXPR
const vleff vleff_obj
;
2673 static CONSTEXPR
const vlenb vlenb_obj
;
2674 static CONSTEXPR
const vlseg vlseg_obj
;
2675 static CONSTEXPR
const vsseg vsseg_obj
;
2676 static CONSTEXPR
const vlsseg vlsseg_obj
;
2677 static CONSTEXPR
const vssseg vssseg_obj
;
2678 static CONSTEXPR
const seg_indexed_load
<UNSPEC_UNORDERED
> vluxseg_obj
;
2679 static CONSTEXPR
const seg_indexed_load
<UNSPEC_ORDERED
> vloxseg_obj
;
2680 static CONSTEXPR
const seg_indexed_store
<UNSPEC_UNORDERED
> vsuxseg_obj
;
2681 static CONSTEXPR
const seg_indexed_store
<UNSPEC_ORDERED
> vsoxseg_obj
;
2682 static CONSTEXPR
const vlsegff vlsegff_obj
;
2683 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLB
> vlb_obj
;
2684 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLBU
> vlbu_obj
;
2685 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLH
> vlh_obj
;
2686 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLHU
> vlhu_obj
;
2687 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLW
> vlw_obj
;
2688 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLWU
> vlwu_obj
;
2689 static CONSTEXPR
const th_loadstore_width
<true, LST_UNIT_STRIDE
, UNSPEC_TH_VLB
> vsb_obj
;
2690 static CONSTEXPR
const th_loadstore_width
<true, LST_UNIT_STRIDE
, UNSPEC_TH_VLH
> vsh_obj
;
2691 static CONSTEXPR
const th_loadstore_width
<true, LST_UNIT_STRIDE
, UNSPEC_TH_VLW
> vsw_obj
;
2692 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSB
> vlsb_obj
;
2693 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSBU
> vlsbu_obj
;
2694 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSH
> vlsh_obj
;
2695 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSHU
> vlshu_obj
;
2696 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSW
> vlsw_obj
;
2697 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSWU
> vlswu_obj
;
2698 static CONSTEXPR
const th_loadstore_width
<true, LST_STRIDED
, UNSPEC_TH_VLSB
> vssb_obj
;
2699 static CONSTEXPR
const th_loadstore_width
<true, LST_STRIDED
, UNSPEC_TH_VLSH
> vssh_obj
;
2700 static CONSTEXPR
const th_loadstore_width
<true, LST_STRIDED
, UNSPEC_TH_VLSW
> vssw_obj
;
2701 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXB
> vlxb_obj
;
2702 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXBU
> vlxbu_obj
;
2703 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXH
> vlxh_obj
;
2704 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXHU
> vlxhu_obj
;
2705 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXW
> vlxw_obj
;
2706 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXWU
> vlxwu_obj
;
2707 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VLXB
> vsxb_obj
;
2708 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VLXH
> vsxh_obj
;
2709 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VLXW
> vsxw_obj
;
2710 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VSUXB
> vsuxb_obj
;
2711 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VSUXH
> vsuxh_obj
;
2712 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VSUXW
> vsuxw_obj
;
2713 static CONSTEXPR
const th_extract vext_x_v_obj
;
2716 static CONSTEXPR
const vandn vandn_obj
;
2717 static CONSTEXPR
const bitmanip
<ROTATE
> vrol_obj
;
2718 static CONSTEXPR
const bitmanip
<ROTATERT
> vror_obj
;
2719 static CONSTEXPR
const b_reverse
<UNSPEC_VBREV
> vbrev_obj
;
2720 static CONSTEXPR
const b_reverse
<UNSPEC_VBREV8
> vbrev8_obj
;
2721 static CONSTEXPR
const b_reverse
<UNSPEC_VREV8
> vrev8_obj
;
2722 static CONSTEXPR
const bitmanip
<CLZ
> vclz_obj
;
2723 static CONSTEXPR
const bitmanip
<CTZ
> vctz_obj
;
2724 static CONSTEXPR
const vwsll vwsll_obj
;
2725 static CONSTEXPR
const clmul
<UNSPEC_VCLMUL
> vclmul_obj
;
2726 static CONSTEXPR
const clmul
<UNSPEC_VCLMULH
> vclmulh_obj
;
2727 static CONSTEXPR
const vg_nhab
<UNSPEC_VGHSH
> vghsh_obj
;
2728 static CONSTEXPR
const crypto_vv
<UNSPEC_VGMUL
> vgmul_obj
;
2729 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESEF
> vaesef_obj
;
2730 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESEM
> vaesem_obj
;
2731 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESDF
> vaesdf_obj
;
2732 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESDM
> vaesdm_obj
;
2733 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESZ
> vaesz_obj
;
2734 static CONSTEXPR
const crypto_vi
<UNSPEC_VAESKF1
> vaeskf1_obj
;
2735 static CONSTEXPR
const vaeskf2_vsm3c
<UNSPEC_VAESKF2
> vaeskf2_obj
;
2736 static CONSTEXPR
const vg_nhab
<UNSPEC_VSHA2MS
> vsha2ms_obj
;
2737 static CONSTEXPR
const vg_nhab
<UNSPEC_VSHA2CH
> vsha2ch_obj
;
2738 static CONSTEXPR
const vg_nhab
<UNSPEC_VSHA2CL
> vsha2cl_obj
;
2739 static CONSTEXPR
const crypto_vi
<UNSPEC_VSM4K
> vsm4k_obj
;
2740 static CONSTEXPR
const crypto_vv
<UNSPEC_VSM4R
> vsm4r_obj
;
2741 static CONSTEXPR
const vsm3me vsm3me_obj
;
2742 static CONSTEXPR
const vaeskf2_vsm3c
<UNSPEC_VSM3C
> vsm3c_obj
;
/* Declare the function base NAME, pointing it to an instance
   of class <NAME>_obj.  */
#define BASE(NAME) \
  namespace bases { const function_base *const NAME = &NAME##_obj; }
2940 BASE (vfwcvt_xu_frm
)
2942 BASE (vfwcvt_rtz_xu
)
2947 BASE (vfncvt_xu_frm
)
2949 BASE (vfncvt_rtz_xu
)
2964 BASE (vfredusum_frm
)
2966 BASE (vfredosum_frm
)
2970 BASE (vfwredosum_frm
)
2972 BASE (vfwredusum_frm
)
3064 } // end namespace riscv_vector