1 /* function_base implementation for RISC-V 'V' Extension for GNU compiler.
2 Copyright (C) 2022-2024 Free Software Foundation, Inc.
3 Contributed by Ju-Zhe Zhong (juzhe.zhong@rivai.ai), RiVAI Technologies Ltd.
5 This file is part of GCC.
7 GCC is free software; you can redistribute it and/or modify it
8 under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 GCC is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING3. If not see
19 <http://www.gnu.org/licenses/>. */
23 #include "coretypes.h"
29 #include "insn-codes.h"
33 #include "basic-block.h"
35 #include "fold-const.h"
37 #include "gimple-iterator.h"
41 #include "tree-vector-builder.h"
42 #include "rtx-vector-builder.h"
43 #include "riscv-vector-builtins.h"
44 #include "riscv-vector-builtins-shapes.h"
45 #include "riscv-vector-builtins-bases.h"
47 using namespace riscv_vector
;
49 namespace riscv_vector
{
51 /* Enumerates types of loads/stores operations.
52 It's only used in here so we don't define it
53 in riscv-vector-builtins-bases.h. */
67 /* Helper function to fold vleff and vlsegff. */
69 fold_fault_load (gimple_folder
&f
)
71 /* fold fault_load (const *base, size_t *new_vl, size_t vl)
73 ====> fault_load (const *base, size_t vl)
74 new_vl = MEM_REF[read_vl ()]. */
76 auto_vec
<tree
> vargs (gimple_call_num_args (f
.call
) - 1);
78 for (unsigned i
= 0; i
< gimple_call_num_args (f
.call
); i
++)
80 /* Exclude size_t *new_vl argument. */
81 if (i
== gimple_call_num_args (f
.call
) - 2)
84 vargs
.quick_push (gimple_call_arg (f
.call
, i
));
87 gimple
*repl
= gimple_build_call_vec (gimple_call_fn (f
.call
), vargs
);
88 gimple_call_set_lhs (repl
, f
.lhs
);
90 /* Handle size_t *new_vl by read_vl. */
91 tree new_vl
= gimple_call_arg (f
.call
, gimple_call_num_args (f
.call
) - 2);
92 if (integer_zerop (new_vl
))
94 /* This case happens when user passes the nullptr to new_vl argument.
95 In this case, we just need to ignore the new_vl argument and return
96 fault_load instruction directly. */
100 tree tmp_var
= create_tmp_var (size_type_node
, "new_vl");
101 tree decl
= get_read_vl_decl ();
102 gimple
*g
= gimple_build_call (decl
, 0);
103 gimple_call_set_lhs (g
, tmp_var
);
105 = fold_build2 (MEM_REF
, size_type_node
,
106 gimple_call_arg (f
.call
, gimple_call_num_args (f
.call
) - 2),
107 build_int_cst (build_pointer_type (size_type_node
), 0));
108 gassign
*assign
= gimple_build_assign (indirect
, tmp_var
);
110 gsi_insert_after (f
.gsi
, assign
, GSI_SAME_STMT
);
111 gsi_insert_after (f
.gsi
, g
, GSI_SAME_STMT
);
115 /* Implements vsetvl<mode> && vsetvlmax<mode>. */
116 template<bool VLMAX_P
>
117 class vsetvl
: public function_base
120 bool apply_vl_p () const override
125 rtx
expand (function_expander
&e
) const override
128 e
.add_input_operand (Pmode
, gen_rtx_REG (Pmode
, 0));
130 e
.add_input_operand (0);
132 tree type
= builtin_types
[e
.type
.index
].vector
;
133 machine_mode mode
= TYPE_MODE (type
);
135 if (TARGET_XTHEADVECTOR
)
137 machine_mode inner_mode
= GET_MODE_INNER (mode
);
139 e
.add_input_operand (Pmode
,
140 gen_int_mode (GET_MODE_BITSIZE (inner_mode
), Pmode
));
142 e
.add_input_operand (Pmode
,
143 gen_int_mode (get_vlmul (mode
), Pmode
));
147 /* Normalize same RATO (SEW/LMUL) into same vsetvl instruction.
149 - e8,mf8/e16,mf4/e32,mf2/e64,m1 --> e8mf8
150 - e8,mf4/e16,mf2/e32,m1/e64,m2 --> e8mf4
151 - e8,mf2/e16,m1/e32,m2/e64,m4 --> e8mf2
152 - e8,m1/e16,m2/e32,m4/e64,m8 --> e8m1
153 - e8,m2/e16,m4/e32,m8 --> e8m2
154 - e8,m4/e16,m8 --> e8m4
158 e
.add_input_operand (Pmode
, gen_int_mode (8, Pmode
));
162 = get_vector_mode (QImode
, GET_MODE_NUNITS (mode
)).require ();
163 e
.add_input_operand (Pmode
, gen_int_mode (get_vlmul (e8_mode
), Pmode
));
167 e
.add_input_operand (Pmode
,
168 gen_int_mode (get_prefer_tail_policy (), Pmode
));
171 e
.add_input_operand (Pmode
,
172 gen_int_mode (get_prefer_mask_policy (), Pmode
));
173 return e
.generate_insn (code_for_vsetvl_no_side_effects (Pmode
));
178 * vle.v/vse.v/vlm.v/vsm.v/vlse.v/vsse.v/vluxei.v/vloxei.v/vsuxei.v/vsoxei.v
180 template<bool STORE_P
, lst_type LST_TYPE
, bool ORDERED_P
>
181 class loadstore
: public function_base
184 bool apply_tail_policy_p () const override
{ return !STORE_P
; }
185 bool apply_mask_policy_p () const override
{ return !STORE_P
; }
187 unsigned int call_properties (const function_instance
&) const override
190 return CP_WRITE_MEMORY
;
192 return CP_READ_MEMORY
;
195 bool can_be_overloaded_p (enum predication_type_index pred
) const override
197 if (STORE_P
|| LST_TYPE
== LST_INDEXED
)
199 return pred
!= PRED_TYPE_none
;
202 rtx
expand (function_expander
&e
) const override
204 if (LST_TYPE
== LST_INDEXED
)
206 int unspec
= ORDERED_P
? UNSPEC_ORDERED
: UNSPEC_UNORDERED
;
208 return e
.use_exact_insn (
209 code_for_pred_indexed_store (unspec
, e
.vector_mode (),
213 unsigned src_eew_bitsize
214 = GET_MODE_BITSIZE (GET_MODE_INNER (e
.index_mode ()));
215 unsigned dst_eew_bitsize
216 = GET_MODE_BITSIZE (GET_MODE_INNER (e
.vector_mode ()));
217 if (dst_eew_bitsize
== src_eew_bitsize
)
218 return e
.use_exact_insn (
219 code_for_pred_indexed_load_same_eew (unspec
, e
.vector_mode ()));
220 else if (dst_eew_bitsize
> src_eew_bitsize
)
222 unsigned factor
= dst_eew_bitsize
/ src_eew_bitsize
;
226 return e
.use_exact_insn (
227 code_for_pred_indexed_load_x2_greater_eew (
228 unspec
, e
.vector_mode ()));
230 return e
.use_exact_insn (
231 code_for_pred_indexed_load_x4_greater_eew (
232 unspec
, e
.vector_mode ()));
234 return e
.use_exact_insn (
235 code_for_pred_indexed_load_x8_greater_eew (
236 unspec
, e
.vector_mode ()));
243 unsigned factor
= src_eew_bitsize
/ dst_eew_bitsize
;
247 return e
.use_exact_insn (
248 code_for_pred_indexed_load_x2_smaller_eew (
249 unspec
, e
.vector_mode ()));
251 return e
.use_exact_insn (
252 code_for_pred_indexed_load_x4_smaller_eew (
253 unspec
, e
.vector_mode ()));
255 return e
.use_exact_insn (
256 code_for_pred_indexed_load_x8_smaller_eew (
257 unspec
, e
.vector_mode ()));
264 else if (LST_TYPE
== LST_STRIDED
)
267 return e
.use_contiguous_store_insn (
268 code_for_pred_strided_store (e
.vector_mode ()));
270 return e
.use_contiguous_load_insn (
271 code_for_pred_strided_load (e
.vector_mode ()));
276 return e
.use_contiguous_store_insn (
277 code_for_pred_store (e
.vector_mode ()));
279 return e
.use_contiguous_load_insn (
280 code_for_pred_mov (e
.vector_mode ()));
286 vadd/vsub/vand/vor/vxor/vsll/vsra/vsrl/
287 vmin/vmax/vminu/vmaxu/vdiv/vrem/vdivu/
288 vremu/vsadd/vsaddu/vssub/vssubu
291 template <rtx_code CODE
, bool MAY_REQUIRE_FRM
= false,
292 enum frm_op_type FRM_OP
= NO_FRM
>
293 class binop
: public function_base
296 bool has_rounding_mode_operand_p () const override
298 return FRM_OP
== HAS_FRM
;
301 bool may_require_frm_p () const override
{ return MAY_REQUIRE_FRM
; }
303 rtx
expand (function_expander
&e
) const override
305 switch (e
.op_info
->op
)
308 gcc_assert (FRM_OP
== NO_FRM
);
310 return e
.use_exact_insn (code_for_pred_scalar (CODE
, e
.vector_mode ()));
312 return e
.use_exact_insn (code_for_pred (CODE
, e
.vector_mode ()));
319 /* Implements vrsub. */
320 class vrsub
: public function_base
323 rtx
expand (function_expander
&e
) const override
325 return e
.use_exact_insn (
326 code_for_pred_sub_reverse_scalar (e
.vector_mode ()));
330 /* Implements vneg/vnot. */
331 template<rtx_code CODE
, enum frm_op_type FRM_OP
= NO_FRM
>
332 class unop
: public function_base
335 bool has_rounding_mode_operand_p () const override
337 return FRM_OP
== HAS_FRM
;
340 bool may_require_frm_p () const override
{ return true; }
342 rtx
expand (function_expander
&e
) const override
344 return e
.use_exact_insn (code_for_pred (CODE
, e
.vector_mode ()));
348 /* Implements vsext.vf2/vsext.vf4/vsext.vf8/vzext.vf2/vzext.vf4/vzext.vf8. */
349 template<rtx_code CODE
>
350 class ext
: public function_base
353 rtx
expand (function_expander
&e
) const override
355 switch (e
.op_info
->op
)
358 return e
.use_exact_insn (code_for_pred_vf2 (CODE
, e
.vector_mode ()));
360 return e
.use_exact_insn (code_for_pred_vf4 (CODE
, e
.vector_mode ()));
362 return e
.use_exact_insn (code_for_pred_vf8 (CODE
, e
.vector_mode ()));
369 /* Implements vmulh/vmulhu/vmulhsu. */
371 class vmulh
: public function_base
374 rtx
expand (function_expander
&e
) const override
376 switch (e
.op_info
->op
)
379 return e
.use_exact_insn (
380 code_for_pred_mulh_scalar (UNSPEC
, e
.vector_mode ()));
382 return e
.use_exact_insn (
383 code_for_pred_mulh (UNSPEC
, e
.vector_mode ()));
390 /* Implements vwadd/vwsub/vwmul. */
391 template<rtx_code CODE1
, rtx_code CODE2
= FLOAT_EXTEND
>
392 class widen_binop
: public function_base
395 rtx
expand (function_expander
&e
) const override
397 switch (e
.op_info
->op
)
400 return e
.use_exact_insn (
401 code_for_pred_dual_widen (CODE1
, CODE2
, e
.vector_mode ()));
403 return e
.use_exact_insn (
404 code_for_pred_dual_widen_scalar (CODE1
, CODE2
, e
.vector_mode ()));
407 return e
.use_exact_insn (
408 code_for_pred_single_widen_add (CODE2
, e
.vector_mode ()));
410 return e
.use_exact_insn (
411 code_for_pred_single_widen_sub (CODE2
, e
.vector_mode ()));
413 return e
.use_exact_insn (
414 code_for_pred_single_widen_scalar (CODE1
, CODE2
, e
.vector_mode ()));
421 /* Implement vfwadd/vfwsub/vfwmul. */
422 template<rtx_code CODE
, enum frm_op_type FRM_OP
= NO_FRM
>
423 class widen_binop_fp
: public function_base
426 bool has_rounding_mode_operand_p () const override
428 return FRM_OP
== HAS_FRM
;
431 bool may_require_frm_p () const override
{ return true; }
433 rtx
expand (function_expander
&e
) const override
435 switch (e
.op_info
->op
)
438 return e
.use_exact_insn (
439 code_for_pred_dual_widen (CODE
, e
.vector_mode ()));
441 return e
.use_exact_insn (
442 code_for_pred_dual_widen_scalar (CODE
, e
.vector_mode ()));
445 return e
.use_exact_insn (
446 code_for_pred_single_widen_add (e
.vector_mode ()));
448 return e
.use_exact_insn (
449 code_for_pred_single_widen_sub (e
.vector_mode ()));
451 return e
.use_exact_insn (
452 code_for_pred_single_widen_scalar (CODE
, e
.vector_mode ()));
459 /* Implements vwmulsu. */
460 class vwmulsu
: public function_base
463 rtx
expand (function_expander
&e
) const override
465 switch (e
.op_info
->op
)
468 return e
.use_exact_insn (code_for_pred_widen_mulsu (e
.vector_mode ()));
470 return e
.use_exact_insn (
471 code_for_pred_widen_mulsu_scalar (e
.vector_mode ()));
478 /* Implements vwcvt. */
479 template<rtx_code CODE
>
480 class vwcvt
: public function_base
483 rtx
expand (function_expander
&e
) const override
485 return e
.use_exact_insn (code_for_pred (CODE
, e
.vector_mode ()));
489 /* Implements vadc. */
490 class vadc
: public function_base
493 bool apply_mask_policy_p () const override
{ return false; }
494 bool use_mask_predication_p () const override
{ return false; }
496 rtx
expand (function_expander
&e
) const override
498 switch (e
.op_info
->op
)
501 return e
.use_exact_insn (code_for_pred_adc (e
.vector_mode ()));
503 return e
.use_exact_insn (code_for_pred_adc_scalar (e
.vector_mode ()));
510 /* Implements vsbc. */
511 class vsbc
: public function_base
514 bool apply_mask_policy_p () const override
{ return false; }
515 bool use_mask_predication_p () const override
{ return false; }
517 rtx
expand (function_expander
&e
) const override
519 switch (e
.op_info
->op
)
522 return e
.use_exact_insn (code_for_pred_sbc (e
.vector_mode ()));
524 return e
.use_exact_insn (code_for_pred_sbc_scalar (e
.vector_mode ()));
531 /* Implements vmadc. */
532 class vmadc
: public function_base
535 bool apply_tail_policy_p () const override
{ return false; }
536 bool apply_mask_policy_p () const override
{ return false; }
537 bool use_mask_predication_p () const override
{ return false; }
538 bool has_merge_operand_p () const override
{ return false; }
540 rtx
expand (function_expander
&e
) const override
542 switch (e
.op_info
->op
)
545 return e
.use_exact_insn (code_for_pred_madc (e
.vector_mode ()));
547 return e
.use_exact_insn (code_for_pred_madc_scalar (e
.vector_mode ()));
549 return e
.use_exact_insn (
550 code_for_pred_madc_overflow (e
.vector_mode ()));
552 return e
.use_exact_insn (
553 code_for_pred_madc_overflow_scalar (e
.vector_mode ()));
560 /* Implements vmsbc. */
561 class vmsbc
: public function_base
564 bool apply_tail_policy_p () const override
{ return false; }
565 bool apply_mask_policy_p () const override
{ return false; }
566 bool use_mask_predication_p () const override
{ return false; }
567 bool has_merge_operand_p () const override
{ return false; }
569 rtx
expand (function_expander
&e
) const override
571 switch (e
.op_info
->op
)
574 return e
.use_exact_insn (code_for_pred_msbc (e
.vector_mode ()));
576 return e
.use_exact_insn (code_for_pred_msbc_scalar (e
.vector_mode ()));
578 return e
.use_exact_insn (
579 code_for_pred_msbc_overflow (e
.vector_mode ()));
581 return e
.use_exact_insn (
582 code_for_pred_msbc_overflow_scalar (e
.vector_mode ()));
589 /* Implements vnsrl/vnsra. */
590 template<rtx_code CODE
>
591 class vnshift
: public function_base
594 rtx
expand (function_expander
&e
) const override
596 switch (e
.op_info
->op
)
599 return e
.use_exact_insn (
600 code_for_pred_narrow_scalar (CODE
, e
.vector_mode ()));
602 return e
.use_exact_insn (code_for_pred_narrow (CODE
, e
.vector_mode ()));
609 /* Implements vncvt. */
610 class vncvt_x
: public function_base
613 rtx
expand (function_expander
&e
) const override
615 return e
.use_exact_insn (code_for_pred_trunc (e
.vector_mode ()));
619 /* Implements vmerge/vfmerge. */
620 class vmerge
: public function_base
623 bool apply_mask_policy_p () const override
{ return false; }
624 bool use_mask_predication_p () const override
{ return false; }
625 rtx
expand (function_expander
&e
) const override
627 switch (e
.op_info
->op
)
630 return e
.use_exact_insn (code_for_pred_merge (e
.vector_mode ()));
633 return e
.use_exact_insn (code_for_pred_merge_scalar (e
.vector_mode ()));
640 /* Implements vmv.v.x/vmv.v.v/vfmv.v.f. */
641 class vmv_v
: public function_base
644 rtx
expand (function_expander
&e
) const override
646 switch (e
.op_info
->op
)
649 return e
.use_exact_insn (code_for_pred_mov (e
.vector_mode ()));
652 return e
.use_exact_insn (code_for_pred_broadcast (e
.vector_mode ()));
659 /* Implements vaadd/vasub/vsmul/vssra/vssrl. */
661 class sat_op
: public function_base
664 bool has_rounding_mode_operand_p () const override
{ return true; }
666 bool may_require_vxrm_p () const override
{ return true; }
668 rtx
expand (function_expander
&e
) const override
670 switch (e
.op_info
->op
)
673 return e
.use_exact_insn (
674 code_for_pred_scalar (UNSPEC
, e
.vector_mode ()));
676 return e
.use_exact_insn (code_for_pred (UNSPEC
, e
.vector_mode ()));
683 /* Implements vnclip/vnclipu. */
685 class vnclip
: public function_base
688 bool has_rounding_mode_operand_p () const override
{ return true; }
690 bool may_require_vxrm_p () const override
{ return true; }
692 rtx
expand (function_expander
&e
) const override
694 switch (e
.op_info
->op
)
697 return e
.use_exact_insn (
698 code_for_pred_narrow_clip_scalar (UNSPEC
, e
.vector_mode ()));
700 return e
.use_exact_insn (
701 code_for_pred_narrow_clip (UNSPEC
, e
.vector_mode ()));
708 /* Implements vmseq/vmsne/vmslt/vmsgt/vmsle/vmsge. */
709 template<rtx_code CODE
>
710 class icmp
: public function_base
713 rtx
expand (function_expander
&e
) const override
715 switch (e
.op_info
->op
)
718 if (CODE
== GE
|| CODE
== GEU
)
719 return e
.use_compare_insn (CODE
, code_for_pred_ge_scalar (
722 return e
.use_compare_insn (CODE
, code_for_pred_cmp_scalar (
726 if (CODE
== LT
|| CODE
== LTU
|| CODE
== GE
|| CODE
== GEU
)
727 return e
.use_compare_insn (CODE
,
728 code_for_pred_ltge (e
.vector_mode ()));
730 return e
.use_compare_insn (CODE
,
731 code_for_pred_cmp (e
.vector_mode ()));
739 /* Implements vmacc/vnmsac/vmadd/vnmsub. */
740 class vmacc
: public function_base
743 bool has_merge_operand_p () const override
{ return false; }
745 rtx
expand (function_expander
&e
) const override
747 if (e
.op_info
->op
== OP_TYPE_vx
)
748 return e
.use_ternop_insn (true, code_for_pred_mul_plus_scalar (
750 if (e
.op_info
->op
== OP_TYPE_vv
)
751 return e
.use_ternop_insn (true,
752 code_for_pred_mul_plus (e
.vector_mode ()));
757 class vnmsac
: public function_base
760 bool has_merge_operand_p () const override
{ return false; }
762 rtx
expand (function_expander
&e
) const override
764 if (e
.op_info
->op
== OP_TYPE_vx
)
765 return e
.use_ternop_insn (true, code_for_pred_minus_mul_scalar (
767 if (e
.op_info
->op
== OP_TYPE_vv
)
768 return e
.use_ternop_insn (true,
769 code_for_pred_minus_mul (e
.vector_mode ()));
774 class vmadd
: public function_base
777 bool has_merge_operand_p () const override
{ return false; }
779 rtx
expand (function_expander
&e
) const override
781 if (e
.op_info
->op
== OP_TYPE_vx
)
782 return e
.use_ternop_insn (false, code_for_pred_mul_plus_scalar (
784 if (e
.op_info
->op
== OP_TYPE_vv
)
785 return e
.use_ternop_insn (false,
786 code_for_pred_mul_plus (e
.vector_mode ()));
791 class vnmsub
: public function_base
794 bool has_merge_operand_p () const override
{ return false; }
796 rtx
expand (function_expander
&e
) const override
798 if (e
.op_info
->op
== OP_TYPE_vx
)
799 return e
.use_ternop_insn (false, code_for_pred_minus_mul_scalar (
801 if (e
.op_info
->op
== OP_TYPE_vv
)
802 return e
.use_ternop_insn (false,
803 code_for_pred_minus_mul (e
.vector_mode ()));
808 /* Implements vwmacc<su><su>. */
809 class vwmacc
: public function_base
812 bool has_merge_operand_p () const override
{ return false; }
814 rtx
expand (function_expander
&e
) const override
816 if (e
.op_info
->op
== OP_TYPE_vx
)
817 return e
.use_widen_ternop_insn (
818 code_for_pred_widen_mul_plus_scalar (SIGN_EXTEND
, e
.vector_mode ()));
819 if (e
.op_info
->op
== OP_TYPE_vv
)
820 return e
.use_widen_ternop_insn (
821 code_for_pred_widen_mul_plus (SIGN_EXTEND
, e
.vector_mode ()));
826 class vwmaccu
: public function_base
829 bool has_merge_operand_p () const override
{ return false; }
831 rtx
expand (function_expander
&e
) const override
833 if (e
.op_info
->op
== OP_TYPE_vx
)
834 return e
.use_widen_ternop_insn (
835 code_for_pred_widen_mul_plus_scalar (ZERO_EXTEND
, e
.vector_mode ()));
836 if (e
.op_info
->op
== OP_TYPE_vv
)
837 return e
.use_widen_ternop_insn (
838 code_for_pred_widen_mul_plus (ZERO_EXTEND
, e
.vector_mode ()));
843 class vwmaccsu
: public function_base
846 bool has_merge_operand_p () const override
{ return false; }
848 rtx
expand (function_expander
&e
) const override
850 if (e
.op_info
->op
== OP_TYPE_vx
)
851 return e
.use_widen_ternop_insn (
852 code_for_pred_widen_mul_plussu_scalar (e
.vector_mode ()));
853 if (e
.op_info
->op
== OP_TYPE_vv
)
854 return e
.use_widen_ternop_insn (
855 code_for_pred_widen_mul_plussu (e
.vector_mode ()));
860 class vwmaccus
: public function_base
863 bool has_merge_operand_p () const override
{ return false; }
865 rtx
expand (function_expander
&e
) const override
867 return e
.use_widen_ternop_insn (
868 code_for_pred_widen_mul_plusus_scalar (e
.vector_mode ()));
872 /* Implements vmand/vmnand/vmandn/vmxor/vmor/vmnor/vmorn/vmxnor */
873 template<rtx_code CODE
>
874 class mask_logic
: public function_base
877 bool apply_tail_policy_p () const override
{ return false; }
878 bool apply_mask_policy_p () const override
{ return false; }
880 rtx
expand (function_expander
&e
) const override
882 return e
.use_exact_insn (code_for_pred (CODE
, e
.vector_mode ()));
885 template<rtx_code CODE
>
886 class mask_nlogic
: public function_base
889 bool apply_tail_policy_p () const override
{ return false; }
890 bool apply_mask_policy_p () const override
{ return false; }
892 rtx
expand (function_expander
&e
) const override
894 return e
.use_exact_insn (code_for_pred_n (CODE
, e
.vector_mode ()));
897 template<rtx_code CODE
>
898 class mask_notlogic
: public function_base
901 bool apply_tail_policy_p () const override
{ return false; }
902 bool apply_mask_policy_p () const override
{ return false; }
904 rtx
expand (function_expander
&e
) const override
906 return e
.use_exact_insn (code_for_pred_not (CODE
, e
.vector_mode ()));
910 /* Implements vmmv. */
911 class vmmv
: public function_base
914 bool apply_tail_policy_p () const override
{ return false; }
915 bool apply_mask_policy_p () const override
{ return false; }
917 rtx
expand (function_expander
&e
) const override
919 return e
.use_exact_insn (code_for_pred_mov (e
.vector_mode ()));
923 /* Implements vmclr. */
924 class vmclr
: public function_base
927 bool can_be_overloaded_p (enum predication_type_index
) const override
932 rtx
expand (function_expander
&e
) const override
934 machine_mode mode
= TYPE_MODE (TREE_TYPE (e
.exp
));
935 e
.add_all_one_mask_operand (mode
);
936 e
.add_vundef_operand (mode
);
937 e
.add_input_operand (mode
, CONST0_RTX (mode
));
938 e
.add_input_operand (call_expr_nargs (e
.exp
) - 1);
939 e
.add_input_operand (Pmode
, get_avl_type_rtx (avl_type::NONVLMAX
));
940 return e
.generate_insn (code_for_pred_mov (e
.vector_mode ()));
944 /* Implements vmset. */
945 class vmset
: public function_base
948 bool can_be_overloaded_p (enum predication_type_index
) const override
953 rtx
expand (function_expander
&e
) const override
955 machine_mode mode
= TYPE_MODE (TREE_TYPE (e
.exp
));
956 e
.add_all_one_mask_operand (mode
);
957 e
.add_vundef_operand (mode
);
958 e
.add_input_operand (mode
, CONSTM1_RTX (mode
));
959 e
.add_input_operand (call_expr_nargs (e
.exp
) - 1);
960 e
.add_input_operand (Pmode
, get_avl_type_rtx (avl_type::NONVLMAX
));
961 return e
.generate_insn (code_for_pred_mov (e
.vector_mode ()));
965 /* Implements vmnot. */
966 class vmnot
: public function_base
969 bool apply_tail_policy_p () const override
{ return false; }
970 bool apply_mask_policy_p () const override
{ return false; }
972 rtx
expand (function_expander
&e
) const override
974 return e
.use_exact_insn (code_for_pred_not (e
.vector_mode ()));
978 /* Implements vcpop. */
979 class vcpop
: public function_base
982 bool apply_tail_policy_p () const override
{ return false; }
983 bool apply_mask_policy_p () const override
{ return false; }
984 bool has_merge_operand_p () const override
{ return false; }
986 rtx
expand (function_expander
&e
) const override
988 return e
.use_exact_insn (code_for_pred_popcount (e
.vector_mode (), Pmode
));
992 /* Implements vfirst. */
993 class vfirst
: public function_base
996 bool apply_tail_policy_p () const override
{ return false; }
997 bool apply_mask_policy_p () const override
{ return false; }
998 bool has_merge_operand_p () const override
{ return false; }
1000 rtx
expand (function_expander
&e
) const override
1002 return e
.use_exact_insn (code_for_pred_ffs (e
.vector_mode (), Pmode
));
1006 /* Implements vmsbf/vmsif/vmsof. */
1007 template<int UNSPEC
>
1008 class mask_misc
: public function_base
1011 bool apply_tail_policy_p () const override
{ return false; }
1013 rtx
expand (function_expander
&e
) const override
1015 return e
.use_exact_insn (code_for_pred (UNSPEC
, e
.vector_mode ()));
1019 /* Implements viota. */
1020 class viota
: public function_base
1023 bool can_be_overloaded_p (enum predication_type_index pred
) const override
1025 return pred
== PRED_TYPE_tu
|| pred
== PRED_TYPE_tum
1026 || pred
== PRED_TYPE_tumu
|| pred
== PRED_TYPE_mu
;
1029 rtx
expand (function_expander
&e
) const override
1031 return e
.use_exact_insn (code_for_pred_iota (e
.vector_mode ()));
1035 /* Implements vid. */
1036 class vid
: public function_base
1039 bool can_be_overloaded_p (enum predication_type_index pred
) const override
1041 return pred
== PRED_TYPE_tu
|| pred
== PRED_TYPE_tum
1042 || pred
== PRED_TYPE_tumu
|| pred
== PRED_TYPE_mu
;
1045 rtx
expand (function_expander
&e
) const override
1047 return e
.use_exact_insn (code_for_pred_series (e
.vector_mode ()));
1051 /* Implements vfrsub/vfrdiv. */
1052 template<rtx_code CODE
, enum frm_op_type FRM_OP
= NO_FRM
>
1053 class reverse_binop
: public function_base
1056 bool has_rounding_mode_operand_p () const override
1058 return FRM_OP
== HAS_FRM
;
1061 bool may_require_frm_p () const override
{ return true; }
1063 rtx
expand (function_expander
&e
) const override
1065 return e
.use_exact_insn (
1066 code_for_pred_reverse_scalar (CODE
, e
.vector_mode ()));
1070 template<enum frm_op_type FRM_OP
= NO_FRM
>
1071 class vfmacc
: public function_base
1074 bool has_rounding_mode_operand_p () const override
1076 return FRM_OP
== HAS_FRM
;
1079 bool may_require_frm_p () const override
{ return true; }
1081 bool has_merge_operand_p () const override
{ return false; }
1083 rtx
expand (function_expander
&e
) const override
1085 if (e
.op_info
->op
== OP_TYPE_vf
)
1086 return e
.use_ternop_insn (true,
1087 code_for_pred_mul_scalar (PLUS
,
1089 if (e
.op_info
->op
== OP_TYPE_vv
)
1090 return e
.use_ternop_insn (true,
1091 code_for_pred_mul (PLUS
, e
.vector_mode ()));
1096 template<enum frm_op_type FRM_OP
= NO_FRM
>
1097 class vfnmsac
: public function_base
1100 bool has_rounding_mode_operand_p () const override
1102 return FRM_OP
== HAS_FRM
;
1105 bool may_require_frm_p () const override
{ return true; }
1107 bool has_merge_operand_p () const override
{ return false; }
1109 rtx
expand (function_expander
&e
) const override
1111 if (e
.op_info
->op
== OP_TYPE_vf
)
1112 return e
.use_ternop_insn (
1113 true, code_for_pred_mul_neg_scalar (PLUS
, e
.vector_mode ()));
1114 if (e
.op_info
->op
== OP_TYPE_vv
)
1115 return e
.use_ternop_insn (true,
1116 code_for_pred_mul_neg (PLUS
, e
.vector_mode ()));
1121 template<enum frm_op_type FRM_OP
= NO_FRM
>
1122 class vfmadd
: public function_base
1125 bool has_rounding_mode_operand_p () const override
1127 return FRM_OP
== HAS_FRM
;
1130 bool may_require_frm_p () const override
{ return true; }
1132 bool has_merge_operand_p () const override
{ return false; }
1134 rtx
expand (function_expander
&e
) const override
1136 if (e
.op_info
->op
== OP_TYPE_vf
)
1137 return e
.use_ternop_insn (false,
1138 code_for_pred_mul_scalar (PLUS
,
1140 if (e
.op_info
->op
== OP_TYPE_vv
)
1141 return e
.use_ternop_insn (false,
1142 code_for_pred_mul (PLUS
, e
.vector_mode ()));
1147 template<enum frm_op_type FRM_OP
= NO_FRM
>
1148 class vfnmsub
: public function_base
1151 bool has_rounding_mode_operand_p () const override
1153 return FRM_OP
== HAS_FRM
;
1156 bool may_require_frm_p () const override
{ return true; }
1158 bool has_merge_operand_p () const override
{ return false; }
1160 rtx
expand (function_expander
&e
) const override
1162 if (e
.op_info
->op
== OP_TYPE_vf
)
1163 return e
.use_ternop_insn (
1164 false, code_for_pred_mul_neg_scalar (PLUS
, e
.vector_mode ()));
1165 if (e
.op_info
->op
== OP_TYPE_vv
)
1166 return e
.use_ternop_insn (false,
1167 code_for_pred_mul_neg (PLUS
, e
.vector_mode ()));
1172 template<enum frm_op_type FRM_OP
= NO_FRM
>
1173 class vfnmacc
: public function_base
1176 bool has_rounding_mode_operand_p () const override
1178 return FRM_OP
== HAS_FRM
;
1181 bool may_require_frm_p () const override
{ return true; }
1183 bool has_merge_operand_p () const override
{ return false; }
1185 rtx
expand (function_expander
&e
) const override
1187 if (e
.op_info
->op
== OP_TYPE_vf
)
1188 return e
.use_ternop_insn (
1189 true, code_for_pred_mul_neg_scalar (MINUS
, e
.vector_mode ()));
1190 if (e
.op_info
->op
== OP_TYPE_vv
)
1191 return e
.use_ternop_insn (true,
1192 code_for_pred_mul_neg (MINUS
, e
.vector_mode ()));
1197 template<enum frm_op_type FRM_OP
= NO_FRM
>
1198 class vfmsac
: public function_base
1201 bool has_rounding_mode_operand_p () const override
1203 return FRM_OP
== HAS_FRM
;
1206 bool may_require_frm_p () const override
{ return true; }
1208 bool has_merge_operand_p () const override
{ return false; }
1210 rtx
expand (function_expander
&e
) const override
1212 if (e
.op_info
->op
== OP_TYPE_vf
)
1213 return e
.use_ternop_insn (true,
1214 code_for_pred_mul_scalar (MINUS
,
1216 if (e
.op_info
->op
== OP_TYPE_vv
)
1217 return e
.use_ternop_insn (true,
1218 code_for_pred_mul (MINUS
, e
.vector_mode ()));
1223 template<enum frm_op_type FRM_OP
= NO_FRM
>
1224 class vfnmadd
: public function_base
1227 bool has_rounding_mode_operand_p () const override
1229 return FRM_OP
== HAS_FRM
;
1232 bool may_require_frm_p () const override
{ return true; }
1234 bool has_merge_operand_p () const override
{ return false; }
1236 rtx
expand (function_expander
&e
) const override
1238 if (e
.op_info
->op
== OP_TYPE_vf
)
1239 return e
.use_ternop_insn (
1240 false, code_for_pred_mul_neg_scalar (MINUS
, e
.vector_mode ()));
1241 if (e
.op_info
->op
== OP_TYPE_vv
)
1242 return e
.use_ternop_insn (false,
1243 code_for_pred_mul_neg (MINUS
, e
.vector_mode ()));
1248 template<enum frm_op_type FRM_OP
= NO_FRM
>
1249 class vfmsub
: public function_base
1252 bool has_rounding_mode_operand_p () const override
1254 return FRM_OP
== HAS_FRM
;
1257 bool may_require_frm_p () const override
{ return true; }
1259 bool has_merge_operand_p () const override
{ return false; }
1261 rtx
expand (function_expander
&e
) const override
1263 if (e
.op_info
->op
== OP_TYPE_vf
)
1264 return e
.use_ternop_insn (false,
1265 code_for_pred_mul_scalar (MINUS
,
1267 if (e
.op_info
->op
== OP_TYPE_vv
)
1268 return e
.use_ternop_insn (false,
1269 code_for_pred_mul (MINUS
, e
.vector_mode ()));
1274 template<enum frm_op_type FRM_OP
= NO_FRM
>
1275 class vfwmacc
: public function_base
1278 bool has_rounding_mode_operand_p () const override
1280 return FRM_OP
== HAS_FRM
;
1283 bool may_require_frm_p () const override
{ return true; }
1285 bool has_merge_operand_p () const override
{ return false; }
1287 rtx
expand (function_expander
&e
) const override
1289 if (e
.op_info
->op
== OP_TYPE_vf
)
1290 return e
.use_widen_ternop_insn (
1291 code_for_pred_widen_mul_scalar (PLUS
, e
.vector_mode ()));
1292 if (e
.op_info
->op
== OP_TYPE_vv
)
1293 return e
.use_widen_ternop_insn (
1294 code_for_pred_widen_mul (PLUS
, e
.vector_mode ()));
1299 template<enum frm_op_type FRM_OP
= NO_FRM
>
1300 class vfwnmacc
: public function_base
1303 bool has_rounding_mode_operand_p () const override
1305 return FRM_OP
== HAS_FRM
;
1308 bool may_require_frm_p () const override
{ return true; }
1310 bool has_merge_operand_p () const override
{ return false; }
1312 rtx
expand (function_expander
&e
) const override
1314 if (e
.op_info
->op
== OP_TYPE_vf
)
1315 return e
.use_widen_ternop_insn (
1316 code_for_pred_widen_mul_neg_scalar (MINUS
, e
.vector_mode ()));
1317 if (e
.op_info
->op
== OP_TYPE_vv
)
1318 return e
.use_widen_ternop_insn (
1319 code_for_pred_widen_mul_neg (MINUS
, e
.vector_mode ()));
1324 template<enum frm_op_type FRM_OP
= NO_FRM
>
1325 class vfwmsac
: public function_base
1328 bool has_rounding_mode_operand_p () const override
1330 return FRM_OP
== HAS_FRM
;
1333 bool may_require_frm_p () const override
{ return true; }
1335 bool has_merge_operand_p () const override
{ return false; }
1337 rtx
expand (function_expander
&e
) const override
1339 if (e
.op_info
->op
== OP_TYPE_vf
)
1340 return e
.use_widen_ternop_insn (
1341 code_for_pred_widen_mul_scalar (MINUS
, e
.vector_mode ()));
1342 if (e
.op_info
->op
== OP_TYPE_vv
)
1343 return e
.use_widen_ternop_insn (
1344 code_for_pred_widen_mul (MINUS
, e
.vector_mode ()));
/* Implements vfwnmsac: widening fused negate-multiply-subtract.  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfwnmsac : public function_base
{
public:
  /* The *_frm variants carry an explicit rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  /* Any FP multiply-add may observe the dynamic FRM register.  */
  bool may_require_frm_p () const override { return true; }

  /* Ternary ops use the accumulator as destination; no separate merge.  */
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul_neg_scalar (PLUS, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_mul_neg (PLUS, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfsqrt7/vfrec7/vfclass/vfsgnj/vfsgnjx.  */
template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class float_misc : public function_base
{
public:
  /* The *_frm variants carry an explicit rounding-mode operand.  */
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_exact_insn (code_for_pred_scalar (UNSPEC, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv || e.op_info->op == OP_TYPE_v)
      return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfsgnjn: copy the negated sign of operand 2.  */
class vfsgnjn : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_exact_insn (code_for_pred_ncopysign_scalar (e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_exact_insn (code_for_pred_ncopysign (e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vmfeq/vmfne/vmflt/vmfgt/vmfle/vmfge.  */
template<rtx_code CODE>
class fcmp : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
	case OP_TYPE_vf: {
	  return e.use_compare_insn (CODE, code_for_pred_cmp_scalar (
					     e.vector_mode ()));
	}
	case OP_TYPE_vv: {
	  return e.use_compare_insn (CODE,
				     code_for_pred_cmp (e.vector_mode ()));
	}
      default:
	gcc_unreachable ();
      }
  }
};
/* Implements vfclass: classify each FP element; result is an integer
   vector, so the insn mode is taken from the FP argument.  */
class vfclass : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_class (e.arg_mode (0)));
  }
};
/* Implements vfcvt.x: FP to integer conversion using the current
   rounding mode.  */
template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class vfcvt_x : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    /* Result is integer; key the insn off the FP source mode.  */
    return e.use_exact_insn (code_for_pred_fcvt_x_f (UNSPEC, e.arg_mode (0)));
  }
};
/* Implements vfcvt.rtz.x: FP to integer conversion, round towards zero
   (FIX/UNSIGNED_FIX rtx codes).  */
template<rtx_code CODE>
class vfcvt_rtz_x : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred (CODE, e.arg_mode (0)));
  }
};
/* Implements vfcvt.f: integer to FP conversion (signed or unsigned).  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfcvt_f : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_x_v)
      return e.use_exact_insn (code_for_pred (FLOAT, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_xu_v)
      return e.use_exact_insn (
	code_for_pred (UNSIGNED_FLOAT, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfwcvt.x: widening FP to integer conversion.  */
template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class vfwcvt_x : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_widen_fcvt_x_f (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vfwcvt.rtz.x: widening FP to integer, round towards zero.  */
template<rtx_code CODE>
class vfwcvt_rtz_x : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_widen (CODE, e.vector_mode ()));
  }
};
/* Implements vfwcvt.f: widening conversion to FP, from narrower FP
   (f_v) or from integer (x_v/xu_v).  */
class vfwcvt_f : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_f_v)
      return e.use_exact_insn (code_for_pred_extend (e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_x_v)
      return e.use_exact_insn (code_for_pred_widen (FLOAT, e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_xu_v)
      return e.use_exact_insn (
	code_for_pred_widen (UNSIGNED_FLOAT, e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Implements vfncvt.x: narrowing FP to integer conversion.  */
template<int UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class vfncvt_x : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    /* The insn is keyed off the wide FP source mode.  */
    return e.use_exact_insn (
      code_for_pred_narrow_fcvt_x_f (UNSPEC, e.arg_mode (0)));
  }
};
/* Implements vfncvt.rtz.x: narrowing FP to integer, round towards zero.  */
template<rtx_code CODE>
class vfncvt_rtz_x : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_narrow (CODE, e.vector_mode ()));
  }
};
/* Implements vfncvt.f: narrowing conversion to FP, from wider FP (f_w)
   or from wider integer (x_w/xu_w).  */
template<enum frm_op_type FRM_OP = NO_FRM>
class vfncvt_f : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_f_w)
      return e.use_exact_insn (code_for_pred_trunc (e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_x_w)
      return e.use_exact_insn (code_for_pred_narrow (FLOAT, e.arg_mode (0)));
    if (e.op_info->op == OP_TYPE_xu_w)
      return e.use_exact_insn (
	code_for_pred_narrow (UNSIGNED_FLOAT, e.arg_mode (0)));
    gcc_unreachable ();
  }
};
/* Implements vfncvt.rod.f: narrowing FP conversion with round-towards-odd.  */
class vfncvt_rod_f : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_rod_trunc (e.vector_mode ()));
  }
};
/* Implements reduction instructions.  */
template<unsigned UNSPEC>
class reducop : public function_base
{
public:
  /* Reductions produce a scalar-in-vector result; mask policy does not
     apply to them.  */
  bool apply_mask_policy_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
  }
};
/* Implements floating-point reduction instructions.  */
template<unsigned UNSPEC, enum frm_op_type FRM_OP = NO_FRM>
class freducop : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  bool apply_mask_policy_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vmv/vfmv instructions (vmv.x.s/vfmv.f.s: extract element 0
   to a scalar register).  Scalar moves take no vl/tail/mask machinery.  */
class vmv : public function_base
{
public:
  bool apply_vl_p () const override { return false; }
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_extract_first (e.vector_mode ()));
  }
};
/* Implements vmv.s.x/vfmv.s.f: write a scalar into element 0.  */
class vmv_s : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_scalar_move_insn (code_for_pred_broadcast (e.vector_mode ()));
  }
};
/* Implements vslideup/vslidedown.  */
template<int UNSPEC>
class slideop : public function_base
{
public:
  bool has_merge_operand_p () const override
  {
    /* vslideup's destination doubles as the source of the preserved
       low elements, so it has no separate merge operand.  */
    if (UNSPEC == UNSPEC_VSLIDEUP)
      return false;
    else
      return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_slide (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vrgather.  */
class vrgather : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
      case OP_TYPE_vx:
	return e.use_exact_insn (
	  code_for_pred_gather_scalar (e.vector_mode ()));
      case OP_TYPE_vv:
	return e.use_exact_insn (code_for_pred_gather (e.vector_mode ()));
      default:
	gcc_unreachable ();
      }
  }
};
/* Implements vrgatherei16: gather with 16-bit index elements.  */
class vrgatherei16 : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_gatherei16 (e.vector_mode ()));
  }
};
/* Implements vcompress.  The mask selects which elements to keep, so
   ordinary mask predication/policy does not apply.  */
class vcompress : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_compress (e.vector_mode ()));
  }
};
/* Implements vundefined: produce an uninitialized vector value.  */
class vundefined : public function_base
{
public:
  /* No vl operand — there is no real operation performed.  */
  bool apply_vl_p () const override
  {
    return false;
  }

  rtx expand (function_expander &e) const override
  {
    return e.generate_insn (code_for_vundefined (e.vector_mode ()));
  }
};
/* Implements vreinterpret: bit-cast between vector types of equal size.  */
class vreinterpret : public function_base
{
public:
  /* Pure reinterpretation — no vl operand.  */
  bool apply_vl_p () const override
  {
    return false;
  }

  rtx expand (function_expander &e) const override
  {
    e.add_input_operand (0);
    return e.generate_insn (code_for_vreinterpret (e.ret_mode ()));
  }
};
/* Implements vlmul_ext: widen a vector to a larger-LMUL type; the low
   part is the source, the rest is unspecified.  */
class vlmul_ext : public function_base
{
public:
  bool apply_vl_p () const override
  {
    return false;
  }

  rtx expand (function_expander &e) const override
  {
    tree arg = CALL_EXPR_ARG (e.exp, 0);
    rtx src = expand_normal (arg);
    /* Write the source into the low part of the wider target.  */
    emit_move_insn (gen_lowpart (e.vector_mode (), e.target), src);
    return e.target;
  }
};
/* Implements vlmul_trunc: take the low part of a larger-LMUL vector.  */
class vlmul_trunc : public function_base
{
public:
  bool apply_vl_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 0));
    emit_move_insn (e.target, gen_lowpart (GET_MODE (e.target), src));
    return e.target;
  }
};
/* Implements vset: insert one vector into a tuple (or large-LMUL) value.  */
class vset : public function_base
{
public:
  bool apply_vl_p () const override { return false; }

  gimple *fold (gimple_folder &f) const override
  {
    tree rhs_tuple = gimple_call_arg (f.call, 0);
    /* LMUL > 1 non-tuple vector types are not structure,
       we can't use __val[index] to set the subpart.  */
    if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
      return NULL;
    tree index = gimple_call_arg (f.call, 1);
    tree rhs_vector = gimple_call_arg (f.call, 2);

    /* Replace the call with two statements: a copy of the full tuple
       to the call result, followed by an update of the individual vector.

       The fold routines expect the replacement statement to have the
       same lhs as the original call, so return the copy statement
       rather than the field update.  */
    gassign *copy = gimple_build_assign (unshare_expr (f.lhs), rhs_tuple);

    /* Get a reference to the individual vector.  */
    tree field = tuple_type_field (TREE_TYPE (f.lhs));
    tree lhs_array
      = build3 (COMPONENT_REF, TREE_TYPE (field), f.lhs, field, NULL_TREE);
    tree lhs_vector = build4 (ARRAY_REF, TREE_TYPE (rhs_vector), lhs_array,
			      index, NULL_TREE, NULL_TREE);
    gassign *update = gimple_build_assign (lhs_vector, rhs_vector);
    gsi_insert_after (f.gsi, update, GSI_SAME_STMT);

    return copy;
  }

  rtx expand (function_expander &e) const override
  {
    if (!e.target)
      return NULL_RTX;
    rtx dest = expand_normal (CALL_EXPR_ARG (e.exp, 0));
    gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (dest)));
    rtx index = expand_normal (CALL_EXPR_ARG (e.exp, 1));
    rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 2));
    /* Byte offset of the selected subvector within the whole value.  */
    poly_int64 offset = INTVAL (index) * GET_MODE_SIZE (GET_MODE (src));
    emit_move_insn (e.target, dest);
    rtx subreg = simplify_gen_subreg (GET_MODE (src), e.target,
				      GET_MODE (e.target), offset);
    emit_move_insn (subreg, src);
    return e.target;
  }
};
/* Implements vget: extract one vector from a tuple (or large-LMUL) value.  */
class vget : public function_base
{
public:
  bool apply_vl_p () const override { return false; }

  gimple *fold (gimple_folder &f) const override
  {
    /* Fold into a normal gimple component access.  */
    tree rhs_tuple = gimple_call_arg (f.call, 0);
    /* LMUL > 1 non-tuple vector types are not structure,
       we can't use __val[index] to get the subpart.  */
    if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (TREE_TYPE (rhs_tuple))))
      return NULL;
    tree index = gimple_call_arg (f.call, 1);
    tree field = tuple_type_field (TREE_TYPE (rhs_tuple));
    tree rhs_array
      = build3 (COMPONENT_REF, TREE_TYPE (field), rhs_tuple, field, NULL_TREE);
    tree rhs_vector = build4 (ARRAY_REF, TREE_TYPE (f.lhs), rhs_array, index,
			      NULL_TREE, NULL_TREE);
    return gimple_build_assign (f.lhs, rhs_vector);
  }

  rtx expand (function_expander &e) const override
  {
    if (!e.target)
      return NULL_RTX;
    rtx src = expand_normal (CALL_EXPR_ARG (e.exp, 0));
    gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (src)));
    rtx index = expand_normal (CALL_EXPR_ARG (e.exp, 1));
    /* Byte offset of the selected subvector within the source.  */
    poly_int64 offset = INTVAL (index) * GET_MODE_SIZE (GET_MODE (e.target));
    rtx subreg
      = simplify_gen_subreg (GET_MODE (e.target), src, GET_MODE (src), offset);
    return subreg;
  }
};
/* Implements vcreate: build a tuple (or large-LMUL) value from its
   component vectors.  */
class vcreate : public function_base
{
public:
  gimple *fold (gimple_folder &f) const override
  {
    unsigned int nargs = gimple_call_num_args (f.call);
    tree lhs_type = TREE_TYPE (f.lhs);
    /* LMUL > 1 non-tuple vector types are not structure,
       we can't use __val[index] to set the subpart.  */
    if (!riscv_v_ext_tuple_mode_p (TYPE_MODE (lhs_type)))
      return NULL;

    /* Replace the call with a clobber of the result (to prevent it from
       becoming upwards exposed) followed by stores into each individual
       vector of tuple.

       The fold routines expect the replacement statement to have the
       same lhs as the original call, so return the clobber statement
       rather than the final vector store.  */
    gassign *clobber = gimple_build_assign (f.lhs, build_clobber (lhs_type));

    for (unsigned int i = nargs; i-- > 0; )
      {
	tree rhs_vector = gimple_call_arg (f.call, i);
	tree field = tuple_type_field (TREE_TYPE (f.lhs));
	tree lhs_array = build3 (COMPONENT_REF, TREE_TYPE (field),
				 unshare_expr (f.lhs), field, NULL_TREE);
	tree lhs_vector = build4 (ARRAY_REF, TREE_TYPE (rhs_vector),
				  lhs_array, size_int (i),
				  NULL_TREE, NULL_TREE);
	gassign *assign = gimple_build_assign (lhs_vector, rhs_vector);
	gsi_insert_after (f.gsi, assign, GSI_SAME_STMT);
      }
    return clobber;
  }

  rtx expand (function_expander &e) const override
  {
    if (!e.target)
      return NULL_RTX;
    gcc_assert (riscv_v_ext_vector_mode_p (GET_MODE (e.target)));
    unsigned int nargs = call_expr_nargs (e.exp);
    for (unsigned int i = 0; i < nargs; i++)
      {
	rtx src = expand_normal (CALL_EXPR_ARG (e.exp, i));
	poly_int64 offset = i * GET_MODE_SIZE (GET_MODE (src));
	rtx subreg = simplify_gen_subreg (GET_MODE (src), e.target,
					  GET_MODE (e.target), offset);
	emit_move_insn (subreg, src);
      }

    return e.target;
  }
};
/* Implements read_vl: read the vl CSR left by a fault-only-first load.  */
class read_vl : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_CSR;
  }

  rtx expand (function_expander &e) const override
  {
    if (Pmode == SImode)
      emit_insn (gen_read_vlsi (e.target));
    else
      /* RV64: zero-extend the 32-bit vl value.  */
      emit_insn (gen_read_vldi_zero_extend (e.target));
    return e.target;
  }
};
/* Implements vleff (fault-only-first load; updates vl, hence CP_WRITE_CSR).  */
class vleff : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY | CP_WRITE_CSR;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    return pred != PRED_TYPE_none;
  }

  gimple *fold (gimple_folder &f) const override
  {
    /* Split off the new_vl store; see fold_fault_load.  */
    return fold_fault_load (f);
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_contiguous_load_insn (
      code_for_pred_fault_load (e.vector_mode ()));
  }
};
/* Implements vlenb.  */
class vlenb : public function_base
{
public:
  bool apply_vl_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    machine_mode mode = GET_MODE (e.target);
    /* VLENB is a compile-time poly constant: bytes per vector register.  */
    rtx vlenb = gen_int_mode (BYTES_PER_RISCV_VECTOR, mode);
    emit_move_insn (e.target, vlenb);
    return e.target;
  }
};
/* Implements vlseg.v.  */
class vlseg : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    return pred != PRED_TYPE_none;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_unit_strided_load (e.vector_mode ()));
  }
};
/* Implements vsseg.v.  Stores have no tail/mask policy operands.  */
class vsseg : public function_base
{
public:
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }

  unsigned int call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index) const override
  {
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_unit_strided_store (e.vector_mode ()));
  }
};
/* Implements vlsseg.v (strided segment load).  */
class vlsseg : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    return pred != PRED_TYPE_none;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_strided_load (e.vector_mode ()));
  }
};
/* Implements vssseg.v (strided segment store).  */
class vssseg : public function_base
{
public:
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }

  unsigned int call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index) const override
  {
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_strided_store (e.vector_mode ()));
  }
};
/* Implements vluxseg/vloxseg (indexed segment load); UNSPEC selects
   ordered vs unordered.  */
template<int UNSPEC>
class seg_indexed_load : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index) const override
  {
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_indexed_load (UNSPEC, e.vector_mode (), e.index_mode ()));
  }
};
/* Implements vsuxseg/vsoxseg (indexed segment store); UNSPEC selects
   ordered vs unordered.  */
template<int UNSPEC>
class seg_indexed_store : public function_base
{
public:
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }

  unsigned int call_properties (const function_instance &) const override
  {
    return CP_WRITE_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index) const override
  {
    return true;
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (
      code_for_pred_indexed_store (UNSPEC, e.vector_mode (), e.index_mode ()));
  }
};
/* Implements vlsegff.v (fault-only-first segment load; updates vl).  */
class vlsegff : public function_base
{
public:
  unsigned int call_properties (const function_instance &) const override
  {
    return CP_READ_MEMORY | CP_WRITE_CSR;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    return pred != PRED_TYPE_none;
  }

  gimple *fold (gimple_folder &f) const override
  {
    /* Split off the new_vl store; see fold_fault_load.  */
    return fold_fault_load (f);
  }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_fault_load (e.vector_mode ()));
  }
};
/* Implements
 * th.vl(b/h/w)[u].v/th.vs(b/h/w)[u].v/th.vls(b/h/w)[u].v/th.vss(b/h/w)[u].v/
 * th.vlx(b/h/w)[u].v/th.vs[u]x(b/h/w).v
 * (XTheadVector fixed-width loads/stores).  */
template<bool STORE_P, lst_type LST_TYPE, int UNSPEC>
class th_loadstore_width : public function_base
{
public:
  bool apply_tail_policy_p () const override { return !STORE_P; }
  bool apply_mask_policy_p () const override { return !STORE_P; }

  unsigned int call_properties (const function_instance &) const override
  {
    if (STORE_P)
      return CP_WRITE_MEMORY;
    else
      return CP_READ_MEMORY;
  }

  bool can_be_overloaded_p (enum predication_type_index pred) const override
  {
    if (STORE_P || LST_TYPE == LST_INDEXED)
      return true;
    return pred != PRED_TYPE_none;
  }

  rtx expand (function_expander &e) const override
  {
    gcc_assert (TARGET_XTHEADVECTOR);
    if (LST_TYPE == LST_INDEXED)
      {
	if (STORE_P)
	  return e.use_exact_insn (
	    code_for_pred_indexed_store_width (UNSPEC, UNSPEC,
					       e.vector_mode ()));
	else
	  return e.use_exact_insn (
	    code_for_pred_indexed_load_width (UNSPEC, e.vector_mode ()));
      }
    else if (LST_TYPE == LST_STRIDED)
      {
	if (STORE_P)
	  return e.use_contiguous_store_insn (
	    code_for_pred_strided_store_width (UNSPEC, e.vector_mode ()));
	else
	  return e.use_contiguous_load_insn (
	    code_for_pred_strided_load_width (UNSPEC, e.vector_mode ()));
      }
    else
      {
	if (STORE_P)
	  return e.use_contiguous_store_insn (
	    code_for_pred_store_width (UNSPEC, e.vector_mode ()));
	else
	  return e.use_contiguous_load_insn (
	    code_for_pred_mov_width (UNSPEC, e.vector_mode ()));
      }
  }
};
/* Implements vext.x.v (XTheadVector scalar element extract).  */
class th_extract : public function_base
{
public:
  bool apply_vl_p () const override { return false; }
  bool apply_tail_policy_p () const override { return false; }
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    gcc_assert (TARGET_XTHEADVECTOR);
    return e.use_exact_insn (code_for_pred_th_extract (e.vector_mode ()));
  }
};
/* Below implements are vector crypto.  */
/* Implements vandn.[vv,vx].  */
class vandn : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
      case OP_TYPE_vv:
	return e.use_exact_insn (code_for_pred_vandn (e.vector_mode ()));
      case OP_TYPE_vx:
	return e.use_exact_insn (code_for_pred_vandn_scalar (e.vector_mode ()));
      default:
	gcc_unreachable ();
      }
  }
};
/* Implements vrol/vror/clz/ctz.  */
template<rtx_code CODE>
class bitmanip : public function_base
{
public:
  /* vclz/vctz have no tail/mask policy and no merge operand; the
     rotates keep the usual predication machinery.  */
  bool apply_tail_policy_p () const override
  {
    return (CODE == CLZ || CODE == CTZ) ? false : true;
  }
  bool apply_mask_policy_p () const override
  {
    return (CODE == CLZ || CODE == CTZ) ? false : true;
  }
  bool has_merge_operand_p () const override
  {
    return (CODE == CLZ || CODE == CTZ) ? false : true;
  }

  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
      case OP_TYPE_v:
      case OP_TYPE_vv:
	return e.use_exact_insn (code_for_pred_v (CODE, e.vector_mode ()));
      case OP_TYPE_vx:
	return e.use_exact_insn (code_for_pred_v_scalar (CODE, e.vector_mode ()));
      default:
	gcc_unreachable ();
      }
  }
};
/* Implements vbrev/vbrev8/vrev8.  */
template<int UNSPEC>
class b_reverse : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vwsll (widening shift-left logical).  */
class vwsll : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
      case OP_TYPE_vv:
	return e.use_exact_insn (code_for_pred_vwsll (e.vector_mode ()));
      case OP_TYPE_vx:
	return e.use_exact_insn (code_for_pred_vwsll_scalar (e.vector_mode ()));
      default:
	gcc_unreachable ();
      }
  }
};
/* Implements clmul (vclmul/vclmulh carry-less multiply).  */
template<int UNSPEC>
class clmul : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    switch (e.op_info->op)
      {
      case OP_TYPE_vv:
	return e.use_exact_insn (
	  code_for_pred_vclmul (UNSPEC, e.vector_mode ()));
      case OP_TYPE_vx:
	return e.use_exact_insn
	  (code_for_pred_vclmul_scalar (UNSPEC, e.vector_mode ()));
      default:
	gcc_unreachable ();
      }
  }
};
/* Implements vghsh/vsh2ms/vsha2c[hl].  These use the destination as an
   input, so the ordinary mask/merge machinery does not apply.  */
template<int UNSPEC>
class vg_nhab : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_v (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vgmul/vaes*.  */
template<int UNSPEC>
class crypto_vv : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    poly_uint64 nunits = 0U;
    switch (e.op_info->op)
      {
      case OP_TYPE_vv:
	if (UNSPEC == UNSPEC_VGMUL)
	  return e.use_exact_insn
	    (code_for_pred_crypto_vv (UNSPEC, UNSPEC, e.vector_mode ()));
	else
	  /* UNSPEC + 1 selects the .vv variant of the paired unspec.  */
	  return e.use_exact_insn
	    (code_for_pred_crypto_vv (UNSPEC + 1, UNSPEC + 1, e.vector_mode ()));
      case OP_TYPE_vs:
	/* Calculate the ratio between arg0 and arg1.  */
	gcc_assert (multiple_p (GET_MODE_BITSIZE (e.arg_mode (0)),
				GET_MODE_BITSIZE (e.arg_mode (1)), &nunits));
	/* UNSPEC + 2 selects the .vs variant; the x1/x2/x4/x8/x16 insns
	   encode the group-size ratio between the two operands.  */
	if (maybe_eq (nunits, 1U))
	  return e.use_exact_insn (code_for_pred_crypto_vvx1_scalar
				   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	else if (maybe_eq (nunits, 2U))
	  return e.use_exact_insn (code_for_pred_crypto_vvx2_scalar
				   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	else if (maybe_eq (nunits, 4U))
	  return e.use_exact_insn (code_for_pred_crypto_vvx4_scalar
				   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	else if (maybe_eq (nunits, 8U))
	  return e.use_exact_insn (code_for_pred_crypto_vvx8_scalar
				   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
	else
	  return e.use_exact_insn (code_for_pred_crypto_vvx16_scalar
				   (UNSPEC + 2, UNSPEC + 2, e.vector_mode ()));
      default:
	gcc_unreachable ();
      }
  }
};
/* Implements vaeskf1/vsm4k.  */
template<int UNSPEC>
class crypto_vi : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn
      (code_for_pred_crypto_vi_scalar (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vaeskf2/vsm3c.  */
template<int UNSPEC>
class vaeskf2_vsm3c : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }
  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn
      (code_for_pred_vi_nomaskedoff_scalar (UNSPEC, e.vector_mode ()));
  }
};
/* Implements vsm3me.  */
class vsm3me : public function_base
{
public:
  bool apply_mask_policy_p () const override { return false; }
  bool use_mask_predication_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_vsm3me (e.vector_mode ()));
  }
};
/* Implements vfncvtbf16_f: narrow FP32 to bfloat16.  */
template <enum frm_op_type FRM_OP = NO_FRM>
class vfncvtbf16_f : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_trunc_to_bf16 (e.vector_mode ()));
  }
};
/* Implements vfwcvtbf16_f: widen bfloat16 to FP32 (exact, no FRM).  */
class vfwcvtbf16_f : public function_base
{
public:
  rtx expand (function_expander &e) const override
  {
    return e.use_exact_insn (code_for_pred_extend_bf16_to (e.vector_mode ()));
  }
};
/* Implements vfwmaccbf16: widening bfloat16 multiply-accumulate.  */
template <enum frm_op_type FRM_OP = NO_FRM>
class vfwmaccbf16 : public function_base
{
public:
  bool has_rounding_mode_operand_p () const override
  {
    return FRM_OP == HAS_FRM;
  }

  bool may_require_frm_p () const override { return true; }

  bool has_merge_operand_p () const override { return false; }

  rtx expand (function_expander &e) const override
  {
    if (e.op_info->op == OP_TYPE_vf)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_bf16_mul_scalar (e.vector_mode ()));
    if (e.op_info->op == OP_TYPE_vv)
      return e.use_widen_ternop_insn (
	code_for_pred_widen_bf16_mul (e.vector_mode ()));
    gcc_unreachable ();
  }
};
/* Static singleton instances of the function_base implementations above;
   these are referenced by the builtin function tables.  */
static CONSTEXPR const vsetvl<false> vsetvl_obj;
static CONSTEXPR const vsetvl<true> vsetvlmax_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vle_obj;
static CONSTEXPR const loadstore<true, LST_UNIT_STRIDE, false> vse_obj;
static CONSTEXPR const loadstore<false, LST_UNIT_STRIDE, false> vlm_obj;
static CONSTEXPR const loadstore<true, LST_UNIT_STRIDE, false> vsm_obj;
static CONSTEXPR const loadstore<false, LST_STRIDED, false> vlse_obj;
static CONSTEXPR const loadstore<true, LST_STRIDED, false> vsse_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei8_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei16_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei32_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, false> vluxei64_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei8_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei16_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei32_obj;
static CONSTEXPR const loadstore<false, LST_INDEXED, true> vloxei64_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei8_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei16_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei32_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, false> vsuxei64_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei8_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei16_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei32_obj;
static CONSTEXPR const loadstore<true, LST_INDEXED, true> vsoxei64_obj;
static CONSTEXPR const binop<PLUS> vadd_obj;
static CONSTEXPR const binop<MINUS> vsub_obj;
static CONSTEXPR const vrsub vrsub_obj;
static CONSTEXPR const binop<AND> vand_obj;
static CONSTEXPR const binop<IOR> vor_obj;
static CONSTEXPR const binop<XOR> vxor_obj;
static CONSTEXPR const binop<ASHIFT> vsll_obj;
static CONSTEXPR const binop<ASHIFTRT> vsra_obj;
static CONSTEXPR const binop<LSHIFTRT> vsrl_obj;
static CONSTEXPR const binop<SMIN> vmin_obj;
static CONSTEXPR const binop<SMAX> vmax_obj;
static CONSTEXPR const binop<UMIN> vminu_obj;
static CONSTEXPR const binop<UMAX> vmaxu_obj;
static CONSTEXPR const binop<MULT> vmul_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHS> vmulh_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHU> vmulhu_obj;
static CONSTEXPR const vmulh<UNSPEC_VMULHSU> vmulhsu_obj;
static CONSTEXPR const binop<DIV> vdiv_obj;
static CONSTEXPR const binop<MOD> vrem_obj;
static CONSTEXPR const binop<UDIV> vdivu_obj;
static CONSTEXPR const binop<UMOD> vremu_obj;
static CONSTEXPR const unop<NEG> vneg_obj;
static CONSTEXPR const unop<NOT> vnot_obj;
static CONSTEXPR const ext<SIGN_EXTEND> vsext_obj;
static CONSTEXPR const ext<ZERO_EXTEND> vzext_obj;
static CONSTEXPR const widen_binop<PLUS, SIGN_EXTEND>vwadd_obj;
static CONSTEXPR const widen_binop<MINUS, SIGN_EXTEND>vwsub_obj;
static CONSTEXPR const widen_binop<MULT, SIGN_EXTEND>vwmul_obj;
static CONSTEXPR const widen_binop<PLUS, ZERO_EXTEND>vwaddu_obj;
static CONSTEXPR const widen_binop<MINUS, ZERO_EXTEND>vwsubu_obj;
static CONSTEXPR const widen_binop<MULT, ZERO_EXTEND>vwmulu_obj;
static CONSTEXPR const vwmulsu vwmulsu_obj;
static CONSTEXPR const vwcvt<SIGN_EXTEND> vwcvt_x_obj;
static CONSTEXPR const vwcvt<ZERO_EXTEND> vwcvtu_x_obj;
static CONSTEXPR const vadc vadc_obj;
static CONSTEXPR const vsbc vsbc_obj;
static CONSTEXPR const vmadc vmadc_obj;
static CONSTEXPR const vmsbc vmsbc_obj;
static CONSTEXPR const vnshift<LSHIFTRT> vnsrl_obj;
static CONSTEXPR const vnshift<ASHIFTRT> vnsra_obj;
static CONSTEXPR const vncvt_x vncvt_x_obj;
static CONSTEXPR const vmerge vmerge_obj;
static CONSTEXPR const vmv_v vmv_v_obj;
static CONSTEXPR const icmp<EQ> vmseq_obj;
static CONSTEXPR const icmp<NE> vmsne_obj;
static CONSTEXPR const icmp<LT> vmslt_obj;
static CONSTEXPR const icmp<GT> vmsgt_obj;
static CONSTEXPR const icmp<LE> vmsle_obj;
static CONSTEXPR const icmp<GE> vmsge_obj;
static CONSTEXPR const icmp<LTU> vmsltu_obj;
static CONSTEXPR const icmp<GTU> vmsgtu_obj;
static CONSTEXPR const icmp<LEU> vmsleu_obj;
static CONSTEXPR const icmp<GEU> vmsgeu_obj;
static CONSTEXPR const vmacc vmacc_obj;
static CONSTEXPR const vnmsac vnmsac_obj;
static CONSTEXPR const vmadd vmadd_obj;
static CONSTEXPR const vnmsub vnmsub_obj;
static CONSTEXPR const vwmacc vwmacc_obj;
static CONSTEXPR const vwmaccu vwmaccu_obj;
static CONSTEXPR const vwmaccsu vwmaccsu_obj;
static CONSTEXPR const vwmaccus vwmaccus_obj;
static CONSTEXPR const binop<SS_PLUS> vsadd_obj;
static CONSTEXPR const binop<SS_MINUS> vssub_obj;
static CONSTEXPR const binop<US_PLUS> vsaddu_obj;
static CONSTEXPR const binop<US_MINUS> vssubu_obj;
static CONSTEXPR const sat_op<UNSPEC_VAADDU> vaaddu_obj;
static CONSTEXPR const sat_op<UNSPEC_VAADD> vaadd_obj;
static CONSTEXPR const sat_op<UNSPEC_VASUBU> vasubu_obj;
static CONSTEXPR const sat_op<UNSPEC_VASUB> vasub_obj;
static CONSTEXPR const sat_op<UNSPEC_VSMUL> vsmul_obj;
static CONSTEXPR const sat_op<UNSPEC_VSSRL> vssrl_obj;
static CONSTEXPR const sat_op<UNSPEC_VSSRA> vssra_obj;
static CONSTEXPR const vnclip<UNSPEC_VNCLIP> vnclip_obj;
static CONSTEXPR const vnclip<UNSPEC_VNCLIPU> vnclipu_obj;
static CONSTEXPR const mask_logic<AND> vmand_obj;
static CONSTEXPR const mask_nlogic<AND> vmnand_obj;
static CONSTEXPR const mask_notlogic<AND> vmandn_obj;
static CONSTEXPR const mask_logic<XOR> vmxor_obj;
static CONSTEXPR const mask_logic<IOR> vmor_obj;
static CONSTEXPR const mask_nlogic<IOR> vmnor_obj;
2578 static CONSTEXPR
const mask_notlogic
<IOR
> vmorn_obj
;
2579 static CONSTEXPR
const mask_nlogic
<XOR
> vmxnor_obj
;
2580 static CONSTEXPR
const vmmv vmmv_obj
;
2581 static CONSTEXPR
const vmclr vmclr_obj
;
2582 static CONSTEXPR
const vmset vmset_obj
;
2583 static CONSTEXPR
const vmnot vmnot_obj
;
2584 static CONSTEXPR
const vcpop vcpop_obj
;
2585 static CONSTEXPR
const vfirst vfirst_obj
;
2586 static CONSTEXPR
const mask_misc
<UNSPEC_VMSBF
> vmsbf_obj
;
2587 static CONSTEXPR
const mask_misc
<UNSPEC_VMSIF
> vmsif_obj
;
2588 static CONSTEXPR
const mask_misc
<UNSPEC_VMSOF
> vmsof_obj
;
2589 static CONSTEXPR
const viota viota_obj
;
2590 static CONSTEXPR
const vid vid_obj
;
2591 static CONSTEXPR
const binop
<PLUS
, true> vfadd_obj
;
2592 static CONSTEXPR
const binop
<MINUS
, true> vfsub_obj
;
2593 static CONSTEXPR
const binop
<PLUS
, true, HAS_FRM
> vfadd_frm_obj
;
2594 static CONSTEXPR
const binop
<MINUS
, true, HAS_FRM
> vfsub_frm_obj
;
2595 static CONSTEXPR
const reverse_binop
<MINUS
> vfrsub_obj
;
2596 static CONSTEXPR
const reverse_binop
<MINUS
, HAS_FRM
> vfrsub_frm_obj
;
2597 static CONSTEXPR
const widen_binop_fp
<PLUS
> vfwadd_obj
;
2598 static CONSTEXPR
const widen_binop_fp
<PLUS
, HAS_FRM
> vfwadd_frm_obj
;
2599 static CONSTEXPR
const widen_binop_fp
<MINUS
> vfwsub_obj
;
2600 static CONSTEXPR
const widen_binop_fp
<MINUS
, HAS_FRM
> vfwsub_frm_obj
;
2601 static CONSTEXPR
const binop
<MULT
, true> vfmul_obj
;
2602 static CONSTEXPR
const binop
<MULT
, true, HAS_FRM
> vfmul_frm_obj
;
2603 static CONSTEXPR
const binop
<DIV
, true> vfdiv_obj
;
2604 static CONSTEXPR
const binop
<DIV
, true, HAS_FRM
> vfdiv_frm_obj
;
2605 static CONSTEXPR
const reverse_binop
<DIV
> vfrdiv_obj
;
2606 static CONSTEXPR
const reverse_binop
<DIV
, HAS_FRM
> vfrdiv_frm_obj
;
2607 static CONSTEXPR
const widen_binop_fp
<MULT
> vfwmul_obj
;
2608 static CONSTEXPR
const widen_binop_fp
<MULT
, HAS_FRM
> vfwmul_frm_obj
;
2609 static CONSTEXPR
const vfmacc
<NO_FRM
> vfmacc_obj
;
2610 static CONSTEXPR
const vfmacc
<HAS_FRM
> vfmacc_frm_obj
;
2611 static CONSTEXPR
const vfnmsac
<NO_FRM
> vfnmsac_obj
;
2612 static CONSTEXPR
const vfnmsac
<HAS_FRM
> vfnmsac_frm_obj
;
2613 static CONSTEXPR
const vfmadd
<NO_FRM
> vfmadd_obj
;
2614 static CONSTEXPR
const vfmadd
<HAS_FRM
> vfmadd_frm_obj
;
2615 static CONSTEXPR
const vfnmsub
<NO_FRM
> vfnmsub_obj
;
2616 static CONSTEXPR
const vfnmsub
<HAS_FRM
> vfnmsub_frm_obj
;
2617 static CONSTEXPR
const vfnmacc
<NO_FRM
> vfnmacc_obj
;
2618 static CONSTEXPR
const vfnmacc
<HAS_FRM
> vfnmacc_frm_obj
;
2619 static CONSTEXPR
const vfmsac
<NO_FRM
> vfmsac_obj
;
2620 static CONSTEXPR
const vfmsac
<HAS_FRM
> vfmsac_frm_obj
;
2621 static CONSTEXPR
const vfnmadd
<NO_FRM
> vfnmadd_obj
;
2622 static CONSTEXPR
const vfnmadd
<HAS_FRM
> vfnmadd_frm_obj
;
2623 static CONSTEXPR
const vfmsub
<NO_FRM
> vfmsub_obj
;
2624 static CONSTEXPR
const vfmsub
<HAS_FRM
> vfmsub_frm_obj
;
2625 static CONSTEXPR
const vfwmacc
<NO_FRM
> vfwmacc_obj
;
2626 static CONSTEXPR
const vfwmacc
<HAS_FRM
> vfwmacc_frm_obj
;
2627 static CONSTEXPR
const vfwnmacc
<NO_FRM
> vfwnmacc_obj
;
2628 static CONSTEXPR
const vfwnmacc
<HAS_FRM
> vfwnmacc_frm_obj
;
2629 static CONSTEXPR
const vfwmsac
<NO_FRM
> vfwmsac_obj
;
2630 static CONSTEXPR
const vfwmsac
<HAS_FRM
> vfwmsac_frm_obj
;
2631 static CONSTEXPR
const vfwnmsac
<NO_FRM
> vfwnmsac_obj
;
2632 static CONSTEXPR
const vfwnmsac
<HAS_FRM
> vfwnmsac_frm_obj
;
2633 static CONSTEXPR
const unop
<SQRT
> vfsqrt_obj
;
2634 static CONSTEXPR
const unop
<SQRT
, HAS_FRM
> vfsqrt_frm_obj
;
2635 static CONSTEXPR
const float_misc
<UNSPEC_VFRSQRT7
> vfrsqrt7_obj
;
2636 static CONSTEXPR
const float_misc
<UNSPEC_VFREC7
> vfrec7_obj
;
2637 static CONSTEXPR
const float_misc
<UNSPEC_VFREC7
, HAS_FRM
> vfrec7_frm_obj
;
2638 static CONSTEXPR
const binop
<SMIN
> vfmin_obj
;
2639 static CONSTEXPR
const binop
<SMAX
> vfmax_obj
;
2640 static CONSTEXPR
const float_misc
<UNSPEC_VCOPYSIGN
> vfsgnj_obj
;
2641 static CONSTEXPR
const vfsgnjn vfsgnjn_obj
;
2642 static CONSTEXPR
const float_misc
<UNSPEC_VXORSIGN
> vfsgnjx_obj
;
2643 static CONSTEXPR
const unop
<NEG
> vfneg_obj
;
2644 static CONSTEXPR
const unop
<ABS
> vfabs_obj
;
2645 static CONSTEXPR
const fcmp
<EQ
> vmfeq_obj
;
2646 static CONSTEXPR
const fcmp
<NE
> vmfne_obj
;
2647 static CONSTEXPR
const fcmp
<LT
> vmflt_obj
;
2648 static CONSTEXPR
const fcmp
<GT
> vmfgt_obj
;
2649 static CONSTEXPR
const fcmp
<LE
> vmfle_obj
;
2650 static CONSTEXPR
const fcmp
<GE
> vmfge_obj
;
2651 static CONSTEXPR
const vfclass vfclass_obj
;
2652 static CONSTEXPR
const vmerge vfmerge_obj
;
2653 static CONSTEXPR
const vmv_v vfmv_v_obj
;
2654 static CONSTEXPR
const vfcvt_x
<UNSPEC_VFCVT
> vfcvt_x_obj
;
2655 static CONSTEXPR
const vfcvt_x
<UNSPEC_VFCVT
, HAS_FRM
> vfcvt_x_frm_obj
;
2656 static CONSTEXPR
const vfcvt_x
<UNSPEC_UNSIGNED_VFCVT
> vfcvt_xu_obj
;
2657 static CONSTEXPR
const vfcvt_x
<UNSPEC_UNSIGNED_VFCVT
, HAS_FRM
> vfcvt_xu_frm_obj
;
2658 static CONSTEXPR
const vfcvt_rtz_x
<FIX
> vfcvt_rtz_x_obj
;
2659 static CONSTEXPR
const vfcvt_rtz_x
<UNSIGNED_FIX
> vfcvt_rtz_xu_obj
;
2660 static CONSTEXPR
const vfcvt_f
<NO_FRM
> vfcvt_f_obj
;
2661 static CONSTEXPR
const vfcvt_f
<HAS_FRM
> vfcvt_f_frm_obj
;
2662 static CONSTEXPR
const vfwcvt_x
<UNSPEC_VFCVT
> vfwcvt_x_obj
;
2663 static CONSTEXPR
const vfwcvt_x
<UNSPEC_VFCVT
, HAS_FRM
> vfwcvt_x_frm_obj
;
2664 static CONSTEXPR
const vfwcvt_x
<UNSPEC_UNSIGNED_VFCVT
> vfwcvt_xu_obj
;
2665 static CONSTEXPR
const vfwcvt_x
<UNSPEC_UNSIGNED_VFCVT
, HAS_FRM
> vfwcvt_xu_frm_obj
;
2666 static CONSTEXPR
const vfwcvt_rtz_x
<FIX
> vfwcvt_rtz_x_obj
;
2667 static CONSTEXPR
const vfwcvt_rtz_x
<UNSIGNED_FIX
> vfwcvt_rtz_xu_obj
;
2668 static CONSTEXPR
const vfwcvt_f vfwcvt_f_obj
;
2669 static CONSTEXPR
const vfncvt_x
<UNSPEC_VFCVT
> vfncvt_x_obj
;
2670 static CONSTEXPR
const vfncvt_x
<UNSPEC_VFCVT
, HAS_FRM
> vfncvt_x_frm_obj
;
2671 static CONSTEXPR
const vfncvt_x
<UNSPEC_UNSIGNED_VFCVT
> vfncvt_xu_obj
;
2672 static CONSTEXPR
const vfncvt_x
<UNSPEC_UNSIGNED_VFCVT
, HAS_FRM
> vfncvt_xu_frm_obj
;
2673 static CONSTEXPR
const vfncvt_rtz_x
<FIX
> vfncvt_rtz_x_obj
;
2674 static CONSTEXPR
const vfncvt_rtz_x
<UNSIGNED_FIX
> vfncvt_rtz_xu_obj
;
2675 static CONSTEXPR
const vfncvt_f
<NO_FRM
> vfncvt_f_obj
;
2676 static CONSTEXPR
const vfncvt_f
<HAS_FRM
> vfncvt_f_frm_obj
;
2677 static CONSTEXPR
const vfncvt_rod_f vfncvt_rod_f_obj
;
2678 static CONSTEXPR
const reducop
<UNSPEC_REDUC_SUM
> vredsum_obj
;
2679 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MAXU
> vredmaxu_obj
;
2680 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MAX
> vredmax_obj
;
2681 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MINU
> vredminu_obj
;
2682 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MIN
> vredmin_obj
;
2683 static CONSTEXPR
const reducop
<UNSPEC_REDUC_AND
> vredand_obj
;
2684 static CONSTEXPR
const reducop
<UNSPEC_REDUC_OR
> vredor_obj
;
2685 static CONSTEXPR
const reducop
<UNSPEC_REDUC_XOR
> vredxor_obj
;
2686 static CONSTEXPR
const reducop
<UNSPEC_WREDUC_SUM
> vwredsum_obj
;
2687 static CONSTEXPR
const reducop
<UNSPEC_WREDUC_SUMU
> vwredsumu_obj
;
2688 static CONSTEXPR
const freducop
<UNSPEC_REDUC_SUM_UNORDERED
> vfredusum_obj
;
2689 static CONSTEXPR
const freducop
<UNSPEC_REDUC_SUM_UNORDERED
, HAS_FRM
> vfredusum_frm_obj
;
2690 static CONSTEXPR
const freducop
<UNSPEC_REDUC_SUM_ORDERED
> vfredosum_obj
;
2691 static CONSTEXPR
const freducop
<UNSPEC_REDUC_SUM_ORDERED
, HAS_FRM
> vfredosum_frm_obj
;
2692 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MAX
> vfredmax_obj
;
2693 static CONSTEXPR
const reducop
<UNSPEC_REDUC_MIN
> vfredmin_obj
;
2694 static CONSTEXPR
const freducop
<UNSPEC_WREDUC_SUM_UNORDERED
> vfwredusum_obj
;
2695 static CONSTEXPR
const freducop
<UNSPEC_WREDUC_SUM_UNORDERED
, HAS_FRM
> vfwredusum_frm_obj
;
2696 static CONSTEXPR
const freducop
<UNSPEC_WREDUC_SUM_ORDERED
> vfwredosum_obj
;
2697 static CONSTEXPR
const freducop
<UNSPEC_WREDUC_SUM_ORDERED
, HAS_FRM
> vfwredosum_frm_obj
;
2698 static CONSTEXPR
const vmv vmv_x_obj
;
2699 static CONSTEXPR
const vmv_s vmv_s_obj
;
2700 static CONSTEXPR
const vmv vfmv_f_obj
;
2701 static CONSTEXPR
const vmv_s vfmv_s_obj
;
2702 static CONSTEXPR
const slideop
<UNSPEC_VSLIDEUP
> vslideup_obj
;
2703 static CONSTEXPR
const slideop
<UNSPEC_VSLIDEDOWN
> vslidedown_obj
;
2704 static CONSTEXPR
const slideop
<UNSPEC_VSLIDE1UP
> vslide1up_obj
;
2705 static CONSTEXPR
const slideop
<UNSPEC_VSLIDE1DOWN
> vslide1down_obj
;
2706 static CONSTEXPR
const slideop
<UNSPEC_VFSLIDE1UP
> vfslide1up_obj
;
2707 static CONSTEXPR
const slideop
<UNSPEC_VFSLIDE1DOWN
> vfslide1down_obj
;
2708 static CONSTEXPR
const vrgather vrgather_obj
;
2709 static CONSTEXPR
const vrgatherei16 vrgatherei16_obj
;
2710 static CONSTEXPR
const vcompress vcompress_obj
;
2711 static CONSTEXPR
const vundefined vundefined_obj
;
2712 static CONSTEXPR
const vreinterpret vreinterpret_obj
;
2713 static CONSTEXPR
const vlmul_ext vlmul_ext_obj
;
2714 static CONSTEXPR
const vlmul_trunc vlmul_trunc_obj
;
2715 static CONSTEXPR
const vset vset_obj
;
2716 static CONSTEXPR
const vget vget_obj
;
2717 static CONSTEXPR
const vcreate vcreate_obj
;
2718 static CONSTEXPR
const read_vl read_vl_obj
;
2719 static CONSTEXPR
const vleff vleff_obj
;
2720 static CONSTEXPR
const vlenb vlenb_obj
;
2721 static CONSTEXPR
const vlseg vlseg_obj
;
2722 static CONSTEXPR
const vsseg vsseg_obj
;
2723 static CONSTEXPR
const vlsseg vlsseg_obj
;
2724 static CONSTEXPR
const vssseg vssseg_obj
;
2725 static CONSTEXPR
const seg_indexed_load
<UNSPEC_UNORDERED
> vluxseg_obj
;
2726 static CONSTEXPR
const seg_indexed_load
<UNSPEC_ORDERED
> vloxseg_obj
;
2727 static CONSTEXPR
const seg_indexed_store
<UNSPEC_UNORDERED
> vsuxseg_obj
;
2728 static CONSTEXPR
const seg_indexed_store
<UNSPEC_ORDERED
> vsoxseg_obj
;
2729 static CONSTEXPR
const vlsegff vlsegff_obj
;
2730 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLB
> vlb_obj
;
2731 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLBU
> vlbu_obj
;
2732 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLH
> vlh_obj
;
2733 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLHU
> vlhu_obj
;
2734 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLW
> vlw_obj
;
2735 static CONSTEXPR
const th_loadstore_width
<false, LST_UNIT_STRIDE
, UNSPEC_TH_VLWU
> vlwu_obj
;
2736 static CONSTEXPR
const th_loadstore_width
<true, LST_UNIT_STRIDE
, UNSPEC_TH_VLB
> vsb_obj
;
2737 static CONSTEXPR
const th_loadstore_width
<true, LST_UNIT_STRIDE
, UNSPEC_TH_VLH
> vsh_obj
;
2738 static CONSTEXPR
const th_loadstore_width
<true, LST_UNIT_STRIDE
, UNSPEC_TH_VLW
> vsw_obj
;
2739 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSB
> vlsb_obj
;
2740 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSBU
> vlsbu_obj
;
2741 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSH
> vlsh_obj
;
2742 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSHU
> vlshu_obj
;
2743 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSW
> vlsw_obj
;
2744 static CONSTEXPR
const th_loadstore_width
<false, LST_STRIDED
, UNSPEC_TH_VLSWU
> vlswu_obj
;
2745 static CONSTEXPR
const th_loadstore_width
<true, LST_STRIDED
, UNSPEC_TH_VLSB
> vssb_obj
;
2746 static CONSTEXPR
const th_loadstore_width
<true, LST_STRIDED
, UNSPEC_TH_VLSH
> vssh_obj
;
2747 static CONSTEXPR
const th_loadstore_width
<true, LST_STRIDED
, UNSPEC_TH_VLSW
> vssw_obj
;
2748 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXB
> vlxb_obj
;
2749 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXBU
> vlxbu_obj
;
2750 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXH
> vlxh_obj
;
2751 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXHU
> vlxhu_obj
;
2752 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXW
> vlxw_obj
;
2753 static CONSTEXPR
const th_loadstore_width
<false, LST_INDEXED
, UNSPEC_TH_VLXWU
> vlxwu_obj
;
2754 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VLXB
> vsxb_obj
;
2755 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VLXH
> vsxh_obj
;
2756 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VLXW
> vsxw_obj
;
2757 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VSUXB
> vsuxb_obj
;
2758 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VSUXH
> vsuxh_obj
;
2759 static CONSTEXPR
const th_loadstore_width
<true, LST_INDEXED
, UNSPEC_TH_VSUXW
> vsuxw_obj
;
2760 static CONSTEXPR
const th_extract vext_x_v_obj
;
2763 static CONSTEXPR
const vandn vandn_obj
;
2764 static CONSTEXPR
const bitmanip
<ROTATE
> vrol_obj
;
2765 static CONSTEXPR
const bitmanip
<ROTATERT
> vror_obj
;
2766 static CONSTEXPR
const b_reverse
<UNSPEC_VBREV
> vbrev_obj
;
2767 static CONSTEXPR
const b_reverse
<UNSPEC_VBREV8
> vbrev8_obj
;
2768 static CONSTEXPR
const b_reverse
<UNSPEC_VREV8
> vrev8_obj
;
2769 static CONSTEXPR
const bitmanip
<CLZ
> vclz_obj
;
2770 static CONSTEXPR
const bitmanip
<CTZ
> vctz_obj
;
2771 static CONSTEXPR
const vwsll vwsll_obj
;
2772 static CONSTEXPR
const clmul
<UNSPEC_VCLMUL
> vclmul_obj
;
2773 static CONSTEXPR
const clmul
<UNSPEC_VCLMULH
> vclmulh_obj
;
2774 static CONSTEXPR
const vg_nhab
<UNSPEC_VGHSH
> vghsh_obj
;
2775 static CONSTEXPR
const crypto_vv
<UNSPEC_VGMUL
> vgmul_obj
;
2776 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESEF
> vaesef_obj
;
2777 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESEM
> vaesem_obj
;
2778 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESDF
> vaesdf_obj
;
2779 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESDM
> vaesdm_obj
;
2780 static CONSTEXPR
const crypto_vv
<UNSPEC_VAESZ
> vaesz_obj
;
2781 static CONSTEXPR
const crypto_vi
<UNSPEC_VAESKF1
> vaeskf1_obj
;
2782 static CONSTEXPR
const vaeskf2_vsm3c
<UNSPEC_VAESKF2
> vaeskf2_obj
;
2783 static CONSTEXPR
const vg_nhab
<UNSPEC_VSHA2MS
> vsha2ms_obj
;
2784 static CONSTEXPR
const vg_nhab
<UNSPEC_VSHA2CH
> vsha2ch_obj
;
2785 static CONSTEXPR
const vg_nhab
<UNSPEC_VSHA2CL
> vsha2cl_obj
;
2786 static CONSTEXPR
const crypto_vi
<UNSPEC_VSM4K
> vsm4k_obj
;
2787 static CONSTEXPR
const crypto_vv
<UNSPEC_VSM4R
> vsm4r_obj
;
2788 static CONSTEXPR
const vsm3me vsm3me_obj
;
2789 static CONSTEXPR
const vaeskf2_vsm3c
<UNSPEC_VSM3C
> vsm3c_obj
;
2792 static CONSTEXPR
const vfncvtbf16_f
<NO_FRM
> vfncvtbf16_f_obj
;
2793 static CONSTEXPR
const vfncvtbf16_f
<HAS_FRM
> vfncvtbf16_f_frm_obj
;
2794 static CONSTEXPR
const vfwcvtbf16_f vfwcvtbf16_f_obj
;
2796 static CONSTEXPR
const vfwmaccbf16
<NO_FRM
> vfwmaccbf16_obj
;
2797 static CONSTEXPR
const vfwmaccbf16
<HAS_FRM
> vfwmaccbf16_frm_obj
;
2799 /* Declare the function base NAME, pointing it to an instance
2800 of class <NAME>_obj. */
2801 #define BASE(NAME) \
2802 namespace bases { const function_base *const NAME = &NAME##_obj; }
2995 BASE (vfwcvt_xu_frm
)
2997 BASE (vfwcvt_rtz_xu
)
3002 BASE (vfncvt_xu_frm
)
3004 BASE (vfncvt_rtz_xu
)
3019 BASE (vfredusum_frm
)
3021 BASE (vfredosum_frm
)
3025 BASE (vfwredosum_frm
)
3027 BASE (vfwredusum_frm
)
3121 BASE (vfncvtbf16_f_frm
)
3125 BASE (vfwmaccbf16_frm
)
3126 } // end namespace riscv_vector