1 /* Definition of RISC-V target for GNU compiler.
2 Copyright (C) 2011-2023 Free Software Foundation, Inc.
3 Contributed by Andrew Waterman (andrew@sifive.com).
4 Based on MIPS target for GNU compiler.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
any later version.
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #ifndef GCC_RISCV_PROTOS_H
23 #define GCC_RISCV_PROTOS_H
/* Symbol types we understand.  The order of this list must match that of
   the unspec enum in riscv.md, subsequent to UNSPEC_ADDRESS_FIRST.  */
enum riscv_symbol_type
{
  /* NOTE(review): the enumerator list was lost during extraction; it is
     reconstructed here from upstream riscv-protos.h -- verify the order
     against the unspec enum in riscv.md.  The visible
     "#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)" confirms the last
     enumerator.  */
  SYMBOL_ABSOLUTE,
  SYMBOL_PCREL,
  SYMBOL_GOT_DISP,
  SYMBOL_TLS,
  SYMBOL_TLS_LE,
  SYMBOL_TLS_IE,
  SYMBOL_TLS_GD
};
#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       riscv_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_REG_REG
       A base register indexed by (optionally scaled) register.

   ADDRESS_REG_UREG
       A base register indexed by (optionally scaled) zero-extended register.

   ADDRESS_REG_WB
       A base register indexed by immediate offset with writeback.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
enum riscv_address_type
{
  /* NOTE(review): enumerators were lost during extraction; reconstructed to
     match the comment above -- confirm against upstream.  */
  ADDRESS_REG,
  ADDRESS_REG_REG,
  ADDRESS_REG_UREG,
  ADDRESS_REG_WB,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};
74 /* Information about an address described by riscv_address_type.
80 REG is the base register and OFFSET is the constant offset.
82 ADDRESS_REG_REG and ADDRESS_REG_UREG
83 REG is the base register and OFFSET is the index register.
86 REG is the base register, OFFSET is the constant offset, and
87 shift is the shift amount for the offset.
90 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
91 is the type of symbol it references.
94 SYMBOL_TYPE is the type of symbol that the address references. */
95 struct riscv_address_info
{
96 enum riscv_address_type type
;
99 enum riscv_symbol_type symbol_type
;
103 /* Routines implemented in riscv.cc. */
104 extern enum riscv_symbol_type
riscv_classify_symbolic_expression (rtx
);
105 extern bool riscv_symbolic_constant_p (rtx
, enum riscv_symbol_type
*);
106 extern int riscv_float_const_rtx_index_for_fli (rtx
);
107 extern int riscv_regno_mode_ok_for_base_p (int, machine_mode
, bool);
108 extern bool riscv_valid_base_register_p (rtx
, machine_mode
, bool);
109 extern enum reg_class
riscv_index_reg_class ();
110 extern int riscv_regno_ok_for_index_p (int);
111 extern int riscv_address_insns (rtx
, machine_mode
, bool);
112 extern int riscv_const_insns (rtx
);
113 extern int riscv_split_const_insns (rtx
);
114 extern int riscv_load_store_insns (rtx
, rtx_insn
*);
115 extern rtx
riscv_emit_move (rtx
, rtx
);
116 extern bool riscv_split_symbol (rtx
, rtx
, machine_mode
, rtx
*);
117 extern bool riscv_split_symbol_type (enum riscv_symbol_type
);
118 extern rtx
riscv_unspec_address (rtx
, enum riscv_symbol_type
);
119 extern void riscv_move_integer (rtx
, rtx
, HOST_WIDE_INT
, machine_mode
);
120 extern bool riscv_legitimize_move (machine_mode
, rtx
, rtx
);
121 extern rtx
riscv_subword (rtx
, bool);
122 extern bool riscv_split_64bit_move_p (rtx
, rtx
);
123 extern void riscv_split_doubleword_move (rtx
, rtx
);
124 extern const char *riscv_output_move (rtx
, rtx
);
125 extern const char *riscv_output_return ();
126 extern void riscv_declare_function_name (FILE *, const char *, tree
);
127 extern void riscv_asm_output_alias (FILE *, const tree
, const tree
);
128 extern void riscv_asm_output_external (FILE *, const tree
, const char *);
130 riscv_zcmp_valid_stack_adj_bytes_p (HOST_WIDE_INT
, int);
133 extern void riscv_expand_int_scc (rtx
, enum rtx_code
, rtx
, rtx
, bool *invert_ptr
= 0);
134 extern void riscv_expand_float_scc (rtx
, enum rtx_code
, rtx
, rtx
);
135 extern void riscv_expand_conditional_branch (rtx
, enum rtx_code
, rtx
, rtx
);
136 extern rtx
riscv_emit_binary (enum rtx_code code
, rtx dest
, rtx x
, rtx y
);
138 extern bool riscv_expand_conditional_move (rtx
, rtx
, rtx
, rtx
);
139 extern rtx
riscv_legitimize_call_address (rtx
);
140 extern void riscv_set_return_address (rtx
, rtx
);
141 extern rtx
riscv_return_addr (int, rtx
);
142 extern poly_int64
riscv_initial_elimination_offset (int, int);
143 extern void riscv_expand_prologue (void);
144 extern void riscv_expand_epilogue (int);
145 extern bool riscv_epilogue_uses (unsigned int);
146 extern bool riscv_can_use_return_insn (void);
147 extern rtx
riscv_function_value (const_tree
, const_tree
, enum machine_mode
);
148 extern bool riscv_store_data_bypass_p (rtx_insn
*, rtx_insn
*);
149 extern rtx
riscv_gen_gpr_save_insn (struct riscv_frame_info
*);
150 extern bool riscv_gpr_save_operation_p (rtx
);
151 extern void riscv_reinit (void);
152 extern poly_uint64
riscv_regmode_natural_size (machine_mode
);
153 extern bool riscv_v_ext_vector_mode_p (machine_mode
);
154 extern bool riscv_v_ext_tuple_mode_p (machine_mode
);
155 extern bool riscv_v_ext_vls_mode_p (machine_mode
);
156 extern int riscv_get_v_regno_alignment (machine_mode
);
157 extern bool riscv_shamt_matches_mask_p (int, HOST_WIDE_INT
);
158 extern void riscv_subword_address (rtx
, rtx
*, rtx
*, rtx
*, rtx
*);
159 extern void riscv_lshift_subword (machine_mode
, rtx
, rtx
, rtx
*);
160 extern enum memmodel
riscv_union_memmodels (enum memmodel
, enum memmodel
);
162 /* Routines implemented in riscv-c.cc. */
163 void riscv_cpu_cpp_builtins (cpp_reader
*);
164 void riscv_register_pragmas (void);
166 /* Routines implemented in riscv-builtins.cc. */
167 extern void riscv_atomic_assign_expand_fenv (tree
*, tree
*, tree
*);
168 extern bool riscv_gimple_fold_builtin (gimple_stmt_iterator
*);
169 extern rtx
riscv_expand_builtin (tree
, rtx
, rtx
, machine_mode
, int);
170 extern tree
riscv_builtin_decl (unsigned int, bool);
171 extern void riscv_init_builtins (void);
173 /* Routines implemented in riscv-common.cc. */
174 extern std::string
riscv_arch_str (bool version_p
= true);
175 extern void riscv_parse_arch_string (const char *, struct gcc_options
*, location_t
);
177 extern bool riscv_hard_regno_rename_ok (unsigned, unsigned);
179 rtl_opt_pass
* make_pass_shorten_memrefs (gcc::context
*ctxt
);
180 rtl_opt_pass
* make_pass_avlprop (gcc::context
*ctxt
);
181 rtl_opt_pass
* make_pass_vsetvl (gcc::context
*ctxt
);
183 /* Routines implemented in riscv-string.c. */
184 extern bool riscv_expand_block_move (rtx
, rtx
, rtx
);
/* Information about one CPU we know about.  */
struct riscv_cpu_info
{
  /* NOTE(review): the field declarations were lost during extraction (the
     per-field comments survived); reconstructed from upstream -- verify.  */

  /* This CPU's canonical name.  */
  const char *name;

  /* Default arch for this CPU, could be NULL if no default arch.  */
  const char *arch;

  /* Which automaton to use for tuning.  */
  const char *tune;
};

extern const riscv_cpu_info *riscv_find_cpu (const char *);

/* Routines implemented in riscv-selftests.cc.  */
/* NOTE(review): the "#if CHECKING_P" guard and "namespace selftest {" opener
   were lost during extraction (the closing brace's comment survived);
   restored from upstream -- verify.  */
#if CHECKING_P
namespace selftest {
void riscv_run_selftests (void);
} // namespace selftest
#endif
207 namespace riscv_vector
{
208 #define RVV_VLMAX gen_rtx_REG (Pmode, X0_REGNUM)
209 #define RVV_VUNDEF(MODE) \
210 gen_rtx_UNSPEC (MODE, gen_rtvec (1, gen_rtx_REG (SImode, X0_REGNUM)), \
/* These flags describe how to pass the operands to a rvv insn pattern.
   e.g.:
     If a insn has this flags:
       HAS_DEST_P | HAS_MASK_P | USE_VUNDEF_MERGE_P
       | TU_POLICY_P | BINARY_OP_P | FRM_DYN_P
     that means:
       operands[0] is the dest operand
       operands[1] is the mask operand
       operands[2] is the merge operand
       operands[3] and operands[4] is the two operand to do the operation.
       operands[5] is the vl operand
       operands[6] is the tail policy operand
       operands[7] is the mask policy operands
       operands[8] is the rounding mode operands

     Then you can call `emit_vlmax_insn (flags, icode, ops)` to emit a insn.
     and ops[0] is the dest operand (operands[0]), ops[1] is the mask
     operand (operands[1]), ops[2] and ops[3] is the two
     operands (operands[3], operands[4]) to do the operation.  Other operands
     will be created by emit_vlmax_insn according to the flags information.  */
enum insn_flags : unsigned int
{
  /* NOTE(review): the opening brace, several enumerators (HAS_DEST_P,
     HAS_MASK_P, the FRM_* flags) and the closing brace were lost during
     extraction; reconstructed from upstream -- verify bit positions.  */

  /* flags for dest, mask, merge operands.  */
  /* Means INSN has dest operand.  False for STORE insn.  */
  HAS_DEST_P = 1 << 0,
  /* Means INSN has mask operand.  */
  HAS_MASK_P = 1 << 1,
  /* Means using ALL_TRUES for mask operand.  */
  USE_ALL_TRUES_MASK_P = 1 << 2,
  /* Means using ONE_TRUE for mask operand.  */
  USE_ONE_TRUE_MASK_P = 1 << 3,
  /* Means INSN has merge operand.  */
  HAS_MERGE_P = 1 << 4,
  /* Means using VUNDEF for merge operand.  */
  USE_VUNDEF_MERGE_P = 1 << 5,

  /* flags for tail policy and mask plicy operands.  */
  /* Means the tail policy is TAIL_UNDISTURBED.  */
  TU_POLICY_P = 1 << 6,
  /* Means the tail policy is default (return by get_prefer_tail_policy).  */
  TDEFAULT_POLICY_P = 1 << 7,
  /* Means the mask policy is MASK_UNDISTURBED.  */
  MU_POLICY_P = 1 << 8,
  /* Means the mask policy is default (return by get_prefer_mask_policy).  */
  MDEFAULT_POLICY_P = 1 << 9,

  /* flags for the number operands to do the operation.  */
  /* Means INSN need zero operand to do the operation.  e.g. vid.v  */
  NULLARY_OP_P = 1 << 10,
  /* Means INSN need one operand to do the operation.  */
  UNARY_OP_P = 1 << 11,
  /* Means INSN need two operands to do the operation.  */
  BINARY_OP_P = 1 << 12,
  /* Means INSN need three operands to do the operation.  */
  TERNARY_OP_P = 1 << 13,

  /* flags for get vtype mode from the index number.  default from dest
     operand.  */
  VTYPE_MODE_FROM_OP1_P = 1 << 14,

  /* flags for the floating-point rounding mode.  */
  /* Means INSN has FRM operand and the value is FRM_DYN.  */
  FRM_DYN_P = 1 << 15,
  /* Means INSN has FRM operand and the value is FRM_RUP.  */
  FRM_RUP_P = 1 << 16,
  /* Means INSN has FRM operand and the value is FRM_RDN.  */
  FRM_RDN_P = 1 << 17,
  /* Means INSN has FRM operand and the value is FRM_RMM.  */
  FRM_RMM_P = 1 << 18,
  /* Means INSN has FRM operand and the value is FRM_RNE.  */
  FRM_RNE_P = 1 << 19,
};
290 enum insn_type
: unsigned int
292 /* some flags macros. */
293 /* For non-mask insn with tama. */
294 __NORMAL_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
295 | USE_VUNDEF_MERGE_P
| TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
,
296 /* For non-mask insn with ta, without mask policy operand. */
297 __NORMAL_OP_TA
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
298 | USE_VUNDEF_MERGE_P
| TDEFAULT_POLICY_P
,
299 /* For non-mask insn with ta, without mask operand and mask policy operand. */
301 = HAS_DEST_P
| HAS_MERGE_P
| USE_VUNDEF_MERGE_P
| TDEFAULT_POLICY_P
,
302 /* For non-mask insn with ma, without tail policy operand. */
303 __NORMAL_OP_MA
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
304 | USE_VUNDEF_MERGE_P
| MDEFAULT_POLICY_P
,
305 /* For mask insn with tama. */
306 __MASK_OP_TAMA
= HAS_DEST_P
| HAS_MASK_P
| HAS_MERGE_P
| USE_VUNDEF_MERGE_P
307 | TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
,
308 /* For mask insn with tamu. */
310 = HAS_DEST_P
| HAS_MASK_P
| HAS_MERGE_P
| TDEFAULT_POLICY_P
| MU_POLICY_P
,
311 /* For mask insn with tuma. */
312 __MASK_OP_TUMA
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
313 | TU_POLICY_P
| MDEFAULT_POLICY_P
,
314 /* For mask insn with mu. */
315 __MASK_OP_MU
= HAS_DEST_P
| HAS_MASK_P
| HAS_MERGE_P
| MU_POLICY_P
,
316 /* For mask insn with ta, without mask policy operand. */
317 __MASK_OP_TA
= HAS_DEST_P
| HAS_MASK_P
| HAS_MERGE_P
| USE_VUNDEF_MERGE_P
320 /* Nullary operator. e.g. vid.v */
321 NULLARY_OP
= __NORMAL_OP
| NULLARY_OP_P
,
323 /* Unary operator. */
324 UNARY_OP
= __NORMAL_OP
| UNARY_OP_P
,
325 UNARY_OP_TAMA
= __MASK_OP_TAMA
| UNARY_OP_P
,
326 UNARY_OP_TAMU
= __MASK_OP_TAMU
| UNARY_OP_P
,
327 UNARY_OP_FRM_DYN
= UNARY_OP
| FRM_DYN_P
,
328 UNARY_OP_FRM_RMM
= UNARY_OP
| FRM_RMM_P
,
329 UNARY_OP_FRM_RUP
= UNARY_OP
| FRM_RUP_P
,
330 UNARY_OP_FRM_RDN
= UNARY_OP
| FRM_RDN_P
,
331 UNARY_OP_TAMA_FRM_DYN
= UNARY_OP_TAMA
| FRM_DYN_P
,
332 UNARY_OP_TAMA_FRM_RUP
= UNARY_OP_TAMA
| FRM_RUP_P
,
333 UNARY_OP_TAMA_FRM_RDN
= UNARY_OP_TAMA
| FRM_RDN_P
,
334 UNARY_OP_TAMA_FRM_RMM
= UNARY_OP_TAMA
| FRM_RMM_P
,
335 UNARY_OP_TAMA_FRM_RNE
= UNARY_OP_TAMA
| FRM_RNE_P
,
336 UNARY_OP_TAMU_FRM_DYN
= UNARY_OP_TAMU
| FRM_DYN_P
,
337 UNARY_OP_TAMU_FRM_RUP
= UNARY_OP_TAMU
| FRM_RUP_P
,
338 UNARY_OP_TAMU_FRM_RDN
= UNARY_OP_TAMU
| FRM_RDN_P
,
339 UNARY_OP_TAMU_FRM_RMM
= UNARY_OP_TAMU
| FRM_RMM_P
,
340 UNARY_OP_TAMU_FRM_RNE
= UNARY_OP_TAMU
| FRM_RNE_P
,
342 /* Binary operator. */
343 BINARY_OP
= __NORMAL_OP
| BINARY_OP_P
,
344 BINARY_OP_TAMA
= __MASK_OP_TAMA
| BINARY_OP_P
,
345 BINARY_OP_TAMU
= __MASK_OP_TAMU
| BINARY_OP_P
,
346 BINARY_OP_TUMA
= __MASK_OP_TUMA
| BINARY_OP_P
,
347 BINARY_OP_FRM_DYN
= BINARY_OP
| FRM_DYN_P
,
349 /* Ternary operator. Always have real merge operand. */
350 TERNARY_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
351 | TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
| TERNARY_OP_P
,
352 TERNARY_OP_FRM_DYN
= TERNARY_OP
| FRM_DYN_P
,
354 /* For vwmacc, no merge operand. */
355 WIDEN_TERNARY_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
356 | TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
| TERNARY_OP_P
,
357 WIDEN_TERNARY_OP_FRM_DYN
= WIDEN_TERNARY_OP
| FRM_DYN_P
,
359 /* For vmerge, no mask operand, no mask policy operand. */
360 MERGE_OP
= __NORMAL_OP_TA2
| TERNARY_OP_P
,
362 /* For vmerge with TU policy. */
363 MERGE_OP_TU
= HAS_DEST_P
| HAS_MERGE_P
| TERNARY_OP_P
| TU_POLICY_P
,
365 /* For vm<compare>, no tail policy operand. */
366 COMPARE_OP
= __NORMAL_OP_MA
| TERNARY_OP_P
,
367 COMPARE_OP_MU
= __MASK_OP_MU
| TERNARY_OP_P
,
369 /* For scatter insn: no dest operand, no merge operand, no tail and mask
371 SCATTER_OP_M
= HAS_MASK_P
| TERNARY_OP_P
,
373 /* For vcpop.m, no merge operand, no tail and mask policy operands. */
374 CPOP_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| UNARY_OP_P
375 | VTYPE_MODE_FROM_OP1_P
,
377 /* For mask instrunctions, no tail and mask policy operands. */
378 UNARY_MASK_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
379 | USE_VUNDEF_MERGE_P
| UNARY_OP_P
,
380 BINARY_MASK_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
381 | USE_VUNDEF_MERGE_P
| BINARY_OP_P
,
383 /* For vcompress.vm */
384 COMPRESS_OP
= __NORMAL_OP_TA2
| BINARY_OP_P
,
385 /* has merge operand but use ta. */
387 = HAS_DEST_P
| HAS_MERGE_P
| TDEFAULT_POLICY_P
| BINARY_OP_P
,
389 /* For vreduce, no mask policy operand. */
390 REDUCE_OP
= __NORMAL_OP_TA
| BINARY_OP_P
| VTYPE_MODE_FROM_OP1_P
,
391 REDUCE_OP_M
= __MASK_OP_TA
| BINARY_OP_P
| VTYPE_MODE_FROM_OP1_P
,
392 REDUCE_OP_FRM_DYN
= REDUCE_OP
| FRM_DYN_P
| VTYPE_MODE_FROM_OP1_P
,
394 = __MASK_OP_TA
| BINARY_OP_P
| FRM_DYN_P
| VTYPE_MODE_FROM_OP1_P
,
396 /* For vmv.s.x/vfmv.s.f. */
397 SCALAR_MOVE_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ONE_TRUE_MASK_P
| HAS_MERGE_P
398 | USE_VUNDEF_MERGE_P
| TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
401 SCALAR_MOVE_MERGED_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ONE_TRUE_MASK_P
402 | HAS_MERGE_P
| TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
/* The RISC-V vsetvli pass uses "known vlmax" operations for optimization.
   Whether or not an instruction actually is a vlmax operation is not
   recognizable from the length operand alone but the avl_type operand
   is used instead.  In general, there are two cases:

    - Emit a vlmax operation by calling emit_vlmax_insn[_lra].  Here we emit
      a vsetvli with vlmax configuration and set the avl_type to VLMAX for
      VLA modes or VLS for VLS modes.
    - Emit an operation that uses the existing (last-set) length and
      set the avl_type to NONVLMAX.

   Sometimes we also need to set the VLMAX or VLS avl_type to an operation
   that already uses a given length register.  This can happen during or
   after register allocation when we are not allowed to create a new
   register.  For that case we also allow to set the avl_type to VLMAX or
   VLS.  */
/* NOTE(review): the avl_type enumeration documented above was lost during
   extraction; reconstructed from upstream (it is referenced below by
   get_avl_type_rtx and sew64_scalar_helper) -- verify values.  */
enum avl_type
{
  NONVLMAX = 0,
  VLMAX = 1,
  VLS = 2,
};
441 /* Routines implemented in riscv-vector-builtins.cc. */
442 void init_builtins (void);
443 const char *mangle_builtin_type (const_tree
);
444 tree
lookup_vector_type_attribute (const_tree
);
445 bool builtin_type_p (const_tree
);
447 bool verify_type_context (location_t
, type_context_kind
, const_tree
, bool);
448 bool expand_vec_perm_const (machine_mode
, machine_mode
, rtx
, rtx
, rtx
,
449 const vec_perm_indices
&);
451 void handle_pragma_vector (void);
452 tree
builtin_decl (unsigned, bool);
453 gimple
*gimple_fold_builtin (unsigned int, gimple_stmt_iterator
*, gcall
*);
454 rtx
expand_builtin (unsigned int, tree
, rtx
);
455 bool check_builtin_call (location_t
, vec
<location_t
>, unsigned int,
456 tree
, unsigned int, tree
*);
457 tree
resolve_overloaded_builtin (unsigned int, vec
<tree
, va_gc
> *);
458 bool const_vec_all_same_in_range_p (rtx
, HOST_WIDE_INT
, HOST_WIDE_INT
);
459 bool legitimize_move (rtx
, rtx
*);
460 void emit_vlmax_vsetvl (machine_mode
, rtx
);
461 void emit_hard_vlmax_vsetvl (machine_mode
, rtx
);
462 void emit_vlmax_insn (unsigned, unsigned, rtx
*);
463 void emit_nonvlmax_insn (unsigned, unsigned, rtx
*, rtx
);
464 void emit_vlmax_insn_lra (unsigned, unsigned, rtx
*, rtx
);
465 enum vlmul_type
get_vlmul (machine_mode
);
466 rtx
get_vlmax_rtx (machine_mode
);
467 unsigned int get_ratio (machine_mode
);
468 unsigned int get_nf (machine_mode
);
469 machine_mode
get_subpart_mode (machine_mode
);
472 int get_avl_type (rtx
);
473 unsigned int calculate_ratio (unsigned int, enum vlmul_type
);
476 TAIL_UNDISTURBED
= 0,
483 MASK_UNDISTURBED
= 0,
488 /* Return true if VALUE is agnostic or any policy. */
489 #define IS_AGNOSTIC(VALUE) (bool) (VALUE & 0x1 || (VALUE >> 1 & 0x1))
491 enum tail_policy
get_prefer_tail_policy ();
492 enum mask_policy
get_prefer_mask_policy ();
493 rtx
get_avl_type_rtx (enum avl_type
);
494 opt_machine_mode
get_vector_mode (scalar_mode
, poly_uint64
);
495 opt_machine_mode
get_tuple_mode (machine_mode
, unsigned int);
497 bool neg_simm5_p (rtx
);
499 bool has_vi_variant_p (rtx_code
, rtx
);
500 void expand_vec_cmp (rtx
, rtx_code
, rtx
, rtx
);
501 bool expand_vec_cmp_float (rtx
, rtx_code
, rtx
, rtx
, bool);
502 void expand_cond_len_unop (unsigned, rtx
*);
503 void expand_cond_len_binop (unsigned, rtx
*);
504 void expand_reduction (unsigned, unsigned, rtx
*, rtx
);
505 void expand_vec_ceil (rtx
, rtx
, machine_mode
, machine_mode
);
506 void expand_vec_floor (rtx
, rtx
, machine_mode
, machine_mode
);
507 void expand_vec_nearbyint (rtx
, rtx
, machine_mode
, machine_mode
);
508 void expand_vec_rint (rtx
, rtx
, machine_mode
, machine_mode
);
509 void expand_vec_round (rtx
, rtx
, machine_mode
, machine_mode
);
510 void expand_vec_trunc (rtx
, rtx
, machine_mode
, machine_mode
);
511 void expand_vec_roundeven (rtx
, rtx
, machine_mode
, machine_mode
);
512 void expand_vec_lrint (rtx
, rtx
, machine_mode
, machine_mode
);
513 void expand_vec_lround (rtx
, rtx
, machine_mode
, machine_mode
);
514 void expand_vec_lceil (rtx
, rtx
, machine_mode
, machine_mode
);
515 void expand_vec_lfloor (rtx
, rtx
, machine_mode
, machine_mode
);
517 bool sew64_scalar_helper (rtx
*, rtx
*, rtx
, machine_mode
,
518 bool, void (*)(rtx
*, rtx
), enum avl_type
);
519 rtx
gen_scalar_move_mask (machine_mode
);
520 rtx
gen_no_side_effects_vsetvl_rtx (machine_mode
, rtx
, rtx
);
522 /* RVV vector register sizes.
523 TODO: Currently, we only add RVV_32/RVV_64/RVV_128, we may need to
524 support other values in the future. */
531 bool slide1_sew64_helper (int, machine_mode
, machine_mode
,
532 machine_mode
, rtx
*);
533 rtx
gen_avl_for_scalar_move (rtx
);
534 void expand_tuple_move (rtx
*);
535 bool expand_block_move (rtx
, rtx
, rtx
);
536 machine_mode
preferred_simd_mode (scalar_mode
);
537 machine_mode
get_mask_mode (machine_mode
);
538 void expand_vec_series (rtx
, rtx
, rtx
);
539 void expand_vec_init (rtx
, rtx
);
540 void expand_vec_perm (rtx
, rtx
, rtx
, rtx
);
541 void expand_select_vl (rtx
*);
542 void expand_load_store (rtx
*, bool);
543 void expand_gather_scatter (rtx
*, bool);
544 void expand_cond_len_ternop (unsigned, rtx
*);
545 void prepare_ternary_operands (rtx
*);
546 void expand_lanes_load_store (rtx
*, bool);
547 void expand_fold_extract_last (rtx
*);
548 void expand_cond_unop (unsigned, rtx
*);
549 void expand_cond_binop (unsigned, rtx
*);
550 void expand_cond_ternop (unsigned, rtx
*);
551 void expand_popcount (rtx
*);
552 void expand_rawmemchr (machine_mode
, rtx
, rtx
, rtx
);
554 /* Rounding mode bitfield for fixed point VXRM. */
555 enum fixed_point_rounding_mode
563 /* Rounding mode bitfield for floating point FRM. The value of enum comes
565 https://github.com/riscv/riscv-isa-manual/blob/main/src/f-st-ext.adoc#floating-point-control-and-status-register
567 enum floating_point_rounding_mode
569 FRM_RNE
= 0, /* Aka 0b000. */
570 FRM_RTZ
= 1, /* Aka 0b001. */
571 FRM_RDN
= 2, /* Aka 0b010. */
572 FRM_RUP
= 3, /* Aka 0b011. */
573 FRM_RMM
= 4, /* Aka 0b100. */
574 FRM_DYN
= 7, /* Aka 0b111. */
575 FRM_STATIC_MIN
= FRM_RNE
,
576 FRM_STATIC_MAX
= FRM_RMM
,
582 enum floating_point_rounding_mode
get_frm_mode (rtx
);
583 opt_machine_mode
vectorize_related_mode (machine_mode
, scalar_mode
,
585 unsigned int autovectorize_vector_modes (vec
<machine_mode
> *, bool);
586 bool cmp_lmul_le_one (machine_mode
);
587 bool cmp_lmul_gt_one (machine_mode
);
588 bool gather_scatter_valid_offset_mode_p (machine_mode
);
589 bool vls_mode_valid_p (machine_mode
);
590 bool vlmax_avl_type_p (rtx_insn
*);
591 bool has_vl_op (rtx_insn
*);
592 bool tail_agnostic_p (rtx_insn
*);
593 void validate_change_or_fail (rtx
, rtx
*, rtx
, bool);
594 bool nonvlmax_avl_type_p (rtx_insn
*);
595 bool vlmax_avl_p (rtx
);
596 uint8_t get_sew (rtx_insn
*);
597 enum vlmul_type
get_vlmul (rtx_insn
*);
598 int count_regno_occurrences (rtx_insn
*, unsigned int);
599 bool imm_avl_p (machine_mode
);
600 bool can_be_broadcasted_p (rtx
);
603 /* We classify builtin types into two classes:
604 1. General builtin class which is defined in riscv_builtins.
605 2. Vector builtin class which is a special builtin architecture
606 that implement intrinsic short into "pragma". */
607 enum riscv_builtin_class
609 RISCV_BUILTIN_GENERAL
,
613 const unsigned int RISCV_BUILTIN_SHIFT
= 1;
615 /* Mask that selects the riscv_builtin_class part of a function code. */
616 const unsigned int RISCV_BUILTIN_CLASS
= (1 << RISCV_BUILTIN_SHIFT
) - 1;
618 /* Routines implemented in riscv-string.cc. */
619 extern bool riscv_expand_strcmp (rtx
, rtx
, rtx
, rtx
, rtx
);
620 extern bool riscv_expand_strlen (rtx
, rtx
, rtx
, rtx
);
622 /* Routines implemented in thead.cc. */
623 extern bool th_mempair_operands_p (rtx
[4], bool, machine_mode
);
624 extern void th_mempair_order_operands (rtx
[4], bool, machine_mode
);
625 extern void th_mempair_prepare_save_restore_operands (rtx
[4], bool,
629 extern void th_mempair_save_restore_regs (rtx
[4], bool, machine_mode
);
632 th_mempair_output_move (rtx
[4], bool, machine_mode
, RTX_CODE
);
633 extern bool th_memidx_legitimate_modify_p (rtx
);
634 extern bool th_memidx_legitimate_modify_p (rtx
, bool);
635 extern bool th_memidx_legitimate_index_p (rtx
);
636 extern bool th_memidx_legitimate_index_p (rtx
, bool);
637 extern bool th_classify_address (struct riscv_address_info
*,
638 rtx
, machine_mode
, bool);
639 extern const char *th_output_move (rtx
, rtx
);
640 extern bool th_print_operand_address (FILE *, machine_mode
, rtx
);
643 extern bool riscv_use_divmod_expander (void);
644 void riscv_init_cumulative_args (CUMULATIVE_ARGS
*, tree
, rtx
, tree
, int);
646 #endif /* ! GCC_RISCV_PROTOS_H */