1 /* Definition of RISC-V target for GNU compiler.
2 Copyright (C) 2011-2024 Free Software Foundation, Inc.
3 Contributed by Andrew Waterman (andrew@sifive.com).
4 Based on MIPS target for GNU compiler.
6 This file is part of GCC.
8 GCC is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License as published by
10 the Free Software Foundation; either version 3, or (at your option)
13 GCC is distributed in the hope that it will be useful,
14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details.
18 You should have received a copy of the GNU General Public License
19 along with GCC; see the file COPYING3. If not see
20 <http://www.gnu.org/licenses/>. */
22 #ifndef GCC_RISCV_PROTOS_H
23 #define GCC_RISCV_PROTOS_H
27 /* Symbol types we understand. The order of this list must match that of
28 the unspec enum in riscv.md, subsequent to UNSPEC_ADDRESS_FIRST. */
29 enum riscv_symbol_type
{
40 #define NUM_SYMBOL_TYPES (SYMBOL_TLSDESC + 1)
42 /* Classifies an address.
45 A natural register + offset address. The register satisfies
46 riscv_valid_base_register_p and the offset is a const_arith_operand.
49 A base register indexed by (optionally scaled) register.
52 A base register indexed by (optionally scaled) zero-extended register.
55 A base register indexed by immediate offset with writeback.
58 A LO_SUM rtx. The first operand is a valid base register and
59 the second operand is a symbolic address.
62 A signed 16-bit constant address.
65 A constant symbolic address. */
66 enum riscv_address_type
{
76 /* Information about an address described by riscv_address_type.
82 REG is the base register and OFFSET is the constant offset.
84 ADDRESS_REG_REG and ADDRESS_REG_UREG
85 REG is the base register and OFFSET is the index register.
88 REG is the base register, OFFSET is the constant offset, and
89 shift is the shift amount for the offset.
92 REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
93 is the type of symbol it references.
96 SYMBOL_TYPE is the type of symbol that the address references. */
97 struct riscv_address_info
{
98 enum riscv_address_type type
;
101 enum riscv_symbol_type symbol_type
;
105 /* Routines implemented in riscv.cc. */
106 extern const char *riscv_asm_output_opcode (FILE *asm_out_file
, const char *p
);
107 extern enum riscv_symbol_type
riscv_classify_symbolic_expression (rtx
);
108 extern bool riscv_symbolic_constant_p (rtx
, enum riscv_symbol_type
*);
109 extern int riscv_float_const_rtx_index_for_fli (rtx
);
110 extern int riscv_regno_mode_ok_for_base_p (int, machine_mode
, bool);
111 extern bool riscv_valid_base_register_p (rtx
, machine_mode
, bool);
112 extern enum reg_class
riscv_index_reg_class ();
113 extern int riscv_regno_ok_for_index_p (int);
114 extern int riscv_address_insns (rtx
, machine_mode
, bool);
115 extern int riscv_const_insns (rtx
);
116 extern int riscv_split_const_insns (rtx
);
117 extern int riscv_load_store_insns (rtx
, rtx_insn
*);
118 extern rtx
riscv_emit_move (rtx
, rtx
);
119 extern bool riscv_split_symbol (rtx
, rtx
, machine_mode
, rtx
*);
120 extern bool riscv_split_symbol_type (enum riscv_symbol_type
);
121 extern rtx
riscv_unspec_address (rtx
, enum riscv_symbol_type
);
122 extern void riscv_move_integer (rtx
, rtx
, HOST_WIDE_INT
, machine_mode
);
123 extern bool riscv_legitimize_move (machine_mode
, rtx
, rtx
);
124 extern rtx
riscv_subword (rtx
, bool);
125 extern bool riscv_split_64bit_move_p (rtx
, rtx
);
126 extern void riscv_split_doubleword_move (rtx
, rtx
);
127 extern const char *riscv_output_move (rtx
, rtx
);
128 extern const char *riscv_output_return ();
129 extern void riscv_declare_function_name (FILE *, const char *, tree
);
130 extern void riscv_declare_function_size (FILE *, const char *, tree
);
131 extern void riscv_asm_output_alias (FILE *, const tree
, const tree
);
132 extern void riscv_asm_output_external (FILE *, const tree
, const char *);
134 riscv_zcmp_valid_stack_adj_bytes_p (HOST_WIDE_INT
, int);
135 extern void riscv_legitimize_poly_move (machine_mode
, rtx
, rtx
, rtx
);
136 extern void riscv_expand_usadd (rtx
, rtx
, rtx
);
137 extern void riscv_expand_ussub (rtx
, rtx
, rtx
);
140 extern void riscv_expand_int_scc (rtx
, enum rtx_code
, rtx
, rtx
, bool *invert_ptr
= 0);
141 extern void riscv_expand_float_scc (rtx
, enum rtx_code
, rtx
, rtx
,
142 bool *invert_ptr
= nullptr);
143 extern void riscv_expand_conditional_branch (rtx
, enum rtx_code
, rtx
, rtx
);
144 extern rtx
riscv_emit_unary (enum rtx_code code
, rtx dest
, rtx x
);
145 extern rtx
riscv_emit_binary (enum rtx_code code
, rtx dest
, rtx x
, rtx y
);
147 extern bool riscv_expand_conditional_move (rtx
, rtx
, rtx
, rtx
);
148 extern rtx
riscv_legitimize_call_address (rtx
);
149 extern void riscv_set_return_address (rtx
, rtx
);
150 extern rtx
riscv_return_addr (int, rtx
);
151 extern poly_int64
riscv_initial_elimination_offset (int, int);
152 extern void riscv_expand_prologue (void);
153 extern void riscv_expand_epilogue (int);
154 extern bool riscv_epilogue_uses (unsigned int);
155 extern bool riscv_can_use_return_insn (void);
156 extern rtx
riscv_function_value (const_tree
, const_tree
, enum machine_mode
);
157 extern bool riscv_store_data_bypass_p (rtx_insn
*, rtx_insn
*);
158 extern rtx
riscv_gen_gpr_save_insn (struct riscv_frame_info
*);
159 extern bool riscv_gpr_save_operation_p (rtx
);
160 extern void riscv_reinit (void);
161 extern poly_uint64
riscv_regmode_natural_size (machine_mode
);
162 extern bool riscv_v_ext_vector_mode_p (machine_mode
);
163 extern bool riscv_v_ext_tuple_mode_p (machine_mode
);
164 extern bool riscv_v_ext_vls_mode_p (machine_mode
);
165 extern int riscv_get_v_regno_alignment (machine_mode
);
166 extern bool riscv_shamt_matches_mask_p (int, HOST_WIDE_INT
);
167 extern void riscv_subword_address (rtx
, rtx
*, rtx
*, rtx
*, rtx
*);
168 extern void riscv_lshift_subword (machine_mode
, rtx
, rtx
, rtx
*);
169 extern enum memmodel
riscv_union_memmodels (enum memmodel
, enum memmodel
);
170 extern bool riscv_reg_frame_related (rtx
);
171 extern void riscv_split_sum_of_two_s12 (HOST_WIDE_INT
, HOST_WIDE_INT
*,
174 /* Routines implemented in riscv-c.cc. */
175 void riscv_cpu_cpp_builtins (cpp_reader
*);
176 void riscv_register_pragmas (void);
178 /* Routines implemented in riscv-builtins.cc. */
179 extern void riscv_atomic_assign_expand_fenv (tree
*, tree
*, tree
*);
180 extern bool riscv_gimple_fold_builtin (gimple_stmt_iterator
*);
181 extern rtx
riscv_expand_builtin (tree
, rtx
, rtx
, machine_mode
, int);
182 extern tree
riscv_builtin_decl (unsigned int, bool);
183 extern void riscv_init_builtins (void);
185 /* Routines implemented in riscv-common.cc. */
186 extern std::string
riscv_arch_str (bool version_p
= true);
187 extern void riscv_parse_arch_string (const char *, struct gcc_options
*, location_t
);
189 extern bool riscv_hard_regno_rename_ok (unsigned, unsigned);
191 rtl_opt_pass
* make_pass_shorten_memrefs (gcc::context
*ctxt
);
192 rtl_opt_pass
* make_pass_avlprop (gcc::context
*ctxt
);
193 rtl_opt_pass
* make_pass_vsetvl (gcc::context
*ctxt
);
195 /* Routines implemented in riscv-string.c. */
196 extern bool riscv_expand_block_compare (rtx
, rtx
, rtx
, rtx
);
197 extern bool riscv_expand_block_move (rtx
, rtx
, rtx
);
198 extern bool riscv_expand_block_clear (rtx
, rtx
);
200 /* Information about one CPU we know about. */
201 struct riscv_cpu_info
{
202 /* This CPU's canonical name. */
205 /* Default arch for this CPU, could be NULL if no default arch. */
208 /* Which automaton to use for tuning. */
212 extern const riscv_cpu_info
*riscv_find_cpu (const char *);
214 /* Common vector costs in any kind of vectorization (e.g VLA and VLS). */
215 struct common_vector_cost
217 /* Cost of any integer vector operation, excluding the ones handled
219 const int int_stmt_cost
;
221 /* Cost of any fp vector operation, excluding the ones handled
223 const int fp_stmt_cost
;
225 /* Gather/scatter vectorization cost. */
226 const int gather_load_cost
;
227 const int scatter_store_cost
;
229 /* Segment load/store permute cost. */
230 const int segment_permute_2
;
231 const int segment_permute_3
;
232 const int segment_permute_4
;
233 const int segment_permute_5
;
234 const int segment_permute_6
;
235 const int segment_permute_7
;
236 const int segment_permute_8
;
238 /* Cost of a vector-to-scalar operation. */
239 const int vec_to_scalar_cost
;
241 /* Cost of a scalar-to-vector operation. */
242 const int scalar_to_vec_cost
;
244 /* Cost of a permute operation. */
245 const int permute_cost
;
247 /* Cost of an aligned vector load. */
248 const int align_load_cost
;
250 /* Cost of an aligned vector store. */
251 const int align_store_cost
;
253 /* Cost of an unaligned vector load. */
254 const int unalign_load_cost
;
256 /* Cost of an unaligned vector store. */
257 const int unalign_store_cost
;
260 /* scalable vectorization (VLA) specific cost. */
261 struct scalable_vector_cost
: common_vector_cost
263 CONSTEXPR
scalable_vector_cost (const common_vector_cost
&base
)
264 : common_vector_cost (base
)
267 /* TODO: We will need more other kinds of vector cost for VLA.
268 E.g. fold_left reduction cost, lanes load/store cost, ..., etc. */
271 /* Additional costs for register copies. Cost is for one register. */
272 struct regmove_vector_cost
280 /* Cost for vector insn classes. */
281 struct cpu_vector_cost
283 /* Cost of any integer scalar operation, excluding load and store. */
284 const int scalar_int_stmt_cost
;
286 /* Cost of any fp scalar operation, excluding load and store. */
287 const int scalar_fp_stmt_cost
;
289 /* Cost of a scalar load. */
290 const int scalar_load_cost
;
292 /* Cost of a scalar store. */
293 const int scalar_store_cost
;
295 /* Cost of a taken branch. */
296 const int cond_taken_branch_cost
;
298 /* Cost of a not-taken branch. */
299 const int cond_not_taken_branch_cost
;
301 /* Cost of an VLS modes operations. */
302 const common_vector_cost
*vls
;
304 /* Cost of an VLA modes operations. */
305 const scalable_vector_cost
*vla
;
307 /* Cost of vector register move operations. */
308 const regmove_vector_cost
*regmove
;
311 /* Routines implemented in riscv-selftests.cc. */
314 void riscv_run_selftests (void);
315 } // namespace selftest
318 namespace riscv_vector
{
/* The x0 register rtx stands in for the VLMAX AVL.  */
#define RVV_VLMAX regno_reg_rtx[X0_REGNUM]
/* An UNSPEC_VUNDEF of MODE — presumably a "don't care"/undefined vector
   value used as a merge operand.  */
#define RVV_VUNDEF(MODE)                                                       \
  gen_rtx_UNSPEC (MODE, gen_rtvec (1, RVV_VLMAX), UNSPEC_VUNDEF)
323 /* These flags describe how to pass the operands to a rvv insn pattern.
325 If a insn has this flags:
326 HAS_DEST_P | HAS_MASK_P | USE_VUNDEF_MERGE_P
327 | TU_POLICY_P | BINARY_OP_P | FRM_DYN_P
329 operands[0] is the dest operand
330 operands[1] is the mask operand
331 operands[2] is the merge operand
332 operands[3] and operands[4] is the two operand to do the operation.
333 operands[5] is the vl operand
334 operands[6] is the tail policy operand
335 operands[7] is the mask policy operands
336 operands[8] is the rounding mode operands
338 Then you can call `emit_vlmax_insn (flags, icode, ops)` to emit a insn.
339 and ops[0] is the dest operand (operands[0]), ops[1] is the mask
340 operand (operands[1]), ops[2] and ops[3] is the two
341 operands (operands[3], operands[4]) to do the operation. Other operands
342 will be created by emit_vlmax_insn according to the flags information.
344 enum insn_flags
: unsigned int
346 /* flags for dest, mask, merge operands. */
347 /* Means INSN has dest operand. False for STORE insn. */
349 /* Means INSN has mask operand. */
351 /* Means using ALL_TRUES for mask operand. */
352 USE_ALL_TRUES_MASK_P
= 1 << 2,
353 /* Means using ONE_TRUE for mask operand. */
354 USE_ONE_TRUE_MASK_P
= 1 << 3,
355 /* Means INSN has merge operand. */
356 HAS_MERGE_P
= 1 << 4,
357 /* Means using VUNDEF for merge operand. */
358 USE_VUNDEF_MERGE_P
= 1 << 5,
360 /* flags for tail policy and mask plicy operands. */
361 /* Means the tail policy is TAIL_UNDISTURBED. */
362 TU_POLICY_P
= 1 << 6,
363 /* Means the tail policy is default (return by get_prefer_tail_policy). */
364 TDEFAULT_POLICY_P
= 1 << 7,
365 /* Means the mask policy is MASK_UNDISTURBED. */
366 MU_POLICY_P
= 1 << 8,
367 /* Means the mask policy is default (return by get_prefer_mask_policy). */
368 MDEFAULT_POLICY_P
= 1 << 9,
370 /* flags for the number operands to do the operation. */
371 /* Means INSN need zero operand to do the operation. e.g. vid.v */
372 NULLARY_OP_P
= 1 << 10,
373 /* Means INSN need one operand to do the operation. */
374 UNARY_OP_P
= 1 << 11,
375 /* Means INSN need two operands to do the operation. */
376 BINARY_OP_P
= 1 << 12,
377 /* Means INSN need two operands to do the operation. */
378 TERNARY_OP_P
= 1 << 13,
380 /* flags for get vtype mode from the index number. default from dest operand. */
381 VTYPE_MODE_FROM_OP1_P
= 1 << 14,
383 /* flags for the floating-point rounding mode. */
384 /* Means INSN has FRM operand and the value is FRM_DYN. */
387 /* Means INSN has FRM operand and the value is FRM_RUP. */
390 /* Means INSN has FRM operand and the value is FRM_RDN. */
393 /* Means INSN has FRM operand and the value is FRM_RMM. */
396 /* Means INSN has FRM operand and the value is FRM_RNE. */
399 /* Means INSN has VXRM operand and the value is VXRM_RNU. */
400 VXRM_RNU_P
= 1 << 20,
402 /* Means INSN has VXRM operand and the value is VXRM_RDN. */
403 VXRM_RDN_P
= 1 << 21,
406 enum insn_type
: unsigned int
408 /* some flags macros. */
409 /* For non-mask insn with tama. */
410 __NORMAL_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
411 | USE_VUNDEF_MERGE_P
| TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
,
412 /* For non-mask insn with ta, without mask policy operand. */
413 __NORMAL_OP_TA
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
414 | USE_VUNDEF_MERGE_P
| TDEFAULT_POLICY_P
,
415 /* For non-mask insn with ta, without mask operand and mask policy operand. */
417 = HAS_DEST_P
| HAS_MERGE_P
| USE_VUNDEF_MERGE_P
| TDEFAULT_POLICY_P
,
418 /* For non-mask insn with ma, without tail policy operand. */
419 __NORMAL_OP_MA
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
420 | USE_VUNDEF_MERGE_P
| MDEFAULT_POLICY_P
,
421 /* For mask insn with tama. */
422 __MASK_OP_TAMA
= HAS_DEST_P
| HAS_MASK_P
| HAS_MERGE_P
| USE_VUNDEF_MERGE_P
423 | TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
,
424 /* For mask insn with tamu. */
426 = HAS_DEST_P
| HAS_MASK_P
| HAS_MERGE_P
| TDEFAULT_POLICY_P
| MU_POLICY_P
,
427 /* For mask insn with tuma. */
428 __MASK_OP_TUMA
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
429 | TU_POLICY_P
| MDEFAULT_POLICY_P
,
430 /* For mask insn with mu. */
431 __MASK_OP_MU
= HAS_DEST_P
| HAS_MASK_P
| HAS_MERGE_P
| MU_POLICY_P
,
432 /* For mask insn with ta, without mask policy operand. */
433 __MASK_OP_TA
= HAS_DEST_P
| HAS_MASK_P
| HAS_MERGE_P
| USE_VUNDEF_MERGE_P
436 /* Nullary operator. e.g. vid.v */
437 NULLARY_OP
= __NORMAL_OP
| NULLARY_OP_P
,
439 /* Unary operator. */
440 UNARY_OP
= __NORMAL_OP
| UNARY_OP_P
,
441 UNARY_OP_TAMA
= __MASK_OP_TAMA
| UNARY_OP_P
,
442 UNARY_OP_TAMU
= __MASK_OP_TAMU
| UNARY_OP_P
,
443 UNARY_OP_FRM_DYN
= UNARY_OP
| FRM_DYN_P
,
444 UNARY_OP_FRM_RMM
= UNARY_OP
| FRM_RMM_P
,
445 UNARY_OP_FRM_RUP
= UNARY_OP
| FRM_RUP_P
,
446 UNARY_OP_FRM_RDN
= UNARY_OP
| FRM_RDN_P
,
447 UNARY_OP_TAMA_FRM_DYN
= UNARY_OP_TAMA
| FRM_DYN_P
,
448 UNARY_OP_TAMA_FRM_RUP
= UNARY_OP_TAMA
| FRM_RUP_P
,
449 UNARY_OP_TAMA_FRM_RDN
= UNARY_OP_TAMA
| FRM_RDN_P
,
450 UNARY_OP_TAMA_FRM_RMM
= UNARY_OP_TAMA
| FRM_RMM_P
,
451 UNARY_OP_TAMA_FRM_RNE
= UNARY_OP_TAMA
| FRM_RNE_P
,
452 UNARY_OP_TAMU_FRM_DYN
= UNARY_OP_TAMU
| FRM_DYN_P
,
453 UNARY_OP_TAMU_FRM_RUP
= UNARY_OP_TAMU
| FRM_RUP_P
,
454 UNARY_OP_TAMU_FRM_RDN
= UNARY_OP_TAMU
| FRM_RDN_P
,
455 UNARY_OP_TAMU_FRM_RMM
= UNARY_OP_TAMU
| FRM_RMM_P
,
456 UNARY_OP_TAMU_FRM_RNE
= UNARY_OP_TAMU
| FRM_RNE_P
,
458 /* Binary operator. */
459 BINARY_OP
= __NORMAL_OP
| BINARY_OP_P
,
460 BINARY_OP_TAMA
= __MASK_OP_TAMA
| BINARY_OP_P
,
461 BINARY_OP_TAMU
= __MASK_OP_TAMU
| BINARY_OP_P
,
462 BINARY_OP_TUMA
= __MASK_OP_TUMA
| BINARY_OP_P
,
463 BINARY_OP_FRM_DYN
= BINARY_OP
| FRM_DYN_P
,
464 BINARY_OP_VXRM_RNU
= BINARY_OP
| VXRM_RNU_P
,
465 BINARY_OP_VXRM_RDN
= BINARY_OP
| VXRM_RDN_P
,
467 /* Ternary operator. Always have real merge operand. */
468 TERNARY_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
469 | TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
| TERNARY_OP_P
,
470 TERNARY_OP_FRM_DYN
= TERNARY_OP
| FRM_DYN_P
,
472 /* For vwmacc, no merge operand. */
473 WIDEN_TERNARY_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
474 | TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
| TERNARY_OP_P
,
475 WIDEN_TERNARY_OP_FRM_DYN
= WIDEN_TERNARY_OP
| FRM_DYN_P
,
477 /* For vmerge, no mask operand, no mask policy operand. */
478 MERGE_OP
= __NORMAL_OP_TA2
| TERNARY_OP_P
,
480 /* For vmerge with TU policy. */
481 MERGE_OP_TU
= HAS_DEST_P
| HAS_MERGE_P
| TERNARY_OP_P
| TU_POLICY_P
,
483 /* For vm<compare>, no tail policy operand. */
484 COMPARE_OP
= __NORMAL_OP_MA
| TERNARY_OP_P
,
485 COMPARE_OP_MU
= __MASK_OP_MU
| TERNARY_OP_P
,
487 /* For scatter insn: no dest operand, no merge operand, no tail and mask
489 SCATTER_OP_M
= HAS_MASK_P
| TERNARY_OP_P
,
491 /* For vcpop.m, no merge operand, no tail and mask policy operands. */
492 CPOP_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| UNARY_OP_P
493 | VTYPE_MODE_FROM_OP1_P
,
495 /* For mask instrunctions, no tail and mask policy operands. */
496 UNARY_MASK_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
497 | USE_VUNDEF_MERGE_P
| UNARY_OP_P
,
498 BINARY_MASK_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
| HAS_MERGE_P
499 | USE_VUNDEF_MERGE_P
| BINARY_OP_P
,
501 /* For vcompress.vm */
502 COMPRESS_OP
= __NORMAL_OP_TA2
| BINARY_OP_P
,
503 /* has merge operand but use ta. */
505 = HAS_DEST_P
| HAS_MERGE_P
| TDEFAULT_POLICY_P
| BINARY_OP_P
,
507 /* For vslideup.up has merge operand but use ta. */
508 SLIDEUP_OP_MERGE
= HAS_DEST_P
| HAS_MASK_P
| USE_ALL_TRUES_MASK_P
509 | HAS_MERGE_P
| TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
512 /* For vreduce, no mask policy operand. */
513 REDUCE_OP
= __NORMAL_OP_TA
| BINARY_OP_P
| VTYPE_MODE_FROM_OP1_P
,
514 REDUCE_OP_M
= __MASK_OP_TA
| BINARY_OP_P
| VTYPE_MODE_FROM_OP1_P
,
515 REDUCE_OP_FRM_DYN
= REDUCE_OP
| FRM_DYN_P
| VTYPE_MODE_FROM_OP1_P
,
517 = __MASK_OP_TA
| BINARY_OP_P
| FRM_DYN_P
| VTYPE_MODE_FROM_OP1_P
,
519 /* For vmv.s.x/vfmv.s.f. */
520 SCALAR_MOVE_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ONE_TRUE_MASK_P
| HAS_MERGE_P
521 | USE_VUNDEF_MERGE_P
| TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
524 SCALAR_MOVE_MERGED_OP
= HAS_DEST_P
| HAS_MASK_P
| USE_ONE_TRUE_MASK_P
525 | HAS_MERGE_P
| TDEFAULT_POLICY_P
| MDEFAULT_POLICY_P
542 /* The RISC-V vsetvli pass uses "known vlmax" operations for optimization.
543 Whether or not an instruction actually is a vlmax operation is not
544 recognizable from the length operand alone but the avl_type operand
545 is used instead. In general, there are two cases:
547 - Emit a vlmax operation by calling emit_vlmax_insn[_lra]. Here we emit
548 a vsetvli with vlmax configuration and set the avl_type to VLMAX for
549 VLA modes or VLS for VLS modes.
550 - Emit an operation that uses the existing (last-set) length and
551 set the avl_type to NONVLMAX.
553 Sometimes we also need to set the VLMAX or VLS avl_type to an operation that
554 already uses a given length register. This can happen during or after
555 register allocation when we are not allowed to create a new register.
556 For that case we also allow to set the avl_type to VLMAX or VLS.
564 /* Routines implemented in riscv-vector-builtins.cc. */
565 void init_builtins (void);
566 void reinit_builtins (void);
567 const char *mangle_builtin_type (const_tree
);
568 tree
lookup_vector_type_attribute (const_tree
);
569 bool builtin_type_p (const_tree
);
571 bool verify_type_context (location_t
, type_context_kind
, const_tree
, bool);
572 bool expand_vec_perm_const (machine_mode
, machine_mode
, rtx
, rtx
, rtx
,
573 const vec_perm_indices
&);
575 void handle_pragma_vector (void);
576 tree
builtin_decl (unsigned, bool);
577 gimple
*gimple_fold_builtin (unsigned int, gimple_stmt_iterator
*, gcall
*);
578 rtx
expand_builtin (unsigned int, tree
, rtx
);
579 bool check_builtin_call (location_t
, vec
<location_t
>, unsigned int,
580 tree
, unsigned int, tree
*);
581 tree
resolve_overloaded_builtin (location_t
, unsigned int, tree
, vec
<tree
, va_gc
> *);
582 bool const_vec_all_same_in_range_p (rtx
, HOST_WIDE_INT
, HOST_WIDE_INT
);
583 bool legitimize_move (rtx
, rtx
*);
584 void emit_vlmax_vsetvl (machine_mode
, rtx
);
585 void emit_hard_vlmax_vsetvl (machine_mode
, rtx
);
586 void emit_vlmax_insn (unsigned, unsigned, rtx
*);
587 void emit_nonvlmax_insn (unsigned, unsigned, rtx
*, rtx
);
588 void emit_vlmax_insn_lra (unsigned, unsigned, rtx
*, rtx
);
589 enum vlmul_type
get_vlmul (machine_mode
);
590 rtx
get_vlmax_rtx (machine_mode
);
591 unsigned int get_ratio (machine_mode
);
592 unsigned int get_nf (machine_mode
);
593 machine_mode
get_subpart_mode (machine_mode
);
596 int get_avl_type (rtx
);
597 unsigned int calculate_ratio (unsigned int, enum vlmul_type
);
600 TAIL_UNDISTURBED
= 0,
607 MASK_UNDISTURBED
= 0,
612 /* Return true if VALUE is agnostic or any policy. */
613 #define IS_AGNOSTIC(VALUE) (bool) (VALUE & 0x1 || (VALUE >> 1 & 0x1))
615 enum tail_policy
get_prefer_tail_policy ();
616 enum mask_policy
get_prefer_mask_policy ();
617 rtx
get_avl_type_rtx (enum avl_type
);
618 opt_machine_mode
get_vector_mode (scalar_mode
, poly_uint64
);
619 opt_machine_mode
get_tuple_mode (machine_mode
, unsigned int);
621 bool neg_simm5_p (rtx
);
623 bool has_vi_variant_p (rtx_code
, rtx
);
624 void expand_vec_cmp (rtx
, rtx_code
, rtx
, rtx
, rtx
= nullptr, rtx
= nullptr);
625 bool expand_vec_cmp_float (rtx
, rtx_code
, rtx
, rtx
, bool);
626 void expand_cond_len_unop (unsigned, rtx
*);
627 void expand_cond_len_binop (unsigned, rtx
*);
628 void expand_reduction (unsigned, unsigned, rtx
*, rtx
);
629 void expand_vec_ceil (rtx
, rtx
, machine_mode
, machine_mode
);
630 void expand_vec_floor (rtx
, rtx
, machine_mode
, machine_mode
);
631 void expand_vec_nearbyint (rtx
, rtx
, machine_mode
, machine_mode
);
632 void expand_vec_rint (rtx
, rtx
, machine_mode
, machine_mode
);
633 void expand_vec_round (rtx
, rtx
, machine_mode
, machine_mode
);
634 void expand_vec_trunc (rtx
, rtx
, machine_mode
, machine_mode
);
635 void expand_vec_roundeven (rtx
, rtx
, machine_mode
, machine_mode
);
636 void expand_vec_lrint (rtx
, rtx
, machine_mode
, machine_mode
, machine_mode
);
637 void expand_vec_lround (rtx
, rtx
, machine_mode
, machine_mode
, machine_mode
);
638 void expand_vec_lceil (rtx
, rtx
, machine_mode
, machine_mode
);
639 void expand_vec_lfloor (rtx
, rtx
, machine_mode
, machine_mode
);
640 void expand_vec_usadd (rtx
, rtx
, rtx
, machine_mode
);
641 void expand_vec_ussub (rtx
, rtx
, rtx
, machine_mode
);
643 bool sew64_scalar_helper (rtx
*, rtx
*, rtx
, machine_mode
,
644 bool, void (*)(rtx
*, rtx
), enum avl_type
);
645 rtx
gen_scalar_move_mask (machine_mode
);
646 rtx
gen_no_side_effects_vsetvl_rtx (machine_mode
, rtx
, rtx
);
648 /* RVV vector register sizes.
649 TODO: Currently, we only add RVV_32/RVV_64/RVV_128, we may need to
650 support other values in the future. */
657 bool slide1_sew64_helper (int, machine_mode
, machine_mode
,
658 machine_mode
, rtx
*);
659 rtx
gen_avl_for_scalar_move (rtx
);
660 void expand_tuple_move (rtx
*);
661 bool expand_block_move (rtx
, rtx
, rtx
);
662 machine_mode
preferred_simd_mode (scalar_mode
);
663 machine_mode
get_mask_mode (machine_mode
);
664 void expand_vec_series (rtx
, rtx
, rtx
, rtx
= 0);
665 void expand_vec_init (rtx
, rtx
);
666 void expand_vec_perm (rtx
, rtx
, rtx
, rtx
);
667 void expand_select_vl (rtx
*);
668 void expand_load_store (rtx
*, bool);
669 void expand_gather_scatter (rtx
*, bool);
670 void expand_cond_len_ternop (unsigned, rtx
*);
671 void prepare_ternary_operands (rtx
*);
672 void expand_lanes_load_store (rtx
*, bool);
673 void expand_fold_extract_last (rtx
*);
674 void expand_cond_unop (unsigned, rtx
*);
675 void expand_cond_binop (unsigned, rtx
*);
676 void expand_cond_ternop (unsigned, rtx
*);
677 void expand_popcount (rtx
*);
678 void expand_rawmemchr (machine_mode
, rtx
, rtx
, rtx
, bool = false);
679 bool expand_strcmp (rtx
, rtx
, rtx
, rtx
, unsigned HOST_WIDE_INT
, bool);
680 void emit_vec_extract (rtx
, rtx
, rtx
);
681 bool expand_vec_setmem (rtx
, rtx
, rtx
);
682 bool expand_vec_cmpmem (rtx
, rtx
, rtx
, rtx
);
684 /* Rounding mode bitfield for fixed point VXRM. */
685 enum fixed_point_rounding_mode
693 /* Rounding mode bitfield for floating point FRM. The value of enum comes
695 https://github.com/riscv/riscv-isa-manual/blob/main/src/f-st-ext.adoc#floating-point-control-and-status-register
697 enum floating_point_rounding_mode
699 FRM_RNE
= 0, /* Aka 0b000. */
700 FRM_RTZ
= 1, /* Aka 0b001. */
701 FRM_RDN
= 2, /* Aka 0b010. */
702 FRM_RUP
= 3, /* Aka 0b011. */
703 FRM_RMM
= 4, /* Aka 0b100. */
704 FRM_DYN
= 7, /* Aka 0b111. */
705 FRM_STATIC_MIN
= FRM_RNE
,
706 FRM_STATIC_MAX
= FRM_RMM
,
712 enum floating_point_rounding_mode
get_frm_mode (rtx
);
713 opt_machine_mode
vectorize_related_mode (machine_mode
, scalar_mode
,
715 unsigned int autovectorize_vector_modes (vec
<machine_mode
> *, bool);
716 bool cmp_lmul_le_one (machine_mode
);
717 bool cmp_lmul_gt_one (machine_mode
);
718 bool vls_mode_valid_p (machine_mode
);
719 bool vlmax_avl_type_p (rtx_insn
*);
720 bool has_vl_op (rtx_insn
*);
721 bool tail_agnostic_p (rtx_insn
*);
722 void validate_change_or_fail (rtx
, rtx
*, rtx
, bool);
723 bool nonvlmax_avl_type_p (rtx_insn
*);
724 bool vlmax_avl_p (rtx
);
725 uint8_t get_sew (rtx_insn
*);
726 enum vlmul_type
get_vlmul (rtx_insn
*);
727 int count_regno_occurrences (rtx_insn
*, unsigned int);
728 bool imm_avl_p (machine_mode
);
729 bool can_be_broadcasted_p (rtx
);
730 bool gather_scatter_valid_offset_p (machine_mode
);
731 HOST_WIDE_INT
estimated_poly_value (poly_int64
, unsigned int);
732 bool whole_reg_to_reg_move_p (rtx
*, machine_mode
, int);
733 bool splat_to_scalar_move_p (rtx
*);
734 rtx
get_fp_rounding_coefficient (machine_mode
);
737 /* We classify builtin types into two classes:
738 1. General builtin class which is defined in riscv_builtins.
739 2. Vector builtin class which is a special builtin architecture
740 that implement intrinsic short into "pragma". */
741 enum riscv_builtin_class
743 RISCV_BUILTIN_GENERAL
,
const unsigned int RISCV_BUILTIN_SHIFT = 1;

/* Mask that selects the riscv_builtin_class part of a function code.  */
const unsigned int RISCV_BUILTIN_CLASS = (1 << RISCV_BUILTIN_SHIFT) - 1;
752 /* Routines implemented in riscv-string.cc. */
753 extern bool riscv_expand_strcmp (rtx
, rtx
, rtx
, rtx
, rtx
);
754 extern bool riscv_expand_strlen (rtx
, rtx
, rtx
, rtx
);
756 /* Routines implemented in thead.cc. */
757 extern bool extract_base_offset_in_addr (rtx
, rtx
*, rtx
*);
758 extern bool th_mempair_operands_p (rtx
[4], bool, machine_mode
);
759 extern void th_mempair_order_operands (rtx
[4], bool, machine_mode
);
760 extern void th_mempair_prepare_save_restore_operands (rtx
[4], bool,
764 extern void th_mempair_save_restore_regs (rtx
[4], bool, machine_mode
);
765 extern unsigned int th_int_get_mask (unsigned int);
766 extern unsigned int th_int_get_save_adjustment (void);
767 extern rtx
th_int_adjust_cfi_prologue (unsigned int);
768 extern const char *th_asm_output_opcode (FILE *asm_out_file
, const char *p
);
771 th_mempair_output_move (rtx
[4], bool, machine_mode
, RTX_CODE
);
772 extern bool th_memidx_legitimate_modify_p (rtx
);
773 extern bool th_memidx_legitimate_modify_p (rtx
, bool);
774 extern bool th_memidx_legitimate_index_p (rtx
);
775 extern bool th_memidx_legitimate_index_p (rtx
, bool);
776 extern bool th_classify_address (struct riscv_address_info
*,
777 rtx
, machine_mode
, bool);
778 extern const char *th_output_move (rtx
, rtx
);
779 extern bool th_print_operand_address (FILE *, machine_mode
, rtx
);
782 extern bool riscv_use_divmod_expander (void);
783 void riscv_init_cumulative_args (CUMULATIVE_ARGS
*, tree
, rtx
, tree
, int);
785 riscv_option_valid_attribute_p (tree
, tree
, tree
, int);
787 riscv_override_options_internal (struct gcc_options
*);
extern void riscv_option_override (void);
790 struct riscv_tune_param
;
791 /* Information about one micro-arch we know about. */
792 struct riscv_tune_info
{
793 /* This micro-arch canonical name. */
796 /* Which automaton to use for tuning. */
797 enum riscv_microarchitecture_type microarchitecture
;
799 /* Tuning parameters for this micro-arch. */
800 const struct riscv_tune_param
*tune_param
;
803 const struct riscv_tune_info
*
804 riscv_parse_tune (const char *, bool);
805 const cpu_vector_cost
*get_vector_costs ();
809 RISCV_MAJOR_VERSION_BASE
= 1000000,
810 RISCV_MINOR_VERSION_BASE
= 1000,
811 RISCV_REVISION_VERSION_BASE
= 1,
814 #endif /* ! GCC_RISCV_PROTOS_H */