/* Definition of RISC-V target for GNU compiler.
   Copyright (C) 2011-2024 Free Software Foundation, Inc.
   Contributed by Andrew Waterman (andrew@sifive.com).
   Based on MIPS target for GNU compiler.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#ifndef GCC_RISCV_PROTOS_H
#define GCC_RISCV_PROTOS_H

/* Symbol types we understand.  The order of this list must match that of
   the unspec enum in riscv.md, subsequent to UNSPEC_ADDRESS_FIRST.  */
enum riscv_symbol_type
{

#define NUM_SYMBOL_TYPES (SYMBOL_TLS_GD + 1)

/* Classifies an address.

   ADDRESS_REG
       A natural register + offset address.  The register satisfies
       riscv_valid_base_register_p and the offset is a const_arith_operand.

   ADDRESS_REG_REG
       A base register indexed by (optionally scaled) register.

   ADDRESS_REG_UREG
       A base register indexed by (optionally scaled) zero-extended register.

   ADDRESS_REG_WB
       A base register indexed by immediate offset with writeback.

   ADDRESS_LO_SUM
       A LO_SUM rtx.  The first operand is a valid base register and
       the second operand is a symbolic address.

   ADDRESS_CONST_INT
       A signed 16-bit constant address.

   ADDRESS_SYMBOLIC
       A constant symbolic address.  */
enum riscv_address_type
{
  ADDRESS_REG,
  ADDRESS_REG_REG,
  ADDRESS_REG_UREG,
  ADDRESS_REG_WB,
  ADDRESS_LO_SUM,
  ADDRESS_CONST_INT,
  ADDRESS_SYMBOLIC
};

/* Information about an address described by riscv_address_type.

   ADDRESS_REG
       REG is the base register and OFFSET is the constant offset.

   ADDRESS_REG_REG and ADDRESS_REG_UREG
       REG is the base register and OFFSET is the index register.

   ADDRESS_REG_WB
       REG is the base register, OFFSET is the constant offset, and
       SHIFT is the shift amount for the offset.

   ADDRESS_LO_SUM
       REG and OFFSET are the operands to the LO_SUM and SYMBOL_TYPE
       is the type of symbol it references.

   ADDRESS_SYMBOLIC
       SYMBOL_TYPE is the type of symbol that the address references.  */
struct riscv_address_info
{
  enum riscv_address_type type;
  rtx reg;
  rtx offset;
  enum riscv_symbol_type symbol_type;
  int shift;
};

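/* For illustration only (not part of the interface): an address such as
   (plus:DI (reg:DI s0) (const_int 16)) falls into the ADDRESS_REG class and
   would be described with type == ADDRESS_REG, reg == (reg:DI s0) and
   offset == (const_int 16), the other fields being unused.  */
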
/* Routines implemented in riscv.cc.  */
extern const char *riscv_asm_output_opcode (FILE *asm_out_file, const char *p);
extern enum riscv_symbol_type riscv_classify_symbolic_expression (rtx);
extern bool riscv_symbolic_constant_p (rtx, enum riscv_symbol_type *);
extern int riscv_float_const_rtx_index_for_fli (rtx);
extern int riscv_regno_mode_ok_for_base_p (int, machine_mode, bool);
extern bool riscv_valid_base_register_p (rtx, machine_mode, bool);
extern enum reg_class riscv_index_reg_class ();
extern int riscv_regno_ok_for_index_p (int);
extern int riscv_address_insns (rtx, machine_mode, bool);
extern int riscv_const_insns (rtx);
extern int riscv_split_const_insns (rtx);
extern int riscv_load_store_insns (rtx, rtx_insn *);
extern rtx riscv_emit_move (rtx, rtx);
extern bool riscv_split_symbol (rtx, rtx, machine_mode, rtx *);
extern bool riscv_split_symbol_type (enum riscv_symbol_type);
extern rtx riscv_unspec_address (rtx, enum riscv_symbol_type);
extern void riscv_move_integer (rtx, rtx, HOST_WIDE_INT, machine_mode);
extern bool riscv_legitimize_move (machine_mode, rtx, rtx);
extern rtx riscv_subword (rtx, bool);
extern bool riscv_split_64bit_move_p (rtx, rtx);
extern void riscv_split_doubleword_move (rtx, rtx);
extern const char *riscv_output_move (rtx, rtx);
extern const char *riscv_output_return ();
extern void riscv_declare_function_name (FILE *, const char *, tree);
extern void riscv_declare_function_size (FILE *, const char *, tree);
extern void riscv_asm_output_alias (FILE *, const tree, const tree);
extern void riscv_asm_output_external (FILE *, const tree, const char *);
extern bool riscv_zcmp_valid_stack_adj_bytes_p (HOST_WIDE_INT, int);
extern void riscv_legitimize_poly_move (machine_mode, rtx, rtx, rtx);
extern void riscv_expand_int_scc (rtx, enum rtx_code, rtx, rtx, bool *invert_ptr = 0);
extern void riscv_expand_float_scc (rtx, enum rtx_code, rtx, rtx,
				    bool *invert_ptr = nullptr);
extern void riscv_expand_conditional_branch (rtx, enum rtx_code, rtx, rtx);
extern rtx riscv_emit_unary (enum rtx_code code, rtx dest, rtx x);
extern rtx riscv_emit_binary (enum rtx_code code, rtx dest, rtx x, rtx y);
extern bool riscv_expand_conditional_move (rtx, rtx, rtx, rtx);
extern rtx riscv_legitimize_call_address (rtx);
extern void riscv_set_return_address (rtx, rtx);
extern rtx riscv_return_addr (int, rtx);
extern poly_int64 riscv_initial_elimination_offset (int, int);
extern void riscv_expand_prologue (void);
extern void riscv_expand_epilogue (int);
extern bool riscv_epilogue_uses (unsigned int);
extern bool riscv_can_use_return_insn (void);
extern rtx riscv_function_value (const_tree, const_tree, enum machine_mode);
extern bool riscv_store_data_bypass_p (rtx_insn *, rtx_insn *);
extern rtx riscv_gen_gpr_save_insn (struct riscv_frame_info *);
extern bool riscv_gpr_save_operation_p (rtx);
extern void riscv_reinit (void);
extern poly_uint64 riscv_regmode_natural_size (machine_mode);
extern bool riscv_v_ext_vector_mode_p (machine_mode);
extern bool riscv_v_ext_tuple_mode_p (machine_mode);
extern bool riscv_v_ext_vls_mode_p (machine_mode);
extern int riscv_get_v_regno_alignment (machine_mode);
extern bool riscv_shamt_matches_mask_p (int, HOST_WIDE_INT);
extern void riscv_subword_address (rtx, rtx *, rtx *, rtx *, rtx *);
extern void riscv_lshift_subword (machine_mode, rtx, rtx, rtx *);
extern enum memmodel riscv_union_memmodels (enum memmodel, enum memmodel);

/* Routines implemented in riscv-c.cc.  */
void riscv_cpu_cpp_builtins (cpp_reader *);
void riscv_register_pragmas (void);

/* Routines implemented in riscv-builtins.cc.  */
extern void riscv_atomic_assign_expand_fenv (tree *, tree *, tree *);
extern bool riscv_gimple_fold_builtin (gimple_stmt_iterator *);
extern rtx riscv_expand_builtin (tree, rtx, rtx, machine_mode, int);
extern tree riscv_builtin_decl (unsigned int, bool);
extern void riscv_init_builtins (void);

/* Routines implemented in riscv-common.cc.  */
extern std::string riscv_arch_str (bool version_p = true);
extern void riscv_parse_arch_string (const char *, struct gcc_options *,
				     location_t);

extern bool riscv_hard_regno_rename_ok (unsigned, unsigned);

rtl_opt_pass *make_pass_shorten_memrefs (gcc::context *ctxt);
rtl_opt_pass *make_pass_avlprop (gcc::context *ctxt);
rtl_opt_pass *make_pass_vsetvl (gcc::context *ctxt);

/* Routines implemented in riscv-string.c.  */
extern bool riscv_expand_block_move (rtx, rtx, rtx);

/* Information about one CPU we know about.  */
struct riscv_cpu_info
{
  /* This CPU's canonical name.  */
  const char *name;

  /* Default arch for this CPU, could be NULL if no default arch.  */
  const char *arch;

  /* Which automaton to use for tuning.  */
  const char *tune;
};

extern const riscv_cpu_info *riscv_find_cpu (const char *);

/* Common vector costs in any kind of vectorization (e.g. VLA and VLS).  */
struct common_vector_cost
{
  /* Cost of any integer vector operation, excluding the ones handled
     specially.  */
  const int int_stmt_cost;

  /* Cost of any fp vector operation, excluding the ones handled
     specially.  */
  const int fp_stmt_cost;

  /* Gather/scatter vectorization cost.  */
  const int gather_load_cost;
  const int scatter_store_cost;

  /* Cost of a vector-to-scalar operation.  */
  const int vec_to_scalar_cost;

  /* Cost of a scalar-to-vector operation.  */
  const int scalar_to_vec_cost;

  /* Cost of a permute operation.  */
  const int permute_cost;

  /* Cost of an aligned vector load.  */
  const int align_load_cost;

  /* Cost of an aligned vector store.  */
  const int align_store_cost;

  /* Cost of an unaligned vector load.  */
  const int unalign_load_cost;

  /* Cost of an unaligned vector store.  */
  const int unalign_store_cost;
};

/* Scalable vectorization (VLA) specific cost.  */
struct scalable_vector_cost : common_vector_cost
{
  CONSTEXPR scalable_vector_cost (const common_vector_cost &base)
    : common_vector_cost (base)
  {}

  /* TODO: We will need other kinds of vector cost for VLA,
     e.g. fold_left reduction cost, lanes load/store cost, etc.  */
};

/* Additional costs for register copies.  Cost is for one register.  */
struct regmove_vector_cost

/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  /* Cost of any integer scalar operation, excluding load and store.  */
  const int scalar_int_stmt_cost;

  /* Cost of any fp scalar operation, excluding load and store.  */
  const int scalar_fp_stmt_cost;

  /* Cost of a scalar load.  */
  const int scalar_load_cost;

  /* Cost of a scalar store.  */
  const int scalar_store_cost;

  /* Cost of a taken branch.  */
  const int cond_taken_branch_cost;

  /* Cost of a not-taken branch.  */
  const int cond_not_taken_branch_cost;

  /* Cost of VLS mode operations.  */
  const common_vector_cost *vls;

  /* Cost of VLA mode operations.  */
  const scalable_vector_cost *vla;

  /* Cost of vector register move operations.  */
  const regmove_vector_cost *regmove;
};

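/* A tuning model would typically provide these tables as file-scope constants
   and point cpu_vector_cost at them, roughly as in the sketch below (all
   names and cost values are made up for illustration; the regmove table is
   left out because its fields are not shown above):

     static const common_vector_cost example_vls_vector_cost = {
       1,	// int_stmt_cost
       1,	// fp_stmt_cost
       2,	// gather_load_cost
       2,	// scatter_store_cost
       1,	// vec_to_scalar_cost
       1,	// scalar_to_vec_cost
       2,	// permute_cost
       1,	// align_load_cost
       1,	// align_store_cost
       2,	// unalign_load_cost
       2,	// unalign_store_cost
     };

     static const scalable_vector_cost
       example_vla_vector_cost (example_vls_vector_cost);

     static const cpu_vector_cost example_vector_cost = {
       1,	// scalar_int_stmt_cost
       1,	// scalar_fp_stmt_cost
       1,	// scalar_load_cost
       1,	// scalar_store_cost
       3,	// cond_taken_branch_cost
       0,	// cond_not_taken_branch_cost
       &example_vls_vector_cost,	// vls
       &example_vla_vector_cost,	// vla
       nullptr,	// regmove
     };  */
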
/* Routines implemented in riscv-selftests.cc.  */
namespace selftest {
void riscv_run_selftests (void);
} // namespace selftest

namespace riscv_vector
{
#define RVV_VLMAX regno_reg_rtx[X0_REGNUM]
#define RVV_VUNDEF(MODE)                                                       \
  gen_rtx_UNSPEC (MODE, gen_rtvec (1, RVV_VLMAX), UNSPEC_VUNDEF)

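/* For example, RVV_VUNDEF (RVVM1SImode) wraps the x0 register rtx in an
   UNSPEC_VUNDEF, which the insn patterns treat as a "don't care" merge
   operand (the mode name here is only illustrative).  */
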
/* These flags describe how to pass the operands to an RVV insn pattern.
   For example, if an insn has these flags:

     HAS_DEST_P | HAS_MASK_P | USE_VUNDEF_MERGE_P
     | TU_POLICY_P | BINARY_OP_P | FRM_DYN_P

   then:
     operands[0] is the dest operand
     operands[1] is the mask operand
     operands[2] is the merge operand
     operands[3] and operands[4] are the two operands of the operation
     operands[5] is the vl operand
     operands[6] is the tail policy operand
     operands[7] is the mask policy operand
     operands[8] is the rounding mode operand

   You can then call `emit_vlmax_insn (flags, icode, ops)` to emit an insn,
   where ops[0] is the dest operand (operands[0]), ops[1] is the mask
   operand (operands[1]), and ops[2] and ops[3] are the two source
   operands (operands[3] and operands[4]).  The other operands are created
   by emit_vlmax_insn according to the flags information.  */

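/* As a sketch (dest, src1 and src2 are placeholder rtxes and icode must be
   the code of the pattern being emitted):

     rtx ops[] = {dest, src1, src2};

   Passing this array to emit_vlmax_insn together with the icode and the
   BINARY_OP flag combination emits a complete pattern: the mask, merge, vl,
   policy and rounding-mode operands are synthesized from the flags.  */
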
enum insn_flags : unsigned int
{
  /* Flags for dest, mask, merge operands.  */
  /* Means INSN has dest operand.  False for STORE insn.  */
  HAS_DEST_P = 1 << 0,
  /* Means INSN has mask operand.  */
  HAS_MASK_P = 1 << 1,
  /* Means using ALL_TRUES for mask operand.  */
  USE_ALL_TRUES_MASK_P = 1 << 2,
  /* Means using ONE_TRUE for mask operand.  */
  USE_ONE_TRUE_MASK_P = 1 << 3,
  /* Means INSN has merge operand.  */
  HAS_MERGE_P = 1 << 4,
  /* Means using VUNDEF for merge operand.  */
  USE_VUNDEF_MERGE_P = 1 << 5,

  /* Flags for tail policy and mask policy operands.  */
  /* Means the tail policy is TAIL_UNDISTURBED.  */
  TU_POLICY_P = 1 << 6,
  /* Means the tail policy is default (returned by get_prefer_tail_policy).  */
  TDEFAULT_POLICY_P = 1 << 7,
  /* Means the mask policy is MASK_UNDISTURBED.  */
  MU_POLICY_P = 1 << 8,
  /* Means the mask policy is default (returned by get_prefer_mask_policy).  */
  MDEFAULT_POLICY_P = 1 << 9,

  /* Flags for the number of operands the operation takes.  */
  /* Means INSN needs zero operands to do the operation, e.g. vid.v.  */
  NULLARY_OP_P = 1 << 10,
  /* Means INSN needs one operand to do the operation.  */
  UNARY_OP_P = 1 << 11,
  /* Means INSN needs two operands to do the operation.  */
  BINARY_OP_P = 1 << 12,
  /* Means INSN needs three operands to do the operation.  */
  TERNARY_OP_P = 1 << 13,

  /* Flag for taking the vtype mode from the given operand index instead of
     the dest operand.  */
  VTYPE_MODE_FROM_OP1_P = 1 << 14,

  /* Flags for the floating-point rounding mode.  */
  /* Means INSN has FRM operand and the value is FRM_DYN.  */
  FRM_DYN_P = 1 << 15,
  /* Means INSN has FRM operand and the value is FRM_RUP.  */
  FRM_RUP_P = 1 << 16,
  /* Means INSN has FRM operand and the value is FRM_RDN.  */
  FRM_RDN_P = 1 << 17,
  /* Means INSN has FRM operand and the value is FRM_RMM.  */
  FRM_RMM_P = 1 << 18,
  /* Means INSN has FRM operand and the value is FRM_RNE.  */
  FRM_RNE_P = 1 << 19,

  /* Means INSN has VXRM operand and the value is VXRM_RNU.  */
  VXRM_RNU_P = 1 << 20,
  /* Means INSN has VXRM operand and the value is VXRM_RDN.  */
  VXRM_RDN_P = 1 << 21,
};

enum insn_type : unsigned int
{
  /* Some flag macros.  */
  /* For non-mask insn with tama.  */
  __NORMAL_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		| USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P,
  /* For non-mask insn with ta, without mask policy operand.  */
  __NORMAL_OP_TA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		   | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P,
  /* For non-mask insn with ta, without mask operand and mask policy operand.  */
  __NORMAL_OP_TA2
    = HAS_DEST_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P,
  /* For non-mask insn with ma, without tail policy operand.  */
  __NORMAL_OP_MA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		   | USE_VUNDEF_MERGE_P | MDEFAULT_POLICY_P,
  /* For mask insn with tama.  */
  __MASK_OP_TAMA = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P
		   | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P,
  /* For mask insn with tamu.  */
  __MASK_OP_TAMU
    = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | TDEFAULT_POLICY_P | MU_POLICY_P,
  /* For mask insn with tuma.  */
  __MASK_OP_TUMA = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		   | TU_POLICY_P | MDEFAULT_POLICY_P,
  /* For mask insn with mu.  */
  __MASK_OP_MU = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | MU_POLICY_P,
  /* For mask insn with ta, without mask policy operand.  */
  __MASK_OP_TA = HAS_DEST_P | HAS_MASK_P | HAS_MERGE_P | USE_VUNDEF_MERGE_P
		 | TDEFAULT_POLICY_P,

  /* Nullary operator, e.g. vid.v.  */
  NULLARY_OP = __NORMAL_OP | NULLARY_OP_P,

  /* Unary operator.  */
  UNARY_OP = __NORMAL_OP | UNARY_OP_P,
  UNARY_OP_TAMA = __MASK_OP_TAMA | UNARY_OP_P,
  UNARY_OP_TAMU = __MASK_OP_TAMU | UNARY_OP_P,
  UNARY_OP_FRM_DYN = UNARY_OP | FRM_DYN_P,
  UNARY_OP_FRM_RMM = UNARY_OP | FRM_RMM_P,
  UNARY_OP_FRM_RUP = UNARY_OP | FRM_RUP_P,
  UNARY_OP_FRM_RDN = UNARY_OP | FRM_RDN_P,
  UNARY_OP_TAMA_FRM_DYN = UNARY_OP_TAMA | FRM_DYN_P,
  UNARY_OP_TAMA_FRM_RUP = UNARY_OP_TAMA | FRM_RUP_P,
  UNARY_OP_TAMA_FRM_RDN = UNARY_OP_TAMA | FRM_RDN_P,
  UNARY_OP_TAMA_FRM_RMM = UNARY_OP_TAMA | FRM_RMM_P,
  UNARY_OP_TAMA_FRM_RNE = UNARY_OP_TAMA | FRM_RNE_P,
  UNARY_OP_TAMU_FRM_DYN = UNARY_OP_TAMU | FRM_DYN_P,
  UNARY_OP_TAMU_FRM_RUP = UNARY_OP_TAMU | FRM_RUP_P,
  UNARY_OP_TAMU_FRM_RDN = UNARY_OP_TAMU | FRM_RDN_P,
  UNARY_OP_TAMU_FRM_RMM = UNARY_OP_TAMU | FRM_RMM_P,
  UNARY_OP_TAMU_FRM_RNE = UNARY_OP_TAMU | FRM_RNE_P,

  /* Binary operator.  */
  BINARY_OP = __NORMAL_OP | BINARY_OP_P,
  BINARY_OP_TAMA = __MASK_OP_TAMA | BINARY_OP_P,
  BINARY_OP_TAMU = __MASK_OP_TAMU | BINARY_OP_P,
  BINARY_OP_TUMA = __MASK_OP_TUMA | BINARY_OP_P,
  BINARY_OP_FRM_DYN = BINARY_OP | FRM_DYN_P,
  BINARY_OP_VXRM_RNU = BINARY_OP | VXRM_RNU_P,
  BINARY_OP_VXRM_RDN = BINARY_OP | VXRM_RDN_P,

  /* Ternary operator.  Always has a real merge operand.  */
  TERNARY_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
	       | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P | TERNARY_OP_P,
  TERNARY_OP_FRM_DYN = TERNARY_OP | FRM_DYN_P,

  /* For vwmacc, no merge operand.  */
  WIDEN_TERNARY_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P
		     | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P | TERNARY_OP_P,
  WIDEN_TERNARY_OP_FRM_DYN = WIDEN_TERNARY_OP | FRM_DYN_P,

  /* For vmerge, no mask operand, no mask policy operand.  */
  MERGE_OP = __NORMAL_OP_TA2 | TERNARY_OP_P,

  /* For vmerge with TU policy.  */
  MERGE_OP_TU = HAS_DEST_P | HAS_MERGE_P | TERNARY_OP_P | TU_POLICY_P,

  /* For vm<compare>, no tail policy operand.  */
  COMPARE_OP = __NORMAL_OP_MA | TERNARY_OP_P,
  COMPARE_OP_MU = __MASK_OP_MU | TERNARY_OP_P,

  /* For scatter insn: no dest operand, no merge operand, no tail and mask
     policy operands.  */
  SCATTER_OP_M = HAS_MASK_P | TERNARY_OP_P,

  /* For vcpop.m, no merge operand, no tail and mask policy operands.  */
  CPOP_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | UNARY_OP_P
	    | VTYPE_MODE_FROM_OP1_P,

  /* For mask instructions, no tail and mask policy operands.  */
  UNARY_MASK_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		  | USE_VUNDEF_MERGE_P | UNARY_OP_P,
  BINARY_MASK_OP = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P | HAS_MERGE_P
		   | USE_VUNDEF_MERGE_P | BINARY_OP_P,

  /* For vcompress.vm.  */
  COMPRESS_OP = __NORMAL_OP_TA2 | BINARY_OP_P,

  /* Has merge operand but uses ta.  */
  COMPRESS_OP_MERGE
    = HAS_DEST_P | HAS_MERGE_P | TDEFAULT_POLICY_P | BINARY_OP_P,

  /* For vslideup.up; has merge operand but uses ta.  */
  SLIDEUP_OP_MERGE = HAS_DEST_P | HAS_MASK_P | USE_ALL_TRUES_MASK_P
		     | HAS_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
		     | BINARY_OP_P,

  /* For vreduce, no mask policy operand.  */
  REDUCE_OP = __NORMAL_OP_TA | BINARY_OP_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_M = __MASK_OP_TA | BINARY_OP_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_FRM_DYN = REDUCE_OP | FRM_DYN_P | VTYPE_MODE_FROM_OP1_P,
  REDUCE_OP_M_FRM_DYN
    = __MASK_OP_TA | BINARY_OP_P | FRM_DYN_P | VTYPE_MODE_FROM_OP1_P,

  /* For vmv.s.x/vfmv.s.f.  */
  SCALAR_MOVE_OP = HAS_DEST_P | HAS_MASK_P | USE_ONE_TRUE_MASK_P | HAS_MERGE_P
		   | USE_VUNDEF_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
		   | UNARY_OP_P,

  SCALAR_MOVE_MERGED_OP = HAS_DEST_P | HAS_MASK_P | USE_ONE_TRUE_MASK_P
			  | HAS_MERGE_P | TDEFAULT_POLICY_P | MDEFAULT_POLICY_P
			  | UNARY_OP_P,
};

/* The RISC-V vsetvli pass uses "known vlmax" operations for optimization.
   Whether or not an instruction actually is a vlmax operation is not
   recognizable from the length operand alone; the avl_type operand
   is used instead.  In general, there are two cases:

    - Emit a vlmax operation by calling emit_vlmax_insn[_lra].  Here we emit
      a vsetvli with vlmax configuration and set the avl_type to VLMAX for
      VLA modes or VLS for VLS modes.
    - Emit an operation that uses the existing (last-set) length and
      set the avl_type to NONVLMAX.

   Sometimes we also need to set the VLMAX or VLS avl_type for an operation
   that already uses a given length register.  This can happen during or
   after register allocation when we are not allowed to create a new
   register.  For that case we also allow setting the avl_type to VLMAX or
   VLS.  */

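/* In sketch form: emit_vlmax_insn and emit_vlmax_insn_lra (declared below)
   mark the emitted insn with avl_type VLMAX (or VLS for VLS modes), whereas
   emit_nonvlmax_insn takes an additional rtx, presumably the explicit
   length, and marks the insn with avl_type NONVLMAX.  */
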
/* Routines implemented in riscv-vector-builtins.cc.  */
void init_builtins (void);
const char *mangle_builtin_type (const_tree);
tree lookup_vector_type_attribute (const_tree);
bool builtin_type_p (const_tree);
bool verify_type_context (location_t, type_context_kind, const_tree, bool);
bool expand_vec_perm_const (machine_mode, machine_mode, rtx, rtx, rtx,
			    const vec_perm_indices &);
void handle_pragma_vector (void);
tree builtin_decl (unsigned, bool);
gimple *gimple_fold_builtin (unsigned int, gimple_stmt_iterator *, gcall *);
rtx expand_builtin (unsigned int, tree, rtx);
bool check_builtin_call (location_t, vec<location_t>, unsigned int,
			 tree, unsigned int, tree *);
tree resolve_overloaded_builtin (location_t, unsigned int, tree,
				 vec<tree, va_gc> *);
bool const_vec_all_same_in_range_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
bool legitimize_move (rtx, rtx *);
void emit_vlmax_vsetvl (machine_mode, rtx);
void emit_hard_vlmax_vsetvl (machine_mode, rtx);
void emit_vlmax_insn (unsigned, unsigned, rtx *);
void emit_nonvlmax_insn (unsigned, unsigned, rtx *, rtx);
void emit_vlmax_insn_lra (unsigned, unsigned, rtx *, rtx);
enum vlmul_type get_vlmul (machine_mode);
rtx get_vlmax_rtx (machine_mode);
unsigned int get_ratio (machine_mode);
unsigned int get_nf (machine_mode);
machine_mode get_subpart_mode (machine_mode);
int get_avl_type (rtx);
unsigned int calculate_ratio (unsigned int, enum vlmul_type);

enum tail_policy
{
  TAIL_UNDISTURBED = 0,
  TAIL_AGNOSTIC = 1,
  TAIL_ANY = 2,
};

enum mask_policy
{
  MASK_UNDISTURBED = 0,
  MASK_AGNOSTIC = 1,
  MASK_ANY = 2,
};

/* Return true if VALUE is agnostic or any policy.  */
#define IS_AGNOSTIC(VALUE) (bool) (VALUE & 0x1 || (VALUE >> 1 & 0x1))

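/* For example, IS_AGNOSTIC (1) and IS_AGNOSTIC (2) are true, while
   IS_AGNOSTIC (0) is false.  */
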
enum tail_policy get_prefer_tail_policy ();
enum mask_policy get_prefer_mask_policy ();
rtx get_avl_type_rtx (enum avl_type);
opt_machine_mode get_vector_mode (scalar_mode, poly_uint64);
opt_machine_mode get_tuple_mode (machine_mode, unsigned int);
bool neg_simm5_p (rtx);
bool has_vi_variant_p (rtx_code, rtx);
void expand_vec_cmp (rtx, rtx_code, rtx, rtx, rtx = nullptr, rtx = nullptr);
bool expand_vec_cmp_float (rtx, rtx_code, rtx, rtx, bool);
void expand_cond_len_unop (unsigned, rtx *);
void expand_cond_len_binop (unsigned, rtx *);
void expand_reduction (unsigned, unsigned, rtx *, rtx);
void expand_vec_ceil (rtx, rtx, machine_mode, machine_mode);
void expand_vec_floor (rtx, rtx, machine_mode, machine_mode);
void expand_vec_nearbyint (rtx, rtx, machine_mode, machine_mode);
void expand_vec_rint (rtx, rtx, machine_mode, machine_mode);
void expand_vec_round (rtx, rtx, machine_mode, machine_mode);
void expand_vec_trunc (rtx, rtx, machine_mode, machine_mode);
void expand_vec_roundeven (rtx, rtx, machine_mode, machine_mode);
void expand_vec_lrint (rtx, rtx, machine_mode, machine_mode, machine_mode);
void expand_vec_lround (rtx, rtx, machine_mode, machine_mode, machine_mode);
void expand_vec_lceil (rtx, rtx, machine_mode, machine_mode);
void expand_vec_lfloor (rtx, rtx, machine_mode, machine_mode);

bool sew64_scalar_helper (rtx *, rtx *, rtx, machine_mode,
			  bool, void (*)(rtx *, rtx), enum avl_type);
rtx gen_scalar_move_mask (machine_mode);
rtx gen_no_side_effects_vsetvl_rtx (machine_mode, rtx, rtx);

/* RVV vector register sizes.
   TODO: Currently, we only add RVV_32/RVV_64/RVV_128, we may need to
   support other values in the future.  */

bool slide1_sew64_helper (int, machine_mode, machine_mode,
			  machine_mode, rtx *);
rtx gen_avl_for_scalar_move (rtx);
void expand_tuple_move (rtx *);
bool expand_block_move (rtx, rtx, rtx);
machine_mode preferred_simd_mode (scalar_mode);
machine_mode get_mask_mode (machine_mode);
void expand_vec_series (rtx, rtx, rtx, rtx = 0);
void expand_vec_init (rtx, rtx);
void expand_vec_perm (rtx, rtx, rtx, rtx);
void expand_select_vl (rtx *);
void expand_load_store (rtx *, bool);
void expand_gather_scatter (rtx *, bool);
void expand_cond_len_ternop (unsigned, rtx *);
void prepare_ternary_operands (rtx *);
void expand_lanes_load_store (rtx *, bool);
void expand_fold_extract_last (rtx *);
void expand_cond_unop (unsigned, rtx *);
void expand_cond_binop (unsigned, rtx *);
void expand_cond_ternop (unsigned, rtx *);
void expand_popcount (rtx *);
void expand_rawmemchr (machine_mode, rtx, rtx, rtx, bool = false);
bool expand_strcmp (rtx, rtx, rtx, rtx, unsigned HOST_WIDE_INT, bool);
void emit_vec_extract (rtx, rtx, rtx);

/* Rounding mode bitfield for fixed point VXRM.  */
enum fixed_point_rounding_mode
{
  VXRM_RNU = 0,
  VXRM_RNE = 1,
  VXRM_RDN = 2,
  VXRM_ROD = 3,
};

/* Rounding mode bitfield for floating point FRM.  The values of the enum
   come from the link below.
   https://github.com/riscv/riscv-isa-manual/blob/main/src/f-st-ext.adoc#floating-point-control-and-status-register
 */
enum floating_point_rounding_mode
{
  FRM_RNE = 0, /* Aka 0b000.  */
  FRM_RTZ = 1, /* Aka 0b001.  */
  FRM_RDN = 2, /* Aka 0b010.  */
  FRM_RUP = 3, /* Aka 0b011.  */
  FRM_RMM = 4, /* Aka 0b100.  */
  FRM_DYN = 7, /* Aka 0b111.  */
  FRM_STATIC_MIN = FRM_RNE,
  FRM_STATIC_MAX = FRM_RMM,
};

enum floating_point_rounding_mode get_frm_mode (rtx);
opt_machine_mode vectorize_related_mode (machine_mode, scalar_mode,
					 poly_uint64);
unsigned int autovectorize_vector_modes (vec<machine_mode> *, bool);
bool cmp_lmul_le_one (machine_mode);
bool cmp_lmul_gt_one (machine_mode);
bool vls_mode_valid_p (machine_mode);
bool vlmax_avl_type_p (rtx_insn *);
bool has_vl_op (rtx_insn *);
bool tail_agnostic_p (rtx_insn *);
void validate_change_or_fail (rtx, rtx *, rtx, bool);
bool nonvlmax_avl_type_p (rtx_insn *);
bool vlmax_avl_p (rtx);
uint8_t get_sew (rtx_insn *);
enum vlmul_type get_vlmul (rtx_insn *);
int count_regno_occurrences (rtx_insn *, unsigned int);
bool imm_avl_p (machine_mode);
bool can_be_broadcasted_p (rtx);
bool gather_scatter_valid_offset_p (machine_mode);
HOST_WIDE_INT estimated_poly_value (poly_int64, unsigned int);
bool whole_reg_to_reg_move_p (rtx *, machine_mode, int);
bool splat_to_scalar_move_p (rtx *);

} // namespace riscv_vector

/* We classify builtin types into two classes:
   1. General builtin class, which is defined in riscv_builtins.
   2. Vector builtin class, a special builtin class whose intrinsics are
      registered through the vector intrinsic pragma.  */
enum riscv_builtin_class
{
  RISCV_BUILTIN_GENERAL,
  RISCV_BUILTIN_VECTOR
};

const unsigned int RISCV_BUILTIN_SHIFT = 1;

/* Mask that selects the riscv_builtin_class part of a function code.  */
const unsigned int RISCV_BUILTIN_CLASS = (1 << RISCV_BUILTIN_SHIFT) - 1;

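/* Illustrative sketch, assuming the usual GCC convention that the class sits
   in the low RISCV_BUILTIN_SHIFT bits of a function code and the rest is a
   class-specific subcode (subcode is a placeholder):

     unsigned int code
       = (subcode << RISCV_BUILTIN_SHIFT) | RISCV_BUILTIN_GENERAL;
     enum riscv_builtin_class klass
       = (enum riscv_builtin_class) (code & RISCV_BUILTIN_CLASS);
     unsigned int fn_subcode = code >> RISCV_BUILTIN_SHIFT;  */
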
/* Routines implemented in riscv-string.cc.  */
extern bool riscv_expand_strcmp (rtx, rtx, rtx, rtx, rtx);
extern bool riscv_expand_strlen (rtx, rtx, rtx, rtx);

/* Routines implemented in thead.cc.  */
extern bool extract_base_offset_in_addr (rtx, rtx *, rtx *);
extern bool th_mempair_operands_p (rtx[4], bool, machine_mode);
extern void th_mempair_order_operands (rtx[4], bool, machine_mode);
extern void th_mempair_prepare_save_restore_operands (rtx[4], bool,
extern void th_mempair_save_restore_regs (rtx[4], bool, machine_mode);
extern unsigned int th_int_get_mask (unsigned int);
extern unsigned int th_int_get_save_adjustment (void);
extern rtx th_int_adjust_cfi_prologue (unsigned int);
extern const char *th_asm_output_opcode (FILE *asm_out_file, const char *p);
extern const char *th_mempair_output_move (rtx[4], bool, machine_mode,
					   RTX_CODE);
extern bool th_memidx_legitimate_modify_p (rtx);
extern bool th_memidx_legitimate_modify_p (rtx, bool);
extern bool th_memidx_legitimate_index_p (rtx);
extern bool th_memidx_legitimate_index_p (rtx, bool);
extern bool th_classify_address (struct riscv_address_info *,
				 rtx, machine_mode, bool);
extern const char *th_output_move (rtx, rtx);
extern bool th_print_operand_address (FILE *, machine_mode, rtx);

extern bool riscv_use_divmod_expander (void);
void riscv_init_cumulative_args (CUMULATIVE_ARGS *, tree, rtx, tree, int);
extern bool riscv_option_valid_attribute_p (tree, tree, tree, int);
extern void riscv_override_options_internal (struct gcc_options *);

struct riscv_tune_param;

/* Information about one micro-arch we know about.  */
struct riscv_tune_info
{
  /* This micro-arch's canonical name.  */
  const char *name;

  /* Which automaton to use for tuning.  */
  enum riscv_microarchitecture_type microarchitecture;

  /* Tuning parameters for this micro-arch.  */
  const struct riscv_tune_param *tune_param;
};

const struct riscv_tune_info *riscv_parse_tune (const char *, bool);
const cpu_vector_cost *get_vector_costs ();

enum
{
  RISCV_MAJOR_VERSION_BASE = 1000000,
  RISCV_MINOR_VERSION_BASE = 1000,
  RISCV_REVISION_VERSION_BASE = 1,
};

#endif /* ! GCC_RISCV_PROTOS_H */