Add an rsqrt_optab and IFN_RSQRT internal function
gcc/config/aarch64/aarch64-protos.h
/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2015 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#ifndef GCC_AARCH64_PROTOS_H
#define GCC_AARCH64_PROTOS_H

#include "input.h"
/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
   high and lo relocs that calculate the base address using a PC
   relative reloc.
   So to get the address of foo, we generate
   adrp x0, foo
   add  x0, x0, :lo12:foo

   To load or store something to foo, we could use the corresponding
   load store variants that generate an
   ldr x0, [x0, :lo12:foo]
   or
   str x1, [x0, :lo12:foo]

   This corresponds to the small code model of the compiler.

   SYMBOL_SMALL_GOT_4G: Similar to the one above, but this gives us
   the GOT entry of the symbol being referred to.  Calculating the
   GOT entry for foo is done using the following sequence of
   instructions.  The ADRP instruction gets us to the page containing
   the GOT entry of the symbol, and the got_lo12 reloc gives us the
   actual offset within it; together, the base and offset can address
   a GOT table of up to 4G in size.

   adrp x0, :got:foo
   ldr  x0, [x0, :gotoff_lo12:foo]

   This corresponds to the small PIC model of the compiler.

   SYMBOL_SMALL_GOT_28K: Similar to SYMBOL_SMALL_GOT_4G, but used for
   symbols restricted to a 28K GOT table size.

   ldr reg, [gp, #:gotpage_lo15:sym]

   This corresponds to the -fpic model for the small memory model of the
   compiler.

   SYMBOL_SMALL_TLSGD
   SYMBOL_SMALL_TLSDESC
   SYMBOL_SMALL_TLSIE
   SYMBOL_TINY_TLSIE
   SYMBOL_TLSLE12
   SYMBOL_TLSLE24
   SYMBOL_TLSLE32
   SYMBOL_TLSLE48
   Each of these represents a thread-local symbol, and corresponds to the
   thread local storage relocation operator for the symbol being referred to.

   SYMBOL_TINY_ABSOLUTE

   Generate symbol accesses as a PC relative address using a single
   instruction.  To compute the address of symbol foo, we generate:

   ADR x0, foo

   SYMBOL_TINY_GOT

   Generate symbol accesses via the GOT using a single PC relative
   instruction.  To compute the address of symbol foo, we generate:

   ldr t0, :got:foo

   The value of foo can subsequently be read using:

   ldrb t0, [t0]

   SYMBOL_FORCE_TO_MEM: Global variables are addressed using the
   constant pool.  All variable addresses are spilled into constant
   pools.  The constant pools themselves are addressed using PC
   relative accesses.  This only works for the large code model.  */
enum aarch64_symbol_type
{
  SYMBOL_SMALL_ABSOLUTE,
  SYMBOL_SMALL_GOT_28K,
  SYMBOL_SMALL_GOT_4G,
  SYMBOL_SMALL_TLSGD,
  SYMBOL_SMALL_TLSDESC,
  SYMBOL_SMALL_TLSIE,
  SYMBOL_TINY_ABSOLUTE,
  SYMBOL_TINY_GOT,
  SYMBOL_TINY_TLSIE,
  SYMBOL_TLSLE12,
  SYMBOL_TLSLE24,
  SYMBOL_TLSLE32,
  SYMBOL_TLSLE48,
  SYMBOL_FORCE_TO_MEM
};
/* A set of tuning parameters contains references to size and time
   cost models and vectors for address cost calculations, register
   move costs and memory move costs.  */

/* Scaled addressing modes can vary cost depending on the mode of the
   value to be loaded/stored.  QImode values cannot use scaled
   addressing modes.  */

struct scale_addr_mode_cost
{
  const int hi;
  const int si;
  const int di;
  const int ti;
};
/* Additional cost for addresses.  */
struct cpu_addrcost_table
{
  const struct scale_addr_mode_cost addr_scale_costs;
  const int pre_modify;
  const int post_modify;
  const int register_offset;
  const int register_sextend;
  const int register_zextend;
  const int imm_offset;
};
/* Additional costs for register copies.  Cost is for one register.  */
struct cpu_regmove_cost
{
  const int GP2GP;
  const int GP2FP;
  const int FP2GP;
  const int FP2FP;
};
/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  const int scalar_stmt_cost;            /* Cost of any scalar operation,
                                            excluding load and store.  */
  const int scalar_load_cost;            /* Cost of scalar load.  */
  const int scalar_store_cost;           /* Cost of scalar store.  */
  const int vec_stmt_cost;               /* Cost of any vector operation,
                                            excluding load, store,
                                            vector-to-scalar and
                                            scalar-to-vector operation.  */
  const int vec_to_scalar_cost;          /* Cost of vec-to-scalar operation.  */
  const int scalar_to_vec_cost;          /* Cost of scalar-to-vector
                                            operation.  */
  const int vec_align_load_cost;         /* Cost of aligned vector load.  */
  const int vec_unalign_load_cost;       /* Cost of unaligned vector load.  */
  const int vec_unalign_store_cost;      /* Cost of unaligned vector store.  */
  const int vec_store_cost;              /* Cost of vector store.  */
  const int cond_taken_branch_cost;      /* Cost of taken branch.  */
  const int cond_not_taken_branch_cost;  /* Cost of not taken branch.  */
};
/* Branch costs.  */
struct cpu_branch_cost
{
  const int predictable;    /* Predictable branch or optimizing for size.  */
  const int unpredictable;  /* Unpredictable branch or optimizing for speed.  */
};
struct tune_params
{
  const struct cpu_cost_table *insn_extra_cost;
  const struct cpu_addrcost_table *addr_cost;
  const struct cpu_regmove_cost *regmove_cost;
  const struct cpu_vector_cost *vec_costs;
  const struct cpu_branch_cost *branch_costs;
  int memmov_cost;
  int issue_rate;
  unsigned int fusible_ops;
  int function_align;
  int jump_align;
  int loop_align;
  int int_reassoc_width;
  int fp_reassoc_width;
  int vec_reassoc_width;
  int min_div_recip_mul_sf;
  int min_div_recip_mul_df;
  /* Value for aarch64_case_values_threshold; or 0 for the default.  */
  unsigned int max_case_values;
  /* Value for PARAM_L1_CACHE_LINE_SIZE; or 0 to use the default.  */
  unsigned int cache_line_size;

  /* An enum specifying how to take into account CPU autoprefetch
     capabilities during instruction scheduling:
     - AUTOPREFETCHER_OFF: Do not take autoprefetch capabilities into
       account.
     - AUTOPREFETCHER_WEAK: Attempt to sort sequences of loads/stores in
       order of offsets, but allow the pipeline hazard recognizer to alter
       that order to maximize multi-issue opportunities.
     - AUTOPREFETCHER_STRONG: Attempt to sort sequences of loads/stores in
       order of offsets, and prefer this even if it restricts multi-issue
       opportunities.  */
  enum aarch64_autoprefetch_model
  {
    AUTOPREFETCHER_OFF,
    AUTOPREFETCHER_WEAK,
    AUTOPREFETCHER_STRONG
  } autoprefetcher_model;

  unsigned int extra_tuning_flags;
};
#define AARCH64_FUSION_PAIR(x, name) \
  AARCH64_FUSE_##name##_index,
/* Supported fusion operations.  */
enum aarch64_fusion_pairs_index
{
#include "aarch64-fusion-pairs.def"
  AARCH64_FUSE_index_END
};
#undef AARCH64_FUSION_PAIR

#define AARCH64_FUSION_PAIR(x, name) \
  AARCH64_FUSE_##name = (1u << AARCH64_FUSE_##name##_index),
/* Supported fusion operations.  */
enum aarch64_fusion_pairs
{
  AARCH64_FUSE_NOTHING = 0,
#include "aarch64-fusion-pairs.def"
  AARCH64_FUSE_ALL = (1u << AARCH64_FUSE_index_END) - 1
};
#undef AARCH64_FUSION_PAIR
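
/* Illustrative expansion (a sketch, not part of the original header):
   assuming aarch64-fusion-pairs.def contains an entry such as

     AARCH64_FUSION_PAIR ("mov+movk", MOV_MOVK)

   the first #include above would yield the enumerator
   AARCH64_FUSE_MOV_MOVK_index, and the second the one-bit mask

     AARCH64_FUSE_MOV_MOVK = (1u << AARCH64_FUSE_MOV_MOVK_index)

   so every .def entry produces both an index and a mask.  The
   AARCH64_EXTRA_TUNING_OPTION macros below use the same scheme.  */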
#define AARCH64_EXTRA_TUNING_OPTION(x, name) \
  AARCH64_EXTRA_TUNE_##name##_index,
/* Supported tuning flags indexes.  */
enum aarch64_extra_tuning_flags_index
{
#include "aarch64-tuning-flags.def"
  AARCH64_EXTRA_TUNE_index_END
};
#undef AARCH64_EXTRA_TUNING_OPTION

#define AARCH64_EXTRA_TUNING_OPTION(x, name) \
  AARCH64_EXTRA_TUNE_##name = (1u << AARCH64_EXTRA_TUNE_##name##_index),
/* Supported tuning flags.  */
enum aarch64_extra_tuning_flags
{
  AARCH64_EXTRA_TUNE_NONE = 0,
#include "aarch64-tuning-flags.def"
  AARCH64_EXTRA_TUNE_ALL = (1u << AARCH64_EXTRA_TUNE_index_END) - 1
};
#undef AARCH64_EXTRA_TUNING_OPTION
extern struct tune_params aarch64_tune_params;
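
/* Usage sketch (an assumed example, not from this header): back-end code
   consults the active tuning through this object, e.g.

     if (aarch64_tune_params.fusible_ops & AARCH64_FUSE_MOV_MOVK)
       ...try to keep the mov/movk pair adjacent for macro-fusion...  */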
HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
int aarch64_get_condition_code (rtx);
bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode);
int aarch64_branch_cost (bool, bool);
enum aarch64_symbol_type aarch64_classify_symbolic_expression (rtx);
bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);
bool aarch64_constant_address_p (rtx);
bool aarch64_expand_movmem (rtx *);
bool aarch64_float_const_zero_rtx_p (rtx);
bool aarch64_function_arg_regno_p (unsigned);
bool aarch64_gen_movmemqi (rtx *);
bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *);
bool aarch64_handle_option (struct gcc_options *, struct gcc_options *,
			    const struct cl_decoded_option *, location_t);
bool aarch64_is_extend_from_extract (machine_mode, rtx, rtx);
bool aarch64_is_long_call_p (rtx);
bool aarch64_is_noplt_call_p (rtx);
bool aarch64_label_mentioned_p (rtx);
void aarch64_declare_function_name (FILE *, const char*, tree);
bool aarch64_legitimate_pic_operand_p (rtx);
bool aarch64_modes_tieable_p (machine_mode mode1, machine_mode mode2);
bool aarch64_move_imm (HOST_WIDE_INT, machine_mode);
bool aarch64_mov_operand_p (rtx, machine_mode);
int aarch64_simd_attr_length_rglist (enum machine_mode);
rtx aarch64_reverse_mask (enum machine_mode);
bool aarch64_offset_7bit_signed_scaled_p (machine_mode, HOST_WIDE_INT);
char *aarch64_output_scalar_simd_mov_immediate (rtx, machine_mode);
char *aarch64_output_simd_mov_immediate (rtx, machine_mode, unsigned);
bool aarch64_pad_arg_upward (machine_mode, const_tree);
bool aarch64_pad_reg_upward (machine_mode, const_tree, bool);
bool aarch64_regno_ok_for_base_p (int, bool);
bool aarch64_regno_ok_for_index_p (int, bool);
bool aarch64_simd_check_vect_par_cnst_half (rtx op, machine_mode mode,
					    bool high);
bool aarch64_simd_imm_scalar_p (rtx x, machine_mode mode);
bool aarch64_simd_imm_zero_p (rtx, machine_mode);
bool aarch64_simd_scalar_immediate_valid_for_move (rtx, machine_mode);
bool aarch64_simd_shift_imm_p (rtx, machine_mode, bool);
bool aarch64_simd_valid_immediate (rtx, machine_mode, bool,
				   struct simd_immediate_info *);
bool aarch64_symbolic_address_p (rtx);
bool aarch64_uimm12_shift (HOST_WIDE_INT);
bool aarch64_use_return_insn_p (void);
const char *aarch64_mangle_builtin_type (const_tree);
const char *aarch64_output_casesi (rtx *);
const char *aarch64_rewrite_selected_cpu (const char *name);

enum aarch64_symbol_type aarch64_classify_symbol (rtx, rtx);
enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
enum reg_class aarch64_regno_regclass (unsigned);
int aarch64_asm_preferred_eh_data_format (int, int);
int aarch64_fpconst_pow_of_2 (rtx);
machine_mode aarch64_hard_regno_caller_save_mode (unsigned, unsigned,
						  machine_mode);
int aarch64_hard_regno_mode_ok (unsigned, machine_mode);
int aarch64_hard_regno_nregs (unsigned, machine_mode);
int aarch64_simd_attr_length_move (rtx_insn *);
int aarch64_uxt_size (int, HOST_WIDE_INT);
int aarch64_vec_fpconst_pow_of_2 (rtx);
rtx aarch64_final_eh_return_addr (void);
rtx aarch64_legitimize_reload_address (rtx *, machine_mode, int, int, int);
const char *aarch64_output_move_struct (rtx *operands);
rtx aarch64_return_addr (int, rtx);
rtx aarch64_simd_gen_const_vector_dup (machine_mode, int);
bool aarch64_simd_mem_operand_p (rtx);
rtx aarch64_simd_vect_par_cnst_half (machine_mode, bool);
rtx aarch64_tls_get_addr (void);
std::string aarch64_get_extension_string_for_isa_flags (unsigned long);
tree aarch64_fold_builtin (tree, int, tree *, bool);
unsigned aarch64_dbx_register_number (unsigned);
unsigned aarch64_trampoline_size (void);
void aarch64_asm_output_labelref (FILE *, const char *);
void aarch64_cpu_cpp_builtins (cpp_reader *);
void aarch64_elf_asm_named_section (const char *, unsigned, tree);
const char * aarch64_gen_far_branch (rtx *, int, const char *, const char *);
void aarch64_err_no_fpadvsimd (machine_mode, const char *);
void aarch64_expand_epilogue (bool);
void aarch64_expand_mov_immediate (rtx, rtx);
void aarch64_expand_prologue (void);
void aarch64_expand_vector_init (rtx, rtx);
void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
				   const_tree, unsigned);
void aarch64_init_expanders (void);
void aarch64_init_simd_builtins (void);
void aarch64_emit_call_insn (rtx);
void aarch64_register_pragmas (void);
void aarch64_relayout_simd_types (void);
void aarch64_reset_previous_fndecl (void);
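
/* Emit a software estimate of the reciprocal square root into the
   destination operand.  Descriptive note, not from the original header:
   in the change this file belongs to ("Add an rsqrt_optab and IFN_RSQRT
   internal function"), this is presumably implemented as an FRSQRTE
   estimate refined by FRSQRTS Newton-Raphson steps; see the definition
   in aarch64.c for the exact sequence and operand order.  */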
void aarch64_emit_swrsqrt (rtx, rtx);

/* Initialize builtins for SIMD intrinsics.  */
void init_aarch64_simd_builtins (void);

void aarch64_simd_emit_reg_reg_move (rtx *, enum machine_mode, unsigned int);

/* Expand builtins for SIMD intrinsics.  */
rtx aarch64_simd_expand_builtin (int, tree, rtx);

void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree);

void aarch64_split_128bit_move (rtx, rtx);

bool aarch64_split_128bit_move_p (rtx, rtx);

void aarch64_split_simd_combine (rtx, rtx, rtx);

void aarch64_split_simd_move (rtx, rtx);

/* Check for a legitimate floating point constant for FMOV.  */
bool aarch64_float_const_representable_p (rtx);

#if defined (RTX_CODE)

bool aarch64_legitimate_address_p (machine_mode, rtx, RTX_CODE, bool);
machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
rtx aarch64_load_tp (rtx);

void aarch64_expand_compare_and_swap (rtx op[]);
void aarch64_split_compare_and_swap (rtx op[]);
void aarch64_gen_atomic_cas (rtx, rtx, rtx, rtx, rtx);

bool aarch64_atomic_ldop_supported_p (enum rtx_code);
void aarch64_gen_atomic_ldop (enum rtx_code, rtx, rtx, rtx, rtx, rtx);
void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);

bool aarch64_gen_adjusted_ldpstp (rtx *, bool, enum machine_mode, RTX_CODE);
#endif /* RTX_CODE */

void aarch64_init_builtins (void);

bool aarch64_process_target_attr (tree, const char*);
void aarch64_override_options_internal (struct gcc_options *);

rtx aarch64_expand_builtin (tree exp,
			    rtx target,
			    rtx subtarget ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED,
			    int ignore ATTRIBUTE_UNUSED);
tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED);
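
/* Descriptive note, not from the original header: based on the
   surrounding IFN_RSQRT change, this presumably returns the builtin
   decl that computes a reciprocal square root for the given function
   code, or NULL_TREE if none is available.  */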
tree aarch64_builtin_rsqrt (unsigned int);

tree aarch64_builtin_vectorized_function (unsigned int, tree, tree);

extern void aarch64_split_combinev16qi (rtx operands[3]);
extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
extern bool aarch64_madd_needs_nop (rtx_insn *);
extern void aarch64_final_prescan_insn (rtx_insn *);
extern bool
aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);
void aarch64_atomic_assign_expand_fenv (tree *, tree *, tree *);
int aarch64_ccmp_mode_to_code (enum machine_mode mode);

bool extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset);
bool aarch64_operands_ok_for_ldpstp (rtx *, bool, enum machine_mode);
bool aarch64_operands_adjust_ok_for_ldpstp (rtx *, bool, enum machine_mode);
extern bool aarch64_nopcrelative_literal_loads;
#endif /* GCC_AARCH64_PROTOS_H */