[AArch64] Relax modes_tieable_p and cannot_change_mode_class
[official-gcc.git] / gcc / config / aarch64 / aarch64-protos.h
/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2014 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#ifndef GCC_AARCH64_PROTOS_H
#define GCC_AARCH64_PROTOS_H

/*
   SYMBOL_CONTEXT_ADR
     The symbol is used in a load-address operation.
   SYMBOL_CONTEXT_MEM
     The symbol is used as the address in a MEM.
 */
enum aarch64_symbol_context
{
  SYMBOL_CONTEXT_MEM,
  SYMBOL_CONTEXT_ADR
};
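
/* Illustrative sketch only (hypothetical caller, not part of this header):
   a symbol appearing as the address inside a MEM would be classified with

     aarch64_classify_symbolic_expression (addr, SYMBOL_CONTEXT_MEM);

   while a symbol whose address is merely being computed (e.g. `p = &foo;')
   would be classified with SYMBOL_CONTEXT_ADR instead.  */
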
/* SYMBOL_SMALL_ABSOLUTE: Generate symbol accesses through
   high and lo relocs that calculate the base address using a PC
   relative reloc.
   So to get the address of foo, we generate
     adrp x0, foo
     add  x0, x0, :lo12:foo

   To load or store something to foo, we could use the corresponding
   load/store variants that generate an
     ldr x0, [x0, :lo12:foo]
   or
     str x1, [x0, :lo12:foo]

   This corresponds to the small code model of the compiler.

   SYMBOL_SMALL_GOT: Similar to the one above but this
   gives us the GOT entry of the symbol being referred to:
   Thus calculating the GOT entry for foo is done using the
   following sequence of instructions.  The ADRP instruction
   gets us to the page containing the GOT entry of the symbol
   and the got_lo12 gets us the actual offset in it.

     adrp x0, :got:foo
     ldr  x0, [x0, :gotoff_lo12:foo]

   This corresponds to the small PIC model of the compiler.

   SYMBOL_SMALL_TLSGD
   SYMBOL_SMALL_TLSDESC
   SYMBOL_SMALL_GOTTPREL
   SYMBOL_SMALL_TPREL
   Each of these represents a thread-local symbol, and corresponds to the
   thread local storage relocation operator for the symbol being referred to.

   SYMBOL_TINY_ABSOLUTE

   Generate symbol accesses as a PC relative address using a single
   instruction.  To compute the address of symbol foo, we generate:

     ADR x0, foo

   SYMBOL_TINY_GOT

   Generate symbol accesses via the GOT using a single PC relative
   instruction.  To compute the address of symbol foo, we generate:

     ldr t0, :got:foo

   The value of foo can subsequently be read using:

     ldrb t0, [t0]

   SYMBOL_FORCE_TO_MEM: Global variables are addressed using the
   constant pool.  All variable addresses are spilled into constant
   pools.  The constant pools themselves are addressed using PC
   relative accesses.  This only works for the large code model.  */

enum aarch64_symbol_type
{
  SYMBOL_SMALL_ABSOLUTE,
  SYMBOL_SMALL_GOT,
  SYMBOL_SMALL_TLSGD,
  SYMBOL_SMALL_TLSDESC,
  SYMBOL_SMALL_GOTTPREL,
  SYMBOL_SMALL_TPREL,
  SYMBOL_TINY_ABSOLUTE,
  SYMBOL_TINY_GOT,
  SYMBOL_FORCE_TO_MEM
};
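
/* Illustrative sketch only (hypothetical, not the backend's actual code):
   callers typically switch on the classification to choose an expansion,
   roughly along these lines:

     switch (aarch64_classify_symbol (sym, SYMBOL_CONTEXT_ADR))
       {
       case SYMBOL_SMALL_ABSOLUTE:
         // emit an adrp + add :lo12: pair (small code model)
         break;
       case SYMBOL_TINY_ABSOLUTE:
         // emit a single adr
         break;
       case SYMBOL_FORCE_TO_MEM:
         // address goes through the constant pool (large code model)
         break;
       default:
         break;
       }
 */
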
/* A set of tuning parameters contains references to size and time
   cost models and vectors for address cost calculations, register
   move costs and memory move costs.  */

/* Additional cost for addresses.  */
struct cpu_addrcost_table
{
  const int pre_modify;
  const int post_modify;
  const int register_offset;
  const int register_extend;
  const int imm_offset;
};
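
/* Illustrative sketch only: per-CPU instances of this table live in the
   backend's .c files, not in this header.  The name and values below are
   hypothetical and merely show the shape of an initializer:

     static const struct cpu_addrcost_table example_addrcost_table =
     {
       .pre_modify = 0,
       .post_modify = 0,
       .register_offset = 0,
       .register_extend = 0,
       .imm_offset = 0
     };
 */
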
/* Additional costs for register copies.  Cost is for one register.  */
struct cpu_regmove_cost
{
  const int GP2GP;
  const int GP2FP;
  const int FP2GP;
  const int FP2FP;
};
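
/* Illustrative sketch only (hypothetical name and values): a CPU that pays
   an extra penalty for moves crossing between the general-purpose and
   FP/SIMD register files might be described as:

     static const struct cpu_regmove_cost example_regmove_cost =
     {
       .GP2GP = 1,
       .GP2FP = 2,
       .FP2GP = 2,
       .FP2FP = 1
     };
 */
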
/* Cost for vector insn classes.  */
struct cpu_vector_cost
{
  const int scalar_stmt_cost;            /* Cost of any scalar operation,
                                            excluding load and store.  */
  const int scalar_load_cost;            /* Cost of scalar load.  */
  const int scalar_store_cost;           /* Cost of scalar store.  */
  const int vec_stmt_cost;               /* Cost of any vector operation,
                                            excluding load, store,
                                            vector-to-scalar and
                                            scalar-to-vector operation.  */
  const int vec_to_scalar_cost;          /* Cost of vec-to-scalar operation.  */
  const int scalar_to_vec_cost;          /* Cost of scalar-to-vector
                                            operation.  */
  const int vec_align_load_cost;         /* Cost of aligned vector load.  */
  const int vec_unalign_load_cost;       /* Cost of unaligned vector load.  */
  const int vec_unalign_store_cost;      /* Cost of unaligned vector store.  */
  const int vec_store_cost;              /* Cost of vector store.  */
  const int cond_taken_branch_cost;      /* Cost of taken branch.  */
  const int cond_not_taken_branch_cost;  /* Cost of not taken branch.  */
};
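
/* Illustrative sketch only (hypothetical name and values), showing how the
   vectorizer cost hooks might be parameterized for one CPU:

     static const struct cpu_vector_cost example_vector_cost =
     {
       .scalar_stmt_cost = 1,
       .scalar_load_cost = 1,
       .scalar_store_cost = 1,
       .vec_stmt_cost = 1,
       .vec_to_scalar_cost = 1,
       .scalar_to_vec_cost = 1,
       .vec_align_load_cost = 1,
       .vec_unalign_load_cost = 2,
       .vec_unalign_store_cost = 2,
       .vec_store_cost = 1,
       .cond_taken_branch_cost = 3,
       .cond_not_taken_branch_cost = 1
     };
 */
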
struct tune_params
{
  const struct cpu_cost_table *const insn_extra_cost;
  const struct cpu_addrcost_table *const addr_cost;
  const struct cpu_regmove_cost *const regmove_cost;
  const struct cpu_vector_cost *const vec_costs;
  const int memmov_cost;
  const int issue_rate;
};
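
/* Illustrative sketch only: a hypothetical tuning (all names here are
   invented) ties the cost tables above together; the backend selects one
   such tune_params instance per -mcpu/-mtune target:

     static const struct tune_params example_tunings =
     {
       .insn_extra_cost = &example_extra_costs,
       .addr_cost = &example_addrcost_table,
       .regmove_cost = &example_regmove_cost,
       .vec_costs = &example_vector_cost,
       .memmov_cost = 4,
       .issue_rate = 2
     };
 */
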
HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
bool aarch64_bitmask_imm (HOST_WIDE_INT val, enum machine_mode);
bool aarch64_cannot_change_mode_class (enum machine_mode,
                                       enum machine_mode,
                                       enum reg_class);
enum aarch64_symbol_type
aarch64_classify_symbolic_expression (rtx, enum aarch64_symbol_context);
bool aarch64_constant_address_p (rtx);
bool aarch64_float_const_zero_rtx_p (rtx);
bool aarch64_function_arg_regno_p (unsigned);
bool aarch64_gen_movmemqi (rtx *);
bool aarch64_gimple_fold_builtin (gimple_stmt_iterator *);
bool aarch64_is_extend_from_extract (enum machine_mode, rtx, rtx);
bool aarch64_is_long_call_p (rtx);
bool aarch64_label_mentioned_p (rtx);
bool aarch64_legitimate_pic_operand_p (rtx);
bool aarch64_modes_tieable_p (enum machine_mode mode1,
                              enum machine_mode mode2);
bool aarch64_move_imm (HOST_WIDE_INT, enum machine_mode);
bool aarch64_mov_operand_p (rtx, enum aarch64_symbol_context,
                            enum machine_mode);
char *aarch64_output_scalar_simd_mov_immediate (rtx, enum machine_mode);
char *aarch64_output_simd_mov_immediate (rtx, enum machine_mode, unsigned);
bool aarch64_pad_arg_upward (enum machine_mode, const_tree);
bool aarch64_pad_reg_upward (enum machine_mode, const_tree, bool);
bool aarch64_regno_ok_for_base_p (int, bool);
bool aarch64_regno_ok_for_index_p (int, bool);
bool aarch64_simd_imm_scalar_p (rtx x, enum machine_mode mode);
bool aarch64_simd_imm_zero_p (rtx, enum machine_mode);
bool aarch64_simd_scalar_immediate_valid_for_move (rtx, enum machine_mode);
bool aarch64_simd_shift_imm_p (rtx, enum machine_mode, bool);
bool aarch64_simd_valid_immediate (rtx, enum machine_mode, bool,
                                   struct simd_immediate_info *);
bool aarch64_symbolic_address_p (rtx);
bool aarch64_uimm12_shift (HOST_WIDE_INT);
const char *aarch64_output_casesi (rtx *);
const char *aarch64_rewrite_selected_cpu (const char *name);

enum aarch64_symbol_type aarch64_classify_symbol (rtx,
                                                  enum aarch64_symbol_context);
enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
enum reg_class aarch64_regno_regclass (unsigned);
int aarch64_asm_preferred_eh_data_format (int, int);
int aarch64_hard_regno_mode_ok (unsigned, enum machine_mode);
int aarch64_hard_regno_nregs (unsigned, enum machine_mode);
int aarch64_simd_attr_length_move (rtx);
int aarch64_uxt_size (int, HOST_WIDE_INT);
rtx aarch64_final_eh_return_addr (void);
rtx aarch64_legitimize_reload_address (rtx *, enum machine_mode, int, int, int);
const char *aarch64_output_move_struct (rtx *operands);
rtx aarch64_return_addr (int, rtx);
rtx aarch64_simd_gen_const_vector_dup (enum machine_mode, int);
bool aarch64_simd_mem_operand_p (rtx);
rtx aarch64_simd_vect_par_cnst_half (enum machine_mode, bool);
rtx aarch64_tls_get_addr (void);
tree aarch64_fold_builtin (tree, int, tree *, bool);
unsigned aarch64_dbx_register_number (unsigned);
unsigned aarch64_trampoline_size (void);
void aarch64_asm_output_labelref (FILE *, const char *);
void aarch64_elf_asm_named_section (const char *, unsigned, tree);
void aarch64_expand_epilogue (bool);
void aarch64_expand_mov_immediate (rtx, rtx);
void aarch64_expand_prologue (void);
void aarch64_expand_vector_init (rtx, rtx);
void aarch64_function_profiler (FILE *, int);
void aarch64_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx,
                                   const_tree, unsigned);
void aarch64_init_expanders (void);
void aarch64_print_operand (FILE *, rtx, char);
void aarch64_print_operand_address (FILE *, rtx);

/* Initialize builtins for SIMD intrinsics.  */
void init_aarch64_simd_builtins (void);

void aarch64_simd_const_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
void aarch64_simd_disambiguate_copy (rtx *, rtx *, rtx *, unsigned int);

/* Emit code to place an AdvSIMD pair result in memory locations (with equal
   registers).  */
void aarch64_simd_emit_pair_result_insn (enum machine_mode,
                                         rtx (*intfn) (rtx, rtx, rtx), rtx,
                                         rtx);

/* Expand builtins for SIMD intrinsics.  */
rtx aarch64_simd_expand_builtin (int, tree, rtx);

void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT);

/* Emit code for reinterprets.  */
void aarch64_simd_reinterpret (rtx, rtx);

void aarch64_split_128bit_move (rtx, rtx);

bool aarch64_split_128bit_move_p (rtx, rtx);

void aarch64_split_simd_combine (rtx, rtx, rtx);

void aarch64_split_simd_move (rtx, rtx);

/* Check for a legitimate floating point constant for FMOV.  */
bool aarch64_float_const_representable_p (rtx);

#if defined (RTX_CODE)

bool aarch64_legitimate_address_p (enum machine_mode, rtx, RTX_CODE, bool);
enum machine_mode aarch64_select_cc_mode (RTX_CODE, rtx, rtx);
rtx aarch64_gen_compare_reg (RTX_CODE, rtx, rtx);
rtx aarch64_load_tp (rtx);

void aarch64_expand_compare_and_swap (rtx op[]);
void aarch64_split_compare_and_swap (rtx op[]);
void aarch64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, rtx);

#endif /* RTX_CODE */

void aarch64_init_builtins (void);
rtx aarch64_expand_builtin (tree exp,
                            rtx target,
                            rtx subtarget ATTRIBUTE_UNUSED,
                            enum machine_mode mode ATTRIBUTE_UNUSED,
                            int ignore ATTRIBUTE_UNUSED);
tree aarch64_builtin_decl (unsigned, bool ATTRIBUTE_UNUSED);

tree
aarch64_builtin_vectorized_function (tree fndecl,
                                     tree type_out,
                                     tree type_in);

extern void aarch64_split_combinev16qi (rtx operands[3]);
extern void aarch64_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel);
extern bool
aarch64_expand_vec_perm_const (rtx target, rtx op0, rtx op1, rtx sel);

#endif /* GCC_AARCH64_PROTOS_H */