This PR shows that we get the load/store_lanes logic wrong for arm big-endian.
[official-gcc.git] / gcc / config / arm / arm.c
blob cb6ab8191b3d7ce429629b80439e8020436045e4
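For context, "load/store_lanes" refers to the vectoriser using the NEON structure loads and stores (vld2/vld3/vld4 and vst2/vst3/vst4) for interleaved accesses; their per-lane element numbering is what interacts with big-endian ordering. A minimal, hypothetical C reduction of the kind of loop that normally takes this path (illustrative only, not the testcase attached to this PR) is:

/* With e.g. -O3 -mfpu=neon, the three interleaved byte streams below are
   normally vectorised with vld3/vst3 (load_lanes/store_lanes); on armeb
   the lane ordering has to be modelled correctly for the result to be
   right.  */
void
halve_rgb (unsigned char *rgb, int n)
{
  int i;
  for (i = 0; i < n; i++)
    {
      rgb[3 * i + 0] >>= 1;
      rgb[3 * i + 1] >>= 1;
      rgb[3 * i + 2] >>= 1;
    }
}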
1 /* Output routines for GCC for ARM.
2 Copyright (C) 1991-2018 Free Software Foundation, Inc.
3 Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
4 and Martin Simmons (@harleqn.co.uk).
5 More major hacks by Richard Earnshaw (rearnsha@arm.com).
7 This file is part of GCC.
9 GCC is free software; you can redistribute it and/or modify it
10 under the terms of the GNU General Public License as published
11 by the Free Software Foundation; either version 3, or (at your
12 option) any later version.
14 GCC is distributed in the hope that it will be useful, but WITHOUT
15 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
16 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
17 License for more details.
19 You should have received a copy of the GNU General Public License
20 along with GCC; see the file COPYING3. If not see
21 <http://www.gnu.org/licenses/>. */
23 #define IN_TARGET_CODE 1
25 #include "config.h"
26 #define INCLUDE_STRING
27 #include "system.h"
28 #include "coretypes.h"
29 #include "backend.h"
30 #include "target.h"
31 #include "rtl.h"
32 #include "tree.h"
33 #include "memmodel.h"
34 #include "cfghooks.h"
35 #include "df.h"
36 #include "tm_p.h"
37 #include "stringpool.h"
38 #include "attribs.h"
39 #include "optabs.h"
40 #include "regs.h"
41 #include "emit-rtl.h"
42 #include "recog.h"
43 #include "cgraph.h"
44 #include "diagnostic-core.h"
45 #include "alias.h"
46 #include "fold-const.h"
47 #include "stor-layout.h"
48 #include "calls.h"
49 #include "varasm.h"
50 #include "output.h"
51 #include "insn-attr.h"
52 #include "flags.h"
53 #include "reload.h"
54 #include "explow.h"
55 #include "expr.h"
56 #include "cfgrtl.h"
57 #include "sched-int.h"
58 #include "common/common-target.h"
59 #include "langhooks.h"
60 #include "intl.h"
61 #include "libfuncs.h"
62 #include "params.h"
63 #include "opts.h"
64 #include "dumpfile.h"
65 #include "target-globals.h"
66 #include "builtins.h"
67 #include "tm-constrs.h"
68 #include "rtl-iter.h"
69 #include "optabs-libfuncs.h"
70 #include "gimplify.h"
71 #include "gimple.h"
72 #include "selftest.h"
74 /* This file should be included last. */
75 #include "target-def.h"
77 /* Forward definitions of types. */
78 typedef struct minipool_node Mnode;
79 typedef struct minipool_fixup Mfix;
81 /* The last .arch and .fpu assembly strings that we printed. */
82 static std::string arm_last_printed_arch_string;
83 static std::string arm_last_printed_fpu_string;
85 void (*arm_lang_output_object_attributes_hook)(void);
87 struct four_ints
88 {
89 int i[4];
90 };
92 /* Forward function declarations. */
93 static bool arm_const_not_ok_for_debug_p (rtx);
94 static int arm_needs_doubleword_align (machine_mode, const_tree);
95 static int arm_compute_static_chain_stack_bytes (void);
96 static arm_stack_offsets *arm_get_frame_offsets (void);
97 static void arm_compute_frame_layout (void);
98 static void arm_add_gc_roots (void);
99 static int arm_gen_constant (enum rtx_code, machine_mode, rtx,
100 unsigned HOST_WIDE_INT, rtx, rtx, int, int);
101 static unsigned bit_count (unsigned long);
102 static unsigned bitmap_popcount (const sbitmap);
103 static int arm_address_register_rtx_p (rtx, int);
104 static int arm_legitimate_index_p (machine_mode, rtx, RTX_CODE, int);
105 static bool is_called_in_ARM_mode (tree);
106 static int thumb2_legitimate_index_p (machine_mode, rtx, int);
107 static int thumb1_base_register_rtx_p (rtx, machine_mode, int);
108 static rtx arm_legitimize_address (rtx, rtx, machine_mode);
109 static reg_class_t arm_preferred_reload_class (rtx, reg_class_t);
110 static rtx thumb_legitimize_address (rtx, rtx, machine_mode);
111 inline static int thumb1_index_register_rtx_p (rtx, int);
112 static int thumb_far_jump_used_p (void);
113 static bool thumb_force_lr_save (void);
114 static unsigned arm_size_return_regs (void);
115 static bool arm_assemble_integer (rtx, unsigned int, int);
116 static void arm_print_operand (FILE *, rtx, int);
117 static void arm_print_operand_address (FILE *, machine_mode, rtx);
118 static bool arm_print_operand_punct_valid_p (unsigned char code);
119 static const char *fp_const_from_val (REAL_VALUE_TYPE *);
120 static arm_cc get_arm_condition_code (rtx);
121 static bool arm_fixed_condition_code_regs (unsigned int *, unsigned int *);
122 static const char *output_multi_immediate (rtx *, const char *, const char *,
123 int, HOST_WIDE_INT);
124 static const char *shift_op (rtx, HOST_WIDE_INT *);
125 static struct machine_function *arm_init_machine_status (void);
126 static void thumb_exit (FILE *, int);
127 static HOST_WIDE_INT get_jump_table_size (rtx_jump_table_data *);
128 static Mnode *move_minipool_fix_forward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
129 static Mnode *add_minipool_forward_ref (Mfix *);
130 static Mnode *move_minipool_fix_backward_ref (Mnode *, Mnode *, HOST_WIDE_INT);
131 static Mnode *add_minipool_backward_ref (Mfix *);
132 static void assign_minipool_offsets (Mfix *);
133 static void arm_print_value (FILE *, rtx);
134 static void dump_minipool (rtx_insn *);
135 static int arm_barrier_cost (rtx_insn *);
136 static Mfix *create_fix_barrier (Mfix *, HOST_WIDE_INT);
137 static void push_minipool_barrier (rtx_insn *, HOST_WIDE_INT);
138 static void push_minipool_fix (rtx_insn *, HOST_WIDE_INT, rtx *,
139 machine_mode, rtx);
140 static void arm_reorg (void);
141 static void note_invalid_constants (rtx_insn *, HOST_WIDE_INT, int);
142 static unsigned long arm_compute_save_reg0_reg12_mask (void);
143 static unsigned long arm_compute_save_core_reg_mask (void);
144 static unsigned long arm_isr_value (tree);
145 static unsigned long arm_compute_func_type (void);
146 static tree arm_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
147 static tree arm_handle_pcs_attribute (tree *, tree, tree, int, bool *);
148 static tree arm_handle_isr_attribute (tree *, tree, tree, int, bool *);
149 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
150 static tree arm_handle_notshared_attribute (tree *, tree, tree, int, bool *);
151 #endif
152 static tree arm_handle_cmse_nonsecure_entry (tree *, tree, tree, int, bool *);
153 static tree arm_handle_cmse_nonsecure_call (tree *, tree, tree, int, bool *);
154 static void arm_output_function_epilogue (FILE *);
155 static void arm_output_function_prologue (FILE *);
156 static int arm_comp_type_attributes (const_tree, const_tree);
157 static void arm_set_default_type_attributes (tree);
158 static int arm_adjust_cost (rtx_insn *, int, rtx_insn *, int, unsigned int);
159 static int arm_sched_reorder (FILE *, int, rtx_insn **, int *, int);
160 static int optimal_immediate_sequence (enum rtx_code code,
161 unsigned HOST_WIDE_INT val,
162 struct four_ints *return_sequence);
163 static int optimal_immediate_sequence_1 (enum rtx_code code,
164 unsigned HOST_WIDE_INT val,
165 struct four_ints *return_sequence,
166 int i);
167 static int arm_get_strip_length (int);
168 static bool arm_function_ok_for_sibcall (tree, tree);
169 static machine_mode arm_promote_function_mode (const_tree,
170 machine_mode, int *,
171 const_tree, int);
172 static bool arm_return_in_memory (const_tree, const_tree);
173 static rtx arm_function_value (const_tree, const_tree, bool);
174 static rtx arm_libcall_value_1 (machine_mode);
175 static rtx arm_libcall_value (machine_mode, const_rtx);
176 static bool arm_function_value_regno_p (const unsigned int);
177 static void arm_internal_label (FILE *, const char *, unsigned long);
178 static void arm_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
179 tree);
180 static bool arm_have_conditional_execution (void);
181 static bool arm_cannot_force_const_mem (machine_mode, rtx);
182 static bool arm_legitimate_constant_p (machine_mode, rtx);
183 static bool arm_rtx_costs (rtx, machine_mode, int, int, int *, bool);
184 static int arm_address_cost (rtx, machine_mode, addr_space_t, bool);
185 static int arm_register_move_cost (machine_mode, reg_class_t, reg_class_t);
186 static int arm_memory_move_cost (machine_mode, reg_class_t, bool);
187 static void emit_constant_insn (rtx cond, rtx pattern);
188 static rtx_insn *emit_set_insn (rtx, rtx);
189 static rtx emit_multi_reg_push (unsigned long, unsigned long);
190 static int arm_arg_partial_bytes (cumulative_args_t, machine_mode,
191 tree, bool);
192 static rtx arm_function_arg (cumulative_args_t, machine_mode,
193 const_tree, bool);
194 static void arm_function_arg_advance (cumulative_args_t, machine_mode,
195 const_tree, bool);
196 static pad_direction arm_function_arg_padding (machine_mode, const_tree);
197 static unsigned int arm_function_arg_boundary (machine_mode, const_tree);
198 static rtx aapcs_allocate_return_reg (machine_mode, const_tree,
199 const_tree);
200 static rtx aapcs_libcall_value (machine_mode);
201 static int aapcs_select_return_coproc (const_tree, const_tree);
203 #ifdef OBJECT_FORMAT_ELF
204 static void arm_elf_asm_constructor (rtx, int) ATTRIBUTE_UNUSED;
205 static void arm_elf_asm_destructor (rtx, int) ATTRIBUTE_UNUSED;
206 #endif
207 #ifndef ARM_PE
208 static void arm_encode_section_info (tree, rtx, int);
209 #endif
211 static void arm_file_end (void);
212 static void arm_file_start (void);
213 static void arm_insert_attributes (tree, tree *);
215 static void arm_setup_incoming_varargs (cumulative_args_t, machine_mode,
216 tree, int *, int);
217 static bool arm_pass_by_reference (cumulative_args_t,
218 machine_mode, const_tree, bool);
219 static bool arm_promote_prototypes (const_tree);
220 static bool arm_default_short_enums (void);
221 static bool arm_align_anon_bitfield (void);
222 static bool arm_return_in_msb (const_tree);
223 static bool arm_must_pass_in_stack (machine_mode, const_tree);
224 static bool arm_return_in_memory (const_tree, const_tree);
225 #if ARM_UNWIND_INFO
226 static void arm_unwind_emit (FILE *, rtx_insn *);
227 static bool arm_output_ttype (rtx);
228 static void arm_asm_emit_except_personality (rtx);
229 #endif
230 static void arm_asm_init_sections (void);
231 static rtx arm_dwarf_register_span (rtx);
233 static tree arm_cxx_guard_type (void);
234 static bool arm_cxx_guard_mask_bit (void);
235 static tree arm_get_cookie_size (tree);
236 static bool arm_cookie_has_size (void);
237 static bool arm_cxx_cdtor_returns_this (void);
238 static bool arm_cxx_key_method_may_be_inline (void);
239 static void arm_cxx_determine_class_data_visibility (tree);
240 static bool arm_cxx_class_data_always_comdat (void);
241 static bool arm_cxx_use_aeabi_atexit (void);
242 static void arm_init_libfuncs (void);
243 static tree arm_build_builtin_va_list (void);
244 static void arm_expand_builtin_va_start (tree, rtx);
245 static tree arm_gimplify_va_arg_expr (tree, tree, gimple_seq *, gimple_seq *);
246 static void arm_option_override (void);
247 static void arm_option_save (struct cl_target_option *, struct gcc_options *);
248 static void arm_option_restore (struct gcc_options *,
249 struct cl_target_option *);
250 static void arm_override_options_after_change (void);
251 static void arm_option_print (FILE *, int, struct cl_target_option *);
252 static void arm_set_current_function (tree);
253 static bool arm_can_inline_p (tree, tree);
254 static void arm_relayout_function (tree);
255 static bool arm_valid_target_attribute_p (tree, tree, tree, int);
256 static unsigned HOST_WIDE_INT arm_shift_truncation_mask (machine_mode);
257 static bool arm_sched_can_speculate_insn (rtx_insn *);
258 static bool arm_macro_fusion_p (void);
259 static bool arm_cannot_copy_insn_p (rtx_insn *);
260 static int arm_issue_rate (void);
261 static int arm_first_cycle_multipass_dfa_lookahead (void);
262 static int arm_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *, int);
263 static void arm_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
264 static bool arm_output_addr_const_extra (FILE *, rtx);
265 static bool arm_allocate_stack_slots_for_args (void);
266 static bool arm_warn_func_return (tree);
267 static tree arm_promoted_type (const_tree t);
268 static bool arm_scalar_mode_supported_p (scalar_mode);
269 static bool arm_frame_pointer_required (void);
270 static bool arm_can_eliminate (const int, const int);
271 static void arm_asm_trampoline_template (FILE *);
272 static void arm_trampoline_init (rtx, tree, rtx);
273 static rtx arm_trampoline_adjust_address (rtx);
274 static rtx_insn *arm_pic_static_addr (rtx orig, rtx reg);
275 static bool cortex_a9_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int *);
276 static bool xscale_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int *);
277 static bool fa726te_sched_adjust_cost (rtx_insn *, int, rtx_insn *, int *);
278 static bool arm_array_mode_supported_p (machine_mode,
279 unsigned HOST_WIDE_INT);
280 static machine_mode arm_preferred_simd_mode (scalar_mode);
281 static bool arm_class_likely_spilled_p (reg_class_t);
282 static HOST_WIDE_INT arm_vector_alignment (const_tree type);
283 static bool arm_vector_alignment_reachable (const_tree type, bool is_packed);
284 static bool arm_builtin_support_vector_misalignment (machine_mode mode,
285 const_tree type,
286 int misalignment,
287 bool is_packed);
288 static void arm_conditional_register_usage (void);
289 static enum flt_eval_method arm_excess_precision (enum excess_precision_type);
290 static reg_class_t arm_preferred_rename_class (reg_class_t rclass);
291 static void arm_autovectorize_vector_sizes (vector_sizes *);
292 static int arm_default_branch_cost (bool, bool);
293 static int arm_cortex_a5_branch_cost (bool, bool);
294 static int arm_cortex_m_branch_cost (bool, bool);
295 static int arm_cortex_m7_branch_cost (bool, bool);
297 static bool arm_vectorize_vec_perm_const (machine_mode, rtx, rtx, rtx,
298 const vec_perm_indices &);
300 static bool aarch_macro_fusion_pair_p (rtx_insn*, rtx_insn*);
302 static int arm_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
303 tree vectype,
304 int misalign ATTRIBUTE_UNUSED);
305 static unsigned arm_add_stmt_cost (void *data, int count,
306 enum vect_cost_for_stmt kind,
307 struct _stmt_vec_info *stmt_info,
308 int misalign,
309 enum vect_cost_model_location where);
311 static void arm_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
312 bool op0_preserve_value);
313 static unsigned HOST_WIDE_INT arm_asan_shadow_offset (void);
315 static void arm_sched_fusion_priority (rtx_insn *, int, int *, int*);
316 static bool arm_can_output_mi_thunk (const_tree, HOST_WIDE_INT, HOST_WIDE_INT,
317 const_tree);
318 static section *arm_function_section (tree, enum node_frequency, bool, bool);
319 static bool arm_asm_elf_flags_numeric (unsigned int flags, unsigned int *num);
320 static unsigned int arm_elf_section_type_flags (tree decl, const char *name,
321 int reloc);
322 static void arm_expand_divmod_libfunc (rtx, machine_mode, rtx, rtx, rtx *, rtx *);
323 static opt_scalar_float_mode arm_floatn_mode (int, bool);
324 static unsigned int arm_hard_regno_nregs (unsigned int, machine_mode);
325 static bool arm_hard_regno_mode_ok (unsigned int, machine_mode);
326 static bool arm_modes_tieable_p (machine_mode, machine_mode);
327 static HOST_WIDE_INT arm_constant_alignment (const_tree, HOST_WIDE_INT);
329 /* Table of machine attributes. */
330 static const struct attribute_spec arm_attribute_table[] =
331 {
332 /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
333 affects_type_identity, handler, exclude } */
334 /* Function calls made to this symbol must be done indirectly, because
335 it may lie outside of the 26 bit addressing range of a normal function
336 call. */
337 { "long_call", 0, 0, false, true, true, false, NULL, NULL },
338 /* Whereas these functions are always known to reside within the 26 bit
339 addressing range. */
340 { "short_call", 0, 0, false, true, true, false, NULL, NULL },
341 /* Specify the procedure call conventions for a function. */
342 { "pcs", 1, 1, false, true, true, false, arm_handle_pcs_attribute,
343 NULL },
344 /* Interrupt Service Routines have special prologue and epilogue requirements. */
345 { "isr", 0, 1, false, false, false, false, arm_handle_isr_attribute,
346 NULL },
347 { "interrupt", 0, 1, false, false, false, false, arm_handle_isr_attribute,
348 NULL },
349 { "naked", 0, 0, true, false, false, false,
350 arm_handle_fndecl_attribute, NULL },
351 #ifdef ARM_PE
352 /* ARM/PE has three new attributes:
353 interfacearm - ?
354 dllexport - for exporting a function/variable that will live in a dll
355 dllimport - for importing a function/variable from a dll
357 Microsoft allows multiple declspecs in one __declspec, separating
358 them with spaces. We do NOT support this. Instead, use __declspec
359 multiple times.
360 */
361 { "dllimport", 0, 0, true, false, false, false, NULL, NULL },
362 { "dllexport", 0, 0, true, false, false, false, NULL, NULL },
363 { "interfacearm", 0, 0, true, false, false, false,
364 arm_handle_fndecl_attribute, NULL },
365 #elif TARGET_DLLIMPORT_DECL_ATTRIBUTES
366 { "dllimport", 0, 0, false, false, false, false, handle_dll_attribute,
367 NULL },
368 { "dllexport", 0, 0, false, false, false, false, handle_dll_attribute,
369 NULL },
370 { "notshared", 0, 0, false, true, false, false,
371 arm_handle_notshared_attribute, NULL },
372 #endif
373 /* ARMv8-M Security Extensions support. */
374 { "cmse_nonsecure_entry", 0, 0, true, false, false, false,
375 arm_handle_cmse_nonsecure_entry, NULL },
376 { "cmse_nonsecure_call", 0, 0, true, false, false, true,
377 arm_handle_cmse_nonsecure_call, NULL },
378 { NULL, 0, 0, false, false, false, false, NULL, NULL }
379 };
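/* (Illustrative note, not part of the original file.)  Typical uses of the
   attributes handled above, in user code:

       void far_func (void) __attribute__ ((long_call));
       void handler (void) __attribute__ ((interrupt ("IRQ")));
       int entry (int) __attribute__ ((cmse_nonsecure_entry));

   "long_call" forces an indirect call because the callee may lie outside
   the 26-bit branch range; "interrupt"/"isr" selects the special interrupt
   prologue/epilogue; "cmse_nonsecure_entry" marks an ARMv8-M secure entry
   function.  */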
381 /* Initialize the GCC target structure. */
382 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
383 #undef TARGET_MERGE_DECL_ATTRIBUTES
384 #define TARGET_MERGE_DECL_ATTRIBUTES merge_dllimport_decl_attributes
385 #endif
387 #undef TARGET_LEGITIMIZE_ADDRESS
388 #define TARGET_LEGITIMIZE_ADDRESS arm_legitimize_address
390 #undef TARGET_ATTRIBUTE_TABLE
391 #define TARGET_ATTRIBUTE_TABLE arm_attribute_table
393 #undef TARGET_INSERT_ATTRIBUTES
394 #define TARGET_INSERT_ATTRIBUTES arm_insert_attributes
396 #undef TARGET_ASM_FILE_START
397 #define TARGET_ASM_FILE_START arm_file_start
398 #undef TARGET_ASM_FILE_END
399 #define TARGET_ASM_FILE_END arm_file_end
401 #undef TARGET_ASM_ALIGNED_SI_OP
402 #define TARGET_ASM_ALIGNED_SI_OP NULL
403 #undef TARGET_ASM_INTEGER
404 #define TARGET_ASM_INTEGER arm_assemble_integer
406 #undef TARGET_PRINT_OPERAND
407 #define TARGET_PRINT_OPERAND arm_print_operand
408 #undef TARGET_PRINT_OPERAND_ADDRESS
409 #define TARGET_PRINT_OPERAND_ADDRESS arm_print_operand_address
410 #undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
411 #define TARGET_PRINT_OPERAND_PUNCT_VALID_P arm_print_operand_punct_valid_p
413 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
414 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA arm_output_addr_const_extra
416 #undef TARGET_ASM_FUNCTION_PROLOGUE
417 #define TARGET_ASM_FUNCTION_PROLOGUE arm_output_function_prologue
419 #undef TARGET_ASM_FUNCTION_EPILOGUE
420 #define TARGET_ASM_FUNCTION_EPILOGUE arm_output_function_epilogue
422 #undef TARGET_CAN_INLINE_P
423 #define TARGET_CAN_INLINE_P arm_can_inline_p
425 #undef TARGET_RELAYOUT_FUNCTION
426 #define TARGET_RELAYOUT_FUNCTION arm_relayout_function
428 #undef TARGET_OPTION_OVERRIDE
429 #define TARGET_OPTION_OVERRIDE arm_option_override
431 #undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE
432 #define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE arm_override_options_after_change
434 #undef TARGET_OPTION_SAVE
435 #define TARGET_OPTION_SAVE arm_option_save
437 #undef TARGET_OPTION_RESTORE
438 #define TARGET_OPTION_RESTORE arm_option_restore
440 #undef TARGET_OPTION_PRINT
441 #define TARGET_OPTION_PRINT arm_option_print
443 #undef TARGET_COMP_TYPE_ATTRIBUTES
444 #define TARGET_COMP_TYPE_ATTRIBUTES arm_comp_type_attributes
446 #undef TARGET_SCHED_CAN_SPECULATE_INSN
447 #define TARGET_SCHED_CAN_SPECULATE_INSN arm_sched_can_speculate_insn
449 #undef TARGET_SCHED_MACRO_FUSION_P
450 #define TARGET_SCHED_MACRO_FUSION_P arm_macro_fusion_p
452 #undef TARGET_SCHED_MACRO_FUSION_PAIR_P
453 #define TARGET_SCHED_MACRO_FUSION_PAIR_P aarch_macro_fusion_pair_p
455 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
456 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES arm_set_default_type_attributes
458 #undef TARGET_SCHED_ADJUST_COST
459 #define TARGET_SCHED_ADJUST_COST arm_adjust_cost
461 #undef TARGET_SET_CURRENT_FUNCTION
462 #define TARGET_SET_CURRENT_FUNCTION arm_set_current_function
464 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
465 #define TARGET_OPTION_VALID_ATTRIBUTE_P arm_valid_target_attribute_p
467 #undef TARGET_SCHED_REORDER
468 #define TARGET_SCHED_REORDER arm_sched_reorder
470 #undef TARGET_REGISTER_MOVE_COST
471 #define TARGET_REGISTER_MOVE_COST arm_register_move_cost
473 #undef TARGET_MEMORY_MOVE_COST
474 #define TARGET_MEMORY_MOVE_COST arm_memory_move_cost
476 #undef TARGET_ENCODE_SECTION_INFO
477 #ifdef ARM_PE
478 #define TARGET_ENCODE_SECTION_INFO arm_pe_encode_section_info
479 #else
480 #define TARGET_ENCODE_SECTION_INFO arm_encode_section_info
481 #endif
483 #undef TARGET_STRIP_NAME_ENCODING
484 #define TARGET_STRIP_NAME_ENCODING arm_strip_name_encoding
486 #undef TARGET_ASM_INTERNAL_LABEL
487 #define TARGET_ASM_INTERNAL_LABEL arm_internal_label
489 #undef TARGET_FLOATN_MODE
490 #define TARGET_FLOATN_MODE arm_floatn_mode
492 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
493 #define TARGET_FUNCTION_OK_FOR_SIBCALL arm_function_ok_for_sibcall
495 #undef TARGET_FUNCTION_VALUE
496 #define TARGET_FUNCTION_VALUE arm_function_value
498 #undef TARGET_LIBCALL_VALUE
499 #define TARGET_LIBCALL_VALUE arm_libcall_value
501 #undef TARGET_FUNCTION_VALUE_REGNO_P
502 #define TARGET_FUNCTION_VALUE_REGNO_P arm_function_value_regno_p
504 #undef TARGET_ASM_OUTPUT_MI_THUNK
505 #define TARGET_ASM_OUTPUT_MI_THUNK arm_output_mi_thunk
506 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
507 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK arm_can_output_mi_thunk
509 #undef TARGET_RTX_COSTS
510 #define TARGET_RTX_COSTS arm_rtx_costs
511 #undef TARGET_ADDRESS_COST
512 #define TARGET_ADDRESS_COST arm_address_cost
514 #undef TARGET_SHIFT_TRUNCATION_MASK
515 #define TARGET_SHIFT_TRUNCATION_MASK arm_shift_truncation_mask
516 #undef TARGET_VECTOR_MODE_SUPPORTED_P
517 #define TARGET_VECTOR_MODE_SUPPORTED_P arm_vector_mode_supported_p
518 #undef TARGET_ARRAY_MODE_SUPPORTED_P
519 #define TARGET_ARRAY_MODE_SUPPORTED_P arm_array_mode_supported_p
520 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
521 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE arm_preferred_simd_mode
522 #undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES
523 #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_SIZES \
524 arm_autovectorize_vector_sizes
526 #undef TARGET_MACHINE_DEPENDENT_REORG
527 #define TARGET_MACHINE_DEPENDENT_REORG arm_reorg
529 #undef TARGET_INIT_BUILTINS
530 #define TARGET_INIT_BUILTINS arm_init_builtins
531 #undef TARGET_EXPAND_BUILTIN
532 #define TARGET_EXPAND_BUILTIN arm_expand_builtin
533 #undef TARGET_BUILTIN_DECL
534 #define TARGET_BUILTIN_DECL arm_builtin_decl
536 #undef TARGET_INIT_LIBFUNCS
537 #define TARGET_INIT_LIBFUNCS arm_init_libfuncs
539 #undef TARGET_PROMOTE_FUNCTION_MODE
540 #define TARGET_PROMOTE_FUNCTION_MODE arm_promote_function_mode
541 #undef TARGET_PROMOTE_PROTOTYPES
542 #define TARGET_PROMOTE_PROTOTYPES arm_promote_prototypes
543 #undef TARGET_PASS_BY_REFERENCE
544 #define TARGET_PASS_BY_REFERENCE arm_pass_by_reference
545 #undef TARGET_ARG_PARTIAL_BYTES
546 #define TARGET_ARG_PARTIAL_BYTES arm_arg_partial_bytes
547 #undef TARGET_FUNCTION_ARG
548 #define TARGET_FUNCTION_ARG arm_function_arg
549 #undef TARGET_FUNCTION_ARG_ADVANCE
550 #define TARGET_FUNCTION_ARG_ADVANCE arm_function_arg_advance
551 #undef TARGET_FUNCTION_ARG_PADDING
552 #define TARGET_FUNCTION_ARG_PADDING arm_function_arg_padding
553 #undef TARGET_FUNCTION_ARG_BOUNDARY
554 #define TARGET_FUNCTION_ARG_BOUNDARY arm_function_arg_boundary
556 #undef TARGET_SETUP_INCOMING_VARARGS
557 #define TARGET_SETUP_INCOMING_VARARGS arm_setup_incoming_varargs
559 #undef TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
560 #define TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS arm_allocate_stack_slots_for_args
562 #undef TARGET_ASM_TRAMPOLINE_TEMPLATE
563 #define TARGET_ASM_TRAMPOLINE_TEMPLATE arm_asm_trampoline_template
564 #undef TARGET_TRAMPOLINE_INIT
565 #define TARGET_TRAMPOLINE_INIT arm_trampoline_init
566 #undef TARGET_TRAMPOLINE_ADJUST_ADDRESS
567 #define TARGET_TRAMPOLINE_ADJUST_ADDRESS arm_trampoline_adjust_address
569 #undef TARGET_WARN_FUNC_RETURN
570 #define TARGET_WARN_FUNC_RETURN arm_warn_func_return
572 #undef TARGET_DEFAULT_SHORT_ENUMS
573 #define TARGET_DEFAULT_SHORT_ENUMS arm_default_short_enums
575 #undef TARGET_ALIGN_ANON_BITFIELD
576 #define TARGET_ALIGN_ANON_BITFIELD arm_align_anon_bitfield
578 #undef TARGET_NARROW_VOLATILE_BITFIELD
579 #define TARGET_NARROW_VOLATILE_BITFIELD hook_bool_void_false
581 #undef TARGET_CXX_GUARD_TYPE
582 #define TARGET_CXX_GUARD_TYPE arm_cxx_guard_type
584 #undef TARGET_CXX_GUARD_MASK_BIT
585 #define TARGET_CXX_GUARD_MASK_BIT arm_cxx_guard_mask_bit
587 #undef TARGET_CXX_GET_COOKIE_SIZE
588 #define TARGET_CXX_GET_COOKIE_SIZE arm_get_cookie_size
590 #undef TARGET_CXX_COOKIE_HAS_SIZE
591 #define TARGET_CXX_COOKIE_HAS_SIZE arm_cookie_has_size
593 #undef TARGET_CXX_CDTOR_RETURNS_THIS
594 #define TARGET_CXX_CDTOR_RETURNS_THIS arm_cxx_cdtor_returns_this
596 #undef TARGET_CXX_KEY_METHOD_MAY_BE_INLINE
597 #define TARGET_CXX_KEY_METHOD_MAY_BE_INLINE arm_cxx_key_method_may_be_inline
599 #undef TARGET_CXX_USE_AEABI_ATEXIT
600 #define TARGET_CXX_USE_AEABI_ATEXIT arm_cxx_use_aeabi_atexit
602 #undef TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY
603 #define TARGET_CXX_DETERMINE_CLASS_DATA_VISIBILITY \
604 arm_cxx_determine_class_data_visibility
606 #undef TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT
607 #define TARGET_CXX_CLASS_DATA_ALWAYS_COMDAT arm_cxx_class_data_always_comdat
609 #undef TARGET_RETURN_IN_MSB
610 #define TARGET_RETURN_IN_MSB arm_return_in_msb
612 #undef TARGET_RETURN_IN_MEMORY
613 #define TARGET_RETURN_IN_MEMORY arm_return_in_memory
615 #undef TARGET_MUST_PASS_IN_STACK
616 #define TARGET_MUST_PASS_IN_STACK arm_must_pass_in_stack
618 #if ARM_UNWIND_INFO
619 #undef TARGET_ASM_UNWIND_EMIT
620 #define TARGET_ASM_UNWIND_EMIT arm_unwind_emit
622 /* EABI unwinding tables use a different format for the typeinfo tables. */
623 #undef TARGET_ASM_TTYPE
624 #define TARGET_ASM_TTYPE arm_output_ttype
626 #undef TARGET_ARM_EABI_UNWINDER
627 #define TARGET_ARM_EABI_UNWINDER true
629 #undef TARGET_ASM_EMIT_EXCEPT_PERSONALITY
630 #define TARGET_ASM_EMIT_EXCEPT_PERSONALITY arm_asm_emit_except_personality
632 #endif /* ARM_UNWIND_INFO */
634 #undef TARGET_ASM_INIT_SECTIONS
635 #define TARGET_ASM_INIT_SECTIONS arm_asm_init_sections
637 #undef TARGET_DWARF_REGISTER_SPAN
638 #define TARGET_DWARF_REGISTER_SPAN arm_dwarf_register_span
640 #undef TARGET_CANNOT_COPY_INSN_P
641 #define TARGET_CANNOT_COPY_INSN_P arm_cannot_copy_insn_p
643 #ifdef HAVE_AS_TLS
644 #undef TARGET_HAVE_TLS
645 #define TARGET_HAVE_TLS true
646 #endif
648 #undef TARGET_HAVE_CONDITIONAL_EXECUTION
649 #define TARGET_HAVE_CONDITIONAL_EXECUTION arm_have_conditional_execution
651 #undef TARGET_LEGITIMATE_CONSTANT_P
652 #define TARGET_LEGITIMATE_CONSTANT_P arm_legitimate_constant_p
654 #undef TARGET_CANNOT_FORCE_CONST_MEM
655 #define TARGET_CANNOT_FORCE_CONST_MEM arm_cannot_force_const_mem
657 #undef TARGET_MAX_ANCHOR_OFFSET
658 #define TARGET_MAX_ANCHOR_OFFSET 4095
660 /* The minimum is set such that the total size of the block
661 for a particular anchor is 4088 + 1 + 4095 bytes, which is
662 divisible by eight, ensuring natural spacing of anchors. */
663 #undef TARGET_MIN_ANCHOR_OFFSET
664 #define TARGET_MIN_ANCHOR_OFFSET -4088
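/* (Added note, not in the original source.)  Spelled out: the anchor block
   spans offsets -4088 .. 4095, i.e. 4088 + 1 + 4095 = 8184 bytes, and
   8184 = 1023 * 8, so consecutive anchors stay naturally spaced on 8-byte
   boundaries.  */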
666 #undef TARGET_SCHED_ISSUE_RATE
667 #define TARGET_SCHED_ISSUE_RATE arm_issue_rate
669 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
670 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
671 arm_first_cycle_multipass_dfa_lookahead
673 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
674 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD \
675 arm_first_cycle_multipass_dfa_lookahead_guard
677 #undef TARGET_MANGLE_TYPE
678 #define TARGET_MANGLE_TYPE arm_mangle_type
680 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
681 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV arm_atomic_assign_expand_fenv
683 #undef TARGET_BUILD_BUILTIN_VA_LIST
684 #define TARGET_BUILD_BUILTIN_VA_LIST arm_build_builtin_va_list
685 #undef TARGET_EXPAND_BUILTIN_VA_START
686 #define TARGET_EXPAND_BUILTIN_VA_START arm_expand_builtin_va_start
687 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
688 #define TARGET_GIMPLIFY_VA_ARG_EXPR arm_gimplify_va_arg_expr
690 #ifdef HAVE_AS_TLS
691 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
692 #define TARGET_ASM_OUTPUT_DWARF_DTPREL arm_output_dwarf_dtprel
693 #endif
695 #undef TARGET_LEGITIMATE_ADDRESS_P
696 #define TARGET_LEGITIMATE_ADDRESS_P arm_legitimate_address_p
698 #undef TARGET_PREFERRED_RELOAD_CLASS
699 #define TARGET_PREFERRED_RELOAD_CLASS arm_preferred_reload_class
701 #undef TARGET_PROMOTED_TYPE
702 #define TARGET_PROMOTED_TYPE arm_promoted_type
704 #undef TARGET_SCALAR_MODE_SUPPORTED_P
705 #define TARGET_SCALAR_MODE_SUPPORTED_P arm_scalar_mode_supported_p
707 #undef TARGET_COMPUTE_FRAME_LAYOUT
708 #define TARGET_COMPUTE_FRAME_LAYOUT arm_compute_frame_layout
710 #undef TARGET_FRAME_POINTER_REQUIRED
711 #define TARGET_FRAME_POINTER_REQUIRED arm_frame_pointer_required
713 #undef TARGET_CAN_ELIMINATE
714 #define TARGET_CAN_ELIMINATE arm_can_eliminate
716 #undef TARGET_CONDITIONAL_REGISTER_USAGE
717 #define TARGET_CONDITIONAL_REGISTER_USAGE arm_conditional_register_usage
719 #undef TARGET_CLASS_LIKELY_SPILLED_P
720 #define TARGET_CLASS_LIKELY_SPILLED_P arm_class_likely_spilled_p
722 #undef TARGET_VECTORIZE_BUILTINS
723 #define TARGET_VECTORIZE_BUILTINS
725 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
726 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
727 arm_builtin_vectorized_function
729 #undef TARGET_VECTOR_ALIGNMENT
730 #define TARGET_VECTOR_ALIGNMENT arm_vector_alignment
732 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
733 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE \
734 arm_vector_alignment_reachable
736 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
737 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
738 arm_builtin_support_vector_misalignment
740 #undef TARGET_PREFERRED_RENAME_CLASS
741 #define TARGET_PREFERRED_RENAME_CLASS \
742 arm_preferred_rename_class
744 #undef TARGET_VECTORIZE_VEC_PERM_CONST
745 #define TARGET_VECTORIZE_VEC_PERM_CONST arm_vectorize_vec_perm_const
747 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
748 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
749 arm_builtin_vectorization_cost
750 #undef TARGET_VECTORIZE_ADD_STMT_COST
751 #define TARGET_VECTORIZE_ADD_STMT_COST arm_add_stmt_cost
753 #undef TARGET_CANONICALIZE_COMPARISON
754 #define TARGET_CANONICALIZE_COMPARISON \
755 arm_canonicalize_comparison
757 #undef TARGET_ASAN_SHADOW_OFFSET
758 #define TARGET_ASAN_SHADOW_OFFSET arm_asan_shadow_offset
760 #undef MAX_INSN_PER_IT_BLOCK
761 #define MAX_INSN_PER_IT_BLOCK (arm_restrict_it ? 1 : 4)
763 #undef TARGET_CAN_USE_DOLOOP_P
764 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
766 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
767 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P arm_const_not_ok_for_debug_p
769 #undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
770 #define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
772 #undef TARGET_SCHED_FUSION_PRIORITY
773 #define TARGET_SCHED_FUSION_PRIORITY arm_sched_fusion_priority
775 #undef TARGET_ASM_FUNCTION_SECTION
776 #define TARGET_ASM_FUNCTION_SECTION arm_function_section
778 #undef TARGET_ASM_ELF_FLAGS_NUMERIC
779 #define TARGET_ASM_ELF_FLAGS_NUMERIC arm_asm_elf_flags_numeric
781 #undef TARGET_SECTION_TYPE_FLAGS
782 #define TARGET_SECTION_TYPE_FLAGS arm_elf_section_type_flags
784 #undef TARGET_EXPAND_DIVMOD_LIBFUNC
785 #define TARGET_EXPAND_DIVMOD_LIBFUNC arm_expand_divmod_libfunc
787 #undef TARGET_C_EXCESS_PRECISION
788 #define TARGET_C_EXCESS_PRECISION arm_excess_precision
790 /* Although the architecture reserves bits 0 and 1, only the former is
791 used for ARM/Thumb ISA selection in v7 and earlier versions. */
792 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
793 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2
795 #undef TARGET_FIXED_CONDITION_CODE_REGS
796 #define TARGET_FIXED_CONDITION_CODE_REGS arm_fixed_condition_code_regs
798 #undef TARGET_HARD_REGNO_NREGS
799 #define TARGET_HARD_REGNO_NREGS arm_hard_regno_nregs
800 #undef TARGET_HARD_REGNO_MODE_OK
801 #define TARGET_HARD_REGNO_MODE_OK arm_hard_regno_mode_ok
803 #undef TARGET_MODES_TIEABLE_P
804 #define TARGET_MODES_TIEABLE_P arm_modes_tieable_p
806 #undef TARGET_CAN_CHANGE_MODE_CLASS
807 #define TARGET_CAN_CHANGE_MODE_CLASS arm_can_change_mode_class
809 #undef TARGET_CONSTANT_ALIGNMENT
810 #define TARGET_CONSTANT_ALIGNMENT arm_constant_alignment
812 /* Obstack for minipool constant handling. */
813 static struct obstack minipool_obstack;
814 static char * minipool_startobj;
816 /* The maximum number of insns skipped which
817 will be conditionalised if possible. */
818 static int max_insns_skipped = 5;
820 extern FILE * asm_out_file;
822 /* True if we are currently building a constant table. */
823 int making_const_table;
825 /* The processor for which instructions should be scheduled. */
826 enum processor_type arm_tune = TARGET_CPU_arm_none;
828 /* The current tuning set. */
829 const struct tune_params *current_tune;
831 /* Which floating point hardware to schedule for. */
832 int arm_fpu_attr;
834 /* Used for Thumb call_via trampolines. */
835 rtx thumb_call_via_label[14];
836 static int thumb_call_reg_needed;
838 /* The bits in this mask specify which instruction scheduling options should
839 be used. */
840 unsigned int tune_flags = 0;
842 /* The highest ARM architecture version supported by the
843 target. */
844 enum base_architecture arm_base_arch = BASE_ARCH_0;
846 /* Active target architecture and tuning. */
848 struct arm_build_target arm_active_target;
850 /* The following are used in the arm.md file as equivalents to bits
851 in the above two flag variables. */
853 /* Nonzero if this chip supports the ARM Architecture 3M extensions. */
854 int arm_arch3m = 0;
856 /* Nonzero if this chip supports the ARM Architecture 4 extensions. */
857 int arm_arch4 = 0;
859 /* Nonzero if this chip supports the ARM Architecture 4t extensions. */
860 int arm_arch4t = 0;
862 /* Nonzero if this chip supports the ARM Architecture 5 extensions. */
863 int arm_arch5 = 0;
865 /* Nonzero if this chip supports the ARM Architecture 5E extensions. */
866 int arm_arch5e = 0;
868 /* Nonzero if this chip supports the ARM Architecture 5TE extensions. */
869 int arm_arch5te = 0;
871 /* Nonzero if this chip supports the ARM Architecture 6 extensions. */
872 int arm_arch6 = 0;
874 /* Nonzero if this chip supports the ARM 6K extensions. */
875 int arm_arch6k = 0;
877 /* Nonzero if this chip supports the ARM 6KZ extensions. */
878 int arm_arch6kz = 0;
880 /* Nonzero if instructions present in ARMv6-M can be used. */
881 int arm_arch6m = 0;
883 /* Nonzero if this chip supports the ARM 7 extensions. */
884 int arm_arch7 = 0;
886 /* Nonzero if this chip supports the Large Physical Address Extension. */
887 int arm_arch_lpae = 0;
889 /* Nonzero if instructions not present in the 'M' profile can be used. */
890 int arm_arch_notm = 0;
892 /* Nonzero if instructions present in ARMv7E-M can be used. */
893 int arm_arch7em = 0;
895 /* Nonzero if instructions present in ARMv8 can be used. */
896 int arm_arch8 = 0;
898 /* Nonzero if this chip supports the ARMv8.1 extensions. */
899 int arm_arch8_1 = 0;
901 /* Nonzero if this chip supports the ARM Architecture 8.2 extensions. */
902 int arm_arch8_2 = 0;
904 /* Nonzero if this chip supports the FP16 instructions extension of ARM
905 Architecture 8.2. */
906 int arm_fp16_inst = 0;
908 /* Nonzero if this chip can benefit from load scheduling. */
909 int arm_ld_sched = 0;
911 /* Nonzero if this chip is a StrongARM. */
912 int arm_tune_strongarm = 0;
914 /* Nonzero if this chip supports Intel Wireless MMX technology. */
915 int arm_arch_iwmmxt = 0;
917 /* Nonzero if this chip supports Intel Wireless MMX2 technology. */
918 int arm_arch_iwmmxt2 = 0;
920 /* Nonzero if this chip is an XScale. */
921 int arm_arch_xscale = 0;
923 /* Nonzero if tuning for XScale */
924 int arm_tune_xscale = 0;
926 /* Nonzero if we want to tune for stores that access the write-buffer.
927 This typically means an ARM6 or ARM7 with MMU or MPU. */
928 int arm_tune_wbuf = 0;
930 /* Nonzero if tuning for Cortex-A9. */
931 int arm_tune_cortex_a9 = 0;
933 /* Nonzero if we should define __THUMB_INTERWORK__ in the
934 preprocessor.
935 XXX This is a bit of a hack, it's intended to help work around
936 problems in GLD which doesn't understand that armv5t code is
937 interworking clean. */
938 int arm_cpp_interwork = 0;
940 /* Nonzero if chip supports Thumb 1. */
941 int arm_arch_thumb1;
943 /* Nonzero if chip supports Thumb 2. */
944 int arm_arch_thumb2;
946 /* Nonzero if chip supports integer division instruction. */
947 int arm_arch_arm_hwdiv;
948 int arm_arch_thumb_hwdiv;
950 /* Nonzero if chip disallows volatile memory access in IT block. */
951 int arm_arch_no_volatile_ce;
953 /* Nonzero if we should use Neon to handle 64-bit operations rather
954 than core registers. */
955 int prefer_neon_for_64bits = 0;
957 /* Nonzero if we shouldn't use literal pools. */
958 bool arm_disable_literal_pool = false;
960 /* The register number to be used for the PIC offset register. */
961 unsigned arm_pic_register = INVALID_REGNUM;
963 enum arm_pcs arm_pcs_default;
965 /* For an explanation of these variables, see final_prescan_insn below. */
966 int arm_ccfsm_state;
967 /* arm_current_cc is also used for Thumb-2 cond_exec blocks. */
968 enum arm_cond_code arm_current_cc;
970 rtx arm_target_insn;
971 int arm_target_label;
972 /* The number of conditionally executed insns, including the current insn. */
973 int arm_condexec_count = 0;
974 /* A bitmask specifying the patterns for the IT block.
975 Zero means do not output an IT block before this insn. */
976 int arm_condexec_mask = 0;
977 /* The number of bits used in arm_condexec_mask. */
978 int arm_condexec_masklen = 0;
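/* (Illustrative note, not in the original file.)  For a Thumb-2 sequence
   such as:

       ite   eq
       moveq r0, #1
       movne r0, #0

   the two conditional insns form one IT block: arm_condexec_count counts
   the insns in the block, and arm_condexec_mask/arm_condexec_masklen
   describe the then/else pattern that final_prescan_insn emits as the
   leading "it"/"ite" instruction.  */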
980 /* Nonzero if chip supports the ARMv8 CRC instructions. */
981 int arm_arch_crc = 0;
983 /* Nonzero if chip supports the AdvSIMD Dot Product instructions. */
984 int arm_arch_dotprod = 0;
986 /* Nonzero if chip supports the ARMv8-M security extensions. */
987 int arm_arch_cmse = 0;
989 /* Nonzero if the core has a very small, high-latency, multiply unit. */
990 int arm_m_profile_small_mul = 0;
992 /* The condition codes of the ARM, and the inverse function. */
993 static const char * const arm_condition_codes[] =
994 {
995 "eq", "ne", "cs", "cc", "mi", "pl", "vs", "vc",
996 "hi", "ls", "ge", "lt", "gt", "le", "al", "nv"
997 };
999 /* The register numbers in sequence, for passing to arm_gen_load_multiple. */
1000 int arm_regs_in_sequence[] =
1001 {
1002 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
1003 };
1005 #define ARM_LSL_NAME "lsl"
1006 #define streq(string1, string2) (strcmp (string1, string2) == 0)
1008 #define THUMB2_WORK_REGS (0xff & ~( (1 << THUMB_HARD_FRAME_POINTER_REGNUM) \
1009 | (1 << SP_REGNUM) | (1 << PC_REGNUM) \
1010 | (1 << PIC_OFFSET_TABLE_REGNUM)))
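/* (Added note, not in the original source.)  0xff restricts the candidate
   set to the low registers r0-r7; the Thumb hard frame pointer, the stack
   pointer, the program counter and the PIC register (when one is in use)
   are then masked out, leaving the registers the backend treats as
   available work registers in Thumb-2 code.  */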
1012 /* Initialization code. */
1014 struct cpu_tune
1015 {
1016 enum processor_type scheduler;
1017 unsigned int tune_flags;
1018 const struct tune_params *tune;
1019 };
1021 #define ARM_PREFETCH_NOT_BENEFICIAL { 0, -1, -1 }
1022 #define ARM_PREFETCH_BENEFICIAL(num_slots,l1_size,l1_line_size) \
1023 { \
1024 num_slots, \
1025 l1_size, \
1026 l1_line_size \
1027 }
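/* (Illustrative note, not in the original file.)  These macros build the
   prefetch-tuning triple used in the tune_params tables:
   ARM_PREFETCH_NOT_BENEFICIAL yields { 0, -1, -1 }, while e.g.
   ARM_PREFETCH_BENEFICIAL (4, 32, 64) expands to { 4, 32, 64 }
   (num_slots, l1_size, l1_line_size).  */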
1029 /* arm generic vectorizer costs. */
1030 static const
1031 struct cpu_vec_costs arm_default_vec_cost = {
1032 1, /* scalar_stmt_cost. */
1033 1, /* scalar_load_cost. */
1034 1, /* scalar_store_cost. */
1035 1, /* vec_stmt_cost. */
1036 1, /* vec_to_scalar_cost. */
1037 1, /* scalar_to_vec_cost. */
1038 1, /* vec_align_load_cost. */
1039 1, /* vec_unalign_load_cost. */
1040 1, /* vec_unalign_store_cost. */
1041 1, /* vec_store_cost. */
1042 3, /* cond_taken_branch_cost. */
1043 1, /* cond_not_taken_branch_cost. */
1044 };
1046 /* Cost tables for AArch32 + AArch64 cores should go in aarch-cost-tables.h */
1047 #include "aarch-cost-tables.h"
1051 const struct cpu_cost_table cortexa9_extra_costs =
1053 /* ALU */
1055 0, /* arith. */
1056 0, /* logical. */
1057 0, /* shift. */
1058 COSTS_N_INSNS (1), /* shift_reg. */
1059 COSTS_N_INSNS (1), /* arith_shift. */
1060 COSTS_N_INSNS (2), /* arith_shift_reg. */
1061 0, /* log_shift. */
1062 COSTS_N_INSNS (1), /* log_shift_reg. */
1063 COSTS_N_INSNS (1), /* extend. */
1064 COSTS_N_INSNS (2), /* extend_arith. */
1065 COSTS_N_INSNS (1), /* bfi. */
1066 COSTS_N_INSNS (1), /* bfx. */
1067 0, /* clz. */
1068 0, /* rev. */
1069 0, /* non_exec. */
1070 true /* non_exec_costs_exec. */
1073 /* MULT SImode */
1075 COSTS_N_INSNS (3), /* simple. */
1076 COSTS_N_INSNS (3), /* flag_setting. */
1077 COSTS_N_INSNS (2), /* extend. */
1078 COSTS_N_INSNS (3), /* add. */
1079 COSTS_N_INSNS (2), /* extend_add. */
1080 COSTS_N_INSNS (30) /* idiv. No HW div on Cortex A9. */
1082 /* MULT DImode */
1084 0, /* simple (N/A). */
1085 0, /* flag_setting (N/A). */
1086 COSTS_N_INSNS (4), /* extend. */
1087 0, /* add (N/A). */
1088 COSTS_N_INSNS (4), /* extend_add. */
1089 0 /* idiv (N/A). */
1092 /* LD/ST */
1094 COSTS_N_INSNS (2), /* load. */
1095 COSTS_N_INSNS (2), /* load_sign_extend. */
1096 COSTS_N_INSNS (2), /* ldrd. */
1097 COSTS_N_INSNS (2), /* ldm_1st. */
1098 1, /* ldm_regs_per_insn_1st. */
1099 2, /* ldm_regs_per_insn_subsequent. */
1100 COSTS_N_INSNS (5), /* loadf. */
1101 COSTS_N_INSNS (5), /* loadd. */
1102 COSTS_N_INSNS (1), /* load_unaligned. */
1103 COSTS_N_INSNS (2), /* store. */
1104 COSTS_N_INSNS (2), /* strd. */
1105 COSTS_N_INSNS (2), /* stm_1st. */
1106 1, /* stm_regs_per_insn_1st. */
1107 2, /* stm_regs_per_insn_subsequent. */
1108 COSTS_N_INSNS (1), /* storef. */
1109 COSTS_N_INSNS (1), /* stored. */
1110 COSTS_N_INSNS (1), /* store_unaligned. */
1111 COSTS_N_INSNS (1), /* loadv. */
1112 COSTS_N_INSNS (1) /* storev. */
1115 /* FP SFmode */
1117 COSTS_N_INSNS (14), /* div. */
1118 COSTS_N_INSNS (4), /* mult. */
1119 COSTS_N_INSNS (7), /* mult_addsub. */
1120 COSTS_N_INSNS (30), /* fma. */
1121 COSTS_N_INSNS (3), /* addsub. */
1122 COSTS_N_INSNS (1), /* fpconst. */
1123 COSTS_N_INSNS (1), /* neg. */
1124 COSTS_N_INSNS (3), /* compare. */
1125 COSTS_N_INSNS (3), /* widen. */
1126 COSTS_N_INSNS (3), /* narrow. */
1127 COSTS_N_INSNS (3), /* toint. */
1128 COSTS_N_INSNS (3), /* fromint. */
1129 COSTS_N_INSNS (3) /* roundint. */
1131 /* FP DFmode */
1133 COSTS_N_INSNS (24), /* div. */
1134 COSTS_N_INSNS (5), /* mult. */
1135 COSTS_N_INSNS (8), /* mult_addsub. */
1136 COSTS_N_INSNS (30), /* fma. */
1137 COSTS_N_INSNS (3), /* addsub. */
1138 COSTS_N_INSNS (1), /* fpconst. */
1139 COSTS_N_INSNS (1), /* neg. */
1140 COSTS_N_INSNS (3), /* compare. */
1141 COSTS_N_INSNS (3), /* widen. */
1142 COSTS_N_INSNS (3), /* narrow. */
1143 COSTS_N_INSNS (3), /* toint. */
1144 COSTS_N_INSNS (3), /* fromint. */
1145 COSTS_N_INSNS (3) /* roundint. */
1148 /* Vector */
1150 COSTS_N_INSNS (1) /* alu. */
1154 const struct cpu_cost_table cortexa8_extra_costs =
1156 /* ALU */
1158 0, /* arith. */
1159 0, /* logical. */
1160 COSTS_N_INSNS (1), /* shift. */
1161 0, /* shift_reg. */
1162 COSTS_N_INSNS (1), /* arith_shift. */
1163 0, /* arith_shift_reg. */
1164 COSTS_N_INSNS (1), /* log_shift. */
1165 0, /* log_shift_reg. */
1166 0, /* extend. */
1167 0, /* extend_arith. */
1168 0, /* bfi. */
1169 0, /* bfx. */
1170 0, /* clz. */
1171 0, /* rev. */
1172 0, /* non_exec. */
1173 true /* non_exec_costs_exec. */
1176 /* MULT SImode */
1178 COSTS_N_INSNS (1), /* simple. */
1179 COSTS_N_INSNS (1), /* flag_setting. */
1180 COSTS_N_INSNS (1), /* extend. */
1181 COSTS_N_INSNS (1), /* add. */
1182 COSTS_N_INSNS (1), /* extend_add. */
1183 COSTS_N_INSNS (30) /* idiv. No HW div on Cortex A8. */
1185 /* MULT DImode */
1187 0, /* simple (N/A). */
1188 0, /* flag_setting (N/A). */
1189 COSTS_N_INSNS (2), /* extend. */
1190 0, /* add (N/A). */
1191 COSTS_N_INSNS (2), /* extend_add. */
1192 0 /* idiv (N/A). */
1195 /* LD/ST */
1197 COSTS_N_INSNS (1), /* load. */
1198 COSTS_N_INSNS (1), /* load_sign_extend. */
1199 COSTS_N_INSNS (1), /* ldrd. */
1200 COSTS_N_INSNS (1), /* ldm_1st. */
1201 1, /* ldm_regs_per_insn_1st. */
1202 2, /* ldm_regs_per_insn_subsequent. */
1203 COSTS_N_INSNS (1), /* loadf. */
1204 COSTS_N_INSNS (1), /* loadd. */
1205 COSTS_N_INSNS (1), /* load_unaligned. */
1206 COSTS_N_INSNS (1), /* store. */
1207 COSTS_N_INSNS (1), /* strd. */
1208 COSTS_N_INSNS (1), /* stm_1st. */
1209 1, /* stm_regs_per_insn_1st. */
1210 2, /* stm_regs_per_insn_subsequent. */
1211 COSTS_N_INSNS (1), /* storef. */
1212 COSTS_N_INSNS (1), /* stored. */
1213 COSTS_N_INSNS (1), /* store_unaligned. */
1214 COSTS_N_INSNS (1), /* loadv. */
1215 COSTS_N_INSNS (1) /* storev. */
1218 /* FP SFmode */
1220 COSTS_N_INSNS (36), /* div. */
1221 COSTS_N_INSNS (11), /* mult. */
1222 COSTS_N_INSNS (20), /* mult_addsub. */
1223 COSTS_N_INSNS (30), /* fma. */
1224 COSTS_N_INSNS (9), /* addsub. */
1225 COSTS_N_INSNS (3), /* fpconst. */
1226 COSTS_N_INSNS (3), /* neg. */
1227 COSTS_N_INSNS (6), /* compare. */
1228 COSTS_N_INSNS (4), /* widen. */
1229 COSTS_N_INSNS (4), /* narrow. */
1230 COSTS_N_INSNS (8), /* toint. */
1231 COSTS_N_INSNS (8), /* fromint. */
1232 COSTS_N_INSNS (8) /* roundint. */
1234 /* FP DFmode */
1236 COSTS_N_INSNS (64), /* div. */
1237 COSTS_N_INSNS (16), /* mult. */
1238 COSTS_N_INSNS (25), /* mult_addsub. */
1239 COSTS_N_INSNS (30), /* fma. */
1240 COSTS_N_INSNS (9), /* addsub. */
1241 COSTS_N_INSNS (3), /* fpconst. */
1242 COSTS_N_INSNS (3), /* neg. */
1243 COSTS_N_INSNS (6), /* compare. */
1244 COSTS_N_INSNS (6), /* widen. */
1245 COSTS_N_INSNS (6), /* narrow. */
1246 COSTS_N_INSNS (8), /* toint. */
1247 COSTS_N_INSNS (8), /* fromint. */
1248 COSTS_N_INSNS (8) /* roundint. */
1251 /* Vector */
1253 COSTS_N_INSNS (1) /* alu. */
1257 const struct cpu_cost_table cortexa5_extra_costs =
1259 /* ALU */
1261 0, /* arith. */
1262 0, /* logical. */
1263 COSTS_N_INSNS (1), /* shift. */
1264 COSTS_N_INSNS (1), /* shift_reg. */
1265 COSTS_N_INSNS (1), /* arith_shift. */
1266 COSTS_N_INSNS (1), /* arith_shift_reg. */
1267 COSTS_N_INSNS (1), /* log_shift. */
1268 COSTS_N_INSNS (1), /* log_shift_reg. */
1269 COSTS_N_INSNS (1), /* extend. */
1270 COSTS_N_INSNS (1), /* extend_arith. */
1271 COSTS_N_INSNS (1), /* bfi. */
1272 COSTS_N_INSNS (1), /* bfx. */
1273 COSTS_N_INSNS (1), /* clz. */
1274 COSTS_N_INSNS (1), /* rev. */
1275 0, /* non_exec. */
1276 true /* non_exec_costs_exec. */
1280 /* MULT SImode */
1282 0, /* simple. */
1283 COSTS_N_INSNS (1), /* flag_setting. */
1284 COSTS_N_INSNS (1), /* extend. */
1285 COSTS_N_INSNS (1), /* add. */
1286 COSTS_N_INSNS (1), /* extend_add. */
1287 COSTS_N_INSNS (7) /* idiv. */
1289 /* MULT DImode */
1291 0, /* simple (N/A). */
1292 0, /* flag_setting (N/A). */
1293 COSTS_N_INSNS (1), /* extend. */
1294 0, /* add. */
1295 COSTS_N_INSNS (2), /* extend_add. */
1296 0 /* idiv (N/A). */
1299 /* LD/ST */
1301 COSTS_N_INSNS (1), /* load. */
1302 COSTS_N_INSNS (1), /* load_sign_extend. */
1303 COSTS_N_INSNS (6), /* ldrd. */
1304 COSTS_N_INSNS (1), /* ldm_1st. */
1305 1, /* ldm_regs_per_insn_1st. */
1306 2, /* ldm_regs_per_insn_subsequent. */
1307 COSTS_N_INSNS (2), /* loadf. */
1308 COSTS_N_INSNS (4), /* loadd. */
1309 COSTS_N_INSNS (1), /* load_unaligned. */
1310 COSTS_N_INSNS (1), /* store. */
1311 COSTS_N_INSNS (3), /* strd. */
1312 COSTS_N_INSNS (1), /* stm_1st. */
1313 1, /* stm_regs_per_insn_1st. */
1314 2, /* stm_regs_per_insn_subsequent. */
1315 COSTS_N_INSNS (2), /* storef. */
1316 COSTS_N_INSNS (2), /* stored. */
1317 COSTS_N_INSNS (1), /* store_unaligned. */
1318 COSTS_N_INSNS (1), /* loadv. */
1319 COSTS_N_INSNS (1) /* storev. */
1322 /* FP SFmode */
1324 COSTS_N_INSNS (15), /* div. */
1325 COSTS_N_INSNS (3), /* mult. */
1326 COSTS_N_INSNS (7), /* mult_addsub. */
1327 COSTS_N_INSNS (7), /* fma. */
1328 COSTS_N_INSNS (3), /* addsub. */
1329 COSTS_N_INSNS (3), /* fpconst. */
1330 COSTS_N_INSNS (3), /* neg. */
1331 COSTS_N_INSNS (3), /* compare. */
1332 COSTS_N_INSNS (3), /* widen. */
1333 COSTS_N_INSNS (3), /* narrow. */
1334 COSTS_N_INSNS (3), /* toint. */
1335 COSTS_N_INSNS (3), /* fromint. */
1336 COSTS_N_INSNS (3) /* roundint. */
1338 /* FP DFmode */
1340 COSTS_N_INSNS (30), /* div. */
1341 COSTS_N_INSNS (6), /* mult. */
1342 COSTS_N_INSNS (10), /* mult_addsub. */
1343 COSTS_N_INSNS (7), /* fma. */
1344 COSTS_N_INSNS (3), /* addsub. */
1345 COSTS_N_INSNS (3), /* fpconst. */
1346 COSTS_N_INSNS (3), /* neg. */
1347 COSTS_N_INSNS (3), /* compare. */
1348 COSTS_N_INSNS (3), /* widen. */
1349 COSTS_N_INSNS (3), /* narrow. */
1350 COSTS_N_INSNS (3), /* toint. */
1351 COSTS_N_INSNS (3), /* fromint. */
1352 COSTS_N_INSNS (3) /* roundint. */
1355 /* Vector */
1357 COSTS_N_INSNS (1) /* alu. */
1362 const struct cpu_cost_table cortexa7_extra_costs =
1364 /* ALU */
1366 0, /* arith. */
1367 0, /* logical. */
1368 COSTS_N_INSNS (1), /* shift. */
1369 COSTS_N_INSNS (1), /* shift_reg. */
1370 COSTS_N_INSNS (1), /* arith_shift. */
1371 COSTS_N_INSNS (1), /* arith_shift_reg. */
1372 COSTS_N_INSNS (1), /* log_shift. */
1373 COSTS_N_INSNS (1), /* log_shift_reg. */
1374 COSTS_N_INSNS (1), /* extend. */
1375 COSTS_N_INSNS (1), /* extend_arith. */
1376 COSTS_N_INSNS (1), /* bfi. */
1377 COSTS_N_INSNS (1), /* bfx. */
1378 COSTS_N_INSNS (1), /* clz. */
1379 COSTS_N_INSNS (1), /* rev. */
1380 0, /* non_exec. */
1381 true /* non_exec_costs_exec. */
1385 /* MULT SImode */
1387 0, /* simple. */
1388 COSTS_N_INSNS (1), /* flag_setting. */
1389 COSTS_N_INSNS (1), /* extend. */
1390 COSTS_N_INSNS (1), /* add. */
1391 COSTS_N_INSNS (1), /* extend_add. */
1392 COSTS_N_INSNS (7) /* idiv. */
1394 /* MULT DImode */
1396 0, /* simple (N/A). */
1397 0, /* flag_setting (N/A). */
1398 COSTS_N_INSNS (1), /* extend. */
1399 0, /* add. */
1400 COSTS_N_INSNS (2), /* extend_add. */
1401 0 /* idiv (N/A). */
1404 /* LD/ST */
1406 COSTS_N_INSNS (1), /* load. */
1407 COSTS_N_INSNS (1), /* load_sign_extend. */
1408 COSTS_N_INSNS (3), /* ldrd. */
1409 COSTS_N_INSNS (1), /* ldm_1st. */
1410 1, /* ldm_regs_per_insn_1st. */
1411 2, /* ldm_regs_per_insn_subsequent. */
1412 COSTS_N_INSNS (2), /* loadf. */
1413 COSTS_N_INSNS (2), /* loadd. */
1414 COSTS_N_INSNS (1), /* load_unaligned. */
1415 COSTS_N_INSNS (1), /* store. */
1416 COSTS_N_INSNS (3), /* strd. */
1417 COSTS_N_INSNS (1), /* stm_1st. */
1418 1, /* stm_regs_per_insn_1st. */
1419 2, /* stm_regs_per_insn_subsequent. */
1420 COSTS_N_INSNS (2), /* storef. */
1421 COSTS_N_INSNS (2), /* stored. */
1422 COSTS_N_INSNS (1), /* store_unaligned. */
1423 COSTS_N_INSNS (1), /* loadv. */
1424 COSTS_N_INSNS (1) /* storev. */
1427 /* FP SFmode */
1429 COSTS_N_INSNS (15), /* div. */
1430 COSTS_N_INSNS (3), /* mult. */
1431 COSTS_N_INSNS (7), /* mult_addsub. */
1432 COSTS_N_INSNS (7), /* fma. */
1433 COSTS_N_INSNS (3), /* addsub. */
1434 COSTS_N_INSNS (3), /* fpconst. */
1435 COSTS_N_INSNS (3), /* neg. */
1436 COSTS_N_INSNS (3), /* compare. */
1437 COSTS_N_INSNS (3), /* widen. */
1438 COSTS_N_INSNS (3), /* narrow. */
1439 COSTS_N_INSNS (3), /* toint. */
1440 COSTS_N_INSNS (3), /* fromint. */
1441 COSTS_N_INSNS (3) /* roundint. */
1443 /* FP DFmode */
1445 COSTS_N_INSNS (30), /* div. */
1446 COSTS_N_INSNS (6), /* mult. */
1447 COSTS_N_INSNS (10), /* mult_addsub. */
1448 COSTS_N_INSNS (7), /* fma. */
1449 COSTS_N_INSNS (3), /* addsub. */
1450 COSTS_N_INSNS (3), /* fpconst. */
1451 COSTS_N_INSNS (3), /* neg. */
1452 COSTS_N_INSNS (3), /* compare. */
1453 COSTS_N_INSNS (3), /* widen. */
1454 COSTS_N_INSNS (3), /* narrow. */
1455 COSTS_N_INSNS (3), /* toint. */
1456 COSTS_N_INSNS (3), /* fromint. */
1457 COSTS_N_INSNS (3) /* roundint. */
1460 /* Vector */
1462 COSTS_N_INSNS (1) /* alu. */
1466 const struct cpu_cost_table cortexa12_extra_costs =
1468 /* ALU */
1470 0, /* arith. */
1471 0, /* logical. */
1472 0, /* shift. */
1473 COSTS_N_INSNS (1), /* shift_reg. */
1474 COSTS_N_INSNS (1), /* arith_shift. */
1475 COSTS_N_INSNS (1), /* arith_shift_reg. */
1476 COSTS_N_INSNS (1), /* log_shift. */
1477 COSTS_N_INSNS (1), /* log_shift_reg. */
1478 0, /* extend. */
1479 COSTS_N_INSNS (1), /* extend_arith. */
1480 0, /* bfi. */
1481 COSTS_N_INSNS (1), /* bfx. */
1482 COSTS_N_INSNS (1), /* clz. */
1483 COSTS_N_INSNS (1), /* rev. */
1484 0, /* non_exec. */
1485 true /* non_exec_costs_exec. */
1487 /* MULT SImode */
1490 COSTS_N_INSNS (2), /* simple. */
1491 COSTS_N_INSNS (3), /* flag_setting. */
1492 COSTS_N_INSNS (2), /* extend. */
1493 COSTS_N_INSNS (3), /* add. */
1494 COSTS_N_INSNS (2), /* extend_add. */
1495 COSTS_N_INSNS (18) /* idiv. */
1497 /* MULT DImode */
1499 0, /* simple (N/A). */
1500 0, /* flag_setting (N/A). */
1501 COSTS_N_INSNS (3), /* extend. */
1502 0, /* add (N/A). */
1503 COSTS_N_INSNS (3), /* extend_add. */
1504 0 /* idiv (N/A). */
1507 /* LD/ST */
1509 COSTS_N_INSNS (3), /* load. */
1510 COSTS_N_INSNS (3), /* load_sign_extend. */
1511 COSTS_N_INSNS (3), /* ldrd. */
1512 COSTS_N_INSNS (3), /* ldm_1st. */
1513 1, /* ldm_regs_per_insn_1st. */
1514 2, /* ldm_regs_per_insn_subsequent. */
1515 COSTS_N_INSNS (3), /* loadf. */
1516 COSTS_N_INSNS (3), /* loadd. */
1517 0, /* load_unaligned. */
1518 0, /* store. */
1519 0, /* strd. */
1520 0, /* stm_1st. */
1521 1, /* stm_regs_per_insn_1st. */
1522 2, /* stm_regs_per_insn_subsequent. */
1523 COSTS_N_INSNS (2), /* storef. */
1524 COSTS_N_INSNS (2), /* stored. */
1525 0, /* store_unaligned. */
1526 COSTS_N_INSNS (1), /* loadv. */
1527 COSTS_N_INSNS (1) /* storev. */
1530 /* FP SFmode */
1532 COSTS_N_INSNS (17), /* div. */
1533 COSTS_N_INSNS (4), /* mult. */
1534 COSTS_N_INSNS (8), /* mult_addsub. */
1535 COSTS_N_INSNS (8), /* fma. */
1536 COSTS_N_INSNS (4), /* addsub. */
1537 COSTS_N_INSNS (2), /* fpconst. */
1538 COSTS_N_INSNS (2), /* neg. */
1539 COSTS_N_INSNS (2), /* compare. */
1540 COSTS_N_INSNS (4), /* widen. */
1541 COSTS_N_INSNS (4), /* narrow. */
1542 COSTS_N_INSNS (4), /* toint. */
1543 COSTS_N_INSNS (4), /* fromint. */
1544 COSTS_N_INSNS (4) /* roundint. */
1546 /* FP DFmode */
1548 COSTS_N_INSNS (31), /* div. */
1549 COSTS_N_INSNS (4), /* mult. */
1550 COSTS_N_INSNS (8), /* mult_addsub. */
1551 COSTS_N_INSNS (8), /* fma. */
1552 COSTS_N_INSNS (4), /* addsub. */
1553 COSTS_N_INSNS (2), /* fpconst. */
1554 COSTS_N_INSNS (2), /* neg. */
1555 COSTS_N_INSNS (2), /* compare. */
1556 COSTS_N_INSNS (4), /* widen. */
1557 COSTS_N_INSNS (4), /* narrow. */
1558 COSTS_N_INSNS (4), /* toint. */
1559 COSTS_N_INSNS (4), /* fromint. */
1560 COSTS_N_INSNS (4) /* roundint. */
1563 /* Vector */
1565 COSTS_N_INSNS (1) /* alu. */
1569 const struct cpu_cost_table cortexa15_extra_costs =
1571 /* ALU */
1573 0, /* arith. */
1574 0, /* logical. */
1575 0, /* shift. */
1576 0, /* shift_reg. */
1577 COSTS_N_INSNS (1), /* arith_shift. */
1578 COSTS_N_INSNS (1), /* arith_shift_reg. */
1579 COSTS_N_INSNS (1), /* log_shift. */
1580 COSTS_N_INSNS (1), /* log_shift_reg. */
1581 0, /* extend. */
1582 COSTS_N_INSNS (1), /* extend_arith. */
1583 COSTS_N_INSNS (1), /* bfi. */
1584 0, /* bfx. */
1585 0, /* clz. */
1586 0, /* rev. */
1587 0, /* non_exec. */
1588 true /* non_exec_costs_exec. */
1590 /* MULT SImode */
1593 COSTS_N_INSNS (2), /* simple. */
1594 COSTS_N_INSNS (3), /* flag_setting. */
1595 COSTS_N_INSNS (2), /* extend. */
1596 COSTS_N_INSNS (2), /* add. */
1597 COSTS_N_INSNS (2), /* extend_add. */
1598 COSTS_N_INSNS (18) /* idiv. */
1600 /* MULT DImode */
1602 0, /* simple (N/A). */
1603 0, /* flag_setting (N/A). */
1604 COSTS_N_INSNS (3), /* extend. */
1605 0, /* add (N/A). */
1606 COSTS_N_INSNS (3), /* extend_add. */
1607 0 /* idiv (N/A). */
1610 /* LD/ST */
1612 COSTS_N_INSNS (3), /* load. */
1613 COSTS_N_INSNS (3), /* load_sign_extend. */
1614 COSTS_N_INSNS (3), /* ldrd. */
1615 COSTS_N_INSNS (4), /* ldm_1st. */
1616 1, /* ldm_regs_per_insn_1st. */
1617 2, /* ldm_regs_per_insn_subsequent. */
1618 COSTS_N_INSNS (4), /* loadf. */
1619 COSTS_N_INSNS (4), /* loadd. */
1620 0, /* load_unaligned. */
1621 0, /* store. */
1622 0, /* strd. */
1623 COSTS_N_INSNS (1), /* stm_1st. */
1624 1, /* stm_regs_per_insn_1st. */
1625 2, /* stm_regs_per_insn_subsequent. */
1626 0, /* storef. */
1627 0, /* stored. */
1628 0, /* store_unaligned. */
1629 COSTS_N_INSNS (1), /* loadv. */
1630 COSTS_N_INSNS (1) /* storev. */
1633 /* FP SFmode */
1635 COSTS_N_INSNS (17), /* div. */
1636 COSTS_N_INSNS (4), /* mult. */
1637 COSTS_N_INSNS (8), /* mult_addsub. */
1638 COSTS_N_INSNS (8), /* fma. */
1639 COSTS_N_INSNS (4), /* addsub. */
1640 COSTS_N_INSNS (2), /* fpconst. */
1641 COSTS_N_INSNS (2), /* neg. */
1642 COSTS_N_INSNS (5), /* compare. */
1643 COSTS_N_INSNS (4), /* widen. */
1644 COSTS_N_INSNS (4), /* narrow. */
1645 COSTS_N_INSNS (4), /* toint. */
1646 COSTS_N_INSNS (4), /* fromint. */
1647 COSTS_N_INSNS (4) /* roundint. */
1649 /* FP DFmode */
1651 COSTS_N_INSNS (31), /* div. */
1652 COSTS_N_INSNS (4), /* mult. */
1653 COSTS_N_INSNS (8), /* mult_addsub. */
1654 COSTS_N_INSNS (8), /* fma. */
1655 COSTS_N_INSNS (4), /* addsub. */
1656 COSTS_N_INSNS (2), /* fpconst. */
1657 COSTS_N_INSNS (2), /* neg. */
1658 COSTS_N_INSNS (2), /* compare. */
1659 COSTS_N_INSNS (4), /* widen. */
1660 COSTS_N_INSNS (4), /* narrow. */
1661 COSTS_N_INSNS (4), /* toint. */
1662 COSTS_N_INSNS (4), /* fromint. */
1663 COSTS_N_INSNS (4) /* roundint. */
1666 /* Vector */
1668 COSTS_N_INSNS (1) /* alu. */
1672 const struct cpu_cost_table v7m_extra_costs =
1674 /* ALU */
1676 0, /* arith. */
1677 0, /* logical. */
1678 0, /* shift. */
1679 0, /* shift_reg. */
1680 0, /* arith_shift. */
1681 COSTS_N_INSNS (1), /* arith_shift_reg. */
1682 0, /* log_shift. */
1683 COSTS_N_INSNS (1), /* log_shift_reg. */
1684 0, /* extend. */
1685 COSTS_N_INSNS (1), /* extend_arith. */
1686 0, /* bfi. */
1687 0, /* bfx. */
1688 0, /* clz. */
1689 0, /* rev. */
1690 COSTS_N_INSNS (1), /* non_exec. */
1691 false /* non_exec_costs_exec. */
1694 /* MULT SImode */
1696 COSTS_N_INSNS (1), /* simple. */
1697 COSTS_N_INSNS (1), /* flag_setting. */
1698 COSTS_N_INSNS (2), /* extend. */
1699 COSTS_N_INSNS (1), /* add. */
1700 COSTS_N_INSNS (3), /* extend_add. */
1701 COSTS_N_INSNS (8) /* idiv. */
1703 /* MULT DImode */
1705 0, /* simple (N/A). */
1706 0, /* flag_setting (N/A). */
1707 COSTS_N_INSNS (2), /* extend. */
1708 0, /* add (N/A). */
1709 COSTS_N_INSNS (3), /* extend_add. */
1710 0 /* idiv (N/A). */
1713 /* LD/ST */
1715 COSTS_N_INSNS (2), /* load. */
1716 0, /* load_sign_extend. */
1717 COSTS_N_INSNS (3), /* ldrd. */
1718 COSTS_N_INSNS (2), /* ldm_1st. */
1719 1, /* ldm_regs_per_insn_1st. */
1720 1, /* ldm_regs_per_insn_subsequent. */
1721 COSTS_N_INSNS (2), /* loadf. */
1722 COSTS_N_INSNS (3), /* loadd. */
1723 COSTS_N_INSNS (1), /* load_unaligned. */
1724 COSTS_N_INSNS (2), /* store. */
1725 COSTS_N_INSNS (3), /* strd. */
1726 COSTS_N_INSNS (2), /* stm_1st. */
1727 1, /* stm_regs_per_insn_1st. */
1728 1, /* stm_regs_per_insn_subsequent. */
1729 COSTS_N_INSNS (2), /* storef. */
1730 COSTS_N_INSNS (3), /* stored. */
1731 COSTS_N_INSNS (1), /* store_unaligned. */
1732 COSTS_N_INSNS (1), /* loadv. */
1733 COSTS_N_INSNS (1) /* storev. */
1736 /* FP SFmode */
1738 COSTS_N_INSNS (7), /* div. */
1739 COSTS_N_INSNS (2), /* mult. */
1740 COSTS_N_INSNS (5), /* mult_addsub. */
1741 COSTS_N_INSNS (3), /* fma. */
1742 COSTS_N_INSNS (1), /* addsub. */
1743 0, /* fpconst. */
1744 0, /* neg. */
1745 0, /* compare. */
1746 0, /* widen. */
1747 0, /* narrow. */
1748 0, /* toint. */
1749 0, /* fromint. */
1750 0 /* roundint. */
1752 /* FP DFmode */
1754 COSTS_N_INSNS (15), /* div. */
1755 COSTS_N_INSNS (5), /* mult. */
1756 COSTS_N_INSNS (7), /* mult_addsub. */
1757 COSTS_N_INSNS (7), /* fma. */
1758 COSTS_N_INSNS (3), /* addsub. */
1759 0, /* fpconst. */
1760 0, /* neg. */
1761 0, /* compare. */
1762 0, /* widen. */
1763 0, /* narrow. */
1764 0, /* toint. */
1765 0, /* fromint. */
1766 0 /* roundint. */
1769 /* Vector */
1771 COSTS_N_INSNS (1) /* alu. */
1775 const struct addr_mode_cost_table generic_addr_mode_costs =
1777 /* int. */
1779 COSTS_N_INSNS (0), /* AMO_DEFAULT. */
1780 COSTS_N_INSNS (0), /* AMO_NO_WB. */
1781 COSTS_N_INSNS (0) /* AMO_WB. */
1783 /* float. */
1785 COSTS_N_INSNS (0), /* AMO_DEFAULT. */
1786 COSTS_N_INSNS (0), /* AMO_NO_WB. */
1787 COSTS_N_INSNS (0) /* AMO_WB. */
1789 /* vector. */
1791 COSTS_N_INSNS (0), /* AMO_DEFAULT. */
1792 COSTS_N_INSNS (0), /* AMO_NO_WB. */
1793 COSTS_N_INSNS (0) /* AMO_WB. */
1797 const struct tune_params arm_slowmul_tune =
1799 &generic_extra_costs, /* Insn extra costs. */
1800 &generic_addr_mode_costs, /* Addressing mode costs. */
1801 NULL, /* Sched adj cost. */
1802 arm_default_branch_cost,
1803 &arm_default_vec_cost,
1804 3, /* Constant limit. */
1805 5, /* Max cond insns. */
1806 8, /* Memset max inline. */
1807 1, /* Issue rate. */
1808 ARM_PREFETCH_NOT_BENEFICIAL,
1809 tune_params::PREF_CONST_POOL_TRUE,
1810 tune_params::PREF_LDRD_FALSE,
1811 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
1812 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
1813 tune_params::DISPARAGE_FLAGS_NEITHER,
1814 tune_params::PREF_NEON_64_FALSE,
1815 tune_params::PREF_NEON_STRINGOPS_FALSE,
1816 tune_params::FUSE_NOTHING,
1817 tune_params::SCHED_AUTOPREF_OFF
1820 const struct tune_params arm_fastmul_tune =
1822 &generic_extra_costs, /* Insn extra costs. */
1823 &generic_addr_mode_costs, /* Addressing mode costs. */
1824 NULL, /* Sched adj cost. */
1825 arm_default_branch_cost,
1826 &arm_default_vec_cost,
1827 1, /* Constant limit. */
1828 5, /* Max cond insns. */
1829 8, /* Memset max inline. */
1830 1, /* Issue rate. */
1831 ARM_PREFETCH_NOT_BENEFICIAL,
1832 tune_params::PREF_CONST_POOL_TRUE,
1833 tune_params::PREF_LDRD_FALSE,
1834 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
1835 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
1836 tune_params::DISPARAGE_FLAGS_NEITHER,
1837 tune_params::PREF_NEON_64_FALSE,
1838 tune_params::PREF_NEON_STRINGOPS_FALSE,
1839 tune_params::FUSE_NOTHING,
1840 tune_params::SCHED_AUTOPREF_OFF
1843 /* StrongARM has early execution of branches, so a sequence that is worth
1844 skipping is shorter. Set max_insns_skipped to a lower value. */
1846 const struct tune_params arm_strongarm_tune =
1848 &generic_extra_costs, /* Insn extra costs. */
1849 &generic_addr_mode_costs, /* Addressing mode costs. */
1850 NULL, /* Sched adj cost. */
1851 arm_default_branch_cost,
1852 &arm_default_vec_cost,
1853 1, /* Constant limit. */
1854 3, /* Max cond insns. */
1855 8, /* Memset max inline. */
1856 1, /* Issue rate. */
1857 ARM_PREFETCH_NOT_BENEFICIAL,
1858 tune_params::PREF_CONST_POOL_TRUE,
1859 tune_params::PREF_LDRD_FALSE,
1860 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
1861 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
1862 tune_params::DISPARAGE_FLAGS_NEITHER,
1863 tune_params::PREF_NEON_64_FALSE,
1864 tune_params::PREF_NEON_STRINGOPS_FALSE,
1865 tune_params::FUSE_NOTHING,
1866 tune_params::SCHED_AUTOPREF_OFF
1869 const struct tune_params arm_xscale_tune =
1871 &generic_extra_costs, /* Insn extra costs. */
1872 &generic_addr_mode_costs, /* Addressing mode costs. */
1873 xscale_sched_adjust_cost,
1874 arm_default_branch_cost,
1875 &arm_default_vec_cost,
1876 2, /* Constant limit. */
1877 3, /* Max cond insns. */
1878 8, /* Memset max inline. */
1879 1, /* Issue rate. */
1880 ARM_PREFETCH_NOT_BENEFICIAL,
1881 tune_params::PREF_CONST_POOL_TRUE,
1882 tune_params::PREF_LDRD_FALSE,
1883 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
1884 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
1885 tune_params::DISPARAGE_FLAGS_NEITHER,
1886 tune_params::PREF_NEON_64_FALSE,
1887 tune_params::PREF_NEON_STRINGOPS_FALSE,
1888 tune_params::FUSE_NOTHING,
1889 tune_params::SCHED_AUTOPREF_OFF
1892 const struct tune_params arm_9e_tune =
1894 &generic_extra_costs, /* Insn extra costs. */
1895 &generic_addr_mode_costs, /* Addressing mode costs. */
1896 NULL, /* Sched adj cost. */
1897 arm_default_branch_cost,
1898 &arm_default_vec_cost,
1899 1, /* Constant limit. */
1900 5, /* Max cond insns. */
1901 8, /* Memset max inline. */
1902 1, /* Issue rate. */
1903 ARM_PREFETCH_NOT_BENEFICIAL,
1904 tune_params::PREF_CONST_POOL_TRUE,
1905 tune_params::PREF_LDRD_FALSE,
1906 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
1907 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
1908 tune_params::DISPARAGE_FLAGS_NEITHER,
1909 tune_params::PREF_NEON_64_FALSE,
1910 tune_params::PREF_NEON_STRINGOPS_FALSE,
1911 tune_params::FUSE_NOTHING,
1912 tune_params::SCHED_AUTOPREF_OFF
1915 const struct tune_params arm_marvell_pj4_tune =
1917 &generic_extra_costs, /* Insn extra costs. */
1918 &generic_addr_mode_costs, /* Addressing mode costs. */
1919 NULL, /* Sched adj cost. */
1920 arm_default_branch_cost,
1921 &arm_default_vec_cost,
1922 1, /* Constant limit. */
1923 5, /* Max cond insns. */
1924 8, /* Memset max inline. */
1925 2, /* Issue rate. */
1926 ARM_PREFETCH_NOT_BENEFICIAL,
1927 tune_params::PREF_CONST_POOL_TRUE,
1928 tune_params::PREF_LDRD_FALSE,
1929 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
1930 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
1931 tune_params::DISPARAGE_FLAGS_NEITHER,
1932 tune_params::PREF_NEON_64_FALSE,
1933 tune_params::PREF_NEON_STRINGOPS_FALSE,
1934 tune_params::FUSE_NOTHING,
1935 tune_params::SCHED_AUTOPREF_OFF
1938 const struct tune_params arm_v6t2_tune =
1940 &generic_extra_costs, /* Insn extra costs. */
1941 &generic_addr_mode_costs, /* Addressing mode costs. */
1942 NULL, /* Sched adj cost. */
1943 arm_default_branch_cost,
1944 &arm_default_vec_cost,
1945 1, /* Constant limit. */
1946 5, /* Max cond insns. */
1947 8, /* Memset max inline. */
1948 1, /* Issue rate. */
1949 ARM_PREFETCH_NOT_BENEFICIAL,
1950 tune_params::PREF_CONST_POOL_FALSE,
1951 tune_params::PREF_LDRD_FALSE,
1952 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
1953 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
1954 tune_params::DISPARAGE_FLAGS_NEITHER,
1955 tune_params::PREF_NEON_64_FALSE,
1956 tune_params::PREF_NEON_STRINGOPS_FALSE,
1957 tune_params::FUSE_NOTHING,
1958 tune_params::SCHED_AUTOPREF_OFF
1962 /* Generic Cortex tuning. Use more specific tunings if appropriate. */
1963 const struct tune_params arm_cortex_tune =
1965 &generic_extra_costs,
1966 &generic_addr_mode_costs, /* Addressing mode costs. */
1967 NULL, /* Sched adj cost. */
1968 arm_default_branch_cost,
1969 &arm_default_vec_cost,
1970 1, /* Constant limit. */
1971 5, /* Max cond insns. */
1972 8, /* Memset max inline. */
1973 2, /* Issue rate. */
1974 ARM_PREFETCH_NOT_BENEFICIAL,
1975 tune_params::PREF_CONST_POOL_FALSE,
1976 tune_params::PREF_LDRD_FALSE,
1977 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
1978 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
1979 tune_params::DISPARAGE_FLAGS_NEITHER,
1980 tune_params::PREF_NEON_64_FALSE,
1981 tune_params::PREF_NEON_STRINGOPS_FALSE,
1982 tune_params::FUSE_NOTHING,
1983 tune_params::SCHED_AUTOPREF_OFF
1986 const struct tune_params arm_cortex_a8_tune =
1988 &cortexa8_extra_costs,
1989 &generic_addr_mode_costs, /* Addressing mode costs. */
1990 NULL, /* Sched adj cost. */
1991 arm_default_branch_cost,
1992 &arm_default_vec_cost,
1993 1, /* Constant limit. */
1994 5, /* Max cond insns. */
1995 8, /* Memset max inline. */
1996 2, /* Issue rate. */
1997 ARM_PREFETCH_NOT_BENEFICIAL,
1998 tune_params::PREF_CONST_POOL_FALSE,
1999 tune_params::PREF_LDRD_FALSE,
2000 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2001 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2002 tune_params::DISPARAGE_FLAGS_NEITHER,
2003 tune_params::PREF_NEON_64_FALSE,
2004 tune_params::PREF_NEON_STRINGOPS_TRUE,
2005 tune_params::FUSE_NOTHING,
2006 tune_params::SCHED_AUTOPREF_OFF
2009 const struct tune_params arm_cortex_a7_tune =
2011 &cortexa7_extra_costs,
2012 &generic_addr_mode_costs, /* Addressing mode costs. */
2013 NULL, /* Sched adj cost. */
2014 arm_default_branch_cost,
2015 &arm_default_vec_cost,
2016 1, /* Constant limit. */
2017 5, /* Max cond insns. */
2018 8, /* Memset max inline. */
2019 2, /* Issue rate. */
2020 ARM_PREFETCH_NOT_BENEFICIAL,
2021 tune_params::PREF_CONST_POOL_FALSE,
2022 tune_params::PREF_LDRD_FALSE,
2023 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2024 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2025 tune_params::DISPARAGE_FLAGS_NEITHER,
2026 tune_params::PREF_NEON_64_FALSE,
2027 tune_params::PREF_NEON_STRINGOPS_TRUE,
2028 tune_params::FUSE_NOTHING,
2029 tune_params::SCHED_AUTOPREF_OFF
2032 const struct tune_params arm_cortex_a15_tune =
2034 &cortexa15_extra_costs,
2035 &generic_addr_mode_costs, /* Addressing mode costs. */
2036 NULL, /* Sched adj cost. */
2037 arm_default_branch_cost,
2038 &arm_default_vec_cost,
2039 1, /* Constant limit. */
2040 2, /* Max cond insns. */
2041 8, /* Memset max inline. */
2042 3, /* Issue rate. */
2043 ARM_PREFETCH_NOT_BENEFICIAL,
2044 tune_params::PREF_CONST_POOL_FALSE,
2045 tune_params::PREF_LDRD_TRUE,
2046 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2047 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2048 tune_params::DISPARAGE_FLAGS_ALL,
2049 tune_params::PREF_NEON_64_FALSE,
2050 tune_params::PREF_NEON_STRINGOPS_TRUE,
2051 tune_params::FUSE_NOTHING,
2052 tune_params::SCHED_AUTOPREF_FULL
2055 const struct tune_params arm_cortex_a35_tune =
2057 &cortexa53_extra_costs,
2058 &generic_addr_mode_costs, /* Addressing mode costs. */
2059 NULL, /* Sched adj cost. */
2060 arm_default_branch_cost,
2061 &arm_default_vec_cost,
2062 1, /* Constant limit. */
2063 5, /* Max cond insns. */
2064 8, /* Memset max inline. */
2065 1, /* Issue rate. */
2066 ARM_PREFETCH_NOT_BENEFICIAL,
2067 tune_params::PREF_CONST_POOL_FALSE,
2068 tune_params::PREF_LDRD_FALSE,
2069 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2070 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2071 tune_params::DISPARAGE_FLAGS_NEITHER,
2072 tune_params::PREF_NEON_64_FALSE,
2073 tune_params::PREF_NEON_STRINGOPS_TRUE,
2074 FUSE_OPS (tune_params::FUSE_MOVW_MOVT),
2075 tune_params::SCHED_AUTOPREF_OFF
2078 const struct tune_params arm_cortex_a53_tune =
2080 &cortexa53_extra_costs,
2081 &generic_addr_mode_costs, /* Addressing mode costs. */
2082 NULL, /* Sched adj cost. */
2083 arm_default_branch_cost,
2084 &arm_default_vec_cost,
2085 1, /* Constant limit. */
2086 5, /* Max cond insns. */
2087 8, /* Memset max inline. */
2088 2, /* Issue rate. */
2089 ARM_PREFETCH_NOT_BENEFICIAL,
2090 tune_params::PREF_CONST_POOL_FALSE,
2091 tune_params::PREF_LDRD_FALSE,
2092 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2093 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2094 tune_params::DISPARAGE_FLAGS_NEITHER,
2095 tune_params::PREF_NEON_64_FALSE,
2096 tune_params::PREF_NEON_STRINGOPS_TRUE,
2097 FUSE_OPS (tune_params::FUSE_MOVW_MOVT | tune_params::FUSE_AES_AESMC),
2098 tune_params::SCHED_AUTOPREF_OFF
2101 const struct tune_params arm_cortex_a57_tune =
2103 &cortexa57_extra_costs,
2104 &generic_addr_mode_costs, /* addressing mode costs */
2105 NULL, /* Sched adj cost. */
2106 arm_default_branch_cost,
2107 &arm_default_vec_cost,
2108 1, /* Constant limit. */
2109 2, /* Max cond insns. */
2110 8, /* Memset max inline. */
2111 3, /* Issue rate. */
2112 ARM_PREFETCH_NOT_BENEFICIAL,
2113 tune_params::PREF_CONST_POOL_FALSE,
2114 tune_params::PREF_LDRD_TRUE,
2115 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2116 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2117 tune_params::DISPARAGE_FLAGS_ALL,
2118 tune_params::PREF_NEON_64_FALSE,
2119 tune_params::PREF_NEON_STRINGOPS_TRUE,
2120 FUSE_OPS (tune_params::FUSE_MOVW_MOVT | tune_params::FUSE_AES_AESMC),
2121 tune_params::SCHED_AUTOPREF_FULL
2124 const struct tune_params arm_exynosm1_tune =
2126 &exynosm1_extra_costs,
2127 &generic_addr_mode_costs, /* Addressing mode costs. */
2128 NULL, /* Sched adj cost. */
2129 arm_default_branch_cost,
2130 &arm_default_vec_cost,
2131 1, /* Constant limit. */
2132 2, /* Max cond insns. */
2133 8, /* Memset max inline. */
2134 3, /* Issue rate. */
2135 ARM_PREFETCH_NOT_BENEFICIAL,
2136 tune_params::PREF_CONST_POOL_FALSE,
2137 tune_params::PREF_LDRD_TRUE,
2138 tune_params::LOG_OP_NON_SHORT_CIRCUIT_FALSE, /* Thumb. */
2139 tune_params::LOG_OP_NON_SHORT_CIRCUIT_FALSE, /* ARM. */
2140 tune_params::DISPARAGE_FLAGS_ALL,
2141 tune_params::PREF_NEON_64_FALSE,
2142 tune_params::PREF_NEON_STRINGOPS_TRUE,
2143 tune_params::FUSE_NOTHING,
2144 tune_params::SCHED_AUTOPREF_OFF
2147 const struct tune_params arm_xgene1_tune =
2149 &xgene1_extra_costs,
2150 &generic_addr_mode_costs, /* Addressing mode costs. */
2151 NULL, /* Sched adj cost. */
2152 arm_default_branch_cost,
2153 &arm_default_vec_cost,
2154 1, /* Constant limit. */
2155 2, /* Max cond insns. */
2156 32, /* Memset max inline. */
2157 4, /* Issue rate. */
2158 ARM_PREFETCH_NOT_BENEFICIAL,
2159 tune_params::PREF_CONST_POOL_FALSE,
2160 tune_params::PREF_LDRD_TRUE,
2161 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2162 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2163 tune_params::DISPARAGE_FLAGS_ALL,
2164 tune_params::PREF_NEON_64_FALSE,
2165 tune_params::PREF_NEON_STRINGOPS_FALSE,
2166 tune_params::FUSE_NOTHING,
2167 tune_params::SCHED_AUTOPREF_OFF
2170 /* Branches can be dual-issued on Cortex-A5, so conditional execution is
2171 less appealing. Set max_insns_skipped to a low value. */
2173 const struct tune_params arm_cortex_a5_tune =
2175 &cortexa5_extra_costs,
2176 &generic_addr_mode_costs, /* Addressing mode costs. */
2177 NULL, /* Sched adj cost. */
2178 arm_cortex_a5_branch_cost,
2179 &arm_default_vec_cost,
2180 1, /* Constant limit. */
2181 1, /* Max cond insns. */
2182 8, /* Memset max inline. */
2183 2, /* Issue rate. */
2184 ARM_PREFETCH_NOT_BENEFICIAL,
2185 tune_params::PREF_CONST_POOL_FALSE,
2186 tune_params::PREF_LDRD_FALSE,
2187 tune_params::LOG_OP_NON_SHORT_CIRCUIT_FALSE, /* Thumb. */
2188 tune_params::LOG_OP_NON_SHORT_CIRCUIT_FALSE, /* ARM. */
2189 tune_params::DISPARAGE_FLAGS_NEITHER,
2190 tune_params::PREF_NEON_64_FALSE,
2191 tune_params::PREF_NEON_STRINGOPS_TRUE,
2192 tune_params::FUSE_NOTHING,
2193 tune_params::SCHED_AUTOPREF_OFF
2196 const struct tune_params arm_cortex_a9_tune =
2198 &cortexa9_extra_costs,
2199 &generic_addr_mode_costs, /* Addressing mode costs. */
2200 cortex_a9_sched_adjust_cost,
2201 arm_default_branch_cost,
2202 &arm_default_vec_cost,
2203 1, /* Constant limit. */
2204 5, /* Max cond insns. */
2205 8, /* Memset max inline. */
2206 2, /* Issue rate. */
2207 ARM_PREFETCH_BENEFICIAL(4,32,32),
2208 tune_params::PREF_CONST_POOL_FALSE,
2209 tune_params::PREF_LDRD_FALSE,
2210 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2211 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2212 tune_params::DISPARAGE_FLAGS_NEITHER,
2213 tune_params::PREF_NEON_64_FALSE,
2214 tune_params::PREF_NEON_STRINGOPS_FALSE,
2215 tune_params::FUSE_NOTHING,
2216 tune_params::SCHED_AUTOPREF_OFF
2219 const struct tune_params arm_cortex_a12_tune =
2221 &cortexa12_extra_costs,
2222 &generic_addr_mode_costs, /* Addressing mode costs. */
2223 NULL, /* Sched adj cost. */
2224 arm_default_branch_cost,
2225 &arm_default_vec_cost, /* Vectorizer costs. */
2226 1, /* Constant limit. */
2227 2, /* Max cond insns. */
2228 8, /* Memset max inline. */
2229 2, /* Issue rate. */
2230 ARM_PREFETCH_NOT_BENEFICIAL,
2231 tune_params::PREF_CONST_POOL_FALSE,
2232 tune_params::PREF_LDRD_TRUE,
2233 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2234 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2235 tune_params::DISPARAGE_FLAGS_ALL,
2236 tune_params::PREF_NEON_64_FALSE,
2237 tune_params::PREF_NEON_STRINGOPS_TRUE,
2238 FUSE_OPS (tune_params::FUSE_MOVW_MOVT),
2239 tune_params::SCHED_AUTOPREF_OFF
2242 const struct tune_params arm_cortex_a73_tune =
2244 &cortexa57_extra_costs,
2245 &generic_addr_mode_costs, /* Addressing mode costs. */
2246 NULL, /* Sched adj cost. */
2247 arm_default_branch_cost,
2248 &arm_default_vec_cost, /* Vectorizer costs. */
2249 1, /* Constant limit. */
2250 2, /* Max cond insns. */
2251 8, /* Memset max inline. */
2252 2, /* Issue rate. */
2253 ARM_PREFETCH_NOT_BENEFICIAL,
2254 tune_params::PREF_CONST_POOL_FALSE,
2255 tune_params::PREF_LDRD_TRUE,
2256 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2257 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2258 tune_params::DISPARAGE_FLAGS_ALL,
2259 tune_params::PREF_NEON_64_FALSE,
2260 tune_params::PREF_NEON_STRINGOPS_TRUE,
2261 FUSE_OPS (tune_params::FUSE_AES_AESMC | tune_params::FUSE_MOVW_MOVT),
2262 tune_params::SCHED_AUTOPREF_FULL
2265 /* armv7m tuning. On Cortex-M4 cores for example, MOVW/MOVT take a single
2266 cycle to execute each. An LDR from the constant pool also takes two cycles
2267 to execute, but mildly increases pipelining opportunity (consecutive
2268 loads/stores can be pipelined together, saving one cycle), and may also
2269 improve icache utilisation. Hence we prefer the constant pool for such
2270 processors. */
2272 const struct tune_params arm_v7m_tune =
2274 &v7m_extra_costs,
2275 &generic_addr_mode_costs, /* Addressing mode costs. */
2276 NULL, /* Sched adj cost. */
2277 arm_cortex_m_branch_cost,
2278 &arm_default_vec_cost,
2279 1, /* Constant limit. */
2280 2, /* Max cond insns. */
2281 8, /* Memset max inline. */
2282 1, /* Issue rate. */
2283 ARM_PREFETCH_NOT_BENEFICIAL,
2284 tune_params::PREF_CONST_POOL_TRUE,
2285 tune_params::PREF_LDRD_FALSE,
2286 tune_params::LOG_OP_NON_SHORT_CIRCUIT_FALSE, /* Thumb. */
2287 tune_params::LOG_OP_NON_SHORT_CIRCUIT_FALSE, /* ARM. */
2288 tune_params::DISPARAGE_FLAGS_NEITHER,
2289 tune_params::PREF_NEON_64_FALSE,
2290 tune_params::PREF_NEON_STRINGOPS_FALSE,
2291 tune_params::FUSE_NOTHING,
2292 tune_params::SCHED_AUTOPREF_OFF
2295 /* Cortex-M7 tuning. */
2297 const struct tune_params arm_cortex_m7_tune =
2299 &v7m_extra_costs,
2300 &generic_addr_mode_costs, /* Addressing mode costs. */
2301 NULL, /* Sched adj cost. */
2302 arm_cortex_m7_branch_cost,
2303 &arm_default_vec_cost,
2304 0, /* Constant limit. */
2305 1, /* Max cond insns. */
2306 8, /* Memset max inline. */
2307 2, /* Issue rate. */
2308 ARM_PREFETCH_NOT_BENEFICIAL,
2309 tune_params::PREF_CONST_POOL_TRUE,
2310 tune_params::PREF_LDRD_FALSE,
2311 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2312 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2313 tune_params::DISPARAGE_FLAGS_NEITHER,
2314 tune_params::PREF_NEON_64_FALSE,
2315 tune_params::PREF_NEON_STRINGOPS_FALSE,
2316 tune_params::FUSE_NOTHING,
2317 tune_params::SCHED_AUTOPREF_OFF
2320 /* The arm_v6m_tune is duplicated from arm_cortex_tune, rather than
2321 arm_v6t2_tune. It is used for cortex-m0, cortex-m1, cortex-m0plus and
2322 cortex-m23. */
2323 const struct tune_params arm_v6m_tune =
2325 &generic_extra_costs, /* Insn extra costs. */
2326 &generic_addr_mode_costs, /* Addressing mode costs. */
2327 NULL, /* Sched adj cost. */
2328 arm_default_branch_cost,
2329 &arm_default_vec_cost, /* Vectorizer costs. */
2330 1, /* Constant limit. */
2331 5, /* Max cond insns. */
2332 8, /* Memset max inline. */
2333 1, /* Issue rate. */
2334 ARM_PREFETCH_NOT_BENEFICIAL,
2335 tune_params::PREF_CONST_POOL_FALSE,
2336 tune_params::PREF_LDRD_FALSE,
2337 tune_params::LOG_OP_NON_SHORT_CIRCUIT_FALSE, /* Thumb. */
2338 tune_params::LOG_OP_NON_SHORT_CIRCUIT_FALSE, /* ARM. */
2339 tune_params::DISPARAGE_FLAGS_NEITHER,
2340 tune_params::PREF_NEON_64_FALSE,
2341 tune_params::PREF_NEON_STRINGOPS_FALSE,
2342 tune_params::FUSE_NOTHING,
2343 tune_params::SCHED_AUTOPREF_OFF
2346 const struct tune_params arm_fa726te_tune =
2348 &generic_extra_costs, /* Insn extra costs. */
2349 &generic_addr_mode_costs, /* Addressing mode costs. */
2350 fa726te_sched_adjust_cost,
2351 arm_default_branch_cost,
2352 &arm_default_vec_cost,
2353 1, /* Constant limit. */
2354 5, /* Max cond insns. */
2355 8, /* Memset max inline. */
2356 2, /* Issue rate. */
2357 ARM_PREFETCH_NOT_BENEFICIAL,
2358 tune_params::PREF_CONST_POOL_TRUE,
2359 tune_params::PREF_LDRD_FALSE,
2360 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* Thumb. */
2361 tune_params::LOG_OP_NON_SHORT_CIRCUIT_TRUE, /* ARM. */
2362 tune_params::DISPARAGE_FLAGS_NEITHER,
2363 tune_params::PREF_NEON_64_FALSE,
2364 tune_params::PREF_NEON_STRINGOPS_FALSE,
2365 tune_params::FUSE_NOTHING,
2366 tune_params::SCHED_AUTOPREF_OFF
2369 /* Auto-generated CPU, FPU and architecture tables. */
2370 #include "arm-cpu-data.h"
2372 /* The name of the preprocessor macro to define for this architecture. PROFILE
2373 is replaced by the architecture name (e.g. 8A) in arm_option_override () and
2374 is thus chosen to be big enough to hold the longest architecture name. */
2376 char arm_arch_name[] = "__ARM_ARCH_PROFILE__";
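/* For example, compiling with -march=armv8-a replaces PROFILE so that the
   macro defined is __ARM_ARCH_8A__ (illustrative; the exact text comes from
   the architecture's arch_pp_name entry).  */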
2378 /* Supported TLS relocations. */
2380 enum tls_reloc {
2381 TLS_GD32,
2382 TLS_LDM32,
2383 TLS_LDO32,
2384 TLS_IE32,
2385 TLS_LE32,
2386 TLS_DESCSEQ /* GNU scheme */
2389 /* The maximum number of insns to be used when loading a constant. */
2390 inline static int
2391 arm_constant_limit (bool size_p)
2393 return size_p ? 1 : current_tune->constant_limit;
2396 /* Emit an insn that's a simple single-set. Both the operands must be known
2397 to be valid. */
2398 inline static rtx_insn *
2399 emit_set_insn (rtx x, rtx y)
2401 return emit_insn (gen_rtx_SET (x, y));
2404 /* Return the number of bits set in VALUE. */
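/* Uses Kernighan's method: value &= value - 1 clears the lowest set bit on
   each iteration, so e.g. 0b1011 -> 0b1010 -> 0b1000 -> 0 takes three
   iterations and returns 3.  */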
2405 static unsigned
2406 bit_count (unsigned long value)
2408 unsigned long count = 0;
2410 while (value)
2412 count++;
2413 value &= value - 1; /* Clear the least-significant set bit. */
2416 return count;
2419 /* Return the number of bits set in BMAP. */
2420 static unsigned
2421 bitmap_popcount (const sbitmap bmap)
2423 unsigned int count = 0;
2424 unsigned int n = 0;
2425 sbitmap_iterator sbi;
2427 EXECUTE_IF_SET_IN_BITMAP (bmap, 0, n, sbi)
2428 count++;
2429 return count;
2432 typedef struct
2434 machine_mode mode;
2435 const char *name;
2436 } arm_fixed_mode_set;
2438 /* A small helper for setting fixed-point library libfuncs. */
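/* For example, registering add_optab for QQmode with a "3" suffix selects a
   name of the form "__gnu_addqq3", matching the __gnu_-prefixed fixed-point
   routines provided by libgcc (name shown for illustration).  */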
2440 static void
2441 arm_set_fixed_optab_libfunc (optab optable, machine_mode mode,
2442 const char *funcname, const char *modename,
2443 int num_suffix)
2445 char buffer[50];
2447 if (num_suffix == 0)
2448 sprintf (buffer, "__gnu_%s%s", funcname, modename);
2449 else
2450 sprintf (buffer, "__gnu_%s%s%d", funcname, modename, num_suffix);
2452 set_optab_libfunc (optable, mode, buffer);
2455 static void
2456 arm_set_fixed_conv_libfunc (convert_optab optable, machine_mode to,
2457 machine_mode from, const char *funcname,
2458 const char *toname, const char *fromname)
2460 char buffer[50];
2461 const char *maybe_suffix_2 = "";
2463 /* Follow the logic for selecting a "2" suffix in fixed-bit.h. */
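/* For example, a QQmode to HQmode conversion (both signed fracts) should get
   the "2" suffix, giving a name like "__gnu_fractqqhq2", whereas QQmode to
   SImode gets plain "__gnu_fractqqsi" (names shown for illustration).  */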
2464 if (ALL_FIXED_POINT_MODE_P (from) && ALL_FIXED_POINT_MODE_P (to)
2465 && UNSIGNED_FIXED_POINT_MODE_P (from) == UNSIGNED_FIXED_POINT_MODE_P (to)
2466 && ALL_FRACT_MODE_P (from) == ALL_FRACT_MODE_P (to))
2467 maybe_suffix_2 = "2";
2469 sprintf (buffer, "__gnu_%s%s%s%s", funcname, fromname, toname,
2470 maybe_suffix_2);
2472 set_conv_libfunc (optable, to, from, buffer);
2475 /* Set up library functions unique to ARM. */
2477 static void
2478 arm_init_libfuncs (void)
2480 /* For Linux, we have access to kernel support for atomic operations. */
2481 if (arm_abi == ARM_ABI_AAPCS_LINUX)
2482 init_sync_libfuncs (MAX_SYNC_LIBFUNC_SIZE);
2484 /* There are no special library functions unless we are using the
2485 ARM BPABI. */
2486 if (!TARGET_BPABI)
2487 return;
2489 /* The functions below are described in Section 4 of the "Run-Time
2490 ABI for the ARM architecture", Version 1.0. */
2492 /* Double-precision floating-point arithmetic. Table 2. */
2493 set_optab_libfunc (add_optab, DFmode, "__aeabi_dadd");
2494 set_optab_libfunc (sdiv_optab, DFmode, "__aeabi_ddiv");
2495 set_optab_libfunc (smul_optab, DFmode, "__aeabi_dmul");
2496 set_optab_libfunc (neg_optab, DFmode, "__aeabi_dneg");
2497 set_optab_libfunc (sub_optab, DFmode, "__aeabi_dsub");
2499 /* Double-precision comparisons. Table 3. */
2500 set_optab_libfunc (eq_optab, DFmode, "__aeabi_dcmpeq");
2501 set_optab_libfunc (ne_optab, DFmode, NULL);
2502 set_optab_libfunc (lt_optab, DFmode, "__aeabi_dcmplt");
2503 set_optab_libfunc (le_optab, DFmode, "__aeabi_dcmple");
2504 set_optab_libfunc (ge_optab, DFmode, "__aeabi_dcmpge");
2505 set_optab_libfunc (gt_optab, DFmode, "__aeabi_dcmpgt");
2506 set_optab_libfunc (unord_optab, DFmode, "__aeabi_dcmpun");
2508 /* Single-precision floating-point arithmetic. Table 4. */
2509 set_optab_libfunc (add_optab, SFmode, "__aeabi_fadd");
2510 set_optab_libfunc (sdiv_optab, SFmode, "__aeabi_fdiv");
2511 set_optab_libfunc (smul_optab, SFmode, "__aeabi_fmul");
2512 set_optab_libfunc (neg_optab, SFmode, "__aeabi_fneg");
2513 set_optab_libfunc (sub_optab, SFmode, "__aeabi_fsub");
2515 /* Single-precision comparisons. Table 5. */
2516 set_optab_libfunc (eq_optab, SFmode, "__aeabi_fcmpeq");
2517 set_optab_libfunc (ne_optab, SFmode, NULL);
2518 set_optab_libfunc (lt_optab, SFmode, "__aeabi_fcmplt");
2519 set_optab_libfunc (le_optab, SFmode, "__aeabi_fcmple");
2520 set_optab_libfunc (ge_optab, SFmode, "__aeabi_fcmpge");
2521 set_optab_libfunc (gt_optab, SFmode, "__aeabi_fcmpgt");
2522 set_optab_libfunc (unord_optab, SFmode, "__aeabi_fcmpun");
2524 /* Floating-point to integer conversions. Table 6. */
2525 set_conv_libfunc (sfix_optab, SImode, DFmode, "__aeabi_d2iz");
2526 set_conv_libfunc (ufix_optab, SImode, DFmode, "__aeabi_d2uiz");
2527 set_conv_libfunc (sfix_optab, DImode, DFmode, "__aeabi_d2lz");
2528 set_conv_libfunc (ufix_optab, DImode, DFmode, "__aeabi_d2ulz");
2529 set_conv_libfunc (sfix_optab, SImode, SFmode, "__aeabi_f2iz");
2530 set_conv_libfunc (ufix_optab, SImode, SFmode, "__aeabi_f2uiz");
2531 set_conv_libfunc (sfix_optab, DImode, SFmode, "__aeabi_f2lz");
2532 set_conv_libfunc (ufix_optab, DImode, SFmode, "__aeabi_f2ulz");
2534 /* Conversions between floating types. Table 7. */
2535 set_conv_libfunc (trunc_optab, SFmode, DFmode, "__aeabi_d2f");
2536 set_conv_libfunc (sext_optab, DFmode, SFmode, "__aeabi_f2d");
2538 /* Integer to floating-point conversions. Table 8. */
2539 set_conv_libfunc (sfloat_optab, DFmode, SImode, "__aeabi_i2d");
2540 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__aeabi_ui2d");
2541 set_conv_libfunc (sfloat_optab, DFmode, DImode, "__aeabi_l2d");
2542 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__aeabi_ul2d");
2543 set_conv_libfunc (sfloat_optab, SFmode, SImode, "__aeabi_i2f");
2544 set_conv_libfunc (ufloat_optab, SFmode, SImode, "__aeabi_ui2f");
2545 set_conv_libfunc (sfloat_optab, SFmode, DImode, "__aeabi_l2f");
2546 set_conv_libfunc (ufloat_optab, SFmode, DImode, "__aeabi_ul2f");
2548 /* Long long. Table 9. */
2549 set_optab_libfunc (smul_optab, DImode, "__aeabi_lmul");
2550 set_optab_libfunc (sdivmod_optab, DImode, "__aeabi_ldivmod");
2551 set_optab_libfunc (udivmod_optab, DImode, "__aeabi_uldivmod");
2552 set_optab_libfunc (ashl_optab, DImode, "__aeabi_llsl");
2553 set_optab_libfunc (lshr_optab, DImode, "__aeabi_llsr");
2554 set_optab_libfunc (ashr_optab, DImode, "__aeabi_lasr");
2555 set_optab_libfunc (cmp_optab, DImode, "__aeabi_lcmp");
2556 set_optab_libfunc (ucmp_optab, DImode, "__aeabi_ulcmp");
2558 /* Integer (32/32->32) division. \S 4.3.1. */
2559 set_optab_libfunc (sdivmod_optab, SImode, "__aeabi_idivmod");
2560 set_optab_libfunc (udivmod_optab, SImode, "__aeabi_uidivmod");
2562 /* The divmod functions are designed so that they can be used for
2563 plain division, even though they return both the quotient and the
2564 remainder. The quotient is returned in the usual location (i.e.,
2565 r0 for SImode, {r0, r1} for DImode), just as would be expected
2566 for an ordinary division routine. Because the AAPCS calling
2567 conventions specify that all of { r0, r1, r2, r3 } are
2568 call-clobbered registers, there is no need to tell the compiler
2569 explicitly that those registers are clobbered by these
2570 routines. */
2571 set_optab_libfunc (sdiv_optab, DImode, "__aeabi_ldivmod");
2572 set_optab_libfunc (udiv_optab, DImode, "__aeabi_uldivmod");
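/* For reference, the run-time ABI also returns the remainder: e.g.
   __aeabi_idivmod leaves the quotient in r0 and the remainder in r1, so a
   caller that only wants the quotient can simply ignore r1.  */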
2574 /* For SImode division the ABI provides div-without-mod routines,
2575 which are faster. */
2576 set_optab_libfunc (sdiv_optab, SImode, "__aeabi_idiv");
2577 set_optab_libfunc (udiv_optab, SImode, "__aeabi_uidiv");
2579 /* We don't have mod libcalls. Fortunately gcc knows how to use the
2580 divmod libcalls instead. */
2581 set_optab_libfunc (smod_optab, DImode, NULL);
2582 set_optab_libfunc (umod_optab, DImode, NULL);
2583 set_optab_libfunc (smod_optab, SImode, NULL);
2584 set_optab_libfunc (umod_optab, SImode, NULL);
2586 /* Half-precision float operations. The compiler handles all operations
2587 with NULL libfuncs by converting to SFmode. */
2588 switch (arm_fp16_format)
2590 case ARM_FP16_FORMAT_IEEE:
2591 case ARM_FP16_FORMAT_ALTERNATIVE:
2593 /* Conversions. */
2594 set_conv_libfunc (trunc_optab, HFmode, SFmode,
2595 (arm_fp16_format == ARM_FP16_FORMAT_IEEE
2596 ? "__gnu_f2h_ieee"
2597 : "__gnu_f2h_alternative"));
2598 set_conv_libfunc (sext_optab, SFmode, HFmode,
2599 (arm_fp16_format == ARM_FP16_FORMAT_IEEE
2600 ? "__gnu_h2f_ieee"
2601 : "__gnu_h2f_alternative"));
2603 set_conv_libfunc (trunc_optab, HFmode, DFmode,
2604 (arm_fp16_format == ARM_FP16_FORMAT_IEEE
2605 ? "__gnu_d2h_ieee"
2606 : "__gnu_d2h_alternative"));
2608 /* Arithmetic. */
2609 set_optab_libfunc (add_optab, HFmode, NULL);
2610 set_optab_libfunc (sdiv_optab, HFmode, NULL);
2611 set_optab_libfunc (smul_optab, HFmode, NULL);
2612 set_optab_libfunc (neg_optab, HFmode, NULL);
2613 set_optab_libfunc (sub_optab, HFmode, NULL);
2615 /* Comparisons. */
2616 set_optab_libfunc (eq_optab, HFmode, NULL);
2617 set_optab_libfunc (ne_optab, HFmode, NULL);
2618 set_optab_libfunc (lt_optab, HFmode, NULL);
2619 set_optab_libfunc (le_optab, HFmode, NULL);
2620 set_optab_libfunc (ge_optab, HFmode, NULL);
2621 set_optab_libfunc (gt_optab, HFmode, NULL);
2622 set_optab_libfunc (unord_optab, HFmode, NULL);
2623 break;
2625 default:
2626 break;
2629 /* Use names prefixed with __gnu_ for fixed-point helper functions. */
2631 const arm_fixed_mode_set fixed_arith_modes[] =
2633 { E_QQmode, "qq" },
2634 { E_UQQmode, "uqq" },
2635 { E_HQmode, "hq" },
2636 { E_UHQmode, "uhq" },
2637 { E_SQmode, "sq" },
2638 { E_USQmode, "usq" },
2639 { E_DQmode, "dq" },
2640 { E_UDQmode, "udq" },
2641 { E_TQmode, "tq" },
2642 { E_UTQmode, "utq" },
2643 { E_HAmode, "ha" },
2644 { E_UHAmode, "uha" },
2645 { E_SAmode, "sa" },
2646 { E_USAmode, "usa" },
2647 { E_DAmode, "da" },
2648 { E_UDAmode, "uda" },
2649 { E_TAmode, "ta" },
2650 { E_UTAmode, "uta" }
2652 const arm_fixed_mode_set fixed_conv_modes[] =
2654 { E_QQmode, "qq" },
2655 { E_UQQmode, "uqq" },
2656 { E_HQmode, "hq" },
2657 { E_UHQmode, "uhq" },
2658 { E_SQmode, "sq" },
2659 { E_USQmode, "usq" },
2660 { E_DQmode, "dq" },
2661 { E_UDQmode, "udq" },
2662 { E_TQmode, "tq" },
2663 { E_UTQmode, "utq" },
2664 { E_HAmode, "ha" },
2665 { E_UHAmode, "uha" },
2666 { E_SAmode, "sa" },
2667 { E_USAmode, "usa" },
2668 { E_DAmode, "da" },
2669 { E_UDAmode, "uda" },
2670 { E_TAmode, "ta" },
2671 { E_UTAmode, "uta" },
2672 { E_QImode, "qi" },
2673 { E_HImode, "hi" },
2674 { E_SImode, "si" },
2675 { E_DImode, "di" },
2676 { E_TImode, "ti" },
2677 { E_SFmode, "sf" },
2678 { E_DFmode, "df" }
2680 unsigned int i, j;
2682 for (i = 0; i < ARRAY_SIZE (fixed_arith_modes); i++)
2684 arm_set_fixed_optab_libfunc (add_optab, fixed_arith_modes[i].mode,
2685 "add", fixed_arith_modes[i].name, 3);
2686 arm_set_fixed_optab_libfunc (ssadd_optab, fixed_arith_modes[i].mode,
2687 "ssadd", fixed_arith_modes[i].name, 3);
2688 arm_set_fixed_optab_libfunc (usadd_optab, fixed_arith_modes[i].mode,
2689 "usadd", fixed_arith_modes[i].name, 3);
2690 arm_set_fixed_optab_libfunc (sub_optab, fixed_arith_modes[i].mode,
2691 "sub", fixed_arith_modes[i].name, 3);
2692 arm_set_fixed_optab_libfunc (sssub_optab, fixed_arith_modes[i].mode,
2693 "sssub", fixed_arith_modes[i].name, 3);
2694 arm_set_fixed_optab_libfunc (ussub_optab, fixed_arith_modes[i].mode,
2695 "ussub", fixed_arith_modes[i].name, 3);
2696 arm_set_fixed_optab_libfunc (smul_optab, fixed_arith_modes[i].mode,
2697 "mul", fixed_arith_modes[i].name, 3);
2698 arm_set_fixed_optab_libfunc (ssmul_optab, fixed_arith_modes[i].mode,
2699 "ssmul", fixed_arith_modes[i].name, 3);
2700 arm_set_fixed_optab_libfunc (usmul_optab, fixed_arith_modes[i].mode,
2701 "usmul", fixed_arith_modes[i].name, 3);
2702 arm_set_fixed_optab_libfunc (sdiv_optab, fixed_arith_modes[i].mode,
2703 "div", fixed_arith_modes[i].name, 3);
2704 arm_set_fixed_optab_libfunc (udiv_optab, fixed_arith_modes[i].mode,
2705 "udiv", fixed_arith_modes[i].name, 3);
2706 arm_set_fixed_optab_libfunc (ssdiv_optab, fixed_arith_modes[i].mode,
2707 "ssdiv", fixed_arith_modes[i].name, 3);
2708 arm_set_fixed_optab_libfunc (usdiv_optab, fixed_arith_modes[i].mode,
2709 "usdiv", fixed_arith_modes[i].name, 3);
2710 arm_set_fixed_optab_libfunc (neg_optab, fixed_arith_modes[i].mode,
2711 "neg", fixed_arith_modes[i].name, 2);
2712 arm_set_fixed_optab_libfunc (ssneg_optab, fixed_arith_modes[i].mode,
2713 "ssneg", fixed_arith_modes[i].name, 2);
2714 arm_set_fixed_optab_libfunc (usneg_optab, fixed_arith_modes[i].mode,
2715 "usneg", fixed_arith_modes[i].name, 2);
2716 arm_set_fixed_optab_libfunc (ashl_optab, fixed_arith_modes[i].mode,
2717 "ashl", fixed_arith_modes[i].name, 3);
2718 arm_set_fixed_optab_libfunc (ashr_optab, fixed_arith_modes[i].mode,
2719 "ashr", fixed_arith_modes[i].name, 3);
2720 arm_set_fixed_optab_libfunc (lshr_optab, fixed_arith_modes[i].mode,
2721 "lshr", fixed_arith_modes[i].name, 3);
2722 arm_set_fixed_optab_libfunc (ssashl_optab, fixed_arith_modes[i].mode,
2723 "ssashl", fixed_arith_modes[i].name, 3);
2724 arm_set_fixed_optab_libfunc (usashl_optab, fixed_arith_modes[i].mode,
2725 "usashl", fixed_arith_modes[i].name, 3);
2726 arm_set_fixed_optab_libfunc (cmp_optab, fixed_arith_modes[i].mode,
2727 "cmp", fixed_arith_modes[i].name, 2);
2730 for (i = 0; i < ARRAY_SIZE (fixed_conv_modes); i++)
2731 for (j = 0; j < ARRAY_SIZE (fixed_conv_modes); j++)
2733 if (i == j
2734 || (!ALL_FIXED_POINT_MODE_P (fixed_conv_modes[i].mode)
2735 && !ALL_FIXED_POINT_MODE_P (fixed_conv_modes[j].mode)))
2736 continue;
2738 arm_set_fixed_conv_libfunc (fract_optab, fixed_conv_modes[i].mode,
2739 fixed_conv_modes[j].mode, "fract",
2740 fixed_conv_modes[i].name,
2741 fixed_conv_modes[j].name);
2742 arm_set_fixed_conv_libfunc (satfract_optab,
2743 fixed_conv_modes[i].mode,
2744 fixed_conv_modes[j].mode, "satfract",
2745 fixed_conv_modes[i].name,
2746 fixed_conv_modes[j].name);
2747 arm_set_fixed_conv_libfunc (fractuns_optab,
2748 fixed_conv_modes[i].mode,
2749 fixed_conv_modes[j].mode, "fractuns",
2750 fixed_conv_modes[i].name,
2751 fixed_conv_modes[j].name);
2752 arm_set_fixed_conv_libfunc (satfractuns_optab,
2753 fixed_conv_modes[i].mode,
2754 fixed_conv_modes[j].mode, "satfractuns",
2755 fixed_conv_modes[i].name,
2756 fixed_conv_modes[j].name);
2760 if (TARGET_AAPCS_BASED)
2761 synchronize_libfunc = init_one_libfunc ("__sync_synchronize");
2764 /* On AAPCS systems, this is the "struct __va_list". */
2765 static GTY(()) tree va_list_type;
2767 /* Return the type to use as __builtin_va_list. */
2768 static tree
2769 arm_build_builtin_va_list (void)
2771 tree va_list_name;
2772 tree ap_field;
2774 if (!TARGET_AAPCS_BASED)
2775 return std_build_builtin_va_list ();
2777 /* AAPCS \S 7.1.4 requires that va_list be a typedef for a type
2778 defined as:
2780 struct __va_list
2782 void *__ap;
2785 The C Library ABI further reinforces this definition in \S
2786 4.1.
2788 We must follow this definition exactly. The structure tag
2789 name is visible in C++ mangled names, and thus forms a part
2790 of the ABI. The field name may be used by people who
2791 #include <stdarg.h>. */
2792 /* Create the type. */
2793 va_list_type = lang_hooks.types.make_type (RECORD_TYPE);
2794 /* Give it the required name. */
2795 va_list_name = build_decl (BUILTINS_LOCATION,
2796 TYPE_DECL,
2797 get_identifier ("__va_list"),
2798 va_list_type);
2799 DECL_ARTIFICIAL (va_list_name) = 1;
2800 TYPE_NAME (va_list_type) = va_list_name;
2801 TYPE_STUB_DECL (va_list_type) = va_list_name;
2802 /* Create the __ap field. */
2803 ap_field = build_decl (BUILTINS_LOCATION,
2804 FIELD_DECL,
2805 get_identifier ("__ap"),
2806 ptr_type_node);
2807 DECL_ARTIFICIAL (ap_field) = 1;
2808 DECL_FIELD_CONTEXT (ap_field) = va_list_type;
2809 TYPE_FIELDS (va_list_type) = ap_field;
2810 /* Compute its layout. */
2811 layout_type (va_list_type);
2813 return va_list_type;
2816 /* Return an expression of type "void *" pointing to the next
2817 available argument in a variable-argument list. VALIST is the
2818 user-level va_list object, of type __builtin_va_list. */
2819 static tree
2820 arm_extract_valist_ptr (tree valist)
2822 if (TREE_TYPE (valist) == error_mark_node)
2823 return error_mark_node;
2825 /* On an AAPCS target, the pointer is stored within "struct
2826 va_list". */
2827 if (TARGET_AAPCS_BASED)
2829 tree ap_field = TYPE_FIELDS (TREE_TYPE (valist));
2830 valist = build3 (COMPONENT_REF, TREE_TYPE (ap_field),
2831 valist, ap_field, NULL_TREE);
2834 return valist;
2837 /* Implement TARGET_EXPAND_BUILTIN_VA_START. */
2838 static void
2839 arm_expand_builtin_va_start (tree valist, rtx nextarg)
2841 valist = arm_extract_valist_ptr (valist);
2842 std_expand_builtin_va_start (valist, nextarg);
2845 /* Implement TARGET_GIMPLIFY_VA_ARG_EXPR. */
2846 static tree
2847 arm_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
2848 gimple_seq *post_p)
2850 valist = arm_extract_valist_ptr (valist);
2851 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
2854 /* Check any incompatible options that the user has specified. */
2855 static void
2856 arm_option_check_internal (struct gcc_options *opts)
2858 int flags = opts->x_target_flags;
2860 /* iWMMXt and NEON are incompatible. */
2861 if (TARGET_IWMMXT
2862 && bitmap_bit_p (arm_active_target.isa, isa_bit_neon))
2863 error ("iWMMXt and NEON are incompatible");
2865 /* Make sure that the processor choice does not conflict with any of the
2866 other command line choices. */
2867 if (TARGET_ARM_P (flags)
2868 && !bitmap_bit_p (arm_active_target.isa, isa_bit_notm))
2869 error ("target CPU does not support ARM mode");
2871 /* TARGET_BACKTRACE cannot be used here as crtl->is_leaf is not set yet. */
2872 if ((TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME) && TARGET_ARM_P (flags))
2873 warning (0, "enabling backtrace support is only meaningful when compiling for the Thumb");
2875 if (TARGET_ARM_P (flags) && TARGET_CALLEE_INTERWORKING)
2876 warning (0, "enabling callee interworking support is only meaningful when compiling for the Thumb");
2878 /* If this target is normally configured to use APCS frames, warn if they
2879 are turned off and debugging is turned on. */
2880 if (TARGET_ARM_P (flags)
2881 && write_symbols != NO_DEBUG
2882 && !TARGET_APCS_FRAME
2883 && (TARGET_DEFAULT & MASK_APCS_FRAME))
2884 warning (0, "-g with -mno-apcs-frame may not give sensible debugging");
2886 /* iWMMXt unsupported under Thumb mode. */
2887 if (TARGET_THUMB_P (flags) && TARGET_IWMMXT)
2888 error ("iWMMXt unsupported under Thumb mode");
2890 if (TARGET_HARD_TP && TARGET_THUMB1_P (flags))
2891 error ("can not use -mtp=cp15 with 16-bit Thumb");
2893 if (TARGET_THUMB_P (flags) && TARGET_VXWORKS_RTP && flag_pic)
2895 error ("RTP PIC is incompatible with Thumb");
2896 flag_pic = 0;
2899 /* We only support -mpure-code and -mslow-flash-data on M-profile targets
2900 with MOVT. */
2901 if ((target_pure_code || target_slow_flash_data)
2902 && (!TARGET_HAVE_MOVT || arm_arch_notm || flag_pic || TARGET_NEON))
2904 const char *flag = (target_pure_code ? "-mpure-code" :
2905 "-mslow-flash-data");
2906 error ("%s only supports non-pic code on M-profile targets with the "
2907 "MOVT instruction", flag);
2912 /* Recompute the global settings depending on target attribute options. */
2914 static void
2915 arm_option_params_internal (void)
2917 /* If we are not using the default (ARM mode) section anchor offset
2918 ranges, then set the correct ranges now. */
2919 if (TARGET_THUMB1)
2921 /* Thumb-1 LDR instructions cannot have negative offsets.
2922 Permissible positive offset ranges are 5-bit (for byte loads),
2923 6-bit (for halfword loads), or 7-bit (for word loads).
2924 Empirical results suggest a 7-bit anchor range gives the best
2925 overall code size. */
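/* In byte terms those ranges are 0..31, 0..62 and 0..124 respectively: a
   5-bit immediate scaled by 1, 2 or 4 depending on the access size.  */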
2926 targetm.min_anchor_offset = 0;
2927 targetm.max_anchor_offset = 127;
2929 else if (TARGET_THUMB2)
2931 /* The minimum is set such that the total size of the block
2932 for a particular anchor is 248 + 1 + 4095 bytes, which is
2933 divisible by eight, ensuring natural spacing of anchors. */
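/* That is, 248 + 1 + 4095 = 4344 bytes, and 4344 = 8 * 543.  */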
2934 targetm.min_anchor_offset = -248;
2935 targetm.max_anchor_offset = 4095;
2937 else
2939 targetm.min_anchor_offset = TARGET_MIN_ANCHOR_OFFSET;
2940 targetm.max_anchor_offset = TARGET_MAX_ANCHOR_OFFSET;
2943 /* Increase the number of conditional instructions with -Os. */
2944 max_insns_skipped = optimize_size ? 4 : current_tune->max_insns_skipped;
2946 /* For THUMB2, we limit the conditional sequence to one IT block. */
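/* A single IT instruction can predicate at most four following instructions,
   which is the limit MAX_INSN_PER_IT_BLOCK reflects (fewer when
   -mrestrict-it is in effect).  */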
2947 if (TARGET_THUMB2)
2948 max_insns_skipped = MIN (max_insns_skipped, MAX_INSN_PER_IT_BLOCK);
2951 /* True if -mflip-thumb should next add an attribute for the default
2952 mode, false if it should next add an attribute for the opposite mode. */
2953 static GTY(()) bool thumb_flipper;
2955 /* Options after initial target override. */
2956 static GTY(()) tree init_optimize;
2958 static void
2959 arm_override_options_after_change_1 (struct gcc_options *opts)
2961 if (opts->x_align_functions <= 0)
2962 opts->x_align_functions = TARGET_THUMB_P (opts->x_target_flags)
2963 && opts->x_optimize_size ? 2 : 4;
2966 /* Implement targetm.override_options_after_change. */
2968 static void
2969 arm_override_options_after_change (void)
2971 arm_configure_build_target (&arm_active_target,
2972 TREE_TARGET_OPTION (target_option_default_node),
2973 &global_options_set, false);
2975 arm_override_options_after_change_1 (&global_options);
2978 /* Implement TARGET_OPTION_SAVE. */
2979 static void
2980 arm_option_save (struct cl_target_option *ptr, struct gcc_options *opts)
2982 ptr->x_arm_arch_string = opts->x_arm_arch_string;
2983 ptr->x_arm_cpu_string = opts->x_arm_cpu_string;
2984 ptr->x_arm_tune_string = opts->x_arm_tune_string;
2987 /* Implement TARGET_OPTION_RESTORE. */
2988 static void
2989 arm_option_restore (struct gcc_options *opts, struct cl_target_option *ptr)
2991 opts->x_arm_arch_string = ptr->x_arm_arch_string;
2992 opts->x_arm_cpu_string = ptr->x_arm_cpu_string;
2993 opts->x_arm_tune_string = ptr->x_arm_tune_string;
2994 arm_configure_build_target (&arm_active_target, ptr, &global_options_set,
2995 false);
2998 /* Reset options between modes that the user has specified. */
2999 static void
3000 arm_option_override_internal (struct gcc_options *opts,
3001 struct gcc_options *opts_set)
3003 arm_override_options_after_change_1 (opts);
3005 if (TARGET_INTERWORK && !bitmap_bit_p (arm_active_target.isa, isa_bit_thumb))
3007 /* The default is to enable interworking, so this warning message would
3008 be confusing to users who have just compiled with, e.g., -march=armv3. */
3009 /* warning (0, "ignoring -minterwork because target CPU does not support THUMB"); */
3010 opts->x_target_flags &= ~MASK_INTERWORK;
3013 if (TARGET_THUMB_P (opts->x_target_flags)
3014 && !bitmap_bit_p (arm_active_target.isa, isa_bit_thumb))
3016 warning (0, "target CPU does not support THUMB instructions");
3017 opts->x_target_flags &= ~MASK_THUMB;
3020 if (TARGET_APCS_FRAME && TARGET_THUMB_P (opts->x_target_flags))
3022 /* warning (0, "ignoring -mapcs-frame because -mthumb was used"); */
3023 opts->x_target_flags &= ~MASK_APCS_FRAME;
3026 /* Callee super interworking implies thumb interworking. Adding
3027 this to the flags here simplifies the logic elsewhere. */
3028 if (TARGET_THUMB_P (opts->x_target_flags) && TARGET_CALLEE_INTERWORKING)
3029 opts->x_target_flags |= MASK_INTERWORK;
3031 /* Need to remember initial values so combinations of options like
3032 -mflip-thumb -mthumb -fno-schedule-insns work for any attribute. */
3033 cl_optimization *to = TREE_OPTIMIZATION (init_optimize);
3035 if (! opts_set->x_arm_restrict_it)
3036 opts->x_arm_restrict_it = arm_arch8;
3038 /* ARM execution state and M profile don't have [restrict] IT. */
3039 if (!TARGET_THUMB2_P (opts->x_target_flags) || !arm_arch_notm)
3040 opts->x_arm_restrict_it = 0;
3042 /* Enable -munaligned-access by default for
3043 - all ARMv6 architecture-based processors when compiling for a 32-bit ISA
3044 i.e. Thumb2 and ARM state only.
3045 - ARMv7-A, ARMv7-R, and ARMv7-M architecture-based processors.
3046 - ARMv8 architecture-based processors.
3048 Disable -munaligned-access by default for
3049 - all pre-ARMv6 architecture-based processors
3050 - ARMv6-M architecture-based processors
3051 - ARMv8-M Baseline processors. */
3053 if (! opts_set->x_unaligned_access)
3055 opts->x_unaligned_access = (TARGET_32BIT_P (opts->x_target_flags)
3056 && arm_arch6 && (arm_arch_notm || arm_arch7));
3058 else if (opts->x_unaligned_access == 1
3059 && !(arm_arch6 && (arm_arch_notm || arm_arch7)))
3061 warning (0, "target CPU does not support unaligned accesses");
3062 opts->x_unaligned_access = 0;
3065 /* Don't warn since it's on by default in -O2. */
3066 if (TARGET_THUMB1_P (opts->x_target_flags))
3067 opts->x_flag_schedule_insns = 0;
3068 else
3069 opts->x_flag_schedule_insns = to->x_flag_schedule_insns;
3071 /* Disable shrink-wrap when optimizing function for size, since it tends to
3072 generate additional returns. */
3073 if (optimize_function_for_size_p (cfun)
3074 && TARGET_THUMB2_P (opts->x_target_flags))
3075 opts->x_flag_shrink_wrap = false;
3076 else
3077 opts->x_flag_shrink_wrap = to->x_flag_shrink_wrap;
3079 /* In Thumb1 mode, we emit the epilogue in RTL, but the last insn
3080 - epilogue_insns - does not accurately model the corresponding insns
3081 emitted in the asm file. In particular, see the comment in thumb_exit
3082 'Find out how many of the (return) argument registers we can corrupt'.
3083 As a consequence, the epilogue may clobber registers without fipa-ra
3084 finding out about it. Therefore, disable fipa-ra in Thumb1 mode.
3085 TODO: Accurately model clobbers for epilogue_insns and reenable
3086 fipa-ra. */
3087 if (TARGET_THUMB1_P (opts->x_target_flags))
3088 opts->x_flag_ipa_ra = 0;
3089 else
3090 opts->x_flag_ipa_ra = to->x_flag_ipa_ra;
3092 /* Thumb2 inline assembly code should always use unified syntax.
3093 This will apply to ARM and Thumb1 eventually. */
3094 opts->x_inline_asm_unified = TARGET_THUMB2_P (opts->x_target_flags);
3096 #ifdef SUBTARGET_OVERRIDE_INTERNAL_OPTIONS
3097 SUBTARGET_OVERRIDE_INTERNAL_OPTIONS;
3098 #endif
3101 static sbitmap isa_all_fpubits;
3102 static sbitmap isa_quirkbits;
3104 /* Configure a build target TARGET from the user-specified options OPTS and
3105 OPTS_SET. If WARN_COMPATIBLE, emit a diagnostic if both the CPU and
3106 architecture have been specified, but the two are not identical. */
3107 void
3108 arm_configure_build_target (struct arm_build_target *target,
3109 struct cl_target_option *opts,
3110 struct gcc_options *opts_set,
3111 bool warn_compatible)
3113 const cpu_option *arm_selected_tune = NULL;
3114 const arch_option *arm_selected_arch = NULL;
3115 const cpu_option *arm_selected_cpu = NULL;
3116 const arm_fpu_desc *arm_selected_fpu = NULL;
3117 const char *tune_opts = NULL;
3118 const char *arch_opts = NULL;
3119 const char *cpu_opts = NULL;
3121 bitmap_clear (target->isa);
3122 target->core_name = NULL;
3123 target->arch_name = NULL;
3125 if (opts_set->x_arm_arch_string)
3127 arm_selected_arch = arm_parse_arch_option_name (all_architectures,
3128 "-march",
3129 opts->x_arm_arch_string);
3130 arch_opts = strchr (opts->x_arm_arch_string, '+');
3133 if (opts_set->x_arm_cpu_string)
3135 arm_selected_cpu = arm_parse_cpu_option_name (all_cores, "-mcpu",
3136 opts->x_arm_cpu_string);
3137 cpu_opts = strchr (opts->x_arm_cpu_string, '+');
3138 arm_selected_tune = arm_selected_cpu;
3139 /* If taking the tuning from -mcpu, we don't need to rescan the
3140 options for tuning. */
3143 if (opts_set->x_arm_tune_string)
3145 arm_selected_tune = arm_parse_cpu_option_name (all_cores, "-mtune",
3146 opts->x_arm_tune_string);
3147 tune_opts = strchr (opts->x_arm_tune_string, '+');
3150 if (arm_selected_arch)
3152 arm_initialize_isa (target->isa, arm_selected_arch->common.isa_bits);
3153 arm_parse_option_features (target->isa, &arm_selected_arch->common,
3154 arch_opts);
3156 if (arm_selected_cpu)
3158 auto_sbitmap cpu_isa (isa_num_bits);
3159 auto_sbitmap isa_delta (isa_num_bits);
3161 arm_initialize_isa (cpu_isa, arm_selected_cpu->common.isa_bits);
3162 arm_parse_option_features (cpu_isa, &arm_selected_cpu->common,
3163 cpu_opts);
3164 bitmap_xor (isa_delta, cpu_isa, target->isa);
3165 /* Ignore any bits that are quirk bits. */
3166 bitmap_and_compl (isa_delta, isa_delta, isa_quirkbits);
3167 /* Ignore (for now) any bits that might be set by -mfpu. */
3168 bitmap_and_compl (isa_delta, isa_delta, isa_all_fpubits);
3170 if (!bitmap_empty_p (isa_delta))
3172 if (warn_compatible)
3173 warning (0, "switch -mcpu=%s conflicts with -march=%s switch",
3174 arm_selected_cpu->common.name,
3175 arm_selected_arch->common.name);
3176 /* -march wins for code generation.
3177 -mcpu wins for default tuning. */
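/* For instance, combining -mcpu=cortex-m4 with -march=armv7-a triggers the
   warning above; code is then generated for ARMv7-A but tuned for the
   Cortex-M4 (example chosen purely for illustration).  */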
3178 if (!arm_selected_tune)
3179 arm_selected_tune = arm_selected_cpu;
3181 arm_selected_cpu = all_cores + arm_selected_arch->tune_id;
3182 target->arch_name = arm_selected_arch->common.name;
3184 else
3186 /* Architecture and CPU are essentially the same.
3187 Prefer the CPU setting. */
3188 arm_selected_arch = all_architectures + arm_selected_cpu->arch;
3189 target->core_name = arm_selected_cpu->common.name;
3190 /* Copy the CPU's capabilities, so that we inherit the
3191 appropriate extensions and quirks. */
3192 bitmap_copy (target->isa, cpu_isa);
3195 else
3197 /* Pick a CPU based on the architecture. */
3198 arm_selected_cpu = all_cores + arm_selected_arch->tune_id;
3199 target->arch_name = arm_selected_arch->common.name;
3200 /* Note: target->core_name is left unset in this path. */
3203 else if (arm_selected_cpu)
3205 target->core_name = arm_selected_cpu->common.name;
3206 arm_initialize_isa (target->isa, arm_selected_cpu->common.isa_bits);
3207 arm_parse_option_features (target->isa, &arm_selected_cpu->common,
3208 cpu_opts);
3209 arm_selected_arch = all_architectures + arm_selected_cpu->arch;
3211 /* If the user did not specify a processor or architecture, choose
3212 one for them. */
3213 else
3215 const cpu_option *sel;
3216 auto_sbitmap sought_isa (isa_num_bits);
3217 bitmap_clear (sought_isa);
3218 auto_sbitmap default_isa (isa_num_bits);
3220 arm_selected_cpu = arm_parse_cpu_option_name (all_cores, "default CPU",
3221 TARGET_CPU_DEFAULT);
3222 cpu_opts = strchr (TARGET_CPU_DEFAULT, '+');
3223 gcc_assert (arm_selected_cpu->common.name);
3225 /* RWE: All of the selection logic below (to the end of this
3226 'if' clause) looks somewhat suspect. It appears to be mostly
3227 there to support forcing thumb support when the default CPU
3228 does not have thumb (somewhat dubious in terms of what the
3229 user might be expecting). I think it should be removed once
3230 support for the pre-thumb era cores is removed. */
3231 sel = arm_selected_cpu;
3232 arm_initialize_isa (default_isa, sel->common.isa_bits);
3233 arm_parse_option_features (default_isa, &arm_selected_cpu->common,
3234 cpu_opts);
3236 /* Now check to see if the user has specified any command line
3237 switches that require certain abilities from the cpu. */
3239 if (TARGET_INTERWORK || TARGET_THUMB)
3241 bitmap_set_bit (sought_isa, isa_bit_thumb);
3242 bitmap_set_bit (sought_isa, isa_bit_mode32);
3244 /* There are no ARM processors that support both APCS-26 and
3245 interworking. Therefore we forcibly remove MODE26 from
3246 the isa features here (if it was set), so that the
3247 search below will always be able to find a compatible
3248 processor. */
3249 bitmap_clear_bit (default_isa, isa_bit_mode26);
3252 /* If there are such requirements and the default CPU does not
3253 satisfy them, we need to run over the complete list of
3254 cores looking for one that is satisfactory. */
3255 if (!bitmap_empty_p (sought_isa)
3256 && !bitmap_subset_p (sought_isa, default_isa))
3258 auto_sbitmap candidate_isa (isa_num_bits);
3259 /* We're only interested in a CPU with at least the
3260 capabilities of the default CPU and the required
3261 additional features. */
3262 bitmap_ior (default_isa, default_isa, sought_isa);
3264 /* Try to locate a CPU type that supports all of the abilities
3265 of the default CPU, plus the extra abilities requested by
3266 the user. */
3267 for (sel = all_cores; sel->common.name != NULL; sel++)
3269 arm_initialize_isa (candidate_isa, sel->common.isa_bits);
3270 /* An exact match? */
3271 if (bitmap_equal_p (default_isa, candidate_isa))
3272 break;
3275 if (sel->common.name == NULL)
3277 unsigned current_bit_count = isa_num_bits;
3278 const cpu_option *best_fit = NULL;
3280 /* Ideally we would like to issue an error message here
3281 saying that it was not possible to find a CPU compatible
3282 with the default CPU, but which also supports the command
3283 line options specified by the programmer, and so they
3284 ought to use the -mcpu=<name> command line option to
3285 override the default CPU type.
3287 If we cannot find a CPU that has exactly the
3288 characteristics of the default CPU and the given
3289 command line options we scan the array again looking
3290 for a best match. The best match must have at least
3291 the capabilities of the perfect match. */
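/* For example, if the required set is { ARMv4, Thumb } then a candidate
   providing { ARMv4, Thumb, ARMv5 } (one extra capability bit) is preferred
   over one providing { ARMv4, Thumb, ARMv5, ARMv6 } (two extra bits), as
   measured by the popcount of the difference below.  (Illustrative example
   only.)  */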
3292 for (sel = all_cores; sel->common.name != NULL; sel++)
3294 arm_initialize_isa (candidate_isa, sel->common.isa_bits);
3296 if (bitmap_subset_p (default_isa, candidate_isa))
3298 unsigned count;
3300 bitmap_and_compl (candidate_isa, candidate_isa,
3301 default_isa);
3302 count = bitmap_popcount (candidate_isa);
3304 if (count < current_bit_count)
3306 best_fit = sel;
3307 current_bit_count = count;
3311 gcc_assert (best_fit);
3312 sel = best_fit;
3315 arm_selected_cpu = sel;
3318 /* Now we know the CPU, we can finally initialize the target
3319 structure. */
3320 target->core_name = arm_selected_cpu->common.name;
3321 arm_initialize_isa (target->isa, arm_selected_cpu->common.isa_bits);
3322 arm_parse_option_features (target->isa, &arm_selected_cpu->common,
3323 cpu_opts);
3324 arm_selected_arch = all_architectures + arm_selected_cpu->arch;
3327 gcc_assert (arm_selected_cpu);
3328 gcc_assert (arm_selected_arch);
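/* An explicit -mfpu= option overrides whatever FPU capabilities the CPU or
   architecture implied: clear every inherited FPU feature bit, then set
   exactly the bits of the named FPU.  */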
3330 if (opts->x_arm_fpu_index != TARGET_FPU_auto)
3332 arm_selected_fpu = &all_fpus[opts->x_arm_fpu_index];
3333 auto_sbitmap fpu_bits (isa_num_bits);
3335 arm_initialize_isa (fpu_bits, arm_selected_fpu->isa_bits);
3336 bitmap_and_compl (target->isa, target->isa, isa_all_fpubits);
3337 bitmap_ior (target->isa, target->isa, fpu_bits);
3340 if (!arm_selected_tune)
3341 arm_selected_tune = arm_selected_cpu;
3342 else /* Validate the features passed to -mtune. */
3343 arm_parse_option_features (NULL, &arm_selected_tune->common, tune_opts);
3345 const cpu_tune *tune_data = &all_tunes[arm_selected_tune - all_cores];
3347 /* Finish initializing the target structure. */
3348 target->arch_pp_name = arm_selected_arch->arch;
3349 target->base_arch = arm_selected_arch->base_arch;
3350 target->profile = arm_selected_arch->profile;
3352 target->tune_flags = tune_data->tune_flags;
3353 target->tune = tune_data->tune;
3354 target->tune_core = tune_data->scheduler;
3355 arm_option_reconfigure_globals ();
3358 /* Fix up any incompatible options that the user has specified. */
3359 static void
3360 arm_option_override (void)
3362 static const enum isa_feature fpu_bitlist[]
3363 = { ISA_ALL_FPU_INTERNAL, isa_nobit };
3364 static const enum isa_feature quirk_bitlist[] = { ISA_ALL_QUIRKS, isa_nobit};
3365 cl_target_option opts;
3367 isa_quirkbits = sbitmap_alloc (isa_num_bits);
3368 arm_initialize_isa (isa_quirkbits, quirk_bitlist);
3370 isa_all_fpubits = sbitmap_alloc (isa_num_bits);
3371 arm_initialize_isa (isa_all_fpubits, fpu_bitlist);
3373 arm_active_target.isa = sbitmap_alloc (isa_num_bits);
3375 if (!global_options_set.x_arm_fpu_index)
3377 bool ok;
3378 int fpu_index;
3380 ok = opt_enum_arg_to_value (OPT_mfpu_, FPUTYPE_AUTO, &fpu_index,
3381 CL_TARGET);
3382 gcc_assert (ok);
3383 arm_fpu_index = (enum fpu_type) fpu_index;
3386 cl_target_option_save (&opts, &global_options);
3387 arm_configure_build_target (&arm_active_target, &opts, &global_options_set,
3388 true);
3390 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3391 SUBTARGET_OVERRIDE_OPTIONS;
3392 #endif
3394 /* Initialize boolean versions of the architectural flags, for use
3395 in the arm.md file and for enabling feature flags. */
3396 arm_option_reconfigure_globals ();
3398 arm_tune = arm_active_target.tune_core;
3399 tune_flags = arm_active_target.tune_flags;
3400 current_tune = arm_active_target.tune;
3402 /* TBD: Dwarf info for apcs frame is not handled yet. */
3403 if (TARGET_APCS_FRAME)
3404 flag_shrink_wrap = false;
3406 if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
3408 warning (0, "-mapcs-stack-check incompatible with -mno-apcs-frame");
3409 target_flags |= MASK_APCS_FRAME;
3412 if (TARGET_POKE_FUNCTION_NAME)
3413 target_flags |= MASK_APCS_FRAME;
3415 if (TARGET_APCS_REENT && flag_pic)
3416 error ("-fpic and -mapcs-reent are incompatible");
3418 if (TARGET_APCS_REENT)
3419 warning (0, "APCS reentrant code not supported. Ignored");
3421 /* Set up some tuning parameters. */
3422 arm_ld_sched = (tune_flags & TF_LDSCHED) != 0;
3423 arm_tune_strongarm = (tune_flags & TF_STRONG) != 0;
3424 arm_tune_wbuf = (tune_flags & TF_WBUF) != 0;
3425 arm_tune_xscale = (tune_flags & TF_XSCALE) != 0;
3426 arm_tune_cortex_a9 = (arm_tune == TARGET_CPU_cortexa9) != 0;
3427 arm_m_profile_small_mul = (tune_flags & TF_SMALLMUL) != 0;
3429 /* For arm2/3 there is no need to do any scheduling if we are doing
3430 software floating-point. */
3431 if (TARGET_SOFT_FLOAT && (tune_flags & TF_NO_MODE32))
3432 flag_schedule_insns = flag_schedule_insns_after_reload = 0;
3434 /* Override the default structure alignment for AAPCS ABI. */
3435 if (!global_options_set.x_arm_structure_size_boundary)
3437 if (TARGET_AAPCS_BASED)
3438 arm_structure_size_boundary = 8;
3440 else
3442 warning (0, "option %<-mstructure-size-boundary%> is deprecated");
3444 if (arm_structure_size_boundary != 8
3445 && arm_structure_size_boundary != 32
3446 && !(ARM_DOUBLEWORD_ALIGN && arm_structure_size_boundary == 64))
3448 if (ARM_DOUBLEWORD_ALIGN)
3449 warning (0,
3450 "structure size boundary can only be set to 8, 32 or 64");
3451 else
3452 warning (0, "structure size boundary can only be set to 8 or 32");
3453 arm_structure_size_boundary
3454 = (TARGET_AAPCS_BASED ? 8 : DEFAULT_STRUCTURE_SIZE_BOUNDARY);
3458 if (TARGET_VXWORKS_RTP)
3460 if (!global_options_set.x_arm_pic_data_is_text_relative)
3461 arm_pic_data_is_text_relative = 0;
3463 else if (flag_pic
3464 && !arm_pic_data_is_text_relative
3465 && !(global_options_set.x_target_flags & MASK_SINGLE_PIC_BASE))
3466 /* When text & data segments don't have a fixed displacement, the
3467 intended use is with a single, read only, pic base register.
3468 Unless the user explicitly requested not to do that, set
3469 it. */
3470 target_flags |= MASK_SINGLE_PIC_BASE;
3472 /* If stack checking is disabled, we can use r10 as the PIC register,
3473 which keeps r9 available. The EABI specifies r9 as the PIC register. */
3474 if (flag_pic && TARGET_SINGLE_PIC_BASE)
3476 if (TARGET_VXWORKS_RTP)
3477 warning (0, "RTP PIC is incompatible with -msingle-pic-base");
3478 arm_pic_register = (TARGET_APCS_STACK || TARGET_AAPCS_BASED) ? 9 : 10;
3481 if (flag_pic && TARGET_VXWORKS_RTP)
3482 arm_pic_register = 9;
3484 if (arm_pic_register_string != NULL)
3486 int pic_register = decode_reg_name (arm_pic_register_string);
3488 if (!flag_pic)
3489 warning (0, "-mpic-register= is useless without -fpic");
3491 /* Prevent the user from choosing an obviously stupid PIC register. */
3492 else if (pic_register < 0 || call_used_regs[pic_register]
3493 || pic_register == HARD_FRAME_POINTER_REGNUM
3494 || pic_register == STACK_POINTER_REGNUM
3495 || pic_register >= PC_REGNUM
3496 || (TARGET_VXWORKS_RTP
3497 && (unsigned int) pic_register != arm_pic_register))
3498 error ("unable to use '%s' for PIC register", arm_pic_register_string);
3499 else
3500 arm_pic_register = pic_register;
3503 /* Enable -mfix-cortex-m3-ldrd by default for Cortex-M3 cores. */
3504 if (fix_cm3_ldrd == 2)
3506 if (bitmap_bit_p (arm_active_target.isa, isa_bit_quirk_cm3_ldrd))
3507 fix_cm3_ldrd = 1;
3508 else
3509 fix_cm3_ldrd = 0;
3512 /* Hot/Cold partitioning is not currently supported, since we can't
3513 handle literal pool placement in that case. */
3514 if (flag_reorder_blocks_and_partition)
3516 inform (input_location,
3517 "-freorder-blocks-and-partition not supported on this architecture");
3518 flag_reorder_blocks_and_partition = 0;
3519 flag_reorder_blocks = 1;
3522 if (flag_pic)
3523 /* Hoisting PIC address calculations more aggressively provides a small,
3524 but measurable, size reduction for PIC code. Therefore, we decrease
3525 the bar for unrestricted expression hoisting to the cost of PIC address
3526 calculation, which is 2 instructions. */
3527 maybe_set_param_value (PARAM_GCSE_UNRESTRICTED_COST, 2,
3528 global_options.x_param_values,
3529 global_options_set.x_param_values);
3531 /* ARM EABI defaults to strict volatile bitfields. */
3532 if (TARGET_AAPCS_BASED && flag_strict_volatile_bitfields < 0
3533 && abi_version_at_least(2))
3534 flag_strict_volatile_bitfields = 1;
3536 /* Enable sw prefetching at -O3 for CPUs that have prefetch, and where
3537 we have deemed it beneficial (signified by setting
3538 prefetch.num_slots to 1 or more). */
3539 if (flag_prefetch_loop_arrays < 0
3540 && HAVE_prefetch
3541 && optimize >= 3
3542 && current_tune->prefetch.num_slots > 0)
3543 flag_prefetch_loop_arrays = 1;
3545 /* Set up parameters to be used in the prefetching algorithm.  Do not
3546 override the defaults unless we are tuning for a core we have
3547 researched values for. */
3548 if (current_tune->prefetch.num_slots > 0)
3549 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3550 current_tune->prefetch.num_slots,
3551 global_options.x_param_values,
3552 global_options_set.x_param_values);
3553 if (current_tune->prefetch.l1_cache_line_size >= 0)
3554 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3555 current_tune->prefetch.l1_cache_line_size,
3556 global_options.x_param_values,
3557 global_options_set.x_param_values);
3558 if (current_tune->prefetch.l1_cache_size >= 0)
3559 maybe_set_param_value (PARAM_L1_CACHE_SIZE,
3560 current_tune->prefetch.l1_cache_size,
3561 global_options.x_param_values,
3562 global_options_set.x_param_values);
3564 /* Use Neon rather than core registers to perform 64-bit
3565 operations. */
3566 prefer_neon_for_64bits = current_tune->prefer_neon_for_64bits;
3567 if (use_neon_for_64bits == 1)
3568 prefer_neon_for_64bits = true;
3570 /* Use the alternative scheduling-pressure algorithm by default. */
3571 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM, SCHED_PRESSURE_MODEL,
3572 global_options.x_param_values,
3573 global_options_set.x_param_values);
3575 /* Look through ready list and all of queue for instructions
3576 relevant for L2 auto-prefetcher. */
3577 int param_sched_autopref_queue_depth;
3579 switch (current_tune->sched_autopref)
3581 case tune_params::SCHED_AUTOPREF_OFF:
3582 param_sched_autopref_queue_depth = -1;
3583 break;
3585 case tune_params::SCHED_AUTOPREF_RANK:
3586 param_sched_autopref_queue_depth = 0;
3587 break;
3589 case tune_params::SCHED_AUTOPREF_FULL:
3590 param_sched_autopref_queue_depth = max_insn_queue_index + 1;
3591 break;
3593 default:
3594 gcc_unreachable ();
3597 maybe_set_param_value (PARAM_SCHED_AUTOPREF_QUEUE_DEPTH,
3598 param_sched_autopref_queue_depth,
3599 global_options.x_param_values,
3600 global_options_set.x_param_values);
3602 /* Currently, for slow flash data, we just disable literal pools. We also
3603 disable them for pure-code. */
3604 if (target_slow_flash_data || target_pure_code)
3605 arm_disable_literal_pool = true;
3607 /* Disable scheduling fusion by default if this is not an armv7 processor
3608 or the tuning does not prefer ldrd/strd. */
3609 if (flag_schedule_fusion == 2
3610 && (!arm_arch7 || !current_tune->prefer_ldrd_strd))
3611 flag_schedule_fusion = 0;
3613 /* Need to remember initial options before they are overridden. */
3614 init_optimize = build_optimization_node (&global_options);
3616 arm_options_perform_arch_sanity_checks ();
3617 arm_option_override_internal (&global_options, &global_options_set);
3618 arm_option_check_internal (&global_options);
3619 arm_option_params_internal ();
3621 /* Create the default target_options structure. */
3622 target_option_default_node = target_option_current_node
3623 = build_target_option_node (&global_options);
3625 /* Register global variables with the garbage collector. */
3626 arm_add_gc_roots ();
3628 /* Init initial mode for testing. */
3629 thumb_flipper = TARGET_THUMB;
3633 /* Reconfigure global status flags from the active_target.isa. */
3634 void
3635 arm_option_reconfigure_globals (void)
3637 sprintf (arm_arch_name, "__ARM_ARCH_%s__", arm_active_target.arch_pp_name);
3638 arm_base_arch = arm_active_target.base_arch;
3640 /* Initialize boolean versions of the architectural flags, for use
3641 in the arm.md file. */
3642 arm_arch3m = bitmap_bit_p (arm_active_target.isa, isa_bit_armv3m);
3643 arm_arch4 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv4);
3644 arm_arch4t = arm_arch4 && bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
3645 arm_arch5 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv5);
3646 arm_arch5e = bitmap_bit_p (arm_active_target.isa, isa_bit_armv5e);
3647 arm_arch5te = arm_arch5e
3648 && bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
3649 arm_arch6 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv6);
3650 arm_arch6k = bitmap_bit_p (arm_active_target.isa, isa_bit_armv6k);
3651 arm_arch_notm = bitmap_bit_p (arm_active_target.isa, isa_bit_notm);
3652 arm_arch6m = arm_arch6 && !arm_arch_notm;
3653 arm_arch7 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv7);
3654 arm_arch7em = bitmap_bit_p (arm_active_target.isa, isa_bit_armv7em);
3655 arm_arch8 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8);
3656 arm_arch8_1 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8_1);
3657 arm_arch8_2 = bitmap_bit_p (arm_active_target.isa, isa_bit_armv8_2);
3658 arm_arch_thumb1 = bitmap_bit_p (arm_active_target.isa, isa_bit_thumb);
3659 arm_arch_thumb2 = bitmap_bit_p (arm_active_target.isa, isa_bit_thumb2);
3660 arm_arch_xscale = bitmap_bit_p (arm_active_target.isa, isa_bit_xscale);
3661 arm_arch_iwmmxt = bitmap_bit_p (arm_active_target.isa, isa_bit_iwmmxt);
3662 arm_arch_iwmmxt2 = bitmap_bit_p (arm_active_target.isa, isa_bit_iwmmxt2);
3663 arm_arch_thumb_hwdiv = bitmap_bit_p (arm_active_target.isa, isa_bit_tdiv);
3664 arm_arch_arm_hwdiv = bitmap_bit_p (arm_active_target.isa, isa_bit_adiv);
3665 arm_arch_crc = bitmap_bit_p (arm_active_target.isa, isa_bit_crc32);
3666 arm_arch_cmse = bitmap_bit_p (arm_active_target.isa, isa_bit_cmse);
3667 arm_fp16_inst = bitmap_bit_p (arm_active_target.isa, isa_bit_fp16);
3668 arm_arch_lpae = bitmap_bit_p (arm_active_target.isa, isa_bit_lpae);
3669 if (arm_fp16_inst)
3671 if (arm_fp16_format == ARM_FP16_FORMAT_ALTERNATIVE)
3672 error ("selected fp16 options are incompatible");
3673 arm_fp16_format = ARM_FP16_FORMAT_IEEE;
3676 /* And finally, set up some quirks. */
3677 arm_arch_no_volatile_ce
3678 = bitmap_bit_p (arm_active_target.isa, isa_bit_quirk_no_volatile_ce);
3679 arm_arch6kz = arm_arch6k && bitmap_bit_p (arm_active_target.isa,
3680 isa_bit_quirk_armv6kz);
3682 /* Use the cp15 method if it is available. */
3683 if (target_thread_pointer == TP_AUTO)
3685 if (arm_arch6k && !TARGET_THUMB1)
3686 target_thread_pointer = TP_CP15;
3687 else
3688 target_thread_pointer = TP_SOFT;
3692 /* Perform some validation between the desired architecture and the rest of the
3693 options. */
3694 void
3695 arm_options_perform_arch_sanity_checks (void)
3697 /* V5 code we generate is completely interworking capable, so we turn off
3698 TARGET_INTERWORK here to avoid many tests later on. */
3700 /* XXX However, we must pass the right pre-processor defines to CPP
3701 or GLD can get confused. This is a hack. */
3702 if (TARGET_INTERWORK)
3703 arm_cpp_interwork = 1;
3705 if (arm_arch5)
3706 target_flags &= ~MASK_INTERWORK;
3708 if (TARGET_IWMMXT && !ARM_DOUBLEWORD_ALIGN)
3709 error ("iwmmxt requires an AAPCS compatible ABI for proper operation");
3711 if (TARGET_IWMMXT_ABI && !TARGET_IWMMXT)
3712 error ("iwmmxt abi requires an iwmmxt capable cpu");
3714 /* BPABI targets use linker tricks to allow interworking on cores
3715 without thumb support. */
3716 if (TARGET_INTERWORK
3717 && !TARGET_BPABI
3718 && !bitmap_bit_p (arm_active_target.isa, isa_bit_thumb))
3720 warning (0, "target CPU does not support interworking" );
3721 target_flags &= ~MASK_INTERWORK;
3724 /* If soft-float is specified then don't use FPU. */
3725 if (TARGET_SOFT_FLOAT)
3726 arm_fpu_attr = FPU_NONE;
3727 else
3728 arm_fpu_attr = FPU_VFP;
3730 if (TARGET_AAPCS_BASED)
3732 if (TARGET_CALLER_INTERWORKING)
3733 error ("AAPCS does not support -mcaller-super-interworking");
3734 else
3735 if (TARGET_CALLEE_INTERWORKING)
3736 error ("AAPCS does not support -mcallee-super-interworking");
3739 /* __fp16 support currently assumes the core has ldrh. */
3740 if (!arm_arch4 && arm_fp16_format != ARM_FP16_FORMAT_NONE)
3741 sorry ("__fp16 and no ldrh");
3743 if (use_cmse && !arm_arch_cmse)
3744 error ("target CPU does not support ARMv8-M Security Extensions");
3746 /* We don't clear D16-D31 VFP registers for cmse_nonsecure_call functions
3747 and ARMv8-M Baseline and Mainline do not allow such configuration. */
3748 if (use_cmse && LAST_VFP_REGNUM > LAST_LO_VFP_REGNUM)
3749 error ("ARMv8-M Security Extensions incompatible with selected FPU");
3752 if (TARGET_AAPCS_BASED)
3754 if (arm_abi == ARM_ABI_IWMMXT)
3755 arm_pcs_default = ARM_PCS_AAPCS_IWMMXT;
3756 else if (TARGET_HARD_FLOAT_ABI)
3758 arm_pcs_default = ARM_PCS_AAPCS_VFP;
3759 if (!bitmap_bit_p (arm_active_target.isa, isa_bit_vfpv2))
3760 error ("-mfloat-abi=hard: selected processor lacks an FPU");
3762 else
3763 arm_pcs_default = ARM_PCS_AAPCS;
3765 else
3767 if (arm_float_abi == ARM_FLOAT_ABI_HARD)
3768 sorry ("-mfloat-abi=hard and VFP");
3770 if (arm_abi == ARM_ABI_APCS)
3771 arm_pcs_default = ARM_PCS_APCS;
3772 else
3773 arm_pcs_default = ARM_PCS_ATPCS;
3777 static void
3778 arm_add_gc_roots (void)
3780 gcc_obstack_init(&minipool_obstack);
3781 minipool_startobj = (char *) obstack_alloc (&minipool_obstack, 0);
3784 /* A table of known ARM exception types.
3785 For use with the interrupt function attribute. */
3787 typedef struct
3789 const char *const arg;
3790 const unsigned long return_value;
3792 isr_attribute_arg;
3794 static const isr_attribute_arg isr_attribute_args [] =
3796 { "IRQ", ARM_FT_ISR },
3797 { "irq", ARM_FT_ISR },
3798 { "FIQ", ARM_FT_FIQ },
3799 { "fiq", ARM_FT_FIQ },
3800 { "ABORT", ARM_FT_ISR },
3801 { "abort", ARM_FT_ISR },
3802 { "ABORT", ARM_FT_ISR },
3803 { "abort", ARM_FT_ISR },
3804 { "UNDEF", ARM_FT_EXCEPTION },
3805 { "undef", ARM_FT_EXCEPTION },
3806 { "SWI", ARM_FT_EXCEPTION },
3807 { "swi", ARM_FT_EXCEPTION },
3808 { NULL, ARM_FT_NORMAL }
3811 /* Returns the (interrupt) function type of the current
3812 function, or ARM_FT_UNKNOWN if the type cannot be determined. */
3814 static unsigned long
3815 arm_isr_value (tree argument)
3817 const isr_attribute_arg * ptr;
3818 const char * arg;
3820 if (!arm_arch_notm)
3821 return ARM_FT_NORMAL | ARM_FT_STACKALIGN;
3823 /* No argument - default to IRQ. */
3824 if (argument == NULL_TREE)
3825 return ARM_FT_ISR;
3827 /* Get the value of the argument. */
3828 if (TREE_VALUE (argument) == NULL_TREE
3829 || TREE_CODE (TREE_VALUE (argument)) != STRING_CST)
3830 return ARM_FT_UNKNOWN;
3832 arg = TREE_STRING_POINTER (TREE_VALUE (argument));
3834 /* Check it against the list of known arguments. */
3835 for (ptr = isr_attribute_args; ptr->arg != NULL; ptr++)
3836 if (streq (arg, ptr->arg))
3837 return ptr->return_value;
3839 /* An unrecognized interrupt type. */
3840 return ARM_FT_UNKNOWN;
3843 /* Computes the type of the current function. */
3845 static unsigned long
3846 arm_compute_func_type (void)
3848 unsigned long type = ARM_FT_UNKNOWN;
3849 tree a;
3850 tree attr;
3852 gcc_assert (TREE_CODE (current_function_decl) == FUNCTION_DECL);
3854 /* Decide if the current function is volatile. Such functions
3855 never return, and many memory cycles can be saved by not storing
3856 register values that will never be needed again. This optimization
3857 was added to speed up context switching in a kernel application. */
3858 if (optimize > 0
3859 && (TREE_NOTHROW (current_function_decl)
3860 || !(flag_unwind_tables
3861 || (flag_exceptions
3862 && arm_except_unwind_info (&global_options) != UI_SJLJ)))
3863 && TREE_THIS_VOLATILE (current_function_decl))
3864 type |= ARM_FT_VOLATILE;
3866 if (cfun->static_chain_decl != NULL)
3867 type |= ARM_FT_NESTED;
3869 attr = DECL_ATTRIBUTES (current_function_decl);
3871 a = lookup_attribute ("naked", attr);
3872 if (a != NULL_TREE)
3873 type |= ARM_FT_NAKED;
3875 a = lookup_attribute ("isr", attr);
3876 if (a == NULL_TREE)
3877 a = lookup_attribute ("interrupt", attr);
3879 if (a == NULL_TREE)
3880 type |= TARGET_INTERWORK ? ARM_FT_INTERWORKED : ARM_FT_NORMAL;
3881 else
3882 type |= arm_isr_value (TREE_VALUE (a));
3884 if (lookup_attribute ("cmse_nonsecure_entry", attr))
3885 type |= ARM_FT_CMSE_ENTRY;
3887 return type;
3890 /* Returns the type of the current function. */
3892 unsigned long
3893 arm_current_func_type (void)
3895 if (ARM_FUNC_TYPE (cfun->machine->func_type) == ARM_FT_UNKNOWN)
3896 cfun->machine->func_type = arm_compute_func_type ();
3898 return cfun->machine->func_type;
3901 bool
3902 arm_allocate_stack_slots_for_args (void)
3904 /* Naked functions should not allocate stack slots for arguments. */
3905 return !IS_NAKED (arm_current_func_type ());
3908 static bool
3909 arm_warn_func_return (tree decl)
3911 /* Naked functions are implemented entirely in assembly, including the
3912 return sequence, so suppress warnings about this. */
3913 return lookup_attribute ("naked", DECL_ATTRIBUTES (decl)) == NULL_TREE;
3917 /* Output assembler code for a block containing the constant parts
3918 of a trampoline, leaving space for the variable parts.
3920 On the ARM, (if r8 is the static chain regnum, and remembering that
3921 referencing pc adds an offset of 8) the trampoline looks like:
3922 ldr r8, [pc, #0]
3923 ldr pc, [pc]
3924 .word static chain value
3925 .word function's address
3926 XXX FIXME: When the trampoline returns, r8 will be clobbered. */
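/* In ARM state a read of pc yields the address of the current instruction
   plus 8, so the first ldr (at offset 0) fetches the word at offset 8 (the
   static chain) and the second ldr (at offset 4) fetches the word at offset
   12 (the function address); these are the same offsets arm_trampoline_init
   fills in below for 32-bit targets.  */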
3928 static void
3929 arm_asm_trampoline_template (FILE *f)
3931 fprintf (f, "\t.syntax unified\n");
3933 if (TARGET_ARM)
3935 fprintf (f, "\t.arm\n");
3936 asm_fprintf (f, "\tldr\t%r, [%r, #0]\n", STATIC_CHAIN_REGNUM, PC_REGNUM);
3937 asm_fprintf (f, "\tldr\t%r, [%r, #0]\n", PC_REGNUM, PC_REGNUM);
3939 else if (TARGET_THUMB2)
3941 fprintf (f, "\t.thumb\n");
3942 /* The Thumb-2 trampoline is similar to the arm implementation.
3943 Unlike 16-bit Thumb, we enter the stub in thumb mode. */
3944 asm_fprintf (f, "\tldr.w\t%r, [%r, #4]\n",
3945 STATIC_CHAIN_REGNUM, PC_REGNUM);
3946 asm_fprintf (f, "\tldr.w\t%r, [%r, #4]\n", PC_REGNUM, PC_REGNUM);
3948 else
3950 ASM_OUTPUT_ALIGN (f, 2);
3951 fprintf (f, "\t.code\t16\n");
3952 fprintf (f, ".Ltrampoline_start:\n");
3953 asm_fprintf (f, "\tpush\t{r0, r1}\n");
3954 asm_fprintf (f, "\tldr\tr0, [%r, #8]\n", PC_REGNUM);
3955 asm_fprintf (f, "\tmov\t%r, r0\n", STATIC_CHAIN_REGNUM);
3956 asm_fprintf (f, "\tldr\tr0, [%r, #8]\n", PC_REGNUM);
3957 asm_fprintf (f, "\tstr\tr0, [%r, #4]\n", SP_REGNUM);
3958 asm_fprintf (f, "\tpop\t{r0, %r}\n", PC_REGNUM);
3960 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
3961 assemble_aligned_integer (UNITS_PER_WORD, const0_rtx);
3964 /* Emit RTL insns to initialize the variable parts of a trampoline. */
3966 static void
3967 arm_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
3969 rtx fnaddr, mem, a_tramp;
3971 emit_block_move (m_tramp, assemble_trampoline_template (),
3972 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
3974 mem = adjust_address (m_tramp, SImode, TARGET_32BIT ? 8 : 12);
3975 emit_move_insn (mem, chain_value);
3977 mem = adjust_address (m_tramp, SImode, TARGET_32BIT ? 12 : 16);
3978 fnaddr = XEXP (DECL_RTL (fndecl), 0);
3979 emit_move_insn (mem, fnaddr);
3981 a_tramp = XEXP (m_tramp, 0);
3982 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__clear_cache"),
3983 LCT_NORMAL, VOIDmode, a_tramp, Pmode,
3984 plus_constant (Pmode, a_tramp, TRAMPOLINE_SIZE), Pmode);
3987 /* Thumb trampolines should be entered in thumb mode, so set
3988 the bottom bit of the address. */
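/* For example, a trampoline placed at 0x8000 is returned as 0x8001, so
   that an interworking branch to it enters Thumb state.  */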
3990 static rtx
3991 arm_trampoline_adjust_address (rtx addr)
3993 if (TARGET_THUMB)
3994 addr = expand_simple_binop (Pmode, IOR, addr, const1_rtx,
3995 NULL, 0, OPTAB_LIB_WIDEN);
3996 return addr;
3999 /* Return 1 if it is possible to return using a single instruction.
4000 If SIBLING is non-null, this is a test for a return before a sibling
4001 call. SIBLING is the call insn, so we can examine its register usage. */
4004 use_return_insn (int iscond, rtx sibling)
4006 int regno;
4007 unsigned int func_type;
4008 unsigned long saved_int_regs;
4009 unsigned HOST_WIDE_INT stack_adjust;
4010 arm_stack_offsets *offsets;
4012 /* Never use a return instruction before reload has run. */
4013 if (!reload_completed)
4014 return 0;
4016 func_type = arm_current_func_type ();
4018 /* Naked, volatile and stack alignment functions need special
4019 consideration. */
4020 if (func_type & (ARM_FT_VOLATILE | ARM_FT_NAKED | ARM_FT_STACKALIGN))
4021 return 0;
4023 /* So do interrupt functions that use the frame pointer and Thumb
4024 interrupt functions. */
4025 if (IS_INTERRUPT (func_type) && (frame_pointer_needed || TARGET_THUMB))
4026 return 0;
4028 if (TARGET_LDRD && current_tune->prefer_ldrd_strd
4029 && !optimize_function_for_size_p (cfun))
4030 return 0;
4032 offsets = arm_get_frame_offsets ();
4033 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
4035 /* As do variadic functions. */
4036 if (crtl->args.pretend_args_size
4037 || cfun->machine->uses_anonymous_args
4038 /* Or if the function calls __builtin_eh_return () */
4039 || crtl->calls_eh_return
4040 /* Or if the function calls alloca */
4041 || cfun->calls_alloca
4042 /* Or if there is a stack adjustment. However, if the stack pointer
4043 is saved on the stack, we can use a pre-incrementing stack load. */
4044 || !(stack_adjust == 0 || (TARGET_APCS_FRAME && frame_pointer_needed
4045 && stack_adjust == 4))
4046 /* Or if the static chain register was saved above the frame, under the
4047 assumption that the stack pointer isn't saved on the stack. */
4048 || (!(TARGET_APCS_FRAME && frame_pointer_needed)
4049 && arm_compute_static_chain_stack_bytes() != 0))
4050 return 0;
4052 saved_int_regs = offsets->saved_regs_mask;
4054 /* Unfortunately, the insn
4056 ldmib sp, {..., sp, ...}
4058 triggers a bug on most SA-110 based devices, such that the stack
4059 pointer won't be correctly restored if the instruction takes a
4060 page fault. We work around this problem by popping r3 along with
4061 the other registers, since that is never slower than executing
4062 another instruction.
4064 We test for !arm_arch5 here, because code for any architecture
4065 less than this could potentially be run on one of the buggy
4066 chips. */
4067 if (stack_adjust == 4 && !arm_arch5 && TARGET_ARM)
4069 /* Validate that r3 is a call-clobbered register (always true in
4070 the default abi) ... */
4071 if (!call_used_regs[3])
4072 return 0;
4074 /* ... that it isn't being used for a return value ... */
4075 if (arm_size_return_regs () >= (4 * UNITS_PER_WORD))
4076 return 0;
4078 /* ... or for a tail-call argument ... */
4079 if (sibling)
4081 gcc_assert (CALL_P (sibling));
4083 if (find_regno_fusage (sibling, USE, 3))
4084 return 0;
4087 /* ... and that there are no call-saved registers in r0-r2
4088 (always true in the default ABI). */
4089 if (saved_int_regs & 0x7)
4090 return 0;
4093 /* Can't be done if interworking with Thumb, and any registers have been
4094 stacked. */
4095 if (TARGET_INTERWORK && saved_int_regs != 0 && !IS_INTERRUPT(func_type))
4096 return 0;
4098 /* On StrongARM, conditional returns are expensive if they aren't
4099 taken and multiple registers have been stacked. */
4100 if (iscond && arm_tune_strongarm)
4102 /* Conditional return when just the LR is stored is a simple
4103 conditional-load instruction, that's not expensive. */
4104 if (saved_int_regs != 0 && saved_int_regs != (1 << LR_REGNUM))
4105 return 0;
4107 if (flag_pic
4108 && arm_pic_register != INVALID_REGNUM
4109 && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
4110 return 0;
4113 /* ARMv8-M nonsecure entry functions need to use bxns to return and thus need
4114 several instructions if anything needs to be popped. */
4115 if (saved_int_regs && IS_CMSE_ENTRY (func_type))
4116 return 0;
4118 /* If there are saved registers but the LR isn't saved, then we need
4119 two instructions for the return. */
4120 if (saved_int_regs && !(saved_int_regs & (1 << LR_REGNUM)))
4121 return 0;
4123 /* Can't be done if any of the VFP regs are pushed,
4124 since this also requires an insn. */
4125 if (TARGET_HARD_FLOAT)
4126 for (regno = FIRST_VFP_REGNUM; regno <= LAST_VFP_REGNUM; regno++)
4127 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
4128 return 0;
4130 if (TARGET_REALLY_IWMMXT)
4131 for (regno = FIRST_IWMMXT_REGNUM; regno <= LAST_IWMMXT_REGNUM; regno++)
4132 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
4133 return 0;
4135 return 1;
4138 /* Return TRUE if we should try to use a simple_return insn, i.e. perform
4139 shrink-wrapping if possible. This is the case if we need to emit a
4140 prologue, which we can test by looking at the offsets. */
4141 bool
4142 use_simple_return_p (void)
4144 arm_stack_offsets *offsets;
4146 /* Note this function can be called before or after reload. */
4147 if (!reload_completed)
4148 arm_compute_frame_layout ();
4150 offsets = arm_get_frame_offsets ();
4151 return offsets->outgoing_args != 0;
4154 /* Return TRUE if int I is a valid immediate ARM constant. */
4157 const_ok_for_arm (HOST_WIDE_INT i)
4159 int lowbit;
4161 /* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
4162 be all zero, or all one. */
4163 if ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff) != 0
4164 && ((i & ~(unsigned HOST_WIDE_INT) 0xffffffff)
4165 != ((~(unsigned HOST_WIDE_INT) 0)
4166 & ~(unsigned HOST_WIDE_INT) 0xffffffff)))
4167 return FALSE;
4169 i &= (unsigned HOST_WIDE_INT) 0xffffffff;
4171 /* Fast return for 0 and small values. We must do this for zero, since
4172 the code below can't handle that one case. */
4173 if ((i & ~(unsigned HOST_WIDE_INT) 0xff) == 0)
4174 return TRUE;
4176 /* Get the number of trailing zeros. */
4177 lowbit = ffs((int) i) - 1;
4179 /* Only even shifts are allowed in ARM mode so round down to the
4180 nearest even number. */
4181 if (TARGET_ARM)
4182 lowbit &= ~1;
4184 if ((i & ~(((unsigned HOST_WIDE_INT) 0xff) << lowbit)) == 0)
4185 return TRUE;
4187 if (TARGET_ARM)
4189 /* Allow rotated constants in ARM mode. */
4190 if (lowbit <= 4
4191 && ((i & ~0xc000003f) == 0
4192 || (i & ~0xf000000f) == 0
4193 || (i & ~0xfc000003) == 0))
4194 return TRUE;
4196 else if (TARGET_THUMB2)
4198 HOST_WIDE_INT v;
4200 /* Allow repeated patterns 0x00XY00XY or 0xXYXYXYXY. */
4201 v = i & 0xff;
4202 v |= v << 16;
4203 if (i == v || i == (v | (v << 8)))
4204 return TRUE;
4206 /* Allow repeated pattern 0xXY00XY00. */
4207 v = i & 0xff00;
4208 v |= v << 16;
4209 if (i == v)
4210 return TRUE;
4212 else if (TARGET_HAVE_MOVT)
4214 /* Thumb-1 Targets with MOVT. */
4215 if (i > 0xffff)
4216 return FALSE;
4217 else
4218 return TRUE;
4221 return FALSE;
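/* Illustrative sketch (not part of GCC): the ARM-state rule tested above
   says an immediate is encodable iff it is an 8-bit value rotated right by
   an even amount within the 32-bit word, so 0x0000ff00 and 0xf000000f are
   encodable while 0x00000101 is not.  Assuming a 32-bit unsigned int, that
   rule on its own could be written as follows.  */
#if 0 /* Sketch only, never compiled.  */
static int
arm_mode_immediate_sketch_p (unsigned int x)
{
  for (int rot = 0; rot < 32; rot += 2)
    {
      /* Rotate left by ROT to undo a rotate-right by ROT.  */
      unsigned int undone = rot ? ((x << rot) | (x >> (32 - rot))) : x;
      if ((undone & ~0xffu) == 0)
        return 1;
    }
  return 0;
}
#endif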
4224 /* Return true if I is a valid constant for the operation CODE. */
4226 const_ok_for_op (HOST_WIDE_INT i, enum rtx_code code)
4228 if (const_ok_for_arm (i))
4229 return 1;
4231 switch (code)
4233 case SET:
4234 /* See if we can use movw. */
4235 if (TARGET_HAVE_MOVT && (i & 0xffff0000) == 0)
4236 return 1;
4237 else
4238 /* Otherwise, try mvn. */
4239 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
4241 case PLUS:
4242 /* See if we can use addw or subw. */
4243 if (TARGET_THUMB2
4244 && ((i & 0xfffff000) == 0
4245 || ((-i) & 0xfffff000) == 0))
4246 return 1;
4247 /* Fall through. */
4248 case COMPARE:
4249 case EQ:
4250 case NE:
4251 case GT:
4252 case LE:
4253 case LT:
4254 case GE:
4255 case GEU:
4256 case LTU:
4257 case GTU:
4258 case LEU:
4259 case UNORDERED:
4260 case ORDERED:
4261 case UNEQ:
4262 case UNGE:
4263 case UNLT:
4264 case UNGT:
4265 case UNLE:
4266 return const_ok_for_arm (ARM_SIGN_EXTEND (-i));
4268 case MINUS: /* Should only occur with (MINUS I reg) => rsb */
4269 case XOR:
4270 return 0;
4272 case IOR:
4273 if (TARGET_THUMB2)
4274 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
4275 return 0;
4277 case AND:
4278 return const_ok_for_arm (ARM_SIGN_EXTEND (~i));
4280 default:
4281 gcc_unreachable ();
4285 /* Return true if I is a valid di mode constant for the operation CODE. */
4287 const_ok_for_dimode_op (HOST_WIDE_INT i, enum rtx_code code)
4289 HOST_WIDE_INT hi_val = (i >> 32) & 0xFFFFFFFF;
4290 HOST_WIDE_INT lo_val = i & 0xFFFFFFFF;
4291 rtx hi = GEN_INT (hi_val);
4292 rtx lo = GEN_INT (lo_val);
4294 if (TARGET_THUMB1)
4295 return 0;
4297 switch (code)
4299 case AND:
4300 case IOR:
4301 case XOR:
4302 return (const_ok_for_op (hi_val, code) || hi_val == 0xFFFFFFFF)
4303 && (const_ok_for_op (lo_val, code) || lo_val == 0xFFFFFFFF);
4304 case PLUS:
4305 return arm_not_operand (hi, SImode) && arm_add_operand (lo, SImode);
4307 default:
4308 return 0;
4312 /* Emit a sequence of insns to handle a large constant.
4313 CODE is the code of the operation required, it can be any of SET, PLUS,
4314 IOR, AND, XOR, MINUS;
4315 MODE is the mode in which the operation is being performed;
4316 VAL is the integer to operate on;
4317 SOURCE is the other operand (a register, or a null-pointer for SET);
4318 SUBTARGETS means it is safe to create scratch registers if that will
4319 either produce a simpler sequence, or we will want to cse the values.
4320 Return value is the number of insns emitted. */
4322 /* ??? Tweak this for thumb2. */
4324 arm_split_constant (enum rtx_code code, machine_mode mode, rtx insn,
4325 HOST_WIDE_INT val, rtx target, rtx source, int subtargets)
4327 rtx cond;
4329 if (insn && GET_CODE (PATTERN (insn)) == COND_EXEC)
4330 cond = COND_EXEC_TEST (PATTERN (insn));
4331 else
4332 cond = NULL_RTX;
4334 if (subtargets || code == SET
4335 || (REG_P (target) && REG_P (source)
4336 && REGNO (target) != REGNO (source)))
4338 /* After arm_reorg has been called, we can't fix up expensive
4339 constants by pushing them into memory so we must synthesize
4340 them in-line, regardless of the cost. This is only likely to
4341 be more costly on chips that have load delay slots and we are
4342 compiling without running the scheduler (so no splitting
4343 occurred before the final instruction emission).
4345 Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
4347 if (!cfun->machine->after_arm_reorg
4348 && !cond
4349 && (arm_gen_constant (code, mode, NULL_RTX, val, target, source,
4350 1, 0)
4351 > (arm_constant_limit (optimize_function_for_size_p (cfun))
4352 + (code != SET))))
4354 if (code == SET)
4356 /* Currently SET is the only monadic value for CODE, all
4357 the rest are dyadic. */
4358 if (TARGET_USE_MOVT)
4359 arm_emit_movpair (target, GEN_INT (val));
4360 else
4361 emit_set_insn (target, GEN_INT (val));
4363 return 1;
4365 else
4367 rtx temp = subtargets ? gen_reg_rtx (mode) : target;
4369 if (TARGET_USE_MOVT)
4370 arm_emit_movpair (temp, GEN_INT (val));
4371 else
4372 emit_set_insn (temp, GEN_INT (val));
4374 /* For MINUS, the value is the one subtracted from (i.e. target = val - source),
4375 since we never have subtraction of a constant. */
4376 if (code == MINUS)
4377 emit_set_insn (target, gen_rtx_MINUS (mode, temp, source));
4378 else
4379 emit_set_insn (target,
4380 gen_rtx_fmt_ee (code, mode, source, temp));
4381 return 2;
4386 return arm_gen_constant (code, mode, cond, val, target, source, subtargets,
4390 /* Return a sequence of integers, in RETURN_SEQUENCE, that fit into
4391 ARM/THUMB2 immediates and add up to VAL.
4392 The function return value gives the number of insns required. */
4393 static int
4394 optimal_immediate_sequence (enum rtx_code code, unsigned HOST_WIDE_INT val,
4395 struct four_ints *return_sequence)
4397 int best_consecutive_zeros = 0;
4398 int i;
4399 int best_start = 0;
4400 int insns1, insns2;
4401 struct four_ints tmp_sequence;
4403 /* If we aren't targeting ARM, the best place to start is always at
4404 the bottom, otherwise look more closely. */
4405 if (TARGET_ARM)
4407 for (i = 0; i < 32; i += 2)
4409 int consecutive_zeros = 0;
4411 if (!(val & (3 << i)))
4413 while ((i < 32) && !(val & (3 << i)))
4415 consecutive_zeros += 2;
4416 i += 2;
4418 if (consecutive_zeros > best_consecutive_zeros)
4420 best_consecutive_zeros = consecutive_zeros;
4421 best_start = i - consecutive_zeros;
4423 i -= 2;
4428 /* So long as it won't require any more insns to do so, it's
4429 desirable to emit a small constant (in bits 0...9) in the last
4430 insn. This way there is more chance that it can be combined with
4431 a later addressing insn to form a pre-indexed load or store
4432 operation. Consider:
4434 *((volatile int *)0xe0000100) = 1;
4435 *((volatile int *)0xe0000110) = 2;
4437 We want this to wind up as:
4439 mov rA, #0xe0000000
4440 mov rB, #1
4441 str rB, [rA, #0x100]
4442 mov rB, #2
4443 str rB, [rA, #0x110]
4445 rather than having to synthesize both large constants from scratch.
4447 Therefore, we calculate how many insns would be required to emit
4448 the constant starting from `best_start', and also starting from
4449 zero (i.e. with bit 31 first to be output). If `best_start' doesn't
4450 yield a shorter sequence, we may as well use zero. */
4451 insns1 = optimal_immediate_sequence_1 (code, val, return_sequence, best_start);
4452 if (best_start != 0
4453 && ((HOST_WIDE_INT_1U << best_start) < val))
4455 insns2 = optimal_immediate_sequence_1 (code, val, &tmp_sequence, 0);
4456 if (insns2 <= insns1)
4458 *return_sequence = tmp_sequence;
4459 insns1 = insns2;
4463 return insns1;
4466 /* As for optimal_immediate_sequence, but starting at bit-position I. */
4467 static int
4468 optimal_immediate_sequence_1 (enum rtx_code code, unsigned HOST_WIDE_INT val,
4469 struct four_ints *return_sequence, int i)
4471 int remainder = val & 0xffffffff;
4472 int insns = 0;
4474 /* Try and find a way of doing the job in either two or three
4475 instructions.
4477 In ARM mode we can use 8-bit constants, rotated to any 2-bit aligned
4478 location. We start at position I. This may be the MSB, or
4479 optimal_immediate_sequence may have positioned it at the largest block
4480 of zeros that are aligned on a 2-bit boundary. We then fill up the temps,
4481 wrapping around to the top of the word when we drop off the bottom.
4482 In the worst case this code should produce no more than four insns.
4484 In Thumb2 mode, we can use 32/16-bit replicated constants, and 8-bit
4485 constants, shifted to any arbitrary location. We should always start
4486 at the MSB. */
4489 int end;
4490 unsigned int b1, b2, b3, b4;
4491 unsigned HOST_WIDE_INT result;
4492 int loc;
4494 gcc_assert (insns < 4);
4496 if (i <= 0)
4497 i += 32;
4499 /* First, find the next normal 12/8-bit shifted/rotated immediate. */
4500 if (remainder & ((TARGET_ARM ? (3 << (i - 2)) : (1 << (i - 1)))))
4502 loc = i;
4503 if (i <= 12 && TARGET_THUMB2 && code == PLUS)
4504 /* We can use addw/subw for the last 12 bits. */
4505 result = remainder;
4506 else
4508 /* Use an 8-bit shifted/rotated immediate. */
4509 end = i - 8;
4510 if (end < 0)
4511 end += 32;
4512 result = remainder & ((0x0ff << end)
4513 | ((i < end) ? (0xff >> (32 - end))
4514 : 0));
4515 i -= 8;
4518 else
4520 /* Arm allows rotates by a multiple of two. Thumb-2 allows
4521 arbitrary shifts. */
4522 i -= TARGET_ARM ? 2 : 1;
4523 continue;
4526 /* Next, see if we can do a better job with a thumb2 replicated
4527 constant.
4529 We do it this way around to catch the cases like 0x01F001E0 where
4530 two 8-bit immediates would work, but a replicated constant would
4531 make it worse.
4533 TODO: 16-bit constants that don't clear all the bits, but still win.
4534 TODO: Arithmetic splitting for set/add/sub, rather than bitwise. */
4535 if (TARGET_THUMB2)
4537 b1 = (remainder & 0xff000000) >> 24;
4538 b2 = (remainder & 0x00ff0000) >> 16;
4539 b3 = (remainder & 0x0000ff00) >> 8;
4540 b4 = remainder & 0xff;
4542 if (loc > 24)
4544 /* The 8-bit immediate already found clears b1 (and maybe b2),
4545 but must leave b3 and b4 alone. */
4547 /* First try to find a 32-bit replicated constant that clears
4548 almost everything. We can assume that we can't do it in one,
4549 or else we wouldn't be here. */
4550 unsigned int tmp = b1 & b2 & b3 & b4;
4551 unsigned int tmp2 = tmp + (tmp << 8) + (tmp << 16)
4552 + (tmp << 24);
4553 unsigned int matching_bytes = (tmp == b1) + (tmp == b2)
4554 + (tmp == b3) + (tmp == b4);
4555 if (tmp
4556 && (matching_bytes >= 3
4557 || (matching_bytes == 2
4558 && const_ok_for_op (remainder & ~tmp2, code))))
4560 /* At least 3 of the bytes match, and the fourth has at
4561 least as many bits set, or two of the bytes match
4562 and it will only require one more insn to finish. */
4563 result = tmp2;
4564 i = tmp != b1 ? 32
4565 : tmp != b2 ? 24
4566 : tmp != b3 ? 16
4567 : 8;
4570 /* Second, try to find a 16-bit replicated constant that can
4571 leave three of the bytes clear. If b2 or b4 is already
4572 zero, then we can. If the 8-bit from above would not
4573 clear b2 anyway, then we still win. */
4574 else if (b1 == b3 && (!b2 || !b4
4575 || (remainder & 0x00ff0000 & ~result)))
4577 result = remainder & 0xff00ff00;
4578 i = 24;
4581 else if (loc > 16)
4583 /* The 8-bit immediate already found clears b2 (and maybe b3)
4584 and we don't get here unless b1 is already clear, but it will
4585 leave b4 unchanged. */
4587 /* If we can clear b2 and b4 at once, then we win, since the
4588 8-bits couldn't possibly reach that far. */
4589 if (b2 == b4)
4591 result = remainder & 0x00ff00ff;
4592 i = 16;
4597 return_sequence->i[insns++] = result;
4598 remainder &= ~result;
4600 if (code == SET || code == MINUS)
4601 code = PLUS;
4603 while (remainder);
4605 return insns;
4608 /* Emit an instruction with the indicated PATTERN. If COND is
4609 non-NULL, conditionalize the execution of the instruction on COND
4610 being true. */
4612 static void
4613 emit_constant_insn (rtx cond, rtx pattern)
4615 if (cond)
4616 pattern = gen_rtx_COND_EXEC (VOIDmode, copy_rtx (cond), pattern);
4617 emit_insn (pattern);
4620 /* As above, but extra parameter GENERATE which, if clear, suppresses
4621 RTL generation. */
4623 static int
4624 arm_gen_constant (enum rtx_code code, machine_mode mode, rtx cond,
4625 unsigned HOST_WIDE_INT val, rtx target, rtx source,
4626 int subtargets, int generate)
4628 int can_invert = 0;
4629 int can_negate = 0;
4630 int final_invert = 0;
4631 int i;
4632 int set_sign_bit_copies = 0;
4633 int clear_sign_bit_copies = 0;
4634 int clear_zero_bit_copies = 0;
4635 int set_zero_bit_copies = 0;
4636 int insns = 0, neg_insns, inv_insns;
4637 unsigned HOST_WIDE_INT temp1, temp2;
4638 unsigned HOST_WIDE_INT remainder = val & 0xffffffff;
4639 struct four_ints *immediates;
4640 struct four_ints pos_immediates, neg_immediates, inv_immediates;
4642 /* Find out which operations are safe for a given CODE. Also do a quick
4643 check for degenerate cases; these can occur when DImode operations
4644 are split. */
4645 switch (code)
4647 case SET:
4648 can_invert = 1;
4649 break;
4651 case PLUS:
4652 can_negate = 1;
4653 break;
4655 case IOR:
4656 if (remainder == 0xffffffff)
4658 if (generate)
4659 emit_constant_insn (cond,
4660 gen_rtx_SET (target,
4661 GEN_INT (ARM_SIGN_EXTEND (val))));
4662 return 1;
4665 if (remainder == 0)
4667 if (reload_completed && rtx_equal_p (target, source))
4668 return 0;
4670 if (generate)
4671 emit_constant_insn (cond, gen_rtx_SET (target, source));
4672 return 1;
4674 break;
4676 case AND:
4677 if (remainder == 0)
4679 if (generate)
4680 emit_constant_insn (cond, gen_rtx_SET (target, const0_rtx));
4681 return 1;
4683 if (remainder == 0xffffffff)
4685 if (reload_completed && rtx_equal_p (target, source))
4686 return 0;
4687 if (generate)
4688 emit_constant_insn (cond, gen_rtx_SET (target, source));
4689 return 1;
4691 can_invert = 1;
4692 break;
4694 case XOR:
4695 if (remainder == 0)
4697 if (reload_completed && rtx_equal_p (target, source))
4698 return 0;
4699 if (generate)
4700 emit_constant_insn (cond, gen_rtx_SET (target, source));
4701 return 1;
4704 if (remainder == 0xffffffff)
4706 if (generate)
4707 emit_constant_insn (cond,
4708 gen_rtx_SET (target,
4709 gen_rtx_NOT (mode, source)));
4710 return 1;
4712 final_invert = 1;
4713 break;
4715 case MINUS:
4716 /* We treat MINUS as (val - source), since (source - val) is always
4717 passed as (source + (-val)). */
4718 if (remainder == 0)
4720 if (generate)
4721 emit_constant_insn (cond,
4722 gen_rtx_SET (target,
4723 gen_rtx_NEG (mode, source)));
4724 return 1;
4726 if (const_ok_for_arm (val))
4728 if (generate)
4729 emit_constant_insn (cond,
4730 gen_rtx_SET (target,
4731 gen_rtx_MINUS (mode, GEN_INT (val),
4732 source)));
4733 return 1;
4736 break;
4738 default:
4739 gcc_unreachable ();
4742 /* If we can do it in one insn get out quickly. */
4743 if (const_ok_for_op (val, code))
4745 if (generate)
4746 emit_constant_insn (cond,
4747 gen_rtx_SET (target,
4748 (source
4749 ? gen_rtx_fmt_ee (code, mode, source,
4750 GEN_INT (val))
4751 : GEN_INT (val))));
4752 return 1;
4755 /* On targets with UXTH/UBFX, we can deal with AND (2^N)-1 in a single
4756 insn. */
4757 if (code == AND && (i = exact_log2 (remainder + 1)) > 0
4758 && (arm_arch_thumb2 || (i == 16 && arm_arch6 && mode == SImode)))
4760 if (generate)
4762 if (mode == SImode && i == 16)
4763 /* Use UXTH in preference to UBFX, since on Thumb2 it's a
4764 smaller insn. */
4765 emit_constant_insn (cond,
4766 gen_zero_extendhisi2
4767 (target, gen_lowpart (HImode, source)));
4768 else
4769 /* Extz only supports SImode, but we can coerce the operands
4770 into that mode. */
4771 emit_constant_insn (cond,
4772 gen_extzv_t2 (gen_lowpart (SImode, target),
4773 gen_lowpart (SImode, source),
4774 GEN_INT (i), const0_rtx));
4777 return 1;
4780 /* Calculate a few attributes that may be useful for specific
4781 optimizations. */
4782 /* Count number of leading zeros. */
4783 for (i = 31; i >= 0; i--)
4785 if ((remainder & (1 << i)) == 0)
4786 clear_sign_bit_copies++;
4787 else
4788 break;
4791 /* Count number of leading 1's. */
4792 for (i = 31; i >= 0; i--)
4794 if ((remainder & (1 << i)) != 0)
4795 set_sign_bit_copies++;
4796 else
4797 break;
4800 /* Count number of trailing zeros. */
4801 for (i = 0; i <= 31; i++)
4803 if ((remainder & (1 << i)) == 0)
4804 clear_zero_bit_copies++;
4805 else
4806 break;
4809 /* Count number of trailing 1's. */
4810 for (i = 0; i <= 31; i++)
4812 if ((remainder & (1 << i)) != 0)
4813 set_zero_bit_copies++;
4814 else
4815 break;
4818 switch (code)
4820 case SET:
4821 /* See if we can do this by sign_extending a constant that is known
4822 to be negative. This is a good way of doing it, since the shift
4823 may well merge into a subsequent insn. */
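/* For instance, 0xfffff800 (21 leading ones) can be loaded as
       mov  rT, #0x80000000
       asr  rD, rT, #20
   which is the two-insn sequence generated below.  (Illustrative example
   only.)  */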
4824 if (set_sign_bit_copies > 1)
4826 if (const_ok_for_arm
4827 (temp1 = ARM_SIGN_EXTEND (remainder
4828 << (set_sign_bit_copies - 1))))
4830 if (generate)
4832 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
4833 emit_constant_insn (cond,
4834 gen_rtx_SET (new_src, GEN_INT (temp1)));
4835 emit_constant_insn (cond,
4836 gen_ashrsi3 (target, new_src,
4837 GEN_INT (set_sign_bit_copies - 1)));
4839 return 2;
4841 /* For an inverted constant, we will need to set the low bits;
4842 these will be shifted out of harm's way. */
4843 temp1 |= (1 << (set_sign_bit_copies - 1)) - 1;
4844 if (const_ok_for_arm (~temp1))
4846 if (generate)
4848 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
4849 emit_constant_insn (cond,
4850 gen_rtx_SET (new_src, GEN_INT (temp1)));
4851 emit_constant_insn (cond,
4852 gen_ashrsi3 (target, new_src,
4853 GEN_INT (set_sign_bit_copies - 1)));
4855 return 2;
4859 /* See if we can calculate the value as the difference between two
4860 valid immediates. */
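/* For instance, 0x00fffff0 is not itself a valid immediate, but it equals
   0x01000000 - 0x10, both of which are; the two insns emitted below then
   amount to
       mov  rT, #0x01000000
       sub  rD, rT, #0x10
   (Illustrative example only.)  */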
4861 if (clear_sign_bit_copies + clear_zero_bit_copies <= 16)
4863 int topshift = clear_sign_bit_copies & ~1;
4865 temp1 = ARM_SIGN_EXTEND ((remainder + (0x00800000 >> topshift))
4866 & (0xff000000 >> topshift));
4868 /* If temp1 is zero, then that means the 9 most significant
4869 bits of remainder were 1 and we've caused it to overflow.
4870 When topshift is 0 we don't need to do anything since we
4871 can borrow from 'bit 32'. */
4872 if (temp1 == 0 && topshift != 0)
4873 temp1 = 0x80000000 >> (topshift - 1);
4875 temp2 = ARM_SIGN_EXTEND (temp1 - remainder);
4877 if (const_ok_for_arm (temp2))
4879 if (generate)
4881 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
4882 emit_constant_insn (cond,
4883 gen_rtx_SET (new_src, GEN_INT (temp1)));
4884 emit_constant_insn (cond,
4885 gen_addsi3 (target, new_src,
4886 GEN_INT (-temp2)));
4889 return 2;
4893 /* See if we can generate this by setting the bottom (or the top)
4894 16 bits, and then shifting these into the other half of the
4895 word. We only look for the simplest cases; doing more would cost
4896 too much. Be careful, however, not to generate this when the
4897 alternative would take fewer insns. */
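/* For instance, in ARM state 0x01010101 can be built by first synthesizing
   0x0101 (itself two insns) and then ORing that register with itself
   shifted left by 16, three insns in total.  (Illustrative example
   only.)  */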
4898 if (val & 0xffff0000)
4900 temp1 = remainder & 0xffff0000;
4901 temp2 = remainder & 0x0000ffff;
4903 /* Overlaps outside this range are best done using other methods. */
4904 for (i = 9; i < 24; i++)
4906 if ((((temp2 | (temp2 << i)) & 0xffffffff) == remainder)
4907 && !const_ok_for_arm (temp2))
4909 rtx new_src = (subtargets
4910 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
4911 : target);
4912 insns = arm_gen_constant (code, mode, cond, temp2, new_src,
4913 source, subtargets, generate);
4914 source = new_src;
4915 if (generate)
4916 emit_constant_insn
4917 (cond,
4918 gen_rtx_SET
4919 (target,
4920 gen_rtx_IOR (mode,
4921 gen_rtx_ASHIFT (mode, source,
4922 GEN_INT (i)),
4923 source)));
4924 return insns + 1;
4928 /* Don't duplicate cases already considered. */
4929 for (i = 17; i < 24; i++)
4931 if (((temp1 | (temp1 >> i)) == remainder)
4932 && !const_ok_for_arm (temp1))
4934 rtx new_src = (subtargets
4935 ? (generate ? gen_reg_rtx (mode) : NULL_RTX)
4936 : target);
4937 insns = arm_gen_constant (code, mode, cond, temp1, new_src,
4938 source, subtargets, generate);
4939 source = new_src;
4940 if (generate)
4941 emit_constant_insn
4942 (cond,
4943 gen_rtx_SET (target,
4944 gen_rtx_IOR
4945 (mode,
4946 gen_rtx_LSHIFTRT (mode, source,
4947 GEN_INT (i)),
4948 source)));
4949 return insns + 1;
4953 break;
4955 case IOR:
4956 case XOR:
4957 /* If we have IOR or XOR, and the constant can be loaded in a
4958 single instruction, and we can find a temporary to put it in,
4959 then this can be done in two instructions instead of 3-4. */
4960 if (subtargets
4961 /* TARGET can't be NULL if SUBTARGETS is 0 */
4962 || (reload_completed && !reg_mentioned_p (target, source)))
4964 if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
4966 if (generate)
4968 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
4970 emit_constant_insn (cond,
4971 gen_rtx_SET (sub, GEN_INT (val)));
4972 emit_constant_insn (cond,
4973 gen_rtx_SET (target,
4974 gen_rtx_fmt_ee (code, mode,
4975 source, sub)));
4977 return 2;
4981 if (code == XOR)
4982 break;
4984 /* Convert.
4985 x = y | constant (which is composed of set_sign_bit_copies of leading 1s
4986 and the remainder 0s for e.g. 0xfff00000)
4987 x = ~(~(y ashift set_sign_bit_copies) lshiftrt set_sign_bit_copies)
4989 This can be done in 2 instructions by using shifts with mov or mvn.
4990 e.g. for
4991 x = x | 0xfff00000;
4992 we generate.
4993 mvn r0, r0, asl #12
4994 mvn r0, r0, lsr #12 */
4995 if (set_sign_bit_copies > 8
4996 && (val & (HOST_WIDE_INT_M1U << (32 - set_sign_bit_copies))) == val)
4998 if (generate)
5000 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
5001 rtx shift = GEN_INT (set_sign_bit_copies);
5003 emit_constant_insn
5004 (cond,
5005 gen_rtx_SET (sub,
5006 gen_rtx_NOT (mode,
5007 gen_rtx_ASHIFT (mode,
5008 source,
5009 shift))));
5010 emit_constant_insn
5011 (cond,
5012 gen_rtx_SET (target,
5013 gen_rtx_NOT (mode,
5014 gen_rtx_LSHIFTRT (mode, sub,
5015 shift))));
5017 return 2;
5020 /* Convert
5021 x = y | constant (which has set_zero_bit_copies number of trailing ones).
5023 x = ~((~y lshiftrt set_zero_bit_copies) ashift set_zero_bit_copies).
5025 E.g. r0 = r0 | 0xfff
5026 mvn r0, r0, lsr #12
5027 mvn r0, r0, asl #12
5030 if (set_zero_bit_copies > 8
5031 && (remainder & ((1 << set_zero_bit_copies) - 1)) == remainder)
5033 if (generate)
5035 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
5036 rtx shift = GEN_INT (set_zero_bit_copies);
5038 emit_constant_insn
5039 (cond,
5040 gen_rtx_SET (sub,
5041 gen_rtx_NOT (mode,
5042 gen_rtx_LSHIFTRT (mode,
5043 source,
5044 shift))));
5045 emit_constant_insn
5046 (cond,
5047 gen_rtx_SET (target,
5048 gen_rtx_NOT (mode,
5049 gen_rtx_ASHIFT (mode, sub,
5050 shift))));
5052 return 2;
5055 /* This will never be reached for Thumb2 because orn is a valid
5056 instruction. This is for Thumb1 and the ARM 32 bit cases.
5058 x = y | constant (such that ~constant is a valid constant)
5059 Transform this to
5060 x = ~(~y & ~constant).
5062 if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
5064 if (generate)
5066 rtx sub = subtargets ? gen_reg_rtx (mode) : target;
5067 emit_constant_insn (cond,
5068 gen_rtx_SET (sub,
5069 gen_rtx_NOT (mode, source)));
5070 source = sub;
5071 if (subtargets)
5072 sub = gen_reg_rtx (mode);
5073 emit_constant_insn (cond,
5074 gen_rtx_SET (sub,
5075 gen_rtx_AND (mode, source,
5076 GEN_INT (temp1))));
5077 emit_constant_insn (cond,
5078 gen_rtx_SET (target,
5079 gen_rtx_NOT (mode, sub)));
5081 return 3;
5083 break;
5085 case AND:
5086 /* See if two shifts will do 2 or more insn's worth of work. */
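/* For instance, on a core without ubfx, x &= 0x00007fff is not a single
   and/bic immediate, but a left shift by 17 followed by a logical right
   shift by 17 clears exactly the top 17 bits in two insns.  */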
5087 if (clear_sign_bit_copies >= 16 && clear_sign_bit_copies < 24)
5089 HOST_WIDE_INT shift_mask = ((0xffffffff
5090 << (32 - clear_sign_bit_copies))
5091 & 0xffffffff);
5093 if ((remainder | shift_mask) != 0xffffffff)
5095 HOST_WIDE_INT new_val
5096 = ARM_SIGN_EXTEND (remainder | shift_mask);
5098 if (generate)
5100 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
5101 insns = arm_gen_constant (AND, SImode, cond, new_val,
5102 new_src, source, subtargets, 1);
5103 source = new_src;
5105 else
5107 rtx targ = subtargets ? NULL_RTX : target;
5108 insns = arm_gen_constant (AND, mode, cond, new_val,
5109 targ, source, subtargets, 0);
5113 if (generate)
5115 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
5116 rtx shift = GEN_INT (clear_sign_bit_copies);
5118 emit_insn (gen_ashlsi3 (new_src, source, shift));
5119 emit_insn (gen_lshrsi3 (target, new_src, shift));
5122 return insns + 2;
5125 if (clear_zero_bit_copies >= 16 && clear_zero_bit_copies < 24)
5127 HOST_WIDE_INT shift_mask = (1 << clear_zero_bit_copies) - 1;
5129 if ((remainder | shift_mask) != 0xffffffff)
5131 HOST_WIDE_INT new_val
5132 = ARM_SIGN_EXTEND (remainder | shift_mask);
5133 if (generate)
5135 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
5137 insns = arm_gen_constant (AND, mode, cond, new_val,
5138 new_src, source, subtargets, 1);
5139 source = new_src;
5141 else
5143 rtx targ = subtargets ? NULL_RTX : target;
5145 insns = arm_gen_constant (AND, mode, cond, new_val,
5146 targ, source, subtargets, 0);
5150 if (generate)
5152 rtx new_src = subtargets ? gen_reg_rtx (mode) : target;
5153 rtx shift = GEN_INT (clear_zero_bit_copies);
5155 emit_insn (gen_lshrsi3 (new_src, source, shift));
5156 emit_insn (gen_ashlsi3 (target, new_src, shift));
5159 return insns + 2;
5162 break;
5164 default:
5165 break;
5168 /* Calculate what the instruction sequences would be if we generated it
5169 normally, negated, or inverted. */
5170 if (code == AND)
5171 /* AND cannot be split into multiple insns, so invert and use BIC. */
5172 insns = 99;
5173 else
5174 insns = optimal_immediate_sequence (code, remainder, &pos_immediates);
5176 if (can_negate)
5177 neg_insns = optimal_immediate_sequence (code, (-remainder) & 0xffffffff,
5178 &neg_immediates);
5179 else
5180 neg_insns = 99;
5182 if (can_invert || final_invert)
5183 inv_insns = optimal_immediate_sequence (code, remainder ^ 0xffffffff,
5184 &inv_immediates);
5185 else
5186 inv_insns = 99;
5188 immediates = &pos_immediates;
5190 /* Is the negated immediate sequence more efficient? */
5191 if (neg_insns < insns && neg_insns <= inv_insns)
5193 insns = neg_insns;
5194 immediates = &neg_immediates;
5196 else
5197 can_negate = 0;
5199 /* Is the inverted immediate sequence more efficient?
5200 We must allow for an extra NOT instruction for XOR operations, although
5201 there is some chance that the final 'mvn' will get optimized later. */
5202 if ((inv_insns + 1) < insns || (!final_invert && inv_insns < insns))
5204 insns = inv_insns;
5205 immediates = &inv_immediates;
5207 else
5209 can_invert = 0;
5210 final_invert = 0;
5213 /* Now output the chosen sequence as instructions. */
5214 if (generate)
5216 for (i = 0; i < insns; i++)
5218 rtx new_src, temp1_rtx;
5220 temp1 = immediates->i[i];
5222 if (code == SET || code == MINUS)
5223 new_src = (subtargets ? gen_reg_rtx (mode) : target);
5224 else if ((final_invert || i < (insns - 1)) && subtargets)
5225 new_src = gen_reg_rtx (mode);
5226 else
5227 new_src = target;
5229 if (can_invert)
5230 temp1 = ~temp1;
5231 else if (can_negate)
5232 temp1 = -temp1;
5234 temp1 = trunc_int_for_mode (temp1, mode);
5235 temp1_rtx = GEN_INT (temp1);
5237 if (code == SET)
5239 else if (code == MINUS)
5240 temp1_rtx = gen_rtx_MINUS (mode, temp1_rtx, source);
5241 else
5242 temp1_rtx = gen_rtx_fmt_ee (code, mode, source, temp1_rtx);
5244 emit_constant_insn (cond, gen_rtx_SET (new_src, temp1_rtx));
5245 source = new_src;
5247 if (code == SET)
5249 can_negate = can_invert;
5250 can_invert = 0;
5251 code = PLUS;
5253 else if (code == MINUS)
5254 code = PLUS;
5258 if (final_invert)
5260 if (generate)
5261 emit_constant_insn (cond, gen_rtx_SET (target,
5262 gen_rtx_NOT (mode, source)));
5263 insns++;
5266 return insns;
5269 /* Canonicalize a comparison so that we are more likely to recognize it.
5270 This can be done for a few constant compares, where we can make the
5271 immediate value easier to load. */
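/* For example (an illustrative case): a signed test x > 0x00ffffff cannot
   encode 0x00ffffff (or its negation) as an ARM immediate, but the
   equivalent x >= 0x01000000 can, so GT is rewritten below as GE against
   the adjusted constant.  */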
5273 static void
5274 arm_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
5275 bool op0_preserve_value)
5277 machine_mode mode;
5278 unsigned HOST_WIDE_INT i, maxval;
5280 mode = GET_MODE (*op0);
5281 if (mode == VOIDmode)
5282 mode = GET_MODE (*op1);
5284 maxval = (HOST_WIDE_INT_1U << (GET_MODE_BITSIZE (mode) - 1)) - 1;
5286 /* For DImode, we have GE/LT/GEU/LTU comparisons. In ARM mode
5287 we can also use cmp/cmpeq for GTU/LEU. GT/LE must be either
5288 reversed or (for constant OP1) adjusted to GE/LT. Similarly
5289 for GTU/LEU in Thumb mode. */
5290 if (mode == DImode)
5293 if (*code == GT || *code == LE
5294 || (!TARGET_ARM && (*code == GTU || *code == LEU)))
5296 /* Missing comparison. First try to use an available
5297 comparison. */
5298 if (CONST_INT_P (*op1))
5300 i = INTVAL (*op1);
5301 switch (*code)
5303 case GT:
5304 case LE:
5305 if (i != maxval
5306 && arm_const_double_by_immediates (GEN_INT (i + 1)))
5308 *op1 = GEN_INT (i + 1);
5309 *code = *code == GT ? GE : LT;
5310 return;
5312 break;
5313 case GTU:
5314 case LEU:
5315 if (i != ~((unsigned HOST_WIDE_INT) 0)
5316 && arm_const_double_by_immediates (GEN_INT (i + 1)))
5318 *op1 = GEN_INT (i + 1);
5319 *code = *code == GTU ? GEU : LTU;
5320 return;
5322 break;
5323 default:
5324 gcc_unreachable ();
5328 /* If that did not work, reverse the condition. */
5329 if (!op0_preserve_value)
5331 std::swap (*op0, *op1);
5332 *code = (int)swap_condition ((enum rtx_code)*code);
5335 return;
5338 /* If *op0 is (zero_extend:SI (subreg:QI (reg:SI) 0)) and comparing
5339 with const0_rtx, change it to (and:SI (reg:SI) (const_int 255)),
5340 to facilitate possible combining with a cmp into 'ands'. */
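/* Sketch of the intended effect: a comparison of
   (zero_extend:SI (subreg:QI (reg:SI r) 0)) against zero becomes a
   comparison of (and:SI (reg:SI r) (const_int 255)) against zero, which
   combine can then collapse into a single ANDS instruction.  */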
5341 if (mode == SImode
5342 && GET_CODE (*op0) == ZERO_EXTEND
5343 && GET_CODE (XEXP (*op0, 0)) == SUBREG
5344 && GET_MODE (XEXP (*op0, 0)) == QImode
5345 && GET_MODE (SUBREG_REG (XEXP (*op0, 0))) == SImode
5346 && subreg_lowpart_p (XEXP (*op0, 0))
5347 && *op1 == const0_rtx)
5348 *op0 = gen_rtx_AND (SImode, SUBREG_REG (XEXP (*op0, 0)),
5349 GEN_INT (255));
5351 /* Comparisons smaller than DImode. Only adjust comparisons against
5352 an out-of-range constant. */
5353 if (!CONST_INT_P (*op1)
5354 || const_ok_for_arm (INTVAL (*op1))
5355 || const_ok_for_arm (- INTVAL (*op1)))
5356 return;
5358 i = INTVAL (*op1);
5360 switch (*code)
5362 case EQ:
5363 case NE:
5364 return;
5366 case GT:
5367 case LE:
5368 if (i != maxval
5369 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
5371 *op1 = GEN_INT (ARM_SIGN_EXTEND (i + 1));
5372 *code = *code == GT ? GE : LT;
5373 return;
5375 break;
5377 case GE:
5378 case LT:
5379 if (i != ~maxval
5380 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
5382 *op1 = GEN_INT (i - 1);
5383 *code = *code == GE ? GT : LE;
5384 return;
5386 break;
5388 case GTU:
5389 case LEU:
5390 if (i != ~((unsigned HOST_WIDE_INT) 0)
5391 && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
5393 *op1 = GEN_INT (ARM_SIGN_EXTEND (i + 1));
5394 *code = *code == GTU ? GEU : LTU;
5395 return;
5397 break;
5399 case GEU:
5400 case LTU:
5401 if (i != 0
5402 && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
5404 *op1 = GEN_INT (i - 1);
5405 *code = *code == GEU ? GTU : LEU;
5406 return;
5408 break;
5410 default:
5411 gcc_unreachable ();
5416 /* Define how to find the value returned by a function. */
5418 static rtx
5419 arm_function_value(const_tree type, const_tree func,
5420 bool outgoing ATTRIBUTE_UNUSED)
5422 machine_mode mode;
5423 int unsignedp ATTRIBUTE_UNUSED;
5424 rtx r ATTRIBUTE_UNUSED;
5426 mode = TYPE_MODE (type);
5428 if (TARGET_AAPCS_BASED)
5429 return aapcs_allocate_return_reg (mode, type, func);
5431 /* Promote integer types. */
5432 if (INTEGRAL_TYPE_P (type))
5433 mode = arm_promote_function_mode (type, mode, &unsignedp, func, 1);
5435 /* Promotes small structs returned in a register to full-word size
5436 for big-endian AAPCS. */
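/* For example (illustrative): a three-byte struct is padded to four bytes
   and returned in SImode, so on a big-endian AAPCS target the significant
   bytes sit at the top of r0, matching their position in memory.  */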
5437 if (arm_return_in_msb (type))
5439 HOST_WIDE_INT size = int_size_in_bytes (type);
5440 if (size % UNITS_PER_WORD != 0)
5442 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
5443 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).require ();
5447 return arm_libcall_value_1 (mode);
5450 /* libcall hashtable helpers. */
5452 struct libcall_hasher : nofree_ptr_hash <const rtx_def>
5454 static inline hashval_t hash (const rtx_def *);
5455 static inline bool equal (const rtx_def *, const rtx_def *);
5456 static inline void remove (rtx_def *);
5459 inline bool
5460 libcall_hasher::equal (const rtx_def *p1, const rtx_def *p2)
5462 return rtx_equal_p (p1, p2);
5465 inline hashval_t
5466 libcall_hasher::hash (const rtx_def *p1)
5468 return hash_rtx (p1, VOIDmode, NULL, NULL, FALSE);
5471 typedef hash_table<libcall_hasher> libcall_table_type;
5473 static void
5474 add_libcall (libcall_table_type *htab, rtx libcall)
5476 *htab->find_slot (libcall, INSERT) = libcall;
5479 static bool
5480 arm_libcall_uses_aapcs_base (const_rtx libcall)
5482 static bool init_done = false;
5483 static libcall_table_type *libcall_htab = NULL;
5485 if (!init_done)
5487 init_done = true;
5489 libcall_htab = new libcall_table_type (31);
5490 add_libcall (libcall_htab,
5491 convert_optab_libfunc (sfloat_optab, SFmode, SImode));
5492 add_libcall (libcall_htab,
5493 convert_optab_libfunc (sfloat_optab, DFmode, SImode));
5494 add_libcall (libcall_htab,
5495 convert_optab_libfunc (sfloat_optab, SFmode, DImode));
5496 add_libcall (libcall_htab,
5497 convert_optab_libfunc (sfloat_optab, DFmode, DImode));
5499 add_libcall (libcall_htab,
5500 convert_optab_libfunc (ufloat_optab, SFmode, SImode));
5501 add_libcall (libcall_htab,
5502 convert_optab_libfunc (ufloat_optab, DFmode, SImode));
5503 add_libcall (libcall_htab,
5504 convert_optab_libfunc (ufloat_optab, SFmode, DImode));
5505 add_libcall (libcall_htab,
5506 convert_optab_libfunc (ufloat_optab, DFmode, DImode));
5508 add_libcall (libcall_htab,
5509 convert_optab_libfunc (sext_optab, SFmode, HFmode));
5510 add_libcall (libcall_htab,
5511 convert_optab_libfunc (trunc_optab, HFmode, SFmode));
5512 add_libcall (libcall_htab,
5513 convert_optab_libfunc (sfix_optab, SImode, DFmode));
5514 add_libcall (libcall_htab,
5515 convert_optab_libfunc (ufix_optab, SImode, DFmode));
5516 add_libcall (libcall_htab,
5517 convert_optab_libfunc (sfix_optab, DImode, DFmode));
5518 add_libcall (libcall_htab,
5519 convert_optab_libfunc (ufix_optab, DImode, DFmode));
5520 add_libcall (libcall_htab,
5521 convert_optab_libfunc (sfix_optab, DImode, SFmode));
5522 add_libcall (libcall_htab,
5523 convert_optab_libfunc (ufix_optab, DImode, SFmode));
5525 /* Values from double-precision helper functions are returned in core
5526 registers if the selected core only supports single-precision
5527 arithmetic, even if we are using the hard-float ABI. The same is
5528 true for single-precision helpers, but we will never be using the
5529 hard-float ABI on a CPU which doesn't support single-precision
5530 operations in hardware. */
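/* For example (illustrative; __aeabi_dadd is the usual AEABI name for the
   DFmode add helper): on a single-precision-only FPU with the hard-float
   ABI, that helper returns its result in r0/r1 rather than in d0, which
   is why it is entered in this table.  */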
5531 add_libcall (libcall_htab, optab_libfunc (add_optab, DFmode));
5532 add_libcall (libcall_htab, optab_libfunc (sdiv_optab, DFmode));
5533 add_libcall (libcall_htab, optab_libfunc (smul_optab, DFmode));
5534 add_libcall (libcall_htab, optab_libfunc (neg_optab, DFmode));
5535 add_libcall (libcall_htab, optab_libfunc (sub_optab, DFmode));
5536 add_libcall (libcall_htab, optab_libfunc (eq_optab, DFmode));
5537 add_libcall (libcall_htab, optab_libfunc (lt_optab, DFmode));
5538 add_libcall (libcall_htab, optab_libfunc (le_optab, DFmode));
5539 add_libcall (libcall_htab, optab_libfunc (ge_optab, DFmode));
5540 add_libcall (libcall_htab, optab_libfunc (gt_optab, DFmode));
5541 add_libcall (libcall_htab, optab_libfunc (unord_optab, DFmode));
5542 add_libcall (libcall_htab, convert_optab_libfunc (sext_optab, DFmode,
5543 SFmode));
5544 add_libcall (libcall_htab, convert_optab_libfunc (trunc_optab, SFmode,
5545 DFmode));
5546 add_libcall (libcall_htab,
5547 convert_optab_libfunc (trunc_optab, HFmode, DFmode));
5550 return libcall && libcall_htab->find (libcall) != NULL;
5553 static rtx
5554 arm_libcall_value_1 (machine_mode mode)
5556 if (TARGET_AAPCS_BASED)
5557 return aapcs_libcall_value (mode);
5558 else if (TARGET_IWMMXT_ABI
5559 && arm_vector_mode_supported_p (mode))
5560 return gen_rtx_REG (mode, FIRST_IWMMXT_REGNUM);
5561 else
5562 return gen_rtx_REG (mode, ARG_REGISTER (1));
5565 /* Define how to find the value returned by a library function
5566 assuming the value has mode MODE. */
5568 static rtx
5569 arm_libcall_value (machine_mode mode, const_rtx libcall)
5571 if (TARGET_AAPCS_BASED && arm_pcs_default != ARM_PCS_AAPCS
5572 && GET_MODE_CLASS (mode) == MODE_FLOAT)
5574 /* The following libcalls return their result in integer registers,
5575 even though they return a floating point value. */
5576 if (arm_libcall_uses_aapcs_base (libcall))
5577 return gen_rtx_REG (mode, ARG_REGISTER(1));
5581 return arm_libcall_value_1 (mode);
5584 /* Implement TARGET_FUNCTION_VALUE_REGNO_P. */
5586 static bool
5587 arm_function_value_regno_p (const unsigned int regno)
5589 if (regno == ARG_REGISTER (1)
5590 || (TARGET_32BIT
5591 && TARGET_AAPCS_BASED
5592 && TARGET_HARD_FLOAT
5593 && regno == FIRST_VFP_REGNUM)
5594 || (TARGET_IWMMXT_ABI
5595 && regno == FIRST_IWMMXT_REGNUM))
5596 return true;
5598 return false;
5601 /* Determine the amount of memory needed to store the possible return
5602 registers of an untyped call. */
5603 int
5604 arm_apply_result_size (void)
5606 int size = 16;
5608 if (TARGET_32BIT)
5610 if (TARGET_HARD_FLOAT_ABI)
5611 size += 32;
5612 if (TARGET_IWMMXT_ABI)
5613 size += 8;
5616 return size;
5619 /* Decide whether TYPE should be returned in memory (true)
5620 or in a register (false). FNTYPE is the type of the function making
5621 the call. */
5622 static bool
5623 arm_return_in_memory (const_tree type, const_tree fntype)
5625 HOST_WIDE_INT size;
5627 size = int_size_in_bytes (type); /* Negative if not fixed size. */
5629 if (TARGET_AAPCS_BASED)
5631 /* Simple, non-aggregate types (i.e. not including vectors and
5632 complex) are always returned in a register (or registers).
5633 We don't care about which register here, so we can short-cut
5634 some of the detail. */
5635 if (!AGGREGATE_TYPE_P (type)
5636 && TREE_CODE (type) != VECTOR_TYPE
5637 && TREE_CODE (type) != COMPLEX_TYPE)
5638 return false;
5640 /* Any return value that is no larger than one word can be
5641 returned in r0. */
5642 if (((unsigned HOST_WIDE_INT) size) <= UNITS_PER_WORD)
5643 return false;
5645 /* Check any available co-processors to see if they accept the
5646 type as a register candidate (VFP, for example, can return
5647 some aggregates in consecutive registers). These aren't
5648 available if the call is variadic. */
5649 if (aapcs_select_return_coproc (type, fntype) >= 0)
5650 return false;
5652 /* Vector values should be returned using ARM registers, not
5653 memory (unless they're over 16 bytes, which will break since
5654 we only have four call-clobbered registers to play with). */
5655 if (TREE_CODE (type) == VECTOR_TYPE)
5656 return (size < 0 || size > (4 * UNITS_PER_WORD));
5658 /* The rest go in memory. */
5659 return true;
5662 if (TREE_CODE (type) == VECTOR_TYPE)
5663 return (size < 0 || size > (4 * UNITS_PER_WORD));
5665 if (!AGGREGATE_TYPE_P (type) &&
5666 (TREE_CODE (type) != VECTOR_TYPE))
5667 /* All simple types are returned in registers. */
5668 return false;
5670 if (arm_abi != ARM_ABI_APCS)
5672 /* ATPCS and later return aggregate types in memory only if they are
5673 larger than a word (or are variable size). */
5674 return (size < 0 || size > UNITS_PER_WORD);
5677 /* For the arm-wince targets we choose to be compatible with Microsoft's
5678 ARM and Thumb compilers, which always return aggregates in memory. */
5679 #ifndef ARM_WINCE
5680 /* All structures/unions bigger than one word are returned in memory.
5681 Also catch the case where int_size_in_bytes returns -1. In this case
5682 the aggregate is either huge or of variable size, and in either case
5683 we will want to return it via memory and not in a register. */
5684 if (size < 0 || size > UNITS_PER_WORD)
5685 return true;
5687 if (TREE_CODE (type) == RECORD_TYPE)
5689 tree field;
5691 /* For a struct the APCS says that we only return in a register
5692 if the type is 'integer like' and every addressable element
5693 has an offset of zero. For practical purposes this means
5694 that the structure can have at most one non bit-field element
5695 and that this element must be the first one in the structure. */
5697 /* Find the first field, ignoring non FIELD_DECL things which will
5698 have been created by C++. */
5699 for (field = TYPE_FIELDS (type);
5700 field && TREE_CODE (field) != FIELD_DECL;
5701 field = DECL_CHAIN (field))
5702 continue;
5704 if (field == NULL)
5705 return false; /* An empty structure. Allowed by an extension to ANSI C. */
5707 /* Check that the first field is valid for returning in a register. */
5709 /* ... Floats are not allowed */
5710 if (FLOAT_TYPE_P (TREE_TYPE (field)))
5711 return true;
5713 /* ... Aggregates that are not themselves valid for returning in
5714 a register are not allowed. */
5715 if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE))
5716 return true;
5718 /* Now check the remaining fields, if any. Only bitfields are allowed,
5719 since they are not addressable. */
5720 for (field = DECL_CHAIN (field);
5721 field;
5722 field = DECL_CHAIN (field))
5724 if (TREE_CODE (field) != FIELD_DECL)
5725 continue;
5727 if (!DECL_BIT_FIELD_TYPE (field))
5728 return true;
5731 return false;
5734 if (TREE_CODE (type) == UNION_TYPE)
5736 tree field;
5738 /* Unions can be returned in registers if every element is
5739 integral, or can be returned in an integer register. */
5740 for (field = TYPE_FIELDS (type);
5741 field;
5742 field = DECL_CHAIN (field))
5744 if (TREE_CODE (field) != FIELD_DECL)
5745 continue;
5747 if (FLOAT_TYPE_P (TREE_TYPE (field)))
5748 return true;
5750 if (arm_return_in_memory (TREE_TYPE (field), NULL_TREE))
5751 return true;
5754 return false;
5756 #endif /* not ARM_WINCE */
5758 /* Return all other types in memory. */
5759 return true;
5762 const struct pcs_attribute_arg
5764 const char *arg;
5765 enum arm_pcs value;
5766 } pcs_attribute_args[] =
5768 {"aapcs", ARM_PCS_AAPCS},
5769 {"aapcs-vfp", ARM_PCS_AAPCS_VFP},
5770 #if 0
5771 /* We could recognize these, but changes would be needed elsewhere
5772 * to implement them. */
5773 {"aapcs-iwmmxt", ARM_PCS_AAPCS_IWMMXT},
5774 {"atpcs", ARM_PCS_ATPCS},
5775 {"apcs", ARM_PCS_APCS},
5776 #endif
5777 {NULL, ARM_PCS_UNKNOWN}
5780 static enum arm_pcs
5781 arm_pcs_from_attribute (tree attr)
5783 const struct pcs_attribute_arg *ptr;
5784 const char *arg;
5786 /* Get the value of the argument. */
5787 if (TREE_VALUE (attr) == NULL_TREE
5788 || TREE_CODE (TREE_VALUE (attr)) != STRING_CST)
5789 return ARM_PCS_UNKNOWN;
5791 arg = TREE_STRING_POINTER (TREE_VALUE (attr));
5793 /* Check it against the list of known arguments. */
5794 for (ptr = pcs_attribute_args; ptr->arg != NULL; ptr++)
5795 if (streq (arg, ptr->arg))
5796 return ptr->value;
5799 /* An unrecognized PCS name. */
5799 return ARM_PCS_UNKNOWN;
5802 /* Get the PCS variant to use for this call. TYPE is the function's type
5803 specification, DECL is the specific declaration. DECL may be null if
5804 the call could be indirect or if this is a library call. */
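/* Usage sketch (following the documented "pcs" attribute syntax):
     double f (double) __attribute__ ((pcs ("aapcs-vfp")));
   makes this function return ARM_PCS_AAPCS_VFP for non-variadic calls to
   f on an AAPCS-based target.  */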
5805 static enum arm_pcs
5806 arm_get_pcs_model (const_tree type, const_tree decl)
5808 bool user_convention = false;
5809 enum arm_pcs user_pcs = arm_pcs_default;
5810 tree attr;
5812 gcc_assert (type);
5814 attr = lookup_attribute ("pcs", TYPE_ATTRIBUTES (type));
5815 if (attr)
5817 user_pcs = arm_pcs_from_attribute (TREE_VALUE (attr));
5818 user_convention = true;
5821 if (TARGET_AAPCS_BASED)
5823 /* Detect varargs functions. These always use the base rules
5824 (no argument is ever a candidate for a co-processor
5825 register). */
5826 bool base_rules = stdarg_p (type);
5828 if (user_convention)
5830 if (user_pcs > ARM_PCS_AAPCS_LOCAL)
5831 sorry ("non-AAPCS derived PCS variant");
5832 else if (base_rules && user_pcs != ARM_PCS_AAPCS)
5833 error ("variadic functions must use the base AAPCS variant");
5836 if (base_rules)
5837 return ARM_PCS_AAPCS;
5838 else if (user_convention)
5839 return user_pcs;
5840 else if (decl && flag_unit_at_a_time)
5842 /* Local functions never leak outside this compilation unit,
5843 so we are free to use whatever conventions are
5844 appropriate. */
5845 /* FIXME: remove CONST_CAST_TREE when cgraph is constified. */
5846 cgraph_local_info *i = cgraph_node::local_info (CONST_CAST_TREE(decl));
5847 if (i && i->local)
5848 return ARM_PCS_AAPCS_LOCAL;
5851 else if (user_convention && user_pcs != arm_pcs_default)
5852 sorry ("PCS variant");
5854 /* For everything else we use the target's default. */
5855 return arm_pcs_default;
5859 static void
5860 aapcs_vfp_cum_init (CUMULATIVE_ARGS *pcum ATTRIBUTE_UNUSED,
5861 const_tree fntype ATTRIBUTE_UNUSED,
5862 rtx libcall ATTRIBUTE_UNUSED,
5863 const_tree fndecl ATTRIBUTE_UNUSED)
5865 /* Record the unallocated VFP registers. */
5866 pcum->aapcs_vfp_regs_free = (1 << NUM_VFP_ARG_REGS) - 1;
5867 pcum->aapcs_vfp_reg_alloc = 0;
5870 /* Walk down the type tree of TYPE counting consecutive base elements.
5871 If *MODEP is VOIDmode, then set it to the first valid floating point
5872 type. If a non-floating point type is found, or if a floating point
5873 type that doesn't match a non-VOIDmode *MODEP is found, then return -1,
5874 otherwise return the count in the sub-tree. */
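/* Worked example (illustrative): struct { float x, y, z; } yields 3 with
   *MODEP set to SFmode (a homogeneous aggregate), while
   struct { float f; double d; } yields -1 because the element modes
   differ.  */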
5875 static int
5876 aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep)
5878 machine_mode mode;
5879 HOST_WIDE_INT size;
5881 switch (TREE_CODE (type))
5883 case REAL_TYPE:
5884 mode = TYPE_MODE (type);
5885 if (mode != DFmode && mode != SFmode && mode != HFmode)
5886 return -1;
5888 if (*modep == VOIDmode)
5889 *modep = mode;
5891 if (*modep == mode)
5892 return 1;
5894 break;
5896 case COMPLEX_TYPE:
5897 mode = TYPE_MODE (TREE_TYPE (type));
5898 if (mode != DFmode && mode != SFmode)
5899 return -1;
5901 if (*modep == VOIDmode)
5902 *modep = mode;
5904 if (*modep == mode)
5905 return 2;
5907 break;
5909 case VECTOR_TYPE:
5910 /* Use V2SImode and V4SImode as representatives of all 64-bit
5911 and 128-bit vector types, whether or not those modes are
5912 supported with the present options. */
5913 size = int_size_in_bytes (type);
5914 switch (size)
5916 case 8:
5917 mode = V2SImode;
5918 break;
5919 case 16:
5920 mode = V4SImode;
5921 break;
5922 default:
5923 return -1;
5926 if (*modep == VOIDmode)
5927 *modep = mode;
5929 /* Vector modes are considered to be opaque: two vectors are
5930 equivalent for the purposes of being homogeneous aggregates
5931 if they are the same size. */
5932 if (*modep == mode)
5933 return 1;
5935 break;
5937 case ARRAY_TYPE:
5939 int count;
5940 tree index = TYPE_DOMAIN (type);
5942 /* Can't handle incomplete types nor sizes that are not
5943 fixed. */
5944 if (!COMPLETE_TYPE_P (type)
5945 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
5946 return -1;
5948 count = aapcs_vfp_sub_candidate (TREE_TYPE (type), modep);
5949 if (count == -1
5950 || !index
5951 || !TYPE_MAX_VALUE (index)
5952 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
5953 || !TYPE_MIN_VALUE (index)
5954 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
5955 || count < 0)
5956 return -1;
5958 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
5959 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
5961 /* There must be no padding. */
5962 if (wi::to_wide (TYPE_SIZE (type))
5963 != count * GET_MODE_BITSIZE (*modep))
5964 return -1;
5966 return count;
5969 case RECORD_TYPE:
5971 int count = 0;
5972 int sub_count;
5973 tree field;
5975 /* Can't handle incomplete types nor sizes that are not
5976 fixed. */
5977 if (!COMPLETE_TYPE_P (type)
5978 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
5979 return -1;
5981 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
5983 if (TREE_CODE (field) != FIELD_DECL)
5984 continue;
5986 sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
5987 if (sub_count < 0)
5988 return -1;
5989 count += sub_count;
5992 /* There must be no padding. */
5993 if (wi::to_wide (TYPE_SIZE (type))
5994 != count * GET_MODE_BITSIZE (*modep))
5995 return -1;
5997 return count;
6000 case UNION_TYPE:
6001 case QUAL_UNION_TYPE:
6003 /* These aren't very interesting except in a degenerate case. */
6004 int count = 0;
6005 int sub_count;
6006 tree field;
6008 /* Can't handle incomplete types nor sizes that are not
6009 fixed. */
6010 if (!COMPLETE_TYPE_P (type)
6011 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
6012 return -1;
6014 for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6016 if (TREE_CODE (field) != FIELD_DECL)
6017 continue;
6019 sub_count = aapcs_vfp_sub_candidate (TREE_TYPE (field), modep);
6020 if (sub_count < 0)
6021 return -1;
6022 count = count > sub_count ? count : sub_count;
6025 /* There must be no padding. */
6026 if (wi::to_wide (TYPE_SIZE (type))
6027 != count * GET_MODE_BITSIZE (*modep))
6028 return -1;
6030 return count;
6033 default:
6034 break;
6037 return -1;
6040 /* Return true if PCS_VARIANT should use VFP registers. */
6041 static bool
6042 use_vfp_abi (enum arm_pcs pcs_variant, bool is_double)
6044 if (pcs_variant == ARM_PCS_AAPCS_VFP)
6046 static bool seen_thumb1_vfp = false;
6048 if (TARGET_THUMB1 && !seen_thumb1_vfp)
6050 sorry ("Thumb-1 hard-float VFP ABI");
6051 /* sorry() is not immediately fatal, so only display this once. */
6052 seen_thumb1_vfp = true;
6055 return true;
6058 if (pcs_variant != ARM_PCS_AAPCS_LOCAL)
6059 return false;
6061 return (TARGET_32BIT && TARGET_HARD_FLOAT &&
6062 (TARGET_VFP_DOUBLE || !is_double));
6065 /* Return true if an argument whose type is TYPE, or mode is MODE, is
6066 suitable for passing or returning in VFP registers for the PCS
6067 variant selected. If it is, then *BASE_MODE is updated to contain
6068 a machine mode describing each element of the argument's type and
6069 *COUNT to hold the number of such elements. */
6070 static bool
6071 aapcs_vfp_is_call_or_return_candidate (enum arm_pcs pcs_variant,
6072 machine_mode mode, const_tree type,
6073 machine_mode *base_mode, int *count)
6075 machine_mode new_mode = VOIDmode;
6077 /* If we have the type information, prefer that to working things
6078 out from the mode. */
6079 if (type)
6081 int ag_count = aapcs_vfp_sub_candidate (type, &new_mode);
6083 if (ag_count > 0 && ag_count <= 4)
6084 *count = ag_count;
6085 else
6086 return false;
6088 else if (GET_MODE_CLASS (mode) == MODE_FLOAT
6089 || GET_MODE_CLASS (mode) == MODE_VECTOR_INT
6090 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
6092 *count = 1;
6093 new_mode = mode;
6095 else if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
6097 *count = 2;
6098 new_mode = (mode == DCmode ? DFmode : SFmode);
6100 else
6101 return false;
6104 if (!use_vfp_abi (pcs_variant, ARM_NUM_REGS (new_mode) > 1))
6105 return false;
6107 *base_mode = new_mode;
6108 return true;
6111 static bool
6112 aapcs_vfp_is_return_candidate (enum arm_pcs pcs_variant,
6113 machine_mode mode, const_tree type)
6115 int count ATTRIBUTE_UNUSED;
6116 machine_mode ag_mode ATTRIBUTE_UNUSED;
6118 if (!use_vfp_abi (pcs_variant, false))
6119 return false;
6120 return aapcs_vfp_is_call_or_return_candidate (pcs_variant, mode, type,
6121 &ag_mode, &count);
6124 static bool
6125 aapcs_vfp_is_call_candidate (CUMULATIVE_ARGS *pcum, machine_mode mode,
6126 const_tree type)
6128 if (!use_vfp_abi (pcum->pcs_variant, false))
6129 return false;
6131 return aapcs_vfp_is_call_or_return_candidate (pcum->pcs_variant, mode, type,
6132 &pcum->aapcs_vfp_rmode,
6133 &pcum->aapcs_vfp_rcount);
6136 /* Implement the allocate field in aapcs_cp_arg_layout. See the comment there
6137 for the behaviour of this function. */
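/* Allocation sketch (illustrative): for a homogeneous aggregate of two
   doubles, aapcs_vfp_rmode is DFmode, so shift == 2 and mask == 0xf; the
   loop below then looks for four consecutive free single-precision slots
   at an even offset, e.g. s0-s3, which form d0 and d1.  */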
6139 static bool
6140 aapcs_vfp_allocate (CUMULATIVE_ARGS *pcum, machine_mode mode,
6141 const_tree type ATTRIBUTE_UNUSED)
6143 int rmode_size
6144 = MAX (GET_MODE_SIZE (pcum->aapcs_vfp_rmode), GET_MODE_SIZE (SFmode));
6145 int shift = rmode_size / GET_MODE_SIZE (SFmode);
6146 unsigned mask = (1 << (shift * pcum->aapcs_vfp_rcount)) - 1;
6147 int regno;
6149 for (regno = 0; regno < NUM_VFP_ARG_REGS; regno += shift)
6150 if (((pcum->aapcs_vfp_regs_free >> regno) & mask) == mask)
6152 pcum->aapcs_vfp_reg_alloc = mask << regno;
6153 if (mode == BLKmode
6154 || (mode == TImode && ! TARGET_NEON)
6155 || ! arm_hard_regno_mode_ok (FIRST_VFP_REGNUM + regno, mode))
6157 int i;
6158 int rcount = pcum->aapcs_vfp_rcount;
6159 int rshift = shift;
6160 machine_mode rmode = pcum->aapcs_vfp_rmode;
6161 rtx par;
6162 if (!TARGET_NEON)
6164 /* Avoid using unsupported vector modes. */
6165 if (rmode == V2SImode)
6166 rmode = DImode;
6167 else if (rmode == V4SImode)
6169 rmode = DImode;
6170 rcount *= 2;
6171 rshift /= 2;
6174 par = gen_rtx_PARALLEL (mode, rtvec_alloc (rcount));
6175 for (i = 0; i < rcount; i++)
6177 rtx tmp = gen_rtx_REG (rmode,
6178 FIRST_VFP_REGNUM + regno + i * rshift);
6179 tmp = gen_rtx_EXPR_LIST
6180 (VOIDmode, tmp,
6181 GEN_INT (i * GET_MODE_SIZE (rmode)));
6182 XVECEXP (par, 0, i) = tmp;
6185 pcum->aapcs_reg = par;
6187 else
6188 pcum->aapcs_reg = gen_rtx_REG (mode, FIRST_VFP_REGNUM + regno);
6189 return true;
6191 return false;
6194 /* Implement the allocate_return_reg field in aapcs_cp_arg_layout. See the
6195 comment there for the behaviour of this function. */
6197 static rtx
6198 aapcs_vfp_allocate_return_reg (enum arm_pcs pcs_variant ATTRIBUTE_UNUSED,
6199 machine_mode mode,
6200 const_tree type ATTRIBUTE_UNUSED)
6202 if (!use_vfp_abi (pcs_variant, false))
6203 return NULL;
6205 if (mode == BLKmode
6206 || (GET_MODE_CLASS (mode) == MODE_INT
6207 && GET_MODE_SIZE (mode) >= GET_MODE_SIZE (TImode)
6208 && !TARGET_NEON))
6210 int count;
6211 machine_mode ag_mode;
6212 int i;
6213 rtx par;
6214 int shift;
6216 aapcs_vfp_is_call_or_return_candidate (pcs_variant, mode, type,
6217 &ag_mode, &count);
6219 if (!TARGET_NEON)
6221 if (ag_mode == V2SImode)
6222 ag_mode = DImode;
6223 else if (ag_mode == V4SImode)
6225 ag_mode = DImode;
6226 count *= 2;
6229 shift = GET_MODE_SIZE(ag_mode) / GET_MODE_SIZE(SFmode);
6230 par = gen_rtx_PARALLEL (mode, rtvec_alloc (count));
6231 for (i = 0; i < count; i++)
6233 rtx tmp = gen_rtx_REG (ag_mode, FIRST_VFP_REGNUM + i * shift);
6234 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp,
6235 GEN_INT (i * GET_MODE_SIZE (ag_mode)));
6236 XVECEXP (par, 0, i) = tmp;
6239 return par;
6242 return gen_rtx_REG (mode, FIRST_VFP_REGNUM);
6245 static void
6246 aapcs_vfp_advance (CUMULATIVE_ARGS *pcum ATTRIBUTE_UNUSED,
6247 machine_mode mode ATTRIBUTE_UNUSED,
6248 const_tree type ATTRIBUTE_UNUSED)
6250 pcum->aapcs_vfp_regs_free &= ~pcum->aapcs_vfp_reg_alloc;
6251 pcum->aapcs_vfp_reg_alloc = 0;
6252 return;
6255 #define AAPCS_CP(X) \
6257 aapcs_ ## X ## _cum_init, \
6258 aapcs_ ## X ## _is_call_candidate, \
6259 aapcs_ ## X ## _allocate, \
6260 aapcs_ ## X ## _is_return_candidate, \
6261 aapcs_ ## X ## _allocate_return_reg, \
6262 aapcs_ ## X ## _advance \
6265 /* Table of co-processors that can be used to pass arguments in
6266 registers. Ideally no argument should be a candidate for more than
6267 one co-processor table entry, but the table is processed in order
6268 and stops after the first match. If that entry then fails to put
6269 the argument into a co-processor register, the argument will go on
6270 the stack. */
6271 static struct
6273 /* Initialize co-processor related state in CUMULATIVE_ARGS structure. */
6274 void (*cum_init) (CUMULATIVE_ARGS *, const_tree, rtx, const_tree);
6276 /* Return true if an argument of mode MODE (or type TYPE if MODE is
6277 BLKmode) is a candidate for this co-processor's registers; this
6278 function should ignore any position-dependent state in
6279 CUMULATIVE_ARGS and only use call-type dependent information. */
6280 bool (*is_call_candidate) (CUMULATIVE_ARGS *, machine_mode, const_tree);
6282 /* Return true if the argument does get a co-processor register; it
6283 should set aapcs_reg to an RTX of the register allocated as is
6284 required for a return from FUNCTION_ARG. */
6285 bool (*allocate) (CUMULATIVE_ARGS *, machine_mode, const_tree);
6287 /* Return true if a result of mode MODE (or type TYPE if MODE is BLKmode) can
6288 be returned in this co-processor's registers. */
6289 bool (*is_return_candidate) (enum arm_pcs, machine_mode, const_tree);
6291 /* Allocate and return an RTX element to hold the return type of a call. This
6292 routine must not fail and will only be called if is_return_candidate
6293 returned true with the same parameters. */
6294 rtx (*allocate_return_reg) (enum arm_pcs, machine_mode, const_tree);
6296 /* Finish processing this argument and prepare to start processing
6297 the next one. */
6298 void (*advance) (CUMULATIVE_ARGS *, machine_mode, const_tree);
6299 } aapcs_cp_arg_layout[ARM_NUM_COPROC_SLOTS] =
6301 AAPCS_CP(vfp)
6304 #undef AAPCS_CP
6306 static int
6307 aapcs_select_call_coproc (CUMULATIVE_ARGS *pcum, machine_mode mode,
6308 const_tree type)
6310 int i;
6312 for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
6313 if (aapcs_cp_arg_layout[i].is_call_candidate (pcum, mode, type))
6314 return i;
6316 return -1;
6319 static int
6320 aapcs_select_return_coproc (const_tree type, const_tree fntype)
6322 /* We aren't passed a decl, so we can't check that a call is local.
6323 However, it isn't clear that that would be a win anyway, since it
6324 might limit some tail-calling opportunities. */
6325 enum arm_pcs pcs_variant;
6327 if (fntype)
6329 const_tree fndecl = NULL_TREE;
6331 if (TREE_CODE (fntype) == FUNCTION_DECL)
6333 fndecl = fntype;
6334 fntype = TREE_TYPE (fntype);
6337 pcs_variant = arm_get_pcs_model (fntype, fndecl);
6339 else
6340 pcs_variant = arm_pcs_default;
6342 if (pcs_variant != ARM_PCS_AAPCS)
6344 int i;
6346 for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
6347 if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant,
6348 TYPE_MODE (type),
6349 type))
6350 return i;
6352 return -1;
6355 static rtx
6356 aapcs_allocate_return_reg (machine_mode mode, const_tree type,
6357 const_tree fntype)
6359 /* We aren't passed a decl, so we can't check that a call is local.
6360 However, it isn't clear that that would be a win anyway, since it
6361 might limit some tail-calling opportunities. */
6362 enum arm_pcs pcs_variant;
6363 int unsignedp ATTRIBUTE_UNUSED;
6365 if (fntype)
6367 const_tree fndecl = NULL_TREE;
6369 if (TREE_CODE (fntype) == FUNCTION_DECL)
6371 fndecl = fntype;
6372 fntype = TREE_TYPE (fntype);
6375 pcs_variant = arm_get_pcs_model (fntype, fndecl);
6377 else
6378 pcs_variant = arm_pcs_default;
6380 /* Promote integer types. */
6381 if (type && INTEGRAL_TYPE_P (type))
6382 mode = arm_promote_function_mode (type, mode, &unsignedp, fntype, 1);
6384 if (pcs_variant != ARM_PCS_AAPCS)
6386 int i;
6388 for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
6389 if (aapcs_cp_arg_layout[i].is_return_candidate (pcs_variant, mode,
6390 type))
6391 return aapcs_cp_arg_layout[i].allocate_return_reg (pcs_variant,
6392 mode, type);
6395 /* Promotes small structs returned in a register to full-word size
6396 for big-endian AAPCS. */
6397 if (type && arm_return_in_msb (type))
6399 HOST_WIDE_INT size = int_size_in_bytes (type);
6400 if (size % UNITS_PER_WORD != 0)
6402 size += UNITS_PER_WORD - size % UNITS_PER_WORD;
6403 mode = int_mode_for_size (size * BITS_PER_UNIT, 0).require ();
6407 return gen_rtx_REG (mode, R0_REGNUM);
6410 static rtx
6411 aapcs_libcall_value (machine_mode mode)
6413 if (BYTES_BIG_ENDIAN && ALL_FIXED_POINT_MODE_P (mode)
6414 && GET_MODE_SIZE (mode) <= 4)
6415 mode = SImode;
6417 return aapcs_allocate_return_reg (mode, NULL_TREE, NULL_TREE);
6420 /* Lay out a function argument using the AAPCS rules. The rule
6421 numbers referred to here are those in the AAPCS. */
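/* Example (illustrative): for f (int a, long long b), A is allocated r0;
   rule C3 then rounds the next core register number up from 1 to 2 for
   the doubleword-aligned B, which takes r2/r3 and leaves r1 unused.  */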
6422 static void
6423 aapcs_layout_arg (CUMULATIVE_ARGS *pcum, machine_mode mode,
6424 const_tree type, bool named)
6426 int nregs, nregs2;
6427 int ncrn;
6429 /* We only need to do this once per argument. */
6430 if (pcum->aapcs_arg_processed)
6431 return;
6433 pcum->aapcs_arg_processed = true;
6435 /* Special case: if named is false then we are handling an incoming
6436 anonymous argument which is on the stack. */
6437 if (!named)
6438 return;
6440 /* Is this a potential co-processor register candidate? */
6441 if (pcum->pcs_variant != ARM_PCS_AAPCS)
6443 int slot = aapcs_select_call_coproc (pcum, mode, type);
6444 pcum->aapcs_cprc_slot = slot;
6446 /* We don't have to apply any of the rules from part B of the
6447 preparation phase, these are handled elsewhere in the
6448 compiler. */
6450 if (slot >= 0)
6452 /* A Co-processor register candidate goes either in its own
6453 class of registers or on the stack. */
6454 if (!pcum->aapcs_cprc_failed[slot])
6456 /* C1.cp - Try to allocate the argument to co-processor
6457 registers. */
6458 if (aapcs_cp_arg_layout[slot].allocate (pcum, mode, type))
6459 return;
6461 /* C2.cp - Put the argument on the stack and note that we
6462 can't assign any more candidates in this slot. We also
6463 need to note that we have allocated stack space, so that
6464 we won't later try to split a non-cprc candidate between
6465 core registers and the stack. */
6466 pcum->aapcs_cprc_failed[slot] = true;
6467 pcum->can_split = false;
6470 /* We didn't get a register, so this argument goes on the
6471 stack. */
6472 gcc_assert (pcum->can_split == false);
6473 return;
6477 /* C3 - For double-word aligned arguments, round the NCRN up to the
6478 next even number. */
6479 ncrn = pcum->aapcs_ncrn;
6480 if (ncrn & 1)
6482 int res = arm_needs_doubleword_align (mode, type);
6483 /* Only warn during RTL expansion of call stmts, otherwise we would
6484 warn e.g. during gimplification even on functions that will
6485 always be inlined, and we'd warn multiple times. Don't warn when
6486 called in expand_function_start either, as we warn instead in
6487 arm_function_arg_boundary in that case. */
6488 if (res < 0 && warn_psabi && currently_expanding_gimple_stmt)
6489 inform (input_location, "parameter passing for argument of type "
6490 "%qT changed in GCC 7.1", type);
6491 else if (res > 0)
6492 ncrn++;
6495 nregs = ARM_NUM_REGS2(mode, type);
6497 /* Sigh, this test should really assert that nregs > 0, but a GCC
6498 extension allows empty structs and then gives them empty size; it
6499 then allows such a structure to be passed by value. For some of
6500 the code below we have to pretend that such an argument has
6501 non-zero size so that we 'locate' it correctly either in
6502 registers or on the stack. */
6503 gcc_assert (nregs >= 0);
6505 nregs2 = nregs ? nregs : 1;
6507 /* C4 - Argument fits entirely in core registers. */
6508 if (ncrn + nregs2 <= NUM_ARG_REGS)
6510 pcum->aapcs_reg = gen_rtx_REG (mode, ncrn);
6511 pcum->aapcs_next_ncrn = ncrn + nregs;
6512 return;
6515 /* C5 - Some core registers left and there are no arguments already
6516 on the stack: split this argument between the remaining core
6517 registers and the stack. */
6518 if (ncrn < NUM_ARG_REGS && pcum->can_split)
6520 pcum->aapcs_reg = gen_rtx_REG (mode, ncrn);
6521 pcum->aapcs_next_ncrn = NUM_ARG_REGS;
6522 pcum->aapcs_partial = (NUM_ARG_REGS - ncrn) * UNITS_PER_WORD;
6523 return;
6526 /* C6 - NCRN is set to 4. */
6527 pcum->aapcs_next_ncrn = NUM_ARG_REGS;
6529 /* C7,C8 - argument goes on the stack. We have nothing to do here. */
6530 return;
6533 /* Initialize a variable CUM of type CUMULATIVE_ARGS
6534 for a call to a function whose data type is FNTYPE.
6535 For a library call, FNTYPE is NULL. */
6536 void
6537 arm_init_cumulative_args (CUMULATIVE_ARGS *pcum, tree fntype,
6538 rtx libname,
6539 tree fndecl ATTRIBUTE_UNUSED)
6541 /* Long call handling. */
6542 if (fntype)
6543 pcum->pcs_variant = arm_get_pcs_model (fntype, fndecl);
6544 else
6545 pcum->pcs_variant = arm_pcs_default;
6547 if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
6549 if (arm_libcall_uses_aapcs_base (libname))
6550 pcum->pcs_variant = ARM_PCS_AAPCS;
6552 pcum->aapcs_ncrn = pcum->aapcs_next_ncrn = 0;
6553 pcum->aapcs_reg = NULL_RTX;
6554 pcum->aapcs_partial = 0;
6555 pcum->aapcs_arg_processed = false;
6556 pcum->aapcs_cprc_slot = -1;
6557 pcum->can_split = true;
6559 if (pcum->pcs_variant != ARM_PCS_AAPCS)
6561 int i;
6563 for (i = 0; i < ARM_NUM_COPROC_SLOTS; i++)
6565 pcum->aapcs_cprc_failed[i] = false;
6566 aapcs_cp_arg_layout[i].cum_init (pcum, fntype, libname, fndecl);
6569 return;
6572 /* Legacy ABIs */
6574 /* On the ARM, the offset starts at 0. */
6575 pcum->nregs = 0;
6576 pcum->iwmmxt_nregs = 0;
6577 pcum->can_split = true;
6579 /* Varargs vectors are treated the same as long long.
6580 named_count avoids having to change the way arm handles 'named' */
6581 pcum->named_count = 0;
6582 pcum->nargs = 0;
6584 if (TARGET_REALLY_IWMMXT && fntype)
6586 tree fn_arg;
6588 for (fn_arg = TYPE_ARG_TYPES (fntype);
6589 fn_arg;
6590 fn_arg = TREE_CHAIN (fn_arg))
6591 pcum->named_count += 1;
6593 if (! pcum->named_count)
6594 pcum->named_count = INT_MAX;
6598 /* Return 1 if double word alignment is required for argument passing.
6599 Return -1 if double word alignment used to be required for argument
6600 passing before PR77728 ABI fix, but is not required anymore.
6601 Return 0 if double word alignment is not required and wasn't required
6602 before either. */
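/* Example (illustrative): struct { long long x; } has a FIELD_DECL with
   64-bit alignment, so this returns 1; an aggregate whose only
   over-aligned entry in TYPE_FIELDS is not a FIELD_DECL returns -1, so
   that -Wpsabi can still flag the pre-PR77728 behaviour.  */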
6603 static int
6604 arm_needs_doubleword_align (machine_mode mode, const_tree type)
6606 if (!type)
6607 return GET_MODE_ALIGNMENT (mode) > PARM_BOUNDARY;
6609 /* Scalar and vector types: Use natural alignment, i.e. of base type. */
6610 if (!AGGREGATE_TYPE_P (type))
6611 return TYPE_ALIGN (TYPE_MAIN_VARIANT (type)) > PARM_BOUNDARY;
6613 /* Array types: Use member alignment of element type. */
6614 if (TREE_CODE (type) == ARRAY_TYPE)
6615 return TYPE_ALIGN (TREE_TYPE (type)) > PARM_BOUNDARY;
6617 int ret = 0;
6618 /* Record/aggregate types: Use greatest member alignment of any member. */
6619 for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
6620 if (DECL_ALIGN (field) > PARM_BOUNDARY)
6622 if (TREE_CODE (field) == FIELD_DECL)
6623 return 1;
6624 else
6625 /* Before PR77728 fix, we were incorrectly considering also
6626 other aggregate fields, like VAR_DECLs, TYPE_DECLs etc.
6627 Make sure we can warn about that with -Wpsabi. */
6628 ret = -1;
6631 return ret;
6635 /* Determine where to put an argument to a function.
6636 Value is zero to push the argument on the stack,
6637 or a hard register in which to store the argument.
6639 MODE is the argument's machine mode.
6640 TYPE is the data type of the argument (as a tree).
6641 This is null for libcalls where that information may
6642 not be available.
6643 CUM is a variable of type CUMULATIVE_ARGS which gives info about
6644 the preceding args and about the function being called.
6645 NAMED is nonzero if this argument is a named parameter
6646 (otherwise it is an extra parameter matching an ellipsis).
6648 On the ARM, normally the first 16 bytes are passed in registers r0-r3; all
6649 other arguments are passed on the stack. If (NAMED == 0) (which happens
6650 only in assign_parms, since TARGET_SETUP_INCOMING_VARARGS is
6651 defined), say it is passed in the stack (function_prologue will
6652 indeed make it pass in the stack if necessary). */
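/* Splitting example (illustrative): a 16-byte structure passed after a
   single int argument starts in r1 and occupies r1-r3, with the final
   word going on the stack; arm_arg_partial_bytes below then reports the
   12 bytes that were passed in registers.  */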
6654 static rtx
6655 arm_function_arg (cumulative_args_t pcum_v, machine_mode mode,
6656 const_tree type, bool named)
6658 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
6659 int nregs;
6661 /* Handle the special case quickly. Pick an arbitrary value for op2 of
6662 a call insn (op3 of a call_value insn). */
6663 if (mode == VOIDmode)
6664 return const0_rtx;
6666 if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
6668 aapcs_layout_arg (pcum, mode, type, named);
6669 return pcum->aapcs_reg;
6672 /* Varargs vectors are treated the same as long long.
6673 named_count avoids having to change the way arm handles 'named' */
6674 if (TARGET_IWMMXT_ABI
6675 && arm_vector_mode_supported_p (mode)
6676 && pcum->named_count > pcum->nargs + 1)
6678 if (pcum->iwmmxt_nregs <= 9)
6679 return gen_rtx_REG (mode, pcum->iwmmxt_nregs + FIRST_IWMMXT_REGNUM);
6680 else
6682 pcum->can_split = false;
6683 return NULL_RTX;
6687 /* Put doubleword aligned quantities in even register pairs. */
6688 if ((pcum->nregs & 1) && ARM_DOUBLEWORD_ALIGN)
6690 int res = arm_needs_doubleword_align (mode, type);
6691 if (res < 0 && warn_psabi)
6692 inform (input_location, "parameter passing for argument of type "
6693 "%qT changed in GCC 7.1", type);
6694 else if (res > 0)
6695 pcum->nregs++;
6698 /* Only allow splitting an arg between regs and memory if all preceding
6699 args were allocated to regs. For args passed by reference we only count
6700 the reference pointer. */
6701 if (pcum->can_split)
6702 nregs = 1;
6703 else
6704 nregs = ARM_NUM_REGS2 (mode, type);
6706 if (!named || pcum->nregs + nregs > NUM_ARG_REGS)
6707 return NULL_RTX;
6709 return gen_rtx_REG (mode, pcum->nregs);
6712 static unsigned int
6713 arm_function_arg_boundary (machine_mode mode, const_tree type)
6715 if (!ARM_DOUBLEWORD_ALIGN)
6716 return PARM_BOUNDARY;
6718 int res = arm_needs_doubleword_align (mode, type);
6719 if (res < 0 && warn_psabi)
6720 inform (input_location, "parameter passing for argument of type %qT "
6721 "changed in GCC 7.1", type);
6723 return res > 0 ? DOUBLEWORD_ALIGNMENT : PARM_BOUNDARY;
6726 static int
6727 arm_arg_partial_bytes (cumulative_args_t pcum_v, machine_mode mode,
6728 tree type, bool named)
6730 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
6731 int nregs = pcum->nregs;
6733 if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
6735 aapcs_layout_arg (pcum, mode, type, named);
6736 return pcum->aapcs_partial;
6739 if (TARGET_IWMMXT_ABI && arm_vector_mode_supported_p (mode))
6740 return 0;
6742 if (NUM_ARG_REGS > nregs
6743 && (NUM_ARG_REGS < nregs + ARM_NUM_REGS2 (mode, type))
6744 && pcum->can_split)
6745 return (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
6747 return 0;
6750 /* Update the data in PCUM to advance over an argument
6751 of mode MODE and data type TYPE.
6752 (TYPE is null for libcalls where that information may not be available.) */
6754 static void
6755 arm_function_arg_advance (cumulative_args_t pcum_v, machine_mode mode,
6756 const_tree type, bool named)
6758 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
6760 if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
6762 aapcs_layout_arg (pcum, mode, type, named);
6764 if (pcum->aapcs_cprc_slot >= 0)
6766 aapcs_cp_arg_layout[pcum->aapcs_cprc_slot].advance (pcum, mode,
6767 type);
6768 pcum->aapcs_cprc_slot = -1;
6771 /* Generic stuff. */
6772 pcum->aapcs_arg_processed = false;
6773 pcum->aapcs_ncrn = pcum->aapcs_next_ncrn;
6774 pcum->aapcs_reg = NULL_RTX;
6775 pcum->aapcs_partial = 0;
6777 else
6779 pcum->nargs += 1;
6780 if (arm_vector_mode_supported_p (mode)
6781 && pcum->named_count > pcum->nargs
6782 && TARGET_IWMMXT_ABI)
6783 pcum->iwmmxt_nregs += 1;
6784 else
6785 pcum->nregs += ARM_NUM_REGS2 (mode, type);
6789 /* Variable sized types are passed by reference. This is a GCC
6790 extension to the ARM ABI. */
6792 static bool
6793 arm_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
6794 machine_mode mode ATTRIBUTE_UNUSED,
6795 const_tree type, bool named ATTRIBUTE_UNUSED)
6797 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
6800 /* Encode the current state of the #pragma [no_]long_calls. */
6801 typedef enum
6803 OFF, /* No #pragma [no_]long_calls is in effect. */
6804 LONG, /* #pragma long_calls is in effect. */
6805 SHORT /* #pragma no_long_calls is in effect. */
6806 } arm_pragma_enum;
6808 static arm_pragma_enum arm_pragma_long_calls = OFF;
6810 void
6811 arm_pr_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
6813 arm_pragma_long_calls = LONG;
6816 void
6817 arm_pr_no_long_calls (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
6819 arm_pragma_long_calls = SHORT;
6822 void
6823 arm_pr_long_calls_off (struct cpp_reader * pfile ATTRIBUTE_UNUSED)
6825 arm_pragma_long_calls = OFF;
6828 /* Handle an attribute requiring a FUNCTION_DECL;
6829 arguments as in struct attribute_spec.handler. */
6830 static tree
6831 arm_handle_fndecl_attribute (tree *node, tree name, tree args ATTRIBUTE_UNUSED,
6832 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
6834 if (TREE_CODE (*node) != FUNCTION_DECL)
6836 warning (OPT_Wattributes, "%qE attribute only applies to functions",
6837 name);
6838 *no_add_attrs = true;
6841 return NULL_TREE;
6844 /* Handle an "interrupt" or "isr" attribute;
6845 arguments as in struct attribute_spec.handler. */
6846 static tree
6847 arm_handle_isr_attribute (tree *node, tree name, tree args, int flags,
6848 bool *no_add_attrs)
6850 if (DECL_P (*node))
6852 if (TREE_CODE (*node) != FUNCTION_DECL)
6854 warning (OPT_Wattributes, "%qE attribute only applies to functions",
6855 name);
6856 *no_add_attrs = true;
6858 /* FIXME: the argument if any is checked for type attributes;
6859 should it be checked for decl ones? */
6861 else
6863 if (TREE_CODE (*node) == FUNCTION_TYPE
6864 || TREE_CODE (*node) == METHOD_TYPE)
6866 if (arm_isr_value (args) == ARM_FT_UNKNOWN)
6868 warning (OPT_Wattributes, "%qE attribute ignored",
6869 name);
6870 *no_add_attrs = true;
6873 else if (TREE_CODE (*node) == POINTER_TYPE
6874 && (TREE_CODE (TREE_TYPE (*node)) == FUNCTION_TYPE
6875 || TREE_CODE (TREE_TYPE (*node)) == METHOD_TYPE)
6876 && arm_isr_value (args) != ARM_FT_UNKNOWN)
6878 *node = build_variant_type_copy (*node);
6879 TREE_TYPE (*node) = build_type_attribute_variant
6880 (TREE_TYPE (*node),
6881 tree_cons (name, args, TYPE_ATTRIBUTES (TREE_TYPE (*node))));
6882 *no_add_attrs = true;
6884 else
6886 /* Possibly pass this attribute on from the type to a decl. */
6887 if (flags & ((int) ATTR_FLAG_DECL_NEXT
6888 | (int) ATTR_FLAG_FUNCTION_NEXT
6889 | (int) ATTR_FLAG_ARRAY_NEXT))
6891 *no_add_attrs = true;
6892 return tree_cons (name, args, NULL_TREE);
6894 else
6896 warning (OPT_Wattributes, "%qE attribute ignored",
6897 name);
6902 return NULL_TREE;
6905 /* Handle a "pcs" attribute; arguments as in struct
6906 attribute_spec.handler. */
6907 static tree
6908 arm_handle_pcs_attribute (tree *node ATTRIBUTE_UNUSED, tree name, tree args,
6909 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
6911 if (arm_pcs_from_attribute (args) == ARM_PCS_UNKNOWN)
6913 warning (OPT_Wattributes, "%qE attribute ignored", name);
6914 *no_add_attrs = true;
6916 return NULL_TREE;
6919 #if TARGET_DLLIMPORT_DECL_ATTRIBUTES
6920 /* Handle the "notshared" attribute. This attribute is another way of
6921 requesting hidden visibility. ARM's compiler supports
6922 "__declspec(notshared)"; we support the same thing via an
6923 attribute. */
6925 static tree
6926 arm_handle_notshared_attribute (tree *node,
6927 tree name ATTRIBUTE_UNUSED,
6928 tree args ATTRIBUTE_UNUSED,
6929 int flags ATTRIBUTE_UNUSED,
6930 bool *no_add_attrs)
6932 tree decl = TYPE_NAME (*node);
6934 if (decl)
6936 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
6937 DECL_VISIBILITY_SPECIFIED (decl) = 1;
6938 *no_add_attrs = false;
6940 return NULL_TREE;
6942 #endif
6944 /* This function returns true if a function with declaration FNDECL and type
6945 FNTYPE uses the stack to pass arguments or return variables and false
6946 otherwise. This is used for functions with the attributes
6947 'cmse_nonsecure_call' or 'cmse_nonsecure_entry' and this function will issue
6948 diagnostic messages if the stack is used. NAME is the name of the attribute
6949 used. */
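/* Example of a rejected signature (illustrative): a cmse_nonsecure_entry
   function taking five int arguments is diagnosed, because only r0-r3
   are available and the fifth argument would have to be passed on the
   stack.  */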
6951 static bool
6952 cmse_func_args_or_return_in_stack (tree fndecl, tree name, tree fntype)
6954 function_args_iterator args_iter;
6955 CUMULATIVE_ARGS args_so_far_v;
6956 cumulative_args_t args_so_far;
6957 bool first_param = true;
6958 tree arg_type, prev_arg_type = NULL_TREE, ret_type;
6960 /* Error out if any argument is passed on the stack. */
6961 arm_init_cumulative_args (&args_so_far_v, fntype, NULL_RTX, fndecl);
6962 args_so_far = pack_cumulative_args (&args_so_far_v);
6963 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
6965 rtx arg_rtx;
6966 machine_mode arg_mode = TYPE_MODE (arg_type);
6968 prev_arg_type = arg_type;
6969 if (VOID_TYPE_P (arg_type))
6970 continue;
6972 if (!first_param)
6973 arm_function_arg_advance (args_so_far, arg_mode, arg_type, true);
6974 arg_rtx = arm_function_arg (args_so_far, arg_mode, arg_type, true);
6975 if (!arg_rtx
6976 || arm_arg_partial_bytes (args_so_far, arg_mode, arg_type, true))
6978 error ("%qE attribute not available to functions with arguments "
6979 "passed on the stack", name);
6980 return true;
6982 first_param = false;
6985 /* Error out for variadic functions since we cannot control how many
6986 arguments will be passed and thus stack could be used. stdarg_p () is not
6987 used for the checking to avoid browsing arguments twice. */
6988 if (prev_arg_type != NULL_TREE && !VOID_TYPE_P (prev_arg_type))
6990 error ("%qE attribute not available to functions with variable number "
6991 "of arguments", name);
6992 return true;
6995 /* Error out if return value is passed on the stack. */
6996 ret_type = TREE_TYPE (fntype);
6997 if (arm_return_in_memory (ret_type, fntype))
6999 error ("%qE attribute not available to functions that return value on "
7000 "the stack", name);
7001 return true;
7003 return false;
7006 /* Called upon detection of the use of the cmse_nonsecure_entry attribute, this
7007 function will check whether the attribute is allowed here and will add the
7008 attribute to the function declaration tree or otherwise issue a warning. */
7010 static tree
7011 arm_handle_cmse_nonsecure_entry (tree *node, tree name,
7012 tree /* args */,
7013 int /* flags */,
7014 bool *no_add_attrs)
7016 tree fndecl;
7018 if (!use_cmse)
7020 *no_add_attrs = true;
7021 warning (OPT_Wattributes, "%qE attribute ignored without %<-mcmse%> option",
7022 name);
7023 return NULL_TREE;
7026 /* Ignore attribute for function types. */
7027 if (TREE_CODE (*node) != FUNCTION_DECL)
7029 warning (OPT_Wattributes, "%qE attribute only applies to functions",
7030 name);
7031 *no_add_attrs = true;
7032 return NULL_TREE;
7035 fndecl = *node;
7037 /* Warn for static linkage functions. */
7038 if (!TREE_PUBLIC (fndecl))
7040 warning (OPT_Wattributes, "%qE attribute has no effect on functions "
7041 "with static linkage", name);
7042 *no_add_attrs = true;
7043 return NULL_TREE;
7046 *no_add_attrs |= cmse_func_args_or_return_in_stack (fndecl, name,
7047 TREE_TYPE (fndecl));
7048 return NULL_TREE;
7052 /* Called upon detection of the use of the cmse_nonsecure_call attribute, this
7053 function will check whether the attribute is allowed here and will add the
7054 attribute to the function type tree or otherwise issue a diagnostic. The
7055 reason we check this at declaration time is to only allow the use of the
7056 attribute with declarations of function pointers and not function
7057 declarations. This function checks NODE is of the expected type and issues
7058 diagnostics otherwise using NAME. If it is not of the expected type
7059 *NO_ADD_ATTRS will be set to true. */
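/* Usage sketch (following the documented attribute syntax):
     int __attribute__ ((cmse_nonsecure_call)) (*ns_foo) (void);
   attaches the attribute to the function type behind the pointer;
   applying it directly to a plain function declaration is warned about
   and ignored below.  */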
7061 static tree
7062 arm_handle_cmse_nonsecure_call (tree *node, tree name,
7063 tree /* args */,
7064 int /* flags */,
7065 bool *no_add_attrs)
7067 tree decl = NULL_TREE, fntype = NULL_TREE;
7068 tree type;
7070 if (!use_cmse)
7072 *no_add_attrs = true;
7073 warning (OPT_Wattributes, "%qE attribute ignored without %<-mcmse%> option",
7074 name);
7075 return NULL_TREE;
7078 if (TREE_CODE (*node) == VAR_DECL || TREE_CODE (*node) == TYPE_DECL)
7080 decl = *node;
7081 fntype = TREE_TYPE (decl);
7084 while (fntype != NULL_TREE && TREE_CODE (fntype) == POINTER_TYPE)
7085 fntype = TREE_TYPE (fntype);
7087 if (!decl || TREE_CODE (fntype) != FUNCTION_TYPE)
7089 warning (OPT_Wattributes, "%qE attribute only applies to base type of a "
7090 "function pointer", name);
7091 *no_add_attrs = true;
7092 return NULL_TREE;
7095 *no_add_attrs |= cmse_func_args_or_return_in_stack (NULL, name, fntype);
7097 if (*no_add_attrs)
7098 return NULL_TREE;
7100 /* Prevent trees being shared among function types with and without
7101 cmse_nonsecure_call attribute. */
7102 type = TREE_TYPE (decl);
7104 type = build_distinct_type_copy (type);
7105 TREE_TYPE (decl) = type;
7106 fntype = type;
7108 while (TREE_CODE (fntype) != FUNCTION_TYPE)
7110 type = fntype;
7111 fntype = TREE_TYPE (fntype);
7112 fntype = build_distinct_type_copy (fntype);
7113 TREE_TYPE (type) = fntype;
7116 /* Construct a type attribute and add it to the function type. */
7117 tree attrs = tree_cons (get_identifier ("cmse_nonsecure_call"), NULL_TREE,
7118 TYPE_ATTRIBUTES (fntype));
7119 TYPE_ATTRIBUTES (fntype) = attrs;
7120 return NULL_TREE;
7123 /* Return 0 if the attributes for two types are incompatible, 1 if they
7124 are compatible, and 2 if they are nearly compatible (which causes a
7125 warning to be generated). */
7126 static int
7127 arm_comp_type_attributes (const_tree type1, const_tree type2)
7129 int l1, l2, s1, s2;
7131 /* Check for mismatch of non-default calling convention. */
7132 if (TREE_CODE (type1) != FUNCTION_TYPE)
7133 return 1;
7135 /* Check for mismatched call attributes. */
7136 l1 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type1)) != NULL;
7137 l2 = lookup_attribute ("long_call", TYPE_ATTRIBUTES (type2)) != NULL;
7138 s1 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type1)) != NULL;
7139 s2 = lookup_attribute ("short_call", TYPE_ATTRIBUTES (type2)) != NULL;
7141 /* Only bother to check if an attribute is defined. */
7142 if (l1 | l2 | s1 | s2)
7144 /* If one type has an attribute, the other must have the same attribute. */
7145 if ((l1 != l2) || (s1 != s2))
7146 return 0;
7148 /* Disallow mixed attributes. */
7149 if ((l1 & s2) || (l2 & s1))
7150 return 0;
7153 /* Check for mismatched ISR attribute. */
7154 l1 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type1)) != NULL;
7155 if (! l1)
7156 l1 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type1)) != NULL;
7157 l2 = lookup_attribute ("isr", TYPE_ATTRIBUTES (type2)) != NULL;
7158 if (! l2)
7159 l2 = lookup_attribute ("interrupt", TYPE_ATTRIBUTES (type2)) != NULL;
7160 if (l1 != l2)
7161 return 0;
7163 l1 = lookup_attribute ("cmse_nonsecure_call",
7164 TYPE_ATTRIBUTES (type1)) != NULL;
7165 l2 = lookup_attribute ("cmse_nonsecure_call",
7166 TYPE_ATTRIBUTES (type2)) != NULL;
7168 if (l1 != l2)
7169 return 0;
7171 return 1;
7174 /* Assigns default attributes to newly defined type. This is used to
7175 set short_call/long_call attributes for function types of
7176 functions defined inside corresponding #pragma scopes. */
7177 static void
7178 arm_set_default_type_attributes (tree type)
7180 /* Add __attribute__ ((long_call)) to all functions, when
7181 inside #pragma long_calls or __attribute__ ((short_call)),
7182 when inside #pragma no_long_calls. */
7183 if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
7185 tree type_attr_list, attr_name;
7186 type_attr_list = TYPE_ATTRIBUTES (type);
7188 if (arm_pragma_long_calls == LONG)
7189 attr_name = get_identifier ("long_call");
7190 else if (arm_pragma_long_calls == SHORT)
7191 attr_name = get_identifier ("short_call");
7192 else
7193 return;
7195 type_attr_list = tree_cons (attr_name, NULL_TREE, type_attr_list);
7196 TYPE_ATTRIBUTES (type) = type_attr_list;
7200 /* Return true if DECL is known to be linked into section SECTION. */
7202 static bool
7203 arm_function_in_section_p (tree decl, section *section)
7205 /* We can only be certain about the prevailing symbol definition. */
7206 if (!decl_binds_to_current_def_p (decl))
7207 return false;
7209 /* If DECL_SECTION_NAME is set, assume it is trustworthy. */
7210 if (!DECL_SECTION_NAME (decl))
7212 /* Make sure that we will not create a unique section for DECL. */
7213 if (flag_function_sections || DECL_COMDAT_GROUP (decl))
7214 return false;
7217 return function_section (decl) == section;
7220 /* Return nonzero if a 32-bit "long_call" should be generated for
7221 a call from the current function to DECL. We generate a long_call
7222 if the function:
7224 a. has an __attribute__ ((long_call))
7225 or b. is within the scope of a #pragma long_calls
7226 or c. the -mlong-calls command line switch has been specified
7228 However we do not generate a long call if the function:
7230 d. has an __attribute__ ((short_call))
7231 or e. is inside the scope of a #pragma no_long_calls
7232 or f. is defined in the same section as the current function. */
7234 bool
7235 arm_is_long_call_p (tree decl)
7237 tree attrs;
7239 if (!decl)
7240 return TARGET_LONG_CALLS;
7242 attrs = TYPE_ATTRIBUTES (TREE_TYPE (decl));
7243 if (lookup_attribute ("short_call", attrs))
7244 return false;
7246 /* For "f", be conservative, and only cater for cases in which the
7247 whole of the current function is placed in the same section. */
7248 if (!flag_reorder_blocks_and_partition
7249 && TREE_CODE (decl) == FUNCTION_DECL
7250 && arm_function_in_section_p (decl, current_function_section ()))
7251 return false;
7253 if (lookup_attribute ("long_call", attrs))
7254 return true;
7256 return TARGET_LONG_CALLS;
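/* Worked example (added; not part of arm.c) of the criteria above, assuming
   -mlong-calls on the command line (criterion c) and default section layout:

       extern void far_fn (void);                                -- long call
       extern void near_fn (void) __attribute__ ((short_call));  -- direct BL (criterion d)
       static void local_fn (void) {}
       void caller (void) { far_fn (); near_fn (); local_fn (); }

   local_fn is emitted in the same section as caller, so criterion f also
   keeps that call a plain BL.  */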
7259 /* Return nonzero if it is ok to make a tail-call to DECL. */
7260 static bool
7261 arm_function_ok_for_sibcall (tree decl, tree exp)
7263 unsigned long func_type;
7265 if (cfun->machine->sibcall_blocked)
7266 return false;
7268 /* Never tailcall something if we are generating code for Thumb-1. */
7269 if (TARGET_THUMB1)
7270 return false;
7272 /* The PIC register is live on entry to VxWorks PLT entries, so we
7273 must make the call before restoring the PIC register. */
7274 if (TARGET_VXWORKS_RTP && flag_pic && decl && !targetm.binds_local_p (decl))
7275 return false;
7277 /* ??? Cannot tail-call to long calls with APCS frame and VFP, because IP
7278 may be used both as target of the call and base register for restoring
7279 the VFP registers */
7280 if (TARGET_APCS_FRAME && TARGET_ARM
7281 && TARGET_HARD_FLOAT
7282 && decl && arm_is_long_call_p (decl))
7283 return false;
7285 /* If we are interworking and the function is not declared static
7286 then we can't tail-call it unless we know that it exists in this
7287 compilation unit (since it might be a Thumb routine). */
7288 if (TARGET_INTERWORK && decl && TREE_PUBLIC (decl)
7289 && !TREE_ASM_WRITTEN (decl))
7290 return false;
7292 func_type = arm_current_func_type ();
7293 /* Never tailcall from an ISR routine - it needs a special exit sequence. */
7294 if (IS_INTERRUPT (func_type))
7295 return false;
7297 /* ARMv8-M non-secure entry functions need to return with bxns which is only
7298 generated for entry functions themselves. */
7299 if (IS_CMSE_ENTRY (arm_current_func_type ()))
7300 return false;
7302 /* We do not allow ARMv8-M non-secure calls to be turned into sibling calls,
7303 this would complicate matters for later code generation. */
7304 if (TREE_CODE (exp) == CALL_EXPR)
7306 tree fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
7307 if (lookup_attribute ("cmse_nonsecure_call", TYPE_ATTRIBUTES (fntype)))
7308 return false;
7311 if (!VOID_TYPE_P (TREE_TYPE (DECL_RESULT (cfun->decl))))
7313 /* Check that the return value locations are the same. For
7314 example that we aren't returning a value from the sibling in
7315 a VFP register but then need to transfer it to a core
7316 register. */
7317 rtx a, b;
7318 tree decl_or_type = decl;
7320 /* If it is an indirect function pointer, get the function type. */
7321 if (!decl)
7322 decl_or_type = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
7324 a = arm_function_value (TREE_TYPE (exp), decl_or_type, false);
7325 b = arm_function_value (TREE_TYPE (DECL_RESULT (cfun->decl)),
7326 cfun->decl, false);
7327 if (!rtx_equal_p (a, b))
7328 return false;
7331 /* Never tailcall if function may be called with a misaligned SP. */
7332 if (IS_STACKALIGN (func_type))
7333 return false;
7335 /* The AAPCS says that, on bare-metal, calls to unresolved weak
7336 references should become a NOP. Don't convert such calls into
7337 sibling calls. */
7338 if (TARGET_AAPCS_BASED
7339 && arm_abi == ARM_ABI_AAPCS
7340 && decl
7341 && DECL_WEAK (decl))
7342 return false;
7344 /* We cannot do a tailcall for an indirect call by descriptor if all the
7345 argument registers are used because the only register left to load the
7346 address is IP and it will already contain the static chain. */
7347 if (!decl && CALL_EXPR_BY_DESCRIPTOR (exp) && !flag_trampolines)
7349 tree fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
7350 CUMULATIVE_ARGS cum;
7351 cumulative_args_t cum_v;
7353 arm_init_cumulative_args (&cum, fntype, NULL_RTX, NULL_TREE);
7354 cum_v = pack_cumulative_args (&cum);
7356 for (tree t = TYPE_ARG_TYPES (fntype); t; t = TREE_CHAIN (t))
7358 tree type = TREE_VALUE (t);
7359 if (!VOID_TYPE_P (type))
7360 arm_function_arg_advance (cum_v, TYPE_MODE (type), type, true);
7363 if (!arm_function_arg (cum_v, SImode, integer_type_node, true))
7364 return false;
7367 /* Everything else is ok. */
7368 return true;
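/* Illustrative sketch (added; not part of arm.c), assuming -mcmse on an
   ARMv8-M target: a call in tail position through a cmse_nonsecure_call
   function pointer stays a normal call because of the check above.

       typedef void __attribute__ ((cmse_nonsecure_call)) ns_fn (void);
       void wrapper (ns_fn *f) { f (); }    -- call, never a sibcall
*/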
7372 /* Addressing mode support functions. */
7374 /* Return nonzero if X is a legitimate immediate operand when compiling
7375 for PIC. We know that X satisfies CONSTANT_P and flag_pic is true. */
7377 legitimate_pic_operand_p (rtx x)
7379 if (GET_CODE (x) == SYMBOL_REF
7380 || (GET_CODE (x) == CONST
7381 && GET_CODE (XEXP (x, 0)) == PLUS
7382 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF))
7383 return 0;
7385 return 1;
7388 /* Record that the current function needs a PIC register. Initialize
7389 cfun->machine->pic_reg if we have not already done so. */
7391 static void
7392 require_pic_register (void)
7394 /* A lot of the logic here is made obscure by the fact that this
7395 routine gets called as part of the rtx cost estimation process.
7396 We don't want those calls to affect any assumptions about the real
7397 function; and further, we can't call entry_of_function() until we
7398 start the real expansion process. */
7399 if (!crtl->uses_pic_offset_table)
7401 gcc_assert (can_create_pseudo_p ());
7402 if (arm_pic_register != INVALID_REGNUM
7403 && !(TARGET_THUMB1 && arm_pic_register > LAST_LO_REGNUM))
7405 if (!cfun->machine->pic_reg)
7406 cfun->machine->pic_reg = gen_rtx_REG (Pmode, arm_pic_register);
7408 /* Play games to avoid marking the function as needing pic
7409 if we are being called as part of the cost-estimation
7410 process. */
7411 if (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl)
7412 crtl->uses_pic_offset_table = 1;
7414 else
7416 rtx_insn *seq, *insn;
7418 if (!cfun->machine->pic_reg)
7419 cfun->machine->pic_reg = gen_reg_rtx (Pmode);
7421 /* Play games to avoid marking the function as needing pic
7422 if we are being called as part of the cost-estimation
7423 process. */
7424 if (current_ir_type () != IR_GIMPLE || currently_expanding_to_rtl)
7426 crtl->uses_pic_offset_table = 1;
7427 start_sequence ();
7429 if (TARGET_THUMB1 && arm_pic_register != INVALID_REGNUM
7430 && arm_pic_register > LAST_LO_REGNUM)
7431 emit_move_insn (cfun->machine->pic_reg,
7432 gen_rtx_REG (Pmode, arm_pic_register));
7433 else
7434 arm_load_pic_register (0UL);
7436 seq = get_insns ();
7437 end_sequence ();
7439 for (insn = seq; insn; insn = NEXT_INSN (insn))
7440 if (INSN_P (insn))
7441 INSN_LOCATION (insn) = prologue_location;
7443 /* We can be called during expansion of PHI nodes, where
7444 we can't yet emit instructions directly in the final
7445 insn stream. Queue the insns on the entry edge, they will
7446 be committed after everything else is expanded. */
7447 insert_insn_on_edge (seq,
7448 single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
7455 legitimize_pic_address (rtx orig, machine_mode mode, rtx reg)
7457 if (GET_CODE (orig) == SYMBOL_REF
7458 || GET_CODE (orig) == LABEL_REF)
7460 if (reg == 0)
7462 gcc_assert (can_create_pseudo_p ());
7463 reg = gen_reg_rtx (Pmode);
7466 /* VxWorks does not impose a fixed gap between segments; the run-time
7467 gap can be different from the object-file gap. We therefore can't
7468 use GOTOFF unless we are absolutely sure that the symbol is in the
7469 same segment as the GOT. Unfortunately, the flexibility of linker
7470 scripts means that we can't be sure of that in general, so assume
7471 that GOTOFF is never valid on VxWorks. */
7472 /* References to weak symbols cannot be resolved locally: they
7473 may be overridden by a non-weak definition at link time. */
7474 rtx_insn *insn;
7475 if ((GET_CODE (orig) == LABEL_REF
7476 || (GET_CODE (orig) == SYMBOL_REF
7477 && SYMBOL_REF_LOCAL_P (orig)
7478 && (SYMBOL_REF_DECL (orig)
7479 ? !DECL_WEAK (SYMBOL_REF_DECL (orig)) : 1)))
7480 && NEED_GOT_RELOC
7481 && arm_pic_data_is_text_relative)
7482 insn = arm_pic_static_addr (orig, reg);
7483 else
7485 rtx pat;
7486 rtx mem;
7488 /* If this function doesn't have a pic register, create one now. */
7489 require_pic_register ();
7491 pat = gen_calculate_pic_address (reg, cfun->machine->pic_reg, orig);
7493 /* Make the MEM as close to a constant as possible. */
7494 mem = SET_SRC (pat);
7495 gcc_assert (MEM_P (mem) && !MEM_VOLATILE_P (mem));
7496 MEM_READONLY_P (mem) = 1;
7497 MEM_NOTRAP_P (mem) = 1;
7499 insn = emit_insn (pat);
7502 /* Put a REG_EQUAL note on this insn, so that it can be optimized
7503 by loop. */
7504 set_unique_reg_note (insn, REG_EQUAL, orig);
7506 return reg;
7508 else if (GET_CODE (orig) == CONST)
7510 rtx base, offset;
7512 if (GET_CODE (XEXP (orig, 0)) == PLUS
7513 && XEXP (XEXP (orig, 0), 0) == cfun->machine->pic_reg)
7514 return orig;
7516 /* Handle the case where we have: const (UNSPEC_TLS). */
7517 if (GET_CODE (XEXP (orig, 0)) == UNSPEC
7518 && XINT (XEXP (orig, 0), 1) == UNSPEC_TLS)
7519 return orig;
7521 /* Handle the case where we have:
7522 const (plus (UNSPEC_TLS) (ADDEND)). The ADDEND must be a
7523 CONST_INT. */
7524 if (GET_CODE (XEXP (orig, 0)) == PLUS
7525 && GET_CODE (XEXP (XEXP (orig, 0), 0)) == UNSPEC
7526 && XINT (XEXP (XEXP (orig, 0), 0), 1) == UNSPEC_TLS)
7528 gcc_assert (CONST_INT_P (XEXP (XEXP (orig, 0), 1)));
7529 return orig;
7532 if (reg == 0)
7534 gcc_assert (can_create_pseudo_p ());
7535 reg = gen_reg_rtx (Pmode);
7538 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
7540 base = legitimize_pic_address (XEXP (XEXP (orig, 0), 0), Pmode, reg);
7541 offset = legitimize_pic_address (XEXP (XEXP (orig, 0), 1), Pmode,
7542 base == reg ? 0 : reg);
7544 if (CONST_INT_P (offset))
7546 /* The base register doesn't really matter, we only want to
7547 test the index for the appropriate mode. */
7548 if (!arm_legitimate_index_p (mode, offset, SET, 0))
7550 gcc_assert (can_create_pseudo_p ());
7551 offset = force_reg (Pmode, offset);
7554 if (CONST_INT_P (offset))
7555 return plus_constant (Pmode, base, INTVAL (offset));
7558 if (GET_MODE_SIZE (mode) > 4
7559 && (GET_MODE_CLASS (mode) == MODE_INT
7560 || TARGET_SOFT_FLOAT))
7562 emit_insn (gen_addsi3 (reg, base, offset));
7563 return reg;
7566 return gen_rtx_PLUS (Pmode, base, offset);
7569 return orig;
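/* Usage sketch (added; not part of arm.c): with -fpic, a reference to an
   external global such as "extern int g;" is rewritten here into a GOT load,
   conceptually

       (set (reg t) (mem (plus pic_reg <GOT offset of g>)))

   with the MEM marked read-only and non-trapping, whereas a local, non-weak
   symbol on a target with text-relative PIC data is materialised
   PC-relatively via arm_pic_static_addr instead.  */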
7573 /* Find a spare register to use during the prolog of a function. */
7575 static int
7576 thumb_find_work_register (unsigned long pushed_regs_mask)
7578 int reg;
7580 /* Check the argument registers first as these are call-used. The
7581 register allocation order means that sometimes r3 might be used
7582 but earlier argument registers might not, so check them all. */
7583 for (reg = LAST_ARG_REGNUM; reg >= 0; reg --)
7584 if (!df_regs_ever_live_p (reg))
7585 return reg;
7587 /* Before going on to check the call-saved registers we can try a couple
7588 more ways of deducing that r3 is available. The first is when we are
7589 pushing anonymous arguments onto the stack and we have less than 4
7590 registers worth of fixed arguments(*). In this case r3 will be part of
7591 the variable argument list and so we can be sure that it will be
7592 pushed right at the start of the function. Hence it will be available
7593 for the rest of the prologue.
7594 (*): i.e. crtl->args.pretend_args_size is greater than 0. */
7595 if (cfun->machine->uses_anonymous_args
7596 && crtl->args.pretend_args_size > 0)
7597 return LAST_ARG_REGNUM;
7599 /* The other case is when we have fixed arguments but less than 4 registers
7600 worth. In this case r3 might be used in the body of the function, but
7601 it is not being used to convey an argument into the function. In theory
7602 we could just check crtl->args.size to see how many bytes are
7603 being passed in argument registers, but it seems that it is unreliable.
7604 Sometimes it will have the value 0 when in fact arguments are being
7605 passed. (See testcase execute/20021111-1.c for an example.) So we also
7606 check the args_info.nregs field. The problem with this field is
7607 that it makes no allowances for arguments that are passed to the
7608 function but which are not used. Hence we could miss an opportunity
7609 when a function has an unused argument in r3. But it is better to be
7610 safe than to be sorry. */
7611 if (! cfun->machine->uses_anonymous_args
7612 && crtl->args.size >= 0
7613 && crtl->args.size <= (LAST_ARG_REGNUM * UNITS_PER_WORD)
7614 && (TARGET_AAPCS_BASED
7615 ? crtl->args.info.aapcs_ncrn < 4
7616 : crtl->args.info.nregs < 4))
7617 return LAST_ARG_REGNUM;
7619 /* Otherwise look for a call-saved register that is going to be pushed. */
7620 for (reg = LAST_LO_REGNUM; reg > LAST_ARG_REGNUM; reg --)
7621 if (pushed_regs_mask & (1 << reg))
7622 return reg;
7624 if (TARGET_THUMB2)
7626 /* Thumb-2 can use high regs. */
7627 for (reg = FIRST_HI_REGNUM; reg < 15; reg ++)
7628 if (pushed_regs_mask & (1 << reg))
7629 return reg;
7631 /* Something went wrong - thumb_compute_save_reg_mask()
7632 should have arranged for a suitable register to be pushed. */
7633 gcc_unreachable ();
7636 static GTY(()) int pic_labelno;
7638 /* Generate code to load the PIC register. In thumb mode SCRATCH is a
7639 low register. */
7641 void
7642 arm_load_pic_register (unsigned long saved_regs ATTRIBUTE_UNUSED)
7644 rtx l1, labelno, pic_tmp, pic_rtx, pic_reg;
7646 if (crtl->uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
7647 return;
7649 gcc_assert (flag_pic);
7651 pic_reg = cfun->machine->pic_reg;
7652 if (TARGET_VXWORKS_RTP)
7654 pic_rtx = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_BASE);
7655 pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);
7656 emit_insn (gen_pic_load_addr_32bit (pic_reg, pic_rtx));
7658 emit_insn (gen_rtx_SET (pic_reg, gen_rtx_MEM (Pmode, pic_reg)));
7660 pic_tmp = gen_rtx_SYMBOL_REF (Pmode, VXWORKS_GOTT_INDEX);
7661 emit_insn (gen_pic_offset_arm (pic_reg, pic_reg, pic_tmp));
7663 else
7665 /* We use an UNSPEC rather than a LABEL_REF because this label
7666 never appears in the code stream. */
7668 labelno = GEN_INT (pic_labelno++);
7669 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
7670 l1 = gen_rtx_CONST (VOIDmode, l1);
7672 /* On the ARM the PC register contains 'dot + 8' at the time of the
7673 addition, on the Thumb it is 'dot + 4'. */
7674 pic_rtx = plus_constant (Pmode, l1, TARGET_ARM ? 8 : 4);
7675 pic_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, pic_rtx),
7676 UNSPEC_GOTSYM_OFF);
7677 pic_rtx = gen_rtx_CONST (Pmode, pic_rtx);
7679 if (TARGET_32BIT)
7681 emit_insn (gen_pic_load_addr_unified (pic_reg, pic_rtx, labelno));
7683 else /* TARGET_THUMB1 */
7685 if (arm_pic_register != INVALID_REGNUM
7686 && REGNO (pic_reg) > LAST_LO_REGNUM)
7688 /* We will have pushed the pic register, so we should always be
7689 able to find a work register. */
7690 pic_tmp = gen_rtx_REG (SImode,
7691 thumb_find_work_register (saved_regs));
7692 emit_insn (gen_pic_load_addr_thumb1 (pic_tmp, pic_rtx));
7693 emit_insn (gen_movsi (pic_offset_table_rtx, pic_tmp));
7694 emit_insn (gen_pic_add_dot_plus_four (pic_reg, pic_reg, labelno));
7696 else if (arm_pic_register != INVALID_REGNUM
7697 && arm_pic_register > LAST_LO_REGNUM
7698 && REGNO (pic_reg) <= LAST_LO_REGNUM)
7700 emit_insn (gen_pic_load_addr_unified (pic_reg, pic_rtx, labelno));
7701 emit_move_insn (gen_rtx_REG (Pmode, arm_pic_register), pic_reg);
7702 emit_use (gen_rtx_REG (Pmode, arm_pic_register));
7704 else
7705 emit_insn (gen_pic_load_addr_unified (pic_reg, pic_rtx, labelno));
7709 /* Need to emit this whether or not we obey regdecls,
7710 since setjmp/longjmp can cause life info to screw up. */
7711 emit_use (pic_reg);
7714 /* Generate code to load the address of a static var when flag_pic is set. */
7715 static rtx_insn *
7716 arm_pic_static_addr (rtx orig, rtx reg)
7718 rtx l1, labelno, offset_rtx;
7720 gcc_assert (flag_pic);
7722 /* We use an UNSPEC rather than a LABEL_REF because this label
7723 never appears in the code stream. */
7724 labelno = GEN_INT (pic_labelno++);
7725 l1 = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
7726 l1 = gen_rtx_CONST (VOIDmode, l1);
7728 /* On the ARM the PC register contains 'dot + 8' at the time of the
7729 addition, on the Thumb it is 'dot + 4'. */
7730 offset_rtx = plus_constant (Pmode, l1, TARGET_ARM ? 8 : 4);
7731 offset_rtx = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, orig, offset_rtx),
7732 UNSPEC_SYMBOL_OFFSET);
7733 offset_rtx = gen_rtx_CONST (Pmode, offset_rtx);
7735 return emit_insn (gen_pic_load_addr_unified (reg, offset_rtx, labelno));
7738 /* Return nonzero if X is valid as an ARM state addressing register. */
7739 static int
7740 arm_address_register_rtx_p (rtx x, int strict_p)
7742 int regno;
7744 if (!REG_P (x))
7745 return 0;
7747 regno = REGNO (x);
7749 if (strict_p)
7750 return ARM_REGNO_OK_FOR_BASE_P (regno);
7752 return (regno <= LAST_ARM_REGNUM
7753 || regno >= FIRST_PSEUDO_REGISTER
7754 || regno == FRAME_POINTER_REGNUM
7755 || regno == ARG_POINTER_REGNUM);
7758 /* Return TRUE if this rtx is the difference of a symbol and a label,
7759 and will reduce to a PC-relative relocation in the object file.
7760 Expressions like this can be left alone when generating PIC, rather
7761 than forced through the GOT. */
7762 static int
7763 pcrel_constant_p (rtx x)
7765 if (GET_CODE (x) == MINUS)
7766 return symbol_mentioned_p (XEXP (x, 0)) && label_mentioned_p (XEXP (x, 1));
7768 return FALSE;
7771 /* Return true if X will surely end up in an index register after next
7772 splitting pass. */
7773 static bool
7774 will_be_in_index_register (const_rtx x)
7776 /* arm.md: calculate_pic_address will split this into a register. */
7777 return GET_CODE (x) == UNSPEC && (XINT (x, 1) == UNSPEC_PIC_SYM);
7780 /* Return nonzero if X is a valid ARM state address operand. */
7782 arm_legitimate_address_outer_p (machine_mode mode, rtx x, RTX_CODE outer,
7783 int strict_p)
7785 bool use_ldrd;
7786 enum rtx_code code = GET_CODE (x);
7788 if (arm_address_register_rtx_p (x, strict_p))
7789 return 1;
7791 use_ldrd = (TARGET_LDRD
7792 && (mode == DImode || mode == DFmode));
7794 if (code == POST_INC || code == PRE_DEC
7795 || ((code == PRE_INC || code == POST_DEC)
7796 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
7797 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
7799 else if ((code == POST_MODIFY || code == PRE_MODIFY)
7800 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
7801 && GET_CODE (XEXP (x, 1)) == PLUS
7802 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7804 rtx addend = XEXP (XEXP (x, 1), 1);
7806 /* Don't allow ldrd post increment by register because it's hard
7807 to fixup invalid register choices. */
7808 if (use_ldrd
7809 && GET_CODE (x) == POST_MODIFY
7810 && REG_P (addend))
7811 return 0;
7813 return ((use_ldrd || GET_MODE_SIZE (mode) <= 4)
7814 && arm_legitimate_index_p (mode, addend, outer, strict_p));
7817 /* After reload constants split into minipools will have addresses
7818 from a LABEL_REF. */
7819 else if (reload_completed
7820 && (code == LABEL_REF
7821 || (code == CONST
7822 && GET_CODE (XEXP (x, 0)) == PLUS
7823 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
7824 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))))
7825 return 1;
7827 else if (mode == TImode || (TARGET_NEON && VALID_NEON_STRUCT_MODE (mode)))
7828 return 0;
7830 else if (code == PLUS)
7832 rtx xop0 = XEXP (x, 0);
7833 rtx xop1 = XEXP (x, 1);
7835 return ((arm_address_register_rtx_p (xop0, strict_p)
7836 && ((CONST_INT_P (xop1)
7837 && arm_legitimate_index_p (mode, xop1, outer, strict_p))
7838 || (!strict_p && will_be_in_index_register (xop1))))
7839 || (arm_address_register_rtx_p (xop1, strict_p)
7840 && arm_legitimate_index_p (mode, xop0, outer, strict_p)));
7843 #if 0
7844 /* Reload currently can't handle MINUS, so disable this for now */
7845 else if (GET_CODE (x) == MINUS)
7847 rtx xop0 = XEXP (x, 0);
7848 rtx xop1 = XEXP (x, 1);
7850 return (arm_address_register_rtx_p (xop0, strict_p)
7851 && arm_legitimate_index_p (mode, xop1, outer, strict_p));
7853 #endif
7855 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
7856 && code == SYMBOL_REF
7857 && CONSTANT_POOL_ADDRESS_P (x)
7858 && ! (flag_pic
7859 && symbol_mentioned_p (get_pool_constant (x))
7860 && ! pcrel_constant_p (get_pool_constant (x))))
7861 return 1;
7863 return 0;
7866 /* Return true if we can avoid creating a constant pool entry for x. */
7867 static bool
7868 can_avoid_literal_pool_for_label_p (rtx x)
7870 /* Normally we can assign constant values to target registers without
7871 the help of the constant pool. But there are cases where we have to use the
7872 constant pool, such as:
7873 1) assigning a label to a register;
7874 2) sign-extending an 8-bit value to 32 bits and then assigning it to a register.
7876 Constant pool access in format:
7877 (set (reg r0) (mem (symbol_ref (".LC0"))))
7878 will cause the use of literal pool (later in function arm_reorg).
7879 So here we mark such a format as invalid, and the compiler
7880 will adjust it into:
7881 (set (reg r0) (symbol_ref (".LC0")))
7882 (set (reg r0) (mem (reg r0))).
7883 No extra register is required, and (mem (reg r0)) won't cause the use
7884 of literal pools. */
7885 if (arm_disable_literal_pool && GET_CODE (x) == SYMBOL_REF
7886 && CONSTANT_POOL_ADDRESS_P (x))
7887 return 1;
7888 return 0;
7892 /* Return nonzero if X is a valid Thumb-2 address operand. */
7893 static int
7894 thumb2_legitimate_address_p (machine_mode mode, rtx x, int strict_p)
7896 bool use_ldrd;
7897 enum rtx_code code = GET_CODE (x);
7899 if (arm_address_register_rtx_p (x, strict_p))
7900 return 1;
7902 use_ldrd = (TARGET_LDRD
7903 && (mode == DImode || mode == DFmode));
7905 if (code == POST_INC || code == PRE_DEC
7906 || ((code == PRE_INC || code == POST_DEC)
7907 && (use_ldrd || GET_MODE_SIZE (mode) <= 4)))
7908 return arm_address_register_rtx_p (XEXP (x, 0), strict_p);
7910 else if ((code == POST_MODIFY || code == PRE_MODIFY)
7911 && arm_address_register_rtx_p (XEXP (x, 0), strict_p)
7912 && GET_CODE (XEXP (x, 1)) == PLUS
7913 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7915 /* Thumb-2 only has autoincrement by constant. */
7916 rtx addend = XEXP (XEXP (x, 1), 1);
7917 HOST_WIDE_INT offset;
7919 if (!CONST_INT_P (addend))
7920 return 0;
7922 offset = INTVAL(addend);
7923 if (GET_MODE_SIZE (mode) <= 4)
7924 return (offset > -256 && offset < 256);
7926 return (use_ldrd && offset > -1024 && offset < 1024
7927 && (offset & 3) == 0);
7930 /* After reload constants split into minipools will have addresses
7931 from a LABEL_REF. */
7932 else if (reload_completed
7933 && (code == LABEL_REF
7934 || (code == CONST
7935 && GET_CODE (XEXP (x, 0)) == PLUS
7936 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
7937 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))))
7938 return 1;
7940 else if (mode == TImode || (TARGET_NEON && VALID_NEON_STRUCT_MODE (mode)))
7941 return 0;
7943 else if (code == PLUS)
7945 rtx xop0 = XEXP (x, 0);
7946 rtx xop1 = XEXP (x, 1);
7948 return ((arm_address_register_rtx_p (xop0, strict_p)
7949 && (thumb2_legitimate_index_p (mode, xop1, strict_p)
7950 || (!strict_p && will_be_in_index_register (xop1))))
7951 || (arm_address_register_rtx_p (xop1, strict_p)
7952 && thumb2_legitimate_index_p (mode, xop0, strict_p)));
7955 else if (can_avoid_literal_pool_for_label_p (x))
7956 return 0;
7958 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
7959 && code == SYMBOL_REF
7960 && CONSTANT_POOL_ADDRESS_P (x)
7961 && ! (flag_pic
7962 && symbol_mentioned_p (get_pool_constant (x))
7963 && ! pcrel_constant_p (get_pool_constant (x))))
7964 return 1;
7966 return 0;
7969 /* Return nonzero if INDEX is valid for an address index operand in
7970 ARM state. */
7971 static int
7972 arm_legitimate_index_p (machine_mode mode, rtx index, RTX_CODE outer,
7973 int strict_p)
7975 HOST_WIDE_INT range;
7976 enum rtx_code code = GET_CODE (index);
7978 /* Standard coprocessor addressing modes. */
7979 if (TARGET_HARD_FLOAT
7980 && (mode == SFmode || mode == DFmode))
7981 return (code == CONST_INT && INTVAL (index) < 1024
7982 && INTVAL (index) > -1024
7983 && (INTVAL (index) & 3) == 0);
7985 /* For quad modes, we restrict the constant offset to be slightly less
7986 than what the instruction format permits. We do this because for
7987 quad mode moves, we will actually decompose them into two separate
7988 double-mode reads or writes. INDEX must therefore be a valid
7989 (double-mode) offset and so should INDEX+8. */
7990 if (TARGET_NEON && VALID_NEON_QREG_MODE (mode))
7991 return (code == CONST_INT
7992 && INTVAL (index) < 1016
7993 && INTVAL (index) > -1024
7994 && (INTVAL (index) & 3) == 0);
7996 /* We have no such constraint on double mode offsets, so we permit the
7997 full range of the instruction format. */
7998 if (TARGET_NEON && VALID_NEON_DREG_MODE (mode))
7999 return (code == CONST_INT
8000 && INTVAL (index) < 1024
8001 && INTVAL (index) > -1024
8002 && (INTVAL (index) & 3) == 0);
8004 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
8005 return (code == CONST_INT
8006 && INTVAL (index) < 1024
8007 && INTVAL (index) > -1024
8008 && (INTVAL (index) & 3) == 0);
8010 if (arm_address_register_rtx_p (index, strict_p)
8011 && (GET_MODE_SIZE (mode) <= 4))
8012 return 1;
8014 if (mode == DImode || mode == DFmode)
8016 if (code == CONST_INT)
8018 HOST_WIDE_INT val = INTVAL (index);
8020 /* Assume we emit ldrd or 2x ldr if !TARGET_LDRD.
8021 If vldr is selected it uses arm_coproc_mem_operand. */
8022 if (TARGET_LDRD)
8023 return val > -256 && val < 256;
8024 else
8025 return val > -4096 && val < 4092;
8028 return TARGET_LDRD && arm_address_register_rtx_p (index, strict_p);
8031 if (GET_MODE_SIZE (mode) <= 4
8032 && ! (arm_arch4
8033 && (mode == HImode
8034 || mode == HFmode
8035 || (mode == QImode && outer == SIGN_EXTEND))))
8037 if (code == MULT)
8039 rtx xiop0 = XEXP (index, 0);
8040 rtx xiop1 = XEXP (index, 1);
8042 return ((arm_address_register_rtx_p (xiop0, strict_p)
8043 && power_of_two_operand (xiop1, SImode))
8044 || (arm_address_register_rtx_p (xiop1, strict_p)
8045 && power_of_two_operand (xiop0, SImode)));
8047 else if (code == LSHIFTRT || code == ASHIFTRT
8048 || code == ASHIFT || code == ROTATERT)
8050 rtx op = XEXP (index, 1);
8052 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
8053 && CONST_INT_P (op)
8054 && INTVAL (op) > 0
8055 && INTVAL (op) <= 31);
8059 /* For ARM v4 we may be doing a sign-extend operation during the
8060 load. */
8061 if (arm_arch4)
8063 if (mode == HImode
8064 || mode == HFmode
8065 || (outer == SIGN_EXTEND && mode == QImode))
8066 range = 256;
8067 else
8068 range = 4096;
8070 else
8071 range = (mode == HImode || mode == HFmode) ? 4095 : 4096;
8073 return (code == CONST_INT
8074 && INTVAL (index) < range
8075 && INTVAL (index) > -range);
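/* Worked example (added; not part of arm.c): in ARM state with arm_arch4,
   an SImode access accepts immediate indices in (-4096, 4096), e.g.
   "ldr r0, [r1, #4095]"; an HImode access is limited to (-256, 256), e.g.
   "ldrh r0, [r1, #255]"; and a scaled register index such as
   "ldr r0, [r1, r2, lsl #2]" is accepted via the MULT/shift cases above
   (shift counts 1..31).  */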
8078 /* Return true if OP is a valid index scaling factor for Thumb-2 address
8079 index operand. i.e. 1, 2, 4 or 8. */
8080 static bool
8081 thumb2_index_mul_operand (rtx op)
8083 HOST_WIDE_INT val;
8085 if (!CONST_INT_P (op))
8086 return false;
8088 val = INTVAL(op);
8089 return (val == 1 || val == 2 || val == 4 || val == 8);
8092 /* Return nonzero if INDEX is a valid Thumb-2 address index operand. */
8093 static int
8094 thumb2_legitimate_index_p (machine_mode mode, rtx index, int strict_p)
8096 enum rtx_code code = GET_CODE (index);
8098 /* ??? Combine arm and thumb2 coprocessor addressing modes. */
8099 /* Standard coprocessor addressing modes. */
8100 if (TARGET_HARD_FLOAT
8101 && (mode == SFmode || mode == DFmode))
8102 return (code == CONST_INT && INTVAL (index) < 1024
8103 /* Thumb-2 allows only > -256 index range for its core register
8104 load/stores. Since we allow SF/DF in core registers, we have
8105 to use the intersection between -256~4096 (core) and -1024~1024
8106 (coprocessor). */
8107 && INTVAL (index) > -256
8108 && (INTVAL (index) & 3) == 0);
8110 if (TARGET_REALLY_IWMMXT && VALID_IWMMXT_REG_MODE (mode))
8112 /* For DImode assume values will usually live in core regs
8113 and only allow LDRD addressing modes. */
8114 if (!TARGET_LDRD || mode != DImode)
8115 return (code == CONST_INT
8116 && INTVAL (index) < 1024
8117 && INTVAL (index) > -1024
8118 && (INTVAL (index) & 3) == 0);
8121 /* For quad modes, we restrict the constant offset to be slightly less
8122 than what the instruction format permits. We do this because for
8123 quad mode moves, we will actually decompose them into two separate
8124 double-mode reads or writes. INDEX must therefore be a valid
8125 (double-mode) offset and so should INDEX+8. */
8126 if (TARGET_NEON && VALID_NEON_QREG_MODE (mode))
8127 return (code == CONST_INT
8128 && INTVAL (index) < 1016
8129 && INTVAL (index) > -1024
8130 && (INTVAL (index) & 3) == 0);
8132 /* We have no such constraint on double mode offsets, so we permit the
8133 full range of the instruction format. */
8134 if (TARGET_NEON && VALID_NEON_DREG_MODE (mode))
8135 return (code == CONST_INT
8136 && INTVAL (index) < 1024
8137 && INTVAL (index) > -1024
8138 && (INTVAL (index) & 3) == 0);
8140 if (arm_address_register_rtx_p (index, strict_p)
8141 && (GET_MODE_SIZE (mode) <= 4))
8142 return 1;
8144 if (mode == DImode || mode == DFmode)
8146 if (code == CONST_INT)
8148 HOST_WIDE_INT val = INTVAL (index);
8149 /* Thumb-2 ldrd only has reg+const addressing modes.
8150 Assume we emit ldrd or 2x ldr if !TARGET_LDRD.
8151 If vldr is selected it uses arm_coproc_mem_operand. */
8152 if (TARGET_LDRD)
8153 return IN_RANGE (val, -1020, 1020) && (val & 3) == 0;
8154 else
8155 return IN_RANGE (val, -255, 4095 - 4);
8157 else
8158 return 0;
8161 if (code == MULT)
8163 rtx xiop0 = XEXP (index, 0);
8164 rtx xiop1 = XEXP (index, 1);
8166 return ((arm_address_register_rtx_p (xiop0, strict_p)
8167 && thumb2_index_mul_operand (xiop1))
8168 || (arm_address_register_rtx_p (xiop1, strict_p)
8169 && thumb2_index_mul_operand (xiop0)));
8171 else if (code == ASHIFT)
8173 rtx op = XEXP (index, 1);
8175 return (arm_address_register_rtx_p (XEXP (index, 0), strict_p)
8176 && CONST_INT_P (op)
8177 && INTVAL (op) > 0
8178 && INTVAL (op) <= 3);
8181 return (code == CONST_INT
8182 && INTVAL (index) < 4096
8183 && INTVAL (index) > -256);
8186 /* Return nonzero if X is valid as a 16-bit Thumb state base register. */
8187 static int
8188 thumb1_base_register_rtx_p (rtx x, machine_mode mode, int strict_p)
8190 int regno;
8192 if (!REG_P (x))
8193 return 0;
8195 regno = REGNO (x);
8197 if (strict_p)
8198 return THUMB1_REGNO_MODE_OK_FOR_BASE_P (regno, mode);
8200 return (regno <= LAST_LO_REGNUM
8201 || regno > LAST_VIRTUAL_REGISTER
8202 || regno == FRAME_POINTER_REGNUM
8203 || (GET_MODE_SIZE (mode) >= 4
8204 && (regno == STACK_POINTER_REGNUM
8205 || regno >= FIRST_PSEUDO_REGISTER
8206 || x == hard_frame_pointer_rtx
8207 || x == arg_pointer_rtx)));
8210 /* Return nonzero if x is a legitimate index register. This is the case
8211 for any base register that can access a QImode object. */
8212 inline static int
8213 thumb1_index_register_rtx_p (rtx x, int strict_p)
8215 return thumb1_base_register_rtx_p (x, QImode, strict_p);
8218 /* Return nonzero if x is a legitimate 16-bit Thumb-state address.
8220 The AP may be eliminated to either the SP or the FP, so we use the
8221 least common denominator, e.g. SImode, and offsets from 0 to 64.
8223 ??? Verify whether the above is the right approach.
8225 ??? Also, the FP may be eliminated to the SP, so perhaps that
8226 needs special handling also.
8228 ??? Look at how the mips16 port solves this problem. It probably uses
8229 better ways to solve some of these problems.
8231 Although it is not incorrect, we don't accept QImode and HImode
8232 addresses based on the frame pointer or arg pointer until the
8233 reload pass starts. This is so that eliminating such addresses
8234 into stack based ones won't produce impossible code. */
8236 thumb1_legitimate_address_p (machine_mode mode, rtx x, int strict_p)
8238 if (TARGET_HAVE_MOVT && can_avoid_literal_pool_for_label_p (x))
8239 return 0;
8241 /* ??? Not clear if this is right. Experiment. */
8242 if (GET_MODE_SIZE (mode) < 4
8243 && !(reload_in_progress || reload_completed)
8244 && (reg_mentioned_p (frame_pointer_rtx, x)
8245 || reg_mentioned_p (arg_pointer_rtx, x)
8246 || reg_mentioned_p (virtual_incoming_args_rtx, x)
8247 || reg_mentioned_p (virtual_outgoing_args_rtx, x)
8248 || reg_mentioned_p (virtual_stack_dynamic_rtx, x)
8249 || reg_mentioned_p (virtual_stack_vars_rtx, x)))
8250 return 0;
8252 /* Accept any base register. SP only in SImode or larger. */
8253 else if (thumb1_base_register_rtx_p (x, mode, strict_p))
8254 return 1;
8256 /* This is PC relative data before arm_reorg runs. */
8257 else if (GET_MODE_SIZE (mode) >= 4 && CONSTANT_P (x)
8258 && GET_CODE (x) == SYMBOL_REF
8259 && CONSTANT_POOL_ADDRESS_P (x) && !flag_pic)
8260 return 1;
8262 /* This is PC relative data after arm_reorg runs. */
8263 else if ((GET_MODE_SIZE (mode) >= 4 || mode == HFmode)
8264 && reload_completed
8265 && (GET_CODE (x) == LABEL_REF
8266 || (GET_CODE (x) == CONST
8267 && GET_CODE (XEXP (x, 0)) == PLUS
8268 && GET_CODE (XEXP (XEXP (x, 0), 0)) == LABEL_REF
8269 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))))
8270 return 1;
8272 /* Post-inc indexing only supported for SImode and larger. */
8273 else if (GET_CODE (x) == POST_INC && GET_MODE_SIZE (mode) >= 4
8274 && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p))
8275 return 1;
8277 else if (GET_CODE (x) == PLUS)
8279 /* REG+REG address can be any two index registers. */
8280 /* We disallow FRAME+REG addressing since we know that FRAME
8281 will be replaced with STACK, and SP relative addressing only
8282 permits SP+OFFSET. */
8283 if (GET_MODE_SIZE (mode) <= 4
8284 && XEXP (x, 0) != frame_pointer_rtx
8285 && XEXP (x, 1) != frame_pointer_rtx
8286 && thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
8287 && (thumb1_index_register_rtx_p (XEXP (x, 1), strict_p)
8288 || (!strict_p && will_be_in_index_register (XEXP (x, 1)))))
8289 return 1;
8291 /* REG+const has 5-7 bit offset for non-SP registers. */
8292 else if ((thumb1_index_register_rtx_p (XEXP (x, 0), strict_p)
8293 || XEXP (x, 0) == arg_pointer_rtx)
8294 && CONST_INT_P (XEXP (x, 1))
8295 && thumb_legitimate_offset_p (mode, INTVAL (XEXP (x, 1))))
8296 return 1;
8298 /* REG+const has 10-bit offset for SP, but only SImode and
8299 larger is supported. */
8300 /* ??? Should probably check for DI/DFmode overflow here
8301 just like GO_IF_LEGITIMATE_OFFSET does. */
8302 else if (REG_P (XEXP (x, 0))
8303 && REGNO (XEXP (x, 0)) == STACK_POINTER_REGNUM
8304 && GET_MODE_SIZE (mode) >= 4
8305 && CONST_INT_P (XEXP (x, 1))
8306 && INTVAL (XEXP (x, 1)) >= 0
8307 && INTVAL (XEXP (x, 1)) + GET_MODE_SIZE (mode) <= 1024
8308 && (INTVAL (XEXP (x, 1)) & 3) == 0)
8309 return 1;
8311 else if (REG_P (XEXP (x, 0))
8312 && (REGNO (XEXP (x, 0)) == FRAME_POINTER_REGNUM
8313 || REGNO (XEXP (x, 0)) == ARG_POINTER_REGNUM
8314 || (REGNO (XEXP (x, 0)) >= FIRST_VIRTUAL_REGISTER
8315 && REGNO (XEXP (x, 0))
8316 <= LAST_VIRTUAL_POINTER_REGISTER))
8317 && GET_MODE_SIZE (mode) >= 4
8318 && CONST_INT_P (XEXP (x, 1))
8319 && (INTVAL (XEXP (x, 1)) & 3) == 0)
8320 return 1;
8323 else if (GET_MODE_CLASS (mode) != MODE_FLOAT
8324 && GET_MODE_SIZE (mode) == 4
8325 && GET_CODE (x) == SYMBOL_REF
8326 && CONSTANT_POOL_ADDRESS_P (x)
8327 && ! (flag_pic
8328 && symbol_mentioned_p (get_pool_constant (x))
8329 && ! pcrel_constant_p (get_pool_constant (x))))
8330 return 1;
8332 return 0;
8335 /* Return nonzero if VAL can be used as an offset in a Thumb-state address
8336 instruction of mode MODE. */
8338 thumb_legitimate_offset_p (machine_mode mode, HOST_WIDE_INT val)
8340 switch (GET_MODE_SIZE (mode))
8342 case 1:
8343 return val >= 0 && val < 32;
8345 case 2:
8346 return val >= 0 && val < 64 && (val & 1) == 0;
8348 default:
8349 return (val >= 0
8350 && (val + GET_MODE_SIZE (mode)) <= 128
8351 && (val & 3) == 0);
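/* Worked example (added; not part of arm.c): the ranges above match the
   scaled 5-bit immediates of the 16-bit Thumb loads and stores -- byte
   accesses allow offsets 0..31, halfword accesses 0,2,..,62 and word (or
   larger) accesses 0,4,..,124.  So an SImode access at offset 124 is
   accepted (124 + 4 <= 128), offset 126 is rejected as misaligned, and
   offset 128 is out of range.  */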
8355 bool
8356 arm_legitimate_address_p (machine_mode mode, rtx x, bool strict_p)
8358 if (TARGET_ARM)
8359 return arm_legitimate_address_outer_p (mode, x, SET, strict_p);
8360 else if (TARGET_THUMB2)
8361 return thumb2_legitimate_address_p (mode, x, strict_p);
8362 else /* if (TARGET_THUMB1) */
8363 return thumb1_legitimate_address_p (mode, x, strict_p);
8366 /* Worker function for TARGET_PREFERRED_RELOAD_CLASS.
8368 Given an rtx X being reloaded into a reg required to be
8369 in class CLASS, return the class of reg to actually use.
8370 In general this is just CLASS, but for the Thumb core registers and
8371 immediate constants we prefer a LO_REGS class or a subset. */
8373 static reg_class_t
8374 arm_preferred_reload_class (rtx x ATTRIBUTE_UNUSED, reg_class_t rclass)
8376 if (TARGET_32BIT)
8377 return rclass;
8378 else
8380 if (rclass == GENERAL_REGS)
8381 return LO_REGS;
8382 else
8383 return rclass;
8387 /* Build the SYMBOL_REF for __tls_get_addr. */
8389 static GTY(()) rtx tls_get_addr_libfunc;
8391 static rtx
8392 get_tls_get_addr (void)
8394 if (!tls_get_addr_libfunc)
8395 tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr");
8396 return tls_get_addr_libfunc;
8400 arm_load_tp (rtx target)
8402 if (!target)
8403 target = gen_reg_rtx (SImode);
8405 if (TARGET_HARD_TP)
8407 /* Can return in any reg. */
8408 emit_insn (gen_load_tp_hard (target));
8410 else
8412 /* Always returned in r0. Immediately copy the result into a pseudo,
8413 otherwise other uses of r0 (e.g. setting up function arguments) may
8414 clobber the value. */
8416 rtx tmp;
8418 emit_insn (gen_load_tp_soft ());
8420 tmp = gen_rtx_REG (SImode, R0_REGNUM);
8421 emit_move_insn (target, tmp);
8423 return target;
8426 static rtx
8427 load_tls_operand (rtx x, rtx reg)
8429 rtx tmp;
8431 if (reg == NULL_RTX)
8432 reg = gen_reg_rtx (SImode);
8434 tmp = gen_rtx_CONST (SImode, x);
8436 emit_move_insn (reg, tmp);
8438 return reg;
8441 static rtx_insn *
8442 arm_call_tls_get_addr (rtx x, rtx reg, rtx *valuep, int reloc)
8444 rtx label, labelno, sum;
8446 gcc_assert (reloc != TLS_DESCSEQ);
8447 start_sequence ();
8449 labelno = GEN_INT (pic_labelno++);
8450 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
8451 label = gen_rtx_CONST (VOIDmode, label);
8453 sum = gen_rtx_UNSPEC (Pmode,
8454 gen_rtvec (4, x, GEN_INT (reloc), label,
8455 GEN_INT (TARGET_ARM ? 8 : 4)),
8456 UNSPEC_TLS);
8457 reg = load_tls_operand (sum, reg);
8459 if (TARGET_ARM)
8460 emit_insn (gen_pic_add_dot_plus_eight (reg, reg, labelno));
8461 else
8462 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
8464 *valuep = emit_library_call_value (get_tls_get_addr (), NULL_RTX,
8465 LCT_PURE, /* LCT_CONST? */
8466 Pmode, reg, Pmode);
8468 rtx_insn *insns = get_insns ();
8469 end_sequence ();
8471 return insns;
8474 static rtx
8475 arm_tls_descseq_addr (rtx x, rtx reg)
8477 rtx labelno = GEN_INT (pic_labelno++);
8478 rtx label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
8479 rtx sum = gen_rtx_UNSPEC (Pmode,
8480 gen_rtvec (4, x, GEN_INT (TLS_DESCSEQ),
8481 gen_rtx_CONST (VOIDmode, label),
8482 GEN_INT (!TARGET_ARM)),
8483 UNSPEC_TLS);
8484 rtx reg0 = load_tls_operand (sum, gen_rtx_REG (SImode, R0_REGNUM));
8486 emit_insn (gen_tlscall (x, labelno));
8487 if (!reg)
8488 reg = gen_reg_rtx (SImode);
8489 else
8490 gcc_assert (REGNO (reg) != R0_REGNUM);
8492 emit_move_insn (reg, reg0);
8494 return reg;
8498 legitimize_tls_address (rtx x, rtx reg)
8500 rtx dest, tp, label, labelno, sum, ret, eqv, addend;
8501 rtx_insn *insns;
8502 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
8504 switch (model)
8506 case TLS_MODEL_GLOBAL_DYNAMIC:
8507 if (TARGET_GNU2_TLS)
8509 reg = arm_tls_descseq_addr (x, reg);
8511 tp = arm_load_tp (NULL_RTX);
8513 dest = gen_rtx_PLUS (Pmode, tp, reg);
8515 else
8517 /* Original scheme */
8518 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_GD32);
8519 dest = gen_reg_rtx (Pmode);
8520 emit_libcall_block (insns, dest, ret, x);
8522 return dest;
8524 case TLS_MODEL_LOCAL_DYNAMIC:
8525 if (TARGET_GNU2_TLS)
8527 reg = arm_tls_descseq_addr (x, reg);
8529 tp = arm_load_tp (NULL_RTX);
8531 dest = gen_rtx_PLUS (Pmode, tp, reg);
8533 else
8535 insns = arm_call_tls_get_addr (x, reg, &ret, TLS_LDM32);
8537 /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
8538 share the LDM result with other LD model accesses. */
8539 eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const1_rtx),
8540 UNSPEC_TLS);
8541 dest = gen_reg_rtx (Pmode);
8542 emit_libcall_block (insns, dest, ret, eqv);
8544 /* Load the addend. */
8545 addend = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, x,
8546 GEN_INT (TLS_LDO32)),
8547 UNSPEC_TLS);
8548 addend = force_reg (SImode, gen_rtx_CONST (SImode, addend));
8549 dest = gen_rtx_PLUS (Pmode, dest, addend);
8551 return dest;
8553 case TLS_MODEL_INITIAL_EXEC:
8554 labelno = GEN_INT (pic_labelno++);
8555 label = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, labelno), UNSPEC_PIC_LABEL);
8556 label = gen_rtx_CONST (VOIDmode, label);
8557 sum = gen_rtx_UNSPEC (Pmode,
8558 gen_rtvec (4, x, GEN_INT (TLS_IE32), label,
8559 GEN_INT (TARGET_ARM ? 8 : 4)),
8560 UNSPEC_TLS);
8561 reg = load_tls_operand (sum, reg);
8563 if (TARGET_ARM)
8564 emit_insn (gen_tls_load_dot_plus_eight (reg, reg, labelno));
8565 else if (TARGET_THUMB2)
8566 emit_insn (gen_tls_load_dot_plus_four (reg, NULL, reg, labelno));
8567 else
8569 emit_insn (gen_pic_add_dot_plus_four (reg, reg, labelno));
8570 emit_move_insn (reg, gen_const_mem (SImode, reg));
8573 tp = arm_load_tp (NULL_RTX);
8575 return gen_rtx_PLUS (Pmode, tp, reg);
8577 case TLS_MODEL_LOCAL_EXEC:
8578 tp = arm_load_tp (NULL_RTX);
8580 reg = gen_rtx_UNSPEC (Pmode,
8581 gen_rtvec (2, x, GEN_INT (TLS_LE32)),
8582 UNSPEC_TLS);
8583 reg = force_reg (SImode, gen_rtx_CONST (SImode, reg));
8585 return gen_rtx_PLUS (Pmode, tp, reg);
8587 default:
8588 abort ();
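/* Usage sketch (added; not part of arm.c): for "__thread int t;" the
   sequence chosen above follows SYMBOL_REF_TLS_MODEL.  A non-PIC executable
   typically gets TLS_MODEL_LOCAL_EXEC (a TLS_LE32 offset added to the
   thread pointer from arm_load_tp), while -fpic code referencing a variable
   that may live in another module falls back to TLS_MODEL_GLOBAL_DYNAMIC and
   a call to __tls_get_addr (or the GNU2 descriptor sequence with
   -mtls-dialect=gnu2).  */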
8592 /* Try machine-dependent ways of modifying an illegitimate address
8593 to be legitimate. If we find one, return the new, valid address. */
8595 arm_legitimize_address (rtx x, rtx orig_x, machine_mode mode)
8597 if (arm_tls_referenced_p (x))
8599 rtx addend = NULL;
8601 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
8603 addend = XEXP (XEXP (x, 0), 1);
8604 x = XEXP (XEXP (x, 0), 0);
8607 if (GET_CODE (x) != SYMBOL_REF)
8608 return x;
8610 gcc_assert (SYMBOL_REF_TLS_MODEL (x) != 0);
8612 x = legitimize_tls_address (x, NULL_RTX);
8614 if (addend)
8616 x = gen_rtx_PLUS (SImode, x, addend);
8617 orig_x = x;
8619 else
8620 return x;
8623 if (!TARGET_ARM)
8625 /* TODO: legitimize_address for Thumb2. */
8626 if (TARGET_THUMB2)
8627 return x;
8628 return thumb_legitimize_address (x, orig_x, mode);
8631 if (GET_CODE (x) == PLUS)
8633 rtx xop0 = XEXP (x, 0);
8634 rtx xop1 = XEXP (x, 1);
8636 if (CONSTANT_P (xop0) && !symbol_mentioned_p (xop0))
8637 xop0 = force_reg (SImode, xop0);
8639 if (CONSTANT_P (xop1) && !CONST_INT_P (xop1)
8640 && !symbol_mentioned_p (xop1))
8641 xop1 = force_reg (SImode, xop1);
8643 if (ARM_BASE_REGISTER_RTX_P (xop0)
8644 && CONST_INT_P (xop1))
8646 HOST_WIDE_INT n, low_n;
8647 rtx base_reg, val;
8648 n = INTVAL (xop1);
8650 /* VFP addressing modes actually allow greater offsets, but for
8651 now we just stick with the lowest common denominator. */
8652 if (mode == DImode || mode == DFmode)
8654 low_n = n & 0x0f;
8655 n &= ~0x0f;
8656 if (low_n > 4)
8658 n += 16;
8659 low_n -= 16;
8662 else
8664 low_n = ((mode) == TImode ? 0
8665 : n >= 0 ? (n & 0xfff) : -((-n) & 0xfff));
8666 n -= low_n;
8669 base_reg = gen_reg_rtx (SImode);
8670 val = force_operand (plus_constant (Pmode, xop0, n), NULL_RTX);
8671 emit_move_insn (base_reg, val);
8672 x = plus_constant (Pmode, base_reg, low_n);
8674 else if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
8675 x = gen_rtx_PLUS (SImode, xop0, xop1);
8678 /* XXX We don't allow MINUS any more -- see comment in
8679 arm_legitimate_address_outer_p (). */
8680 else if (GET_CODE (x) == MINUS)
8682 rtx xop0 = XEXP (x, 0);
8683 rtx xop1 = XEXP (x, 1);
8685 if (CONSTANT_P (xop0))
8686 xop0 = force_reg (SImode, xop0);
8688 if (CONSTANT_P (xop1) && ! symbol_mentioned_p (xop1))
8689 xop1 = force_reg (SImode, xop1);
8691 if (xop0 != XEXP (x, 0) || xop1 != XEXP (x, 1))
8692 x = gen_rtx_MINUS (SImode, xop0, xop1);
8695 /* Make sure to take full advantage of the pre-indexed addressing mode
8696 with absolute addresses which often allows for the base register to
8697 be factorized for multiple adjacent memory references, and it might
8698 even allow for the minipool to be avoided entirely. */
8699 else if (CONST_INT_P (x) && optimize > 0)
8701 unsigned int bits;
8702 HOST_WIDE_INT mask, base, index;
8703 rtx base_reg;
8705 /* ldr and ldrb can use a 12-bit index, ldrsb and the rest can only
8706 use an 8-bit index. So let's use a 12-bit index for SImode only and
8707 hope that arm_gen_constant will enable ldrb to use more bits. */
8708 bits = (mode == SImode) ? 12 : 8;
8709 mask = (1 << bits) - 1;
8710 base = INTVAL (x) & ~mask;
8711 index = INTVAL (x) & mask;
8712 if (bit_count (base & 0xffffffff) > (32 - bits)/2)
8714 /* It'll most probably be more efficient to generate the base
8715 with more bits set and use a negative index instead. */
8716 base |= mask;
8717 index -= mask;
8719 base_reg = force_reg (SImode, GEN_INT (base));
8720 x = plus_constant (Pmode, base_reg, index);
8723 if (flag_pic)
8725 /* We need to find and carefully transform any SYMBOL and LABEL
8726 references; so go back to the original address expression. */
8727 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
8729 if (new_x != orig_x)
8730 x = new_x;
8733 return x;
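/* Worked example (added; not part of arm.c): for an SImode access to the
   absolute address 0x12345 with optimization enabled, bits = 12, so
   base = 0x12000 and index = 0x345.  The base is forced into a register
   once, and a neighbouring access such as 0x12349 computes the same base,
   letting CSE share the register and often avoiding a minipool entry per
   address.  */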
8737 /* Try machine-dependent ways of modifying an illegitimate Thumb address
8738 to be legitimate. If we find one, return the new, valid address. */
8740 thumb_legitimize_address (rtx x, rtx orig_x, machine_mode mode)
8742 if (GET_CODE (x) == PLUS
8743 && CONST_INT_P (XEXP (x, 1))
8744 && (INTVAL (XEXP (x, 1)) >= 32 * GET_MODE_SIZE (mode)
8745 || INTVAL (XEXP (x, 1)) < 0))
8747 rtx xop0 = XEXP (x, 0);
8748 rtx xop1 = XEXP (x, 1);
8749 HOST_WIDE_INT offset = INTVAL (xop1);
8751 /* Try and fold the offset into a biasing of the base register and
8752 then offsetting that. Don't do this when optimizing for space
8753 since it can cause too many CSEs. */
8754 if (optimize_size && offset >= 0
8755 && offset < 256 + 31 * GET_MODE_SIZE (mode))
8757 HOST_WIDE_INT delta;
8759 if (offset >= 256)
8760 delta = offset - (256 - GET_MODE_SIZE (mode));
8761 else if (offset < 32 * GET_MODE_SIZE (mode) + 8)
8762 delta = 31 * GET_MODE_SIZE (mode);
8763 else
8764 delta = offset & (~31 * GET_MODE_SIZE (mode));
8766 xop0 = force_operand (plus_constant (Pmode, xop0, offset - delta),
8767 NULL_RTX);
8768 x = plus_constant (Pmode, xop0, delta);
8770 else if (offset < 0 && offset > -256)
8771 /* Small negative offsets are best done with a subtract before the
8772 dereference, forcing these into a register normally takes two
8773 instructions. */
8774 x = force_operand (x, NULL_RTX);
8775 else
8777 /* For the remaining cases, force the constant into a register. */
8778 xop1 = force_reg (SImode, xop1);
8779 x = gen_rtx_PLUS (SImode, xop0, xop1);
8782 else if (GET_CODE (x) == PLUS
8783 && s_register_operand (XEXP (x, 1), SImode)
8784 && !s_register_operand (XEXP (x, 0), SImode))
8786 rtx xop0 = force_operand (XEXP (x, 0), NULL_RTX);
8788 x = gen_rtx_PLUS (SImode, xop0, XEXP (x, 1));
8791 if (flag_pic)
8793 /* We need to find and carefully transform any SYMBOL and LABEL
8794 references; so go back to the original address expression. */
8795 rtx new_x = legitimize_pic_address (orig_x, mode, NULL_RTX);
8797 if (new_x != orig_x)
8798 x = new_x;
8801 return x;
8804 /* Return TRUE if X contains any TLS symbol references. */
8806 bool
8807 arm_tls_referenced_p (rtx x)
8809 if (! TARGET_HAVE_TLS)
8810 return false;
8812 subrtx_iterator::array_type array;
8813 FOR_EACH_SUBRTX (iter, array, x, ALL)
8815 const_rtx x = *iter;
8816 if (GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0)
8818 /* ARM currently does not provide relocations to encode TLS variables
8819 into AArch32 instructions, only data, so there is no way to
8820 currently implement these if a literal pool is disabled. */
8821 if (arm_disable_literal_pool)
8822 sorry ("accessing thread-local storage is not currently supported "
8823 "with -mpure-code or -mslow-flash-data");
8825 return true;
8828 /* Don't recurse into UNSPEC_TLS looking for TLS symbols; these are
8829 TLS offsets, not real symbol references. */
8830 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
8831 iter.skip_subrtxes ();
8833 return false;
8836 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
8838 On the ARM, allow any integer (invalid ones are removed later by insn
8839 patterns), nice doubles and symbol_refs which refer to the function's
8840 constant pool XXX.
8842 When generating pic allow anything. */
8844 static bool
8845 arm_legitimate_constant_p_1 (machine_mode, rtx x)
8847 return flag_pic || !label_mentioned_p (x);
8850 static bool
8851 thumb_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8853 /* Splitters for TARGET_USE_MOVT call arm_emit_movpair which creates high
8854 RTX. These RTX must therefore be allowed for Thumb-1 so that when run
8855 for ARMv8-M Baseline or later the result is valid. */
8856 if (TARGET_HAVE_MOVT && GET_CODE (x) == HIGH)
8857 x = XEXP (x, 0);
8859 return (CONST_INT_P (x)
8860 || CONST_DOUBLE_P (x)
8861 || CONSTANT_ADDRESS_P (x)
8862 || (TARGET_HAVE_MOVT && GET_CODE (x) == SYMBOL_REF)
8863 || flag_pic);
8866 static bool
8867 arm_legitimate_constant_p (machine_mode mode, rtx x)
8869 return (!arm_cannot_force_const_mem (mode, x)
8870 && (TARGET_32BIT
8871 ? arm_legitimate_constant_p_1 (mode, x)
8872 : thumb_legitimate_constant_p (mode, x)));
8875 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
8877 static bool
8878 arm_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
8880 rtx base, offset;
8882 if (ARM_OFFSETS_MUST_BE_WITHIN_SECTIONS_P)
8884 split_const (x, &base, &offset);
8885 if (GET_CODE (base) == SYMBOL_REF
8886 && !offset_within_block_p (base, INTVAL (offset)))
8887 return true;
8889 return arm_tls_referenced_p (x);
8892 #define REG_OR_SUBREG_REG(X) \
8893 (REG_P (X) \
8894 || (GET_CODE (X) == SUBREG && REG_P (SUBREG_REG (X))))
8896 #define REG_OR_SUBREG_RTX(X) \
8897 (REG_P (X) ? (X) : SUBREG_REG (X))
8899 static inline int
8900 thumb1_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
8902 machine_mode mode = GET_MODE (x);
8903 int total, words;
8905 switch (code)
8907 case ASHIFT:
8908 case ASHIFTRT:
8909 case LSHIFTRT:
8910 case ROTATERT:
8911 return (mode == SImode) ? COSTS_N_INSNS (1) : COSTS_N_INSNS (2);
8913 case PLUS:
8914 case MINUS:
8915 case COMPARE:
8916 case NEG:
8917 case NOT:
8918 return COSTS_N_INSNS (1);
8920 case MULT:
8921 if (arm_arch6m && arm_m_profile_small_mul)
8922 return COSTS_N_INSNS (32);
8924 if (CONST_INT_P (XEXP (x, 1)))
8926 int cycles = 0;
8927 unsigned HOST_WIDE_INT i = INTVAL (XEXP (x, 1));
8929 while (i)
8931 i >>= 2;
8932 cycles++;
8934 return COSTS_N_INSNS (2) + cycles;
8936 return COSTS_N_INSNS (1) + 16;
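/* Worked example (added; not part of arm.c): for (mult r0 (const_int 100)),
   the loop above shifts 100 -> 25 -> 6 -> 1 -> 0, i.e. 4 iterations, so the
   estimate is COSTS_N_INSNS (2) + 4.  */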
8938 case SET:
8939 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
8940 the mode. */
8941 words = ARM_NUM_INTS (GET_MODE_SIZE (GET_MODE (SET_DEST (x))));
8942 return (COSTS_N_INSNS (words)
8943 + 4 * ((MEM_P (SET_SRC (x)))
8944 + MEM_P (SET_DEST (x))));
8946 case CONST_INT:
8947 if (outer == SET)
8949 if (UINTVAL (x) < 256
8950 /* 16-bit constant. */
8951 || (TARGET_HAVE_MOVT && !(INTVAL (x) & 0xffff0000)))
8952 return 0;
8953 if (thumb_shiftable_const (INTVAL (x)))
8954 return COSTS_N_INSNS (2);
8955 return COSTS_N_INSNS (3);
8957 else if ((outer == PLUS || outer == COMPARE)
8958 && INTVAL (x) < 256 && INTVAL (x) > -256)
8959 return 0;
8960 else if ((outer == IOR || outer == XOR || outer == AND)
8961 && INTVAL (x) < 256 && INTVAL (x) >= -256)
8962 return COSTS_N_INSNS (1);
8963 else if (outer == AND)
8965 int i;
8966 /* This duplicates the tests in the andsi3 expander. */
8967 for (i = 9; i <= 31; i++)
8968 if ((HOST_WIDE_INT_1 << i) - 1 == INTVAL (x)
8969 || (HOST_WIDE_INT_1 << i) - 1 == ~INTVAL (x))
8970 return COSTS_N_INSNS (2);
8972 else if (outer == ASHIFT || outer == ASHIFTRT
8973 || outer == LSHIFTRT)
8974 return 0;
8975 return COSTS_N_INSNS (2);
8977 case CONST:
8978 case CONST_DOUBLE:
8979 case LABEL_REF:
8980 case SYMBOL_REF:
8981 return COSTS_N_INSNS (3);
8983 case UDIV:
8984 case UMOD:
8985 case DIV:
8986 case MOD:
8987 return 100;
8989 case TRUNCATE:
8990 return 99;
8992 case AND:
8993 case XOR:
8994 case IOR:
8995 /* XXX guess. */
8996 return 8;
8998 case MEM:
8999 /* XXX another guess. */
9000 /* Memory costs quite a lot for the first word, but subsequent words
9001 load at the equivalent of a single insn each. */
9002 return (10 + 4 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
9003 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9004 ? 4 : 0));
9006 case IF_THEN_ELSE:
9007 /* XXX a guess. */
9008 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
9009 return 14;
9010 return 2;
9012 case SIGN_EXTEND:
9013 case ZERO_EXTEND:
9014 total = mode == DImode ? COSTS_N_INSNS (1) : 0;
9015 total += thumb1_rtx_costs (XEXP (x, 0), GET_CODE (XEXP (x, 0)), code);
9017 if (mode == SImode)
9018 return total;
9020 if (arm_arch6)
9021 return total + COSTS_N_INSNS (1);
9023 /* Assume a two-shift sequence. Increase the cost slightly so
9024 we prefer actual shifts over an extend operation. */
9025 return total + 1 + COSTS_N_INSNS (2);
9027 default:
9028 return 99;
9032 /* Estimates the size cost of thumb1 instructions.
9033 For now most of the code is copied from thumb1_rtx_costs. We need more
9034 fine-grained tuning when we have more related test cases. */
9035 static inline int
9036 thumb1_size_rtx_costs (rtx x, enum rtx_code code, enum rtx_code outer)
9038 machine_mode mode = GET_MODE (x);
9039 int words, cost;
9041 switch (code)
9043 case ASHIFT:
9044 case ASHIFTRT:
9045 case LSHIFTRT:
9046 case ROTATERT:
9047 return (mode == SImode) ? COSTS_N_INSNS (1) : COSTS_N_INSNS (2);
9049 case PLUS:
9050 case MINUS:
9051 /* Thumb-1 needs two instructions to fulfill shiftadd/shiftsub0/shiftsub1
9052 defined by RTL expansion, especially for the expansion of
9053 multiplication. */
9054 if ((GET_CODE (XEXP (x, 0)) == MULT
9055 && power_of_two_operand (XEXP (XEXP (x,0),1), SImode))
9056 || (GET_CODE (XEXP (x, 1)) == MULT
9057 && power_of_two_operand (XEXP (XEXP (x, 1), 1), SImode)))
9058 return COSTS_N_INSNS (2);
9059 /* Fall through. */
9060 case COMPARE:
9061 case NEG:
9062 case NOT:
9063 return COSTS_N_INSNS (1);
9065 case MULT:
9066 if (CONST_INT_P (XEXP (x, 1)))
9068 /* The Thumb-1 mul instruction can't operate on a constant; we must load it
9069 into a register first. */
9070 int const_size = thumb1_size_rtx_costs (XEXP (x, 1), CONST_INT, SET);
9071 /* For the targets which have a very small and high-latency multiply
9072 unit, we prefer to synthesize the mult with up to 5 instructions,
9073 giving a good balance between size and performance. */
9074 if (arm_arch6m && arm_m_profile_small_mul)
9075 return COSTS_N_INSNS (5);
9076 else
9077 return COSTS_N_INSNS (1) + const_size;
9079 return COSTS_N_INSNS (1);
9081 case SET:
9082 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
9083 the mode. */
9084 words = ARM_NUM_INTS (GET_MODE_SIZE (GET_MODE (SET_DEST (x))));
9085 cost = COSTS_N_INSNS (words);
9086 if (satisfies_constraint_J (SET_SRC (x))
9087 || satisfies_constraint_K (SET_SRC (x))
9088 /* Too big an immediate for a 2-byte mov, using MOVT. */
9089 || (CONST_INT_P (SET_SRC (x))
9090 && UINTVAL (SET_SRC (x)) >= 256
9091 && TARGET_HAVE_MOVT
9092 && satisfies_constraint_j (SET_SRC (x)))
9093 /* thumb1_movdi_insn. */
9094 || ((words > 1) && MEM_P (SET_SRC (x))))
9095 cost += COSTS_N_INSNS (1);
9096 return cost;
9098 case CONST_INT:
9099 if (outer == SET)
9101 if (UINTVAL (x) < 256)
9102 return COSTS_N_INSNS (1);
9103 /* movw is 4 bytes long. */
9104 if (TARGET_HAVE_MOVT && !(INTVAL (x) & 0xffff0000))
9105 return COSTS_N_INSNS (2);
9106 /* See split "TARGET_THUMB1 && satisfies_constraint_J". */
9107 if (INTVAL (x) >= -255 && INTVAL (x) <= -1)
9108 return COSTS_N_INSNS (2);
9109 /* See split "TARGET_THUMB1 && satisfies_constraint_K". */
9110 if (thumb_shiftable_const (INTVAL (x)))
9111 return COSTS_N_INSNS (2);
9112 return COSTS_N_INSNS (3);
9114 else if ((outer == PLUS || outer == COMPARE)
9115 && INTVAL (x) < 256 && INTVAL (x) > -256)
9116 return 0;
9117 else if ((outer == IOR || outer == XOR || outer == AND)
9118 && INTVAL (x) < 256 && INTVAL (x) >= -256)
9119 return COSTS_N_INSNS (1);
9120 else if (outer == AND)
9122 int i;
9123 /* This duplicates the tests in the andsi3 expander. */
9124 for (i = 9; i <= 31; i++)
9125 if ((HOST_WIDE_INT_1 << i) - 1 == INTVAL (x)
9126 || (HOST_WIDE_INT_1 << i) - 1 == ~INTVAL (x))
9127 return COSTS_N_INSNS (2);
9129 else if (outer == ASHIFT || outer == ASHIFTRT
9130 || outer == LSHIFTRT)
9131 return 0;
9132 return COSTS_N_INSNS (2);
9134 case CONST:
9135 case CONST_DOUBLE:
9136 case LABEL_REF:
9137 case SYMBOL_REF:
9138 return COSTS_N_INSNS (3);
9140 case UDIV:
9141 case UMOD:
9142 case DIV:
9143 case MOD:
9144 return 100;
9146 case TRUNCATE:
9147 return 99;
9149 case AND:
9150 case XOR:
9151 case IOR:
9152 return COSTS_N_INSNS (1);
9154 case MEM:
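/* For example, an SImode load is COSTS_N_INSNS (1) and a DImode load
   COSTS_N_INSNS (2), plus COSTS_N_INSNS (1) if the address is a
   constant-pool reference.  */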
9155 return (COSTS_N_INSNS (1)
9156 + COSTS_N_INSNS (1)
9157 * ((GET_MODE_SIZE (mode) - 1) / UNITS_PER_WORD)
9158 + ((GET_CODE (x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (x))
9159 ? COSTS_N_INSNS (1) : 0));
9161 case IF_THEN_ELSE:
9162 /* XXX a guess. */
9163 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
9164 return 14;
9165 return 2;
9167 case ZERO_EXTEND:
9168 /* XXX still guessing. */
9169 switch (GET_MODE (XEXP (x, 0)))
9171 case E_QImode:
9172 return (1 + (mode == DImode ? 4 : 0)
9173 + (MEM_P (XEXP (x, 0)) ? 10 : 0));
9175 case E_HImode:
9176 return (4 + (mode == DImode ? 4 : 0)
9177 + (MEM_P (XEXP (x, 0)) ? 10 : 0));
9179 case E_SImode:
9180 return (1 + (MEM_P (XEXP (x, 0)) ? 10 : 0));
9182 default:
9183 return 99;
9186 default:
9187 return 99;
9191 /* Helper function for arm_rtx_costs. If the operand is a valid shift
9192 operand, then return the operand that is being shifted. If the shift
9193 is not by a constant, then set SHIFT_REG to point to the operand.
9194 Return NULL if OP is not a shifter operand. */
9195 static rtx
9196 shifter_op_p (rtx op, rtx *shift_reg)
9198 enum rtx_code code = GET_CODE (op);
9200 if (code == MULT && CONST_INT_P (XEXP (op, 1))
9201 && exact_log2 (INTVAL (XEXP (op, 1))) > 0)
9202 return XEXP (op, 0);
9203 else if (code == ROTATE && CONST_INT_P (XEXP (op, 1)))
9204 return XEXP (op, 0);
9205 else if (code == ROTATERT || code == ASHIFT || code == LSHIFTRT
9206 || code == ASHIFTRT)
9208 if (!CONST_INT_P (XEXP (op, 1)))
9209 *shift_reg = XEXP (op, 1);
9210 return XEXP (op, 0);
9213 return NULL;
9216 static bool
9217 arm_unspec_cost (rtx x, enum rtx_code /* outer_code */, bool speed_p, int *cost)
9219 const struct cpu_cost_table *extra_cost = current_tune->insn_extra_cost;
9220 rtx_code code = GET_CODE (x);
9221 gcc_assert (code == UNSPEC || code == UNSPEC_VOLATILE);
9223 switch (XINT (x, 1))
9225 case UNSPEC_UNALIGNED_LOAD:
9226 /* We can only do unaligned loads into the integer unit, and we can't
9227 use LDM or LDRD. */
9228 *cost = COSTS_N_INSNS (ARM_NUM_REGS (GET_MODE (x)));
9229 if (speed_p)
9230 *cost += (ARM_NUM_REGS (GET_MODE (x)) * extra_cost->ldst.load
9231 + extra_cost->ldst.load_unaligned);
9233 #ifdef NOT_YET
9234 *cost += arm_address_cost (XEXP (XVECEXP (x, 0, 0), 0), GET_MODE (x),
9235 ADDR_SPACE_GENERIC, speed_p);
9236 #endif
9237 return true;
9239 case UNSPEC_UNALIGNED_STORE:
9240 *cost = COSTS_N_INSNS (ARM_NUM_REGS (GET_MODE (x)));
9241 if (speed_p)
9242 *cost += (ARM_NUM_REGS (GET_MODE (x)) * extra_cost->ldst.store
9243 + extra_cost->ldst.store_unaligned);
9245 *cost += rtx_cost (XVECEXP (x, 0, 0), VOIDmode, UNSPEC, 0, speed_p);
9246 #ifdef NOT_YET
9247 *cost += arm_address_cost (XEXP (XVECEXP (x, 0, 0), 0), GET_MODE (x),
9248 ADDR_SPACE_GENERIC, speed_p);
9249 #endif
9250 return true;
9252 case UNSPEC_VRINTZ:
9253 case UNSPEC_VRINTP:
9254 case UNSPEC_VRINTM:
9255 case UNSPEC_VRINTR:
9256 case UNSPEC_VRINTX:
9257 case UNSPEC_VRINTA:
9258 if (speed_p)
9259 *cost += extra_cost->fp[GET_MODE (x) == DFmode].roundint;
9261 return true;
9262 default:
9263 *cost = COSTS_N_INSNS (2);
9264 break;
9266 return true;
9269 /* Cost of a libcall. We assume one insn per argument, an amount for the
9270 call (one insn for -Os) and then one for processing the result. */
9271 #define LIBCALL_COST(N) COSTS_N_INSNS (N + (speed_p ? 18 : 2))
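/* For example, LIBCALL_COST (2) evaluates to COSTS_N_INSNS (20) when
   optimizing for speed and to COSTS_N_INSNS (4) at -Os.  */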
9273 #define HANDLE_NARROW_SHIFT_ARITH(OP, IDX) \
9274 do \
9276 shift_op = shifter_op_p (XEXP (x, IDX), &shift_reg); \
9277 if (shift_op != NULL \
9278 && arm_rtx_shift_left_p (XEXP (x, IDX))) \
9280 if (shift_reg) \
9282 if (speed_p) \
9283 *cost += extra_cost->alu.arith_shift_reg; \
9284 *cost += rtx_cost (shift_reg, GET_MODE (shift_reg), \
9285 ASHIFT, 1, speed_p); \
9287 else if (speed_p) \
9288 *cost += extra_cost->alu.arith_shift; \
9290 *cost += (rtx_cost (shift_op, GET_MODE (shift_op), \
9291 ASHIFT, 0, speed_p) \
9292 + rtx_cost (XEXP (x, 1 - IDX), \
9293 GET_MODE (shift_op), \
9294 OP, 1, speed_p)); \
9295 return true; \
9298 while (0)
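/* Used below as, for example, HANDLE_NARROW_SHIFT_ARITH (PLUS, 0) to cost a
   narrow-mode PLUS whose operand IDX is a left shift (or an equivalent
   power-of-two multiply).  */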
9300 /* Helper function for arm_rtx_costs_internal. Calculates the cost of a MEM,
9301 considering the costs of the addressing mode and memory access
9302 separately. */
9303 static bool
9304 arm_mem_costs (rtx x, const struct cpu_cost_table *extra_cost,
9305 int *cost, bool speed_p)
9307 machine_mode mode = GET_MODE (x);
9309 *cost = COSTS_N_INSNS (1);
9311 if (flag_pic
9312 && GET_CODE (XEXP (x, 0)) == PLUS
9313 && will_be_in_index_register (XEXP (XEXP (x, 0), 1)))
9314 /* This will be split into two instructions. Add the cost of the
9315 additional instruction here. The cost of the memory access is computed
9316 below. See arm.md:calculate_pic_address. */
9317 *cost += COSTS_N_INSNS (1);
9319 /* Calculate cost of the addressing mode. */
9320 if (speed_p)
9322 arm_addr_mode_op op_type;
9323 switch (GET_CODE (XEXP (x, 0)))
9325 default:
9326 case REG:
9327 op_type = AMO_DEFAULT;
9328 break;
9329 case MINUS:
9330 /* MINUS does not appear in RTL, but the architecture supports it,
9331 so handle this case defensively. */
9332 /* fall through */
9333 case PLUS:
9334 op_type = AMO_NO_WB;
9335 break;
9336 case PRE_INC:
9337 case PRE_DEC:
9338 case POST_INC:
9339 case POST_DEC:
9340 case PRE_MODIFY:
9341 case POST_MODIFY:
9342 op_type = AMO_WB;
9343 break;
9346 if (VECTOR_MODE_P (mode))
9347 *cost += current_tune->addr_mode_costs->vector[op_type];
9348 else if (FLOAT_MODE_P (mode))
9349 *cost += current_tune->addr_mode_costs->fp[op_type];
9350 else
9351 *cost += current_tune->addr_mode_costs->integer[op_type];
9354 /* Calculate cost of memory access. */
9355 if (speed_p)
9357 if (FLOAT_MODE_P (mode))
9359 if (GET_MODE_SIZE (mode) == 8)
9360 *cost += extra_cost->ldst.loadd;
9361 else
9362 *cost += extra_cost->ldst.loadf;
9364 else if (VECTOR_MODE_P (mode))
9365 *cost += extra_cost->ldst.loadv;
9366 else
9368 /* Integer modes */
9369 if (GET_MODE_SIZE (mode) == 8)
9370 *cost += extra_cost->ldst.ldrd;
9371 else
9372 *cost += extra_cost->ldst.load;
9376 return true;
9379 /* RTX costs. Make an estimate of the cost of executing the operation
9380 X, which is contained within an operation with code OUTER_CODE.
9381 SPEED_P indicates whether the cost desired is the performance cost,
9382 or the size cost. The estimate is stored in COST and the return
9383 value is TRUE if the cost calculation is final, or FALSE if the
9384 caller should recurse through the operands of X to add additional
9385 costs.
9387 We currently make no attempt to model the size savings of Thumb-2
9388 16-bit instructions. At the normal points in compilation where
9389 this code is called we have no measure of whether the condition
9390 flags are live or not, and thus no realistic way to determine what
9391 the size will eventually be. */
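/* For example, the DIV case below returns false so that the generic
   recursion adds the operand costs, whereas the MEM case returns true
   because arm_mem_costs already accounts for both the access and its
   addressing mode.  */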
9392 static bool
9393 arm_rtx_costs_internal (rtx x, enum rtx_code code, enum rtx_code outer_code,
9394 const struct cpu_cost_table *extra_cost,
9395 int *cost, bool speed_p)
9397 machine_mode mode = GET_MODE (x);
9399 *cost = COSTS_N_INSNS (1);
9401 if (TARGET_THUMB1)
9403 if (speed_p)
9404 *cost = thumb1_rtx_costs (x, code, outer_code);
9405 else
9406 *cost = thumb1_size_rtx_costs (x, code, outer_code);
9407 return true;
9410 switch (code)
9412 case SET:
9413 *cost = 0;
9414 /* SET RTXs don't have a mode so we get it from the destination. */
9415 mode = GET_MODE (SET_DEST (x));
9417 if (REG_P (SET_SRC (x))
9418 && REG_P (SET_DEST (x)))
9420 /* Assume that most copies can be done with a single insn,
9421 unless we don't have HW FP, in which case everything
9422 larger than word mode will require two insns. */
9423 *cost = COSTS_N_INSNS (((!TARGET_HARD_FLOAT
9424 && GET_MODE_SIZE (mode) > 4)
9425 || mode == DImode)
9426 ? 2 : 1);
9427 /* Conditional register moves can be encoded
9428 in 16 bits in Thumb mode. */
9429 if (!speed_p && TARGET_THUMB && outer_code == COND_EXEC)
9430 *cost >>= 1;
9432 return true;
9435 if (CONST_INT_P (SET_SRC (x)))
9437 /* Handle CONST_INT here, since the value doesn't have a mode
9438 and we would otherwise be unable to work out the true cost. */
9439 *cost = rtx_cost (SET_DEST (x), GET_MODE (SET_DEST (x)), SET,
9440 0, speed_p);
9441 outer_code = SET;
9442 /* Slightly lower the cost of setting a core reg to a constant.
9443 This helps break up chains and allows for better scheduling. */
9444 if (REG_P (SET_DEST (x))
9445 && REGNO (SET_DEST (x)) <= LR_REGNUM)
9446 *cost -= 1;
9447 x = SET_SRC (x);
9448 /* Immediate moves with an immediate in the range [0, 255] can be
9449 encoded in 16 bits in Thumb mode. */
9450 if (!speed_p && TARGET_THUMB && GET_MODE (x) == SImode
9451 && INTVAL (x) >= 0 && INTVAL (x) <= 255
9452 *cost >>= 1;
9453 goto const_int_cost;
9456 return false;
9458 case MEM:
9459 return arm_mem_costs (x, extra_cost, cost, speed_p);
9461 case PARALLEL:
9463 /* Calculations of LDM costs are complex. We assume an initial cost
9464 (ldm_1st) which covers loading up to ldm_regs_per_insn_1st
9465 registers; then each additional
9466 ldm_regs_per_insn_subsequent registers cost one more insn. The
9467 formula for N regs is thus:
9469 ldm_1st + COSTS_N_INSNS ((max (N - ldm_regs_per_insn_1st, 0)
9470 + ldm_regs_per_insn_subsequent - 1)
9471 / ldm_regs_per_insn_subsequent).
9473 Additional costs may also be added for addressing. A similar
9474 formula is used for STM. */
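/* For example, an LDM of 5 registers with ldm_regs_per_insn_1st == 2 and
   ldm_regs_per_insn_subsequent == 1 adds
   COSTS_N_INSNS ((MAX (5 - 2, 0) + 1 - 1) / 1) = COSTS_N_INSNS (3)
   on top of the first-insn cost.  */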
9476 bool is_ldm = load_multiple_operation (x, SImode);
9477 bool is_stm = store_multiple_operation (x, SImode);
9479 if (is_ldm || is_stm)
9481 if (speed_p)
9483 HOST_WIDE_INT nregs = XVECLEN (x, 0);
9484 HOST_WIDE_INT regs_per_insn_1st = is_ldm
9485 ? extra_cost->ldst.ldm_regs_per_insn_1st
9486 : extra_cost->ldst.stm_regs_per_insn_1st;
9487 HOST_WIDE_INT regs_per_insn_sub = is_ldm
9488 ? extra_cost->ldst.ldm_regs_per_insn_subsequent
9489 : extra_cost->ldst.stm_regs_per_insn_subsequent;
9491 *cost += regs_per_insn_1st
9492 + COSTS_N_INSNS (((MAX (nregs - regs_per_insn_1st, 0))
9493 + regs_per_insn_sub - 1)
9494 / regs_per_insn_sub);
9495 return true;
9499 return false;
9501 case DIV:
9502 case UDIV:
9503 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
9504 && (mode == SFmode || !TARGET_VFP_SINGLE))
9505 *cost += COSTS_N_INSNS (speed_p
9506 ? extra_cost->fp[mode != SFmode].div : 0);
9507 else if (mode == SImode && TARGET_IDIV)
9508 *cost += COSTS_N_INSNS (speed_p ? extra_cost->mult[0].idiv : 0);
9509 else
9510 *cost = LIBCALL_COST (2);
9512 /* Make the cost of sdiv more expensive so that when both sdiv and udiv are
9513 possible, udiv is preferred. */
9514 *cost += (code == DIV ? COSTS_N_INSNS (1) : 0);
9515 return false; /* All arguments must be in registers. */
9517 case MOD:
9518 /* MOD by a power of 2 can be expanded as:
9519 rsbs r1, r0, #0
9520 and r0, r0, #(n - 1)
9521 and r1, r1, #(n - 1)
9522 rsbpl r0, r1, #0. */
9523 if (CONST_INT_P (XEXP (x, 1))
9524 && exact_log2 (INTVAL (XEXP (x, 1))) > 0
9525 && mode == SImode)
9527 *cost += COSTS_N_INSNS (3);
9529 if (speed_p)
9530 *cost += 2 * extra_cost->alu.logical
9531 + extra_cost->alu.arith;
9532 return true;
9535 /* Fall-through. */
9536 case UMOD:
9537 /* Make the cost of sdiv more expensive so that when both sdiv and udiv are
9538 possible, udiv is preferred. */
9539 *cost = LIBCALL_COST (2) + (code == MOD ? COSTS_N_INSNS (1) : 0);
9540 return false; /* All arguments must be in registers. */
9542 case ROTATE:
9543 if (mode == SImode && REG_P (XEXP (x, 1)))
9545 *cost += (COSTS_N_INSNS (1)
9546 + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
9547 if (speed_p)
9548 *cost += extra_cost->alu.shift_reg;
9549 return true;
9551 /* Fall through */
9552 case ROTATERT:
9553 case ASHIFT:
9554 case LSHIFTRT:
9555 case ASHIFTRT:
9556 if (mode == DImode && CONST_INT_P (XEXP (x, 1)))
9558 *cost += (COSTS_N_INSNS (2)
9559 + rtx_cost (XEXP (x, 0), mode, code, 0, speed_p));
9560 if (speed_p)
9561 *cost += 2 * extra_cost->alu.shift;
9562 /* Slightly disparage left shift by 1 so that we prefer adddi3. */
9563 if (code == ASHIFT && XEXP (x, 1) == CONST1_RTX (SImode))
9564 *cost += 1;
9565 return true;
9567 else if (mode == SImode)
9569 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
9570 /* Slightly disparage register shifts at -Os, but not by much. */
9571 if (!CONST_INT_P (XEXP (x, 1)))
9572 *cost += (speed_p ? extra_cost->alu.shift_reg : 1
9573 + rtx_cost (XEXP (x, 1), mode, code, 1, speed_p));
9574 return true;
9576 else if (GET_MODE_CLASS (mode) == MODE_INT
9577 && GET_MODE_SIZE (mode) < 4)
9579 if (code == ASHIFT)
9581 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
9582 /* Slightly disparage register shifts at -Os, but not by
9583 much. */
9584 if (!CONST_INT_P (XEXP (x, 1)))
9585 *cost += (speed_p ? extra_cost->alu.shift_reg : 1
9586 + rtx_cost (XEXP (x, 1), mode, code, 1, speed_p));
9588 else if (code == LSHIFTRT || code == ASHIFTRT)
9590 if (arm_arch_thumb2 && CONST_INT_P (XEXP (x, 1)))
9592 /* Can use SBFX/UBFX. */
9593 if (speed_p)
9594 *cost += extra_cost->alu.bfx;
9595 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
9597 else
9599 *cost += COSTS_N_INSNS (1);
9600 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
9601 if (speed_p)
9603 if (CONST_INT_P (XEXP (x, 1)))
9604 *cost += 2 * extra_cost->alu.shift;
9605 else
9606 *cost += (extra_cost->alu.shift
9607 + extra_cost->alu.shift_reg);
9609 else
9610 /* Slightly disparage register shifts. */
9611 *cost += !CONST_INT_P (XEXP (x, 1));
9614 else /* Rotates. */
9616 *cost = COSTS_N_INSNS (2 + !CONST_INT_P (XEXP (x, 1)));
9617 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
9618 if (speed_p)
9620 if (CONST_INT_P (XEXP (x, 1)))
9621 *cost += (2 * extra_cost->alu.shift
9622 + extra_cost->alu.log_shift);
9623 else
9624 *cost += (extra_cost->alu.shift
9625 + extra_cost->alu.shift_reg
9626 + extra_cost->alu.log_shift_reg);
9629 return true;
9632 *cost = LIBCALL_COST (2);
9633 return false;
9635 case BSWAP:
9636 if (arm_arch6)
9638 if (mode == SImode)
9640 if (speed_p)
9641 *cost += extra_cost->alu.rev;
9643 return false;
9646 else
9648 /* No rev instruction available. Look at arm_legacy_rev
9649 and thumb_legacy_rev for the form of RTL used then. */
9650 if (TARGET_THUMB)
9652 *cost += COSTS_N_INSNS (9);
9654 if (speed_p)
9656 *cost += 6 * extra_cost->alu.shift;
9657 *cost += 3 * extra_cost->alu.logical;
9660 else
9662 *cost += COSTS_N_INSNS (4);
9664 if (speed_p)
9666 *cost += 2 * extra_cost->alu.shift;
9667 *cost += extra_cost->alu.arith_shift;
9668 *cost += 2 * extra_cost->alu.logical;
9671 return true;
9673 return false;
9675 case MINUS:
9676 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
9677 && (mode == SFmode || !TARGET_VFP_SINGLE))
9679 if (GET_CODE (XEXP (x, 0)) == MULT
9680 || GET_CODE (XEXP (x, 1)) == MULT)
9682 rtx mul_op0, mul_op1, sub_op;
9684 if (speed_p)
9685 *cost += extra_cost->fp[mode != SFmode].mult_addsub;
9687 if (GET_CODE (XEXP (x, 0)) == MULT)
9689 mul_op0 = XEXP (XEXP (x, 0), 0);
9690 mul_op1 = XEXP (XEXP (x, 0), 1);
9691 sub_op = XEXP (x, 1);
9693 else
9695 mul_op0 = XEXP (XEXP (x, 1), 0);
9696 mul_op1 = XEXP (XEXP (x, 1), 1);
9697 sub_op = XEXP (x, 0);
9700 /* The first operand of the multiply may be optionally
9701 negated. */
9702 if (GET_CODE (mul_op0) == NEG)
9703 mul_op0 = XEXP (mul_op0, 0);
9705 *cost += (rtx_cost (mul_op0, mode, code, 0, speed_p)
9706 + rtx_cost (mul_op1, mode, code, 0, speed_p)
9707 + rtx_cost (sub_op, mode, code, 0, speed_p));
9709 return true;
9712 if (speed_p)
9713 *cost += extra_cost->fp[mode != SFmode].addsub;
9714 return false;
9717 if (mode == SImode)
9719 rtx shift_by_reg = NULL;
9720 rtx shift_op;
9721 rtx non_shift_op;
9723 shift_op = shifter_op_p (XEXP (x, 0), &shift_by_reg);
9724 if (shift_op == NULL)
9726 shift_op = shifter_op_p (XEXP (x, 1), &shift_by_reg);
9727 non_shift_op = XEXP (x, 0);
9729 else
9730 non_shift_op = XEXP (x, 1);
9732 if (shift_op != NULL)
9734 if (shift_by_reg != NULL)
9736 if (speed_p)
9737 *cost += extra_cost->alu.arith_shift_reg;
9738 *cost += rtx_cost (shift_by_reg, mode, code, 0, speed_p);
9740 else if (speed_p)
9741 *cost += extra_cost->alu.arith_shift;
9743 *cost += rtx_cost (shift_op, mode, code, 0, speed_p);
9744 *cost += rtx_cost (non_shift_op, mode, code, 0, speed_p);
9745 return true;
9748 if (arm_arch_thumb2
9749 && GET_CODE (XEXP (x, 1)) == MULT)
9751 /* MLS. */
9752 if (speed_p)
9753 *cost += extra_cost->mult[0].add;
9754 *cost += rtx_cost (XEXP (x, 0), mode, MINUS, 0, speed_p);
9755 *cost += rtx_cost (XEXP (XEXP (x, 1), 0), mode, MULT, 0, speed_p);
9756 *cost += rtx_cost (XEXP (XEXP (x, 1), 1), mode, MULT, 1, speed_p);
9757 return true;
9760 if (CONST_INT_P (XEXP (x, 0)))
9762 int insns = arm_gen_constant (MINUS, SImode, NULL_RTX,
9763 INTVAL (XEXP (x, 0)), NULL_RTX,
9764 NULL_RTX, 1, 0);
9765 *cost = COSTS_N_INSNS (insns);
9766 if (speed_p)
9767 *cost += insns * extra_cost->alu.arith;
9768 *cost += rtx_cost (XEXP (x, 1), mode, code, 1, speed_p);
9769 return true;
9771 else if (speed_p)
9772 *cost += extra_cost->alu.arith;
9774 return false;
9777 if (GET_MODE_CLASS (mode) == MODE_INT
9778 && GET_MODE_SIZE (mode) < 4)
9780 rtx shift_op, shift_reg;
9781 shift_reg = NULL;
9783 /* We check both sides of the MINUS for shifter operands since,
9784 unlike PLUS, it's not commutative. */
9786 HANDLE_NARROW_SHIFT_ARITH (MINUS, 0);
9787 HANDLE_NARROW_SHIFT_ARITH (MINUS, 1);
9789 /* Slightly disparage, as we might need to widen the result. */
9790 *cost += 1;
9791 if (speed_p)
9792 *cost += extra_cost->alu.arith;
9794 if (CONST_INT_P (XEXP (x, 0)))
9796 *cost += rtx_cost (XEXP (x, 1), mode, code, 1, speed_p);
9797 return true;
9800 return false;
9803 if (mode == DImode)
9805 *cost += COSTS_N_INSNS (1);
9807 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND)
9809 rtx op1 = XEXP (x, 1);
9811 if (speed_p)
9812 *cost += 2 * extra_cost->alu.arith;
9814 if (GET_CODE (op1) == ZERO_EXTEND)
9815 *cost += rtx_cost (XEXP (op1, 0), VOIDmode, ZERO_EXTEND,
9816 0, speed_p);
9817 else
9818 *cost += rtx_cost (op1, mode, MINUS, 1, speed_p);
9819 *cost += rtx_cost (XEXP (XEXP (x, 0), 0), VOIDmode, ZERO_EXTEND,
9820 0, speed_p);
9821 return true;
9823 else if (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
9825 if (speed_p)
9826 *cost += extra_cost->alu.arith + extra_cost->alu.arith_shift;
9827 *cost += (rtx_cost (XEXP (XEXP (x, 0), 0), VOIDmode, SIGN_EXTEND,
9828 0, speed_p)
9829 + rtx_cost (XEXP (x, 1), mode, MINUS, 1, speed_p));
9830 return true;
9832 else if (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
9833 || GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)
9835 if (speed_p)
9836 *cost += (extra_cost->alu.arith
9837 + (GET_CODE (XEXP (x, 1)) == ZERO_EXTEND
9838 ? extra_cost->alu.arith
9839 : extra_cost->alu.arith_shift));
9840 *cost += (rtx_cost (XEXP (x, 0), mode, MINUS, 0, speed_p)
9841 + rtx_cost (XEXP (XEXP (x, 1), 0), VOIDmode,
9842 GET_CODE (XEXP (x, 1)), 0, speed_p));
9843 return true;
9846 if (speed_p)
9847 *cost += 2 * extra_cost->alu.arith;
9848 return false;
9851 /* Vector mode? */
9853 *cost = LIBCALL_COST (2);
9854 return false;
9856 case PLUS:
9857 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
9858 && (mode == SFmode || !TARGET_VFP_SINGLE))
9860 if (GET_CODE (XEXP (x, 0)) == MULT)
9862 rtx mul_op0, mul_op1, add_op;
9864 if (speed_p)
9865 *cost += extra_cost->fp[mode != SFmode].mult_addsub;
9867 mul_op0 = XEXP (XEXP (x, 0), 0);
9868 mul_op1 = XEXP (XEXP (x, 0), 1);
9869 add_op = XEXP (x, 1);
9871 *cost += (rtx_cost (mul_op0, mode, code, 0, speed_p)
9872 + rtx_cost (mul_op1, mode, code, 0, speed_p)
9873 + rtx_cost (add_op, mode, code, 0, speed_p));
9875 return true;
9878 if (speed_p)
9879 *cost += extra_cost->fp[mode != SFmode].addsub;
9880 return false;
9882 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
9884 *cost = LIBCALL_COST (2);
9885 return false;
9888 /* Narrow modes can be synthesized in SImode, but the range
9889 of useful sub-operations is limited. Check for shift operations
9890 on one of the operands. Only left shifts can be used in the
9891 narrow modes. */
9892 if (GET_MODE_CLASS (mode) == MODE_INT
9893 && GET_MODE_SIZE (mode) < 4)
9895 rtx shift_op, shift_reg;
9896 shift_reg = NULL;
9898 HANDLE_NARROW_SHIFT_ARITH (PLUS, 0);
9900 if (CONST_INT_P (XEXP (x, 1)))
9902 int insns = arm_gen_constant (PLUS, SImode, NULL_RTX,
9903 INTVAL (XEXP (x, 1)), NULL_RTX,
9904 NULL_RTX, 1, 0);
9905 *cost = COSTS_N_INSNS (insns);
9906 if (speed_p)
9907 *cost += insns * extra_cost->alu.arith;
9908 /* Slightly penalize a narrow operation as the result may
9909 need widening. */
9910 *cost += 1 + rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed_p);
9911 return true;
9914 /* Slightly penalize a narrow operation as the result may
9915 need widening. */
9916 *cost += 1;
9917 if (speed_p)
9918 *cost += extra_cost->alu.arith;
9920 return false;
9923 if (mode == SImode)
9925 rtx shift_op, shift_reg;
9927 if (TARGET_INT_SIMD
9928 && (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
9929 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND))
9931 /* UXTA[BH] or SXTA[BH]. */
9932 if (speed_p)
9933 *cost += extra_cost->alu.extend_arith;
9934 *cost += (rtx_cost (XEXP (XEXP (x, 0), 0), VOIDmode, ZERO_EXTEND,
9935 0, speed_p)
9936 + rtx_cost (XEXP (x, 1), mode, PLUS, 0, speed_p));
9937 return true;
9940 shift_reg = NULL;
9941 shift_op = shifter_op_p (XEXP (x, 0), &shift_reg);
9942 if (shift_op != NULL)
9944 if (shift_reg)
9946 if (speed_p)
9947 *cost += extra_cost->alu.arith_shift_reg;
9948 *cost += rtx_cost (shift_reg, mode, ASHIFT, 1, speed_p);
9950 else if (speed_p)
9951 *cost += extra_cost->alu.arith_shift;
9953 *cost += (rtx_cost (shift_op, mode, ASHIFT, 0, speed_p)
9954 + rtx_cost (XEXP (x, 1), mode, PLUS, 1, speed_p));
9955 return true;
9957 if (GET_CODE (XEXP (x, 0)) == MULT)
9959 rtx mul_op = XEXP (x, 0);
9961 if (TARGET_DSP_MULTIPLY
9962 && ((GET_CODE (XEXP (mul_op, 0)) == SIGN_EXTEND
9963 && (GET_CODE (XEXP (mul_op, 1)) == SIGN_EXTEND
9964 || (GET_CODE (XEXP (mul_op, 1)) == ASHIFTRT
9965 && CONST_INT_P (XEXP (XEXP (mul_op, 1), 1))
9966 && INTVAL (XEXP (XEXP (mul_op, 1), 1)) == 16)))
9967 || (GET_CODE (XEXP (mul_op, 0)) == ASHIFTRT
9968 && CONST_INT_P (XEXP (XEXP (mul_op, 0), 1))
9969 && INTVAL (XEXP (XEXP (mul_op, 0), 1)) == 16
9970 && (GET_CODE (XEXP (mul_op, 1)) == SIGN_EXTEND
9971 || (GET_CODE (XEXP (mul_op, 1)) == ASHIFTRT
9972 && CONST_INT_P (XEXP (XEXP (mul_op, 1), 1))
9973 && (INTVAL (XEXP (XEXP (mul_op, 1), 1))
9974 == 16))))))
9976 /* SMLA[BT][BT]. */
9977 if (speed_p)
9978 *cost += extra_cost->mult[0].extend_add;
9979 *cost += (rtx_cost (XEXP (XEXP (mul_op, 0), 0), mode,
9980 SIGN_EXTEND, 0, speed_p)
9981 + rtx_cost (XEXP (XEXP (mul_op, 1), 0), mode,
9982 SIGN_EXTEND, 0, speed_p)
9983 + rtx_cost (XEXP (x, 1), mode, PLUS, 1, speed_p));
9984 return true;
9987 if (speed_p)
9988 *cost += extra_cost->mult[0].add;
9989 *cost += (rtx_cost (XEXP (mul_op, 0), mode, MULT, 0, speed_p)
9990 + rtx_cost (XEXP (mul_op, 1), mode, MULT, 1, speed_p)
9991 + rtx_cost (XEXP (x, 1), mode, PLUS, 1, speed_p));
9992 return true;
9994 if (CONST_INT_P (XEXP (x, 1)))
9996 int insns = arm_gen_constant (PLUS, SImode, NULL_RTX,
9997 INTVAL (XEXP (x, 1)), NULL_RTX,
9998 NULL_RTX, 1, 0);
9999 *cost = COSTS_N_INSNS (insns);
10000 if (speed_p)
10001 *cost += insns * extra_cost->alu.arith;
10002 *cost += rtx_cost (XEXP (x, 0), mode, PLUS, 0, speed_p);
10003 return true;
10005 else if (speed_p)
10006 *cost += extra_cost->alu.arith;
10008 return false;
10011 if (mode == DImode)
10013 if (arm_arch3m
10014 && GET_CODE (XEXP (x, 0)) == MULT
10015 && ((GET_CODE (XEXP (XEXP (x, 0), 0)) == ZERO_EXTEND
10016 && GET_CODE (XEXP (XEXP (x, 0), 1)) == ZERO_EXTEND)
10017 || (GET_CODE (XEXP (XEXP (x, 0), 0)) == SIGN_EXTEND
10018 && GET_CODE (XEXP (XEXP (x, 0), 1)) == SIGN_EXTEND)))
10020 if (speed_p)
10021 *cost += extra_cost->mult[1].extend_add;
10022 *cost += (rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0), mode,
10023 ZERO_EXTEND, 0, speed_p)
10024 + rtx_cost (XEXP (XEXP (XEXP (x, 0), 1), 0), mode,
10025 ZERO_EXTEND, 0, speed_p)
10026 + rtx_cost (XEXP (x, 1), mode, PLUS, 1, speed_p));
10027 return true;
10030 *cost += COSTS_N_INSNS (1);
10032 if (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
10033 || GET_CODE (XEXP (x, 0)) == SIGN_EXTEND)
10035 if (speed_p)
10036 *cost += (extra_cost->alu.arith
10037 + (GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
10038 ? extra_cost->alu.arith
10039 : extra_cost->alu.arith_shift));
10041 *cost += (rtx_cost (XEXP (XEXP (x, 0), 0), VOIDmode, ZERO_EXTEND,
10042 0, speed_p)
10043 + rtx_cost (XEXP (x, 1), mode, PLUS, 1, speed_p));
10044 return true;
10047 if (speed_p)
10048 *cost += 2 * extra_cost->alu.arith;
10049 return false;
10052 /* Vector mode? */
10053 *cost = LIBCALL_COST (2);
10054 return false;
10055 case IOR:
10056 if (mode == SImode && arm_arch6 && aarch_rev16_p (x))
10058 if (speed_p)
10059 *cost += extra_cost->alu.rev;
10061 return true;
10063 /* Fall through. */
10064 case AND: case XOR:
10065 if (mode == SImode)
10067 enum rtx_code subcode = GET_CODE (XEXP (x, 0));
10068 rtx op0 = XEXP (x, 0);
10069 rtx shift_op, shift_reg;
10071 if (subcode == NOT
10072 && (code == AND
10073 || (code == IOR && TARGET_THUMB2)))
10074 op0 = XEXP (op0, 0);
10076 shift_reg = NULL;
10077 shift_op = shifter_op_p (op0, &shift_reg);
10078 if (shift_op != NULL)
10080 if (shift_reg)
10082 if (speed_p)
10083 *cost += extra_cost->alu.log_shift_reg;
10084 *cost += rtx_cost (shift_reg, mode, ASHIFT, 1, speed_p);
10086 else if (speed_p)
10087 *cost += extra_cost->alu.log_shift;
10089 *cost += (rtx_cost (shift_op, mode, ASHIFT, 0, speed_p)
10090 + rtx_cost (XEXP (x, 1), mode, code, 1, speed_p));
10091 return true;
10094 if (CONST_INT_P (XEXP (x, 1)))
10096 int insns = arm_gen_constant (code, SImode, NULL_RTX,
10097 INTVAL (XEXP (x, 1)), NULL_RTX,
10098 NULL_RTX, 1, 0);
10100 *cost = COSTS_N_INSNS (insns);
10101 if (speed_p)
10102 *cost += insns * extra_cost->alu.logical;
10103 *cost += rtx_cost (op0, mode, code, 0, speed_p);
10104 return true;
10107 if (speed_p)
10108 *cost += extra_cost->alu.logical;
10109 *cost += (rtx_cost (op0, mode, code, 0, speed_p)
10110 + rtx_cost (XEXP (x, 1), mode, code, 1, speed_p));
10111 return true;
10114 if (mode == DImode)
10116 rtx op0 = XEXP (x, 0);
10117 enum rtx_code subcode = GET_CODE (op0);
10119 *cost += COSTS_N_INSNS (1);
10121 if (subcode == NOT
10122 && (code == AND
10123 || (code == IOR && TARGET_THUMB2)))
10124 op0 = XEXP (op0, 0);
10126 if (GET_CODE (op0) == ZERO_EXTEND)
10128 if (speed_p)
10129 *cost += 2 * extra_cost->alu.logical;
10131 *cost += (rtx_cost (XEXP (op0, 0), VOIDmode, ZERO_EXTEND,
10132 0, speed_p)
10133 + rtx_cost (XEXP (x, 1), mode, code, 0, speed_p));
10134 return true;
10136 else if (GET_CODE (op0) == SIGN_EXTEND)
10138 if (speed_p)
10139 *cost += extra_cost->alu.logical + extra_cost->alu.log_shift;
10141 *cost += (rtx_cost (XEXP (op0, 0), VOIDmode, SIGN_EXTEND,
10142 0, speed_p)
10143 + rtx_cost (XEXP (x, 1), mode, code, 0, speed_p));
10144 return true;
10147 if (speed_p)
10148 *cost += 2 * extra_cost->alu.logical;
10150 return true;
10152 /* Vector mode? */
10154 *cost = LIBCALL_COST (2);
10155 return false;
10157 case MULT:
10158 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
10159 && (mode == SFmode || !TARGET_VFP_SINGLE))
10161 rtx op0 = XEXP (x, 0);
10163 if (GET_CODE (op0) == NEG && !flag_rounding_math)
10164 op0 = XEXP (op0, 0);
10166 if (speed_p)
10167 *cost += extra_cost->fp[mode != SFmode].mult;
10169 *cost += (rtx_cost (op0, mode, MULT, 0, speed_p)
10170 + rtx_cost (XEXP (x, 1), mode, MULT, 1, speed_p));
10171 return true;
10173 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
10175 *cost = LIBCALL_COST (2);
10176 return false;
10179 if (mode == SImode)
10181 if (TARGET_DSP_MULTIPLY
10182 && ((GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
10183 && (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
10184 || (GET_CODE (XEXP (x, 1)) == ASHIFTRT
10185 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10186 && INTVAL (XEXP (XEXP (x, 1), 1)) == 16)))
10187 || (GET_CODE (XEXP (x, 0)) == ASHIFTRT
10188 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10189 && INTVAL (XEXP (XEXP (x, 0), 1)) == 16
10190 && (GET_CODE (XEXP (x, 1)) == SIGN_EXTEND
10191 || (GET_CODE (XEXP (x, 1)) == ASHIFTRT
10192 && CONST_INT_P (XEXP (XEXP (x, 1), 1))
10193 && (INTVAL (XEXP (XEXP (x, 1), 1))
10194 == 16))))))
10196 /* SMUL[TB][TB]. */
10197 if (speed_p)
10198 *cost += extra_cost->mult[0].extend;
10199 *cost += rtx_cost (XEXP (XEXP (x, 0), 0), mode,
10200 SIGN_EXTEND, 0, speed_p);
10201 *cost += rtx_cost (XEXP (XEXP (x, 1), 0), mode,
10202 SIGN_EXTEND, 1, speed_p);
10203 return true;
10205 if (speed_p)
10206 *cost += extra_cost->mult[0].simple;
10207 return false;
10210 if (mode == DImode)
10212 if (arm_arch3m
10213 && ((GET_CODE (XEXP (x, 0)) == ZERO_EXTEND
10214 && GET_CODE (XEXP (x, 1)) == ZERO_EXTEND)
10215 || (GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
10216 && GET_CODE (XEXP (x, 1)) == SIGN_EXTEND)))
10218 if (speed_p)
10219 *cost += extra_cost->mult[1].extend;
10220 *cost += (rtx_cost (XEXP (XEXP (x, 0), 0), VOIDmode,
10221 ZERO_EXTEND, 0, speed_p)
10222 + rtx_cost (XEXP (XEXP (x, 1), 0), VOIDmode,
10223 ZERO_EXTEND, 0, speed_p));
10224 return true;
10227 *cost = LIBCALL_COST (2);
10228 return false;
10231 /* Vector mode? */
10232 *cost = LIBCALL_COST (2);
10233 return false;
10235 case NEG:
10236 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
10237 && (mode == SFmode || !TARGET_VFP_SINGLE))
10239 if (GET_CODE (XEXP (x, 0)) == MULT)
10241 /* VNMUL. */
10242 *cost = rtx_cost (XEXP (x, 0), mode, NEG, 0, speed_p);
10243 return true;
10246 if (speed_p)
10247 *cost += extra_cost->fp[mode != SFmode].neg;
10249 return false;
10251 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
10253 *cost = LIBCALL_COST (1);
10254 return false;
10257 if (mode == SImode)
10259 if (GET_CODE (XEXP (x, 0)) == ABS)
10261 *cost += COSTS_N_INSNS (1);
10262 /* Assume the non-flag-changing variant. */
10263 if (speed_p)
10264 *cost += (extra_cost->alu.log_shift
10265 + extra_cost->alu.arith_shift);
10266 *cost += rtx_cost (XEXP (XEXP (x, 0), 0), mode, ABS, 0, speed_p);
10267 return true;
10270 if (GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMPARE
10271 || GET_RTX_CLASS (GET_CODE (XEXP (x, 0))) == RTX_COMM_COMPARE)
10273 *cost += COSTS_N_INSNS (1);
10274 /* No extra cost for MOV imm and MVN imm. */
10275 /* If the comparison op is using the flags, there's no further
10276 cost, otherwise we need to add the cost of the comparison. */
10277 if (!(REG_P (XEXP (XEXP (x, 0), 0))
10278 && REGNO (XEXP (XEXP (x, 0), 0)) == CC_REGNUM
10279 && XEXP (XEXP (x, 0), 1) == const0_rtx))
10281 mode = GET_MODE (XEXP (XEXP (x, 0), 0));
10282 *cost += (COSTS_N_INSNS (1)
10283 + rtx_cost (XEXP (XEXP (x, 0), 0), mode, COMPARE,
10284 0, speed_p)
10285 + rtx_cost (XEXP (XEXP (x, 0), 1), mode, COMPARE,
10286 1, speed_p));
10287 if (speed_p)
10288 *cost += extra_cost->alu.arith;
10290 return true;
10293 if (speed_p)
10294 *cost += extra_cost->alu.arith;
10295 return false;
10298 if (GET_MODE_CLASS (mode) == MODE_INT
10299 && GET_MODE_SIZE (mode) < 4)
10301 /* Slightly disparage, as we might need an extend operation. */
10302 *cost += 1;
10303 if (speed_p)
10304 *cost += extra_cost->alu.arith;
10305 return false;
10308 if (mode == DImode)
10310 *cost += COSTS_N_INSNS (1);
10311 if (speed_p)
10312 *cost += 2 * extra_cost->alu.arith;
10313 return false;
10316 /* Vector mode? */
10317 *cost = LIBCALL_COST (1);
10318 return false;
10320 case NOT:
10321 if (mode == SImode)
10323 rtx shift_op;
10324 rtx shift_reg = NULL;
10326 shift_op = shifter_op_p (XEXP (x, 0), &shift_reg);
10328 if (shift_op)
10330 if (shift_reg != NULL)
10332 if (speed_p)
10333 *cost += extra_cost->alu.log_shift_reg;
10334 *cost += rtx_cost (shift_reg, mode, ASHIFT, 1, speed_p);
10336 else if (speed_p)
10337 *cost += extra_cost->alu.log_shift;
10338 *cost += rtx_cost (shift_op, mode, ASHIFT, 0, speed_p);
10339 return true;
10342 if (speed_p)
10343 *cost += extra_cost->alu.logical;
10344 return false;
10346 if (mode == DImode)
10348 *cost += COSTS_N_INSNS (1);
10349 return false;
10352 /* Vector mode? */
10354 *cost += LIBCALL_COST (1);
10355 return false;
10357 case IF_THEN_ELSE:
10359 if (GET_CODE (XEXP (x, 1)) == PC || GET_CODE (XEXP (x, 2)) == PC)
10361 *cost += COSTS_N_INSNS (3);
10362 return true;
10364 int op1cost = rtx_cost (XEXP (x, 1), mode, SET, 1, speed_p);
10365 int op2cost = rtx_cost (XEXP (x, 2), mode, SET, 1, speed_p);
10367 *cost = rtx_cost (XEXP (x, 0), mode, IF_THEN_ELSE, 0, speed_p);
10368 /* Assume that if one arm of the if_then_else is a register,
10369 it will be tied with the result and eliminate the
10370 conditional insn. */
10371 if (REG_P (XEXP (x, 1)))
10372 *cost += op2cost;
10373 else if (REG_P (XEXP (x, 2)))
10374 *cost += op1cost;
10375 else
10377 if (speed_p)
10379 if (extra_cost->alu.non_exec_costs_exec)
10380 *cost += op1cost + op2cost + extra_cost->alu.non_exec;
10381 else
10382 *cost += MAX (op1cost, op2cost) + extra_cost->alu.non_exec;
10384 else
10385 *cost += op1cost + op2cost;
10388 return true;
10390 case COMPARE:
10391 if (cc_register (XEXP (x, 0), VOIDmode) && XEXP (x, 1) == const0_rtx)
10392 *cost = 0;
10393 else
10395 machine_mode op0mode;
10396 /* We'll mostly assume that the cost of a compare is the cost of the
10397 LHS. However, there are some notable exceptions. */
10399 /* Floating point compares are never done as side-effects. */
10400 op0mode = GET_MODE (XEXP (x, 0));
10401 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (op0mode) == MODE_FLOAT
10402 && (op0mode == SFmode || !TARGET_VFP_SINGLE))
10404 if (speed_p)
10405 *cost += extra_cost->fp[op0mode != SFmode].compare;
10407 if (XEXP (x, 1) == CONST0_RTX (op0mode))
10409 *cost += rtx_cost (XEXP (x, 0), op0mode, code, 0, speed_p);
10410 return true;
10413 return false;
10415 else if (GET_MODE_CLASS (op0mode) == MODE_FLOAT)
10417 *cost = LIBCALL_COST (2);
10418 return false;
10421 /* DImode compares normally take two insns. */
10422 if (op0mode == DImode)
10424 *cost += COSTS_N_INSNS (1);
10425 if (speed_p)
10426 *cost += 2 * extra_cost->alu.arith;
10427 return false;
10430 if (op0mode == SImode)
10432 rtx shift_op;
10433 rtx shift_reg;
10435 if (XEXP (x, 1) == const0_rtx
10436 && !(REG_P (XEXP (x, 0))
10437 || (GET_CODE (XEXP (x, 0)) == SUBREG
10438 && REG_P (SUBREG_REG (XEXP (x, 0))))))
10440 *cost = rtx_cost (XEXP (x, 0), op0mode, COMPARE, 0, speed_p);
10442 /* Multiply operations that set the flags are often
10443 significantly more expensive. */
10444 if (speed_p
10445 && GET_CODE (XEXP (x, 0)) == MULT
10446 && !power_of_two_operand (XEXP (XEXP (x, 0), 1), mode))
10447 *cost += extra_cost->mult[0].flag_setting;
10449 if (speed_p
10450 && GET_CODE (XEXP (x, 0)) == PLUS
10451 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10452 && !power_of_two_operand (XEXP (XEXP (XEXP (x, 0),
10453 0), 1), mode))
10454 *cost += extra_cost->mult[0].flag_setting;
10455 return true;
10458 shift_reg = NULL;
10459 shift_op = shifter_op_p (XEXP (x, 0), &shift_reg);
10460 if (shift_op != NULL)
10462 if (shift_reg != NULL)
10464 *cost += rtx_cost (shift_reg, op0mode, ASHIFT,
10465 1, speed_p);
10466 if (speed_p)
10467 *cost += extra_cost->alu.arith_shift_reg;
10469 else if (speed_p)
10470 *cost += extra_cost->alu.arith_shift;
10471 *cost += rtx_cost (shift_op, op0mode, ASHIFT, 0, speed_p);
10472 *cost += rtx_cost (XEXP (x, 1), op0mode, COMPARE, 1, speed_p);
10473 return true;
10476 if (speed_p)
10477 *cost += extra_cost->alu.arith;
10478 if (CONST_INT_P (XEXP (x, 1))
10479 && const_ok_for_op (INTVAL (XEXP (x, 1)), COMPARE))
10481 *cost += rtx_cost (XEXP (x, 0), op0mode, COMPARE, 0, speed_p);
10482 return true;
10484 return false;
10487 /* Vector mode? */
10489 *cost = LIBCALL_COST (2);
10490 return false;
10492 return true;
10494 case EQ:
10495 case NE:
10496 case LT:
10497 case LE:
10498 case GT:
10499 case GE:
10500 case LTU:
10501 case LEU:
10502 case GEU:
10503 case GTU:
10504 case ORDERED:
10505 case UNORDERED:
10506 case UNEQ:
10507 case UNLE:
10508 case UNLT:
10509 case UNGE:
10510 case UNGT:
10511 case LTGT:
10512 if (outer_code == SET)
10514 /* Is it a store-flag operation? */
10515 if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == CC_REGNUM
10516 && XEXP (x, 1) == const0_rtx)
10518 /* Thumb also needs an IT insn. */
10519 *cost += COSTS_N_INSNS (TARGET_THUMB ? 2 : 1);
10520 return true;
10522 if (XEXP (x, 1) == const0_rtx)
10524 switch (code)
10526 case LT:
10527 /* LSR Rd, Rn, #31. */
10528 if (speed_p)
10529 *cost += extra_cost->alu.shift;
10530 break;
10532 case EQ:
10533 /* RSBS T1, Rn, #0
10534 ADC Rd, Rn, T1. */
10536 case NE:
10537 /* SUBS T1, Rn, #1
10538 SBC Rd, Rn, T1. */
10539 *cost += COSTS_N_INSNS (1);
10540 break;
10542 case LE:
10543 /* RSBS T1, Rn, Rn, LSR #31
10544 ADC Rd, Rn, T1. */
10545 *cost += COSTS_N_INSNS (1);
10546 if (speed_p)
10547 *cost += extra_cost->alu.arith_shift;
10548 break;
10550 case GT:
10551 /* RSB Rd, Rn, Rn, ASR #1
10552 LSR Rd, Rd, #31. */
10553 *cost += COSTS_N_INSNS (1);
10554 if (speed_p)
10555 *cost += (extra_cost->alu.arith_shift
10556 + extra_cost->alu.shift);
10557 break;
10559 case GE:
10560 /* ASR Rd, Rn, #31
10561 ADD Rd, Rn, #1. */
10562 *cost += COSTS_N_INSNS (1);
10563 if (speed_p)
10564 *cost += extra_cost->alu.shift;
10565 break;
10567 default:
10568 /* Remaining cases are either meaningless or would take
10569 three insns anyway. */
10570 *cost = COSTS_N_INSNS (3);
10571 break;
10573 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
10574 return true;
10576 else
10578 *cost += COSTS_N_INSNS (TARGET_THUMB ? 3 : 2);
10579 if (CONST_INT_P (XEXP (x, 1))
10580 && const_ok_for_op (INTVAL (XEXP (x, 1)), COMPARE))
10582 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
10583 return true;
10586 return false;
10589 /* Not directly inside a set. If it involves the condition code
10590 register it must be the condition for a branch, cond_exec or
10591 I_T_E operation. Since the comparison is performed elsewhere
10592 this is just the control part which has no additional
10593 cost. */
10594 else if (REG_P (XEXP (x, 0)) && REGNO (XEXP (x, 0)) == CC_REGNUM
10595 && XEXP (x, 1) == const0_rtx)
10597 *cost = 0;
10598 return true;
10600 return false;
10602 case ABS:
10603 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
10604 && (mode == SFmode || !TARGET_VFP_SINGLE))
10606 if (speed_p)
10607 *cost += extra_cost->fp[mode != SFmode].neg;
10609 return false;
10611 else if (GET_MODE_CLASS (mode) == MODE_FLOAT)
10613 *cost = LIBCALL_COST (1);
10614 return false;
10617 if (mode == SImode)
10619 if (speed_p)
10620 *cost += extra_cost->alu.log_shift + extra_cost->alu.arith_shift;
10621 return false;
10623 /* Vector mode? */
10624 *cost = LIBCALL_COST (1);
10625 return false;
10627 case SIGN_EXTEND:
10628 if ((arm_arch4 || GET_MODE (XEXP (x, 0)) == SImode)
10629 && MEM_P (XEXP (x, 0)))
10631 if (mode == DImode)
10632 *cost += COSTS_N_INSNS (1);
10634 if (!speed_p)
10635 return true;
10637 if (GET_MODE (XEXP (x, 0)) == SImode)
10638 *cost += extra_cost->ldst.load;
10639 else
10640 *cost += extra_cost->ldst.load_sign_extend;
10642 if (mode == DImode)
10643 *cost += extra_cost->alu.shift;
10645 return true;
10648 /* Widening from less than 32 bits requires an extend operation. */
10649 if (GET_MODE (XEXP (x, 0)) != SImode && arm_arch6)
10651 /* We have SXTB/SXTH. */
10652 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
10653 if (speed_p)
10654 *cost += extra_cost->alu.extend;
10656 else if (GET_MODE (XEXP (x, 0)) != SImode)
10658 /* Needs two shifts. */
10659 *cost += COSTS_N_INSNS (1);
10660 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
10661 if (speed_p)
10662 *cost += 2 * extra_cost->alu.shift;
10665 /* Widening beyond 32 bits requires one more insn. */
10666 if (mode == DImode)
10668 *cost += COSTS_N_INSNS (1);
10669 if (speed_p)
10670 *cost += extra_cost->alu.shift;
10673 return true;
10675 case ZERO_EXTEND:
10676 if ((arm_arch4
10677 || GET_MODE (XEXP (x, 0)) == SImode
10678 || GET_MODE (XEXP (x, 0)) == QImode)
10679 && MEM_P (XEXP (x, 0)))
10681 *cost = rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
10683 if (mode == DImode)
10684 *cost += COSTS_N_INSNS (1); /* No speed penalty. */
10686 return true;
10689 /* Widening from less than 32 bits requires an extend operation. */
10690 if (GET_MODE (XEXP (x, 0)) == QImode)
10692 /* UXTB can be a shorter instruction in Thumb2, but it might
10693 be slower than the AND Rd, Rn, #255 alternative. When
10694 optimizing for speed it should never be slower to use
10695 AND, and we don't really model 16-bit vs 32-bit insns
10696 here. */
10697 if (speed_p)
10698 *cost += extra_cost->alu.logical;
10700 else if (GET_MODE (XEXP (x, 0)) != SImode && arm_arch6)
10702 /* We have UXTB/UXTH. */
10703 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
10704 if (speed_p)
10705 *cost += extra_cost->alu.extend;
10707 else if (GET_MODE (XEXP (x, 0)) != SImode)
10709 /* Needs two shifts. It's marginally preferable to use
10710 shifts rather than two BIC instructions as the second
10711 shift may merge with a subsequent insn as a shifter
10712 op. */
10713 *cost = COSTS_N_INSNS (2);
10714 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
10715 if (speed_p)
10716 *cost += 2 * extra_cost->alu.shift;
10719 /* Widening beyond 32 bits requires one more insn. */
10720 if (mode == DImode)
10722 *cost += COSTS_N_INSNS (1); /* No speed penalty. */
10725 return true;
10727 case CONST_INT:
10728 *cost = 0;
10729 /* CONST_INT has no mode, so we cannot tell for sure how many
10730 insns are really going to be needed. The best we can do is
10731 look at the value passed. If it fits in SImode, then assume
10732 that's the mode it will be used for. Otherwise assume it
10733 will be used in DImode. */
10734 if (INTVAL (x) == trunc_int_for_mode (INTVAL (x), SImode))
10735 mode = SImode;
10736 else
10737 mode = DImode;
10739 /* Avoid blowing up in arm_gen_constant (). */
10740 if (!(outer_code == PLUS
10741 || outer_code == AND
10742 || outer_code == IOR
10743 || outer_code == XOR
10744 || outer_code == MINUS))
10745 outer_code = SET;
10747 const_int_cost:
10748 if (mode == SImode)
10750 *cost += COSTS_N_INSNS (arm_gen_constant (outer_code, SImode, NULL,
10751 INTVAL (x), NULL, NULL,
10752 0, 0));
10753 /* Extra costs? */
10755 else
10757 *cost += COSTS_N_INSNS (arm_gen_constant
10758 (outer_code, SImode, NULL,
10759 trunc_int_for_mode (INTVAL (x), SImode),
10760 NULL, NULL, 0, 0)
10761 + arm_gen_constant (outer_code, SImode, NULL,
10762 INTVAL (x) >> 32, NULL,
10763 NULL, 0, 0));
10764 /* Extra costs? */
10767 return true;
10769 case CONST:
10770 case LABEL_REF:
10771 case SYMBOL_REF:
10772 if (speed_p)
10774 if (arm_arch_thumb2 && !flag_pic)
10775 *cost += COSTS_N_INSNS (1);
10776 else
10777 *cost += extra_cost->ldst.load;
10779 else
10780 *cost += COSTS_N_INSNS (1);
10782 if (flag_pic)
10784 *cost += COSTS_N_INSNS (1);
10785 if (speed_p)
10786 *cost += extra_cost->alu.arith;
10789 return true;
10791 case CONST_FIXED:
10792 *cost = COSTS_N_INSNS (4);
10793 /* Fixme. */
10794 return true;
10796 case CONST_DOUBLE:
10797 if (TARGET_HARD_FLOAT && GET_MODE_CLASS (mode) == MODE_FLOAT
10798 && (mode == SFmode || !TARGET_VFP_SINGLE))
10800 if (vfp3_const_double_rtx (x))
10802 if (speed_p)
10803 *cost += extra_cost->fp[mode == DFmode].fpconst;
10804 return true;
10807 if (speed_p)
10809 if (mode == DFmode)
10810 *cost += extra_cost->ldst.loadd;
10811 else
10812 *cost += extra_cost->ldst.loadf;
10814 else
10815 *cost += COSTS_N_INSNS (1 + (mode == DFmode));
10817 return true;
10819 *cost = COSTS_N_INSNS (4);
10820 return true;
10822 case CONST_VECTOR:
10823 /* Fixme. */
10824 if (TARGET_NEON
10825 && TARGET_HARD_FLOAT
10826 && (VALID_NEON_DREG_MODE (mode) || VALID_NEON_QREG_MODE (mode))
10827 && neon_immediate_valid_for_move (x, mode, NULL, NULL))
10828 *cost = COSTS_N_INSNS (1);
10829 else
10830 *cost = COSTS_N_INSNS (4);
10831 return true;
10833 case HIGH:
10834 case LO_SUM:
10835 /* When optimizing for size, we prefer constant pool entries to
10836 MOVW/MOVT pairs, so bump the cost of these slightly. */
10837 if (!speed_p)
10838 *cost += 1;
10839 return true;
10841 case CLZ:
10842 if (speed_p)
10843 *cost += extra_cost->alu.clz;
10844 return false;
10846 case SMIN:
10847 if (XEXP (x, 1) == const0_rtx)
10849 if (speed_p)
10850 *cost += extra_cost->alu.log_shift;
10851 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
10852 return true;
10854 /* Fall through. */
10855 case SMAX:
10856 case UMIN:
10857 case UMAX:
10858 *cost += COSTS_N_INSNS (1);
10859 return false;
10861 case TRUNCATE:
10862 if (GET_CODE (XEXP (x, 0)) == ASHIFTRT
10863 && CONST_INT_P (XEXP (XEXP (x, 0), 1))
10864 && INTVAL (XEXP (XEXP (x, 0), 1)) == 32
10865 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10866 && ((GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == SIGN_EXTEND
10867 && GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1)) == SIGN_EXTEND)
10868 || (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 0)) == ZERO_EXTEND
10869 && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))
10870 == ZERO_EXTEND))))
10872 if (speed_p)
10873 *cost += extra_cost->mult[1].extend;
10874 *cost += (rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0), VOIDmode,
10875 ZERO_EXTEND, 0, speed_p)
10876 + rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 1), VOIDmode,
10877 ZERO_EXTEND, 0, speed_p));
10878 return true;
10880 *cost = LIBCALL_COST (1);
10881 return false;
10883 case UNSPEC_VOLATILE:
10884 case UNSPEC:
10885 return arm_unspec_cost (x, outer_code, speed_p, cost);
10887 case PC:
10888 /* Reading the PC is like reading any other register. Writing it
10889 is more expensive, but we take that into account elsewhere. */
10890 *cost = 0;
10891 return true;
10893 case ZERO_EXTRACT:
10894 /* TODO: Simple zero_extract of bottom bits using AND. */
10895 /* Fall through. */
10896 case SIGN_EXTRACT:
10897 if (arm_arch6
10898 && mode == SImode
10899 && CONST_INT_P (XEXP (x, 1))
10900 && CONST_INT_P (XEXP (x, 2)))
10902 if (speed_p)
10903 *cost += extra_cost->alu.bfx;
10904 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
10905 return true;
10907 /* Without UBFX/SBFX, need to resort to shift operations. */
10908 *cost += COSTS_N_INSNS (1);
10909 if (speed_p)
10910 *cost += 2 * extra_cost->alu.shift;
10911 *cost += rtx_cost (XEXP (x, 0), mode, ASHIFT, 0, speed_p);
10912 return true;
10914 case FLOAT_EXTEND:
10915 if (TARGET_HARD_FLOAT)
10917 if (speed_p)
10918 *cost += extra_cost->fp[mode == DFmode].widen;
10919 if (!TARGET_VFP5
10920 && GET_MODE (XEXP (x, 0)) == HFmode)
10922 /* Pre v8, widening HF->DF is a two-step process, first
10923 widening to SFmode. */
10924 *cost += COSTS_N_INSNS (1);
10925 if (speed_p)
10926 *cost += extra_cost->fp[0].widen;
10928 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
10929 return true;
10932 *cost = LIBCALL_COST (1);
10933 return false;
10935 case FLOAT_TRUNCATE:
10936 if (TARGET_HARD_FLOAT)
10938 if (speed_p)
10939 *cost += extra_cost->fp[mode == DFmode].narrow;
10940 *cost += rtx_cost (XEXP (x, 0), VOIDmode, code, 0, speed_p);
10941 return true;
10942 /* Vector modes? */
10944 *cost = LIBCALL_COST (1);
10945 return false;
10947 case FMA:
10948 if (TARGET_32BIT && TARGET_HARD_FLOAT && TARGET_FMA)
10950 rtx op0 = XEXP (x, 0);
10951 rtx op1 = XEXP (x, 1);
10952 rtx op2 = XEXP (x, 2);
10955 /* vfms or vfnma. */
10956 if (GET_CODE (op0) == NEG)
10957 op0 = XEXP (op0, 0);
10959 /* vfnms or vfnma. */
10960 if (GET_CODE (op2) == NEG)
10961 op2 = XEXP (op2, 0);
10963 *cost += rtx_cost (op0, mode, FMA, 0, speed_p);
10964 *cost += rtx_cost (op1, mode, FMA, 1, speed_p);
10965 *cost += rtx_cost (op2, mode, FMA, 2, speed_p);
10967 if (speed_p)
10968 *cost += extra_cost->fp[mode == DFmode].fma;
10970 return true;
10973 *cost = LIBCALL_COST (3);
10974 return false;
10976 case FIX:
10977 case UNSIGNED_FIX:
10978 if (TARGET_HARD_FLOAT)
10980 /* The *combine_vcvtf2i reduces a vmul+vcvt into
10981 a vcvt fixed-point conversion. */
10982 if (code == FIX && mode == SImode
10983 && GET_CODE (XEXP (x, 0)) == FIX
10984 && GET_MODE (XEXP (x, 0)) == SFmode
10985 && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
10986 && vfp3_const_double_for_bits (XEXP (XEXP (XEXP (x, 0), 0), 1))
10987 > 0)
10989 if (speed_p)
10990 *cost += extra_cost->fp[0].toint;
10992 *cost += rtx_cost (XEXP (XEXP (XEXP (x, 0), 0), 0), mode,
10993 code, 0, speed_p);
10994 return true;
10997 if (GET_MODE_CLASS (mode) == MODE_INT)
10999 mode = GET_MODE (XEXP (x, 0));
11000 if (speed_p)
11001 *cost += extra_cost->fp[mode == DFmode].toint;
11002 /* Strip off the 'cost' of rounding towards zero. */
11003 if (GET_CODE (XEXP (x, 0)) == FIX)
11004 *cost += rtx_cost (XEXP (XEXP (x, 0), 0), mode, code,
11005 0, speed_p);
11006 else
11007 *cost += rtx_cost (XEXP (x, 0), mode, code, 0, speed_p);
11008 /* ??? Increase the cost to deal with transferring from
11009 FP -> CORE registers? */
11010 return true;
11012 else if (GET_MODE_CLASS (mode) == MODE_FLOAT
11013 && TARGET_VFP5)
11015 if (speed_p)
11016 *cost += extra_cost->fp[mode == DFmode].roundint;
11017 return false;
11019 /* Vector costs? */
11021 *cost = LIBCALL_COST (1);
11022 return false;
11024 case FLOAT:
11025 case UNSIGNED_FLOAT:
11026 if (TARGET_HARD_FLOAT)
11028 /* ??? Increase the cost to deal with transferring from CORE
11029 -> FP registers? */
11030 if (speed_p)
11031 *cost += extra_cost->fp[mode == DFmode].fromint;
11032 return false;
11034 *cost = LIBCALL_COST (1);
11035 return false;
11037 case CALL:
11038 return true;
11040 case ASM_OPERANDS:
11042 /* Just a guess. Guess number of instructions in the asm
11043 plus one insn per input. Always a minimum of COSTS_N_INSNS (1)
11044 though (see PR60663). */
11045 int asm_length = MAX (1, asm_str_count (ASM_OPERANDS_TEMPLATE (x)));
11046 int num_operands = ASM_OPERANDS_INPUT_LENGTH (x);
11048 *cost = COSTS_N_INSNS (asm_length + num_operands);
11049 return true;
11051 default:
11052 if (mode != VOIDmode)
11053 *cost = COSTS_N_INSNS (ARM_NUM_REGS (mode));
11054 else
11055 *cost = COSTS_N_INSNS (4); /* Who knows? */
11056 return false;
11060 #undef HANDLE_NARROW_SHIFT_ARITH
11062 /* RTX costs entry point. */
11064 static bool
11065 arm_rtx_costs (rtx x, machine_mode mode ATTRIBUTE_UNUSED, int outer_code,
11066 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
11068 bool result;
11069 int code = GET_CODE (x);
11070 gcc_assert (current_tune->insn_extra_cost);
11072 result = arm_rtx_costs_internal (x, (enum rtx_code) code,
11073 (enum rtx_code) outer_code,
11074 current_tune->insn_extra_cost,
11075 total, speed);
11077 if (dump_file && arm_verbose_cost)
11079 print_rtl_single (dump_file, x);
11080 fprintf (dump_file, "\n%s cost: %d (%s)\n", speed ? "Hot" : "Cold",
11081 *total, result ? "final" : "partial");
11083 return result;
11086 /* All address computations that can be done are free, but rtx cost returns
11087 the same for practically all of them. So we weight the different types
11088 of address here in the order (most preferred first):
11089 PRE/POST_INC/DEC, SHIFT or NON-INT sum, INT sum, REG, MEM or LABEL. */
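/* For example, a POST_INC address costs 0, reg plus a constant costs 2,
   reg plus a shifted or multiplied index costs 3, reg plus reg costs 4,
   a bare register costs 6 and a SYMBOL_REF or LABEL_REF address costs 10.  */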
11090 static inline int
11091 arm_arm_address_cost (rtx x)
11093 enum rtx_code c = GET_CODE (x);
11095 if (c == PRE_INC || c == PRE_DEC || c == POST_INC || c == POST_DEC)
11096 return 0;
11097 if (c == MEM || c == LABEL_REF || c == SYMBOL_REF)
11098 return 10;
11100 if (c == PLUS)
11102 if (CONST_INT_P (XEXP (x, 1)))
11103 return 2;
11105 if (ARITHMETIC_P (XEXP (x, 0)) || ARITHMETIC_P (XEXP (x, 1)))
11106 return 3;
11108 return 4;
11111 return 6;
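/* Thumb addressing costs: for example, [r3] and [r3, #4] both cost 1,
   while [r3, r2] costs 2.  */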
11114 static inline int
11115 arm_thumb_address_cost (rtx x)
11117 enum rtx_code c = GET_CODE (x);
11119 if (c == REG)
11120 return 1;
11121 if (c == PLUS
11122 && REG_P (XEXP (x, 0))
11123 && CONST_INT_P (XEXP (x, 1)))
11124 return 1;
11126 return 2;
11129 static int
11130 arm_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
11131 addr_space_t as ATTRIBUTE_UNUSED, bool speed ATTRIBUTE_UNUSED)
11133 return TARGET_32BIT ? arm_arm_address_cost (x) : arm_thumb_address_cost (x);
11136 /* Adjust cost hook for XScale. */
11137 static bool
11138 xscale_sched_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
11139 int * cost)
11141 /* Some true dependencies can have a higher cost depending
11142 on precisely how certain input operands are used. */
11143 if (dep_type == 0
11144 && recog_memoized (insn) >= 0
11145 && recog_memoized (dep) >= 0)
11147 int shift_opnum = get_attr_shift (insn);
11148 enum attr_type attr_type = get_attr_type (dep);
11150 /* If nonzero, SHIFT_OPNUM contains the operand number of a shifted
11151 operand for INSN. If we have a shifted input operand and the
11152 instruction we depend on is another ALU instruction, then we may
11153 have to account for an additional stall. */
11154 if (shift_opnum != 0
11155 && (attr_type == TYPE_ALU_SHIFT_IMM
11156 || attr_type == TYPE_ALUS_SHIFT_IMM
11157 || attr_type == TYPE_LOGIC_SHIFT_IMM
11158 || attr_type == TYPE_LOGICS_SHIFT_IMM
11159 || attr_type == TYPE_ALU_SHIFT_REG
11160 || attr_type == TYPE_ALUS_SHIFT_REG
11161 || attr_type == TYPE_LOGIC_SHIFT_REG
11162 || attr_type == TYPE_LOGICS_SHIFT_REG
11163 || attr_type == TYPE_MOV_SHIFT
11164 || attr_type == TYPE_MVN_SHIFT
11165 || attr_type == TYPE_MOV_SHIFT_REG
11166 || attr_type == TYPE_MVN_SHIFT_REG))
11168 rtx shifted_operand;
11169 int opno;
11171 /* Get the shifted operand. */
11172 extract_insn (insn);
11173 shifted_operand = recog_data.operand[shift_opnum];
11175 /* Iterate over all the operands in DEP. If we write an operand
11176 that overlaps with SHIFTED_OPERAND, then we have to increase the
11177 cost of this dependency. */
11178 extract_insn (dep);
11179 preprocess_constraints (dep);
11180 for (opno = 0; opno < recog_data.n_operands; opno++)
11182 /* We can ignore strict inputs. */
11183 if (recog_data.operand_type[opno] == OP_IN)
11184 continue;
11186 if (reg_overlap_mentioned_p (recog_data.operand[opno],
11187 shifted_operand))
11189 *cost = 2;
11190 return false;
11195 return true;
11198 /* Adjust cost hook for Cortex A9. */
11199 static bool
11200 cortex_a9_sched_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
11201 int * cost)
11203 switch (dep_type)
11205 case REG_DEP_ANTI:
11206 *cost = 0;
11207 return false;
11209 case REG_DEP_TRUE:
11210 case REG_DEP_OUTPUT:
11211 if (recog_memoized (insn) >= 0
11212 && recog_memoized (dep) >= 0)
11214 if (GET_CODE (PATTERN (insn)) == SET)
11216 if (GET_MODE_CLASS
11217 (GET_MODE (SET_DEST (PATTERN (insn)))) == MODE_FLOAT
11218 || GET_MODE_CLASS
11219 (GET_MODE (SET_SRC (PATTERN (insn)))) == MODE_FLOAT)
11221 enum attr_type attr_type_insn = get_attr_type (insn);
11222 enum attr_type attr_type_dep = get_attr_type (dep);
11224 /* By default all dependencies of the form
11225 s0 = s0 <op> s1
11226 s0 = s0 <op> s2
11227 have an extra latency of 1 cycle because
11228 of the input and output dependency in this
11229 case. However, this gets modeled as a true
11230 dependency and hence all these checks. */
11231 if (REG_P (SET_DEST (PATTERN (insn)))
11232 && reg_set_p (SET_DEST (PATTERN (insn)), dep))
11234 /* FMACS is a special case where the dependent
11235 instruction can be issued 3 cycles before
11236 the normal latency in case of an output
11237 dependency. */
11238 if ((attr_type_insn == TYPE_FMACS
11239 || attr_type_insn == TYPE_FMACD)
11240 && (attr_type_dep == TYPE_FMACS
11241 || attr_type_dep == TYPE_FMACD))
11243 if (dep_type == REG_DEP_OUTPUT)
11244 *cost = insn_default_latency (dep) - 3;
11245 else
11246 *cost = insn_default_latency (dep);
11247 return false;
11249 else
11251 if (dep_type == REG_DEP_OUTPUT)
11252 *cost = insn_default_latency (dep) + 1;
11253 else
11254 *cost = insn_default_latency (dep);
11256 return false;
11261 break;
11263 default:
11264 gcc_unreachable ();
11267 return true;
11270 /* Adjust cost hook for FA726TE. */
11271 static bool
11272 fa726te_sched_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep,
11273 int * cost)
11275 /* For FA726TE, a true dependency on CPSR (i.e. set cond followed by predicated)
11276 has a penalty of 3. */
11277 if (dep_type == REG_DEP_TRUE
11278 && recog_memoized (insn) >= 0
11279 && recog_memoized (dep) >= 0
11280 && get_attr_conds (dep) == CONDS_SET)
11282 /* Use of carry (e.g. 64-bit arithmetic) in ALU: 3-cycle latency. */
11283 if (get_attr_conds (insn) == CONDS_USE
11284 && get_attr_type (insn) != TYPE_BRANCH)
11286 *cost = 3;
11287 return false;
11290 if (GET_CODE (PATTERN (insn)) == COND_EXEC
11291 || get_attr_conds (insn) == CONDS_USE)
11293 *cost = 0;
11294 return false;
11298 return true;
11301 /* Implement TARGET_REGISTER_MOVE_COST.
11303 Moves between VFP_REGS and GENERAL_REGS are a single insn, but
11304 such a move is typically more expensive than a single memory access. We set
11305 the cost to less than two memory accesses so that floating
11306 point to integer conversion does not go through memory. */
11309 arm_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
11310 reg_class_t from, reg_class_t to)
11312 if (TARGET_32BIT)
11314 if ((IS_VFP_CLASS (from) && !IS_VFP_CLASS (to))
11315 || (!IS_VFP_CLASS (from) && IS_VFP_CLASS (to)))
11316 return 15;
11317 else if ((from == IWMMXT_REGS && to != IWMMXT_REGS)
11318 || (from != IWMMXT_REGS && to == IWMMXT_REGS))
11319 return 4;
11320 else if (from == IWMMXT_GR_REGS || to == IWMMXT_GR_REGS)
11321 return 20;
11322 else
11323 return 2;
11325 else
11327 if (from == HI_REGS || to == HI_REGS)
11328 return 4;
11329 else
11330 return 2;
11334 /* Implement TARGET_MEMORY_MOVE_COST. */
11337 arm_memory_move_cost (machine_mode mode, reg_class_t rclass,
11338 bool in ATTRIBUTE_UNUSED)
11340 if (TARGET_32BIT)
11341 return 10;
11342 else
11344 if (GET_MODE_SIZE (mode) < 4)
11345 return 8;
11346 else
11347 return ((2 * GET_MODE_SIZE (mode)) * (rclass == LO_REGS ? 1 : 2));
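
As a rough illustration of the Thumb-1 arithmetic above, here is a standalone sketch (a hypothetical helper, not part of arm.c) that reproduces the formula: modes smaller than a word cost 8, otherwise twice the mode size, doubled again when the register class is not LO_REGS.

#include <stdio.h>

/* Hypothetical standalone restatement of the Thumb-1 memory-move cost
   formula above; MODE_SIZE stands in for GET_MODE_SIZE (mode).  */
static int
thumb1_mem_move_cost (int mode_size, int is_lo_regs)
{
  if (mode_size < 4)
    return 8;
  return (2 * mode_size) * (is_lo_regs ? 1 : 2);
}

int
main (void)
{
  printf ("SImode, LO_REGS: %d\n", thumb1_mem_move_cost (4, 1));   /* 8  */
  printf ("SImode, HI_REGS: %d\n", thumb1_mem_move_cost (4, 0));   /* 16 */
  printf ("DImode, LO_REGS: %d\n", thumb1_mem_move_cost (8, 1));   /* 16 */
  return 0;
}
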
11351 /* Vectorizer cost model implementation. */
11353 /* Implement targetm.vectorize.builtin_vectorization_cost. */
11354 static int
11355 arm_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
11356 tree vectype,
11357 int misalign ATTRIBUTE_UNUSED)
11359 unsigned elements;
11361 switch (type_of_cost)
11363 case scalar_stmt:
11364 return current_tune->vec_costs->scalar_stmt_cost;
11366 case scalar_load:
11367 return current_tune->vec_costs->scalar_load_cost;
11369 case scalar_store:
11370 return current_tune->vec_costs->scalar_store_cost;
11372 case vector_stmt:
11373 return current_tune->vec_costs->vec_stmt_cost;
11375 case vector_load:
11376 return current_tune->vec_costs->vec_align_load_cost;
11378 case vector_store:
11379 return current_tune->vec_costs->vec_store_cost;
11381 case vec_to_scalar:
11382 return current_tune->vec_costs->vec_to_scalar_cost;
11384 case scalar_to_vec:
11385 return current_tune->vec_costs->scalar_to_vec_cost;
11387 case unaligned_load:
11388 case vector_gather_load:
11389 return current_tune->vec_costs->vec_unalign_load_cost;
11391 case unaligned_store:
11392 case vector_scatter_store:
11393 return current_tune->vec_costs->vec_unalign_store_cost;
11395 case cond_branch_taken:
11396 return current_tune->vec_costs->cond_taken_branch_cost;
11398 case cond_branch_not_taken:
11399 return current_tune->vec_costs->cond_not_taken_branch_cost;
11401 case vec_perm:
11402 case vec_promote_demote:
11403 return current_tune->vec_costs->vec_stmt_cost;
11405 case vec_construct:
11406 elements = TYPE_VECTOR_SUBPARTS (vectype);
11407 return elements / 2 + 1;
11409 default:
11410 gcc_unreachable ();
11414 /* Implement targetm.vectorize.add_stmt_cost. */
11416 static unsigned
11417 arm_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
11418 struct _stmt_vec_info *stmt_info, int misalign,
11419 enum vect_cost_model_location where)
11421 unsigned *cost = (unsigned *) data;
11422 unsigned retval = 0;
11424 if (flag_vect_cost_model)
11426 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
11427 int stmt_cost = arm_builtin_vectorization_cost (kind, vectype, misalign);
11429 /* Statements in an inner loop relative to the loop being
11430 vectorized are weighted more heavily. The value here is
11431 arbitrary and could potentially be improved with analysis. */
11432 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
11433 count *= 50; /* FIXME. */
11435 retval = (unsigned) (count * stmt_cost);
11436 cost[where] += retval;
11439 return retval;
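
A minimal sketch of that accumulation, assuming a made-up flat per-statement cost instead of the per-tune vectorizer cost tables consulted by arm_builtin_vectorization_cost:

#include <stdio.h>

/* Sketch of the accumulation done by arm_add_stmt_cost above.  */
enum where { PROLOGUE, BODY, EPILOGUE, NUM_WHERE };

static unsigned
add_stmt_cost (unsigned *buckets, int count, int stmt_cost,
               enum where where, int in_inner_loop)
{
  /* Statements in an inner loop relative to the loop being vectorized
     are weighted more heavily (same arbitrary factor of 50).  */
  if (where == BODY && in_inner_loop)
    count *= 50;

  unsigned retval = (unsigned) (count * stmt_cost);
  buckets[where] += retval;
  return retval;
}

int
main (void)
{
  unsigned buckets[NUM_WHERE] = { 0, 0, 0 };
  add_stmt_cost (buckets, 2, 1, BODY, 0);       /* ordinary body stmts   */
  add_stmt_cost (buckets, 1, 1, BODY, 1);       /* inner-loop stmt, x50  */
  printf ("body cost = %u\n", buckets[BODY]);   /* prints: 52            */
  return 0;
}
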
11442 /* Return true if and only if this insn can dual-issue only as older. */
11443 static bool
11444 cortexa7_older_only (rtx_insn *insn)
11446 if (recog_memoized (insn) < 0)
11447 return false;
11449 switch (get_attr_type (insn))
11451 case TYPE_ALU_DSP_REG:
11452 case TYPE_ALU_SREG:
11453 case TYPE_ALUS_SREG:
11454 case TYPE_LOGIC_REG:
11455 case TYPE_LOGICS_REG:
11456 case TYPE_ADC_REG:
11457 case TYPE_ADCS_REG:
11458 case TYPE_ADR:
11459 case TYPE_BFM:
11460 case TYPE_REV:
11461 case TYPE_MVN_REG:
11462 case TYPE_SHIFT_IMM:
11463 case TYPE_SHIFT_REG:
11464 case TYPE_LOAD_BYTE:
11465 case TYPE_LOAD_4:
11466 case TYPE_STORE_4:
11467 case TYPE_FFARITHS:
11468 case TYPE_FADDS:
11469 case TYPE_FFARITHD:
11470 case TYPE_FADDD:
11471 case TYPE_FMOV:
11472 case TYPE_F_CVT:
11473 case TYPE_FCMPS:
11474 case TYPE_FCMPD:
11475 case TYPE_FCONSTS:
11476 case TYPE_FCONSTD:
11477 case TYPE_FMULS:
11478 case TYPE_FMACS:
11479 case TYPE_FMULD:
11480 case TYPE_FMACD:
11481 case TYPE_FDIVS:
11482 case TYPE_FDIVD:
11483 case TYPE_F_MRC:
11484 case TYPE_F_MRRC:
11485 case TYPE_F_FLAG:
11486 case TYPE_F_LOADS:
11487 case TYPE_F_STORES:
11488 return true;
11489 default:
11490 return false;
11494 /* Return true if and only if this insn can dual-issue as younger. */
11495 static bool
11496 cortexa7_younger (FILE *file, int verbose, rtx_insn *insn)
11498 if (recog_memoized (insn) < 0)
11500 if (verbose > 5)
11501 fprintf (file, ";; not cortexa7_younger %d\n", INSN_UID (insn));
11502 return false;
11505 switch (get_attr_type (insn))
11507 case TYPE_ALU_IMM:
11508 case TYPE_ALUS_IMM:
11509 case TYPE_LOGIC_IMM:
11510 case TYPE_LOGICS_IMM:
11511 case TYPE_EXTEND:
11512 case TYPE_MVN_IMM:
11513 case TYPE_MOV_IMM:
11514 case TYPE_MOV_REG:
11515 case TYPE_MOV_SHIFT:
11516 case TYPE_MOV_SHIFT_REG:
11517 case TYPE_BRANCH:
11518 case TYPE_CALL:
11519 return true;
11520 default:
11521 return false;
11526 /* Look for an instruction that can dual issue only as an older
11527 instruction, and move it in front of any instructions that can
11528 dual-issue as younger, while preserving the relative order of all
11529 other instructions in the ready list. This is a heuristic to help
11530 dual-issue in later cycles, by postponing issue of more flexible
11531 instructions. This heuristic may affect dual issue opportunities
11532 in the current cycle. */
11533 static void
11534 cortexa7_sched_reorder (FILE *file, int verbose, rtx_insn **ready,
11535 int *n_readyp, int clock)
11537 int i;
11538 int first_older_only = -1, first_younger = -1;
11540 if (verbose > 5)
11541 fprintf (file,
11542 ";; sched_reorder for cycle %d with %d insns in ready list\n",
11543 clock,
11544 *n_readyp);
11546 /* Traverse the ready list from the head (the instruction to issue
11547 first), looking for the first instruction that can issue as
11548 younger and the first instruction that can dual-issue only as
11549 older. */
11550 for (i = *n_readyp - 1; i >= 0; i--)
11552 rtx_insn *insn = ready[i];
11553 if (cortexa7_older_only (insn))
11555 first_older_only = i;
11556 if (verbose > 5)
11557 fprintf (file, ";; reorder older found %d\n", INSN_UID (insn));
11558 break;
11560 else if (cortexa7_younger (file, verbose, insn) && first_younger == -1)
11561 first_younger = i;
11564 /* Nothing to reorder because either no younger insn was found, or an insn
11565 that can dual-issue only as older appears before any insn that
11566 can dual-issue as younger. */
11567 if (first_younger == -1)
11569 if (verbose > 5)
11570 fprintf (file, ";; sched_reorder nothing to reorder as no younger\n");
11571 return;
11574 /* Nothing to reorder because no older-only insn in the ready list. */
11575 if (first_older_only == -1)
11577 if (verbose > 5)
11578 fprintf (file, ";; sched_reorder nothing to reorder as no older_only\n");
11579 return;
11582 /* Move first_older_only insn before first_younger. */
11583 if (verbose > 5)
11584 fprintf (file, ";; cortexa7_sched_reorder insn %d before %d\n",
11585 INSN_UID(ready [first_older_only]),
11586 INSN_UID(ready [first_younger]));
11587 rtx_insn *first_older_only_insn = ready [first_older_only];
11588 for (i = first_older_only; i < first_younger; i++)
11590 ready[i] = ready[i+1];
11593 ready[i] = first_older_only_insn;
11594 return;
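
The rotation itself is just a single-element move within the ready array, where higher indices are closer to the head (issued first). A standalone sketch on plain ints, with invented values standing in for insns:

#include <stdio.h>

/* Standalone model of the ready-list rotation above: the element at
   FROM is moved up to position TO, everything in between slides down
   one slot, so the relative order of all other elements is kept.  */
static void
move_up (int *ready, int from, int to)
{
  int tmp = ready[from];
  for (int i = from; i < to; i++)
    ready[i] = ready[i + 1];
  ready[to] = tmp;
}

int
main (void)
{
  /* Pretend value 7 is the only "older-only" insn (index 1) and value 3
     (index 3) is the first "younger" insn found from the head end.  */
  int ready[5] = { 9, 7, 5, 3, 1 };   /* index 4 is the head */
  move_up (ready, 1, 3);              /* issue 7 in front of 3 */
  for (int i = 0; i < 5; i++)
    printf ("%d ", ready[i]);         /* prints: 9 5 3 7 1 */
  printf ("\n");
  return 0;
}
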
11597 /* Implement TARGET_SCHED_REORDER. */
11598 static int
11599 arm_sched_reorder (FILE *file, int verbose, rtx_insn **ready, int *n_readyp,
11600 int clock)
11602 switch (arm_tune)
11604 case TARGET_CPU_cortexa7:
11605 cortexa7_sched_reorder (file, verbose, ready, n_readyp, clock);
11606 break;
11607 default:
11608 /* Do nothing for other cores. */
11609 break;
11612 return arm_issue_rate ();
11615 /* This function implements the target macro TARGET_SCHED_ADJUST_COST.
11616 It corrects the value of COST based on the relationship between
11617 INSN and DEP through the dependence LINK. It returns the new
11618 value. There is a per-core adjust_cost hook to adjust scheduler costs
11619 and the per-core hook can choose to completely override the generic
11620 adjust_cost function. Only put bits of code into arm_adjust_cost that
11621 are common across all cores. */
11622 static int
11623 arm_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep, int cost,
11624 unsigned int)
11626 rtx i_pat, d_pat;
11628 /* When generating Thumb-1 code, we want to place flag-setting operations
11629 close to a conditional branch which depends on them, so that we can
11630 omit the comparison. */
11631 if (TARGET_THUMB1
11632 && dep_type == 0
11633 && recog_memoized (insn) == CODE_FOR_cbranchsi4_insn
11634 && recog_memoized (dep) >= 0
11635 && get_attr_conds (dep) == CONDS_SET)
11636 return 0;
11638 if (current_tune->sched_adjust_cost != NULL)
11640 if (!current_tune->sched_adjust_cost (insn, dep_type, dep, &cost))
11641 return cost;
11644 /* XXX Is this strictly true? */
11645 if (dep_type == REG_DEP_ANTI
11646 || dep_type == REG_DEP_OUTPUT)
11647 return 0;
11649 /* Call insns don't incur a stall, even if they follow a load. */
11650 if (dep_type == 0
11651 && CALL_P (insn))
11652 return 1;
11654 if ((i_pat = single_set (insn)) != NULL
11655 && MEM_P (SET_SRC (i_pat))
11656 && (d_pat = single_set (dep)) != NULL
11657 && MEM_P (SET_DEST (d_pat)))
11659 rtx src_mem = XEXP (SET_SRC (i_pat), 0);
11660 /* This is a load after a store; there is no conflict if the load reads
11661 from a cached area. Assume that loads from the stack, and from the
11662 constant pool are cached, and that others will miss. This is a
11663 hack. */
11665 if ((GET_CODE (src_mem) == SYMBOL_REF
11666 && CONSTANT_POOL_ADDRESS_P (src_mem))
11667 || reg_mentioned_p (stack_pointer_rtx, src_mem)
11668 || reg_mentioned_p (frame_pointer_rtx, src_mem)
11669 || reg_mentioned_p (hard_frame_pointer_rtx, src_mem))
11670 return 1;
11673 return cost;
11677 arm_max_conditional_execute (void)
11679 return max_insns_skipped;
11682 static int
11683 arm_default_branch_cost (bool speed_p, bool predictable_p ATTRIBUTE_UNUSED)
11685 if (TARGET_32BIT)
11686 return (TARGET_THUMB2 && !speed_p) ? 1 : 4;
11687 else
11688 return (optimize > 0) ? 2 : 0;
11691 static int
11692 arm_cortex_a5_branch_cost (bool speed_p, bool predictable_p)
11694 return speed_p ? 0 : arm_default_branch_cost (speed_p, predictable_p);
11697 /* Thumb-2 branches are relatively cheap on Cortex-M processors ("1 + P cycles"
11698 on Cortex-M4, where P varies from 1 to 3 according to some criteria), since
11699 sequences of non-executed instructions in IT blocks probably take the same
11700 amount of time as executed instructions (and the IT instruction itself takes
11701 space in icache). This function was experimentally determined to give good
11702 results on a popular embedded benchmark. */
11704 static int
11705 arm_cortex_m_branch_cost (bool speed_p, bool predictable_p)
11707 return (TARGET_32BIT && speed_p) ? 1
11708 : arm_default_branch_cost (speed_p, predictable_p);
11711 static int
11712 arm_cortex_m7_branch_cost (bool speed_p, bool predictable_p)
11714 return speed_p ? 0 : arm_default_branch_cost (speed_p, predictable_p);
11717 static bool fp_consts_inited = false;
11719 static REAL_VALUE_TYPE value_fp0;
11721 static void
11722 init_fp_table (void)
11724 REAL_VALUE_TYPE r;
11726 r = REAL_VALUE_ATOF ("0", DFmode);
11727 value_fp0 = r;
11728 fp_consts_inited = true;
11731 /* Return TRUE if rtx X is a valid immediate FP constant. */
11733 arm_const_double_rtx (rtx x)
11735 const REAL_VALUE_TYPE *r;
11737 if (!fp_consts_inited)
11738 init_fp_table ();
11740 r = CONST_DOUBLE_REAL_VALUE (x);
11741 if (REAL_VALUE_MINUS_ZERO (*r))
11742 return 0;
11744 if (real_equal (r, &value_fp0))
11745 return 1;
11747 return 0;
11750 /* VFPv3 has a fairly wide range of representable immediates, formed from
11751 "quarter-precision" floating-point values. These can be evaluated using this
11752 formula (with ^ for exponentiation):
11754 (-1)^s * n * 2^-r
11756 Where 's' is a sign bit (0/1), 'n' and 'r' are integers such that
11757 16 <= n <= 31 and 0 <= r <= 7.
11759 These values are mapped onto an 8-bit integer ABCDEFGH s.t.
11761 - A (most-significant) is the sign bit.
11762 - BCD are the exponent (encoded as r XOR 3).
11763 - EFGH are the mantissa (encoded as n - 16).
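
A hypothetical standalone decoder for that 8-bit encoding, inverting the mapping above (A is the sign, BCD is r XOR 3, EFGH is n - 16) and evaluating (-1)^s * n * 2^-r:

#include <stdio.h>

/* Decode an ABCDEFGH immediate per the scheme described above.  */
static double
vfp3_decode_imm8 (unsigned char abcdefgh)
{
  int s = (abcdefgh >> 7) & 1;              /* A: sign            */
  int r = ((abcdefgh >> 4) & 7) ^ 3;        /* BCD: r XOR 3       */
  int n = (abcdefgh & 0xf) + 16;            /* EFGH: n - 16       */
  double value = (double) n / (double) (1 << r);
  return s ? -value : value;
}

int
main (void)
{
  /* Prints 2 then 1: imm8 0x00 encodes 2.0 and 0x70 encodes 1.0.  */
  printf ("%g\n", vfp3_decode_imm8 (0x00));
  printf ("%g\n", vfp3_decode_imm8 (0x70));
  return 0;
}
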
11766 /* Return an integer index for a VFPv3 immediate operand X suitable for the
11767 fconst[sd] instruction, or -1 if X isn't suitable. */
11768 static int
11769 vfp3_const_double_index (rtx x)
11771 REAL_VALUE_TYPE r, m;
11772 int sign, exponent;
11773 unsigned HOST_WIDE_INT mantissa, mant_hi;
11774 unsigned HOST_WIDE_INT mask;
11775 int point_pos = 2 * HOST_BITS_PER_WIDE_INT - 1;
11776 bool fail;
11778 if (!TARGET_VFP3 || !CONST_DOUBLE_P (x))
11779 return -1;
11781 r = *CONST_DOUBLE_REAL_VALUE (x);
11783 /* We can't represent these things, so detect them first. */
11784 if (REAL_VALUE_ISINF (r) || REAL_VALUE_ISNAN (r) || REAL_VALUE_MINUS_ZERO (r))
11785 return -1;
11787 /* Extract sign, exponent and mantissa. */
11788 sign = REAL_VALUE_NEGATIVE (r) ? 1 : 0;
11789 r = real_value_abs (&r);
11790 exponent = REAL_EXP (&r);
11791 /* For the mantissa, we expand into two HOST_WIDE_INTS, apart from the
11792 highest (sign) bit, with a fixed binary point at bit point_pos.
11793 WARNING: If there's ever a VFP version which uses more than 2 * H_W_I - 1
11794 bits for the mantissa, this may fail (low bits would be lost). */
11795 real_ldexp (&m, &r, point_pos - exponent);
11796 wide_int w = real_to_integer (&m, &fail, HOST_BITS_PER_WIDE_INT * 2);
11797 mantissa = w.elt (0);
11798 mant_hi = w.elt (1);
11800 /* If there are bits set in the low part of the mantissa, we can't
11801 represent this value. */
11802 if (mantissa != 0)
11803 return -1;
11805 /* Now make it so that mantissa contains the most-significant bits, and move
11806 the point_pos to indicate that the least-significant bits have been
11807 discarded. */
11808 point_pos -= HOST_BITS_PER_WIDE_INT;
11809 mantissa = mant_hi;
11811 /* We can permit four significant bits of mantissa only, plus a high bit
11812 which is always 1. */
11813 mask = (HOST_WIDE_INT_1U << (point_pos - 5)) - 1;
11814 if ((mantissa & mask) != 0)
11815 return -1;
11817 /* Now we know the mantissa is in range, chop off the unneeded bits. */
11818 mantissa >>= point_pos - 5;
11820 /* The mantissa may be zero. Disallow that case. (It's possible to load the
11821 floating-point immediate zero with Neon using an integer-zero load, but
11822 that case is handled elsewhere.) */
11823 if (mantissa == 0)
11824 return -1;
11826 gcc_assert (mantissa >= 16 && mantissa <= 31);
11828 /* The value of 5 here would be 4 if GCC used IEEE754-like encoding (where
11829 normalized significands are in the range [1, 2). (Our mantissa is shifted
11830 left 4 places at this point relative to normalized IEEE754 values). GCC
11831 internally uses [0.5, 1) (see real.c), so the exponent returned from
11832 REAL_EXP must be altered. */
11833 exponent = 5 - exponent;
11835 if (exponent < 0 || exponent > 7)
11836 return -1;
11838 /* Sign, mantissa and exponent are now in the correct form to plug into the
11839 formula described in the comment above. */
11840 return (sign << 7) | ((exponent ^ 3) << 4) | (mantissa - 16);
11843 /* Return TRUE if rtx X is a valid immediate VFPv3 constant. */
11845 vfp3_const_double_rtx (rtx x)
11847 if (!TARGET_VFP3)
11848 return 0;
11850 return vfp3_const_double_index (x) != -1;
11853 /* Recognize immediates which can be used in various Neon instructions. Legal
11854 immediates are described by the following table (for VMVN variants, the
11855 bitwise inverse of the constant shown is recognized. In either case, VMOV
11856 is output and the correct instruction to use for a given constant is chosen
11857 by the assembler). The constant shown is replicated across all elements of
11858 the destination vector.
11860 insn elems variant constant (binary)
11861 ---- ----- ------- -----------------
11862 vmov i32 0 00000000 00000000 00000000 abcdefgh
11863 vmov i32 1 00000000 00000000 abcdefgh 00000000
11864 vmov i32 2 00000000 abcdefgh 00000000 00000000
11865 vmov i32 3 abcdefgh 00000000 00000000 00000000
11866 vmov i16 4 00000000 abcdefgh
11867 vmov i16 5 abcdefgh 00000000
11868 vmvn i32 6 00000000 00000000 00000000 abcdefgh
11869 vmvn i32 7 00000000 00000000 abcdefgh 00000000
11870 vmvn i32 8 00000000 abcdefgh 00000000 00000000
11871 vmvn i32 9 abcdefgh 00000000 00000000 00000000
11872 vmvn i16 10 00000000 abcdefgh
11873 vmvn i16 11 abcdefgh 00000000
11874 vmov i32 12 00000000 00000000 abcdefgh 11111111
11875 vmvn i32 13 00000000 00000000 abcdefgh 11111111
11876 vmov i32 14 00000000 abcdefgh 11111111 11111111
11877 vmvn i32 15 00000000 abcdefgh 11111111 11111111
11878 vmov i8 16 abcdefgh
11879 vmov i64 17 aaaaaaaa bbbbbbbb cccccccc dddddddd
11880 eeeeeeee ffffffff gggggggg hhhhhhhh
11881 vmov f32 18 aBbbbbbc defgh000 00000000 00000000
11882 vmov f32 19 00000000 00000000 00000000 00000000
11884 For case 18, B = !b. Representable values are exactly those accepted by
11885 vfp3_const_double_index, but are output as floating-point numbers rather
11886 than indices.
11888 For case 19, we will change it to vmov.i32 when assembling.
11890 Variants 0-5 (inclusive) may also be used as immediates for the second
11891 operand of VORR/VBIC instructions.
11893 The INVERSE argument causes the bitwise inverse of the given operand to be
11894 recognized instead (used for recognizing legal immediates for the VAND/VORN
11895 pseudo-instructions). If INVERSE is true, the value placed in *MODCONST is
11896 *not* inverted (i.e. the pseudo-instruction forms vand/vorn should still be
11897 output, rather than the real insns vbic/vorr).
11899 INVERSE makes no difference to the recognition of float vectors.
11901 The return value is the variant of immediate as shown in the above table, or
11902 -1 if the given value doesn't match any of the listed patterns.
11904 static int
11905 neon_valid_immediate (rtx op, machine_mode mode, int inverse,
11906 rtx *modconst, int *elementwidth)
11908 #define CHECK(STRIDE, ELSIZE, CLASS, TEST) \
11909 matches = 1; \
11910 for (i = 0; i < idx; i += (STRIDE)) \
11911 if (!(TEST)) \
11912 matches = 0; \
11913 if (matches) \
11915 immtype = (CLASS); \
11916 elsize = (ELSIZE); \
11917 break; \
11920 unsigned int i, elsize = 0, idx = 0, n_elts;
11921 unsigned int innersize;
11922 unsigned char bytes[16];
11923 int immtype = -1, matches;
11924 unsigned int invmask = inverse ? 0xff : 0;
11925 bool vector = GET_CODE (op) == CONST_VECTOR;
11927 if (vector)
11928 n_elts = CONST_VECTOR_NUNITS (op);
11929 else
11931 n_elts = 1;
11932 if (mode == VOIDmode)
11933 mode = DImode;
11936 innersize = GET_MODE_UNIT_SIZE (mode);
11938 /* Vectors of float constants. */
11939 if (GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
11941 rtx el0 = CONST_VECTOR_ELT (op, 0);
11943 if (!vfp3_const_double_rtx (el0) && el0 != CONST0_RTX (GET_MODE (el0)))
11944 return -1;
11946 /* FP16 vectors cannot be represented. */
11947 if (GET_MODE_INNER (mode) == HFmode)
11948 return -1;
11950 /* All elements in the vector must be the same. Note that 0.0 and -0.0
11951 are distinct in this context. */
11952 if (!const_vec_duplicate_p (op))
11953 return -1;
11955 if (modconst)
11956 *modconst = CONST_VECTOR_ELT (op, 0);
11958 if (elementwidth)
11959 *elementwidth = 0;
11961 if (el0 == CONST0_RTX (GET_MODE (el0)))
11962 return 19;
11963 else
11964 return 18;
11967 /* The tricks done in the code below apply for little-endian vector layout.
11968 For big-endian vectors only allow vectors of the form { a, a, a..., a }.
11969 FIXME: Implement logic for big-endian vectors. */
11970 if (BYTES_BIG_ENDIAN && vector && !const_vec_duplicate_p (op))
11971 return -1;
11973 /* Splat vector constant out into a byte vector. */
11974 for (i = 0; i < n_elts; i++)
11976 rtx el = vector ? CONST_VECTOR_ELT (op, i) : op;
11977 unsigned HOST_WIDE_INT elpart;
11979 gcc_assert (CONST_INT_P (el));
11980 elpart = INTVAL (el);
11982 for (unsigned int byte = 0; byte < innersize; byte++)
11984 bytes[idx++] = (elpart & 0xff) ^ invmask;
11985 elpart >>= BITS_PER_UNIT;
11989 /* Sanity check. */
11990 gcc_assert (idx == GET_MODE_SIZE (mode));
11994 CHECK (4, 32, 0, bytes[i] == bytes[0] && bytes[i + 1] == 0
11995 && bytes[i + 2] == 0 && bytes[i + 3] == 0);
11997 CHECK (4, 32, 1, bytes[i] == 0 && bytes[i + 1] == bytes[1]
11998 && bytes[i + 2] == 0 && bytes[i + 3] == 0);
12000 CHECK (4, 32, 2, bytes[i] == 0 && bytes[i + 1] == 0
12001 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0);
12003 CHECK (4, 32, 3, bytes[i] == 0 && bytes[i + 1] == 0
12004 && bytes[i + 2] == 0 && bytes[i + 3] == bytes[3]);
12006 CHECK (2, 16, 4, bytes[i] == bytes[0] && bytes[i + 1] == 0);
12008 CHECK (2, 16, 5, bytes[i] == 0 && bytes[i + 1] == bytes[1]);
12010 CHECK (4, 32, 6, bytes[i] == bytes[0] && bytes[i + 1] == 0xff
12011 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);
12013 CHECK (4, 32, 7, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
12014 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);
12016 CHECK (4, 32, 8, bytes[i] == 0xff && bytes[i + 1] == 0xff
12017 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff);
12019 CHECK (4, 32, 9, bytes[i] == 0xff && bytes[i + 1] == 0xff
12020 && bytes[i + 2] == 0xff && bytes[i + 3] == bytes[3]);
12022 CHECK (2, 16, 10, bytes[i] == bytes[0] && bytes[i + 1] == 0xff);
12024 CHECK (2, 16, 11, bytes[i] == 0xff && bytes[i + 1] == bytes[1]);
12026 CHECK (4, 32, 12, bytes[i] == 0xff && bytes[i + 1] == bytes[1]
12027 && bytes[i + 2] == 0 && bytes[i + 3] == 0);
12029 CHECK (4, 32, 13, bytes[i] == 0 && bytes[i + 1] == bytes[1]
12030 && bytes[i + 2] == 0xff && bytes[i + 3] == 0xff);
12032 CHECK (4, 32, 14, bytes[i] == 0xff && bytes[i + 1] == 0xff
12033 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0);
12035 CHECK (4, 32, 15, bytes[i] == 0 && bytes[i + 1] == 0
12036 && bytes[i + 2] == bytes[2] && bytes[i + 3] == 0xff);
12038 CHECK (1, 8, 16, bytes[i] == bytes[0]);
12040 CHECK (1, 64, 17, (bytes[i] == 0 || bytes[i] == 0xff)
12041 && bytes[i] == bytes[(i + 8) % idx]);
12043 while (0);
12045 if (immtype == -1)
12046 return -1;
12048 if (elementwidth)
12049 *elementwidth = elsize;
12051 if (modconst)
12053 unsigned HOST_WIDE_INT imm = 0;
12055 /* Un-invert bytes of recognized vector, if necessary. */
12056 if (invmask != 0)
12057 for (i = 0; i < idx; i++)
12058 bytes[i] ^= invmask;
12060 if (immtype == 17)
12062 /* FIXME: Broken on 32-bit H_W_I hosts. */
12063 gcc_assert (sizeof (HOST_WIDE_INT) == 8);
12065 for (i = 0; i < 8; i++)
12066 imm |= (unsigned HOST_WIDE_INT) (bytes[i] ? 0xff : 0)
12067 << (i * BITS_PER_UNIT);
12069 *modconst = GEN_INT (imm);
12071 else
12073 unsigned HOST_WIDE_INT imm = 0;
12075 for (i = 0; i < elsize / BITS_PER_UNIT; i++)
12076 imm |= (unsigned HOST_WIDE_INT) bytes[i] << (i * BITS_PER_UNIT);
12078 *modconst = GEN_INT (imm);
12082 return immtype;
12083 #undef CHECK
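
As a simplified, little-endian-only sketch of the byte-splat test above, the following checks variant 0 from the table (each 32-bit lane is 00000000 00000000 00000000 abcdefgh); the VMVN/inverse handling and all other variants are omitted:

#include <stdint.h>
#include <stdio.h>

/* Expand each 32-bit lane into bytes exactly as the loop above does,
   then test the variant-0 pattern: bytes[i] == bytes[0] and
   bytes[i+1..i+3] == 0 for every lane.  Illustrative sketch only.  */
static int
is_vmov_i32_variant0 (const uint32_t *lanes, int n_lanes)
{
  unsigned char bytes[16];
  int idx = 0;
  for (int i = 0; i < n_lanes; i++)
    {
      uint32_t elpart = lanes[i];
      for (int byte = 0; byte < 4; byte++)
        {
          bytes[idx++] = elpart & 0xff;
          elpart >>= 8;
        }
    }
  for (int i = 0; i < idx; i += 4)
    if (!(bytes[i] == bytes[0] && bytes[i + 1] == 0
          && bytes[i + 2] == 0 && bytes[i + 3] == 0))
      return 0;
  return 1;
}

int
main (void)
{
  uint32_t ok[4]  = { 0x5a, 0x5a, 0x5a, 0x5a };      /* vmov.i32 qN, #0x5a */
  uint32_t bad[4] = { 0x5a, 0x1234, 0x5a, 0x5a };
  printf ("%d %d\n", is_vmov_i32_variant0 (ok, 4),
          is_vmov_i32_variant0 (bad, 4));            /* prints: 1 0 */
  return 0;
}
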
12086 /* Return TRUE if rtx X is legal for use as either a Neon VMOV (or, implicitly,
12087 VMVN) immediate. Write back width per element to *ELEMENTWIDTH (or zero for
12088 float elements), and a modified constant (whatever should be output for a
12089 VMOV) in *MODCONST. */
12092 neon_immediate_valid_for_move (rtx op, machine_mode mode,
12093 rtx *modconst, int *elementwidth)
12095 rtx tmpconst;
12096 int tmpwidth;
12097 int retval = neon_valid_immediate (op, mode, 0, &tmpconst, &tmpwidth);
12099 if (retval == -1)
12100 return 0;
12102 if (modconst)
12103 *modconst = tmpconst;
12105 if (elementwidth)
12106 *elementwidth = tmpwidth;
12108 return 1;
12111 /* Return TRUE if rtx X is legal for use in a VORR or VBIC instruction. If
12112 the immediate is valid, write a constant suitable for using as an operand
12113 to VORR/VBIC/VAND/VORN to *MODCONST and the corresponding element width to
12114 *ELEMENTWIDTH. See neon_valid_immediate for description of INVERSE. */
12117 neon_immediate_valid_for_logic (rtx op, machine_mode mode, int inverse,
12118 rtx *modconst, int *elementwidth)
12120 rtx tmpconst;
12121 int tmpwidth;
12122 int retval = neon_valid_immediate (op, mode, inverse, &tmpconst, &tmpwidth);
12124 if (retval < 0 || retval > 5)
12125 return 0;
12127 if (modconst)
12128 *modconst = tmpconst;
12130 if (elementwidth)
12131 *elementwidth = tmpwidth;
12133 return 1;
12136 /* Return TRUE if rtx OP is legal for use in a VSHR or VSHL instruction. If
12137 the immediate is valid, write a constant suitable for using as an operand
12138 to VSHR/VSHL to *MODCONST and the corresponding element width to
12139 *ELEMENTWIDTH. ISLEFTSHIFT selects between left and right shifts,
12140 because they have different limitations. */
12143 neon_immediate_valid_for_shift (rtx op, machine_mode mode,
12144 rtx *modconst, int *elementwidth,
12145 bool isleftshift)
12147 unsigned int innersize = GET_MODE_UNIT_SIZE (mode);
12148 unsigned int n_elts = CONST_VECTOR_NUNITS (op), i;
12149 unsigned HOST_WIDE_INT last_elt = 0;
12150 unsigned HOST_WIDE_INT maxshift;
12152 /* All elements of the vector constant must be the same. */
12153 for (i = 0; i < n_elts; i++)
12155 rtx el = CONST_VECTOR_ELT (op, i);
12156 unsigned HOST_WIDE_INT elpart;
12158 if (CONST_INT_P (el))
12159 elpart = INTVAL (el);
12160 else if (CONST_DOUBLE_P (el))
12161 return 0;
12162 else
12163 gcc_unreachable ();
12165 if (i != 0 && elpart != last_elt)
12166 return 0;
12168 last_elt = elpart;
12171 /* Shift less than element size. */
12172 maxshift = innersize * 8;
12174 if (isleftshift)
12176 /* Left shift immediate value can be from 0 to <size>-1. */
12177 if (last_elt >= maxshift)
12178 return 0;
12180 else
12182 /* Right shift immediate value can be from 1 to <size>. */
12183 if (last_elt == 0 || last_elt > maxshift)
12184 return 0;
12187 if (elementwidth)
12188 *elementwidth = innersize * 8;
12190 if (modconst)
12191 *modconst = CONST_VECTOR_ELT (op, 0);
12193 return 1;
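
The range rule by itself is simple enough to restate standalone (the all-lanes-equal requirement checked above is assumed, not re-checked):

#include <stdio.h>

/* For an element of ELEM_BITS bits, an immediate left shift may be
   0 .. ELEM_BITS-1 and an immediate right shift 1 .. ELEM_BITS, per
   the limits enforced above.  */
static int
neon_shift_imm_ok (unsigned shift, unsigned elem_bits, int is_left)
{
  if (is_left)
    return shift < elem_bits;                 /* 0 .. size-1 */
  return shift >= 1 && shift <= elem_bits;    /* 1 .. size   */
}

int
main (void)
{
  printf ("%d\n", neon_shift_imm_ok (8, 8, 1));   /* 0: vshl.i8 by 8 invalid */
  printf ("%d\n", neon_shift_imm_ok (8, 8, 0));   /* 1: vshr.s8 by 8 valid   */
  return 0;
}
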
12196 /* Return a string suitable for output of Neon immediate logic operation
12197 MNEM. */
12199 char *
12200 neon_output_logic_immediate (const char *mnem, rtx *op2, machine_mode mode,
12201 int inverse, int quad)
12203 int width, is_valid;
12204 static char templ[40];
12206 is_valid = neon_immediate_valid_for_logic (*op2, mode, inverse, op2, &width);
12208 gcc_assert (is_valid != 0);
12210 if (quad)
12211 sprintf (templ, "%s.i%d\t%%q0, %%2", mnem, width);
12212 else
12213 sprintf (templ, "%s.i%d\t%%P0, %%2", mnem, width);
12215 return templ;
12218 /* Return a string suitable for output of Neon immediate shift operation
12219 (VSHR or VSHL) MNEM. */
12221 char *
12222 neon_output_shift_immediate (const char *mnem, char sign, rtx *op2,
12223 machine_mode mode, int quad,
12224 bool isleftshift)
12226 int width, is_valid;
12227 static char templ[40];
12229 is_valid = neon_immediate_valid_for_shift (*op2, mode, op2, &width, isleftshift);
12230 gcc_assert (is_valid != 0);
12232 if (quad)
12233 sprintf (templ, "%s.%c%d\t%%q0, %%q1, %%2", mnem, sign, width);
12234 else
12235 sprintf (templ, "%s.%c%d\t%%P0, %%P1, %%2", mnem, sign, width);
12237 return templ;
12240 /* Output a sequence of pairwise operations to implement a reduction.
12241 NOTE: We do "too much work" here, because pairwise operations work on two
12242 registers-worth of operands in one go. Unfortunately I don't think we can
12243 exploit those extra calculations to do the full operation in fewer steps.
12244 Although all vector elements of the result but the first are ignored, we
12245 actually calculate the same result in each of the elements. An alternative
12246 such as initially loading a vector with zero to use as each of the second
12247 operands would use up an additional register and take an extra instruction,
12248 for no particular gain. */
12250 void
12251 neon_pairwise_reduce (rtx op0, rtx op1, machine_mode mode,
12252 rtx (*reduc) (rtx, rtx, rtx))
12254 unsigned int i, parts = GET_MODE_SIZE (mode) / GET_MODE_UNIT_SIZE (mode);
12255 rtx tmpsum = op1;
12257 for (i = parts / 2; i >= 1; i /= 2)
12259 rtx dest = (i == 1) ? op0 : gen_reg_rtx (mode);
12260 emit_insn (reduc (dest, tmpsum, tmpsum));
12261 tmpsum = dest;
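
A scalar model of the same strategy: each step combines adjacent pairs and halves the number of live partial results, so an N-element reduction takes log2(N) steps (plain additions here stand in for vpadd):

#include <stdio.h>

/* Pairwise reduction sketch: repeatedly add adjacent pairs until a
   single value remains.  With vector pairwise ops each pass is one
   instruction; here it is just a loop.  */
static int
pairwise_reduce (int *v, int n)
{
  for (int parts = n; parts > 1; parts /= 2)
    for (int i = 0; i < parts / 2; i++)
      v[i] = v[2 * i] + v[2 * i + 1];
  return v[0];
}

int
main (void)
{
  int v[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
  printf ("%d\n", pairwise_reduce (v, 8));   /* prints: 36 */
  return 0;
}
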
12265 /* If VALS is a vector constant that can be loaded into a register
12266 using VDUP, generate instructions to do so and return an RTX to
12267 assign to the register. Otherwise return NULL_RTX. */
12269 static rtx
12270 neon_vdup_constant (rtx vals)
12272 machine_mode mode = GET_MODE (vals);
12273 machine_mode inner_mode = GET_MODE_INNER (mode);
12274 rtx x;
12276 if (GET_CODE (vals) != CONST_VECTOR || GET_MODE_SIZE (inner_mode) > 4)
12277 return NULL_RTX;
12279 if (!const_vec_duplicate_p (vals, &x))
12280 /* The elements are not all the same. We could handle repeating
12281 patterns of a mode larger than INNER_MODE here (e.g. int8x8_t
12282 {0, C, 0, C, 0, C, 0, C} which can be loaded using
12283 vdup.i16). */
12284 return NULL_RTX;
12286 /* We can load this constant by using VDUP and a constant in a
12287 single ARM register. This will be cheaper than a vector
12288 load. */
12290 x = copy_to_mode_reg (inner_mode, x);
12291 return gen_vec_duplicate (mode, x);
12294 /* Generate code to load VALS, which is a PARALLEL containing only
12295 constants (for vec_init) or CONST_VECTOR, efficiently into a
12296 register. Returns an RTX to copy into the register, or NULL_RTX
12297 for a PARALLEL that cannot be converted into a CONST_VECTOR. */
12300 neon_make_constant (rtx vals)
12302 machine_mode mode = GET_MODE (vals);
12303 rtx target;
12304 rtx const_vec = NULL_RTX;
12305 int n_elts = GET_MODE_NUNITS (mode);
12306 int n_const = 0;
12307 int i;
12309 if (GET_CODE (vals) == CONST_VECTOR)
12310 const_vec = vals;
12311 else if (GET_CODE (vals) == PARALLEL)
12313 /* A CONST_VECTOR must contain only CONST_INTs and
12314 CONST_DOUBLEs, but CONSTANT_P allows more (e.g. SYMBOL_REF).
12315 Only store valid constants in a CONST_VECTOR. */
12316 for (i = 0; i < n_elts; ++i)
12318 rtx x = XVECEXP (vals, 0, i);
12319 if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
12320 n_const++;
12322 if (n_const == n_elts)
12323 const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
12325 else
12326 gcc_unreachable ();
12328 if (const_vec != NULL
12329 && neon_immediate_valid_for_move (const_vec, mode, NULL, NULL))
12330 /* Load using VMOV. On Cortex-A8 this takes one cycle. */
12331 return const_vec;
12332 else if ((target = neon_vdup_constant (vals)) != NULL_RTX)
12333 /* Loaded using VDUP. On Cortex-A8 the VDUP takes one NEON
12334 pipeline cycle; creating the constant takes one or two ARM
12335 pipeline cycles. */
12336 return target;
12337 else if (const_vec != NULL_RTX)
12338 /* Load from constant pool. On Cortex-A8 this takes two cycles
12339 (for either double or quad vectors). We cannot take advantage
12340 of single-cycle VLD1 because we need a PC-relative addressing
12341 mode. */
12342 return const_vec;
12343 else
12344 /* A PARALLEL containing something not valid inside CONST_VECTOR.
12345 We cannot construct an initializer. */
12346 return NULL_RTX;
12349 /* Initialize vector TARGET to VALS. */
12351 void
12352 neon_expand_vector_init (rtx target, rtx vals)
12354 machine_mode mode = GET_MODE (target);
12355 machine_mode inner_mode = GET_MODE_INNER (mode);
12356 int n_elts = GET_MODE_NUNITS (mode);
12357 int n_var = 0, one_var = -1;
12358 bool all_same = true;
12359 rtx x, mem;
12360 int i;
12362 for (i = 0; i < n_elts; ++i)
12364 x = XVECEXP (vals, 0, i);
12365 if (!CONSTANT_P (x))
12366 ++n_var, one_var = i;
12368 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
12369 all_same = false;
12372 if (n_var == 0)
12374 rtx constant = neon_make_constant (vals);
12375 if (constant != NULL_RTX)
12377 emit_move_insn (target, constant);
12378 return;
12382 /* Splat a single non-constant element if we can. */
12383 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
12385 x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, 0));
12386 emit_insn (gen_rtx_SET (target, gen_vec_duplicate (mode, x)));
12387 return;
12390 /* One field is non-constant. Load constant then overwrite varying
12391 field. This is more efficient than using the stack. */
12392 if (n_var == 1)
12394 rtx copy = copy_rtx (vals);
12395 rtx index = GEN_INT (one_var);
12397 /* Load constant part of vector, substitute neighboring value for
12398 varying element. */
12399 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
12400 neon_expand_vector_init (target, copy);
12402 /* Insert variable. */
12403 x = copy_to_mode_reg (inner_mode, XVECEXP (vals, 0, one_var));
12404 switch (mode)
12406 case E_V8QImode:
12407 emit_insn (gen_neon_vset_lanev8qi (target, x, target, index));
12408 break;
12409 case E_V16QImode:
12410 emit_insn (gen_neon_vset_lanev16qi (target, x, target, index));
12411 break;
12412 case E_V4HImode:
12413 emit_insn (gen_neon_vset_lanev4hi (target, x, target, index));
12414 break;
12415 case E_V8HImode:
12416 emit_insn (gen_neon_vset_lanev8hi (target, x, target, index));
12417 break;
12418 case E_V2SImode:
12419 emit_insn (gen_neon_vset_lanev2si (target, x, target, index));
12420 break;
12421 case E_V4SImode:
12422 emit_insn (gen_neon_vset_lanev4si (target, x, target, index));
12423 break;
12424 case E_V2SFmode:
12425 emit_insn (gen_neon_vset_lanev2sf (target, x, target, index));
12426 break;
12427 case E_V4SFmode:
12428 emit_insn (gen_neon_vset_lanev4sf (target, x, target, index));
12429 break;
12430 case E_V2DImode:
12431 emit_insn (gen_neon_vset_lanev2di (target, x, target, index));
12432 break;
12433 default:
12434 gcc_unreachable ();
12436 return;
12439 /* Construct the vector in memory one field at a time
12440 and load the whole vector. */
12441 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
12442 for (i = 0; i < n_elts; i++)
12443 emit_move_insn (adjust_address_nv (mem, inner_mode,
12444 i * GET_MODE_SIZE (inner_mode)),
12445 XVECEXP (vals, 0, i));
12446 emit_move_insn (target, mem);
12449 /* Ensure OPERAND lies between LOW (inclusive) and HIGH (exclusive). Raise
12450 an error if it doesn't. EXP indicates the source location, which includes the
12451 inlining history for intrinsics. */
12453 static void
12454 bounds_check (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high,
12455 const_tree exp, const char *desc)
12457 HOST_WIDE_INT lane;
12459 gcc_assert (CONST_INT_P (operand));
12461 lane = INTVAL (operand);
12463 if (lane < low || lane >= high)
12465 if (exp)
12466 error ("%K%s %wd out of range %wd - %wd",
12467 exp, desc, lane, low, high - 1);
12468 else
12469 error ("%s %wd out of range %wd - %wd", desc, lane, low, high - 1);
12473 /* Bounds-check lanes. */
12475 void
12476 neon_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high,
12477 const_tree exp)
12479 bounds_check (operand, low, high, exp, "lane");
12482 /* Bounds-check constants. */
12484 void
12485 arm_const_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high)
12487 bounds_check (operand, low, high, NULL_TREE, "constant");
12490 HOST_WIDE_INT
12491 neon_element_bits (machine_mode mode)
12493 return GET_MODE_UNIT_BITSIZE (mode);
12497 /* Predicates for `match_operand' and `match_operator'. */
12499 /* Return TRUE if OP is a valid coprocessor memory address pattern.
12500 WB is true if full writeback address modes are allowed and is false
12501 if limited writeback address modes (POST_INC and PRE_DEC) are
12502 allowed. */
12505 arm_coproc_mem_operand (rtx op, bool wb)
12507 rtx ind;
12509 /* Reject eliminable registers. */
12510 if (! (reload_in_progress || reload_completed || lra_in_progress)
12511 && ( reg_mentioned_p (frame_pointer_rtx, op)
12512 || reg_mentioned_p (arg_pointer_rtx, op)
12513 || reg_mentioned_p (virtual_incoming_args_rtx, op)
12514 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
12515 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
12516 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
12517 return FALSE;
12519 /* Constants are converted into offsets from labels. */
12520 if (!MEM_P (op))
12521 return FALSE;
12523 ind = XEXP (op, 0);
12525 if (reload_completed
12526 && (GET_CODE (ind) == LABEL_REF
12527 || (GET_CODE (ind) == CONST
12528 && GET_CODE (XEXP (ind, 0)) == PLUS
12529 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
12530 && CONST_INT_P (XEXP (XEXP (ind, 0), 1)))))
12531 return TRUE;
12533 /* Match: (mem (reg)). */
12534 if (REG_P (ind))
12535 return arm_address_register_rtx_p (ind, 0);
12537 /* Autoincrement addressing modes. POST_INC and PRE_DEC are
12538 acceptable in any case (subject to verification by
12539 arm_address_register_rtx_p). We need WB to be true to accept
12540 PRE_INC and POST_DEC. */
12541 if (GET_CODE (ind) == POST_INC
12542 || GET_CODE (ind) == PRE_DEC
12543 || (wb
12544 && (GET_CODE (ind) == PRE_INC
12545 || GET_CODE (ind) == POST_DEC)))
12546 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
12548 if (wb
12549 && (GET_CODE (ind) == POST_MODIFY || GET_CODE (ind) == PRE_MODIFY)
12550 && arm_address_register_rtx_p (XEXP (ind, 0), 0)
12551 && GET_CODE (XEXP (ind, 1)) == PLUS
12552 && rtx_equal_p (XEXP (XEXP (ind, 1), 0), XEXP (ind, 0)))
12553 ind = XEXP (ind, 1);
12555 /* Match:
12556 (plus (reg)
12557 (const)). */
12558 if (GET_CODE (ind) == PLUS
12559 && REG_P (XEXP (ind, 0))
12560 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
12561 && CONST_INT_P (XEXP (ind, 1))
12562 && INTVAL (XEXP (ind, 1)) > -1024
12563 && INTVAL (XEXP (ind, 1)) < 1024
12564 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
12565 return TRUE;
12567 return FALSE;
12570 /* Return TRUE if OP is a memory operand which we can load or store a vector
12571 to/from. TYPE is one of the following values:
12572 0 - Vector load/store (vldr)
12573 1 - Core registers (ldm)
12574 2 - Element/structure loads (vld1)
12577 neon_vector_mem_operand (rtx op, int type, bool strict)
12579 rtx ind;
12581 /* Reject eliminable registers. */
12582 if (strict && ! (reload_in_progress || reload_completed)
12583 && (reg_mentioned_p (frame_pointer_rtx, op)
12584 || reg_mentioned_p (arg_pointer_rtx, op)
12585 || reg_mentioned_p (virtual_incoming_args_rtx, op)
12586 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
12587 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
12588 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
12589 return FALSE;
12591 /* Constants are converted into offsets from labels. */
12592 if (!MEM_P (op))
12593 return FALSE;
12595 ind = XEXP (op, 0);
12597 if (reload_completed
12598 && (GET_CODE (ind) == LABEL_REF
12599 || (GET_CODE (ind) == CONST
12600 && GET_CODE (XEXP (ind, 0)) == PLUS
12601 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
12602 && CONST_INT_P (XEXP (XEXP (ind, 0), 1)))))
12603 return TRUE;
12605 /* Match: (mem (reg)). */
12606 if (REG_P (ind))
12607 return arm_address_register_rtx_p (ind, 0);
12609 /* Allow post-increment with Neon registers. */
12610 if ((type != 1 && GET_CODE (ind) == POST_INC)
12611 || (type == 0 && GET_CODE (ind) == PRE_DEC))
12612 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
12614 /* Allow post-increment by register for VLDn */
12615 if (type == 2 && GET_CODE (ind) == POST_MODIFY
12616 && GET_CODE (XEXP (ind, 1)) == PLUS
12617 && REG_P (XEXP (XEXP (ind, 1), 1)))
12618 return true;
12620 /* Match:
12621 (plus (reg)
12622 (const)). */
12623 if (type == 0
12624 && GET_CODE (ind) == PLUS
12625 && REG_P (XEXP (ind, 0))
12626 && REG_MODE_OK_FOR_BASE_P (XEXP (ind, 0), VOIDmode)
12627 && CONST_INT_P (XEXP (ind, 1))
12628 && INTVAL (XEXP (ind, 1)) > -1024
12629 /* For quad modes, we restrict the constant offset to be slightly less
12630 than what the instruction format permits. We have no such constraint
12631 on double mode offsets. (This must match arm_legitimate_index_p.) */
12632 && (INTVAL (XEXP (ind, 1))
12633 < (VALID_NEON_QREG_MODE (GET_MODE (op))? 1016 : 1024))
12634 && (INTVAL (XEXP (ind, 1)) & 3) == 0)
12635 return TRUE;
12637 return FALSE;
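
The reg+offset rule accepted above can be restated as a small standalone predicate (a sketch only; the real test also validates the base register and the surrounding MEM):

#include <stdio.h>

/* The offset must be a multiple of 4 in (-1024, 1024), with the upper
   bound tightened to 1016 for quad (128-bit) modes to match
   arm_legitimate_index_p, as noted above.  */
static int
neon_vldr_offset_ok (long offset, int is_quad)
{
  long upper = is_quad ? 1016 : 1024;
  return offset > -1024 && offset < upper && (offset & 3) == 0;
}

int
main (void)
{
  printf ("%d\n", neon_vldr_offset_ok (1020, 0));   /* 1 for a D-reg mode  */
  printf ("%d\n", neon_vldr_offset_ok (1020, 1));   /* 0 for a Q-reg mode  */
  printf ("%d\n", neon_vldr_offset_ok (6, 0));      /* 0, not word-aligned */
  return 0;
}
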
12640 /* Return TRUE if OP is a mem suitable for loading/storing a Neon struct
12641 type. */
12643 neon_struct_mem_operand (rtx op)
12645 rtx ind;
12647 /* Reject eliminable registers. */
12648 if (! (reload_in_progress || reload_completed)
12649 && ( reg_mentioned_p (frame_pointer_rtx, op)
12650 || reg_mentioned_p (arg_pointer_rtx, op)
12651 || reg_mentioned_p (virtual_incoming_args_rtx, op)
12652 || reg_mentioned_p (virtual_outgoing_args_rtx, op)
12653 || reg_mentioned_p (virtual_stack_dynamic_rtx, op)
12654 || reg_mentioned_p (virtual_stack_vars_rtx, op)))
12655 return FALSE;
12657 /* Constants are converted into offsets from labels. */
12658 if (!MEM_P (op))
12659 return FALSE;
12661 ind = XEXP (op, 0);
12663 if (reload_completed
12664 && (GET_CODE (ind) == LABEL_REF
12665 || (GET_CODE (ind) == CONST
12666 && GET_CODE (XEXP (ind, 0)) == PLUS
12667 && GET_CODE (XEXP (XEXP (ind, 0), 0)) == LABEL_REF
12668 && CONST_INT_P (XEXP (XEXP (ind, 0), 1)))))
12669 return TRUE;
12671 /* Match: (mem (reg)). */
12672 if (REG_P (ind))
12673 return arm_address_register_rtx_p (ind, 0);
12675 /* vldm/vstm allows POST_INC (ia) and PRE_DEC (db). */
12676 if (GET_CODE (ind) == POST_INC
12677 || GET_CODE (ind) == PRE_DEC)
12678 return arm_address_register_rtx_p (XEXP (ind, 0), 0);
12680 return FALSE;
12683 /* Return true if X is a register that will be eliminated later on. */
12685 arm_eliminable_register (rtx x)
12687 return REG_P (x) && (REGNO (x) == FRAME_POINTER_REGNUM
12688 || REGNO (x) == ARG_POINTER_REGNUM
12689 || (REGNO (x) >= FIRST_VIRTUAL_REGISTER
12690 && REGNO (x) <= LAST_VIRTUAL_REGISTER));
12693 /* Return GENERAL_REGS if a scratch register required to reload x to/from
12694 coprocessor registers. Otherwise return NO_REGS. */
12696 enum reg_class
12697 coproc_secondary_reload_class (machine_mode mode, rtx x, bool wb)
12699 if (mode == HFmode)
12701 if (!TARGET_NEON_FP16 && !TARGET_VFP_FP16INST)
12702 return GENERAL_REGS;
12703 if (s_register_operand (x, mode) || neon_vector_mem_operand (x, 2, true))
12704 return NO_REGS;
12705 return GENERAL_REGS;
12708 /* The neon move patterns handle all legitimate vector and struct
12709 addresses. */
12710 if (TARGET_NEON
12711 && (MEM_P (x) || GET_CODE (x) == CONST_VECTOR)
12712 && (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
12713 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT
12714 || VALID_NEON_STRUCT_MODE (mode)))
12715 return NO_REGS;
12717 if (arm_coproc_mem_operand (x, wb) || s_register_operand (x, mode))
12718 return NO_REGS;
12720 return GENERAL_REGS;
12723 /* Values which must be returned in the most-significant end of the return
12724 register. */
12726 static bool
12727 arm_return_in_msb (const_tree valtype)
12729 return (TARGET_AAPCS_BASED
12730 && BYTES_BIG_ENDIAN
12731 && (AGGREGATE_TYPE_P (valtype)
12732 || TREE_CODE (valtype) == COMPLEX_TYPE
12733 || FIXED_POINT_TYPE_P (valtype)));
12736 /* Return TRUE if X references a SYMBOL_REF. */
12738 symbol_mentioned_p (rtx x)
12740 const char * fmt;
12741 int i;
12743 if (GET_CODE (x) == SYMBOL_REF)
12744 return 1;
12746 /* UNSPEC_TLS entries for a symbol include the SYMBOL_REF, but they
12747 are constant offsets, not symbols. */
12748 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
12749 return 0;
12751 fmt = GET_RTX_FORMAT (GET_CODE (x));
12753 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
12755 if (fmt[i] == 'E')
12757 int j;
12759 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12760 if (symbol_mentioned_p (XVECEXP (x, i, j)))
12761 return 1;
12763 else if (fmt[i] == 'e' && symbol_mentioned_p (XEXP (x, i)))
12764 return 1;
12767 return 0;
12770 /* Return TRUE if X references a LABEL_REF. */
12772 label_mentioned_p (rtx x)
12774 const char * fmt;
12775 int i;
12777 if (GET_CODE (x) == LABEL_REF)
12778 return 1;
12780 /* UNSPEC_TLS entries for a symbol include a LABEL_REF for the referencing
12781 instruction, but they are constant offsets, not symbols. */
12782 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
12783 return 0;
12785 fmt = GET_RTX_FORMAT (GET_CODE (x));
12786 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
12788 if (fmt[i] == 'E')
12790 int j;
12792 for (j = XVECLEN (x, i) - 1; j >= 0; j--)
12793 if (label_mentioned_p (XVECEXP (x, i, j)))
12794 return 1;
12796 else if (fmt[i] == 'e' && label_mentioned_p (XEXP (x, i)))
12797 return 1;
12800 return 0;
12804 tls_mentioned_p (rtx x)
12806 switch (GET_CODE (x))
12808 case CONST:
12809 return tls_mentioned_p (XEXP (x, 0));
12811 case UNSPEC:
12812 if (XINT (x, 1) == UNSPEC_TLS)
12813 return 1;
12815 /* Fall through. */
12816 default:
12817 return 0;
12821 /* Must not copy any rtx that uses a pc-relative address.
12822 Also, disallow copying of load-exclusive instructions that
12823 may appear after splitting of compare-and-swap-style operations
12824 so as to prevent those loops from being transformed away from their
12825 canonical forms (see PR 69904). */
12827 static bool
12828 arm_cannot_copy_insn_p (rtx_insn *insn)
12830 /* The tls call insn cannot be copied, as it is paired with a data
12831 word. */
12832 if (recog_memoized (insn) == CODE_FOR_tlscall)
12833 return true;
12835 subrtx_iterator::array_type array;
12836 FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
12838 const_rtx x = *iter;
12839 if (GET_CODE (x) == UNSPEC
12840 && (XINT (x, 1) == UNSPEC_PIC_BASE
12841 || XINT (x, 1) == UNSPEC_PIC_UNIFIED))
12842 return true;
12845 rtx set = single_set (insn);
12846 if (set)
12848 rtx src = SET_SRC (set);
12849 if (GET_CODE (src) == ZERO_EXTEND)
12850 src = XEXP (src, 0);
12852 /* Catch the load-exclusive and load-acquire operations. */
12853 if (GET_CODE (src) == UNSPEC_VOLATILE
12854 && (XINT (src, 1) == VUNSPEC_LL
12855 || XINT (src, 1) == VUNSPEC_LAX))
12856 return true;
12858 return false;
12861 enum rtx_code
12862 minmax_code (rtx x)
12864 enum rtx_code code = GET_CODE (x);
12866 switch (code)
12868 case SMAX:
12869 return GE;
12870 case SMIN:
12871 return LE;
12872 case UMIN:
12873 return LEU;
12874 case UMAX:
12875 return GEU;
12876 default:
12877 gcc_unreachable ();
12881 /* Match pair of min/max operators that can be implemented via usat/ssat. */
12883 bool
12884 arm_sat_operator_match (rtx lo_bound, rtx hi_bound,
12885 int *mask, bool *signed_sat)
12887 /* The high bound must be a power of two minus one. */
12888 int log = exact_log2 (INTVAL (hi_bound) + 1);
12889 if (log == -1)
12890 return false;
12892 /* The low bound is either zero (for usat) or one less than the
12893 negation of the high bound (for ssat). */
12894 if (INTVAL (lo_bound) == 0)
12896 if (mask)
12897 *mask = log;
12898 if (signed_sat)
12899 *signed_sat = false;
12901 return true;
12904 if (INTVAL (lo_bound) == -INTVAL (hi_bound) - 1)
12906 if (mask)
12907 *mask = log + 1;
12908 if (signed_sat)
12909 *signed_sat = true;
12911 return true;
12914 return false;
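
A standalone restatement of that matching, with exact_log2 replaced by an explicit power-of-two check; it returns the saturation bit width, or 0 when the bounds fit neither the usat nor the ssat form:

#include <stdio.h>

/* HI must be 2^k - 1.  LO == 0 selects usat with a k-bit mask;
   LO == -HI - 1 selects ssat with a (k+1)-bit mask.  */
static int
sat_match (long lo, long hi, int *is_signed)
{
  long v = hi + 1;
  if (v <= 0 || (v & (v - 1)) != 0)     /* hi + 1 must be a power of two */
    return 0;
  int log = 0;
  while ((1L << log) < v)
    log++;

  if (lo == 0)
    {
      *is_signed = 0;
      return log;                       /* usat #log      */
    }
  if (lo == -hi - 1)
    {
      *is_signed = 1;
      return log + 1;                   /* ssat #(log+1)  */
    }
  return 0;
}

int
main (void)
{
  int s;
  printf ("%d\n", sat_match (0, 255, &s));      /* 8, usat #8  */
  printf ("%d\n", sat_match (-128, 127, &s));   /* 8, ssat #8  */
  printf ("%d\n", sat_match (-100, 127, &s));   /* 0, no match */
  return 0;
}
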
12917 /* Return 1 if memory locations are adjacent. */
12919 adjacent_mem_locations (rtx a, rtx b)
12921 /* We don't guarantee to preserve the order of these memory refs. */
12922 if (volatile_refs_p (a) || volatile_refs_p (b))
12923 return 0;
12925 if ((REG_P (XEXP (a, 0))
12926 || (GET_CODE (XEXP (a, 0)) == PLUS
12927 && CONST_INT_P (XEXP (XEXP (a, 0), 1))))
12928 && (REG_P (XEXP (b, 0))
12929 || (GET_CODE (XEXP (b, 0)) == PLUS
12930 && CONST_INT_P (XEXP (XEXP (b, 0), 1)))))
12932 HOST_WIDE_INT val0 = 0, val1 = 0;
12933 rtx reg0, reg1;
12934 int val_diff;
12936 if (GET_CODE (XEXP (a, 0)) == PLUS)
12938 reg0 = XEXP (XEXP (a, 0), 0);
12939 val0 = INTVAL (XEXP (XEXP (a, 0), 1));
12941 else
12942 reg0 = XEXP (a, 0);
12944 if (GET_CODE (XEXP (b, 0)) == PLUS)
12946 reg1 = XEXP (XEXP (b, 0), 0);
12947 val1 = INTVAL (XEXP (XEXP (b, 0), 1));
12949 else
12950 reg1 = XEXP (b, 0);
12952 /* Don't accept any offset that will require multiple
12953 instructions to handle, since this would cause the
12954 arith_adjacentmem pattern to output an overlong sequence. */
12955 if (!const_ok_for_op (val0, PLUS) || !const_ok_for_op (val1, PLUS))
12956 return 0;
12958 /* Don't allow an eliminable register: register elimination can make
12959 the offset too large. */
12960 if (arm_eliminable_register (reg0))
12961 return 0;
12963 val_diff = val1 - val0;
12965 if (arm_ld_sched)
12967 /* If the target has load delay slots, then there's no benefit
12968 to using an ldm instruction unless the offset is zero and
12969 we are optimizing for size. */
12970 return (optimize_size && (REGNO (reg0) == REGNO (reg1))
12971 && (val0 == 0 || val1 == 0 || val0 == 4 || val1 == 4)
12972 && (val_diff == 4 || val_diff == -4));
12975 return ((REGNO (reg0) == REGNO (reg1))
12976 && (val_diff == 4 || val_diff == -4));
12979 return 0;
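
Reduced to its core, the adjacency test compares the base registers and requires the word offsets to differ by exactly 4; a sketch on plain (base, offset) pairs, ignoring the volatile, eliminable-register and const_ok_for_op checks done above:

#include <stdio.h>
#include <stdlib.h>

/* Two word accesses are candidates for merging when they share a base
   register and their constant offsets differ by exactly 4.  */
static int
adjacent_word_accesses (int base0, long off0, int base1, long off1)
{
  return base0 == base1 && labs (off1 - off0) == 4;
}

int
main (void)
{
  printf ("%d\n", adjacent_word_accesses (4, 8, 4, 12));   /* 1 */
  printf ("%d\n", adjacent_word_accesses (4, 8, 5, 12));   /* 0 */
  return 0;
}
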
12982 /* Return true if OP is a valid load or store multiple operation. LOAD is true
12983 for load operations, false for store operations. CONSECUTIVE is true
12984 if the register numbers in the operation must be consecutive in the register
12985 bank. RETURN_PC is true if the value is to be loaded into the PC.
12986 The pattern we are trying to match for load is:
12987 [(SET (R_d0) (MEM (PLUS (addr) (offset))))
12988 (SET (R_d1) (MEM (PLUS (addr) (offset + <reg_increment>))))
12991 (SET (R_dn) (MEM (PLUS (addr) (offset + n * <reg_increment>))))
12993 where
12994 1. If offset is 0, first insn should be (SET (R_d0) (MEM (src_addr))).
12995 2. REGNO (R_d0) < REGNO (R_d1) < ... < REGNO (R_dn).
12996 3. If consecutive is TRUE, then for kth register being loaded,
12997 REGNO (R_dk) = REGNO (R_d0) + k.
12998 The pattern for store is similar. */
12999 bool
13000 ldm_stm_operation_p (rtx op, bool load, machine_mode mode,
13001 bool consecutive, bool return_pc)
13003 HOST_WIDE_INT count = XVECLEN (op, 0);
13004 rtx reg, mem, addr;
13005 unsigned regno;
13006 unsigned first_regno;
13007 HOST_WIDE_INT i = 1, base = 0, offset = 0;
13008 rtx elt;
13009 bool addr_reg_in_reglist = false;
13010 bool update = false;
13011 int reg_increment;
13012 int offset_adj;
13013 int regs_per_val;
13015 /* If not in SImode, then registers must be consecutive
13016 (e.g., VLDM instructions for DFmode). */
13017 gcc_assert ((mode == SImode) || consecutive);
13018 /* Setting return_pc for stores is illegal. */
13019 gcc_assert (!return_pc || load);
13021 /* Set up the increments and the regs per val based on the mode. */
13022 reg_increment = GET_MODE_SIZE (mode);
13023 regs_per_val = reg_increment / 4;
13024 offset_adj = return_pc ? 1 : 0;
13026 if (count <= 1
13027 || GET_CODE (XVECEXP (op, 0, offset_adj)) != SET
13028 || (load && !REG_P (SET_DEST (XVECEXP (op, 0, offset_adj)))))
13029 return false;
13031 /* Check if this is a write-back. */
13032 elt = XVECEXP (op, 0, offset_adj);
13033 if (GET_CODE (SET_SRC (elt)) == PLUS)
13035 i++;
13036 base = 1;
13037 update = true;
13039 /* The offset adjustment must be the number of registers being
13040 popped times the size of a single register. */
13041 if (!REG_P (SET_DEST (elt))
13042 || !REG_P (XEXP (SET_SRC (elt), 0))
13043 || (REGNO (SET_DEST (elt)) != REGNO (XEXP (SET_SRC (elt), 0)))
13044 || !CONST_INT_P (XEXP (SET_SRC (elt), 1))
13045 || INTVAL (XEXP (SET_SRC (elt), 1)) !=
13046 ((count - 1 - offset_adj) * reg_increment))
13047 return false;
13050 i = i + offset_adj;
13051 base = base + offset_adj;
13052 /* Perform a quick check so we don't blow up below. If only one reg is loaded,
13053 success depends on the type: VLDM can do just one reg,
13054 LDM must do at least two. */
13055 if ((count <= i) && (mode == SImode))
13056 return false;
13058 elt = XVECEXP (op, 0, i - 1);
13059 if (GET_CODE (elt) != SET)
13060 return false;
13062 if (load)
13064 reg = SET_DEST (elt);
13065 mem = SET_SRC (elt);
13067 else
13069 reg = SET_SRC (elt);
13070 mem = SET_DEST (elt);
13073 if (!REG_P (reg) || !MEM_P (mem))
13074 return false;
13076 regno = REGNO (reg);
13077 first_regno = regno;
13078 addr = XEXP (mem, 0);
13079 if (GET_CODE (addr) == PLUS)
13081 if (!CONST_INT_P (XEXP (addr, 1)))
13082 return false;
13084 offset = INTVAL (XEXP (addr, 1));
13085 addr = XEXP (addr, 0);
13088 if (!REG_P (addr))
13089 return false;
13091 /* Don't allow SP to be loaded unless it is also the base register. It
13092 guarantees that SP is reset correctly when an LDM instruction
13093 is interrupted. Otherwise, we might end up with a corrupt stack. */
13094 if (load && (REGNO (reg) == SP_REGNUM) && (REGNO (addr) != SP_REGNUM))
13095 return false;
13097 for (; i < count; i++)
13099 elt = XVECEXP (op, 0, i);
13100 if (GET_CODE (elt) != SET)
13101 return false;
13103 if (load)
13105 reg = SET_DEST (elt);
13106 mem = SET_SRC (elt);
13108 else
13110 reg = SET_SRC (elt);
13111 mem = SET_DEST (elt);
13114 if (!REG_P (reg)
13115 || GET_MODE (reg) != mode
13116 || REGNO (reg) <= regno
13117 || (consecutive
13118 && (REGNO (reg) !=
13119 (unsigned int) (first_regno + regs_per_val * (i - base))))
13120 /* Don't allow SP to be loaded unless it is also the base register. It
13121 guarantees that SP is reset correctly when an LDM instruction
13122 is interrupted. Otherwise, we might end up with a corrupt stack. */
13123 || (load && (REGNO (reg) == SP_REGNUM) && (REGNO (addr) != SP_REGNUM))
13124 || !MEM_P (mem)
13125 || GET_MODE (mem) != mode
13126 || ((GET_CODE (XEXP (mem, 0)) != PLUS
13127 || !rtx_equal_p (XEXP (XEXP (mem, 0), 0), addr)
13128 || !CONST_INT_P (XEXP (XEXP (mem, 0), 1))
13129 || (INTVAL (XEXP (XEXP (mem, 0), 1)) !=
13130 offset + (i - base) * reg_increment))
13131 && (!REG_P (XEXP (mem, 0))
13132 || offset + (i - base) * reg_increment != 0)))
13133 return false;
13135 regno = REGNO (reg);
13136 if (regno == REGNO (addr))
13137 addr_reg_in_reglist = true;
13140 if (load)
13142 if (update && addr_reg_in_reglist)
13143 return false;
13145 /* For Thumb-1, the address register is always modified - either by write-back
13146 or by explicit load. If the pattern does not describe an update,
13147 then the address register must be in the list of loaded registers. */
13148 if (TARGET_THUMB1)
13149 return update || addr_reg_in_reglist;
13152 return true;
13155 /* Return true iff it would be profitable to turn a sequence of NOPS loads
13156 or stores (depending on IS_STORE) into a load-multiple or store-multiple
13157 instruction. ADD_OFFSET is nonzero if the base address register needs
13158 to be modified with an add instruction before we can use it. */
13160 static bool
13161 multiple_operation_profitable_p (bool is_store ATTRIBUTE_UNUSED,
13162 int nops, HOST_WIDE_INT add_offset)
13164 /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm
13165 if the offset isn't small enough. The reason 2 ldrs are faster
13166 is because these ARMs are able to do more than one cache access
13167 in a single cycle. The ARM9 and StrongARM have Harvard caches,
13168 whilst the ARM8 has a double bandwidth cache. This means that
13169 these cores can do both an instruction fetch and a data fetch in
13170 a single cycle, so the trick of calculating the address into a
13171 scratch register (one of the result regs) and then doing a load
13172 multiple actually becomes slower (and no smaller in code size).
13173 That is the transformation
13175 ldr rd1, [rbase + offset]
13176 ldr rd2, [rbase + offset + 4]
13180 add rd1, rbase, offset
13181 ldmia rd1, {rd1, rd2}
13183 produces worse code -- '3 cycles + any stalls on rd2' instead of
13184 '2 cycles + any stalls on rd2'. On ARMs with only one cache
13185 access per cycle, the first sequence could never complete in less
13186 than 6 cycles, whereas the ldm sequence would only take 5 and
13187 would make better use of sequential accesses if not hitting the
13188 cache.
13190 We cheat here and test 'arm_ld_sched' which we currently know to
13191 only be true for the ARM8, ARM9 and StrongARM. If this ever
13192 changes, then the test below needs to be reworked. */
13193 if (nops == 2 && arm_ld_sched && add_offset != 0)
13194 return false;
13196 /* XScale has load-store double instructions, but they have stricter
13197 alignment requirements than load-store multiple, so we cannot
13198 use them.
13200 For XScale ldm requires 2 + NREGS cycles to complete and blocks
13201 the pipeline until completion.
13203 NREGS CYCLES
13204 1 3
13205 2 4
13206 3 5
13207 4 6
13209 An ldr instruction takes 1-3 cycles, but does not block the
13210 pipeline.
13212 NREGS CYCLES
13213 1 1-3
13214 2 2-6
13215 3 3-9
13216 4 4-12
13218 Best case ldr will always win. However, the more ldr instructions
13219 we issue, the less likely we are to be able to schedule them well.
13220 Using ldr instructions also increases code size.
13222 As a compromise, we use ldr for counts of 1 or 2 regs, and ldm
13223 for counts of 3 or 4 regs. */
13224 if (nops <= 2 && arm_tune_xscale && !optimize_size)
13225 return false;
13226 return true;
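/* For illustration of the XScale heuristic above: with -mtune=xscale and
   not optimizing for size, a 2-register candidate is rejected because an
   ldm of two registers costs 2 + 2 = 4 cycles and blocks the pipeline,
   whereas the two separate ldr instructions can complete in as little as
   2 cycles without stalling it.  (Worked example derived from the cycle
   counts quoted above.)  */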
13229 /* Subroutine of load_multiple_sequence and store_multiple_sequence.
13230 Given an array of UNSORTED_OFFSETS, of which there are NOPS, compute
13231 an array ORDER which describes the sequence to use when accessing the
13232 offsets that produces an ascending order. In this sequence, each
13233 offset must be larger by exactly 4 than the previous one. ORDER[0]
13234 must have been filled in with the lowest offset by the caller.
13235 If UNSORTED_REGS is nonnull, it is an array of register numbers that
13236 we use to verify that ORDER produces an ascending order of registers.
13237 Return true if it was possible to construct such an order, false if
13238 not. */
13240 static bool
13241 compute_offset_order (int nops, HOST_WIDE_INT *unsorted_offsets, int *order,
13242 int *unsorted_regs)
13244 int i;
13245 for (i = 1; i < nops; i++)
13247 int j;
13249 order[i] = order[i - 1];
13250 for (j = 0; j < nops; j++)
13251 if (unsorted_offsets[j] == unsorted_offsets[order[i - 1]] + 4)
13253 /* We must find exactly one offset that is higher than the
13254 previous one by 4. */
13255 if (order[i] != order[i - 1])
13256 return false;
13257 order[i] = j;
13259 if (order[i] == order[i - 1])
13260 return false;
13261 /* The register numbers must be ascending. */
13262 if (unsorted_regs != NULL
13263 && unsorted_regs[order[i]] <= unsorted_regs[order[i - 1]])
13264 return false;
13266 return true;
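/* Worked example for compute_offset_order: with NOPS == 4 and
   UNSORTED_OFFSETS == {8, 0, 12, 4}, the caller presets ORDER[0] = 1 (the
   index of the lowest offset, 0).  The loop then looks for the unique
   offset that is exactly 4 larger at each step, giving ORDER == {1, 3, 0, 2},
   i.e. offsets 0, 4, 8, 12 in turn.  If UNSORTED_REGS is supplied, the
   register numbers at indices 1, 3, 0, 2 must also be strictly ascending.  */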
13269 /* Used to determine in a peephole whether a sequence of load
13270 instructions can be changed into a load-multiple instruction.
13271 NOPS is the number of separate load instructions we are examining. The
13272 first NOPS entries in OPERANDS are the destination registers, the
13273 next NOPS entries are memory operands. If this function is
13274 successful, *BASE is set to the common base register of the memory
13275 accesses; *LOAD_OFFSET is set to the first memory location's offset
13276 from that base register.
13277 REGS is an array filled in with the destination register numbers.
13278 SAVED_ORDER (if nonnull) is an array filled in with an order that maps
13279 insn numbers to an ascending order of loads. If CHECK_REGS is true,
13280 the sequence of registers in REGS matches the loads from ascending memory
13281 locations, and the function verifies that the register numbers are
13282 themselves ascending. If CHECK_REGS is false, the register numbers
13283 are stored in the order they are found in the operands. */
13284 static int
13285 load_multiple_sequence (rtx *operands, int nops, int *regs, int *saved_order,
13286 int *base, HOST_WIDE_INT *load_offset, bool check_regs)
13288 int unsorted_regs[MAX_LDM_STM_OPS];
13289 HOST_WIDE_INT unsorted_offsets[MAX_LDM_STM_OPS];
13290 int order[MAX_LDM_STM_OPS];
13291 rtx base_reg_rtx = NULL;
13292 int base_reg = -1;
13293 int i, ldm_case;
13295 /* Can only handle up to MAX_LDM_STM_OPS insns at present, though could be
13296 easily extended if required. */
13297 gcc_assert (nops >= 2 && nops <= MAX_LDM_STM_OPS);
13299 memset (order, 0, MAX_LDM_STM_OPS * sizeof (int));
13301 /* Loop over the operands and check that the memory references are
13302 suitable (i.e. immediate offsets from the same base register). At
13303 the same time, extract the target register, and the memory
13304 offsets. */
13305 for (i = 0; i < nops; i++)
13307 rtx reg;
13308 rtx offset;
13310 /* Convert a subreg of a mem into the mem itself. */
13311 if (GET_CODE (operands[nops + i]) == SUBREG)
13312 operands[nops + i] = alter_subreg (operands + (nops + i), true);
13314 gcc_assert (MEM_P (operands[nops + i]));
13316 /* Don't reorder volatile memory references; it doesn't seem worth
13317 looking for the case where the order is ok anyway. */
13318 if (MEM_VOLATILE_P (operands[nops + i]))
13319 return 0;
13321 offset = const0_rtx;
13323 if ((REG_P (reg = XEXP (operands[nops + i], 0))
13324 || (GET_CODE (reg) == SUBREG
13325 && REG_P (reg = SUBREG_REG (reg))))
13326 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
13327 && ((REG_P (reg = XEXP (XEXP (operands[nops + i], 0), 0)))
13328 || (GET_CODE (reg) == SUBREG
13329 && REG_P (reg = SUBREG_REG (reg))))
13330 && (CONST_INT_P (offset
13331 = XEXP (XEXP (operands[nops + i], 0), 1)))))
13333 if (i == 0)
13335 base_reg = REGNO (reg);
13336 base_reg_rtx = reg;
13337 if (TARGET_THUMB1 && base_reg > LAST_LO_REGNUM)
13338 return 0;
13340 else if (base_reg != (int) REGNO (reg))
13341 /* Not addressed from the same base register. */
13342 return 0;
13344 unsorted_regs[i] = (REG_P (operands[i])
13345 ? REGNO (operands[i])
13346 : REGNO (SUBREG_REG (operands[i])));
13348 /* If it isn't an integer register, or if it overwrites the
13349 base register but isn't the last insn in the list, then
13350 we can't do this. */
13351 if (unsorted_regs[i] < 0
13352 || (TARGET_THUMB1 && unsorted_regs[i] > LAST_LO_REGNUM)
13353 || unsorted_regs[i] > 14
13354 || (i != nops - 1 && unsorted_regs[i] == base_reg))
13355 return 0;
13357 /* Don't allow SP to be loaded unless it is also the base
13358 register. It guarantees that SP is reset correctly when
13359 an LDM instruction is interrupted. Otherwise, we might
13360 end up with a corrupt stack. */
13361 if (unsorted_regs[i] == SP_REGNUM && base_reg != SP_REGNUM)
13362 return 0;
13364 unsorted_offsets[i] = INTVAL (offset);
13365 if (i == 0 || unsorted_offsets[i] < unsorted_offsets[order[0]])
13366 order[0] = i;
13368 else
13369 /* Not a suitable memory address. */
13370 return 0;
13373 /* All the useful information has now been extracted from the
13374 operands into unsorted_regs and unsorted_offsets; additionally,
13375 order[0] has been set to the lowest offset in the list. Sort
13376 the offsets into order, verifying that they are adjacent, and
13377 check that the register numbers are ascending. */
13378 if (!compute_offset_order (nops, unsorted_offsets, order,
13379 check_regs ? unsorted_regs : NULL))
13380 return 0;
13382 if (saved_order)
13383 memcpy (saved_order, order, sizeof order);
13385 if (base)
13387 *base = base_reg;
13389 for (i = 0; i < nops; i++)
13390 regs[i] = unsorted_regs[check_regs ? order[i] : i];
13392 *load_offset = unsorted_offsets[order[0]];
13395 if (TARGET_THUMB1
13396 && !peep2_reg_dead_p (nops, base_reg_rtx))
13397 return 0;
13399 if (unsorted_offsets[order[0]] == 0)
13400 ldm_case = 1; /* ldmia */
13401 else if (TARGET_ARM && unsorted_offsets[order[0]] == 4)
13402 ldm_case = 2; /* ldmib */
13403 else if (TARGET_ARM && unsorted_offsets[order[nops - 1]] == 0)
13404 ldm_case = 3; /* ldmda */
13405 else if (TARGET_32BIT && unsorted_offsets[order[nops - 1]] == -4)
13406 ldm_case = 4; /* ldmdb */
13407 else if (const_ok_for_arm (unsorted_offsets[order[0]])
13408 || const_ok_for_arm (-unsorted_offsets[order[0]]))
13409 ldm_case = 5;
13410 else
13411 return 0;
13413 if (!multiple_operation_profitable_p (false, nops,
13414 ldm_case == 5
13415 ? unsorted_offsets[order[0]] : 0))
13416 return 0;
13418 return ldm_case;
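/* Illustrative mapping of offsets to the LDM_CASE values above, assuming
   four loads from base register rn: offsets {0, 4, 8, 12} give case 1
   (ldmia rn, {...}), {4, 8, 12, 16} give case 2 (ldmib), {-12, -8, -4, 0}
   give case 3 (ldmda), and {-16, -12, -8, -4} give case 4 (ldmdb).  Any
   other run of adjacent offsets whose lowest value (or its negation) is a
   valid add immediate falls into case 5, which first adjusts the base
   register with an add.  */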
13421 /* Used to determine in a peephole whether a sequence of store instructions can
13422 be changed into a store-multiple instruction.
13423 NOPS is the number of separate store instructions we are examining.
13424 NOPS_TOTAL is the total number of instructions recognized by the peephole
13425 pattern.
13426 The first NOPS entries in OPERANDS are the source registers, the next
13427 NOPS entries are memory operands. If this function is successful, *BASE is
13428 set to the common base register of the memory accesses; *LOAD_OFFSET is set
13429 to the first memory location's offset from that base register. REGS is an
13430 array filled in with the source register numbers, REG_RTXS (if nonnull) is
13431 likewise filled with the corresponding rtx's.
13432 SAVED_ORDER (if nonnull) is an array filled in with an order that maps insn
13433 numbers to an ascending order of stores.
13434 If CHECK_REGS is true, the sequence of registers in *REGS matches the stores
13435 from ascending memory locations, and the function verifies that the register
13436 numbers are themselves ascending. If CHECK_REGS is false, the register
13437 numbers are stored in the order they are found in the operands. */
13438 static int
13439 store_multiple_sequence (rtx *operands, int nops, int nops_total,
13440 int *regs, rtx *reg_rtxs, int *saved_order, int *base,
13441 HOST_WIDE_INT *load_offset, bool check_regs)
13443 int unsorted_regs[MAX_LDM_STM_OPS];
13444 rtx unsorted_reg_rtxs[MAX_LDM_STM_OPS];
13445 HOST_WIDE_INT unsorted_offsets[MAX_LDM_STM_OPS];
13446 int order[MAX_LDM_STM_OPS];
13447 int base_reg = -1;
13448 rtx base_reg_rtx = NULL;
13449 int i, stm_case;
13451 /* Write back of base register is currently only supported for Thumb 1. */
13452 int base_writeback = TARGET_THUMB1;
13454 /* Can only handle up to MAX_LDM_STM_OPS insns at present, though could be
13455 easily extended if required. */
13456 gcc_assert (nops >= 2 && nops <= MAX_LDM_STM_OPS);
13458 memset (order, 0, MAX_LDM_STM_OPS * sizeof (int));
13460 /* Loop over the operands and check that the memory references are
13461 suitable (i.e. immediate offsets from the same base register). At
13462 the same time, extract the target register, and the memory
13463 offsets. */
13464 for (i = 0; i < nops; i++)
13466 rtx reg;
13467 rtx offset;
13469 /* Convert a subreg of a mem into the mem itself. */
13470 if (GET_CODE (operands[nops + i]) == SUBREG)
13471 operands[nops + i] = alter_subreg (operands + (nops + i), true);
13473 gcc_assert (MEM_P (operands[nops + i]));
13475 /* Don't reorder volatile memory references; it doesn't seem worth
13476 looking for the case where the order is ok anyway. */
13477 if (MEM_VOLATILE_P (operands[nops + i]))
13478 return 0;
13480 offset = const0_rtx;
13482 if ((REG_P (reg = XEXP (operands[nops + i], 0))
13483 || (GET_CODE (reg) == SUBREG
13484 && REG_P (reg = SUBREG_REG (reg))))
13485 || (GET_CODE (XEXP (operands[nops + i], 0)) == PLUS
13486 && ((REG_P (reg = XEXP (XEXP (operands[nops + i], 0), 0)))
13487 || (GET_CODE (reg) == SUBREG
13488 && REG_P (reg = SUBREG_REG (reg))))
13489 && (CONST_INT_P (offset
13490 = XEXP (XEXP (operands[nops + i], 0), 1)))))
13492 unsorted_reg_rtxs[i] = (REG_P (operands[i])
13493 ? operands[i] : SUBREG_REG (operands[i]));
13494 unsorted_regs[i] = REGNO (unsorted_reg_rtxs[i]);
13496 if (i == 0)
13498 base_reg = REGNO (reg);
13499 base_reg_rtx = reg;
13500 if (TARGET_THUMB1 && base_reg > LAST_LO_REGNUM)
13501 return 0;
13503 else if (base_reg != (int) REGNO (reg))
13504 /* Not addressed from the same base register. */
13505 return 0;
13507 /* If it isn't an integer register, then we can't do this. */
13508 if (unsorted_regs[i] < 0
13509 || (TARGET_THUMB1 && unsorted_regs[i] > LAST_LO_REGNUM)
13510 /* The effects are unpredictable if the base register is
13511 both updated and stored. */
13512 || (base_writeback && unsorted_regs[i] == base_reg)
13513 || (TARGET_THUMB2 && unsorted_regs[i] == SP_REGNUM)
13514 || unsorted_regs[i] > 14)
13515 return 0;
13517 unsorted_offsets[i] = INTVAL (offset);
13518 if (i == 0 || unsorted_offsets[i] < unsorted_offsets[order[0]])
13519 order[0] = i;
13521 else
13522 /* Not a suitable memory address. */
13523 return 0;
13526 /* All the useful information has now been extracted from the
13527 operands into unsorted_regs and unsorted_offsets; additionally,
13528 order[0] has been set to the lowest offset in the list. Sort
13529 the offsets into order, verifying that they are adjacent, and
13530 check that the register numbers are ascending. */
13531 if (!compute_offset_order (nops, unsorted_offsets, order,
13532 check_regs ? unsorted_regs : NULL))
13533 return 0;
13535 if (saved_order)
13536 memcpy (saved_order, order, sizeof order);
13538 if (base)
13540 *base = base_reg;
13542 for (i = 0; i < nops; i++)
13544 regs[i] = unsorted_regs[check_regs ? order[i] : i];
13545 if (reg_rtxs)
13546 reg_rtxs[i] = unsorted_reg_rtxs[check_regs ? order[i] : i];
13549 *load_offset = unsorted_offsets[order[0]];
13552 if (TARGET_THUMB1
13553 && !peep2_reg_dead_p (nops_total, base_reg_rtx))
13554 return 0;
13556 if (unsorted_offsets[order[0]] == 0)
13557 stm_case = 1; /* stmia */
13558 else if (TARGET_ARM && unsorted_offsets[order[0]] == 4)
13559 stm_case = 2; /* stmib */
13560 else if (TARGET_ARM && unsorted_offsets[order[nops - 1]] == 0)
13561 stm_case = 3; /* stmda */
13562 else if (TARGET_32BIT && unsorted_offsets[order[nops - 1]] == -4)
13563 stm_case = 4; /* stmdb */
13564 else
13565 return 0;
13567 if (!multiple_operation_profitable_p (false, nops, 0))
13568 return 0;
13570 return stm_case;
13573 /* Routines for use in generating RTL. */
13575 /* Generate a load-multiple instruction. COUNT is the number of loads in
13576 the instruction; REGS and MEMS are arrays containing the operands.
13577 BASEREG is the base register to be used in addressing the memory operands.
13578 WBACK_OFFSET is nonzero if the instruction should update the base
13579 register. */
13581 static rtx
13582 arm_gen_load_multiple_1 (int count, int *regs, rtx *mems, rtx basereg,
13583 HOST_WIDE_INT wback_offset)
13585 int i = 0, j;
13586 rtx result;
13588 if (!multiple_operation_profitable_p (false, count, 0))
13590 rtx seq;
13592 start_sequence ();
13594 for (i = 0; i < count; i++)
13595 emit_move_insn (gen_rtx_REG (SImode, regs[i]), mems[i]);
13597 if (wback_offset != 0)
13598 emit_move_insn (basereg, plus_constant (Pmode, basereg, wback_offset));
13600 seq = get_insns ();
13601 end_sequence ();
13603 return seq;
13606 result = gen_rtx_PARALLEL (VOIDmode,
13607 rtvec_alloc (count + (wback_offset != 0 ? 1 : 0)));
13608 if (wback_offset != 0)
13610 XVECEXP (result, 0, 0)
13611 = gen_rtx_SET (basereg, plus_constant (Pmode, basereg, wback_offset));
13612 i = 1;
13613 count++;
13616 for (j = 0; i < count; i++, j++)
13617 XVECEXP (result, 0, i)
13618 = gen_rtx_SET (gen_rtx_REG (SImode, regs[j]), mems[j]);
13620 return result;
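/* As a sketch of the result built above: with COUNT == 2, REGS == {0, 1}
   and WBACK_OFFSET == 8, the PARALLEL has the shape
     (parallel [(set (reg:SI base) (plus:SI (reg:SI base) (const_int 8)))
                (set (reg:SI 0) (mem:SI addr0))
                (set (reg:SI 1) (mem:SI addr1))])
   i.e. an optional write-back SET followed by one SET per loaded word
   (addr0/addr1 stand for the entries of MEMS).  */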
13623 /* Generate a store-multiple instruction. COUNT is the number of stores in
13624 the instruction; REGS and MEMS are arrays containing the operands.
13625 BASEREG is the base register to be used in addressing the memory operands.
13626 WBACK_OFFSET is nonzero if the instruction should update the base
13627 register. */
13629 static rtx
13630 arm_gen_store_multiple_1 (int count, int *regs, rtx *mems, rtx basereg,
13631 HOST_WIDE_INT wback_offset)
13633 int i = 0, j;
13634 rtx result;
13636 if (GET_CODE (basereg) == PLUS)
13637 basereg = XEXP (basereg, 0);
13639 if (!multiple_operation_profitable_p (false, count, 0))
13641 rtx seq;
13643 start_sequence ();
13645 for (i = 0; i < count; i++)
13646 emit_move_insn (mems[i], gen_rtx_REG (SImode, regs[i]));
13648 if (wback_offset != 0)
13649 emit_move_insn (basereg, plus_constant (Pmode, basereg, wback_offset));
13651 seq = get_insns ();
13652 end_sequence ();
13654 return seq;
13657 result = gen_rtx_PARALLEL (VOIDmode,
13658 rtvec_alloc (count + (wback_offset != 0 ? 1 : 0)));
13659 if (wback_offset != 0)
13661 XVECEXP (result, 0, 0)
13662 = gen_rtx_SET (basereg, plus_constant (Pmode, basereg, wback_offset));
13663 i = 1;
13664 count++;
13667 for (j = 0; i < count; i++, j++)
13668 XVECEXP (result, 0, i)
13669 = gen_rtx_SET (mems[j], gen_rtx_REG (SImode, regs[j]));
13671 return result;
13674 /* Generate either a load-multiple or a store-multiple instruction. This
13675 function can be used in situations where we can start with a single MEM
13676 rtx and adjust its address upwards.
13677 COUNT is the number of operations in the instruction, not counting a
13678 possible update of the base register. REGS is an array containing the
13679 register operands.
13680 BASEREG is the base register to be used in addressing the memory operands,
13681 which are constructed from BASEMEM.
13682 WRITE_BACK specifies whether the generated instruction should include an
13683 update of the base register.
13684 OFFSETP is used to pass an offset to and from this function; this offset
13685 is not used when constructing the address (instead BASEMEM should have an
13686 appropriate offset in its address); it is used only for setting
13687 MEM_OFFSET. It is updated only if WRITE_BACK is true. */
13689 static rtx
13690 arm_gen_multiple_op (bool is_load, int *regs, int count, rtx basereg,
13691 bool write_back, rtx basemem, HOST_WIDE_INT *offsetp)
13693 rtx mems[MAX_LDM_STM_OPS];
13694 HOST_WIDE_INT offset = *offsetp;
13695 int i;
13697 gcc_assert (count <= MAX_LDM_STM_OPS);
13699 if (GET_CODE (basereg) == PLUS)
13700 basereg = XEXP (basereg, 0);
13702 for (i = 0; i < count; i++)
13704 rtx addr = plus_constant (Pmode, basereg, i * 4);
13705 mems[i] = adjust_automodify_address_nv (basemem, SImode, addr, offset);
13706 offset += 4;
13709 if (write_back)
13710 *offsetp = offset;
13712 if (is_load)
13713 return arm_gen_load_multiple_1 (count, regs, mems, basereg,
13714 write_back ? 4 * count : 0);
13715 else
13716 return arm_gen_store_multiple_1 (count, regs, mems, basereg,
13717 write_back ? 4 * count : 0);
13720 rtx
13721 arm_gen_load_multiple (int *regs, int count, rtx basereg, int write_back,
13722 rtx basemem, HOST_WIDE_INT *offsetp)
13724 return arm_gen_multiple_op (TRUE, regs, count, basereg, write_back, basemem,
13725 offsetp);
13728 rtx
13729 arm_gen_store_multiple (int *regs, int count, rtx basereg, int write_back,
13730 rtx basemem, HOST_WIDE_INT *offsetp)
13732 return arm_gen_multiple_op (FALSE, regs, count, basereg, write_back, basemem,
13733 offsetp);
13736 /* Called from a peephole2 expander to turn a sequence of loads into an
13737 LDM instruction. OPERANDS are the operands found by the peephole matcher;
13738 NOPS indicates how many separate loads we are trying to combine. SORT_REGS
13739 is true if we can reorder the registers because they are used commutatively
13740 subsequently.
13741 Returns true iff we could generate a new instruction. */
13743 bool
13744 gen_ldm_seq (rtx *operands, int nops, bool sort_regs)
13746 int regs[MAX_LDM_STM_OPS], mem_order[MAX_LDM_STM_OPS];
13747 rtx mems[MAX_LDM_STM_OPS];
13748 int i, j, base_reg;
13749 rtx base_reg_rtx;
13750 HOST_WIDE_INT offset;
13751 int write_back = FALSE;
13752 int ldm_case;
13753 rtx addr;
13755 ldm_case = load_multiple_sequence (operands, nops, regs, mem_order,
13756 &base_reg, &offset, !sort_regs);
13758 if (ldm_case == 0)
13759 return false;
13761 if (sort_regs)
13762 for (i = 0; i < nops - 1; i++)
13763 for (j = i + 1; j < nops; j++)
13764 if (regs[i] > regs[j])
13766 int t = regs[i];
13767 regs[i] = regs[j];
13768 regs[j] = t;
13770 base_reg_rtx = gen_rtx_REG (Pmode, base_reg);
13772 if (TARGET_THUMB1)
13774 gcc_assert (peep2_reg_dead_p (nops, base_reg_rtx));
13775 gcc_assert (ldm_case == 1 || ldm_case == 5);
13776 write_back = TRUE;
13779 if (ldm_case == 5)
13781 rtx newbase = TARGET_THUMB1 ? base_reg_rtx : gen_rtx_REG (SImode, regs[0]);
13782 emit_insn (gen_addsi3 (newbase, base_reg_rtx, GEN_INT (offset)));
13783 offset = 0;
13784 if (!TARGET_THUMB1)
13785 base_reg_rtx = newbase;
13788 for (i = 0; i < nops; i++)
13790 addr = plus_constant (Pmode, base_reg_rtx, offset + i * 4);
13791 mems[i] = adjust_automodify_address_nv (operands[nops + mem_order[i]],
13792 SImode, addr, 0);
13794 emit_insn (arm_gen_load_multiple_1 (nops, regs, mems, base_reg_rtx,
13795 write_back ? offset + i * 4 : 0));
13796 return true;
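/* For illustration, a peephole2 candidate such as
       ldr r1, [r4, #4]
       ldr r0, [r4]
       ldr r2, [r4, #8]
       ldr r3, [r4, #12]
   can be rewritten by gen_ldm_seq (with SORT_REGS) as the single insn
       ldmia r4, {r0, r1, r2, r3}
   provided load_multiple_sequence accepts the registers and offsets and,
   for Thumb-1, the base register is dead afterwards.  */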
13799 /* Called from a peephole2 expander to turn a sequence of stores into an
13800 STM instruction. OPERANDS are the operands found by the peephole matcher;
13801 NOPS indicates how many separate stores we are trying to combine.
13802 Returns true iff we could generate a new instruction. */
13804 bool
13805 gen_stm_seq (rtx *operands, int nops)
13807 int i;
13808 int regs[MAX_LDM_STM_OPS], mem_order[MAX_LDM_STM_OPS];
13809 rtx mems[MAX_LDM_STM_OPS];
13810 int base_reg;
13811 rtx base_reg_rtx;
13812 HOST_WIDE_INT offset;
13813 int write_back = FALSE;
13814 int stm_case;
13815 rtx addr;
13816 bool base_reg_dies;
13818 stm_case = store_multiple_sequence (operands, nops, nops, regs, NULL,
13819 mem_order, &base_reg, &offset, true);
13821 if (stm_case == 0)
13822 return false;
13824 base_reg_rtx = gen_rtx_REG (Pmode, base_reg);
13826 base_reg_dies = peep2_reg_dead_p (nops, base_reg_rtx);
13827 if (TARGET_THUMB1)
13829 gcc_assert (base_reg_dies);
13830 write_back = TRUE;
13833 if (stm_case == 5)
13835 gcc_assert (base_reg_dies);
13836 emit_insn (gen_addsi3 (base_reg_rtx, base_reg_rtx, GEN_INT (offset)));
13837 offset = 0;
13840 addr = plus_constant (Pmode, base_reg_rtx, offset);
13842 for (i = 0; i < nops; i++)
13844 addr = plus_constant (Pmode, base_reg_rtx, offset + i * 4);
13845 mems[i] = adjust_automodify_address_nv (operands[nops + mem_order[i]],
13846 SImode, addr, 0);
13848 emit_insn (arm_gen_store_multiple_1 (nops, regs, mems, base_reg_rtx,
13849 write_back ? offset + i * 4 : 0));
13850 return true;
13853 /* Called from a peephole2 expander to turn a sequence of stores that are
13854 preceded by constant loads into an STM instruction. OPERANDS are the
13855 operands found by the peephole matcher; NOPS indicates how many
13856 separate stores we are trying to combine; there are 2 * NOPS
13857 instructions in the peephole.
13858 Returns true iff we could generate a new instruction. */
13860 bool
13861 gen_const_stm_seq (rtx *operands, int nops)
13863 int regs[MAX_LDM_STM_OPS], sorted_regs[MAX_LDM_STM_OPS];
13864 int reg_order[MAX_LDM_STM_OPS], mem_order[MAX_LDM_STM_OPS];
13865 rtx reg_rtxs[MAX_LDM_STM_OPS], orig_reg_rtxs[MAX_LDM_STM_OPS];
13866 rtx mems[MAX_LDM_STM_OPS];
13867 int base_reg;
13868 rtx base_reg_rtx;
13869 HOST_WIDE_INT offset;
13870 int write_back = FALSE;
13871 int stm_case;
13872 rtx addr;
13873 bool base_reg_dies;
13874 int i, j;
13875 HARD_REG_SET allocated;
13877 stm_case = store_multiple_sequence (operands, nops, 2 * nops, regs, reg_rtxs,
13878 mem_order, &base_reg, &offset, false);
13880 if (stm_case == 0)
13881 return false;
13883 memcpy (orig_reg_rtxs, reg_rtxs, sizeof orig_reg_rtxs);
13885 /* If the same register is used more than once, try to find a free
13886 register. */
13887 CLEAR_HARD_REG_SET (allocated);
13888 for (i = 0; i < nops; i++)
13890 for (j = i + 1; j < nops; j++)
13891 if (regs[i] == regs[j])
13893 rtx t = peep2_find_free_register (0, nops * 2,
13894 TARGET_THUMB1 ? "l" : "r",
13895 SImode, &allocated);
13896 if (t == NULL_RTX)
13897 return false;
13898 reg_rtxs[i] = t;
13899 regs[i] = REGNO (t);
13903 /* Compute an ordering that maps the register numbers to an ascending
13904 sequence. */
13905 reg_order[0] = 0;
13906 for (i = 0; i < nops; i++)
13907 if (regs[i] < regs[reg_order[0]])
13908 reg_order[0] = i;
13910 for (i = 1; i < nops; i++)
13912 int this_order = reg_order[i - 1];
13913 for (j = 0; j < nops; j++)
13914 if (regs[j] > regs[reg_order[i - 1]]
13915 && (this_order == reg_order[i - 1]
13916 || regs[j] < regs[this_order]))
13917 this_order = j;
13918 reg_order[i] = this_order;
13921 /* Ensure that registers that must be live after the instruction end
13922 up with the correct value. */
13923 for (i = 0; i < nops; i++)
13925 int this_order = reg_order[i];
13926 if ((this_order != mem_order[i]
13927 || orig_reg_rtxs[this_order] != reg_rtxs[this_order])
13928 && !peep2_reg_dead_p (nops * 2, orig_reg_rtxs[this_order]))
13929 return false;
13932 /* Load the constants. */
13933 for (i = 0; i < nops; i++)
13935 rtx op = operands[2 * nops + mem_order[i]];
13936 sorted_regs[i] = regs[reg_order[i]];
13937 emit_move_insn (reg_rtxs[reg_order[i]], op);
13940 base_reg_rtx = gen_rtx_REG (Pmode, base_reg);
13942 base_reg_dies = peep2_reg_dead_p (nops * 2, base_reg_rtx);
13943 if (TARGET_THUMB1)
13945 gcc_assert (base_reg_dies);
13946 write_back = TRUE;
13949 if (stm_case == 5)
13951 gcc_assert (base_reg_dies);
13952 emit_insn (gen_addsi3 (base_reg_rtx, base_reg_rtx, GEN_INT (offset)));
13953 offset = 0;
13956 addr = plus_constant (Pmode, base_reg_rtx, offset);
13958 for (i = 0; i < nops; i++)
13960 addr = plus_constant (Pmode, base_reg_rtx, offset + i * 4);
13961 mems[i] = adjust_automodify_address_nv (operands[nops + mem_order[i]],
13962 SImode, addr, 0);
13964 emit_insn (arm_gen_store_multiple_1 (nops, sorted_regs, mems, base_reg_rtx,
13965 write_back ? offset + i * 4 : 0));
13966 return true;
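/* For illustration, a candidate sequence of the form
       mov r0, #10
       str r0, [r4]
       mov r0, #20
       str r0, [r4, #4]
   can become
       mov r0, #10
       mov r1, #20
       stmia r4, {r0, r1}
   where r1 is a free register found by peep2_find_free_register (r0 was
   reused for both constants) and r0 must be dead after the original
   sequence, since its final value changes.  */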
13969 /* Copy a block of memory using plain ldr/str/ldrh/strh instructions, to permit
13970 unaligned copies on processors which support unaligned semantics for those
13971 instructions. INTERLEAVE_FACTOR can be used to attempt to hide load latency
13972 (using more registers) by doing e.g. load/load/store/store for a factor of 2.
13973 An interleave factor of 1 (the minimum) will perform no interleaving.
13974 Load/store multiple are used for aligned addresses where possible. */
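/* For example, with LENGTH == 11 and INTERLEAVE_FACTOR == 1 the code below
   emits two single-word copies, one halfword copy and one byte copy
   (11 = 4 + 4 + 2 + 1), using the unaligned load/store patterns on
   whichever side is not word-aligned.  */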
13976 static void
13977 arm_block_move_unaligned_straight (rtx dstbase, rtx srcbase,
13978 HOST_WIDE_INT length,
13979 unsigned int interleave_factor)
13981 rtx *regs = XALLOCAVEC (rtx, interleave_factor);
13982 int *regnos = XALLOCAVEC (int, interleave_factor);
13983 HOST_WIDE_INT block_size_bytes = interleave_factor * UNITS_PER_WORD;
13984 HOST_WIDE_INT i, j;
13985 HOST_WIDE_INT remaining = length, words;
13986 rtx halfword_tmp = NULL, byte_tmp = NULL;
13987 rtx dst, src;
13988 bool src_aligned = MEM_ALIGN (srcbase) >= BITS_PER_WORD;
13989 bool dst_aligned = MEM_ALIGN (dstbase) >= BITS_PER_WORD;
13990 HOST_WIDE_INT srcoffset, dstoffset;
13991 HOST_WIDE_INT src_autoinc, dst_autoinc;
13992 rtx mem, addr;
13994 gcc_assert (interleave_factor >= 1 && interleave_factor <= 4);
13996 /* Use hard registers if we have aligned source or destination so we can use
13997 load/store multiple with contiguous registers. */
13998 if (dst_aligned || src_aligned)
13999 for (i = 0; i < interleave_factor; i++)
14000 regs[i] = gen_rtx_REG (SImode, i);
14001 else
14002 for (i = 0; i < interleave_factor; i++)
14003 regs[i] = gen_reg_rtx (SImode);
14005 dst = copy_addr_to_reg (XEXP (dstbase, 0));
14006 src = copy_addr_to_reg (XEXP (srcbase, 0));
14008 srcoffset = dstoffset = 0;
14010 /* Calls to arm_gen_load_multiple and arm_gen_store_multiple update SRC/DST.
14011 For copying the last bytes we want to subtract this offset again. */
14012 src_autoinc = dst_autoinc = 0;
14014 for (i = 0; i < interleave_factor; i++)
14015 regnos[i] = i;
14017 /* Copy BLOCK_SIZE_BYTES chunks. */
14019 for (i = 0; i + block_size_bytes <= length; i += block_size_bytes)
14021 /* Load words. */
14022 if (src_aligned && interleave_factor > 1)
14024 emit_insn (arm_gen_load_multiple (regnos, interleave_factor, src,
14025 TRUE, srcbase, &srcoffset));
14026 src_autoinc += UNITS_PER_WORD * interleave_factor;
14028 else
14030 for (j = 0; j < interleave_factor; j++)
14032 addr = plus_constant (Pmode, src, (srcoffset + j * UNITS_PER_WORD
14033 - src_autoinc));
14034 mem = adjust_automodify_address (srcbase, SImode, addr,
14035 srcoffset + j * UNITS_PER_WORD);
14036 emit_insn (gen_unaligned_loadsi (regs[j], mem));
14038 srcoffset += block_size_bytes;
14041 /* Store words. */
14042 if (dst_aligned && interleave_factor > 1)
14044 emit_insn (arm_gen_store_multiple (regnos, interleave_factor, dst,
14045 TRUE, dstbase, &dstoffset));
14046 dst_autoinc += UNITS_PER_WORD * interleave_factor;
14048 else
14050 for (j = 0; j < interleave_factor; j++)
14052 addr = plus_constant (Pmode, dst, (dstoffset + j * UNITS_PER_WORD
14053 - dst_autoinc));
14054 mem = adjust_automodify_address (dstbase, SImode, addr,
14055 dstoffset + j * UNITS_PER_WORD);
14056 emit_insn (gen_unaligned_storesi (mem, regs[j]));
14058 dstoffset += block_size_bytes;
14061 remaining -= block_size_bytes;
14064 /* Copy any whole words left (note these aren't interleaved with any
14065 subsequent halfword/byte load/stores in the interests of simplicity). */
14067 words = remaining / UNITS_PER_WORD;
14069 gcc_assert (words < interleave_factor);
14071 if (src_aligned && words > 1)
14073 emit_insn (arm_gen_load_multiple (regnos, words, src, TRUE, srcbase,
14074 &srcoffset));
14075 src_autoinc += UNITS_PER_WORD * words;
14077 else
14079 for (j = 0; j < words; j++)
14081 addr = plus_constant (Pmode, src,
14082 srcoffset + j * UNITS_PER_WORD - src_autoinc);
14083 mem = adjust_automodify_address (srcbase, SImode, addr,
14084 srcoffset + j * UNITS_PER_WORD);
14085 if (src_aligned)
14086 emit_move_insn (regs[j], mem);
14087 else
14088 emit_insn (gen_unaligned_loadsi (regs[j], mem));
14090 srcoffset += words * UNITS_PER_WORD;
14093 if (dst_aligned && words > 1)
14095 emit_insn (arm_gen_store_multiple (regnos, words, dst, TRUE, dstbase,
14096 &dstoffset));
14097 dst_autoinc += words * UNITS_PER_WORD;
14099 else
14101 for (j = 0; j < words; j++)
14103 addr = plus_constant (Pmode, dst,
14104 dstoffset + j * UNITS_PER_WORD - dst_autoinc);
14105 mem = adjust_automodify_address (dstbase, SImode, addr,
14106 dstoffset + j * UNITS_PER_WORD);
14107 if (dst_aligned)
14108 emit_move_insn (mem, regs[j]);
14109 else
14110 emit_insn (gen_unaligned_storesi (mem, regs[j]));
14112 dstoffset += words * UNITS_PER_WORD;
14115 remaining -= words * UNITS_PER_WORD;
14117 gcc_assert (remaining < 4);
14119 /* Copy a halfword if necessary. */
14121 if (remaining >= 2)
14123 halfword_tmp = gen_reg_rtx (SImode);
14125 addr = plus_constant (Pmode, src, srcoffset - src_autoinc);
14126 mem = adjust_automodify_address (srcbase, HImode, addr, srcoffset);
14127 emit_insn (gen_unaligned_loadhiu (halfword_tmp, mem));
14129 /* Either write out immediately, or delay until we've loaded the last
14130 byte, depending on interleave factor. */
14131 if (interleave_factor == 1)
14133 addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
14134 mem = adjust_automodify_address (dstbase, HImode, addr, dstoffset);
14135 emit_insn (gen_unaligned_storehi (mem,
14136 gen_lowpart (HImode, halfword_tmp)));
14137 halfword_tmp = NULL;
14138 dstoffset += 2;
14141 remaining -= 2;
14142 srcoffset += 2;
14145 gcc_assert (remaining < 2);
14147 /* Copy last byte. */
14149 if ((remaining & 1) != 0)
14151 byte_tmp = gen_reg_rtx (SImode);
14153 addr = plus_constant (Pmode, src, srcoffset - src_autoinc);
14154 mem = adjust_automodify_address (srcbase, QImode, addr, srcoffset);
14155 emit_move_insn (gen_lowpart (QImode, byte_tmp), mem);
14157 if (interleave_factor == 1)
14159 addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
14160 mem = adjust_automodify_address (dstbase, QImode, addr, dstoffset);
14161 emit_move_insn (mem, gen_lowpart (QImode, byte_tmp));
14162 byte_tmp = NULL;
14163 dstoffset++;
14166 remaining--;
14167 srcoffset++;
14170 /* Store last halfword if we haven't done so already. */
14172 if (halfword_tmp)
14174 addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
14175 mem = adjust_automodify_address (dstbase, HImode, addr, dstoffset);
14176 emit_insn (gen_unaligned_storehi (mem,
14177 gen_lowpart (HImode, halfword_tmp)));
14178 dstoffset += 2;
14181 /* Likewise for last byte. */
14183 if (byte_tmp)
14185 addr = plus_constant (Pmode, dst, dstoffset - dst_autoinc);
14186 mem = adjust_automodify_address (dstbase, QImode, addr, dstoffset);
14187 emit_move_insn (mem, gen_lowpart (QImode, byte_tmp));
14188 dstoffset++;
14191 gcc_assert (remaining == 0 && srcoffset == dstoffset);
14194 /* From mips_adjust_block_mem:
14196 Helper function for doing a loop-based block operation on memory
14197 reference MEM. Each iteration of the loop will operate on LENGTH
14198 bytes of MEM.
14200 Create a new base register for use within the loop and point it to
14201 the start of MEM. Create a new memory reference that uses this
14202 register. Store them in *LOOP_REG and *LOOP_MEM respectively. */
14204 static void
14205 arm_adjust_block_mem (rtx mem, HOST_WIDE_INT length, rtx *loop_reg,
14206 rtx *loop_mem)
14208 *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
14210 /* Although the new mem does not refer to a known location,
14211 it does keep up to LENGTH bytes of alignment. */
14212 *loop_mem = change_address (mem, BLKmode, *loop_reg);
14213 set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
14216 /* From mips_block_move_loop:
14218 Move LENGTH bytes from SRC to DEST using a loop that moves BYTES_PER_ITER
14219 bytes at a time. LENGTH must be at least BYTES_PER_ITER. Assume that
14220 the memory regions do not overlap. */
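/* For example, LENGTH == 100 with BYTES_PER_ITER == 16 gives LEFTOVER == 4;
   the loop below runs six times (96 bytes) and the remaining 4 bytes are
   handled by a final straight (non-looping) copy.  */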
14222 static void
14223 arm_block_move_unaligned_loop (rtx dest, rtx src, HOST_WIDE_INT length,
14224 unsigned int interleave_factor,
14225 HOST_WIDE_INT bytes_per_iter)
14227 rtx src_reg, dest_reg, final_src, test;
14228 HOST_WIDE_INT leftover;
14230 leftover = length % bytes_per_iter;
14231 length -= leftover;
14233 /* Create registers and memory references for use within the loop. */
14234 arm_adjust_block_mem (src, bytes_per_iter, &src_reg, &src);
14235 arm_adjust_block_mem (dest, bytes_per_iter, &dest_reg, &dest);
14237 /* Calculate the value that SRC_REG should have after the last iteration of
14238 the loop. */
14239 final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
14240 0, 0, OPTAB_WIDEN);
14242 /* Emit the start of the loop. */
14243 rtx_code_label *label = gen_label_rtx ();
14244 emit_label (label);
14246 /* Emit the loop body. */
14247 arm_block_move_unaligned_straight (dest, src, bytes_per_iter,
14248 interleave_factor);
14250 /* Move on to the next block. */
14251 emit_move_insn (src_reg, plus_constant (Pmode, src_reg, bytes_per_iter));
14252 emit_move_insn (dest_reg, plus_constant (Pmode, dest_reg, bytes_per_iter));
14254 /* Emit the loop condition. */
14255 test = gen_rtx_NE (VOIDmode, src_reg, final_src);
14256 emit_jump_insn (gen_cbranchsi4 (test, src_reg, final_src, label));
14258 /* Mop up any left-over bytes. */
14259 if (leftover)
14260 arm_block_move_unaligned_straight (dest, src, leftover, interleave_factor);
14263 /* Emit a block move when either the source or destination is unaligned (not
14264 aligned to a four-byte boundary). This may need further tuning depending on
14265 core type, optimize_size setting, etc. */
14267 static int
14268 arm_movmemqi_unaligned (rtx *operands)
14270 HOST_WIDE_INT length = INTVAL (operands[2]);
14272 if (optimize_size)
14274 bool src_aligned = MEM_ALIGN (operands[1]) >= BITS_PER_WORD;
14275 bool dst_aligned = MEM_ALIGN (operands[0]) >= BITS_PER_WORD;
14276 /* Inlined memcpy using ldr/str/ldrh/strh can be quite big: try to limit
14277 size of code if optimizing for size. We'll use ldm/stm if src_aligned
14278 or dst_aligned though: allow more interleaving in those cases since the
14279 resulting code can be smaller. */
14280 unsigned int interleave_factor = (src_aligned || dst_aligned) ? 2 : 1;
14281 HOST_WIDE_INT bytes_per_iter = (src_aligned || dst_aligned) ? 8 : 4;
14283 if (length > 12)
14284 arm_block_move_unaligned_loop (operands[0], operands[1], length,
14285 interleave_factor, bytes_per_iter);
14286 else
14287 arm_block_move_unaligned_straight (operands[0], operands[1], length,
14288 interleave_factor);
14290 else
14292 /* Note that the loop created by arm_block_move_unaligned_loop may be
14293 subject to loop unrolling, which makes tuning this condition a little
14294 redundant. */
14295 if (length > 32)
14296 arm_block_move_unaligned_loop (operands[0], operands[1], length, 4, 16);
14297 else
14298 arm_block_move_unaligned_straight (operands[0], operands[1], length, 4);
14301 return 1;
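/* For example, when optimizing for size a 20-byte copy with an aligned
   destination takes the loop path: INTERLEAVE_FACTOR == 2 and
   BYTES_PER_ITER == 8, so two iterations copy 16 bytes and the 4 leftover
   bytes are copied straight.  Without -Os the same copy (32 bytes or
   fewer) is done entirely by the straight variant with an interleave
   factor of 4.  */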
14304 int
14305 arm_gen_movmemqi (rtx *operands)
14307 HOST_WIDE_INT in_words_to_go, out_words_to_go, last_bytes;
14308 HOST_WIDE_INT srcoffset, dstoffset;
14309 rtx src, dst, srcbase, dstbase;
14310 rtx part_bytes_reg = NULL;
14311 rtx mem;
14313 if (!CONST_INT_P (operands[2])
14314 || !CONST_INT_P (operands[3])
14315 || INTVAL (operands[2]) > 64)
14316 return 0;
14318 if (unaligned_access && (INTVAL (operands[3]) & 3) != 0)
14319 return arm_movmemqi_unaligned (operands);
14321 if (INTVAL (operands[3]) & 3)
14322 return 0;
14324 dstbase = operands[0];
14325 srcbase = operands[1];
14327 dst = copy_to_mode_reg (SImode, XEXP (dstbase, 0));
14328 src = copy_to_mode_reg (SImode, XEXP (srcbase, 0));
14330 in_words_to_go = ARM_NUM_INTS (INTVAL (operands[2]));
14331 out_words_to_go = INTVAL (operands[2]) / 4;
14332 last_bytes = INTVAL (operands[2]) & 3;
14333 dstoffset = srcoffset = 0;
14335 if (out_words_to_go != in_words_to_go && ((in_words_to_go - 1) & 3) != 0)
14336 part_bytes_reg = gen_rtx_REG (SImode, (in_words_to_go - 1) & 3);
14338 while (in_words_to_go >= 2)
14340 if (in_words_to_go > 4)
14341 emit_insn (arm_gen_load_multiple (arm_regs_in_sequence, 4, src,
14342 TRUE, srcbase, &srcoffset));
14343 else
14344 emit_insn (arm_gen_load_multiple (arm_regs_in_sequence, in_words_to_go,
14345 src, FALSE, srcbase,
14346 &srcoffset));
14348 if (out_words_to_go)
14350 if (out_words_to_go > 4)
14351 emit_insn (arm_gen_store_multiple (arm_regs_in_sequence, 4, dst,
14352 TRUE, dstbase, &dstoffset));
14353 else if (out_words_to_go != 1)
14354 emit_insn (arm_gen_store_multiple (arm_regs_in_sequence,
14355 out_words_to_go, dst,
14356 (last_bytes == 0
14357 ? FALSE : TRUE),
14358 dstbase, &dstoffset));
14359 else
14361 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
14362 emit_move_insn (mem, gen_rtx_REG (SImode, R0_REGNUM));
14363 if (last_bytes != 0)
14365 emit_insn (gen_addsi3 (dst, dst, GEN_INT (4)));
14366 dstoffset += 4;
14371 in_words_to_go -= in_words_to_go < 4 ? in_words_to_go : 4;
14372 out_words_to_go -= out_words_to_go < 4 ? out_words_to_go : 4;
14375 /* OUT_WORDS_TO_GO will be zero here if there are byte stores to do. */
14376 if (out_words_to_go)
14378 rtx sreg;
14380 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
14381 sreg = copy_to_reg (mem);
14383 mem = adjust_automodify_address (dstbase, SImode, dst, dstoffset);
14384 emit_move_insn (mem, sreg);
14385 in_words_to_go--;
14387 gcc_assert (!in_words_to_go); /* Sanity check */
14390 if (in_words_to_go)
14392 gcc_assert (in_words_to_go > 0);
14394 mem = adjust_automodify_address (srcbase, SImode, src, srcoffset);
14395 part_bytes_reg = copy_to_mode_reg (SImode, mem);
14398 gcc_assert (!last_bytes || part_bytes_reg);
14400 if (BYTES_BIG_ENDIAN && last_bytes)
14402 rtx tmp = gen_reg_rtx (SImode);
14404 /* The bytes we want are in the top end of the word. */
14405 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg,
14406 GEN_INT (8 * (4 - last_bytes))));
14407 part_bytes_reg = tmp;
14409 while (last_bytes)
14411 mem = adjust_automodify_address (dstbase, QImode,
14412 plus_constant (Pmode, dst,
14413 last_bytes - 1),
14414 dstoffset + last_bytes - 1);
14415 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
14417 if (--last_bytes)
14419 tmp = gen_reg_rtx (SImode);
14420 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (8)));
14421 part_bytes_reg = tmp;
14426 else
14428 if (last_bytes > 1)
14430 mem = adjust_automodify_address (dstbase, HImode, dst, dstoffset);
14431 emit_move_insn (mem, gen_lowpart (HImode, part_bytes_reg));
14432 last_bytes -= 2;
14433 if (last_bytes)
14435 rtx tmp = gen_reg_rtx (SImode);
14436 emit_insn (gen_addsi3 (dst, dst, const2_rtx));
14437 emit_insn (gen_lshrsi3 (tmp, part_bytes_reg, GEN_INT (16)));
14438 part_bytes_reg = tmp;
14439 dstoffset += 2;
14443 if (last_bytes)
14445 mem = adjust_automodify_address (dstbase, QImode, dst, dstoffset);
14446 emit_move_insn (mem, gen_lowpart (QImode, part_bytes_reg));
14450 return 1;
14453 /* Helper for gen_movmem_ldrd_strd. Increase the address of memory rtx
14454 by mode size. */
14455 inline static rtx
14456 next_consecutive_mem (rtx mem)
14458 machine_mode mode = GET_MODE (mem);
14459 HOST_WIDE_INT offset = GET_MODE_SIZE (mode);
14460 rtx addr = plus_constant (Pmode, XEXP (mem, 0), offset);
14462 return adjust_automodify_address (mem, mode, addr, offset);
14465 /* Copy using LDRD/STRD instructions whenever possible.
14466 Returns true upon success. */
14467 bool
14468 gen_movmem_ldrd_strd (rtx *operands)
14470 unsigned HOST_WIDE_INT len;
14471 HOST_WIDE_INT align;
14472 rtx src, dst, base;
14473 rtx reg0;
14474 bool src_aligned, dst_aligned;
14475 bool src_volatile, dst_volatile;
14477 gcc_assert (CONST_INT_P (operands[2]));
14478 gcc_assert (CONST_INT_P (operands[3]));
14480 len = UINTVAL (operands[2]);
14481 if (len > 64)
14482 return false;
14484 /* Maximum alignment we can assume for both src and dst buffers. */
14485 align = INTVAL (operands[3]);
14487 if ((!unaligned_access) && (len >= 4) && ((align & 3) != 0))
14488 return false;
14490 /* Place src and dst addresses in registers
14491 and update the corresponding mem rtx. */
14492 dst = operands[0];
14493 dst_volatile = MEM_VOLATILE_P (dst);
14494 dst_aligned = MEM_ALIGN (dst) >= BITS_PER_WORD;
14495 base = copy_to_mode_reg (SImode, XEXP (dst, 0));
14496 dst = adjust_automodify_address (dst, VOIDmode, base, 0);
14498 src = operands[1];
14499 src_volatile = MEM_VOLATILE_P (src);
14500 src_aligned = MEM_ALIGN (src) >= BITS_PER_WORD;
14501 base = copy_to_mode_reg (SImode, XEXP (src, 0));
14502 src = adjust_automodify_address (src, VOIDmode, base, 0);
14504 if (!unaligned_access && !(src_aligned && dst_aligned))
14505 return false;
14507 if (src_volatile || dst_volatile)
14508 return false;
14510 /* If we cannot generate any LDRD/STRD, try to generate LDM/STM. */
14511 if (!(dst_aligned || src_aligned))
14512 return arm_gen_movmemqi (operands);
14514 /* If either src or dst is unaligned we'll be accessing it as pairs
14515 of unaligned SImode accesses. Otherwise we can generate DImode
14516 ldrd/strd instructions. */
14517 src = adjust_address (src, src_aligned ? DImode : SImode, 0);
14518 dst = adjust_address (dst, dst_aligned ? DImode : SImode, 0);
14520 while (len >= 8)
14522 len -= 8;
14523 reg0 = gen_reg_rtx (DImode);
14524 rtx low_reg = NULL_RTX;
14525 rtx hi_reg = NULL_RTX;
14527 if (!src_aligned || !dst_aligned)
14529 low_reg = gen_lowpart (SImode, reg0);
14530 hi_reg = gen_highpart_mode (SImode, DImode, reg0);
14532 if (src_aligned)
14533 emit_move_insn (reg0, src);
14534 else
14536 emit_insn (gen_unaligned_loadsi (low_reg, src));
14537 src = next_consecutive_mem (src);
14538 emit_insn (gen_unaligned_loadsi (hi_reg, src));
14541 if (dst_aligned)
14542 emit_move_insn (dst, reg0);
14543 else
14545 emit_insn (gen_unaligned_storesi (dst, low_reg));
14546 dst = next_consecutive_mem (dst);
14547 emit_insn (gen_unaligned_storesi (dst, hi_reg));
14550 src = next_consecutive_mem (src);
14551 dst = next_consecutive_mem (dst);
14554 gcc_assert (len < 8);
14555 if (len >= 4)
14557 /* More than a word but less than a double-word to copy. Copy a word. */
14558 reg0 = gen_reg_rtx (SImode);
14559 src = adjust_address (src, SImode, 0);
14560 dst = adjust_address (dst, SImode, 0);
14561 if (src_aligned)
14562 emit_move_insn (reg0, src);
14563 else
14564 emit_insn (gen_unaligned_loadsi (reg0, src));
14566 if (dst_aligned)
14567 emit_move_insn (dst, reg0);
14568 else
14569 emit_insn (gen_unaligned_storesi (dst, reg0));
14571 src = next_consecutive_mem (src);
14572 dst = next_consecutive_mem (dst);
14573 len -= 4;
14576 if (len == 0)
14577 return true;
14579 /* Copy the remaining bytes. */
14580 if (len >= 2)
14582 dst = adjust_address (dst, HImode, 0);
14583 src = adjust_address (src, HImode, 0);
14584 reg0 = gen_reg_rtx (SImode);
14585 if (src_aligned)
14586 emit_insn (gen_zero_extendhisi2 (reg0, src));
14587 else
14588 emit_insn (gen_unaligned_loadhiu (reg0, src));
14590 if (dst_aligned)
14591 emit_insn (gen_movhi (dst, gen_lowpart(HImode, reg0)));
14592 else
14593 emit_insn (gen_unaligned_storehi (dst, gen_lowpart (HImode, reg0)));
14595 src = next_consecutive_mem (src);
14596 dst = next_consecutive_mem (dst);
14597 if (len == 2)
14598 return true;
14601 dst = adjust_address (dst, QImode, 0);
14602 src = adjust_address (src, QImode, 0);
14603 reg0 = gen_reg_rtx (QImode);
14604 emit_move_insn (reg0, src);
14605 emit_move_insn (dst, reg0);
14606 return true;
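/* For illustration, an aligned 15-byte copy handled by this function
   becomes one double-word copy (typically ldrd/strd), one word copy, one
   halfword copy and one byte copy: 15 = 8 + 4 + 2 + 1.  If either side is
   unaligned (and unaligned access is enabled), the 8-byte step is emitted
   as a pair of unaligned SImode accesses instead.  */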
14609 /* Select a dominance comparison mode if possible for a test of the general
14610 form (OP (COND_OR (X) (Y)) (const_int 0)). We support three forms.
14611 COND_OR == DOM_CC_X_AND_Y => (X && Y)
14612 COND_OR == DOM_CC_NX_OR_Y => ((! X) || Y)
14613 COND_OR == DOM_CC_X_OR_Y => (X || Y)
14614 In all cases OP will be either EQ or NE, but we don't need to know which
14615 here. If we are unable to support a dominance comparison we return
14616 CC mode. This will then fail to match for the RTL expressions that
14617 generate this call. */
14618 machine_mode
14619 arm_select_dominance_cc_mode (rtx x, rtx y, HOST_WIDE_INT cond_or)
14621 enum rtx_code cond1, cond2;
14622 int swapped = 0;
14624 /* Currently we will probably get the wrong result if the individual
14625 comparisons are not simple. This also ensures that it is safe to
14626 reverse a comparison if necessary. */
14627 if ((arm_select_cc_mode (cond1 = GET_CODE (x), XEXP (x, 0), XEXP (x, 1))
14628 != CCmode)
14629 || (arm_select_cc_mode (cond2 = GET_CODE (y), XEXP (y, 0), XEXP (y, 1))
14630 != CCmode))
14631 return CCmode;
14633 /* The if_then_else variant of this tests the second condition if the
14634 first passes, but is true if the first fails. Reverse the first
14635 condition to get a true "inclusive-or" expression. */
14636 if (cond_or == DOM_CC_NX_OR_Y)
14637 cond1 = reverse_condition (cond1);
14639 /* If the comparisons are not equal, and one doesn't dominate the other,
14640 then we can't do this. */
14641 if (cond1 != cond2
14642 && !comparison_dominates_p (cond1, cond2)
14643 && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
14644 return CCmode;
14646 if (swapped)
14647 std::swap (cond1, cond2);
14649 switch (cond1)
14651 case EQ:
14652 if (cond_or == DOM_CC_X_AND_Y)
14653 return CC_DEQmode;
14655 switch (cond2)
14657 case EQ: return CC_DEQmode;
14658 case LE: return CC_DLEmode;
14659 case LEU: return CC_DLEUmode;
14660 case GE: return CC_DGEmode;
14661 case GEU: return CC_DGEUmode;
14662 default: gcc_unreachable ();
14665 case LT:
14666 if (cond_or == DOM_CC_X_AND_Y)
14667 return CC_DLTmode;
14669 switch (cond2)
14671 case LT:
14672 return CC_DLTmode;
14673 case LE:
14674 return CC_DLEmode;
14675 case NE:
14676 return CC_DNEmode;
14677 default:
14678 gcc_unreachable ();
14681 case GT:
14682 if (cond_or == DOM_CC_X_AND_Y)
14683 return CC_DGTmode;
14685 switch (cond2)
14687 case GT:
14688 return CC_DGTmode;
14689 case GE:
14690 return CC_DGEmode;
14691 case NE:
14692 return CC_DNEmode;
14693 default:
14694 gcc_unreachable ();
14697 case LTU:
14698 if (cond_or == DOM_CC_X_AND_Y)
14699 return CC_DLTUmode;
14701 switch (cond2)
14703 case LTU:
14704 return CC_DLTUmode;
14705 case LEU:
14706 return CC_DLEUmode;
14707 case NE:
14708 return CC_DNEmode;
14709 default:
14710 gcc_unreachable ();
14713 case GTU:
14714 if (cond_or == DOM_CC_X_AND_Y)
14715 return CC_DGTUmode;
14717 switch (cond2)
14719 case GTU:
14720 return CC_DGTUmode;
14721 case GEU:
14722 return CC_DGEUmode;
14723 case NE:
14724 return CC_DNEmode;
14725 default:
14726 gcc_unreachable ();
14729 /* The remaining cases only occur when both comparisons are the
14730 same. */
14731 case NE:
14732 gcc_assert (cond1 == cond2);
14733 return CC_DNEmode;
14735 case LE:
14736 gcc_assert (cond1 == cond2);
14737 return CC_DLEmode;
14739 case GE:
14740 gcc_assert (cond1 == cond2);
14741 return CC_DGEmode;
14743 case LEU:
14744 gcc_assert (cond1 == cond2);
14745 return CC_DLEUmode;
14747 case GEU:
14748 gcc_assert (cond1 == cond2);
14749 return CC_DGEUmode;
14751 default:
14752 gcc_unreachable ();
14756 machine_mode
14757 arm_select_cc_mode (enum rtx_code op, rtx x, rtx y)
14759 /* All floating point compares return CCFP if it is an equality
14760 comparison, and CCFPE otherwise. */
14761 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
14763 switch (op)
14765 case EQ:
14766 case NE:
14767 case UNORDERED:
14768 case ORDERED:
14769 case UNLT:
14770 case UNLE:
14771 case UNGT:
14772 case UNGE:
14773 case UNEQ:
14774 case LTGT:
14775 return CCFPmode;
14777 case LT:
14778 case LE:
14779 case GT:
14780 case GE:
14781 return CCFPEmode;
14783 default:
14784 gcc_unreachable ();
14788 /* A compare with a shifted operand. Because of canonicalization, the
14789 comparison will have to be swapped when we emit the assembler. */
14790 if (GET_MODE (y) == SImode
14791 && (REG_P (y) || (GET_CODE (y) == SUBREG))
14792 && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
14793 || GET_CODE (x) == LSHIFTRT || GET_CODE (x) == ROTATE
14794 || GET_CODE (x) == ROTATERT))
14795 return CC_SWPmode;
14797 /* This operation is performed swapped, but since we only rely on the Z
14798 flag we don't need an additional mode. */
14799 if (GET_MODE (y) == SImode
14800 && (REG_P (y) || (GET_CODE (y) == SUBREG))
14801 && GET_CODE (x) == NEG
14802 && (op == EQ || op == NE))
14803 return CC_Zmode;
14805 /* This is a special case that is used by combine to allow a
14806 comparison of a shifted byte load to be split into a zero-extend
14807 followed by a comparison of the shifted integer (only valid for
14808 equalities and unsigned inequalities). */
14809 if (GET_MODE (x) == SImode
14810 && GET_CODE (x) == ASHIFT
14811 && CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) == 24
14812 && GET_CODE (XEXP (x, 0)) == SUBREG
14813 && MEM_P (SUBREG_REG (XEXP (x, 0)))
14814 && GET_MODE (SUBREG_REG (XEXP (x, 0))) == QImode
14815 && (op == EQ || op == NE
14816 || op == GEU || op == GTU || op == LTU || op == LEU)
14817 && CONST_INT_P (y))
14818 return CC_Zmode;
14820 /* A construct for a conditional compare, if the false arm contains
14821 0, then both conditions must be true, otherwise either condition
14822 must be true. Not all conditions are possible, so CCmode is
14823 returned if it can't be done. */
14824 if (GET_CODE (x) == IF_THEN_ELSE
14825 && (XEXP (x, 2) == const0_rtx
14826 || XEXP (x, 2) == const1_rtx)
14827 && COMPARISON_P (XEXP (x, 0))
14828 && COMPARISON_P (XEXP (x, 1)))
14829 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
14830 INTVAL (XEXP (x, 2)));
14832 /* Alternate canonicalizations of the above. These are somewhat cleaner. */
14833 if (GET_CODE (x) == AND
14834 && (op == EQ || op == NE)
14835 && COMPARISON_P (XEXP (x, 0))
14836 && COMPARISON_P (XEXP (x, 1)))
14837 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
14838 DOM_CC_X_AND_Y);
14840 if (GET_CODE (x) == IOR
14841 && (op == EQ || op == NE)
14842 && COMPARISON_P (XEXP (x, 0))
14843 && COMPARISON_P (XEXP (x, 1)))
14844 return arm_select_dominance_cc_mode (XEXP (x, 0), XEXP (x, 1),
14845 DOM_CC_X_OR_Y);
14847 /* An operation (on Thumb) where we want to test for a single bit.
14848 This is done by shifting that bit up into the top bit of a
14849 scratch register; we can then branch on the sign bit. */
14850 if (TARGET_THUMB1
14851 && GET_MODE (x) == SImode
14852 && (op == EQ || op == NE)
14853 && GET_CODE (x) == ZERO_EXTRACT
14854 && XEXP (x, 1) == const1_rtx)
14855 return CC_Nmode;
14857 /* An operation that sets the condition codes as a side-effect, the
14858 V flag is not set correctly, so we can only use comparisons where
14859 this doesn't matter. (For LT and GE we can use "mi" and "pl"
14860 instead.) */
14861 /* ??? Does the ZERO_EXTRACT case really apply to thumb2? */
14862 if (GET_MODE (x) == SImode
14863 && y == const0_rtx
14864 && (op == EQ || op == NE || op == LT || op == GE)
14865 && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS
14866 || GET_CODE (x) == AND || GET_CODE (x) == IOR
14867 || GET_CODE (x) == XOR || GET_CODE (x) == MULT
14868 || GET_CODE (x) == NOT || GET_CODE (x) == NEG
14869 || GET_CODE (x) == LSHIFTRT
14870 || GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
14871 || GET_CODE (x) == ROTATERT
14872 || (TARGET_32BIT && GET_CODE (x) == ZERO_EXTRACT)))
14873 return CC_NOOVmode;
14875 if (GET_MODE (x) == QImode && (op == EQ || op == NE))
14876 return CC_Zmode;
14878 if (GET_MODE (x) == SImode && (op == LTU || op == GEU)
14879 && GET_CODE (x) == PLUS
14880 && (rtx_equal_p (XEXP (x, 0), y) || rtx_equal_p (XEXP (x, 1), y)))
14881 return CC_Cmode;
14883 if (GET_MODE (x) == DImode || GET_MODE (y) == DImode)
14885 switch (op)
14887 case EQ:
14888 case NE:
14889 /* A DImode comparison against zero can be implemented by
14890 or'ing the two halves together. */
14891 if (y == const0_rtx)
14892 return CC_Zmode;
14894 /* We can do an equality test in three Thumb instructions. */
14895 if (!TARGET_32BIT)
14896 return CC_Zmode;
14898 /* FALLTHROUGH */
14900 case LTU:
14901 case LEU:
14902 case GTU:
14903 case GEU:
14904 /* DImode unsigned comparisons can be implemented by cmp +
14905 cmpeq without a scratch register. Not worth doing in
14906 Thumb-2. */
14907 if (TARGET_32BIT)
14908 return CC_CZmode;
14910 /* FALLTHROUGH */
14912 case LT:
14913 case LE:
14914 case GT:
14915 case GE:
14916 /* DImode signed and unsigned comparisons can be implemented
14917 by cmp + sbcs with a scratch register, but that does not
14918 set the Z flag - we must reverse GT/LE/GTU/LEU. */
14919 gcc_assert (op != EQ && op != NE);
14920 return CC_NCVmode;
14922 default:
14923 gcc_unreachable ();
14927 if (GET_MODE_CLASS (GET_MODE (x)) == MODE_CC)
14928 return GET_MODE (x);
14930 return CCmode;
14933 /* X and Y are two things to compare using CODE. Emit the compare insn and
14934 return the rtx for register 0 in the proper mode. FP means this is a
14935 floating point compare: I don't think that it is needed on the arm. */
14936 rtx
14937 arm_gen_compare_reg (enum rtx_code code, rtx x, rtx y, rtx scratch)
14939 machine_mode mode;
14940 rtx cc_reg;
14941 int dimode_comparison = GET_MODE (x) == DImode || GET_MODE (y) == DImode;
14943 /* We might have X as a constant, Y as a register because of the predicates
14944 used for cmpdi. If so, force X to a register here. */
14945 if (dimode_comparison && !REG_P (x))
14946 x = force_reg (DImode, x);
14948 mode = SELECT_CC_MODE (code, x, y);
14949 cc_reg = gen_rtx_REG (mode, CC_REGNUM);
14951 if (dimode_comparison
14952 && mode != CC_CZmode)
14954 rtx clobber, set;
14956 /* To compare two non-zero values for equality, XOR them and
14957 then compare against zero. Not used for ARM mode; there
14958 CC_CZmode is cheaper. */
14959 if (mode == CC_Zmode && y != const0_rtx)
14961 gcc_assert (!reload_completed);
14962 x = expand_binop (DImode, xor_optab, x, y, NULL_RTX, 0, OPTAB_WIDEN);
14963 y = const0_rtx;
14966 /* A scratch register is required. */
14967 if (reload_completed)
14968 gcc_assert (scratch != NULL && GET_MODE (scratch) == SImode);
14969 else
14970 scratch = gen_rtx_SCRATCH (SImode);
14972 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
14973 set = gen_rtx_SET (cc_reg, gen_rtx_COMPARE (mode, x, y));
14974 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
14976 else
14977 emit_set_insn (cc_reg, gen_rtx_COMPARE (mode, x, y));
14979 return cc_reg;
14982 /* Generate a sequence of insns that will generate the correct return
14983 address mask depending on the physical architecture that the program
14984 is running on. */
14985 rtx
14986 arm_gen_return_addr_mask (void)
14988 rtx reg = gen_reg_rtx (Pmode);
14990 emit_insn (gen_return_addr_mask (reg));
14991 return reg;
14994 void
14995 arm_reload_in_hi (rtx *operands)
14997 rtx ref = operands[1];
14998 rtx base, scratch;
14999 HOST_WIDE_INT offset = 0;
15001 if (GET_CODE (ref) == SUBREG)
15003 offset = SUBREG_BYTE (ref);
15004 ref = SUBREG_REG (ref);
15007 if (REG_P (ref))
15009 /* We have a pseudo which has been spilt onto the stack; there
15010 are two cases here: the first where there is a simple
15011 stack-slot replacement and a second where the stack-slot is
15012 out of range, or is used as a subreg. */
15013 if (reg_equiv_mem (REGNO (ref)))
15015 ref = reg_equiv_mem (REGNO (ref));
15016 base = find_replacement (&XEXP (ref, 0));
15018 else
15019 /* The slot is out of range, or was dressed up in a SUBREG. */
15020 base = reg_equiv_address (REGNO (ref));
15022 /* PR 62554: If there is no equivalent memory location then just move
15023 the value as an SImode register move. This happens when the target
15024 architecture variant does not have an HImode register move. */
15025 if (base == NULL)
15027 gcc_assert (REG_P (operands[0]));
15028 emit_insn (gen_movsi (gen_rtx_SUBREG (SImode, operands[0], 0),
15029 gen_rtx_SUBREG (SImode, ref, 0)));
15030 return;
15033 else
15034 base = find_replacement (&XEXP (ref, 0));
15036 /* Handle the case where the address is too complex to be offset by 1. */
15037 if (GET_CODE (base) == MINUS
15038 || (GET_CODE (base) == PLUS && !CONST_INT_P (XEXP (base, 1))))
15040 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15042 emit_set_insn (base_plus, base);
15043 base = base_plus;
15045 else if (GET_CODE (base) == PLUS)
15047 /* The addend must be CONST_INT, or we would have dealt with it above. */
15048 HOST_WIDE_INT hi, lo;
15050 offset += INTVAL (XEXP (base, 1));
15051 base = XEXP (base, 0);
15053 /* Rework the address into a legal sequence of insns. */
15054 /* Valid range for lo is -4095 -> 4095 */
15055 lo = (offset >= 0
15056 ? (offset & 0xfff)
15057 : -((-offset) & 0xfff));
15059 /* Corner case, if lo is the max offset then we would be out of range
15060 once we have added the additional 1 below, so bump the msb into the
15061 pre-loading insn(s). */
15062 if (lo == 4095)
15063 lo &= 0x7ff;
15065 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
15066 ^ (HOST_WIDE_INT) 0x80000000)
15067 - (HOST_WIDE_INT) 0x80000000);
15069 gcc_assert (hi + lo == offset);
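/* Editorial illustration, not part of the original source: for offset =
   0x1234 this split gives lo = 0x234 and hi = 0x1000; for the corner case
   offset = 4095, lo is first reduced to 2047 so that hi = 2048, keeping
   both lo and lo + 1 inside the +/-4095 range used by the byte accesses
   below.  */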
15071 if (hi != 0)
15073 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15075 /* Get the base address; addsi3 knows how to handle constants
15076 that require more than one insn. */
15077 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
15078 base = base_plus;
15079 offset = lo;
15083 /* Operands[2] may overlap operands[0] (though it won't overlap
15084 operands[1]), that's why we asked for a DImode reg -- so we can
15085 use the bit that does not overlap. */
15086 if (REGNO (operands[2]) == REGNO (operands[0]))
15087 scratch = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15088 else
15089 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
15091 emit_insn (gen_zero_extendqisi2 (scratch,
15092 gen_rtx_MEM (QImode,
15093 plus_constant (Pmode, base,
15094 offset))));
15095 emit_insn (gen_zero_extendqisi2 (gen_rtx_SUBREG (SImode, operands[0], 0),
15096 gen_rtx_MEM (QImode,
15097 plus_constant (Pmode, base,
15098 offset + 1))));
15099 if (!BYTES_BIG_ENDIAN)
15100 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
15101 gen_rtx_IOR (SImode,
15102 gen_rtx_ASHIFT
15103 (SImode,
15104 gen_rtx_SUBREG (SImode, operands[0], 0),
15105 GEN_INT (8)),
15106 scratch));
15107 else
15108 emit_set_insn (gen_rtx_SUBREG (SImode, operands[0], 0),
15109 gen_rtx_IOR (SImode,
15110 gen_rtx_ASHIFT (SImode, scratch,
15111 GEN_INT (8)),
15112 gen_rtx_SUBREG (SImode, operands[0], 0)));
15115 /* Handle storing a half-word to memory during reload by synthesizing as two
15116 byte stores. Take care not to clobber the input values until after we
15117 have moved them somewhere safe. This code assumes that if the DImode
15118 scratch in operands[2] overlaps either the input value or output address
15119 in some way, then that value must die in this insn (we absolutely need
15120 two scratch registers for some corner cases). */
15121 void
15122 arm_reload_out_hi (rtx *operands)
15124 rtx ref = operands[0];
15125 rtx outval = operands[1];
15126 rtx base, scratch;
15127 HOST_WIDE_INT offset = 0;
15129 if (GET_CODE (ref) == SUBREG)
15131 offset = SUBREG_BYTE (ref);
15132 ref = SUBREG_REG (ref);
15135 if (REG_P (ref))
15137 /* We have a pseudo which has been spilt onto the stack; there
15138 are two cases here: the first where there is a simple
15139 stack-slot replacement and a second where the stack-slot is
15140 out of range, or is used as a subreg. */
15141 if (reg_equiv_mem (REGNO (ref)))
15143 ref = reg_equiv_mem (REGNO (ref));
15144 base = find_replacement (&XEXP (ref, 0));
15146 else
15147 /* The slot is out of range, or was dressed up in a SUBREG. */
15148 base = reg_equiv_address (REGNO (ref));
15150 /* PR 62254: If there is no equivalent memory location then just move
15151 the value as an SImode register move. This happens when the target
15152 architecture variant does not have an HImode register move. */
15153 if (base == NULL)
15155 gcc_assert (REG_P (outval) || SUBREG_P (outval));
15157 if (REG_P (outval))
15159 emit_insn (gen_movsi (gen_rtx_SUBREG (SImode, ref, 0),
15160 gen_rtx_SUBREG (SImode, outval, 0)));
15162 else /* SUBREG_P (outval) */
15164 if (GET_MODE (SUBREG_REG (outval)) == SImode)
15165 emit_insn (gen_movsi (gen_rtx_SUBREG (SImode, ref, 0),
15166 SUBREG_REG (outval)));
15167 else
15168 /* FIXME: Handle other cases ? */
15169 gcc_unreachable ();
15171 return;
15174 else
15175 base = find_replacement (&XEXP (ref, 0));
15177 scratch = gen_rtx_REG (SImode, REGNO (operands[2]));
15179 /* Handle the case where the address is too complex to be offset by 1. */
15180 if (GET_CODE (base) == MINUS
15181 || (GET_CODE (base) == PLUS && !CONST_INT_P (XEXP (base, 1))))
15183 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15185 /* Be careful not to destroy OUTVAL. */
15186 if (reg_overlap_mentioned_p (base_plus, outval))
15188 /* Updating base_plus might destroy outval, see if we can
15189 swap the scratch and base_plus. */
15190 if (!reg_overlap_mentioned_p (scratch, outval))
15191 std::swap (scratch, base_plus);
15192 else
15194 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
15196 /* Be conservative and copy OUTVAL into the scratch now,
15197 this should only be necessary if outval is a subreg
15198 of something larger than a word. */
15199 /* XXX Might this clobber base? I can't see how it can,
15200 since scratch is known to overlap with OUTVAL, and
15201 must be wider than a word. */
15202 emit_insn (gen_movhi (scratch_hi, outval));
15203 outval = scratch_hi;
15207 emit_set_insn (base_plus, base);
15208 base = base_plus;
15210 else if (GET_CODE (base) == PLUS)
15212 /* The addend must be CONST_INT, or we would have dealt with it above. */
15213 HOST_WIDE_INT hi, lo;
15215 offset += INTVAL (XEXP (base, 1));
15216 base = XEXP (base, 0);
15218 /* Rework the address into a legal sequence of insns. */
15219 /* Valid range for lo is -4095 -> 4095 */
15220 lo = (offset >= 0
15221 ? (offset & 0xfff)
15222 : -((-offset) & 0xfff));
15224 /* Corner case, if lo is the max offset then we would be out of range
15225 once we have added the additional 1 below, so bump the msb into the
15226 pre-loading insn(s). */
15227 if (lo == 4095)
15228 lo &= 0x7ff;
15230 hi = ((((offset - lo) & (HOST_WIDE_INT) 0xffffffff)
15231 ^ (HOST_WIDE_INT) 0x80000000)
15232 - (HOST_WIDE_INT) 0x80000000);
15234 gcc_assert (hi + lo == offset);
15236 if (hi != 0)
15238 rtx base_plus = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15240 /* Be careful not to destroy OUTVAL. */
15241 if (reg_overlap_mentioned_p (base_plus, outval))
15243 /* Updating base_plus might destroy outval, see if we
15244 can swap the scratch and base_plus. */
15245 if (!reg_overlap_mentioned_p (scratch, outval))
15246 std::swap (scratch, base_plus);
15247 else
15249 rtx scratch_hi = gen_rtx_REG (HImode, REGNO (operands[2]));
15251 /* Be conservative and copy outval into scratch now,
15252 this should only be necessary if outval is a
15253 subreg of something larger than a word. */
15254 /* XXX Might this clobber base? I can't see how it
15255 can, since scratch is known to overlap with
15256 outval. */
15257 emit_insn (gen_movhi (scratch_hi, outval));
15258 outval = scratch_hi;
15262 /* Get the base address; addsi3 knows how to handle constants
15263 that require more than one insn. */
15264 emit_insn (gen_addsi3 (base_plus, base, GEN_INT (hi)));
15265 base = base_plus;
15266 offset = lo;
15270 if (BYTES_BIG_ENDIAN)
15272 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
15273 plus_constant (Pmode, base,
15274 offset + 1)),
15275 gen_lowpart (QImode, outval)));
15276 emit_insn (gen_lshrsi3 (scratch,
15277 gen_rtx_SUBREG (SImode, outval, 0),
15278 GEN_INT (8)));
15279 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (Pmode, base,
15280 offset)),
15281 gen_lowpart (QImode, scratch)));
15283 else
15285 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (Pmode, base,
15286 offset)),
15287 gen_lowpart (QImode, outval)));
15288 emit_insn (gen_lshrsi3 (scratch,
15289 gen_rtx_SUBREG (SImode, outval, 0),
15290 GEN_INT (8)));
15291 emit_insn (gen_movqi (gen_rtx_MEM (QImode,
15292 plus_constant (Pmode, base,
15293 offset + 1)),
15294 gen_lowpart (QImode, scratch)));
15298 /* Return true if a type must be passed in memory. For AAPCS, small aggregates
15299 (padded to the size of a word) should be passed in a register. */
15301 static bool
15302 arm_must_pass_in_stack (machine_mode mode, const_tree type)
15304 if (TARGET_AAPCS_BASED)
15305 return must_pass_in_stack_var_size (mode, type);
15306 else
15307 return must_pass_in_stack_var_size_or_pad (mode, type);
15311 /* Implement TARGET_FUNCTION_ARG_PADDING; return PAD_UPWARD if the lowest
15312 byte of a stack argument has useful data. For legacy APCS ABIs we use
15313 the default. For AAPCS based ABIs small aggregate types are placed
15314 in the lowest memory address. */
15316 static pad_direction
15317 arm_function_arg_padding (machine_mode mode, const_tree type)
15319 if (!TARGET_AAPCS_BASED)
15320 return default_function_arg_padding (mode, type);
15322 if (type && BYTES_BIG_ENDIAN && INTEGRAL_TYPE_P (type))
15323 return PAD_DOWNWARD;
15325 return PAD_UPWARD;
15329 /* Similarly, for use by BLOCK_REG_PADDING (MODE, TYPE, FIRST).
15330 Return !BYTES_BIG_ENDIAN if the least significant byte of the
15331 register has useful data, and return the opposite if the most
15332 significant byte does. */
15334 bool
15335 arm_pad_reg_upward (machine_mode mode,
15336 tree type, int first ATTRIBUTE_UNUSED)
15338 if (TARGET_AAPCS_BASED && BYTES_BIG_ENDIAN)
15340 /* For AAPCS, small aggregates, small fixed-point types,
15341 and small complex types are always padded upwards. */
15342 if (type)
15344 if ((AGGREGATE_TYPE_P (type)
15345 || TREE_CODE (type) == COMPLEX_TYPE
15346 || FIXED_POINT_TYPE_P (type))
15347 && int_size_in_bytes (type) <= 4)
15348 return true;
15350 else
15352 if ((COMPLEX_MODE_P (mode) || ALL_FIXED_POINT_MODE_P (mode))
15353 && GET_MODE_SIZE (mode) <= 4)
15354 return true;
15358 /* Otherwise, use default padding. */
15359 return !BYTES_BIG_ENDIAN;
15362 /* Returns true iff OFFSET is valid for use in an LDRD/STRD instruction,
15363 assuming that the address in the base register is word aligned. */
15364 bool
15365 offset_ok_for_ldrd_strd (HOST_WIDE_INT offset)
15367 HOST_WIDE_INT max_offset;
15369 /* Offset must be a multiple of 4 in Thumb mode. */
15370 if (TARGET_THUMB2 && ((offset & 3) != 0))
15371 return false;
15373 if (TARGET_THUMB2)
15374 max_offset = 1020;
15375 else if (TARGET_ARM)
15376 max_offset = 255;
15377 else
15378 return false;
15380 return ((offset <= max_offset) && (offset >= -max_offset));
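/* Editorial example, not part of the original source: offset 1020 is
   accepted for Thumb-2 (a multiple of 4 within +/-1020) but rejected for
   ARM state (limit +/-255), while offset 250 is accepted for ARM state but
   rejected for Thumb-2 because it is not a multiple of 4.  */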
15383 /* Checks whether the operands are valid for use in an LDRD/STRD instruction.
15384 Assumes that RT, RT2, and RN are REG. This is guaranteed by the patterns.
15385 Assumes that the address in the base register RN is word aligned. Pattern
15386 guarantees that both memory accesses use the same base register,
15387 the offsets are constants within the range, and the gap between the offsets is 4.
15388 If reload is complete then check that the registers are legal. WBACK indicates whether
15389 address is updated. LOAD indicates whether memory access is load or store. */
15390 bool
15391 operands_ok_ldrd_strd (rtx rt, rtx rt2, rtx rn, HOST_WIDE_INT offset,
15392 bool wback, bool load)
15394 unsigned int t, t2, n;
15396 if (!reload_completed)
15397 return true;
15399 if (!offset_ok_for_ldrd_strd (offset))
15400 return false;
15402 t = REGNO (rt);
15403 t2 = REGNO (rt2);
15404 n = REGNO (rn);
15406 if ((TARGET_THUMB2)
15407 && ((wback && (n == t || n == t2))
15408 || (t == SP_REGNUM)
15409 || (t == PC_REGNUM)
15410 || (t2 == SP_REGNUM)
15411 || (t2 == PC_REGNUM)
15412 || (!load && (n == PC_REGNUM))
15413 || (load && (t == t2))
15414 /* Triggers Cortex-M3 LDRD errata. */
15415 || (!wback && load && fix_cm3_ldrd && (n == t))))
15416 return false;
15418 if ((TARGET_ARM)
15419 && ((wback && (n == t || n == t2))
15420 || (t2 == PC_REGNUM)
15421 || (t % 2 != 0) /* First destination register is not even. */
15422 || (t2 != t + 1)
15423 /* PC can be used as base register (for offset addressing only),
15424 but it is deprecated. */
15425 || (n == PC_REGNUM)))
15426 return false;
15428 return true;
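/* Editorial example, not part of the original source: in ARM state the
   checks above accept rt = r4, rt2 = r5 with rn = r6 (even first register
   and a consecutive pair), reject rt = r5, rt2 = r6 (odd first register),
   and reject writeback forms in which rn equals rt or rt2.  */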
15431 /* Return true if a 64-bit access with alignment ALIGN and with a
15432 constant offset OFFSET from the base pointer is permitted on this
15433 architecture. */
15434 static bool
15435 align_ok_ldrd_strd (HOST_WIDE_INT align, HOST_WIDE_INT offset)
15437 return (unaligned_access
15438 ? (align >= BITS_PER_WORD && (offset & 3) == 0)
15439 : (align >= 2 * BITS_PER_WORD && (offset & 7) == 0));
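/* Editorial example, not part of the original source: with
   unaligned_access enabled, a word-aligned (32-bit) access at offset 4 is
   permitted; without it the access must be doubleword aligned and the
   offset a multiple of 8, so the same access would be rejected.  */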
15442 /* Helper for gen_operands_ldrd_strd. Returns true iff the memory
15443 operand MEM's address contains an immediate offset from the base
15444 register and has no side effects, in which case it sets BASE,
15445 OFFSET and ALIGN accordingly. */
15446 static bool
15447 mem_ok_for_ldrd_strd (rtx mem, rtx *base, rtx *offset, HOST_WIDE_INT *align)
15449 rtx addr;
15451 gcc_assert (base != NULL && offset != NULL);
15453 /* TODO: Handle more general memory operand patterns, such as
15454 PRE_DEC and PRE_INC. */
15456 if (side_effects_p (mem))
15457 return false;
15459 /* Can't deal with subregs. */
15460 if (GET_CODE (mem) == SUBREG)
15461 return false;
15463 gcc_assert (MEM_P (mem));
15465 *offset = const0_rtx;
15466 *align = MEM_ALIGN (mem);
15468 addr = XEXP (mem, 0);
15470 /* If addr isn't valid for DImode, then we can't handle it. */
15471 if (!arm_legitimate_address_p (DImode, addr,
15472 reload_in_progress || reload_completed))
15473 return false;
15475 if (REG_P (addr))
15477 *base = addr;
15478 return true;
15480 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == MINUS)
15482 *base = XEXP (addr, 0);
15483 *offset = XEXP (addr, 1);
15484 return (REG_P (*base) && CONST_INT_P (*offset));
15487 return false;
15490 /* Called from a peephole2 to replace two word-size accesses with a
15491 single LDRD/STRD instruction. Returns true iff we can generate a
15492 new instruction sequence. That is, both accesses use the same base
15493 register and the gap between constant offsets is 4. This function
15494 may reorder its operands to match ldrd/strd RTL templates.
15495 OPERANDS are the operands found by the peephole matcher;
15496 OPERANDS[0,1] are register operands, and OPERANDS[2,3] are the
15497 corresponding memory operands. LOAD indicates whether the access
15498 is load or store. CONST_STORE indicates a store of constant
15499 integer values held in OPERANDS[4,5] and assumes that the pattern
15500 is 4 insns long, for the purpose of checking dead registers.
15501 COMMUTE indicates that register operands may be reordered. */
15502 bool
15503 gen_operands_ldrd_strd (rtx *operands, bool load,
15504 bool const_store, bool commute)
15506 int nops = 2;
15507 HOST_WIDE_INT offsets[2], offset, align[2];
15508 rtx base = NULL_RTX;
15509 rtx cur_base, cur_offset, tmp;
15510 int i, gap;
15511 HARD_REG_SET regset;
15513 gcc_assert (!const_store || !load);
15514 /* Check that the memory references are immediate offsets from the
15515 same base register. Extract the base register, the destination
15516 registers, and the corresponding memory offsets. */
15517 for (i = 0; i < nops; i++)
15519 if (!mem_ok_for_ldrd_strd (operands[nops+i], &cur_base, &cur_offset,
15520 &align[i]))
15521 return false;
15523 if (i == 0)
15524 base = cur_base;
15525 else if (REGNO (base) != REGNO (cur_base))
15526 return false;
15528 offsets[i] = INTVAL (cur_offset);
15529 if (GET_CODE (operands[i]) == SUBREG)
15531 tmp = SUBREG_REG (operands[i]);
15532 gcc_assert (GET_MODE (operands[i]) == GET_MODE (tmp));
15533 operands[i] = tmp;
15537 /* Make sure there is no dependency between the individual loads. */
15538 if (load && REGNO (operands[0]) == REGNO (base))
15539 return false; /* RAW */
15541 if (load && REGNO (operands[0]) == REGNO (operands[1]))
15542 return false; /* WAW */
15544 /* If the same input register is used in both stores
15545 when storing different constants, try to find a free register.
15546 For example, the code
15547 mov r0, 0
15548 str r0, [r2]
15549 mov r0, 1
15550 str r0, [r2, #4]
15551 can be transformed into
15552 mov r1, 0
15553 mov r0, 1
15554 strd r1, r0, [r2]
15555 in Thumb mode assuming that r1 is free.
15556 For ARM mode do the same but only if the starting register
15557 can be made to be even. */
15558 if (const_store
15559 && REGNO (operands[0]) == REGNO (operands[1])
15560 && INTVAL (operands[4]) != INTVAL (operands[5]))
15562 if (TARGET_THUMB2)
15564 CLEAR_HARD_REG_SET (regset);
15565 tmp = peep2_find_free_register (0, 4, "r", SImode, &regset);
15566 if (tmp == NULL_RTX)
15567 return false;
15569 /* Use the new register in the first load to ensure that
15570 if the original input register is not dead after peephole,
15571 then it will have the correct constant value. */
15572 operands[0] = tmp;
15574 else if (TARGET_ARM)
15576 int regno = REGNO (operands[0]);
15577 if (!peep2_reg_dead_p (4, operands[0]))
15579 /* When the input register is even and is not dead after the
15580 pattern, it has to hold the second constant but we cannot
15581 form a legal STRD in ARM mode with this register as the second
15582 register. */
15583 if (regno % 2 == 0)
15584 return false;
15586 /* Is regno-1 free? */
15587 SET_HARD_REG_SET (regset);
15588 CLEAR_HARD_REG_BIT(regset, regno - 1);
15589 tmp = peep2_find_free_register (0, 4, "r", SImode, &regset);
15590 if (tmp == NULL_RTX)
15591 return false;
15593 operands[0] = tmp;
15595 else
15597 /* Find a DImode register. */
15598 CLEAR_HARD_REG_SET (regset);
15599 tmp = peep2_find_free_register (0, 4, "r", DImode, &regset);
15600 if (tmp != NULL_RTX)
15602 operands[0] = simplify_gen_subreg (SImode, tmp, DImode, 0);
15603 operands[1] = simplify_gen_subreg (SImode, tmp, DImode, 4);
15605 else
15607 /* Can we use the input register to form a DI register? */
15608 SET_HARD_REG_SET (regset);
15609 CLEAR_HARD_REG_BIT(regset,
15610 regno % 2 == 0 ? regno + 1 : regno - 1);
15611 tmp = peep2_find_free_register (0, 4, "r", SImode, &regset);
15612 if (tmp == NULL_RTX)
15613 return false;
15614 operands[regno % 2 == 1 ? 0 : 1] = tmp;
15618 gcc_assert (operands[0] != NULL_RTX);
15619 gcc_assert (operands[1] != NULL_RTX);
15620 gcc_assert (REGNO (operands[0]) % 2 == 0);
15621 gcc_assert (REGNO (operands[1]) == REGNO (operands[0]) + 1);
15625 /* Make sure the instructions are ordered with lower memory access first. */
15626 if (offsets[0] > offsets[1])
15628 gap = offsets[0] - offsets[1];
15629 offset = offsets[1];
15631 /* Swap the instructions such that lower memory is accessed first. */
15632 std::swap (operands[0], operands[1]);
15633 std::swap (operands[2], operands[3]);
15634 std::swap (align[0], align[1]);
15635 if (const_store)
15636 std::swap (operands[4], operands[5]);
15638 else
15640 gap = offsets[1] - offsets[0];
15641 offset = offsets[0];
15644 /* Make sure accesses are to consecutive memory locations. */
15645 if (gap != 4)
15646 return false;
15648 if (!align_ok_ldrd_strd (align[0], offset))
15649 return false;
15651 /* Make sure we generate legal instructions. */
15652 if (operands_ok_ldrd_strd (operands[0], operands[1], base, offset,
15653 false, load))
15654 return true;
15656 /* In Thumb state, where registers are almost unconstrained, there
15657 is little hope to fix it. */
15658 if (TARGET_THUMB2)
15659 return false;
15661 if (load && commute)
15663 /* Try reordering registers. */
15664 std::swap (operands[0], operands[1]);
15665 if (operands_ok_ldrd_strd (operands[0], operands[1], base, offset,
15666 false, load))
15667 return true;
15670 if (const_store)
15672 /* If input registers are dead after this pattern, they can be
15673 reordered or replaced by other registers that are free in the
15674 current pattern. */
15675 if (!peep2_reg_dead_p (4, operands[0])
15676 || !peep2_reg_dead_p (4, operands[1]))
15677 return false;
15679 /* Try to reorder the input registers. */
15680 /* For example, the code
15681 mov r0, 0
15682 mov r1, 1
15683 str r1, [r2]
15684 str r0, [r2, #4]
15685 can be transformed into
15686 mov r1, 0
15687 mov r0, 1
15688 strd r0, [r2]
15690 if (operands_ok_ldrd_strd (operands[1], operands[0], base, offset,
15691 false, false))
15693 std::swap (operands[0], operands[1]);
15694 return true;
15697 /* Try to find a free DI register. */
15698 CLEAR_HARD_REG_SET (regset);
15699 add_to_hard_reg_set (&regset, SImode, REGNO (operands[0]));
15700 add_to_hard_reg_set (&regset, SImode, REGNO (operands[1]));
15701 while (true)
15703 tmp = peep2_find_free_register (0, 4, "r", DImode, &regset);
15704 if (tmp == NULL_RTX)
15705 return false;
15707 /* DREG must be an even-numbered register in DImode.
15708 Split it into SI registers. */
15709 operands[0] = simplify_gen_subreg (SImode, tmp, DImode, 0);
15710 operands[1] = simplify_gen_subreg (SImode, tmp, DImode, 4);
15711 gcc_assert (operands[0] != NULL_RTX);
15712 gcc_assert (operands[1] != NULL_RTX);
15713 gcc_assert (REGNO (operands[0]) % 2 == 0);
15714 gcc_assert (REGNO (operands[0]) + 1 == REGNO (operands[1]));
15716 return (operands_ok_ldrd_strd (operands[0], operands[1],
15717 base, offset,
15718 false, load));
15722 return false;
15728 /* Print a symbolic form of X to the debug file, F. */
15729 static void
15730 arm_print_value (FILE *f, rtx x)
15732 switch (GET_CODE (x))
15734 case CONST_INT:
15735 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
15736 return;
15738 case CONST_DOUBLE:
15739 fprintf (f, "<0x%lx,0x%lx>", (long)XWINT (x, 2), (long)XWINT (x, 3));
15740 return;
15742 case CONST_VECTOR:
15744 int i;
15746 fprintf (f, "<");
15747 for (i = 0; i < CONST_VECTOR_NUNITS (x); i++)
15749 fprintf (f, HOST_WIDE_INT_PRINT_HEX, INTVAL (CONST_VECTOR_ELT (x, i)));
15750 if (i < (CONST_VECTOR_NUNITS (x) - 1))
15751 fputc (',', f);
15753 fprintf (f, ">");
15755 return;
15757 case CONST_STRING:
15758 fprintf (f, "\"%s\"", XSTR (x, 0));
15759 return;
15761 case SYMBOL_REF:
15762 fprintf (f, "`%s'", XSTR (x, 0));
15763 return;
15765 case LABEL_REF:
15766 fprintf (f, "L%d", INSN_UID (XEXP (x, 0)));
15767 return;
15769 case CONST:
15770 arm_print_value (f, XEXP (x, 0));
15771 return;
15773 case PLUS:
15774 arm_print_value (f, XEXP (x, 0));
15775 fprintf (f, "+");
15776 arm_print_value (f, XEXP (x, 1));
15777 return;
15779 case PC:
15780 fprintf (f, "pc");
15781 return;
15783 default:
15784 fprintf (f, "????");
15785 return;
15789 /* Routines for manipulation of the constant pool. */
15791 /* Arm instructions cannot load a large constant directly into a
15792 register; they have to come from a pc relative load. The constant
15793 must therefore be placed in the addressable range of the pc
15794 relative load. Depending on the precise pc relative load
15795 instruction the range is somewhere between 256 bytes and 4k. This
15796 means that we often have to dump a constant inside a function, and
15797 generate code to branch around it.
15799 It is important to minimize this, since the branches will slow
15800 things down and make the code larger.
15802 Normally we can hide the table after an existing unconditional
15803 branch so that there is no interruption of the flow, but in the
15804 worst case the code looks like this:
15806 ldr rn, L1
15808 b L2
15809 align
15810 L1: .long value
15814 ldr rn, L3
15816 b L4
15817 align
15818 L3: .long value
15822 We fix this by performing a scan after scheduling, which notices
15823 which instructions need to have their operands fetched from the
15824 constant table and builds the table.
15826 The algorithm starts by building a table of all the constants that
15827 need fixing up and all the natural barriers in the function (places
15828 where a constant table can be dropped without breaking the flow).
15829 For each fixup we note how far the pc-relative replacement will be
15830 able to reach and the offset of the instruction into the function.
15832 Having built the table we then group the fixes together to form
15833 tables that are as large as possible (subject to addressing
15834 constraints) and emit each table of constants after the last
15835 barrier that is within range of all the instructions in the group.
15836 If a group does not contain a barrier, then we forcibly create one
15837 by inserting a jump instruction into the flow. Once the table has
15838 been inserted, the insns are then modified to reference the
15839 relevant entry in the pool.
15841 Possible enhancements to the algorithm (not implemented) are:
15843 1) For some processors and object formats, there may be benefit in
15844 aligning the pools to the start of cache lines; this alignment
15845 would need to be taken into account when calculating addressability
15846 of a pool. */
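/* Editorial illustration, not part of the original source: in the common
   case described above the pool hides behind an unconditional branch that
   is already present, e.g.

	ldr	rn, L1
	...
	b	L2		@ pre-existing branch
	align
   L1:	.long	value
   L2:	...

   so no extra jump needs to be inserted.  */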
15848 /* These typedefs are located at the start of this file, so that
15849 they can be used in the prototypes there. This comment is to
15850 remind readers of that fact so that the following structures
15851 can be understood more easily.
15853 typedef struct minipool_node Mnode;
15854 typedef struct minipool_fixup Mfix; */
15856 struct minipool_node
15858 /* Doubly linked chain of entries. */
15859 Mnode * next;
15860 Mnode * prev;
15861 /* The maximum offset into the code at which this entry can be placed. While
15862 pushing fixes for forward references, all entries are sorted in order
15863 of increasing max_address. */
15864 HOST_WIDE_INT max_address;
15865 /* Similarly for an entry inserted for a backwards ref. */
15866 HOST_WIDE_INT min_address;
15867 /* The number of fixes referencing this entry. This can become zero
15868 if we "unpush" an entry. In this case we ignore the entry when we
15869 come to emit the code. */
15870 int refcount;
15871 /* The offset from the start of the minipool. */
15872 HOST_WIDE_INT offset;
15873 /* The value in the table. */
15874 rtx value;
15875 /* The mode of value. */
15876 machine_mode mode;
15877 /* The size of the value. With iWMMXt enabled
15878 sizes > 4 also imply an alignment of 8-bytes. */
15879 int fix_size;
15882 struct minipool_fixup
15884 Mfix * next;
15885 rtx_insn * insn;
15886 HOST_WIDE_INT address;
15887 rtx * loc;
15888 machine_mode mode;
15889 int fix_size;
15890 rtx value;
15891 Mnode * minipool;
15892 HOST_WIDE_INT forwards;
15893 HOST_WIDE_INT backwards;
15896 /* Fixes less than a word need padding out to a word boundary. */
15897 #define MINIPOOL_FIX_SIZE(mode) \
15898 (GET_MODE_SIZE ((mode)) >= 4 ? GET_MODE_SIZE ((mode)) : 4)
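/* Editorial example, not part of the original source: QImode and HImode
   entries are padded up to 4 bytes by MINIPOOL_FIX_SIZE, SImode entries
   stay at 4 bytes, and DImode/DFmode entries occupy 8 bytes.  */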
15900 static Mnode * minipool_vector_head;
15901 static Mnode * minipool_vector_tail;
15902 static rtx_code_label *minipool_vector_label;
15903 static int minipool_pad;
15905 /* The linked list of all minipool fixes required for this function. */
15906 Mfix * minipool_fix_head;
15907 Mfix * minipool_fix_tail;
15908 /* The fix entry for the current minipool, once it has been placed. */
15909 Mfix * minipool_barrier;
15911 #ifndef JUMP_TABLES_IN_TEXT_SECTION
15912 #define JUMP_TABLES_IN_TEXT_SECTION 0
15913 #endif
15915 static HOST_WIDE_INT
15916 get_jump_table_size (rtx_jump_table_data *insn)
15918 /* ADDR_VECs only take room if read-only data goes into the text
15919 section. */
15920 if (JUMP_TABLES_IN_TEXT_SECTION || readonly_data_section == text_section)
15922 rtx body = PATTERN (insn);
15923 int elt = GET_CODE (body) == ADDR_DIFF_VEC ? 1 : 0;
15924 HOST_WIDE_INT size;
15925 HOST_WIDE_INT modesize;
15927 modesize = GET_MODE_SIZE (GET_MODE (body));
15928 size = modesize * XVECLEN (body, elt);
15929 switch (modesize)
15931 case 1:
15932 /* Round up size of TBB table to a halfword boundary. */
15933 size = (size + 1) & ~HOST_WIDE_INT_1;
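/* Editorial example, not part of the original source: a 5-entry TBB table
   occupies 5 bytes and is rounded up here to 6, the next halfword
   boundary.  */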
15934 break;
15935 case 2:
15936 /* No padding necessary for TBH. */
15937 break;
15938 case 4:
15939 /* Add two bytes for alignment on Thumb. */
15940 if (TARGET_THUMB)
15941 size += 2;
15942 break;
15943 default:
15944 gcc_unreachable ();
15946 return size;
15949 return 0;
15952 /* Return the maximum amount of padding that will be inserted before
15953 label LABEL. */
15955 static HOST_WIDE_INT
15956 get_label_padding (rtx label)
15958 HOST_WIDE_INT align, min_insn_size;
15960 align = 1 << label_to_alignment (label);
15961 min_insn_size = TARGET_THUMB ? 2 : 4;
15962 return align > min_insn_size ? align - min_insn_size : 0;
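/* Editorial example, not part of the original source: a label aligned to
   2^3 = 8 bytes in Thumb code (minimum insn size 2) can be preceded by up
   to 8 - 2 = 6 bytes of padding, which is the value returned here.  */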
15965 /* Move a minipool fix MP from its current location to before MAX_MP.
15966 If MAX_MP is NULL, then MP doesn't need moving, but the addressing
15967 constraints may need updating. */
15968 static Mnode *
15969 move_minipool_fix_forward_ref (Mnode *mp, Mnode *max_mp,
15970 HOST_WIDE_INT max_address)
15972 /* The code below assumes these are different. */
15973 gcc_assert (mp != max_mp);
15975 if (max_mp == NULL)
15977 if (max_address < mp->max_address)
15978 mp->max_address = max_address;
15980 else
15982 if (max_address > max_mp->max_address - mp->fix_size)
15983 mp->max_address = max_mp->max_address - mp->fix_size;
15984 else
15985 mp->max_address = max_address;
15987 /* Unlink MP from its current position. Since max_mp is non-null,
15988 mp->prev must be non-null. */
15989 mp->prev->next = mp->next;
15990 if (mp->next != NULL)
15991 mp->next->prev = mp->prev;
15992 else
15993 minipool_vector_tail = mp->prev;
15995 /* Re-insert it before MAX_MP. */
15996 mp->next = max_mp;
15997 mp->prev = max_mp->prev;
15998 max_mp->prev = mp;
16000 if (mp->prev != NULL)
16001 mp->prev->next = mp;
16002 else
16003 minipool_vector_head = mp;
16006 /* Save the new entry. */
16007 max_mp = mp;
16009 /* Scan over the preceding entries and adjust their addresses as
16010 required. */
16011 while (mp->prev != NULL
16012 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
16014 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
16015 mp = mp->prev;
16018 return max_mp;
16021 /* Add a constant to the minipool for a forward reference. Returns the
16022 node added or NULL if the constant will not fit in this pool. */
16023 static Mnode *
16024 add_minipool_forward_ref (Mfix *fix)
16026 /* If set, max_mp is the first pool_entry that has a lower
16027 constraint than the one we are trying to add. */
16028 Mnode * max_mp = NULL;
16029 HOST_WIDE_INT max_address = fix->address + fix->forwards - minipool_pad;
16030 Mnode * mp;
16032 /* If the minipool starts before the end of FIX->INSN then this FIX
16033 cannot be placed into the current pool. Furthermore, adding the
16034 new constant pool entry may cause the pool to start FIX_SIZE bytes
16035 earlier. */
16036 if (minipool_vector_head &&
16037 (fix->address + get_attr_length (fix->insn)
16038 >= minipool_vector_head->max_address - fix->fix_size))
16039 return NULL;
16041 /* Scan the pool to see if a constant with the same value has
16042 already been added. While we are doing this, also note the
16043 location where we must insert the constant if it doesn't already
16044 exist. */
16045 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
16047 if (GET_CODE (fix->value) == GET_CODE (mp->value)
16048 && fix->mode == mp->mode
16049 && (!LABEL_P (fix->value)
16050 || (CODE_LABEL_NUMBER (fix->value)
16051 == CODE_LABEL_NUMBER (mp->value)))
16052 && rtx_equal_p (fix->value, mp->value))
16054 /* More than one fix references this entry. */
16055 mp->refcount++;
16056 return move_minipool_fix_forward_ref (mp, max_mp, max_address);
16059 /* Note the insertion point if necessary. */
16060 if (max_mp == NULL
16061 && mp->max_address > max_address)
16062 max_mp = mp;
16064 /* If we are inserting an 8-byte aligned quantity and
16065 we have not already found an insertion point, then
16066 make sure that all such 8-byte aligned quantities are
16067 placed at the start of the pool. */
16068 if (ARM_DOUBLEWORD_ALIGN
16069 && max_mp == NULL
16070 && fix->fix_size >= 8
16071 && mp->fix_size < 8)
16073 max_mp = mp;
16074 max_address = mp->max_address;
16078 /* The value is not currently in the minipool, so we need to create
16079 a new entry for it. If MAX_MP is NULL, the entry will be put on
16080 the end of the list since the placement is less constrained than
16081 any existing entry. Otherwise, we insert the new fix before
16082 MAX_MP and, if necessary, adjust the constraints on the other
16083 entries. */
16084 mp = XNEW (Mnode);
16085 mp->fix_size = fix->fix_size;
16086 mp->mode = fix->mode;
16087 mp->value = fix->value;
16088 mp->refcount = 1;
16089 /* Not yet required for a backwards ref. */
16090 mp->min_address = -65536;
16092 if (max_mp == NULL)
16094 mp->max_address = max_address;
16095 mp->next = NULL;
16096 mp->prev = minipool_vector_tail;
16098 if (mp->prev == NULL)
16100 minipool_vector_head = mp;
16101 minipool_vector_label = gen_label_rtx ();
16103 else
16104 mp->prev->next = mp;
16106 minipool_vector_tail = mp;
16108 else
16110 if (max_address > max_mp->max_address - mp->fix_size)
16111 mp->max_address = max_mp->max_address - mp->fix_size;
16112 else
16113 mp->max_address = max_address;
16115 mp->next = max_mp;
16116 mp->prev = max_mp->prev;
16117 max_mp->prev = mp;
16118 if (mp->prev != NULL)
16119 mp->prev->next = mp;
16120 else
16121 minipool_vector_head = mp;
16124 /* Save the new entry. */
16125 max_mp = mp;
16127 /* Scan over the preceding entries and adjust their addresses as
16128 required. */
16129 while (mp->prev != NULL
16130 && mp->prev->max_address > mp->max_address - mp->prev->fix_size)
16132 mp->prev->max_address = mp->max_address - mp->prev->fix_size;
16133 mp = mp->prev;
16136 return max_mp;
16139 static Mnode *
16140 move_minipool_fix_backward_ref (Mnode *mp, Mnode *min_mp,
16141 HOST_WIDE_INT min_address)
16143 HOST_WIDE_INT offset;
16145 /* The code below assumes these are different. */
16146 gcc_assert (mp != min_mp);
16148 if (min_mp == NULL)
16150 if (min_address > mp->min_address)
16151 mp->min_address = min_address;
16153 else
16155 /* We will adjust this below if it is too loose. */
16156 mp->min_address = min_address;
16158 /* Unlink MP from its current position. Since min_mp is non-null,
16159 mp->next must be non-null. */
16160 mp->next->prev = mp->prev;
16161 if (mp->prev != NULL)
16162 mp->prev->next = mp->next;
16163 else
16164 minipool_vector_head = mp->next;
16166 /* Reinsert it after MIN_MP. */
16167 mp->prev = min_mp;
16168 mp->next = min_mp->next;
16169 min_mp->next = mp;
16170 if (mp->next != NULL)
16171 mp->next->prev = mp;
16172 else
16173 minipool_vector_tail = mp;
16176 min_mp = mp;
16178 offset = 0;
16179 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
16181 mp->offset = offset;
16182 if (mp->refcount > 0)
16183 offset += mp->fix_size;
16185 if (mp->next && mp->next->min_address < mp->min_address + mp->fix_size)
16186 mp->next->min_address = mp->min_address + mp->fix_size;
16189 return min_mp;
16192 /* Add a constant to the minipool for a backward reference. Returns the
16193 node added or NULL if the constant will not fit in this pool.
16195 Note that the code for insertion for a backwards reference can be
16196 somewhat confusing because the calculated offsets for each fix do
16197 not take into account the size of the pool (which is still under
16198 construction). */
16199 static Mnode *
16200 add_minipool_backward_ref (Mfix *fix)
16202 /* If set, min_mp is the last pool_entry that has a lower constraint
16203 than the one we are trying to add. */
16204 Mnode *min_mp = NULL;
16205 /* This can be negative, since it is only a constraint. */
16206 HOST_WIDE_INT min_address = fix->address - fix->backwards;
16207 Mnode *mp;
16209 /* If we can't reach the current pool from this insn, or if we can't
16210 insert this entry at the end of the pool without pushing other
16211 fixes out of range, then we don't try. This ensures that we
16212 can't fail later on. */
16213 if (min_address >= minipool_barrier->address
16214 || (minipool_vector_tail->min_address + fix->fix_size
16215 >= minipool_barrier->address))
16216 return NULL;
16218 /* Scan the pool to see if a constant with the same value has
16219 already been added. While we are doing this, also note the
16220 location where we must insert the constant if it doesn't already
16221 exist. */
16222 for (mp = minipool_vector_tail; mp != NULL; mp = mp->prev)
16224 if (GET_CODE (fix->value) == GET_CODE (mp->value)
16225 && fix->mode == mp->mode
16226 && (!LABEL_P (fix->value)
16227 || (CODE_LABEL_NUMBER (fix->value)
16228 == CODE_LABEL_NUMBER (mp->value)))
16229 && rtx_equal_p (fix->value, mp->value)
16230 /* Check that there is enough slack to move this entry to the
16231 end of the table (this is conservative). */
16232 && (mp->max_address
16233 > (minipool_barrier->address
16234 + minipool_vector_tail->offset
16235 + minipool_vector_tail->fix_size)))
16237 mp->refcount++;
16238 return move_minipool_fix_backward_ref (mp, min_mp, min_address);
16241 if (min_mp != NULL)
16242 mp->min_address += fix->fix_size;
16243 else
16245 /* Note the insertion point if necessary. */
16246 if (mp->min_address < min_address)
16248 /* For now, we do not allow the insertion of 8-byte alignment
16249 requiring nodes anywhere but at the start of the pool. */
16250 if (ARM_DOUBLEWORD_ALIGN
16251 && fix->fix_size >= 8 && mp->fix_size < 8)
16252 return NULL;
16253 else
16254 min_mp = mp;
16256 else if (mp->max_address
16257 < minipool_barrier->address + mp->offset + fix->fix_size)
16259 /* Inserting before this entry would push the fix beyond
16260 its maximum address (which can happen if we have
16261 re-located a forwards fix); force the new fix to come
16262 after it. */
16263 if (ARM_DOUBLEWORD_ALIGN
16264 && fix->fix_size >= 8 && mp->fix_size < 8)
16265 return NULL;
16266 else
16268 min_mp = mp;
16269 min_address = mp->min_address + fix->fix_size;
16272 /* Do not insert a non-8-byte aligned quantity before 8-byte
16273 aligned quantities. */
16274 else if (ARM_DOUBLEWORD_ALIGN
16275 && fix->fix_size < 8
16276 && mp->fix_size >= 8)
16278 min_mp = mp;
16279 min_address = mp->min_address + fix->fix_size;
16284 /* We need to create a new entry. */
16285 mp = XNEW (Mnode);
16286 mp->fix_size = fix->fix_size;
16287 mp->mode = fix->mode;
16288 mp->value = fix->value;
16289 mp->refcount = 1;
16290 mp->max_address = minipool_barrier->address + 65536;
16292 mp->min_address = min_address;
16294 if (min_mp == NULL)
16296 mp->prev = NULL;
16297 mp->next = minipool_vector_head;
16299 if (mp->next == NULL)
16301 minipool_vector_tail = mp;
16302 minipool_vector_label = gen_label_rtx ();
16304 else
16305 mp->next->prev = mp;
16307 minipool_vector_head = mp;
16309 else
16311 mp->next = min_mp->next;
16312 mp->prev = min_mp;
16313 min_mp->next = mp;
16315 if (mp->next != NULL)
16316 mp->next->prev = mp;
16317 else
16318 minipool_vector_tail = mp;
16321 /* Save the new entry. */
16322 min_mp = mp;
16324 if (mp->prev)
16325 mp = mp->prev;
16326 else
16327 mp->offset = 0;
16329 /* Scan over the following entries and adjust their offsets. */
16330 while (mp->next != NULL)
16332 if (mp->next->min_address < mp->min_address + mp->fix_size)
16333 mp->next->min_address = mp->min_address + mp->fix_size;
16335 if (mp->refcount)
16336 mp->next->offset = mp->offset + mp->fix_size;
16337 else
16338 mp->next->offset = mp->offset;
16340 mp = mp->next;
16343 return min_mp;
16346 static void
16347 assign_minipool_offsets (Mfix *barrier)
16349 HOST_WIDE_INT offset = 0;
16350 Mnode *mp;
16352 minipool_barrier = barrier;
16354 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
16356 mp->offset = offset;
16358 if (mp->refcount > 0)
16359 offset += mp->fix_size;
16363 /* Output the literal table */
16364 static void
16365 dump_minipool (rtx_insn *scan)
16367 Mnode * mp;
16368 Mnode * nmp;
16369 int align64 = 0;
16371 if (ARM_DOUBLEWORD_ALIGN)
16372 for (mp = minipool_vector_head; mp != NULL; mp = mp->next)
16373 if (mp->refcount > 0 && mp->fix_size >= 8)
16375 align64 = 1;
16376 break;
16379 if (dump_file)
16380 fprintf (dump_file,
16381 ";; Emitting minipool after insn %u; address %ld; align %d (bytes)\n",
16382 INSN_UID (scan), (unsigned long) minipool_barrier->address, align64 ? 8 : 4);
16384 scan = emit_label_after (gen_label_rtx (), scan);
16385 scan = emit_insn_after (align64 ? gen_align_8 () : gen_align_4 (), scan);
16386 scan = emit_label_after (minipool_vector_label, scan);
16388 for (mp = minipool_vector_head; mp != NULL; mp = nmp)
16390 if (mp->refcount > 0)
16392 if (dump_file)
16394 fprintf (dump_file,
16395 ";; Offset %u, min %ld, max %ld ",
16396 (unsigned) mp->offset, (unsigned long) mp->min_address,
16397 (unsigned long) mp->max_address);
16398 arm_print_value (dump_file, mp->value);
16399 fputc ('\n', dump_file);
16402 rtx val = copy_rtx (mp->value);
16404 switch (GET_MODE_SIZE (mp->mode))
16406 #ifdef HAVE_consttable_1
16407 case 1:
16408 scan = emit_insn_after (gen_consttable_1 (val), scan);
16409 break;
16411 #endif
16412 #ifdef HAVE_consttable_2
16413 case 2:
16414 scan = emit_insn_after (gen_consttable_2 (val), scan);
16415 break;
16417 #endif
16418 #ifdef HAVE_consttable_4
16419 case 4:
16420 scan = emit_insn_after (gen_consttable_4 (val), scan);
16421 break;
16423 #endif
16424 #ifdef HAVE_consttable_8
16425 case 8:
16426 scan = emit_insn_after (gen_consttable_8 (val), scan);
16427 break;
16429 #endif
16430 #ifdef HAVE_consttable_16
16431 case 16:
16432 scan = emit_insn_after (gen_consttable_16 (val), scan);
16433 break;
16435 #endif
16436 default:
16437 gcc_unreachable ();
16441 nmp = mp->next;
16442 free (mp);
16445 minipool_vector_head = minipool_vector_tail = NULL;
16446 scan = emit_insn_after (gen_consttable_end (), scan);
16447 scan = emit_barrier_after (scan);
16450 /* Return the cost of forcibly inserting a barrier after INSN. */
16451 static int
16452 arm_barrier_cost (rtx_insn *insn)
16454 /* Basing the location of the pool on the loop depth is preferable,
16455 but at the moment, the basic block information seems to be
16456 corrupt by this stage of the compilation. */
16457 int base_cost = 50;
16458 rtx_insn *next = next_nonnote_insn (insn);
16460 if (next != NULL && LABEL_P (next))
16461 base_cost -= 20;
16463 switch (GET_CODE (insn))
16465 case CODE_LABEL:
16466 /* It will always be better to place the table before the label, rather
16467 than after it. */
16468 return 50;
16470 case INSN:
16471 case CALL_INSN:
16472 return base_cost;
16474 case JUMP_INSN:
16475 return base_cost - 10;
16477 default:
16478 return base_cost + 10;
16482 /* Find the best place in the insn stream in the range
16483 (FIX->address,MAX_ADDRESS) to forcibly insert a minipool barrier.
16484 Create the barrier by inserting a jump and add a new fix entry for
16485 it. */
16486 static Mfix *
16487 create_fix_barrier (Mfix *fix, HOST_WIDE_INT max_address)
16489 HOST_WIDE_INT count = 0;
16490 rtx_barrier *barrier;
16491 rtx_insn *from = fix->insn;
16492 /* The instruction after which we will insert the jump. */
16493 rtx_insn *selected = NULL;
16494 int selected_cost;
16495 /* The address at which the jump instruction will be placed. */
16496 HOST_WIDE_INT selected_address;
16497 Mfix * new_fix;
16498 HOST_WIDE_INT max_count = max_address - fix->address;
16499 rtx_code_label *label = gen_label_rtx ();
16501 selected_cost = arm_barrier_cost (from);
16502 selected_address = fix->address;
16504 while (from && count < max_count)
16506 rtx_jump_table_data *tmp;
16507 int new_cost;
16509 /* This code shouldn't have been called if there was a natural barrier
16510 within range. */
16511 gcc_assert (!BARRIER_P (from));
16513 /* Count the length of this insn. This must stay in sync with the
16514 code that pushes minipool fixes. */
16515 if (LABEL_P (from))
16516 count += get_label_padding (from);
16517 else
16518 count += get_attr_length (from);
16520 /* If there is a jump table, add its length. */
16521 if (tablejump_p (from, NULL, &tmp))
16523 count += get_jump_table_size (tmp);
16525 /* Jump tables aren't in a basic block, so base the cost on
16526 the dispatch insn. If we select this location, we will
16527 still put the pool after the table. */
16528 new_cost = arm_barrier_cost (from);
16530 if (count < max_count
16531 && (!selected || new_cost <= selected_cost))
16533 selected = tmp;
16534 selected_cost = new_cost;
16535 selected_address = fix->address + count;
16538 /* Continue after the dispatch table. */
16539 from = NEXT_INSN (tmp);
16540 continue;
16543 new_cost = arm_barrier_cost (from);
16545 if (count < max_count
16546 && (!selected || new_cost <= selected_cost))
16548 selected = from;
16549 selected_cost = new_cost;
16550 selected_address = fix->address + count;
16553 from = NEXT_INSN (from);
16556 /* Make sure that we found a place to insert the jump. */
16557 gcc_assert (selected);
16559 /* Create a new JUMP_INSN that branches around a barrier. */
16560 from = emit_jump_insn_after (gen_jump (label), selected);
16561 JUMP_LABEL (from) = label;
16562 barrier = emit_barrier_after (from);
16563 emit_label_after (label, barrier);
16565 /* Create a minipool barrier entry for the new barrier. */
16566 new_fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* new_fix));
16567 new_fix->insn = barrier;
16568 new_fix->address = selected_address;
16569 new_fix->next = fix->next;
16570 fix->next = new_fix;
16572 return new_fix;
16575 /* Record that there is a natural barrier in the insn stream at
16576 ADDRESS. */
16577 static void
16578 push_minipool_barrier (rtx_insn *insn, HOST_WIDE_INT address)
16580 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
16582 fix->insn = insn;
16583 fix->address = address;
16585 fix->next = NULL;
16586 if (minipool_fix_head != NULL)
16587 minipool_fix_tail->next = fix;
16588 else
16589 minipool_fix_head = fix;
16591 minipool_fix_tail = fix;
16594 /* Record INSN, which will need fixing up to load a value from the
16595 minipool. ADDRESS is the offset of the insn since the start of the
16596 function; LOC is a pointer to the part of the insn which requires
16597 fixing; VALUE is the constant that must be loaded, which is of type
16598 MODE. */
16599 static void
16600 push_minipool_fix (rtx_insn *insn, HOST_WIDE_INT address, rtx *loc,
16601 machine_mode mode, rtx value)
16603 gcc_assert (!arm_disable_literal_pool);
16604 Mfix * fix = (Mfix *) obstack_alloc (&minipool_obstack, sizeof (* fix));
16606 fix->insn = insn;
16607 fix->address = address;
16608 fix->loc = loc;
16609 fix->mode = mode;
16610 fix->fix_size = MINIPOOL_FIX_SIZE (mode);
16611 fix->value = value;
16612 fix->forwards = get_attr_pool_range (insn);
16613 fix->backwards = get_attr_neg_pool_range (insn);
16614 fix->minipool = NULL;
16616 /* If an insn doesn't have a range defined for it, then it isn't
16617 expecting to be reworked by this code. Better to stop now than
16618 to generate duff assembly code. */
16619 gcc_assert (fix->forwards || fix->backwards);
16621 /* If an entry requires 8-byte alignment then assume all constant pools
16622 require 4 bytes of padding. Trying to do this later on a per-pool
16623 basis is awkward because existing pool entries have to be modified. */
16624 if (ARM_DOUBLEWORD_ALIGN && fix->fix_size >= 8)
16625 minipool_pad = 4;
16627 if (dump_file)
16629 fprintf (dump_file,
16630 ";; %smode fixup for i%d; addr %lu, range (%ld,%ld): ",
16631 GET_MODE_NAME (mode),
16632 INSN_UID (insn), (unsigned long) address,
16633 -1 * (long)fix->backwards, (long)fix->forwards);
16634 arm_print_value (dump_file, fix->value);
16635 fprintf (dump_file, "\n");
16638 /* Add it to the chain of fixes. */
16639 fix->next = NULL;
16641 if (minipool_fix_head != NULL)
16642 minipool_fix_tail->next = fix;
16643 else
16644 minipool_fix_head = fix;
16646 minipool_fix_tail = fix;
16649 /* Return the maximum allowed cost of synthesizing a 64-bit constant inline,
16650 i.e. the largest number of insns we are prepared to spend on
16651 synthesizing the value. */
16653 arm_max_const_double_inline_cost ()
16655 return ((optimize_size || arm_ld_sched) ? 3 : 4);
16658 /* Return the cost of synthesizing a 64-bit constant VAL inline.
16659 Returns the number of insns needed, or 99 if we don't know how to
16660 do it. */
16662 arm_const_double_inline_cost (rtx val)
16664 rtx lowpart, highpart;
16665 machine_mode mode;
16667 mode = GET_MODE (val);
16669 if (mode == VOIDmode)
16670 mode = DImode;
16672 gcc_assert (GET_MODE_SIZE (mode) == 8);
16674 lowpart = gen_lowpart (SImode, val);
16675 highpart = gen_highpart_mode (SImode, mode, val);
16677 gcc_assert (CONST_INT_P (lowpart));
16678 gcc_assert (CONST_INT_P (highpart));
16680 return (arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (lowpart),
16681 NULL_RTX, NULL_RTX, 0, 0)
16682 + arm_gen_constant (SET, SImode, NULL_RTX, INTVAL (highpart),
16683 NULL_RTX, NULL_RTX, 0, 0));
16686 /* Cost of loading a SImode constant. */
16687 static inline int
16688 arm_const_inline_cost (enum rtx_code code, rtx val)
16690 return arm_gen_constant (code, SImode, NULL_RTX, INTVAL (val),
16691 NULL_RTX, NULL_RTX, 1, 0);
16694 /* Return true if it is worthwhile to split a 64-bit constant into two
16695 32-bit operations. This is the case if optimizing for size, or
16696 if we have load delay slots, or if one 32-bit part can be done with
16697 a single data operation. */
16698 bool
16699 arm_const_double_by_parts (rtx val)
16701 machine_mode mode = GET_MODE (val);
16702 rtx part;
16704 if (optimize_size || arm_ld_sched)
16705 return true;
16707 if (mode == VOIDmode)
16708 mode = DImode;
16710 part = gen_highpart_mode (SImode, mode, val);
16712 gcc_assert (CONST_INT_P (part));
16714 if (const_ok_for_arm (INTVAL (part))
16715 || const_ok_for_arm (~INTVAL (part)))
16716 return true;
16718 part = gen_lowpart (SImode, val);
16720 gcc_assert (CONST_INT_P (part));
16722 if (const_ok_for_arm (INTVAL (part))
16723 || const_ok_for_arm (~INTVAL (part)))
16724 return true;
16726 return false;
16729 /* Return true if it is possible to inline both the high and low parts
16730 of a 64-bit constant into 32-bit data processing instructions. */
16731 bool
16732 arm_const_double_by_immediates (rtx val)
16734 machine_mode mode = GET_MODE (val);
16735 rtx part;
16737 if (mode == VOIDmode)
16738 mode = DImode;
16740 part = gen_highpart_mode (SImode, mode, val);
16742 gcc_assert (CONST_INT_P (part));
16744 if (!const_ok_for_arm (INTVAL (part)))
16745 return false;
16747 part = gen_lowpart (SImode, val);
16749 gcc_assert (CONST_INT_P (part));
16751 if (!const_ok_for_arm (INTVAL (part)))
16752 return false;
16754 return true;
16757 /* Scan INSN and note any of its operands that need fixing.
16758 If DO_PUSHES is false we do not actually push any of the fixups
16759 needed. */
16760 static void
16761 note_invalid_constants (rtx_insn *insn, HOST_WIDE_INT address, int do_pushes)
16763 int opno;
16765 extract_constrain_insn (insn);
16767 if (recog_data.n_alternatives == 0)
16768 return;
16770 /* Fill in recog_op_alt with information about the constraints of
16771 this insn. */
16772 preprocess_constraints (insn);
16774 const operand_alternative *op_alt = which_op_alt ();
16775 for (opno = 0; opno < recog_data.n_operands; opno++)
16777 /* Things we need to fix can only occur in inputs. */
16778 if (recog_data.operand_type[opno] != OP_IN)
16779 continue;
16781 /* If this alternative is a memory reference, then any mention
16782 of constants in this alternative is really to fool reload
16783 into allowing us to accept one there. We need to fix them up
16784 now so that we output the right code. */
16785 if (op_alt[opno].memory_ok)
16787 rtx op = recog_data.operand[opno];
16789 if (CONSTANT_P (op))
16791 if (do_pushes)
16792 push_minipool_fix (insn, address, recog_data.operand_loc[opno],
16793 recog_data.operand_mode[opno], op);
16795 else if (MEM_P (op)
16796 && GET_CODE (XEXP (op, 0)) == SYMBOL_REF
16797 && CONSTANT_POOL_ADDRESS_P (XEXP (op, 0)))
16799 if (do_pushes)
16801 rtx cop = avoid_constant_pool_reference (op);
16803 /* Casting the address of something to a mode narrower
16804 than a word can cause avoid_constant_pool_reference()
16805 to return the pool reference itself. That's no good to
16806 us here. Let's just hope that we can use the
16807 constant pool value directly. */
16808 if (op == cop)
16809 cop = get_pool_constant (XEXP (op, 0));
16811 push_minipool_fix (insn, address,
16812 recog_data.operand_loc[opno],
16813 recog_data.operand_mode[opno], cop);
16820 return;
16823 /* This function computes the clear mask and PADDING_BITS_TO_CLEAR for structs
16824 and unions in the context of ARMv8-M Security Extensions. It is used as a
16825 helper function for both 'cmse_nonsecure_call' and 'cmse_nonsecure_entry'
16826 functions. The PADDING_BITS_TO_CLEAR pointer can be the base to either one
16827 or four masks, depending on whether it is being computed for a
16828 'cmse_nonsecure_entry' return value or a 'cmse_nonsecure_call' argument
16829 respectively. The tree for the type of the argument or a field within an
16830 argument is passed in ARG_TYPE, the current register this argument or field
16831 starts in is kept in the pointer REGNO and updated accordingly, the bit this
16832 argument or field starts at is passed in STARTING_BIT and the last used bit
16833 is kept in LAST_USED_BIT which is also updated accordingly. */
16835 static unsigned HOST_WIDE_INT
16836 comp_not_to_clear_mask_str_un (tree arg_type, int * regno,
16837 uint32_t * padding_bits_to_clear,
16838 unsigned starting_bit, int * last_used_bit)
16841 unsigned HOST_WIDE_INT not_to_clear_reg_mask = 0;
16843 if (TREE_CODE (arg_type) == RECORD_TYPE)
16845 unsigned current_bit = starting_bit;
16846 tree field;
16847 long int offset, size;
16850 field = TYPE_FIELDS (arg_type);
16851 while (field)
16853 /* The offset within a structure is always an offset from
16854 the start of that structure. Make sure we take that into the
16855 calculation of the register based offset that we use here. */
16856 offset = starting_bit;
16857 offset += TREE_INT_CST_ELT (DECL_FIELD_BIT_OFFSET (field), 0);
16858 offset %= 32;
16860 /* This is the actual size of the field, for bitfields this is the
16861 bitfield width and not the container size. */
16862 size = TREE_INT_CST_ELT (DECL_SIZE (field), 0);
16864 if (*last_used_bit != offset)
16866 if (offset < *last_used_bit)
16868 /* This field's offset is before the 'last_used_bit', which
16869 means this field goes in the next register. So we need to
16870 pad the rest of the current register and increase the
16871 register number. */
16872 uint32_t mask;
16873 mask = ((uint32_t)-1) - ((uint32_t) 1 << *last_used_bit);
16874 mask++;
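/* Editorial example, not part of the original source: with *last_used_bit
   == 16 this computes mask == 0xffff0000, i.e. the upper half of the
   current register is recorded as padding to be cleared.  */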
16876 padding_bits_to_clear[*regno] |= mask;
16877 not_to_clear_reg_mask |= HOST_WIDE_INT_1U << *regno;
16878 (*regno)++;
16880 else
16882 /* Otherwise we pad the bits between the last field's end and
16883 the start of the new field. */
16884 uint32_t mask;
16886 mask = ((uint32_t)-1) >> (32 - offset);
16887 mask -= ((uint32_t) 1 << *last_used_bit) - 1;
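/* Editorial example, not part of the original source: with offset == 16
   and *last_used_bit == 8 this gives mask == 0x0000ff00, marking bits 8-15
   (the gap between the previous field and this one) as padding.  */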
16888 padding_bits_to_clear[*regno] |= mask;
16890 current_bit = offset;
16893 /* Calculate further padding bits for inner structs/unions too. */
16894 if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (field)))
16896 *last_used_bit = current_bit;
16897 not_to_clear_reg_mask
16898 |= comp_not_to_clear_mask_str_un (TREE_TYPE (field), regno,
16899 padding_bits_to_clear, offset,
16900 last_used_bit);
16902 else
16904 /* Update 'current_bit' with this field's size. If the
16905 'current_bit' lies in a subsequent register, update 'regno' and
16906 reset 'current_bit' to point to the current bit in that new
16907 register. */
16908 current_bit += size;
16909 while (current_bit >= 32)
16911 current_bit -= 32;
16912 not_to_clear_reg_mask |= HOST_WIDE_INT_1U << *regno;
16913 (*regno)++;
16915 *last_used_bit = current_bit;
16918 field = TREE_CHAIN (field);
16920 not_to_clear_reg_mask |= HOST_WIDE_INT_1U << *regno;
16922 else if (TREE_CODE (arg_type) == UNION_TYPE)
16924 tree field, field_t;
16925 int i, regno_t, field_size;
16926 int max_reg = -1;
16927 int max_bit = -1;
16928 uint32_t mask;
16929 uint32_t padding_bits_to_clear_res[NUM_ARG_REGS]
16930 = {-1, -1, -1, -1};
16932 /* To compute the padding bits in a union we only consider bits as
16933 padding bits if they are always either a padding bit or fall outside a
16934 field's size for all fields in the union. */
16935 field = TYPE_FIELDS (arg_type);
16936 while (field)
16938 uint32_t padding_bits_to_clear_t[NUM_ARG_REGS]
16939 = {0U, 0U, 0U, 0U};
16940 int last_used_bit_t = *last_used_bit;
16941 regno_t = *regno;
16942 field_t = TREE_TYPE (field);
16944 /* If the field's type is either a record or a union make sure to
16945 compute their padding bits too. */
16946 if (RECORD_OR_UNION_TYPE_P (field_t))
16947 not_to_clear_reg_mask
16948 |= comp_not_to_clear_mask_str_un (field_t, &regno_t,
16949 &padding_bits_to_clear_t[0],
16950 starting_bit, &last_used_bit_t);
16951 else
16953 field_size = TREE_INT_CST_ELT (DECL_SIZE (field), 0);
16954 regno_t = (field_size / 32) + *regno;
16955 last_used_bit_t = (starting_bit + field_size) % 32;
16958 for (i = *regno; i < regno_t; i++)
16960 /* For all but the last register used by this field only keep the
16961 padding bits that were padding bits in this field. */
16962 padding_bits_to_clear_res[i] &= padding_bits_to_clear_t[i];
16965 /* For the last register, keep all padding bits that were padding
16966 bits in this field and any padding bits that are still valid
16967 as padding bits but fall outside of this field's size. */
16968 mask = (((uint32_t) -1) - ((uint32_t) 1 << last_used_bit_t)) + 1;
16969 padding_bits_to_clear_res[regno_t]
16970 &= padding_bits_to_clear_t[regno_t] | mask;
16972 /* Update the maximum size of the fields in terms of registers used
16973 ('max_reg') and the 'last_used_bit' in said register. */
16974 if (max_reg < regno_t)
16976 max_reg = regno_t;
16977 max_bit = last_used_bit_t;
16979 else if (max_reg == regno_t && max_bit < last_used_bit_t)
16980 max_bit = last_used_bit_t;
16982 field = TREE_CHAIN (field);
16985 /* Update the current padding_bits_to_clear using the intersection of the
16986 padding bits of all the fields. */
16987 for (i=*regno; i < max_reg; i++)
16988 padding_bits_to_clear[i] |= padding_bits_to_clear_res[i];
16990 /* Do not keep trailing padding bits; we do not know yet whether this
16991 is the end of the argument. */
16992 mask = ((uint32_t) 1 << max_bit) - 1;
16993 padding_bits_to_clear[max_reg]
16994 |= padding_bits_to_clear_res[max_reg] & mask;
16996 *regno = max_reg;
16997 *last_used_bit = max_bit;
16999 else
17000 /* This function should only be used for structs and unions. */
17001 gcc_unreachable ();
17003 return not_to_clear_reg_mask;
17006 /* In the context of ARMv8-M Security Extensions, this function is used for both
17007 'cmse_nonsecure_call' and 'cmse_nonsecure_entry' functions to compute what
17008 registers are used when returning or passing arguments, which is then
17009 returned as a mask. It will also compute a mask to indicate padding/unused
17010 bits for each of these registers, and passes this through the
17011 PADDING_BITS_TO_CLEAR pointer. The tree of the argument type is passed in
17012 ARG_TYPE, the rtl representation of the argument is passed in ARG_RTX and
17013 the starting register used to pass this argument or return value is passed
17014 in REGNO. It makes use of 'comp_not_to_clear_mask_str_un' to compute these
17015 for struct and union types. */
17017 static unsigned HOST_WIDE_INT
17018 compute_not_to_clear_mask (tree arg_type, rtx arg_rtx, int regno,
17019 uint32_t * padding_bits_to_clear)
17022 int last_used_bit = 0;
17023 unsigned HOST_WIDE_INT not_to_clear_mask;
17025 if (RECORD_OR_UNION_TYPE_P (arg_type))
17027 not_to_clear_mask
17028 = comp_not_to_clear_mask_str_un (arg_type, &regno,
17029 padding_bits_to_clear, 0,
17030 &last_used_bit);
17033 /* If the 'last_used_bit' is not zero, that means we are still using a
17034 part of the last 'regno'. In such cases we must clear the trailing
17035 bits. Otherwise we are not using regno and we should mark it to be
17036 cleared. */
17037 if (last_used_bit != 0)
17038 padding_bits_to_clear[regno]
17039 |= ((uint32_t)-1) - ((uint32_t) 1 << last_used_bit) + 1;
17040 else
17041 not_to_clear_mask &= ~(HOST_WIDE_INT_1U << regno);
17043 else
17045 not_to_clear_mask = 0;
17046 /* We are not dealing with structs or unions, so these arguments may be
17047 passed in floating-point registers too. In some cases a BLKmode is
17048 used when returning or passing arguments in multiple VFP registers. */
17049 if (GET_MODE (arg_rtx) == BLKmode)
17051 int i, arg_regs;
17052 rtx reg;
17054 /* This should really only occur when dealing with the hard-float
17055 ABI. */
17056 gcc_assert (TARGET_HARD_FLOAT_ABI);
17058 for (i = 0; i < XVECLEN (arg_rtx, 0); i++)
17060 reg = XEXP (XVECEXP (arg_rtx, 0, i), 0);
17061 gcc_assert (REG_P (reg));
17063 not_to_clear_mask |= HOST_WIDE_INT_1U << REGNO (reg);
17065 /* If we are dealing with DF mode, make sure we don't
17066 clear either of the registers it addresses. */
17067 arg_regs = ARM_NUM_REGS (GET_MODE (reg));
17068 if (arg_regs > 1)
17070 unsigned HOST_WIDE_INT mask;
17071 mask = HOST_WIDE_INT_1U << (REGNO (reg) + arg_regs);
17072 mask -= HOST_WIDE_INT_1U << REGNO (reg);
17073 not_to_clear_mask |= mask;
17077 else
17079 /* Otherwise we can rely on the MODE to determine how many registers
17080 are being used by this argument. */
17081 int arg_regs = ARM_NUM_REGS (GET_MODE (arg_rtx));
17082 not_to_clear_mask |= HOST_WIDE_INT_1U << REGNO (arg_rtx);
17083 if (arg_regs > 1)
17085 unsigned HOST_WIDE_INT
17086 mask = HOST_WIDE_INT_1U << (REGNO (arg_rtx) + arg_regs);
17087 mask -= HOST_WIDE_INT_1U << REGNO (arg_rtx);
17088 not_to_clear_mask |= mask;
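/* Illustrative example: a DImode argument starting in r0 has
arg_regs == 2, so bits 0 and 1 end up set and both r0 and r1 are
kept (not cleared). */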
17093 return not_to_clear_mask;
17096 /* Clear secret data from registers before doing a cmse_nonsecure_call or
17097 returning from a cmse_nonsecure_entry function. TO_CLEAR_BITMAP indicates
17098 which registers are to be fully cleared, using the value in register
17099 CLEARING_REG if more efficient. The PADDING_BITS_LEN-entry array
17100 PADDING_BITS_TO_CLEAR gives the bits that need to be cleared in
17101 caller-saved core registers, with SCRATCH_REG used as a scratch register for that clearing.
17103 NOTE: one of the three following conditions must hold:
17104 - SCRATCH_REG is a low register
17105 - CLEARING_REG is in the set of registers fully cleared (i.e. its bit is set
17106 in TO_CLEAR_BITMAP)
17107 - CLEARING_REG is a low register. */
17109 static void
17110 cmse_clear_registers (sbitmap to_clear_bitmap, uint32_t *padding_bits_to_clear,
17111 int padding_bits_len, rtx scratch_reg, rtx clearing_reg)
17113 bool saved_clearing = false;
17114 rtx saved_clearing_reg = NULL_RTX;
17115 int i, regno, clearing_regno, minregno = R0_REGNUM, maxregno = minregno - 1;
17117 gcc_assert (arm_arch_cmse);
17119 if (!bitmap_empty_p (to_clear_bitmap))
17121 minregno = bitmap_first_set_bit (to_clear_bitmap);
17122 maxregno = bitmap_last_set_bit (to_clear_bitmap);
17124 clearing_regno = REGNO (clearing_reg);
17126 /* Clear padding bits. */
17127 gcc_assert (padding_bits_len <= NUM_ARG_REGS);
17128 for (i = 0, regno = R0_REGNUM; i < padding_bits_len; i++, regno++)
17130 uint64_t mask;
17131 rtx rtx16, dest, cleared_reg = gen_rtx_REG (SImode, regno);
17133 if (padding_bits_to_clear[i] == 0)
17134 continue;
17136 /* If this is a Thumb-1 target and SCRATCH_REG is not a low register, use
17137 CLEARING_REG as scratch. */
17138 if (TARGET_THUMB1
17139 && REGNO (scratch_reg) > LAST_LO_REGNUM)
17141 /* clearing_reg is not to be cleared; copy its value into scratch_reg
17142 so that we can use clearing_reg to clear the unused bits in the
17143 arguments. */
17144 if ((clearing_regno > maxregno
17145 || !bitmap_bit_p (to_clear_bitmap, clearing_regno))
17146 && !saved_clearing)
17148 gcc_assert (clearing_regno <= LAST_LO_REGNUM);
17149 emit_move_insn (scratch_reg, clearing_reg);
17150 saved_clearing = true;
17151 saved_clearing_reg = scratch_reg;
17153 scratch_reg = clearing_reg;
17156 /* Fill the lower half of the negated padding_bits_to_clear[i]. */
17157 mask = (~padding_bits_to_clear[i]) & 0xFFFF;
17158 emit_move_insn (scratch_reg, gen_int_mode (mask, SImode));
17160 /* Fill the top half of the negated padding_bits_to_clear[i]. */
17161 mask = (~padding_bits_to_clear[i]) >> 16;
17162 rtx16 = gen_int_mode (16, SImode);
17163 dest = gen_rtx_ZERO_EXTRACT (SImode, scratch_reg, rtx16, rtx16);
17164 if (mask)
17165 emit_insn (gen_rtx_SET (dest, gen_int_mode (mask, SImode)));
17167 emit_insn (gen_andsi3 (cleared_reg, cleared_reg, scratch_reg));
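/* Rough illustration of the sequence built above, assuming a Thumb-2
target, SCRATCH_REG == ip (as used by the non-secure call path below)
and padding_bits_to_clear[i] == 0xff000000:
movw ip, #65535   @ low half of the negated mask (0x00ffffff)
movt ip, #255     @ high half of the negated mask
and r0, r0, ip    @ clear bits 24..31 of the argument register
The exact instructions depend on the target; this is only a sketch. */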
17169 if (saved_clearing)
17170 emit_move_insn (clearing_reg, saved_clearing_reg);
17173 /* Clear full registers. */
17175 /* If not marked for clearing, clearing_reg already does not contain
17176 any secret. */
17177 if (clearing_regno <= maxregno
17178 && bitmap_bit_p (to_clear_bitmap, clearing_regno))
17180 emit_move_insn (clearing_reg, const0_rtx);
17181 emit_use (clearing_reg);
17182 bitmap_clear_bit (to_clear_bitmap, clearing_regno);
17185 for (regno = minregno; regno <= maxregno; regno++)
17187 if (!bitmap_bit_p (to_clear_bitmap, regno))
17188 continue;
17190 if (IS_VFP_REGNUM (regno))
17192 /* If regno is an even vfp register and its successor is also to
17193 be cleared, use vmov. */
17194 if (TARGET_VFP_DOUBLE
17195 && VFP_REGNO_OK_FOR_DOUBLE (regno)
17196 && bitmap_bit_p (to_clear_bitmap, regno + 1))
17198 emit_move_insn (gen_rtx_REG (DFmode, regno),
17199 CONST1_RTX (DFmode));
17200 emit_use (gen_rtx_REG (DFmode, regno));
17201 regno++;
17203 else
17205 emit_move_insn (gen_rtx_REG (SFmode, regno),
17206 CONST1_RTX (SFmode));
17207 emit_use (gen_rtx_REG (SFmode, regno));
17210 else
17212 emit_move_insn (gen_rtx_REG (SImode, regno), clearing_reg);
17213 emit_use (gen_rtx_REG (SImode, regno));
17218 /* Clear caller-saved registers that are not used to pass arguments before a
17219 cmse_nonsecure_call. Saving, clearing and restoring of callee-saved
17220 registers is done in the __gnu_cmse_nonsecure_call libcall.
17221 See libgcc/config/arm/cmse_nonsecure_call.S. */
17223 static void
17224 cmse_nonsecure_call_clear_caller_saved (void)
17226 basic_block bb;
17228 FOR_EACH_BB_FN (bb, cfun)
17230 rtx_insn *insn;
17232 FOR_BB_INSNS (bb, insn)
17234 unsigned address_regnum, regno, maxregno =
17235 TARGET_HARD_FLOAT_ABI ? D7_VFP_REGNUM : NUM_ARG_REGS - 1;
17236 auto_sbitmap to_clear_bitmap (maxregno + 1);
17237 rtx_insn *seq;
17238 rtx pat, call, unspec, clearing_reg, ip_reg, shift;
17239 rtx address;
17240 CUMULATIVE_ARGS args_so_far_v;
17241 cumulative_args_t args_so_far;
17242 tree arg_type, fntype;
17243 bool first_param = true;
17244 function_args_iterator args_iter;
17245 uint32_t padding_bits_to_clear[4] = {0U, 0U, 0U, 0U};
17247 if (!NONDEBUG_INSN_P (insn))
17248 continue;
17250 if (!CALL_P (insn))
17251 continue;
17253 pat = PATTERN (insn);
17254 gcc_assert (GET_CODE (pat) == PARALLEL && XVECLEN (pat, 0) > 0);
17255 call = XVECEXP (pat, 0, 0);
17257 /* Get the real call RTX if the insn sets a value, i.e. the call returns a value. */
17258 if (GET_CODE (call) == SET)
17259 call = SET_SRC (call);
17261 /* Check if it is a cmse_nonsecure_call. */
17262 unspec = XEXP (call, 0);
17263 if (GET_CODE (unspec) != UNSPEC
17264 || XINT (unspec, 1) != UNSPEC_NONSECURE_MEM)
17265 continue;
17267 /* Determine the caller-saved registers we need to clear. */
17268 bitmap_clear (to_clear_bitmap);
17269 bitmap_set_range (to_clear_bitmap, R0_REGNUM, NUM_ARG_REGS);
17271 /* Only look at the caller-saved floating point registers in case of
17272 -mfloat-abi=hard. For -mfloat-abi=softfp we will be using the
17273 lazy stores and loads, which clear both caller- and callee-saved
17274 registers. */
17275 if (TARGET_HARD_FLOAT_ABI)
17277 auto_sbitmap float_bitmap (maxregno + 1);
17279 bitmap_clear (float_bitmap);
17280 bitmap_set_range (float_bitmap, FIRST_VFP_REGNUM,
17281 D7_VFP_REGNUM - FIRST_VFP_REGNUM + 1);
17282 bitmap_ior (to_clear_bitmap, to_clear_bitmap, float_bitmap);
17285 /* Make sure the register used to hold the function address is not
17286 cleared. */
17287 address = RTVEC_ELT (XVEC (unspec, 0), 0);
17288 gcc_assert (MEM_P (address));
17289 gcc_assert (REG_P (XEXP (address, 0)));
17290 address_regnum = REGNO (XEXP (address, 0));
17291 if (address_regnum < R0_REGNUM + NUM_ARG_REGS)
17292 bitmap_clear_bit (to_clear_bitmap, address_regnum);
17294 /* Set basic block of call insn so that df rescan is performed on
17295 insns inserted here. */
17296 set_block_for_insn (insn, bb);
17297 df_set_flags (DF_DEFER_INSN_RESCAN);
17298 start_sequence ();
17300 /* Make sure the scheduler doesn't schedule other insns beyond
17301 here. */
17302 emit_insn (gen_blockage ());
17304 /* Walk through all arguments and clear registers appropriately. */
17306 fntype = TREE_TYPE (MEM_EXPR (address));
17307 arm_init_cumulative_args (&args_so_far_v, fntype, NULL_RTX,
17308 NULL_TREE);
17309 args_so_far = pack_cumulative_args (&args_so_far_v);
17310 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
17312 rtx arg_rtx;
17313 uint64_t to_clear_args_mask;
17314 machine_mode arg_mode = TYPE_MODE (arg_type);
17316 if (VOID_TYPE_P (arg_type))
17317 continue;
17319 if (!first_param)
17320 arm_function_arg_advance (args_so_far, arg_mode, arg_type,
17321 true);
17323 arg_rtx = arm_function_arg (args_so_far, arg_mode, arg_type,
17324 true);
17325 gcc_assert (REG_P (arg_rtx));
17326 to_clear_args_mask
17327 = compute_not_to_clear_mask (arg_type, arg_rtx,
17328 REGNO (arg_rtx),
17329 &padding_bits_to_clear[0]);
17330 if (to_clear_args_mask)
17332 for (regno = R0_REGNUM; regno <= maxregno; regno++)
17334 if (to_clear_args_mask & (1ULL << regno))
17335 bitmap_clear_bit (to_clear_bitmap, regno);
17339 first_param = false;
17342 /* We use right shift and left shift to clear the LSB of the address
17343 we jump to instead of using bic, to avoid having to use an extra
17344 register on Thumb-1. */
17345 clearing_reg = XEXP (address, 0);
17346 shift = gen_rtx_LSHIFTRT (SImode, clearing_reg, const1_rtx);
17347 emit_insn (gen_rtx_SET (clearing_reg, shift));
17348 shift = gen_rtx_ASHIFT (SImode, clearing_reg, const1_rtx);
17349 emit_insn (gen_rtx_SET (clearing_reg, shift));
17351 /* Clear caller-saved registers that leak before doing a non-secure
17352 call. */
17353 ip_reg = gen_rtx_REG (SImode, IP_REGNUM);
17354 cmse_clear_registers (to_clear_bitmap, padding_bits_to_clear,
17355 NUM_ARG_REGS, ip_reg, clearing_reg);
17357 seq = get_insns ();
17358 end_sequence ();
17359 emit_insn_before (seq, insn);
17364 /* Rewrite a move insn into a subtract of 0 if the condition codes will
17365 be useful in the next conditional jump insn. */
17367 static void
17368 thumb1_reorg (void)
17370 basic_block bb;
17372 FOR_EACH_BB_FN (bb, cfun)
17374 rtx dest, src;
17375 rtx cmp, op0, op1, set = NULL;
17376 rtx_insn *prev, *insn = BB_END (bb);
17377 bool insn_clobbered = false;
17379 while (insn != BB_HEAD (bb) && !NONDEBUG_INSN_P (insn))
17380 insn = PREV_INSN (insn);
17382 /* Find the last cbranchsi4_insn in basic block BB. */
17383 if (insn == BB_HEAD (bb)
17384 || INSN_CODE (insn) != CODE_FOR_cbranchsi4_insn)
17385 continue;
17387 /* Get the register with which we are comparing. */
17388 cmp = XEXP (SET_SRC (PATTERN (insn)), 0);
17389 op0 = XEXP (cmp, 0);
17390 op1 = XEXP (cmp, 1);
17392 /* Check that comparison is against ZERO. */
17393 if (!CONST_INT_P (op1) || INTVAL (op1) != 0)
17394 continue;
17396 /* Find the first flag setting insn before INSN in basic block BB. */
17397 gcc_assert (insn != BB_HEAD (bb));
17398 for (prev = PREV_INSN (insn);
17399 (!insn_clobbered
17400 && prev != BB_HEAD (bb)
17401 && (NOTE_P (prev)
17402 || DEBUG_INSN_P (prev)
17403 || ((set = single_set (prev)) != NULL
17404 && get_attr_conds (prev) == CONDS_NOCOND)));
17405 prev = PREV_INSN (prev))
17407 if (reg_set_p (op0, prev))
17408 insn_clobbered = true;
17411 /* Skip if op0 is clobbered by insn other than prev. */
17412 if (insn_clobbered)
17413 continue;
17415 if (!set)
17416 continue;
17418 dest = SET_DEST (set);
17419 src = SET_SRC (set);
17420 if (!low_register_operand (dest, SImode)
17421 || !low_register_operand (src, SImode))
17422 continue;
17424 /* Rewrite move into subtract of 0 if its operand is compared with ZERO
17425 in INSN. Both src and dest of the move insn are checked. */
17426 if (REGNO (op0) == REGNO (src) || REGNO (op0) == REGNO (dest))
17428 dest = copy_rtx (dest);
17429 src = copy_rtx (src);
17430 src = gen_rtx_MINUS (SImode, src, const0_rtx);
17431 PATTERN (prev) = gen_rtx_SET (dest, src);
17432 INSN_CODE (prev) = -1;
17433 /* Set test register in INSN to dest. */
17434 XEXP (cmp, 0) = copy_rtx (dest);
17435 INSN_CODE (insn) = -1;
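/* Rough illustration (register numbers are made up): a basic block that
ends with a compare of r1 against zero and a conditional branch, and that
earlier contains the move (set (reg r1) (reg r2)), has the move rewritten
as (set (reg r1) (minus (reg r2) (const_int 0))), i.e. "subs r1, r2, #0".
The flags the branch needs are then already set, so the compare can
typically be omitted when the branch is output. */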
17440 /* Convert instructions to their cc-clobbering variant if possible, since
17441 that allows us to use smaller encodings. */
17443 static void
17444 thumb2_reorg (void)
17446 basic_block bb;
17447 regset_head live;
17449 INIT_REG_SET (&live);
17451 /* We are freeing block_for_insn in the toplev to keep compatibility
17452 with old MDEP_REORGS that are not CFG based. Recompute it now. */
17453 compute_bb_for_insn ();
17454 df_analyze ();
17456 enum Convert_Action {SKIP, CONV, SWAP_CONV};
17458 FOR_EACH_BB_FN (bb, cfun)
17460 if ((current_tune->disparage_flag_setting_t16_encodings
17461 == tune_params::DISPARAGE_FLAGS_ALL)
17462 && optimize_bb_for_speed_p (bb))
17463 continue;
17465 rtx_insn *insn;
17466 Convert_Action action = SKIP;
17467 Convert_Action action_for_partial_flag_setting
17468 = ((current_tune->disparage_flag_setting_t16_encodings
17469 != tune_params::DISPARAGE_FLAGS_NEITHER)
17470 && optimize_bb_for_speed_p (bb))
17471 ? SKIP : CONV;
17473 COPY_REG_SET (&live, DF_LR_OUT (bb));
17474 df_simulate_initialize_backwards (bb, &live);
17475 FOR_BB_INSNS_REVERSE (bb, insn)
17477 if (NONJUMP_INSN_P (insn)
17478 && !REGNO_REG_SET_P (&live, CC_REGNUM)
17479 && GET_CODE (PATTERN (insn)) == SET)
17481 action = SKIP;
17482 rtx pat = PATTERN (insn);
17483 rtx dst = XEXP (pat, 0);
17484 rtx src = XEXP (pat, 1);
17485 rtx op0 = NULL_RTX, op1 = NULL_RTX;
17487 if (UNARY_P (src) || BINARY_P (src))
17488 op0 = XEXP (src, 0);
17490 if (BINARY_P (src))
17491 op1 = XEXP (src, 1);
17493 if (low_register_operand (dst, SImode))
17495 switch (GET_CODE (src))
17497 case PLUS:
17498 /* Adding two registers and storing the result
17499 in the first source is already a 16-bit
17500 operation. */
17501 if (rtx_equal_p (dst, op0)
17502 && register_operand (op1, SImode))
17503 break;
17505 if (low_register_operand (op0, SImode))
17507 /* ADDS <Rd>,<Rn>,<Rm> */
17508 if (low_register_operand (op1, SImode))
17509 action = CONV;
17510 /* ADDS <Rdn>,#<imm8> */
17511 /* SUBS <Rdn>,#<imm8> */
17512 else if (rtx_equal_p (dst, op0)
17513 && CONST_INT_P (op1)
17514 && IN_RANGE (INTVAL (op1), -255, 255))
17515 action = CONV;
17516 /* ADDS <Rd>,<Rn>,#<imm3> */
17517 /* SUBS <Rd>,<Rn>,#<imm3> */
17518 else if (CONST_INT_P (op1)
17519 && IN_RANGE (INTVAL (op1), -7, 7))
17520 action = CONV;
17522 /* ADCS <Rd>, <Rn> */
17523 else if (GET_CODE (XEXP (src, 0)) == PLUS
17524 && rtx_equal_p (XEXP (XEXP (src, 0), 0), dst)
17525 && low_register_operand (XEXP (XEXP (src, 0), 1),
17526 SImode)
17527 && COMPARISON_P (op1)
17528 && cc_register (XEXP (op1, 0), VOIDmode)
17529 && maybe_get_arm_condition_code (op1) == ARM_CS
17530 && XEXP (op1, 1) == const0_rtx)
17531 action = CONV;
17532 break;
17534 case MINUS:
17535 /* RSBS <Rd>,<Rn>,#0
17536 Not handled here: see NEG below. */
17537 /* SUBS <Rd>,<Rn>,#<imm3>
17538 SUBS <Rdn>,#<imm8>
17539 Not handled here: see PLUS above. */
17540 /* SUBS <Rd>,<Rn>,<Rm> */
17541 if (low_register_operand (op0, SImode)
17542 && low_register_operand (op1, SImode))
17543 action = CONV;
17544 break;
17546 case MULT:
17547 /* MULS <Rdm>,<Rn>,<Rdm>
17548 As an exception to the rule, this is only used
17549 when optimizing for size since MULS is slow on all
17550 known implementations. We do not even want to use
17551 MULS in cold code, if optimizing for speed, so we
17552 test the global flag here. */
17553 if (!optimize_size)
17554 break;
17555 /* Fall through. */
17556 case AND:
17557 case IOR:
17558 case XOR:
17559 /* ANDS <Rdn>,<Rm> */
17560 if (rtx_equal_p (dst, op0)
17561 && low_register_operand (op1, SImode))
17562 action = action_for_partial_flag_setting;
17563 else if (rtx_equal_p (dst, op1)
17564 && low_register_operand (op0, SImode))
17565 action = action_for_partial_flag_setting == SKIP
17566 ? SKIP : SWAP_CONV;
17567 break;
17569 case ASHIFTRT:
17570 case ASHIFT:
17571 case LSHIFTRT:
17572 /* ASRS <Rdn>,<Rm> */
17573 /* LSRS <Rdn>,<Rm> */
17574 /* LSLS <Rdn>,<Rm> */
17575 if (rtx_equal_p (dst, op0)
17576 && low_register_operand (op1, SImode))
17577 action = action_for_partial_flag_setting;
17578 /* ASRS <Rd>,<Rm>,#<imm5> */
17579 /* LSRS <Rd>,<Rm>,#<imm5> */
17580 /* LSLS <Rd>,<Rm>,#<imm5> */
17581 else if (low_register_operand (op0, SImode)
17582 && CONST_INT_P (op1)
17583 && IN_RANGE (INTVAL (op1), 0, 31))
17584 action = action_for_partial_flag_setting;
17585 break;
17587 case ROTATERT:
17588 /* RORS <Rdn>,<Rm> */
17589 if (rtx_equal_p (dst, op0)
17590 && low_register_operand (op1, SImode))
17591 action = action_for_partial_flag_setting;
17592 break;
17594 case NOT:
17595 /* MVNS <Rd>,<Rm> */
17596 if (low_register_operand (op0, SImode))
17597 action = action_for_partial_flag_setting;
17598 break;
17600 case NEG:
17601 /* NEGS <Rd>,<Rm> (a.k.a RSBS) */
17602 if (low_register_operand (op0, SImode))
17603 action = CONV;
17604 break;
17606 case CONST_INT:
17607 /* MOVS <Rd>,#<imm8> */
17608 if (CONST_INT_P (src)
17609 && IN_RANGE (INTVAL (src), 0, 255))
17610 action = action_for_partial_flag_setting;
17611 break;
17613 case REG:
17614 /* MOVS and MOV<c> with registers have different
17615 encodings, so are not relevant here. */
17616 break;
17618 default:
17619 break;
17623 if (action != SKIP)
17625 rtx ccreg = gen_rtx_REG (CCmode, CC_REGNUM);
17626 rtx clobber = gen_rtx_CLOBBER (VOIDmode, ccreg);
17627 rtvec vec;
17629 if (action == SWAP_CONV)
17631 src = copy_rtx (src);
17632 XEXP (src, 0) = op1;
17633 XEXP (src, 1) = op0;
17634 pat = gen_rtx_SET (dst, src);
17635 vec = gen_rtvec (2, pat, clobber);
17637 else /* action == CONV */
17638 vec = gen_rtvec (2, pat, clobber);
17640 PATTERN (insn) = gen_rtx_PARALLEL (VOIDmode, vec);
17641 INSN_CODE (insn) = -1;
17645 if (NONDEBUG_INSN_P (insn))
17646 df_simulate_one_insn_backwards (bb, insn, &live);
17650 CLEAR_REG_SET (&live);
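/* For illustration: in Thumb-2 the non-flag-setting "add r0, r1, r2"
(three distinct low registers) only has a 32-bit encoding, whereas the
flag-setting "adds r0, r1, r2" has a 16-bit one. When the walk above
finds the condition codes dead, it adds the CC clobber to the SET so
that the shorter flag-setting form can be selected. */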
17653 /* GCC puts the pool in the wrong place for ARM, since we can only
17654 load addresses a limited distance around the PC. We do some
17655 special munging to move the constant pool values to the correct
17656 point in the code. */
17657 static void
17658 arm_reorg (void)
17660 rtx_insn *insn;
17661 HOST_WIDE_INT address = 0;
17662 Mfix * fix;
17664 if (use_cmse)
17665 cmse_nonsecure_call_clear_caller_saved ();
17666 if (TARGET_THUMB1)
17667 thumb1_reorg ();
17668 else if (TARGET_THUMB2)
17669 thumb2_reorg ();
17671 /* Ensure all insns that must be split have been split at this point.
17672 Otherwise, the pool placement code below may compute incorrect
17673 insn lengths. Note that when optimizing, all insns have already
17674 been split at this point. */
17675 if (!optimize)
17676 split_all_insns_noflow ();
17678 /* Make sure we do not attempt to create a literal pool even though it should
17679 no longer be necessary to create any. */
17680 if (arm_disable_literal_pool)
17681 return ;
17683 minipool_fix_head = minipool_fix_tail = NULL;
17685 /* The first insn must always be a note, or the code below won't
17686 scan it properly. */
17687 insn = get_insns ();
17688 gcc_assert (NOTE_P (insn));
17689 minipool_pad = 0;
17691 /* Scan all the insns and record the operands that will need fixing. */
17692 for (insn = next_nonnote_insn (insn); insn; insn = next_nonnote_insn (insn))
17694 if (BARRIER_P (insn))
17695 push_minipool_barrier (insn, address);
17696 else if (INSN_P (insn))
17698 rtx_jump_table_data *table;
17700 note_invalid_constants (insn, address, true);
17701 address += get_attr_length (insn);
17703 /* If the insn is a vector jump, add the size of the table
17704 and skip the table. */
17705 if (tablejump_p (insn, NULL, &table))
17707 address += get_jump_table_size (table);
17708 insn = table;
17711 else if (LABEL_P (insn))
17712 /* Add the worst-case padding due to alignment. We don't add
17713 the _current_ padding because the minipool insertions
17714 themselves might change it. */
17715 address += get_label_padding (insn);
17718 fix = minipool_fix_head;
17720 /* Now scan the fixups and perform the required changes. */
17721 while (fix)
17723 Mfix * ftmp;
17724 Mfix * fdel;
17725 Mfix * last_added_fix;
17726 Mfix * last_barrier = NULL;
17727 Mfix * this_fix;
17729 /* Skip any further barriers before the next fix. */
17730 while (fix && BARRIER_P (fix->insn))
17731 fix = fix->next;
17733 /* No more fixes. */
17734 if (fix == NULL)
17735 break;
17737 last_added_fix = NULL;
17739 for (ftmp = fix; ftmp; ftmp = ftmp->next)
17741 if (BARRIER_P (ftmp->insn))
17743 if (ftmp->address >= minipool_vector_head->max_address)
17744 break;
17746 last_barrier = ftmp;
17748 else if ((ftmp->minipool = add_minipool_forward_ref (ftmp)) == NULL)
17749 break;
17751 last_added_fix = ftmp; /* Keep track of the last fix added. */
17754 /* If we found a barrier, drop back to that; any fixes that we
17755 could have reached but come after the barrier will now go in
17756 the next mini-pool. */
17757 if (last_barrier != NULL)
17759 /* Reduce the refcount for those fixes that won't go into this
17760 pool after all. */
17761 for (fdel = last_barrier->next;
17762 fdel && fdel != ftmp;
17763 fdel = fdel->next)
17765 fdel->minipool->refcount--;
17766 fdel->minipool = NULL;
17769 ftmp = last_barrier;
17771 else
17773 /* ftmp is the first fix that we can't fit into this pool and
17774 there are no natural barriers that we could use. Insert a
17775 new barrier in the code somewhere between the previous
17776 fix and this one, and arrange to jump around it. */
17777 HOST_WIDE_INT max_address;
17779 /* The last item on the list of fixes must be a barrier, so
17780 we can never run off the end of the list of fixes without
17781 last_barrier being set. */
17782 gcc_assert (ftmp);
17784 max_address = minipool_vector_head->max_address;
17785 /* Check that there isn't another fix that is in range that
17786 we couldn't fit into this pool because the pool was
17787 already too large: we need to put the pool before such an
17788 instruction. The pool itself may come just after the
17789 fix because create_fix_barrier also allows space for a
17790 jump instruction. */
17791 if (ftmp->address < max_address)
17792 max_address = ftmp->address + 1;
17794 last_barrier = create_fix_barrier (last_added_fix, max_address);
17797 assign_minipool_offsets (last_barrier);
17799 while (ftmp)
17801 if (!BARRIER_P (ftmp->insn)
17802 && ((ftmp->minipool = add_minipool_backward_ref (ftmp))
17803 == NULL))
17804 break;
17806 ftmp = ftmp->next;
17809 /* Scan over the fixes we have identified for this pool, fixing them
17810 up and adding the constants to the pool itself. */
17811 for (this_fix = fix; this_fix && ftmp != this_fix;
17812 this_fix = this_fix->next)
17813 if (!BARRIER_P (this_fix->insn))
17815 rtx addr
17816 = plus_constant (Pmode,
17817 gen_rtx_LABEL_REF (VOIDmode,
17818 minipool_vector_label),
17819 this_fix->minipool->offset);
17820 *this_fix->loc = gen_rtx_MEM (this_fix->mode, addr);
17823 dump_minipool (last_barrier->insn);
17824 fix = ftmp;
17827 /* From now on we must synthesize any constants that we can't handle
17828 directly. This can happen if the RTL gets split during final
17829 instruction generation. */
17830 cfun->machine->after_arm_reorg = 1;
17832 /* Free the minipool memory. */
17833 obstack_free (&minipool_obstack, minipool_startobj);
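/* For illustration (approximate figures): an ARM-state PC-relative load
such as "ldr rN, =constant" can only reach a literal placed within about
4KB of the load, and Thumb loads reach less, so the pass above records
each such reference, picks a point that all of its users can reach, and
dumps the minipool there, creating a barrier (and a branch around the
pool) when no natural barrier is available. */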
17836 /* Routines to output assembly language. */
17838 /* Return string representation of passed in real value. */
17839 static const char *
17840 fp_const_from_val (REAL_VALUE_TYPE *r)
17842 if (!fp_consts_inited)
17843 init_fp_table ();
17845 gcc_assert (real_equal (r, &value_fp0));
17846 return "0";
17849 /* OPERANDS[0] is the entire list of insns that constitute the pop,
17850 OPERANDS[1] is the base register, RETURN_PC is true iff the return insn
17851 is in the list, and UPDATE is true iff the list contains an explicit
17852 update of the base register. */
17853 void
17854 arm_output_multireg_pop (rtx *operands, bool return_pc, rtx cond, bool reverse,
17855 bool update)
17857 int i;
17858 char pattern[100];
17859 int offset;
17860 const char *conditional;
17861 int num_saves = XVECLEN (operands[0], 0);
17862 unsigned int regno;
17863 unsigned int regno_base = REGNO (operands[1]);
17864 bool interrupt_p = IS_INTERRUPT (arm_current_func_type ());
17866 offset = 0;
17867 offset += update ? 1 : 0;
17868 offset += return_pc ? 1 : 0;
17870 /* Is the base register in the list? */
17871 for (i = offset; i < num_saves; i++)
17873 regno = REGNO (XEXP (XVECEXP (operands[0], 0, i), 0));
17874 /* If SP is in the list, then the base register must be SP. */
17875 gcc_assert ((regno != SP_REGNUM) || (regno_base == SP_REGNUM));
17876 /* If base register is in the list, there must be no explicit update. */
17877 if (regno == regno_base)
17878 gcc_assert (!update);
17881 conditional = reverse ? "%?%D0" : "%?%d0";
17882 /* Can't use POP if returning from an interrupt. */
17883 if ((regno_base == SP_REGNUM) && update && !(interrupt_p && return_pc))
17884 sprintf (pattern, "pop%s\t{", conditional);
17885 else
17887 /* Output ldmfd when the base register is SP, otherwise output ldmia.
17888 It's just a convention; their semantics are identical. */
17889 if (regno_base == SP_REGNUM)
17890 sprintf (pattern, "ldmfd%s\t", conditional);
17891 else if (update)
17892 sprintf (pattern, "ldmia%s\t", conditional);
17893 else
17894 sprintf (pattern, "ldm%s\t", conditional);
17896 strcat (pattern, reg_names[regno_base]);
17897 if (update)
17898 strcat (pattern, "!, {");
17899 else
17900 strcat (pattern, ", {");
17903 /* Output the first destination register. */
17904 strcat (pattern,
17905 reg_names[REGNO (XEXP (XVECEXP (operands[0], 0, offset), 0))]);
17907 /* Output the rest of the destination registers. */
17908 for (i = offset + 1; i < num_saves; i++)
17910 strcat (pattern, ", ");
17911 strcat (pattern,
17912 reg_names[REGNO (XEXP (XVECEXP (operands[0], 0, i), 0))]);
17915 strcat (pattern, "}");
17917 if (interrupt_p && return_pc)
17918 strcat (pattern, "^");
17920 output_asm_insn (pattern, &cond);
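/* Example outputs (illustrative register choices): popping r4, r5 and
the PC with an SP update gives "pop {r4, r5, pc}", while the same list
with no base-register update is emitted as "ldmfd sp, {r4, r5, pc}". */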
17924 /* Output the assembly for a store multiple. */
17926 const char *
17927 vfp_output_vstmd (rtx * operands)
17929 char pattern[100];
17930 int p;
17931 int base;
17932 int i;
17933 rtx addr_reg = REG_P (XEXP (operands[0], 0))
17934 ? XEXP (operands[0], 0)
17935 : XEXP (XEXP (operands[0], 0), 0);
17936 bool push_p = REGNO (addr_reg) == SP_REGNUM;
17938 if (push_p)
17939 strcpy (pattern, "vpush%?.64\t{%P1");
17940 else
17941 strcpy (pattern, "vstmdb%?.64\t%m0!, {%P1");
17943 p = strlen (pattern);
17945 gcc_assert (REG_P (operands[1]));
17947 base = (REGNO (operands[1]) - FIRST_VFP_REGNUM) / 2;
17948 for (i = 1; i < XVECLEN (operands[2], 0); i++)
17950 p += sprintf (&pattern[p], ", d%d", base + i);
17952 strcpy (&pattern[p], "}");
17954 output_asm_insn (pattern, operands);
17955 return "";
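/* Example outputs (illustrative registers): storing the pair d8/d9
through the stack pointer gives "vpush.64 {d8, d9}"; through any other
base register it gives "vstmdb.64 rN!, {d8, d9}". */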
17959 /* Emit RTL to save a block of VFP register pairs to the stack. Returns the
17960 number of bytes pushed. */
17962 static int
17963 vfp_emit_fstmd (int base_reg, int count)
17965 rtx par;
17966 rtx dwarf;
17967 rtx tmp, reg;
17968 int i;
17970 /* Work around the ARM10 VFPr1 bug. Data corruption can occur when exactly two
17971 register pairs are stored by a store multiple insn. We avoid this
17972 by pushing an extra pair. */
17973 if (count == 2 && !arm_arch6)
17975 if (base_reg == LAST_VFP_REGNUM - 3)
17976 base_reg -= 2;
17977 count++;
17980 /* FSTMD may not store more than 16 doubleword registers at once. Split
17981 larger stores into multiple parts (up to a maximum of two, in
17982 practice). */
17983 if (count > 16)
17985 int saved;
17986 /* NOTE: base_reg is an internal register number, so each D register
17987 counts as 2. */
17988 saved = vfp_emit_fstmd (base_reg + 32, count - 16);
17989 saved += vfp_emit_fstmd (base_reg, 16);
17990 return saved;
17993 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (count));
17994 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (count + 1));
17996 reg = gen_rtx_REG (DFmode, base_reg);
17997 base_reg += 2;
17999 XVECEXP (par, 0, 0)
18000 = gen_rtx_SET (gen_frame_mem
18001 (BLKmode,
18002 gen_rtx_PRE_MODIFY (Pmode,
18003 stack_pointer_rtx,
18004 plus_constant
18005 (Pmode, stack_pointer_rtx,
18006 - (count * 8)))
18008 gen_rtx_UNSPEC (BLKmode,
18009 gen_rtvec (1, reg),
18010 UNSPEC_PUSH_MULT));
18012 tmp = gen_rtx_SET (stack_pointer_rtx,
18013 plus_constant (Pmode, stack_pointer_rtx, -(count * 8)));
18014 RTX_FRAME_RELATED_P (tmp) = 1;
18015 XVECEXP (dwarf, 0, 0) = tmp;
18017 tmp = gen_rtx_SET (gen_frame_mem (DFmode, stack_pointer_rtx), reg);
18018 RTX_FRAME_RELATED_P (tmp) = 1;
18019 XVECEXP (dwarf, 0, 1) = tmp;
18021 for (i = 1; i < count; i++)
18023 reg = gen_rtx_REG (DFmode, base_reg);
18024 base_reg += 2;
18025 XVECEXP (par, 0, i) = gen_rtx_USE (VOIDmode, reg);
18027 tmp = gen_rtx_SET (gen_frame_mem (DFmode,
18028 plus_constant (Pmode,
18029 stack_pointer_rtx,
18030 i * 8)),
18031 reg);
18032 RTX_FRAME_RELATED_P (tmp) = 1;
18033 XVECEXP (dwarf, 0, i + 1) = tmp;
18036 par = emit_insn (par);
18037 add_reg_note (par, REG_FRAME_RELATED_EXPR, dwarf);
18038 RTX_FRAME_RELATED_P (par) = 1;
18040 return count * 8;
18043 /* Return TRUE if -mcmse has been passed and the function pointed to by 'addr'
18044 has the cmse_nonsecure_call attribute; return FALSE otherwise. */
18046 bool
18047 detect_cmse_nonsecure_call (tree addr)
18049 if (!addr)
18050 return FALSE;
18052 tree fntype = TREE_TYPE (addr);
18053 if (use_cmse && lookup_attribute ("cmse_nonsecure_call",
18054 TYPE_ATTRIBUTES (fntype)))
18055 return TRUE;
18056 return FALSE;
18060 /* Emit a call instruction with pattern PAT. ADDR is the address of
18061 the call target. */
18063 void
18064 arm_emit_call_insn (rtx pat, rtx addr, bool sibcall)
18066 rtx insn;
18068 insn = emit_call_insn (pat);
18070 /* The PIC register is live on entry to VxWorks PIC PLT entries.
18071 If the call might use such an entry, add a use of the PIC register
18072 to the instruction's CALL_INSN_FUNCTION_USAGE. */
18073 if (TARGET_VXWORKS_RTP
18074 && flag_pic
18075 && !sibcall
18076 && GET_CODE (addr) == SYMBOL_REF
18077 && (SYMBOL_REF_DECL (addr)
18078 ? !targetm.binds_local_p (SYMBOL_REF_DECL (addr))
18079 : !SYMBOL_REF_LOCAL_P (addr)))
18081 require_pic_register ();
18082 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), cfun->machine->pic_reg);
18085 if (TARGET_AAPCS_BASED)
18087 /* For AAPCS, IP and CC can be clobbered by veneers inserted by the
18088 linker. We need to add an IP clobber to allow setting
18089 TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS to true. A CC clobber
18090 is not needed since it's a fixed register. */
18091 rtx *fusage = &CALL_INSN_FUNCTION_USAGE (insn);
18092 clobber_reg (fusage, gen_rtx_REG (word_mode, IP_REGNUM));
18096 /* Output a 'call' insn. */
18097 const char *
18098 output_call (rtx *operands)
18100 gcc_assert (!arm_arch5); /* Patterns should call blx <reg> directly. */
18102 /* Handle calls to lr using ip (which may be clobbered in subr anyway). */
18103 if (REGNO (operands[0]) == LR_REGNUM)
18105 operands[0] = gen_rtx_REG (SImode, IP_REGNUM);
18106 output_asm_insn ("mov%?\t%0, %|lr", operands);
18109 output_asm_insn ("mov%?\t%|lr, %|pc", operands);
18111 if (TARGET_INTERWORK || arm_arch4t)
18112 output_asm_insn ("bx%?\t%0", operands);
18113 else
18114 output_asm_insn ("mov%?\t%|pc, %0", operands);
18116 return "";
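/* Example outputs (illustrative register): a call through r2 on a
pre-ARMv5 core is "mov lr, pc" followed by "bx r2" when interworking or
ARMv4T is available, or "mov pc, r2" otherwise; a call through lr first
copies lr into ip so that lr can be overwritten with the return
address. */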
18119 /* Output a move of a long double from ARM registers to ARM registers.
18120 OPERANDS[0] is the destination.
18121 OPERANDS[1] is the source. */
18122 const char *
18123 output_mov_long_double_arm_from_arm (rtx *operands)
18125 /* We have to be careful here because the two might overlap. */
18126 int dest_start = REGNO (operands[0]);
18127 int src_start = REGNO (operands[1]);
18128 rtx ops[2];
18129 int i;
18131 if (dest_start < src_start)
18133 for (i = 0; i < 3; i++)
18135 ops[0] = gen_rtx_REG (SImode, dest_start + i);
18136 ops[1] = gen_rtx_REG (SImode, src_start + i);
18137 output_asm_insn ("mov%?\t%0, %1", ops);
18140 else
18142 for (i = 2; i >= 0; i--)
18144 ops[0] = gen_rtx_REG (SImode, dest_start + i);
18145 ops[1] = gen_rtx_REG (SImode, src_start + i);
18146 output_asm_insn ("mov%?\t%0, %1", ops);
18150 return "";
18153 void
18154 arm_emit_movpair (rtx dest, rtx src)
18156 /* If the src is an immediate, simplify it. */
18157 if (CONST_INT_P (src))
18159 HOST_WIDE_INT val = INTVAL (src);
18160 emit_set_insn (dest, GEN_INT (val & 0x0000ffff));
18161 if ((val >> 16) & 0x0000ffff)
18163 emit_set_insn (gen_rtx_ZERO_EXTRACT (SImode, dest, GEN_INT (16),
18164 GEN_INT (16)),
18165 GEN_INT ((val >> 16) & 0x0000ffff));
18166 rtx_insn *insn = get_last_insn ();
18167 set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
18169 return;
18171 emit_set_insn (dest, gen_rtx_HIGH (SImode, src));
18172 emit_set_insn (dest, gen_rtx_LO_SUM (SImode, dest, src));
18173 rtx_insn *insn = get_last_insn ();
18174 set_unique_reg_note (insn, REG_EQUAL, copy_rtx (src));
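/* Illustrative example: moving #0x12345678 emits a set of the low half
(movw-style, #0x5678) followed by a 16-bit ZERO_EXTRACT set of the top
half (movt-style, #0x1234); a constant whose top half is zero needs only
the first insn. */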
18177 /* Output a move between double words. It must be REG<-MEM
18178 or MEM<-REG. */
18179 const char *
18180 output_move_double (rtx *operands, bool emit, int *count)
18182 enum rtx_code code0 = GET_CODE (operands[0]);
18183 enum rtx_code code1 = GET_CODE (operands[1]);
18184 rtx otherops[3];
18185 if (count)
18186 *count = 1;
18188 /* The only case when this might happen is when
18189 you are looking at the length of a DImode instruction
18190 that has an invalid constant in it. */
18191 if (code0 == REG && code1 != MEM)
18193 gcc_assert (!emit);
18194 *count = 2;
18195 return "";
18198 if (code0 == REG)
18200 unsigned int reg0 = REGNO (operands[0]);
18202 otherops[0] = gen_rtx_REG (SImode, 1 + reg0);
18204 gcc_assert (code1 == MEM); /* Constraints should ensure this. */
18206 switch (GET_CODE (XEXP (operands[1], 0)))
18208 case REG:
18210 if (emit)
18212 if (TARGET_LDRD
18213 && !(fix_cm3_ldrd && reg0 == REGNO(XEXP (operands[1], 0))))
18214 output_asm_insn ("ldrd%?\t%0, [%m1]", operands);
18215 else
18216 output_asm_insn ("ldmia%?\t%m1, %M0", operands);
18218 break;
18220 case PRE_INC:
18221 gcc_assert (TARGET_LDRD);
18222 if (emit)
18223 output_asm_insn ("ldrd%?\t%0, [%m1, #8]!", operands);
18224 break;
18226 case PRE_DEC:
18227 if (emit)
18229 if (TARGET_LDRD)
18230 output_asm_insn ("ldrd%?\t%0, [%m1, #-8]!", operands);
18231 else
18232 output_asm_insn ("ldmdb%?\t%m1!, %M0", operands);
18234 break;
18236 case POST_INC:
18237 if (emit)
18239 if (TARGET_LDRD)
18240 output_asm_insn ("ldrd%?\t%0, [%m1], #8", operands);
18241 else
18242 output_asm_insn ("ldmia%?\t%m1!, %M0", operands);
18244 break;
18246 case POST_DEC:
18247 gcc_assert (TARGET_LDRD);
18248 if (emit)
18249 output_asm_insn ("ldrd%?\t%0, [%m1], #-8", operands);
18250 break;
18252 case PRE_MODIFY:
18253 case POST_MODIFY:
18254 /* Autoincrement addressing modes should never have overlapping
18255 base and destination registers, and overlapping index registers
18256 are already prohibited, so this doesn't need to worry about
18257 fix_cm3_ldrd. */
18258 otherops[0] = operands[0];
18259 otherops[1] = XEXP (XEXP (XEXP (operands[1], 0), 1), 0);
18260 otherops[2] = XEXP (XEXP (XEXP (operands[1], 0), 1), 1);
18262 if (GET_CODE (XEXP (operands[1], 0)) == PRE_MODIFY)
18264 if (reg_overlap_mentioned_p (otherops[0], otherops[2]))
18266 /* Registers overlap so split out the increment. */
18267 if (emit)
18269 output_asm_insn ("add%?\t%1, %1, %2", otherops);
18270 output_asm_insn ("ldrd%?\t%0, [%1] @split", otherops);
18272 if (count)
18273 *count = 2;
18275 else
18277 /* Use a single insn if we can.
18278 FIXME: IWMMXT allows offsets larger than ldrd can
18279 handle, fix these up with a pair of ldr. */
18280 if (TARGET_THUMB2
18281 || !CONST_INT_P (otherops[2])
18282 || (INTVAL (otherops[2]) > -256
18283 && INTVAL (otherops[2]) < 256))
18285 if (emit)
18286 output_asm_insn ("ldrd%?\t%0, [%1, %2]!", otherops);
18288 else
18290 if (emit)
18292 output_asm_insn ("ldr%?\t%0, [%1, %2]!", otherops);
18293 output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
18295 if (count)
18296 *count = 2;
18301 else
18303 /* Use a single insn if we can.
18304 FIXME: IWMMXT allows offsets larger than ldrd can handle,
18305 fix these up with a pair of ldr. */
18306 if (TARGET_THUMB2
18307 || !CONST_INT_P (otherops[2])
18308 || (INTVAL (otherops[2]) > -256
18309 && INTVAL (otherops[2]) < 256))
18311 if (emit)
18312 output_asm_insn ("ldrd%?\t%0, [%1], %2", otherops);
18314 else
18316 if (emit)
18318 output_asm_insn ("ldr%?\t%H0, [%1, #4]", otherops);
18319 output_asm_insn ("ldr%?\t%0, [%1], %2", otherops);
18321 if (count)
18322 *count = 2;
18325 break;
18327 case LABEL_REF:
18328 case CONST:
18329 /* We might be able to use ldrd %0, %1 here. However the range is
18330 different to ldr/adr, and it is broken on some ARMv7-M
18331 implementations. */
18332 /* Use the second register of the pair to avoid problematic
18333 overlap. */
18334 otherops[1] = operands[1];
18335 if (emit)
18336 output_asm_insn ("adr%?\t%0, %1", otherops);
18337 operands[1] = otherops[0];
18338 if (emit)
18340 if (TARGET_LDRD)
18341 output_asm_insn ("ldrd%?\t%0, [%1]", operands);
18342 else
18343 output_asm_insn ("ldmia%?\t%1, %M0", operands);
18346 if (count)
18347 *count = 2;
18348 break;
18350 /* ??? This needs checking for thumb2. */
18351 default:
18352 if (arm_add_operand (XEXP (XEXP (operands[1], 0), 1),
18353 GET_MODE (XEXP (XEXP (operands[1], 0), 1))))
18355 otherops[0] = operands[0];
18356 otherops[1] = XEXP (XEXP (operands[1], 0), 0);
18357 otherops[2] = XEXP (XEXP (operands[1], 0), 1);
18359 if (GET_CODE (XEXP (operands[1], 0)) == PLUS)
18361 if (CONST_INT_P (otherops[2]) && !TARGET_LDRD)
18363 switch ((int) INTVAL (otherops[2]))
18365 case -8:
18366 if (emit)
18367 output_asm_insn ("ldmdb%?\t%1, %M0", otherops);
18368 return "";
18369 case -4:
18370 if (TARGET_THUMB2)
18371 break;
18372 if (emit)
18373 output_asm_insn ("ldmda%?\t%1, %M0", otherops);
18374 return "";
18375 case 4:
18376 if (TARGET_THUMB2)
18377 break;
18378 if (emit)
18379 output_asm_insn ("ldmib%?\t%1, %M0", otherops);
18380 return "";
18383 otherops[0] = gen_rtx_REG(SImode, REGNO(operands[0]) + 1);
18384 operands[1] = otherops[0];
18385 if (TARGET_LDRD
18386 && (REG_P (otherops[2])
18387 || TARGET_THUMB2
18388 || (CONST_INT_P (otherops[2])
18389 && INTVAL (otherops[2]) > -256
18390 && INTVAL (otherops[2]) < 256)))
18392 if (reg_overlap_mentioned_p (operands[0],
18393 otherops[2]))
18395 /* Swap base and index registers over to
18396 avoid a conflict. */
18397 std::swap (otherops[1], otherops[2]);
18399 /* If both registers conflict, it will usually
18400 have been fixed by a splitter. */
18401 if (reg_overlap_mentioned_p (operands[0], otherops[2])
18402 || (fix_cm3_ldrd && reg0 == REGNO (otherops[1])))
18404 if (emit)
18406 output_asm_insn ("add%?\t%0, %1, %2", otherops);
18407 output_asm_insn ("ldrd%?\t%0, [%1]", operands);
18409 if (count)
18410 *count = 2;
18412 else
18414 otherops[0] = operands[0];
18415 if (emit)
18416 output_asm_insn ("ldrd%?\t%0, [%1, %2]", otherops);
18418 return "";
18421 if (CONST_INT_P (otherops[2]))
18423 if (emit)
18425 if (!(const_ok_for_arm (INTVAL (otherops[2]))))
18426 output_asm_insn ("sub%?\t%0, %1, #%n2", otherops);
18427 else
18428 output_asm_insn ("add%?\t%0, %1, %2", otherops);
18431 else
18433 if (emit)
18434 output_asm_insn ("add%?\t%0, %1, %2", otherops);
18437 else
18439 if (emit)
18440 output_asm_insn ("sub%?\t%0, %1, %2", otherops);
18443 if (count)
18444 *count = 2;
18446 if (TARGET_LDRD)
18447 return "ldrd%?\t%0, [%1]";
18449 return "ldmia%?\t%1, %M0";
18451 else
18453 otherops[1] = adjust_address (operands[1], SImode, 4);
18454 /* Take care of overlapping base/data reg. */
18455 if (reg_mentioned_p (operands[0], operands[1]))
18457 if (emit)
18459 output_asm_insn ("ldr%?\t%0, %1", otherops);
18460 output_asm_insn ("ldr%?\t%0, %1", operands);
18462 if (count)
18463 *count = 2;
18466 else
18468 if (emit)
18470 output_asm_insn ("ldr%?\t%0, %1", operands);
18471 output_asm_insn ("ldr%?\t%0, %1", otherops);
18473 if (count)
18474 *count = 2;
18479 else
18481 /* Constraints should ensure this. */
18482 gcc_assert (code0 == MEM && code1 == REG);
18483 gcc_assert ((REGNO (operands[1]) != IP_REGNUM)
18484 || (TARGET_ARM && TARGET_LDRD));
18486 switch (GET_CODE (XEXP (operands[0], 0)))
18488 case REG:
18489 if (emit)
18491 if (TARGET_LDRD)
18492 output_asm_insn ("strd%?\t%1, [%m0]", operands);
18493 else
18494 output_asm_insn ("stm%?\t%m0, %M1", operands);
18496 break;
18498 case PRE_INC:
18499 gcc_assert (TARGET_LDRD);
18500 if (emit)
18501 output_asm_insn ("strd%?\t%1, [%m0, #8]!", operands);
18502 break;
18504 case PRE_DEC:
18505 if (emit)
18507 if (TARGET_LDRD)
18508 output_asm_insn ("strd%?\t%1, [%m0, #-8]!", operands);
18509 else
18510 output_asm_insn ("stmdb%?\t%m0!, %M1", operands);
18512 break;
18514 case POST_INC:
18515 if (emit)
18517 if (TARGET_LDRD)
18518 output_asm_insn ("strd%?\t%1, [%m0], #8", operands);
18519 else
18520 output_asm_insn ("stm%?\t%m0!, %M1", operands);
18522 break;
18524 case POST_DEC:
18525 gcc_assert (TARGET_LDRD);
18526 if (emit)
18527 output_asm_insn ("strd%?\t%1, [%m0], #-8", operands);
18528 break;
18530 case PRE_MODIFY:
18531 case POST_MODIFY:
18532 otherops[0] = operands[1];
18533 otherops[1] = XEXP (XEXP (XEXP (operands[0], 0), 1), 0);
18534 otherops[2] = XEXP (XEXP (XEXP (operands[0], 0), 1), 1);
18536 /* IWMMXT allows offsets larger than ldrd can handle,
18537 fix these up with a pair of ldr. */
18538 if (!TARGET_THUMB2
18539 && CONST_INT_P (otherops[2])
18540 && (INTVAL(otherops[2]) <= -256
18541 || INTVAL(otherops[2]) >= 256))
18543 if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
18545 if (emit)
18547 output_asm_insn ("str%?\t%0, [%1, %2]!", otherops);
18548 output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
18550 if (count)
18551 *count = 2;
18553 else
18555 if (emit)
18557 output_asm_insn ("str%?\t%H0, [%1, #4]", otherops);
18558 output_asm_insn ("str%?\t%0, [%1], %2", otherops);
18560 if (count)
18561 *count = 2;
18564 else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
18566 if (emit)
18567 output_asm_insn ("strd%?\t%0, [%1, %2]!", otherops);
18569 else
18571 if (emit)
18572 output_asm_insn ("strd%?\t%0, [%1], %2", otherops);
18574 break;
18576 case PLUS:
18577 otherops[2] = XEXP (XEXP (operands[0], 0), 1);
18578 if (CONST_INT_P (otherops[2]) && !TARGET_LDRD)
18580 switch ((int) INTVAL (XEXP (XEXP (operands[0], 0), 1)))
18582 case -8:
18583 if (emit)
18584 output_asm_insn ("stmdb%?\t%m0, %M1", operands);
18585 return "";
18587 case -4:
18588 if (TARGET_THUMB2)
18589 break;
18590 if (emit)
18591 output_asm_insn ("stmda%?\t%m0, %M1", operands);
18592 return "";
18594 case 4:
18595 if (TARGET_THUMB2)
18596 break;
18597 if (emit)
18598 output_asm_insn ("stmib%?\t%m0, %M1", operands);
18599 return "";
18602 if (TARGET_LDRD
18603 && (REG_P (otherops[2])
18604 || TARGET_THUMB2
18605 || (CONST_INT_P (otherops[2])
18606 && INTVAL (otherops[2]) > -256
18607 && INTVAL (otherops[2]) < 256)))
18609 otherops[0] = operands[1];
18610 otherops[1] = XEXP (XEXP (operands[0], 0), 0);
18611 if (emit)
18612 output_asm_insn ("strd%?\t%0, [%1, %2]", otherops);
18613 return "";
18615 /* Fall through */
18617 default:
18618 otherops[0] = adjust_address (operands[0], SImode, 4);
18619 otherops[1] = operands[1];
18620 if (emit)
18622 output_asm_insn ("str%?\t%1, %0", operands);
18623 output_asm_insn ("str%?\t%H1, %0", otherops);
18625 if (count)
18626 *count = 2;
18630 return "";
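/* Example outputs (illustrative registers): a DImode load from a plain
register address is "ldrd r0, [r2]" when LDRD is available and
"ldmia r2, {r0, r1}" otherwise (%M0 prints the register pair). */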
18633 /* Output a move, load or store for quad-word vectors in ARM registers. Only
18634 handles MEMs accepted by neon_vector_mem_operand with TYPE=1. */
18636 const char *
18637 output_move_quad (rtx *operands)
18639 if (REG_P (operands[0]))
18641 /* Load, or reg->reg move. */
18643 if (MEM_P (operands[1]))
18645 switch (GET_CODE (XEXP (operands[1], 0)))
18647 case REG:
18648 output_asm_insn ("ldmia%?\t%m1, %M0", operands);
18649 break;
18651 case LABEL_REF:
18652 case CONST:
18653 output_asm_insn ("adr%?\t%0, %1", operands);
18654 output_asm_insn ("ldmia%?\t%0, %M0", operands);
18655 break;
18657 default:
18658 gcc_unreachable ();
18661 else
18663 rtx ops[2];
18664 int dest, src, i;
18666 gcc_assert (REG_P (operands[1]));
18668 dest = REGNO (operands[0]);
18669 src = REGNO (operands[1]);
18671 /* This seems pretty dumb, but hopefully GCC won't try to do it
18672 very often. */
18673 if (dest < src)
18674 for (i = 0; i < 4; i++)
18676 ops[0] = gen_rtx_REG (SImode, dest + i);
18677 ops[1] = gen_rtx_REG (SImode, src + i);
18678 output_asm_insn ("mov%?\t%0, %1", ops);
18680 else
18681 for (i = 3; i >= 0; i--)
18683 ops[0] = gen_rtx_REG (SImode, dest + i);
18684 ops[1] = gen_rtx_REG (SImode, src + i);
18685 output_asm_insn ("mov%?\t%0, %1", ops);
18689 else
18691 gcc_assert (MEM_P (operands[0]));
18692 gcc_assert (REG_P (operands[1]));
18693 gcc_assert (!reg_overlap_mentioned_p (operands[1], operands[0]));
18695 switch (GET_CODE (XEXP (operands[0], 0)))
18697 case REG:
18698 output_asm_insn ("stm%?\t%m0, %M1", operands);
18699 break;
18701 default:
18702 gcc_unreachable ();
18706 return "";
18709 /* Output a VFP load or store instruction. */
18711 const char *
18712 output_move_vfp (rtx *operands)
18714 rtx reg, mem, addr, ops[2];
18715 int load = REG_P (operands[0]);
18716 int dp = GET_MODE_SIZE (GET_MODE (operands[0])) == 8;
18717 int sp = (!TARGET_VFP_FP16INST
18718 || GET_MODE_SIZE (GET_MODE (operands[0])) == 4);
18719 int integer_p = GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT;
18720 const char *templ;
18721 char buff[50];
18722 machine_mode mode;
18724 reg = operands[!load];
18725 mem = operands[load];
18727 mode = GET_MODE (reg);
18729 gcc_assert (REG_P (reg));
18730 gcc_assert (IS_VFP_REGNUM (REGNO (reg)));
18731 gcc_assert ((mode == HFmode && TARGET_HARD_FLOAT)
18732 || mode == SFmode
18733 || mode == DFmode
18734 || mode == HImode
18735 || mode == SImode
18736 || mode == DImode
18737 || (TARGET_NEON && VALID_NEON_DREG_MODE (mode)));
18738 gcc_assert (MEM_P (mem));
18740 addr = XEXP (mem, 0);
18742 switch (GET_CODE (addr))
18744 case PRE_DEC:
18745 templ = "v%smdb%%?.%s\t%%0!, {%%%s1}%s";
18746 ops[0] = XEXP (addr, 0);
18747 ops[1] = reg;
18748 break;
18750 case POST_INC:
18751 templ = "v%smia%%?.%s\t%%0!, {%%%s1}%s";
18752 ops[0] = XEXP (addr, 0);
18753 ops[1] = reg;
18754 break;
18756 default:
18757 templ = "v%sr%%?.%s\t%%%s0, %%1%s";
18758 ops[0] = reg;
18759 ops[1] = mem;
18760 break;
18763 sprintf (buff, templ,
18764 load ? "ld" : "st",
18765 dp ? "64" : sp ? "32" : "16",
18766 dp ? "P" : "",
18767 integer_p ? "\t%@ int" : "");
18768 output_asm_insn (buff, ops);
18770 return "";
18773 /* Output a Neon double-word or quad-word load or store, or a load
18774 or store for larger structure modes.
18776 WARNING: The ordering of elements is weird in big-endian mode,
18777 because the EABI requires that vectors stored in memory appear
18778 as though they were stored by a VSTM instruction.
18779 GCC RTL defines element ordering based on in-memory order.
18780 This can be different from the architectural ordering of elements
18781 within a NEON register. The intrinsics defined in arm_neon.h use the
18782 NEON register element ordering, not the GCC RTL element ordering.
18784 For example, the in-memory ordering of a big-endian quadword
18785 vector with 16-bit elements when stored from register pair {d0,d1}
18786 will be (lowest address first, d0[N] is NEON register element N):
18788 [d0[3], d0[2], d0[1], d0[0], d1[7], d1[6], d1[5], d1[4]]
18790 When necessary, quadword registers (dN, dN+1) are moved to ARM
18791 registers from rN in the order:
18793 dN -> (rN+1, rN), dN+1 -> (rN+3, rN+2)
18795 So that STM/LDM can be used on vectors in ARM registers, and the
18796 same memory layout will result as if VSTM/VLDM were used.
18798 Instead of VSTM/VLDM we prefer to use VST1.64/VLD1.64 where
18799 possible, which allows use of appropriate alignment tags.
18800 Note that the choice of "64" is independent of the actual vector
18801 element size; this size simply ensures that the behavior is
18802 equivalent to VSTM/VLDM in both little-endian and big-endian mode.
18804 Due to limitations of those instructions, use of VST1.64/VLD1.64
18805 is not possible if:
18806 - the address contains PRE_DEC, or
18807 - the mode refers to more than 4 double-word registers
18809 In those cases, it would be possible to replace VSTM/VLDM by a
18810 sequence of instructions; this is not currently implemented since
18811 this is not certain to actually improve performance. */
18813 const char *
18814 output_move_neon (rtx *operands)
18816 rtx reg, mem, addr, ops[2];
18817 int regno, nregs, load = REG_P (operands[0]);
18818 const char *templ;
18819 char buff[50];
18820 machine_mode mode;
18822 reg = operands[!load];
18823 mem = operands[load];
18825 mode = GET_MODE (reg);
18827 gcc_assert (REG_P (reg));
18828 regno = REGNO (reg);
18829 nregs = REG_NREGS (reg) / 2;
18830 gcc_assert (VFP_REGNO_OK_FOR_DOUBLE (regno)
18831 || NEON_REGNO_OK_FOR_QUAD (regno));
18832 gcc_assert (VALID_NEON_DREG_MODE (mode)
18833 || VALID_NEON_QREG_MODE (mode)
18834 || VALID_NEON_STRUCT_MODE (mode));
18835 gcc_assert (MEM_P (mem));
18837 addr = XEXP (mem, 0);
18839 /* Strip off const from addresses like (const (plus (...))). */
18840 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS)
18841 addr = XEXP (addr, 0);
18843 switch (GET_CODE (addr))
18845 case POST_INC:
18846 /* We have to use vldm / vstm for too-large modes. */
18847 if (nregs > 4)
18849 templ = "v%smia%%?\t%%0!, %%h1";
18850 ops[0] = XEXP (addr, 0);
18852 else
18854 templ = "v%s1.64\t%%h1, %%A0";
18855 ops[0] = mem;
18857 ops[1] = reg;
18858 break;
18860 case PRE_DEC:
18861 /* We have to use vldm / vstm in this case, since there is no
18862 pre-decrement form of the vld1 / vst1 instructions. */
18863 templ = "v%smdb%%?\t%%0!, %%h1";
18864 ops[0] = XEXP (addr, 0);
18865 ops[1] = reg;
18866 break;
18868 case POST_MODIFY:
18869 /* FIXME: Not currently enabled in neon_vector_mem_operand. */
18870 gcc_unreachable ();
18872 case REG:
18873 /* We have to use vldm / vstm for too-large modes. */
18874 if (nregs > 1)
18876 if (nregs > 4)
18877 templ = "v%smia%%?\t%%m0, %%h1";
18878 else
18879 templ = "v%s1.64\t%%h1, %%A0";
18881 ops[0] = mem;
18882 ops[1] = reg;
18883 break;
18885 /* Fall through. */
18886 case LABEL_REF:
18887 case PLUS:
18889 int i;
18890 int overlap = -1;
18891 for (i = 0; i < nregs; i++)
18893 /* We're only using DImode here because it's a convenient size. */
18894 ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * i);
18895 ops[1] = adjust_address (mem, DImode, 8 * i);
18896 if (reg_overlap_mentioned_p (ops[0], mem))
18898 gcc_assert (overlap == -1);
18899 overlap = i;
18901 else
18903 sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
18904 output_asm_insn (buff, ops);
18907 if (overlap != -1)
18909 ops[0] = gen_rtx_REG (DImode, REGNO (reg) + 2 * overlap);
18910 ops[1] = adjust_address (mem, SImode, 8 * overlap);
18911 sprintf (buff, "v%sr%%?\t%%P0, %%1", load ? "ld" : "st");
18912 output_asm_insn (buff, ops);
18915 return "";
18918 default:
18919 gcc_unreachable ();
18922 sprintf (buff, templ, load ? "ld" : "st");
18923 output_asm_insn (buff, ops);
18925 return "";
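/* Example outputs (illustrative registers, roughly): a post-incremented
store of a 16-byte vector held in {d0, d1} uses the VST1.64 template,
giving something like "vst1.64 {d0, d1}, [r0]!", while a structure mode
needing more than four double-word registers falls back to
"vstmia r0!, {d0-d5}", as described in the comment above. */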
18928 /* Compute and return the length of neon_mov<mode>, where <mode> is
18929 one of VSTRUCT modes: EI, OI, CI or XI. */
18931 int arm_attr_length_move_neon (rtx_insn *insn)
18933 rtx reg, mem, addr;
18934 int load;
18935 machine_mode mode;
18937 extract_insn_cached (insn);
18939 if (REG_P (recog_data.operand[0]) && REG_P (recog_data.operand[1]))
18941 mode = GET_MODE (recog_data.operand[0]);
18942 switch (mode)
18944 case E_EImode:
18945 case E_OImode:
18946 return 8;
18947 case E_CImode:
18948 return 12;
18949 case E_XImode:
18950 return 16;
18951 default:
18952 gcc_unreachable ();
18956 load = REG_P (recog_data.operand[0]);
18957 reg = recog_data.operand[!load];
18958 mem = recog_data.operand[load];
18960 gcc_assert (MEM_P (mem));
18962 addr = XEXP (mem, 0);
18964 /* Strip off const from addresses like (const (plus (...))). */
18965 if (GET_CODE (addr) == CONST && GET_CODE (XEXP (addr, 0)) == PLUS)
18966 addr = XEXP (addr, 0);
18968 if (GET_CODE (addr) == LABEL_REF || GET_CODE (addr) == PLUS)
18970 int insns = REG_NREGS (reg) / 2;
18971 return insns * 4;
18973 else
18974 return 4;
18977 /* Return nonzero if the offset in the address is an immediate. Otherwise,
18978 return zero. */
18981 int arm_address_offset_is_imm (rtx_insn *insn)
18983 rtx mem, addr;
18985 extract_insn_cached (insn);
18987 if (REG_P (recog_data.operand[0]))
18988 return 0;
18990 mem = recog_data.operand[0];
18992 gcc_assert (MEM_P (mem));
18994 addr = XEXP (mem, 0);
18996 if (REG_P (addr)
18997 || (GET_CODE (addr) == PLUS
18998 && REG_P (XEXP (addr, 0))
18999 && CONST_INT_P (XEXP (addr, 1))))
19000 return 1;
19001 else
19002 return 0;
19005 /* Output an ADD r, s, #n where n may be too big for one instruction.
19006 If adding zero to one register, output nothing. */
19007 const char *
19008 output_add_immediate (rtx *operands)
19010 HOST_WIDE_INT n = INTVAL (operands[2]);
19012 if (n != 0 || REGNO (operands[0]) != REGNO (operands[1]))
19014 if (n < 0)
19015 output_multi_immediate (operands,
19016 "sub%?\t%0, %1, %2", "sub%?\t%0, %0, %2", 2,
19017 -n);
19018 else
19019 output_multi_immediate (operands,
19020 "add%?\t%0, %1, %2", "add%?\t%0, %0, %2", 2,
19024 return "";
19027 /* Output a multiple immediate operation.
19028 OPERANDS is the vector of operands referred to in the output patterns.
19029 INSTR1 is the output pattern to use for the first constant.
19030 INSTR2 is the output pattern to use for subsequent constants.
19031 IMMED_OP is the index of the constant slot in OPERANDS.
19032 N is the constant value. */
19033 static const char *
19034 output_multi_immediate (rtx *operands, const char *instr1, const char *instr2,
19035 int immed_op, HOST_WIDE_INT n)
19037 #if HOST_BITS_PER_WIDE_INT > 32
19038 n &= 0xffffffff;
19039 #endif
19041 if (n == 0)
19043 /* Quick and easy output. */
19044 operands[immed_op] = const0_rtx;
19045 output_asm_insn (instr1, operands);
19047 else
19049 int i;
19050 const char * instr = instr1;
19052 /* Note that n is never zero here (which would give no output). */
19053 for (i = 0; i < 32; i += 2)
19055 if (n & (3 << i))
19057 operands[immed_op] = GEN_INT (n & (255 << i));
19058 output_asm_insn (instr, operands);
19059 instr = instr2;
19060 i += 6;
19065 return "";
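/* Illustrative trace of the chunking above (register names are arbitrary):
   asked to add the constant 0x10004, output_add_immediate splits it into
   8-bit chunks at even bit positions and emits roughly
       add     r0, r1, #4
       add     r0, r0, #65536
   the first instruction using INSTR1 and subsequent ones using INSTR2.  */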
19068 /* Return the name of a shifter operation. */
19069 static const char *
19070 arm_shift_nmem (enum rtx_code code)
19072 switch (code)
19074 case ASHIFT:
19075 return ARM_LSL_NAME;
19077 case ASHIFTRT:
19078 return "asr";
19080 case LSHIFTRT:
19081 return "lsr";
19083 case ROTATERT:
19084 return "ror";
19086 default:
19087 abort ();
19091 /* Return the appropriate ARM instruction for the operation code.
19092 The returned result should not be overwritten. OP is the rtx of the
19093 operation. SHIFT_FIRST_ARG is TRUE if the first argument of the operator
19094 was shifted. */
19095 const char *
19096 arithmetic_instr (rtx op, int shift_first_arg)
19098 switch (GET_CODE (op))
19100 case PLUS:
19101 return "add";
19103 case MINUS:
19104 return shift_first_arg ? "rsb" : "sub";
19106 case IOR:
19107 return "orr";
19109 case XOR:
19110 return "eor";
19112 case AND:
19113 return "and";
19115 case ASHIFT:
19116 case ASHIFTRT:
19117 case LSHIFTRT:
19118 case ROTATERT:
19119 return arm_shift_nmem (GET_CODE (op));
19121 default:
19122 gcc_unreachable ();
19126 /* Ensure valid constant shifts and return the appropriate shift mnemonic
19127 for the operation code. The returned result should not be overwritten.
19128 OP is the rtx of the shift.
19129 On exit, *AMOUNTP will be -1 if the shift is by a register, or the
19130 constant shift amount otherwise. */
19131 static const char *
19132 shift_op (rtx op, HOST_WIDE_INT *amountp)
19134 const char * mnem;
19135 enum rtx_code code = GET_CODE (op);
19137 switch (code)
19139 case ROTATE:
19140 if (!CONST_INT_P (XEXP (op, 1)))
19142 output_operand_lossage ("invalid shift operand");
19143 return NULL;
19146 code = ROTATERT;
19147 *amountp = 32 - INTVAL (XEXP (op, 1));
19148 mnem = "ror";
19149 break;
19151 case ASHIFT:
19152 case ASHIFTRT:
19153 case LSHIFTRT:
19154 case ROTATERT:
19155 mnem = arm_shift_nmem (code);
19156 if (CONST_INT_P (XEXP (op, 1)))
19158 *amountp = INTVAL (XEXP (op, 1));
19160 else if (REG_P (XEXP (op, 1)))
19162 *amountp = -1;
19163 return mnem;
19165 else
19167 output_operand_lossage ("invalid shift operand");
19168 return NULL;
19170 break;
19172 case MULT:
19173 /* We never have to worry about the amount being other than a
19174 power of 2, since this case can never be reloaded from a reg. */
19175 if (!CONST_INT_P (XEXP (op, 1)))
19177 output_operand_lossage ("invalid shift operand");
19178 return NULL;
19181 *amountp = INTVAL (XEXP (op, 1)) & 0xFFFFFFFF;
19183 /* Amount must be a power of two. */
19184 if (*amountp & (*amountp - 1))
19186 output_operand_lossage ("invalid shift operand");
19187 return NULL;
19190 *amountp = exact_log2 (*amountp);
19191 gcc_assert (IN_RANGE (*amountp, 0, 31));
19192 return ARM_LSL_NAME;
19194 default:
19195 output_operand_lossage ("invalid shift operand");
19196 return NULL;
19199 /* This is not 100% correct, but follows from the desire to merge
19200 multiplication by a power of 2 with the recognizer for a
19201 shift. >=32 is not a valid shift for "lsl", so we must try to
19202 output a shift that produces the correct arithmetical result.
19203 Using lsr #32 is identical except for the fact that the carry bit
19204 is not set correctly if we set the flags; but we never use the
19205 carry bit from such an operation, so we can ignore that. */
19206 if (code == ROTATERT)
19207 /* Rotate is just modulo 32. */
19208 *amountp &= 31;
19209 else if (*amountp != (*amountp & 31))
19211 if (code == ASHIFT)
19212 mnem = "lsr";
19213 *amountp = 32;
19216 /* Shifts of 0 are no-ops. */
19217 if (*amountp == 0)
19218 return NULL;
19220 return mnem;
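/* Illustrative results of shift_op: (mult x 8) yields ARM_LSL_NAME with
   *AMOUNTP == 3; (rotate x 10) is canonicalized to "ror" with *AMOUNTP == 22;
   a constant shift amount of zero makes the function return NULL so the
   caller omits the shift altogether.  */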
19223 /* Output a .ascii pseudo-op, keeping track of lengths. This is
19224 because /bin/as is horribly restrictive. The judgement about
19225 whether or not each character is 'printable' (and can be output as
19226 is) or not (and must be printed with an octal escape) must be made
19227 with reference to the *host* character set -- the situation is
19228 similar to that discussed in the comments above pp_c_char in
19229 c-pretty-print.c. */
19231 #define MAX_ASCII_LEN 51
19233 void
19234 output_ascii_pseudo_op (FILE *stream, const unsigned char *p, int len)
19236 int i;
19237 int len_so_far = 0;
19239 fputs ("\t.ascii\t\"", stream);
19241 for (i = 0; i < len; i++)
19243 int c = p[i];
19245 if (len_so_far >= MAX_ASCII_LEN)
19247 fputs ("\"\n\t.ascii\t\"", stream);
19248 len_so_far = 0;
19251 if (ISPRINT (c))
19253 if (c == '\\' || c == '\"')
19255 putc ('\\', stream);
19256 len_so_far++;
19258 putc (c, stream);
19259 len_so_far++;
19261 else
19263 fprintf (stream, "\\%03o", c);
19264 len_so_far += 4;
19268 fputs ("\"\n", stream);
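/* Illustrative output: for the three input bytes 'a', '"' and '\n' the code
   above emits
       .ascii  "a\"\012"
   printable characters are emitted as-is (with backslash and quote escaped)
   and the rest use octal escapes; strings longer than MAX_ASCII_LEN output
   characters are split across several .ascii directives.  */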
19271 /* Whether a register is callee saved or not. This is necessary because, when
19272 optimizing for size on Thumb-1 targets, high registers are marked as caller
19273 saved even though they are callee saved, in order to avoid using them. */
19274 #define callee_saved_reg_p(reg) \
19275 (!call_used_regs[reg] \
19276 || (TARGET_THUMB1 && optimize_size \
19277 && reg >= FIRST_HI_REGNUM && reg <= LAST_HI_REGNUM))
19279 /* Compute the register save mask for registers 0 through 12
19280 inclusive. This code is used by arm_compute_save_core_reg_mask (). */
19282 static unsigned long
19283 arm_compute_save_reg0_reg12_mask (void)
19285 unsigned long func_type = arm_current_func_type ();
19286 unsigned long save_reg_mask = 0;
19287 unsigned int reg;
19289 if (IS_INTERRUPT (func_type))
19291 unsigned int max_reg;
19292 /* Interrupt functions must not corrupt any registers,
19293 even call clobbered ones. If this is a leaf function
19294 we can just examine the registers used by the RTL, but
19295 otherwise we have to assume that whatever function is
19296 called might clobber anything, and so we have to save
19297 all the call-clobbered registers as well. */
19298 if (ARM_FUNC_TYPE (func_type) == ARM_FT_FIQ)
19299 /* FIQ handlers have registers r8 - r12 banked, so
19300 we only need to check r0 - r7. Normal ISRs only
19301 bank r14 and r15, so we must check up to r12.
19302 r13 is the stack pointer which is always preserved,
19303 so we do not need to consider it here. */
19304 max_reg = 7;
19305 else
19306 max_reg = 12;
19308 for (reg = 0; reg <= max_reg; reg++)
19309 if (df_regs_ever_live_p (reg)
19310 || (! crtl->is_leaf && call_used_regs[reg]))
19311 save_reg_mask |= (1 << reg);
19313 /* Also save the pic base register if necessary. */
19314 if (flag_pic
19315 && !TARGET_SINGLE_PIC_BASE
19316 && arm_pic_register != INVALID_REGNUM
19317 && crtl->uses_pic_offset_table)
19318 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
19320 else if (IS_VOLATILE (func_type))
19322 /* For noreturn functions we historically omitted register saves
19323 altogether. However, this really messes up debugging. As a
19324 compromise, save just the frame pointers. Combined with the link
19325 register saved elsewhere this should be sufficient to get
19326 a backtrace. */
19327 if (frame_pointer_needed)
19328 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
19329 if (df_regs_ever_live_p (ARM_HARD_FRAME_POINTER_REGNUM))
19330 save_reg_mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
19331 if (df_regs_ever_live_p (THUMB_HARD_FRAME_POINTER_REGNUM))
19332 save_reg_mask |= 1 << THUMB_HARD_FRAME_POINTER_REGNUM;
19334 else
19336 /* In the normal case we only need to save those registers
19337 which are call saved and which are used by this function. */
19338 for (reg = 0; reg <= 11; reg++)
19339 if (df_regs_ever_live_p (reg) && callee_saved_reg_p (reg))
19340 save_reg_mask |= (1 << reg);
19342 /* Handle the frame pointer as a special case. */
19343 if (frame_pointer_needed)
19344 save_reg_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
19346 /* If we aren't loading the PIC register,
19347 don't stack it even though it may be live. */
19348 if (flag_pic
19349 && !TARGET_SINGLE_PIC_BASE
19350 && arm_pic_register != INVALID_REGNUM
19351 && (df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM)
19352 || crtl->uses_pic_offset_table))
19353 save_reg_mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
19355 /* The prologue will copy SP into R0, so save it. */
19356 if (IS_STACKALIGN (func_type))
19357 save_reg_mask |= 1;
19360 /* Save registers so the exception handler can modify them. */
19361 if (crtl->calls_eh_return)
19363 unsigned int i;
19365 for (i = 0; ; i++)
19367 reg = EH_RETURN_DATA_REGNO (i);
19368 if (reg == INVALID_REGNUM)
19369 break;
19370 save_reg_mask |= 1 << reg;
19374 return save_reg_mask;
19377 /* Return true if r3 is live at the start of the function. */
19379 static bool
19380 arm_r3_live_at_start_p (void)
19382 /* Just look at cfg info, which is still close enough to correct at this
19383 point. This gives false positives for broken functions that might use
19384 uninitialized data that happens to be allocated in r3, but who cares? */
19385 return REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)), 3);
19388 /* Compute the number of bytes used to store the static chain register on the
19389 stack, above the stack frame. We need to know this accurately to get the
19390 alignment of the rest of the stack frame correct. */
19392 static int
19393 arm_compute_static_chain_stack_bytes (void)
19395 /* See the defining assertion in arm_expand_prologue. */
19396 if (IS_NESTED (arm_current_func_type ())
19397 && ((TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
19398 || ((flag_stack_check == STATIC_BUILTIN_STACK_CHECK
19399 || flag_stack_clash_protection)
19400 && !df_regs_ever_live_p (LR_REGNUM)))
19401 && arm_r3_live_at_start_p ()
19402 && crtl->args.pretend_args_size == 0)
19403 return 4;
19405 return 0;
19408 /* Compute a bit mask of which core registers need to be
19409 saved on the stack for the current function.
19410 This is used by arm_compute_frame_layout, which may add extra registers. */
19412 static unsigned long
19413 arm_compute_save_core_reg_mask (void)
19415 unsigned int save_reg_mask = 0;
19416 unsigned long func_type = arm_current_func_type ();
19417 unsigned int reg;
19419 if (IS_NAKED (func_type))
19420 /* This should never really happen. */
19421 return 0;
19423 /* If we are creating a stack frame, then we must save the frame pointer,
19424 IP (which will hold the old stack pointer), LR and the PC. */
19425 if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
19426 save_reg_mask |=
19427 (1 << ARM_HARD_FRAME_POINTER_REGNUM)
19428 | (1 << IP_REGNUM)
19429 | (1 << LR_REGNUM)
19430 | (1 << PC_REGNUM);
19432 save_reg_mask |= arm_compute_save_reg0_reg12_mask ();
19434 /* Decide if we need to save the link register.
19435 Interrupt routines have their own banked link register,
19436 so they never need to save it.
19437 Otherwise if we do not use the link register we do not need to save
19438 it. If we are pushing other registers onto the stack however, we
19439 can save an instruction in the epilogue by pushing the link register
19440 now and then popping it back into the PC. This incurs extra memory
19441 accesses though, so we only do it when optimizing for size, and only
19442 if we know that we will not need a fancy return sequence. */
19443 if (df_regs_ever_live_p (LR_REGNUM)
19444 || (save_reg_mask
19445 && optimize_size
19446 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
19447 && !crtl->tail_call_emit
19448 && !crtl->calls_eh_return))
19449 save_reg_mask |= 1 << LR_REGNUM;
19451 if (cfun->machine->lr_save_eliminated)
19452 save_reg_mask &= ~ (1 << LR_REGNUM);
19454 if (TARGET_REALLY_IWMMXT
19455 && ((bit_count (save_reg_mask)
19456 + ARM_NUM_INTS (crtl->args.pretend_args_size +
19457 arm_compute_static_chain_stack_bytes ())
19458 ) % 2) != 0)
19460 /* The total number of registers that are going to be pushed
19461 onto the stack is odd. We need to ensure that the stack
19462 is 64-bit aligned before we start to save iWMMXt registers,
19463 and also before we start to create locals. (A local variable
19464 might be a double or long long which we will load/store using
19465 an iWMMXt instruction). Therefore we need to push another
19466 ARM register, so that the stack will be 64-bit aligned. We
19467 try to avoid using the arg registers (r0 - r3) as they might be
19468 used to pass values in a tail call. */
19469 for (reg = 4; reg <= 12; reg++)
19470 if ((save_reg_mask & (1 << reg)) == 0)
19471 break;
19473 if (reg <= 12)
19474 save_reg_mask |= (1 << reg);
19475 else
19477 cfun->machine->sibcall_blocked = 1;
19478 save_reg_mask |= (1 << 3);
19482 /* We may need to push an additional register for use initializing the
19483 PIC base register. */
19484 if (TARGET_THUMB2 && IS_NESTED (func_type) && flag_pic
19485 && (save_reg_mask & THUMB2_WORK_REGS) == 0)
19487 reg = thumb_find_work_register (1 << 4);
19488 if (!call_used_regs[reg])
19489 save_reg_mask |= (1 << reg);
19492 return save_reg_mask;
19495 /* Compute a bit mask of which core registers need to be
19496 saved on the stack for the current function. */
19497 static unsigned long
19498 thumb1_compute_save_core_reg_mask (void)
19500 unsigned long mask;
19501 unsigned reg;
19503 mask = 0;
19504 for (reg = 0; reg < 12; reg ++)
19505 if (df_regs_ever_live_p (reg) && callee_saved_reg_p (reg))
19506 mask |= 1 << reg;
19508 /* Handle the frame pointer as a special case. */
19509 if (frame_pointer_needed)
19510 mask |= 1 << HARD_FRAME_POINTER_REGNUM;
19512 if (flag_pic
19513 && !TARGET_SINGLE_PIC_BASE
19514 && arm_pic_register != INVALID_REGNUM
19515 && crtl->uses_pic_offset_table)
19516 mask |= 1 << PIC_OFFSET_TABLE_REGNUM;
19518 /* See if we might need r11 for calls to _interwork_r11_call_via_rN(). */
19519 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
19520 mask |= 1 << ARM_HARD_FRAME_POINTER_REGNUM;
19522 /* LR will also be pushed if any lo regs are pushed. */
19523 if (mask & 0xff || thumb_force_lr_save ())
19524 mask |= (1 << LR_REGNUM);
19526 /* Make sure we have a low work register if we need one.
19527 We will need one if we are going to push a high register,
19528 but we are not currently intending to push a low register. */
19529 if ((mask & 0xff) == 0
19530 && ((mask & 0x0f00) || TARGET_BACKTRACE))
19532 /* Use thumb_find_work_register to choose which register
19533 we will use. If the register is live then we will
19534 have to push it. Use LAST_LO_REGNUM as our fallback
19535 choice for the register to select. */
19536 reg = thumb_find_work_register (1 << LAST_LO_REGNUM);
19537 /* Make sure the register returned by thumb_find_work_register is
19538 not part of the return value. */
19539 if (reg * UNITS_PER_WORD <= (unsigned) arm_size_return_regs ())
19540 reg = LAST_LO_REGNUM;
19542 if (callee_saved_reg_p (reg))
19543 mask |= 1 << reg;
19546 /* The 504 below is 8 bytes less than 512 because there are two possible
19547 alignment words. We can't tell here if they will be present or not so we
19548 have to play it safe and assume that they are. */
19549 if ((CALLER_INTERWORKING_SLOT_SIZE +
19550 ROUND_UP_WORD (get_frame_size ()) +
19551 crtl->outgoing_args_size) >= 504)
19553 /* This is the same as the code in thumb1_expand_prologue() which
19554 determines which register to use for stack decrement. */
19555 for (reg = LAST_ARG_REGNUM + 1; reg <= LAST_LO_REGNUM; reg++)
19556 if (mask & (1 << reg))
19557 break;
19559 if (reg > LAST_LO_REGNUM)
19561 /* Make sure we have a register available for stack decrement. */
19562 mask |= 1 << LAST_LO_REGNUM;
19566 return mask;
19570 /* Return the number of bytes required to save VFP registers. */
19571 static int
19572 arm_get_vfp_saved_size (void)
19574 unsigned int regno;
19575 int count;
19576 int saved;
19578 saved = 0;
19579 /* Space for saved VFP registers. */
19580 if (TARGET_HARD_FLOAT)
19582 count = 0;
19583 for (regno = FIRST_VFP_REGNUM;
19584 regno < LAST_VFP_REGNUM;
19585 regno += 2)
19587 if ((!df_regs_ever_live_p (regno) || call_used_regs[regno])
19588 && (!df_regs_ever_live_p (regno + 1) || call_used_regs[regno + 1]))
19590 if (count > 0)
19592 /* Workaround ARM10 VFPr1 bug. */
19593 if (count == 2 && !arm_arch6)
19594 count++;
19595 saved += count * 8;
19597 count = 0;
19599 else
19600 count++;
19602 if (count > 0)
19604 if (count == 2 && !arm_arch6)
19605 count++;
19606 saved += count * 8;
19609 return saved;
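/* Illustrative sizes (TARGET_HARD_FLOAT assumed): a function whose only live
   call-saved VFP registers are d8, d9 and d10 reserves 24 bytes.  On
   pre-ARMv6 cores the ARM10 VFPr1 workaround above pads a run of exactly two
   registers to three, so using just d8 and d9 also reserves 24 bytes rather
   than 16.  */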
19613 /* Generate a function exit sequence. If REALLY_RETURN is false, then do
19614 everything bar the final return instruction. If simple_return is true,
19615 then do not output the epilogue, because it has already been emitted in RTL.
19617 Note: do not forget to update length attribute of corresponding insn pattern
19618 when changing assembly output (e.g. the length attribute of
19619 thumb2_cmse_entry_return when updating Armv8-M Mainline Security Extensions
19620 register clearing sequences). */
19621 const char *
19622 output_return_instruction (rtx operand, bool really_return, bool reverse,
19623 bool simple_return)
19625 char conditional[10];
19626 char instr[100];
19627 unsigned reg;
19628 unsigned long live_regs_mask;
19629 unsigned long func_type;
19630 arm_stack_offsets *offsets;
19632 func_type = arm_current_func_type ();
19634 if (IS_NAKED (func_type))
19635 return "";
19637 if (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN)
19639 /* If this function was declared non-returning, and we have
19640 found a tail call, then we have to trust that the called
19641 function won't return. */
19642 if (really_return)
19644 rtx ops[2];
19646 /* Otherwise, trap an attempted return by aborting. */
19647 ops[0] = operand;
19648 ops[1] = gen_rtx_SYMBOL_REF (Pmode, NEED_PLT_RELOC ? "abort(PLT)"
19649 : "abort");
19650 assemble_external_libcall (ops[1]);
19651 output_asm_insn (reverse ? "bl%D0\t%a1" : "bl%d0\t%a1", ops);
19654 return "";
19657 gcc_assert (!cfun->calls_alloca || really_return);
19659 sprintf (conditional, "%%?%%%c0", reverse ? 'D' : 'd');
19661 cfun->machine->return_used_this_function = 1;
19663 offsets = arm_get_frame_offsets ();
19664 live_regs_mask = offsets->saved_regs_mask;
19666 if (!simple_return && live_regs_mask)
19668 const char * return_reg;
19670 /* If we do not have any special requirements for function exit
19671 (e.g. interworking) then we can load the return address
19672 directly into the PC. Otherwise we must load it into LR. */
19673 if (really_return
19674 && !IS_CMSE_ENTRY (func_type)
19675 && (IS_INTERRUPT (func_type) || !TARGET_INTERWORK))
19676 return_reg = reg_names[PC_REGNUM];
19677 else
19678 return_reg = reg_names[LR_REGNUM];
19680 if ((live_regs_mask & (1 << IP_REGNUM)) == (1 << IP_REGNUM))
19682 /* There are three possible reasons for the IP register
19683 being saved: 1) a stack frame was created, in which case
19684 IP contains the old stack pointer, or 2) an ISR routine
19685 corrupted it, or 3) it was saved to align the stack on
19686 iWMMXt. In case 1, restore IP into SP, otherwise just
19687 restore IP. */
19688 if (frame_pointer_needed)
19690 live_regs_mask &= ~ (1 << IP_REGNUM);
19691 live_regs_mask |= (1 << SP_REGNUM);
19693 else
19694 gcc_assert (IS_INTERRUPT (func_type) || TARGET_REALLY_IWMMXT);
19697 /* On some ARM architectures it is faster to use LDR rather than
19698 LDM to load a single register. On other architectures, the
19699 cost is the same. In 26 bit mode, or for exception handlers,
19700 we have to use LDM to load the PC so that the CPSR is also
19701 restored. */
19702 for (reg = 0; reg <= LAST_ARM_REGNUM; reg++)
19703 if (live_regs_mask == (1U << reg))
19704 break;
19706 if (reg <= LAST_ARM_REGNUM
19707 && (reg != LR_REGNUM
19708 || ! really_return
19709 || ! IS_INTERRUPT (func_type)))
19711 sprintf (instr, "ldr%s\t%%|%s, [%%|sp], #4", conditional,
19712 (reg == LR_REGNUM) ? return_reg : reg_names[reg]);
19714 else
19716 char *p;
19717 int first = 1;
19719 /* Generate the load multiple instruction to restore the
19720 registers. Note we can get here, even if
19721 frame_pointer_needed is true, but only if sp already
19722 points to the base of the saved core registers. */
19723 if (live_regs_mask & (1 << SP_REGNUM))
19725 unsigned HOST_WIDE_INT stack_adjust;
19727 stack_adjust = offsets->outgoing_args - offsets->saved_regs;
19728 gcc_assert (stack_adjust == 0 || stack_adjust == 4);
19730 if (stack_adjust && arm_arch5 && TARGET_ARM)
19731 sprintf (instr, "ldmib%s\t%%|sp, {", conditional);
19732 else
19734 /* If we can't use ldmib (SA110 bug),
19735 then try to pop r3 instead. */
19736 if (stack_adjust)
19737 live_regs_mask |= 1 << 3;
19739 sprintf (instr, "ldmfd%s\t%%|sp, {", conditional);
19742 /* For interrupt returns we have to use an LDM rather than
19743 a POP so that we can use the exception return variant. */
19744 else if (IS_INTERRUPT (func_type))
19745 sprintf (instr, "ldmfd%s\t%%|sp!, {", conditional);
19746 else
19747 sprintf (instr, "pop%s\t{", conditional);
19749 p = instr + strlen (instr);
19751 for (reg = 0; reg <= SP_REGNUM; reg++)
19752 if (live_regs_mask & (1 << reg))
19754 int l = strlen (reg_names[reg]);
19756 if (first)
19757 first = 0;
19758 else
19760 memcpy (p, ", ", 2);
19761 p += 2;
19764 memcpy (p, "%|", 2);
19765 memcpy (p + 2, reg_names[reg], l);
19766 p += l + 2;
19769 if (live_regs_mask & (1 << LR_REGNUM))
19771 sprintf (p, "%s%%|%s}", first ? "" : ", ", return_reg);
19772 /* If returning from an interrupt, restore the CPSR. */
19773 if (IS_INTERRUPT (func_type))
19774 strcat (p, "^");
19776 else
19777 strcpy (p, "}");
19780 output_asm_insn (instr, & operand);
19782 /* See if we need to generate an extra instruction to
19783 perform the actual function return. */
19784 if (really_return
19785 && func_type != ARM_FT_INTERWORKED
19786 && (live_regs_mask & (1 << LR_REGNUM)) != 0)
19788 /* The return has already been handled
19789 by loading the LR into the PC. */
19790 return "";
19794 if (really_return)
19796 switch ((int) ARM_FUNC_TYPE (func_type))
19798 case ARM_FT_ISR:
19799 case ARM_FT_FIQ:
19800 /* ??? This is wrong for unified assembly syntax. */
19801 sprintf (instr, "sub%ss\t%%|pc, %%|lr, #4", conditional);
19802 break;
19804 case ARM_FT_INTERWORKED:
19805 gcc_assert (arm_arch5 || arm_arch4t);
19806 sprintf (instr, "bx%s\t%%|lr", conditional);
19807 break;
19809 case ARM_FT_EXCEPTION:
19810 /* ??? This is wrong for unified assembly syntax. */
19811 sprintf (instr, "mov%ss\t%%|pc, %%|lr", conditional);
19812 break;
19814 default:
19815 if (IS_CMSE_ENTRY (func_type))
19817 /* Check if we have to clear the 'GE bits', which are only used if
19818 parallel add and subtract instructions are available. */
19819 if (TARGET_INT_SIMD)
19820 snprintf (instr, sizeof (instr),
19821 "msr%s\tAPSR_nzcvqg, %%|lr", conditional);
19822 else
19823 snprintf (instr, sizeof (instr),
19824 "msr%s\tAPSR_nzcvq, %%|lr", conditional);
19826 output_asm_insn (instr, & operand);
19827 if (TARGET_HARD_FLOAT && !TARGET_THUMB1)
19829 /* Clear the cumulative exception-status bits (0-4,7) and the
19830 condition code bits (28-31) of the FPSCR. We need to
19831 remember to clear the first scratch register used (IP) and
19832 save and restore the second (r4). */
19833 snprintf (instr, sizeof (instr), "push\t{%%|r4}");
19834 output_asm_insn (instr, & operand);
19835 snprintf (instr, sizeof (instr), "vmrs\t%%|ip, fpscr");
19836 output_asm_insn (instr, & operand);
19837 snprintf (instr, sizeof (instr), "movw\t%%|r4, #65376");
19838 output_asm_insn (instr, & operand);
19839 snprintf (instr, sizeof (instr), "movt\t%%|r4, #4095");
19840 output_asm_insn (instr, & operand);
19841 snprintf (instr, sizeof (instr), "and\t%%|ip, %%|r4");
19842 output_asm_insn (instr, & operand);
19843 snprintf (instr, sizeof (instr), "vmsr\tfpscr, %%|ip");
19844 output_asm_insn (instr, & operand);
19845 snprintf (instr, sizeof (instr), "pop\t{%%|r4}");
19846 output_asm_insn (instr, & operand);
19847 snprintf (instr, sizeof (instr), "mov\t%%|ip, %%|lr");
19848 output_asm_insn (instr, & operand);
19850 snprintf (instr, sizeof (instr), "bxns\t%%|lr");
19852 /* Use bx if it's available. */
19853 else if (arm_arch5 || arm_arch4t)
19854 sprintf (instr, "bx%s\t%%|lr", conditional);
19855 else
19856 sprintf (instr, "mov%s\t%%|pc, %%|lr", conditional);
19857 break;
19860 output_asm_insn (instr, & operand);
19863 return "";
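/* Illustrative output (register choice arbitrary): a normal ARM-mode function
   whose saved_regs_mask is {r4, r5, lr}, returning with no interworking or
   CMSE requirements, gets the single instruction
       pop     {r4, r5, pc}
   whereas an FIQ handler that saved nothing falls through to the
   "subs pc, lr, #4" exception-return case.  */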
19866 /* Output in FILE asm statements needed to declare the NAME of the function
19867 defined by its DECL node. */
19869 void
19870 arm_asm_declare_function_name (FILE *file, const char *name, tree decl)
19872 size_t cmse_name_len;
19873 char *cmse_name = 0;
19874 char cmse_prefix[] = "__acle_se_";
19876 /* When compiling with ARMv8-M Security Extensions enabled, we should print an
19877 extra function label for each function with the 'cmse_nonsecure_entry'
19878 attribute. This extra function label should be prepended with
19879 '__acle_se_', telling the linker that it needs to create secure gateway
19880 veneers for this function. */
19881 if (use_cmse && lookup_attribute ("cmse_nonsecure_entry",
19882 DECL_ATTRIBUTES (decl)))
19884 cmse_name_len = sizeof (cmse_prefix) + strlen (name);
19885 cmse_name = XALLOCAVEC (char, cmse_name_len);
19886 snprintf (cmse_name, cmse_name_len, "%s%s", cmse_prefix, name);
19887 targetm.asm_out.globalize_label (file, cmse_name);
19889 ARM_DECLARE_FUNCTION_NAME (file, cmse_name, decl);
19890 ASM_OUTPUT_TYPE_DIRECTIVE (file, cmse_name, "function");
19893 ARM_DECLARE_FUNCTION_NAME (file, name, decl);
19894 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
19895 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
19896 ASM_OUTPUT_LABEL (file, name);
19898 if (cmse_name)
19899 ASM_OUTPUT_LABEL (file, cmse_name);
19901 ARM_OUTPUT_FN_UNWIND (file, TRUE);
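/* For a function "foo" carrying the cmse_nonsecure_entry attribute the output
   therefore contains both the usual "foo" symbol and a global "__acle_se_foo"
   symbol defined at the same address; the linker uses the __acle_se_ alias to
   decide which functions need secure gateway veneers.  */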
19904 /* Write the function name into the code section, directly preceding
19905 the function prologue.
19907 Code will be output similar to this:
19908 t0
19909 .ascii "arm_poke_function_name", 0
19910 .align
19911 t1
19912 .word 0xff000000 + (t1 - t0)
19913 arm_poke_function_name
19914 mov ip, sp
19915 stmfd sp!, {fp, ip, lr, pc}
19916 sub fp, ip, #4
19918 When performing a stack backtrace, code can inspect the value
19919 of 'pc' stored at 'fp' + 0. If the trace function then looks
19920 at location pc - 12 and the top 8 bits are set, then we know
19921 that there is a function name embedded immediately preceding this
19922 location and that it has length ((pc[-3]) & ~0xff000000).
19924 We assume that pc is declared as a pointer to an unsigned long.
19926 It is of no benefit to output the function name if we are assembling
19927 a leaf function. These function types will not contain a stack
19928 backtrace structure, so it is not possible to determine the
19929 function name. */
19930 void
19931 arm_poke_function_name (FILE *stream, const char *name)
19933 unsigned long alignlength;
19934 unsigned long length;
19935 rtx x;
19937 length = strlen (name) + 1;
19938 alignlength = ROUND_UP_WORD (length);
19940 ASM_OUTPUT_ASCII (stream, name, length);
19941 ASM_OUTPUT_ALIGN (stream, 2);
19942 x = GEN_INT ((unsigned HOST_WIDE_INT) 0xff000000 + alignlength);
19943 assemble_aligned_integer (UNITS_PER_WORD, x);
19946 /* Place some comments into the assembler stream
19947 describing the current function. */
19948 static void
19949 arm_output_function_prologue (FILE *f)
19951 unsigned long func_type;
19953 /* Sanity check. */
19954 gcc_assert (!arm_ccfsm_state && !arm_target_insn);
19956 func_type = arm_current_func_type ();
19958 switch ((int) ARM_FUNC_TYPE (func_type))
19960 default:
19961 case ARM_FT_NORMAL:
19962 break;
19963 case ARM_FT_INTERWORKED:
19964 asm_fprintf (f, "\t%@ Function supports interworking.\n");
19965 break;
19966 case ARM_FT_ISR:
19967 asm_fprintf (f, "\t%@ Interrupt Service Routine.\n");
19968 break;
19969 case ARM_FT_FIQ:
19970 asm_fprintf (f, "\t%@ Fast Interrupt Service Routine.\n");
19971 break;
19972 case ARM_FT_EXCEPTION:
19973 asm_fprintf (f, "\t%@ ARM Exception Handler.\n");
19974 break;
19977 if (IS_NAKED (func_type))
19978 asm_fprintf (f, "\t%@ Naked Function: prologue and epilogue provided by programmer.\n");
19980 if (IS_VOLATILE (func_type))
19981 asm_fprintf (f, "\t%@ Volatile: function does not return.\n");
19983 if (IS_NESTED (func_type))
19984 asm_fprintf (f, "\t%@ Nested: function declared inside another function.\n");
19985 if (IS_STACKALIGN (func_type))
19986 asm_fprintf (f, "\t%@ Stack Align: May be called with mis-aligned SP.\n");
19987 if (IS_CMSE_ENTRY (func_type))
19988 asm_fprintf (f, "\t%@ Non-secure entry function: called from non-secure code.\n");
19990 asm_fprintf (f, "\t%@ args = %wd, pretend = %d, frame = %wd\n",
19991 (HOST_WIDE_INT) crtl->args.size,
19992 crtl->args.pretend_args_size,
19993 (HOST_WIDE_INT) get_frame_size ());
19995 asm_fprintf (f, "\t%@ frame_needed = %d, uses_anonymous_args = %d\n",
19996 frame_pointer_needed,
19997 cfun->machine->uses_anonymous_args);
19999 if (cfun->machine->lr_save_eliminated)
20000 asm_fprintf (f, "\t%@ link register save eliminated.\n");
20002 if (crtl->calls_eh_return)
20003 asm_fprintf (f, "\t%@ Calls __builtin_eh_return.\n");
20007 static void
20008 arm_output_function_epilogue (FILE *)
20010 arm_stack_offsets *offsets;
20012 if (TARGET_THUMB1)
20014 int regno;
20016 /* Emit any call-via-reg trampolines that are needed for v4t support
20017 of call_reg and call_value_reg type insns. */
20018 for (regno = 0; regno < LR_REGNUM; regno++)
20020 rtx label = cfun->machine->call_via[regno];
20022 if (label != NULL)
20024 switch_to_section (function_section (current_function_decl));
20025 targetm.asm_out.internal_label (asm_out_file, "L",
20026 CODE_LABEL_NUMBER (label));
20027 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
20031 /* ??? Probably not safe to set this here, since it assumes that a
20032 function will be emitted as assembly immediately after we generate
20033 RTL for it. This does not happen for inline functions. */
20034 cfun->machine->return_used_this_function = 0;
20036 else /* TARGET_32BIT */
20038 /* We need to take into account any stack-frame rounding. */
20039 offsets = arm_get_frame_offsets ();
20041 gcc_assert (!use_return_insn (FALSE, NULL)
20042 || (cfun->machine->return_used_this_function != 0)
20043 || offsets->saved_regs == offsets->outgoing_args
20044 || frame_pointer_needed);
20048 /* Generate and emit a sequence of insns equivalent to PUSH, but using
20049 STR and STRD. If an even number of registers is being pushed, an
20050 STRD pattern is created for each register pair. If an
20051 odd number of registers is pushed, emit an initial STR followed by
20052 as many STRD instructions as are needed. This works best when the
20053 stack is initially 64-bit aligned (the normal case), since it
20054 ensures that each STRD is also 64-bit aligned. */
20055 static void
20056 thumb2_emit_strd_push (unsigned long saved_regs_mask)
20058 int num_regs = 0;
20059 int i;
20060 int regno;
20061 rtx par = NULL_RTX;
20062 rtx dwarf = NULL_RTX;
20063 rtx tmp;
20064 bool first = true;
20066 num_regs = bit_count (saved_regs_mask);
20068 /* Must be at least one register to save, and can't save SP or PC. */
20069 gcc_assert (num_regs > 0 && num_regs <= 14);
20070 gcc_assert (!(saved_regs_mask & (1 << SP_REGNUM)));
20071 gcc_assert (!(saved_regs_mask & (1 << PC_REGNUM)));
20073 /* Create sequence for DWARF info. All the frame-related data for
20074 debugging is held in this wrapper. */
20075 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_regs + 1));
20077 /* Describe the stack adjustment. */
20078 tmp = gen_rtx_SET (stack_pointer_rtx,
20079 plus_constant (Pmode, stack_pointer_rtx, -4 * num_regs));
20080 RTX_FRAME_RELATED_P (tmp) = 1;
20081 XVECEXP (dwarf, 0, 0) = tmp;
20083 /* Find the first register. */
20084 for (regno = 0; (saved_regs_mask & (1 << regno)) == 0; regno++)
20087 i = 0;
20089 /* If there's an odd number of registers to push, start off by
20090 pushing a single register. This ensures that subsequent strd
20091 operations are dword aligned (assuming that SP was originally
20092 64-bit aligned). */
20093 if ((num_regs & 1) != 0)
20095 rtx reg, mem, insn;
20097 reg = gen_rtx_REG (SImode, regno);
20098 if (num_regs == 1)
20099 mem = gen_frame_mem (Pmode, gen_rtx_PRE_DEC (Pmode,
20100 stack_pointer_rtx));
20101 else
20102 mem = gen_frame_mem (Pmode,
20103 gen_rtx_PRE_MODIFY
20104 (Pmode, stack_pointer_rtx,
20105 plus_constant (Pmode, stack_pointer_rtx,
20106 -4 * num_regs)));
20108 tmp = gen_rtx_SET (mem, reg);
20109 RTX_FRAME_RELATED_P (tmp) = 1;
20110 insn = emit_insn (tmp);
20111 RTX_FRAME_RELATED_P (insn) = 1;
20112 add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
20113 tmp = gen_rtx_SET (gen_frame_mem (Pmode, stack_pointer_rtx), reg);
20114 RTX_FRAME_RELATED_P (tmp) = 1;
20115 i++;
20116 regno++;
20117 XVECEXP (dwarf, 0, i) = tmp;
20118 first = false;
20121 while (i < num_regs)
20122 if (saved_regs_mask & (1 << regno))
20124 rtx reg1, reg2, mem1, mem2;
20125 rtx tmp0, tmp1, tmp2;
20126 int regno2;
20128 /* Find the register to pair with this one. */
20129 for (regno2 = regno + 1; (saved_regs_mask & (1 << regno2)) == 0;
20130 regno2++)
20133 reg1 = gen_rtx_REG (SImode, regno);
20134 reg2 = gen_rtx_REG (SImode, regno2);
20136 if (first)
20138 rtx insn;
20140 first = false;
20141 mem1 = gen_frame_mem (Pmode, plus_constant (Pmode,
20142 stack_pointer_rtx,
20143 -4 * num_regs));
20144 mem2 = gen_frame_mem (Pmode, plus_constant (Pmode,
20145 stack_pointer_rtx,
20146 -4 * (num_regs - 1)));
20147 tmp0 = gen_rtx_SET (stack_pointer_rtx,
20148 plus_constant (Pmode, stack_pointer_rtx,
20149 -4 * (num_regs)));
20150 tmp1 = gen_rtx_SET (mem1, reg1);
20151 tmp2 = gen_rtx_SET (mem2, reg2);
20152 RTX_FRAME_RELATED_P (tmp0) = 1;
20153 RTX_FRAME_RELATED_P (tmp1) = 1;
20154 RTX_FRAME_RELATED_P (tmp2) = 1;
20155 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (3));
20156 XVECEXP (par, 0, 0) = tmp0;
20157 XVECEXP (par, 0, 1) = tmp1;
20158 XVECEXP (par, 0, 2) = tmp2;
20159 insn = emit_insn (par);
20160 RTX_FRAME_RELATED_P (insn) = 1;
20161 add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
20163 else
20165 mem1 = gen_frame_mem (Pmode, plus_constant (Pmode,
20166 stack_pointer_rtx,
20167 4 * i));
20168 mem2 = gen_frame_mem (Pmode, plus_constant (Pmode,
20169 stack_pointer_rtx,
20170 4 * (i + 1)));
20171 tmp1 = gen_rtx_SET (mem1, reg1);
20172 tmp2 = gen_rtx_SET (mem2, reg2);
20173 RTX_FRAME_RELATED_P (tmp1) = 1;
20174 RTX_FRAME_RELATED_P (tmp2) = 1;
20175 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
20176 XVECEXP (par, 0, 0) = tmp1;
20177 XVECEXP (par, 0, 1) = tmp2;
20178 emit_insn (par);
20181 /* Create unwind information. This is an approximation. */
20182 tmp1 = gen_rtx_SET (gen_frame_mem (Pmode,
20183 plus_constant (Pmode,
20184 stack_pointer_rtx,
20185 4 * i)),
20186 reg1);
20187 tmp2 = gen_rtx_SET (gen_frame_mem (Pmode,
20188 plus_constant (Pmode,
20189 stack_pointer_rtx,
20190 4 * (i + 1))),
20191 reg2);
20193 RTX_FRAME_RELATED_P (tmp1) = 1;
20194 RTX_FRAME_RELATED_P (tmp2) = 1;
20195 XVECEXP (dwarf, 0, i + 1) = tmp1;
20196 XVECEXP (dwarf, 0, i + 2) = tmp2;
20197 i += 2;
20198 regno = regno2 + 1;
20200 else
20201 regno++;
20203 return;
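/* Illustrative sequence (register numbers arbitrary): pushing {r4, r5, r6},
   an odd count, first emits a single store with writeback and then one
   dword-aligned pair store, which the backend patterns normally render as
       str     r4, [sp, #-12]!
       strd    r5, r6, [sp, #4]   */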
20206 /* STRD in ARM mode requires consecutive registers. This function emits STRD
20207 whenever possible, otherwise it emits single-word stores. The first store
20208 also allocates stack space for all saved registers, using writeback with
20209 pre-indexed addressing. All other stores use offset addressing. If no STRD
20210 can be emitted, this function emits a sequence of single-word stores,
20211 and not an STM as before, because single-word stores give the scheduler more
20212 freedom and can be turned into an STM by peephole optimizations. */
20213 static void
20214 arm_emit_strd_push (unsigned long saved_regs_mask)
20216 int num_regs = 0;
20217 int i, j, dwarf_index = 0;
20218 int offset = 0;
20219 rtx dwarf = NULL_RTX;
20220 rtx insn = NULL_RTX;
20221 rtx tmp, mem;
20223 /* TODO: More efficient code can be emitted by changing the
20224 layout, e.g., first push all pairs that can use STRD to keep the
20225 stack aligned, and then push all other registers. */
20226 for (i = 0; i <= LAST_ARM_REGNUM; i++)
20227 if (saved_regs_mask & (1 << i))
20228 num_regs++;
20230 gcc_assert (!(saved_regs_mask & (1 << SP_REGNUM)));
20231 gcc_assert (!(saved_regs_mask & (1 << PC_REGNUM)));
20232 gcc_assert (num_regs > 0);
20234 /* Create sequence for DWARF info. */
20235 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_regs + 1));
20237 /* For dwarf info, we generate explicit stack update. */
20238 tmp = gen_rtx_SET (stack_pointer_rtx,
20239 plus_constant (Pmode, stack_pointer_rtx, -4 * num_regs));
20240 RTX_FRAME_RELATED_P (tmp) = 1;
20241 XVECEXP (dwarf, 0, dwarf_index++) = tmp;
20243 /* Save registers. */
20244 offset = - 4 * num_regs;
20245 j = 0;
20246 while (j <= LAST_ARM_REGNUM)
20247 if (saved_regs_mask & (1 << j))
20249 if ((j % 2 == 0)
20250 && (saved_regs_mask & (1 << (j + 1))))
20252 /* The current register and the next register form a register pair for
20253 which STRD can be generated. */
20254 if (offset < 0)
20256 /* Allocate stack space for all saved registers. */
20257 tmp = plus_constant (Pmode, stack_pointer_rtx, offset);
20258 tmp = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, tmp);
20259 mem = gen_frame_mem (DImode, tmp);
20260 offset = 0;
20262 else if (offset > 0)
20263 mem = gen_frame_mem (DImode,
20264 plus_constant (Pmode,
20265 stack_pointer_rtx,
20266 offset));
20267 else
20268 mem = gen_frame_mem (DImode, stack_pointer_rtx);
20270 tmp = gen_rtx_SET (mem, gen_rtx_REG (DImode, j));
20271 RTX_FRAME_RELATED_P (tmp) = 1;
20272 tmp = emit_insn (tmp);
20274 /* Record the first store insn. */
20275 if (dwarf_index == 1)
20276 insn = tmp;
20278 /* Generate dwarf info. */
20279 mem = gen_frame_mem (SImode,
20280 plus_constant (Pmode,
20281 stack_pointer_rtx,
20282 offset));
20283 tmp = gen_rtx_SET (mem, gen_rtx_REG (SImode, j));
20284 RTX_FRAME_RELATED_P (tmp) = 1;
20285 XVECEXP (dwarf, 0, dwarf_index++) = tmp;
20287 mem = gen_frame_mem (SImode,
20288 plus_constant (Pmode,
20289 stack_pointer_rtx,
20290 offset + 4));
20291 tmp = gen_rtx_SET (mem, gen_rtx_REG (SImode, j + 1));
20292 RTX_FRAME_RELATED_P (tmp) = 1;
20293 XVECEXP (dwarf, 0, dwarf_index++) = tmp;
20295 offset += 8;
20296 j += 2;
20298 else
20300 /* Emit a single word store. */
20301 if (offset < 0)
20303 /* Allocate stack space for all saved registers. */
20304 tmp = plus_constant (Pmode, stack_pointer_rtx, offset);
20305 tmp = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, tmp);
20306 mem = gen_frame_mem (SImode, tmp);
20307 offset = 0;
20309 else if (offset > 0)
20310 mem = gen_frame_mem (SImode,
20311 plus_constant (Pmode,
20312 stack_pointer_rtx,
20313 offset));
20314 else
20315 mem = gen_frame_mem (SImode, stack_pointer_rtx);
20317 tmp = gen_rtx_SET (mem, gen_rtx_REG (SImode, j));
20318 RTX_FRAME_RELATED_P (tmp) = 1;
20319 tmp = emit_insn (tmp);
20321 /* Record the first store insn. */
20322 if (dwarf_index == 1)
20323 insn = tmp;
20325 /* Generate dwarf info. */
20326 mem = gen_frame_mem (SImode,
20327 plus_constant (Pmode,
20328 stack_pointer_rtx,
20329 offset));
20330 tmp = gen_rtx_SET (mem, gen_rtx_REG (SImode, j));
20331 RTX_FRAME_RELATED_P (tmp) = 1;
20332 XVECEXP (dwarf, 0, dwarf_index++) = tmp;
20334 offset += 4;
20335 j += 1;
20338 else
20339 j++;
20341 /* Attach dwarf info to the first insn we generate. */
20342 gcc_assert (insn != NULL_RTX);
20343 add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
20344 RTX_FRAME_RELATED_P (insn) = 1;
20347 /* Generate and emit an insn that we will recognize as a push_multi.
20348 Unfortunately, since this insn does not reflect very well the actual
20349 semantics of the operation, we need to annotate the insn for the benefit
20350 of DWARF2 frame unwind information. DWARF_REGS_MASK is a subset of
20351 MASK for registers that should be annotated for DWARF2 frame unwind
20352 information. */
20353 static rtx
20354 emit_multi_reg_push (unsigned long mask, unsigned long dwarf_regs_mask)
20356 int num_regs = 0;
20357 int num_dwarf_regs = 0;
20358 int i, j;
20359 rtx par;
20360 rtx dwarf;
20361 int dwarf_par_index;
20362 rtx tmp, reg;
20364 /* We don't record the PC in the dwarf frame information. */
20365 dwarf_regs_mask &= ~(1 << PC_REGNUM);
20367 for (i = 0; i <= LAST_ARM_REGNUM; i++)
20369 if (mask & (1 << i))
20370 num_regs++;
20371 if (dwarf_regs_mask & (1 << i))
20372 num_dwarf_regs++;
20375 gcc_assert (num_regs && num_regs <= 16);
20376 gcc_assert ((dwarf_regs_mask & ~mask) == 0);
20378 /* For the body of the insn we are going to generate an UNSPEC in
20379 parallel with several USEs. This allows the insn to be recognized
20380 by the push_multi pattern in the arm.md file.
20382 The body of the insn looks something like this:
20384 (parallel [
20385 (set (mem:BLK (pre_modify:SI (reg:SI sp)
20386 (const_int:SI <num>)))
20387 (unspec:BLK [(reg:SI r4)] UNSPEC_PUSH_MULT))
20388 (use (reg:SI XX))
20389 (use (reg:SI YY))
20393 For the frame note however, we try to be more explicit and actually
20394 show each register being stored into the stack frame, plus a (single)
20395 decrement of the stack pointer. We do it this way in order to be
20396 friendly to the stack unwinding code, which only wants to see a single
20397 stack decrement per instruction. The RTL we generate for the note looks
20398 something like this:
20400 (sequence [
20401 (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -20)))
20402 (set (mem:SI (reg:SI sp)) (reg:SI r4))
20403 (set (mem:SI (plus:SI (reg:SI sp) (const_int 4))) (reg:SI XX))
20404 (set (mem:SI (plus:SI (reg:SI sp) (const_int 8))) (reg:SI YY))
20408 FIXME: In an ideal world the PRE_MODIFY would not exist and
20409 instead we'd have a parallel expression detailing all
20410 the stores to the various memory addresses so that debug
20411 information is more up-to-date. Remember however while writing
20412 this to take care of the constraints with the push instruction.
20414 Note also that this has to be taken care of for the VFP registers.
20416 For more see PR43399. */
20418 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs));
20419 dwarf = gen_rtx_SEQUENCE (VOIDmode, rtvec_alloc (num_dwarf_regs + 1));
20420 dwarf_par_index = 1;
20422 for (i = 0; i <= LAST_ARM_REGNUM; i++)
20424 if (mask & (1 << i))
20426 reg = gen_rtx_REG (SImode, i);
20428 XVECEXP (par, 0, 0)
20429 = gen_rtx_SET (gen_frame_mem
20430 (BLKmode,
20431 gen_rtx_PRE_MODIFY (Pmode,
20432 stack_pointer_rtx,
20433 plus_constant
20434 (Pmode, stack_pointer_rtx,
20435 -4 * num_regs))
20437 gen_rtx_UNSPEC (BLKmode,
20438 gen_rtvec (1, reg),
20439 UNSPEC_PUSH_MULT));
20441 if (dwarf_regs_mask & (1 << i))
20443 tmp = gen_rtx_SET (gen_frame_mem (SImode, stack_pointer_rtx),
20444 reg);
20445 RTX_FRAME_RELATED_P (tmp) = 1;
20446 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
20449 break;
20453 for (j = 1, i++; j < num_regs; i++)
20455 if (mask & (1 << i))
20457 reg = gen_rtx_REG (SImode, i);
20459 XVECEXP (par, 0, j) = gen_rtx_USE (VOIDmode, reg);
20461 if (dwarf_regs_mask & (1 << i))
20464 = gen_rtx_SET (gen_frame_mem
20465 (SImode,
20466 plus_constant (Pmode, stack_pointer_rtx,
20467 4 * j)),
20468 reg);
20469 RTX_FRAME_RELATED_P (tmp) = 1;
20470 XVECEXP (dwarf, 0, dwarf_par_index++) = tmp;
20473 j++;
20477 par = emit_insn (par);
20479 tmp = gen_rtx_SET (stack_pointer_rtx,
20480 plus_constant (Pmode, stack_pointer_rtx, -4 * num_regs));
20481 RTX_FRAME_RELATED_P (tmp) = 1;
20482 XVECEXP (dwarf, 0, 0) = tmp;
20484 add_reg_note (par, REG_FRAME_RELATED_EXPR, dwarf);
20486 return par;
20489 /* Add a REG_CFA_ADJUST_CFA REG note to INSN.
20490 SIZE is the offset to be adjusted.
20491 DEST and SRC might be stack_pointer_rtx or hard_frame_pointer_rtx. */
20492 static void
20493 arm_add_cfa_adjust_cfa_note (rtx insn, int size, rtx dest, rtx src)
20495 rtx dwarf;
20497 RTX_FRAME_RELATED_P (insn) = 1;
20498 dwarf = gen_rtx_SET (dest, plus_constant (Pmode, src, size));
20499 add_reg_note (insn, REG_CFA_ADJUST_CFA, dwarf);
20502 /* Generate and emit an insn pattern that we will recognize as a pop_multi.
20503 SAVED_REGS_MASK shows which registers need to be restored.
20505 Unfortunately, since this insn does not reflect very well the actual
20506 semantics of the operation, we need to annotate the insn for the benefit
20507 of DWARF2 frame unwind information. */
20508 static void
20509 arm_emit_multi_reg_pop (unsigned long saved_regs_mask)
20511 int num_regs = 0;
20512 int i, j;
20513 rtx par;
20514 rtx dwarf = NULL_RTX;
20515 rtx tmp, reg;
20516 bool return_in_pc = saved_regs_mask & (1 << PC_REGNUM);
20517 int offset_adj;
20518 int emit_update;
20520 offset_adj = return_in_pc ? 1 : 0;
20521 for (i = 0; i <= LAST_ARM_REGNUM; i++)
20522 if (saved_regs_mask & (1 << i))
20523 num_regs++;
20525 gcc_assert (num_regs && num_regs <= 16);
20527 /* If SP is in the reglist, then we don't emit the SP update insn. */
20528 emit_update = (saved_regs_mask & (1 << SP_REGNUM)) ? 0 : 1;
20530 /* The parallel needs to hold num_regs SETs
20531 and one SET for the stack update. */
20532 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs + emit_update + offset_adj));
20534 if (return_in_pc)
20535 XVECEXP (par, 0, 0) = ret_rtx;
20537 if (emit_update)
20539 /* Increment the stack pointer, based on there being
20540 num_regs 4-byte registers to restore. */
20541 tmp = gen_rtx_SET (stack_pointer_rtx,
20542 plus_constant (Pmode,
20543 stack_pointer_rtx,
20544 4 * num_regs));
20545 RTX_FRAME_RELATED_P (tmp) = 1;
20546 XVECEXP (par, 0, offset_adj) = tmp;
20549 /* Now restore every reg, which may include PC. */
20550 for (j = 0, i = 0; j < num_regs; i++)
20551 if (saved_regs_mask & (1 << i))
20553 reg = gen_rtx_REG (SImode, i);
20554 if ((num_regs == 1) && emit_update && !return_in_pc)
20556 /* Emit single load with writeback. */
20557 tmp = gen_frame_mem (SImode,
20558 gen_rtx_POST_INC (Pmode,
20559 stack_pointer_rtx));
20560 tmp = emit_insn (gen_rtx_SET (reg, tmp));
20561 REG_NOTES (tmp) = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
20562 return;
20565 tmp = gen_rtx_SET (reg,
20566 gen_frame_mem
20567 (SImode,
20568 plus_constant (Pmode, stack_pointer_rtx, 4 * j)));
20569 RTX_FRAME_RELATED_P (tmp) = 1;
20570 XVECEXP (par, 0, j + emit_update + offset_adj) = tmp;
20572 /* We need to maintain a sequence for DWARF info too. As dwarf info
20573 should not have PC, skip PC. */
20574 if (i != PC_REGNUM)
20575 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
20577 j++;
20580 if (return_in_pc)
20581 par = emit_jump_insn (par);
20582 else
20583 par = emit_insn (par);
20585 REG_NOTES (par) = dwarf;
20586 if (!return_in_pc)
20587 arm_add_cfa_adjust_cfa_note (par, UNITS_PER_WORD * num_regs,
20588 stack_pointer_rtx, stack_pointer_rtx);
20591 /* Generate and emit an insn pattern that we will recognize as a pop_multi
20592 of NUM_REGS consecutive VFP regs, starting at FIRST_REG.
20594 Unfortunately, since this insn does not reflect very well the actual
20595 semantics of the operation, we need to annotate the insn for the benefit
20596 of DWARF2 frame unwind information. */
20597 static void
20598 arm_emit_vfp_multi_reg_pop (int first_reg, int num_regs, rtx base_reg)
20600 int i, j;
20601 rtx par;
20602 rtx dwarf = NULL_RTX;
20603 rtx tmp, reg;
20605 gcc_assert (num_regs && num_regs <= 32);
20607 /* Workaround ARM10 VFPr1 bug. */
20608 if (num_regs == 2 && !arm_arch6)
20610 if (first_reg == 15)
20611 first_reg--;
20613 num_regs++;
20616 /* We can emit at most 16 D-registers in a single pop_multi instruction, and
20617 there could be up to 32 D-registers to restore.
20618 If there are more than 16 D-registers, make two recursive calls,
20619 each of which emits one pop_multi instruction. */
20620 if (num_regs > 16)
20622 arm_emit_vfp_multi_reg_pop (first_reg, 16, base_reg);
20623 arm_emit_vfp_multi_reg_pop (first_reg + 16, num_regs - 16, base_reg);
20624 return;
20627 /* The parallel needs to hold num_regs SETs
20628 and one SET for the stack update. */
20629 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (num_regs + 1));
20631 /* Increment the stack pointer, based on there being
20632 num_regs 8-byte registers to restore. */
20633 tmp = gen_rtx_SET (base_reg, plus_constant (Pmode, base_reg, 8 * num_regs));
20634 RTX_FRAME_RELATED_P (tmp) = 1;
20635 XVECEXP (par, 0, 0) = tmp;
20637 /* Now show every reg that will be restored, using a SET for each. */
20638 for (j = 0, i = first_reg; j < num_regs; i += 2)
20640 reg = gen_rtx_REG (DFmode, i);
20642 tmp = gen_rtx_SET (reg,
20643 gen_frame_mem
20644 (DFmode,
20645 plus_constant (Pmode, base_reg, 8 * j)));
20646 RTX_FRAME_RELATED_P (tmp) = 1;
20647 XVECEXP (par, 0, j + 1) = tmp;
20649 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
20651 j++;
20654 par = emit_insn (par);
20655 REG_NOTES (par) = dwarf;
20657 /* Make sure the CFA isn't left based on IP_REGNUM, to allow unwinding from FP. */
20658 if (REGNO (base_reg) == IP_REGNUM)
20660 RTX_FRAME_RELATED_P (par) = 1;
20661 add_reg_note (par, REG_CFA_DEF_CFA, hard_frame_pointer_rtx);
20663 else
20664 arm_add_cfa_adjust_cfa_note (par, 2 * UNITS_PER_WORD * num_regs,
20665 base_reg, base_reg);
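/* Each pop_multi emitted above advances BASE_REG by 8 bytes per D register
   restored, so when more than 16 registers are involved the second recursive
   call naturally continues from the already-advanced base.  */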
20668 /* Generate and emit a pattern that will be recognized as an LDRD pattern. If an
20669 even number of registers is being popped, multiple LDRD patterns are created for
20670 all register pairs. If an odd number of registers is popped, the last register is
20671 loaded using an LDR pattern. */
20672 static void
20673 thumb2_emit_ldrd_pop (unsigned long saved_regs_mask)
20675 int num_regs = 0;
20676 int i, j;
20677 rtx par = NULL_RTX;
20678 rtx dwarf = NULL_RTX;
20679 rtx tmp, reg, tmp1;
20680 bool return_in_pc = saved_regs_mask & (1 << PC_REGNUM);
20682 for (i = 0; i <= LAST_ARM_REGNUM; i++)
20683 if (saved_regs_mask & (1 << i))
20684 num_regs++;
20686 gcc_assert (num_regs && num_regs <= 16);
20688 /* We cannot generate ldrd for PC. Hence, reduce the count if PC is
20689 to be popped. So, if num_regs is even, now it will become odd,
20690 and we can generate pop with PC. If num_regs is odd, it will be
20691 even now, and ldr with return can be generated for PC. */
20692 if (return_in_pc)
20693 num_regs--;
20695 gcc_assert (!(saved_regs_mask & (1 << SP_REGNUM)));
20697 /* Var j iterates over all the registers to gather all the registers in
20698 saved_regs_mask. Var i gives index of saved registers in stack frame.
20699 A PARALLEL RTX of register-pair is created here, so that pattern for
20700 LDRD can be matched. As PC is always the last register to be popped, and
20701 we have already decremented num_regs if PC is to be popped, we don't have
20702 to worry about PC in this loop. */
20703 for (i = 0, j = 0; i < (num_regs - (num_regs % 2)); j++)
20704 if (saved_regs_mask & (1 << j))
20706 /* Create RTX for memory load. */
20707 reg = gen_rtx_REG (SImode, j);
20708 tmp = gen_rtx_SET (reg,
20709 gen_frame_mem (SImode,
20710 plus_constant (Pmode,
20711 stack_pointer_rtx, 4 * i)));
20712 RTX_FRAME_RELATED_P (tmp) = 1;
20714 if (i % 2 == 0)
20716 /* When saved-register index (i) is even, the RTX to be emitted is
20717 yet to be created. Hence create it first. The LDRD pattern we
20718 are generating is :
20719 [ (SET (reg_t0) (MEM (PLUS (SP) (NUM))))
20720 (SET (reg_t1) (MEM (PLUS (SP) (NUM + 4)))) ]
20721 where target registers need not be consecutive. */
20722 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
20723 dwarf = NULL_RTX;
20726 /* ith register is added in PARALLEL RTX. If i is even, the reg_i is
20727 added as 0th element and if i is odd, reg_i is added as 1st element
20728 of LDRD pattern shown above. */
20729 XVECEXP (par, 0, (i % 2)) = tmp;
20730 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
20732 if ((i % 2) == 1)
20734 /* When saved-register index (i) is odd, RTXs for both the registers
20735 to be loaded are generated in above given LDRD pattern, and the
20736 pattern can be emitted now. */
20737 par = emit_insn (par);
20738 REG_NOTES (par) = dwarf;
20739 RTX_FRAME_RELATED_P (par) = 1;
20742 i++;
20745 /* If the number of registers popped is odd AND return_in_pc is false, OR the
20746 number of registers is even AND return_in_pc is true, the last register is
20747 popped using LDR. It can be PC as well. Hence, adjust the stack first and
20748 then load it with post-increment. */
20750 /* Increment the stack pointer, based on there being
20751 num_regs 4-byte registers to restore. */
20752 tmp = gen_rtx_SET (stack_pointer_rtx,
20753 plus_constant (Pmode, stack_pointer_rtx, 4 * i));
20754 RTX_FRAME_RELATED_P (tmp) = 1;
20755 tmp = emit_insn (tmp);
20756 if (!return_in_pc)
20758 arm_add_cfa_adjust_cfa_note (tmp, UNITS_PER_WORD * i,
20759 stack_pointer_rtx, stack_pointer_rtx);
20762 dwarf = NULL_RTX;
20764 if (((num_regs % 2) == 1 && !return_in_pc)
20765 || ((num_regs % 2) == 0 && return_in_pc))
20767 /* Scan for the single register to be popped. Skip until the saved
20768 register is found. */
20769 for (; (saved_regs_mask & (1 << j)) == 0; j++);
20771 /* Generate LDR with post-increment here. */
20772 tmp1 = gen_rtx_MEM (SImode,
20773 gen_rtx_POST_INC (SImode,
20774 stack_pointer_rtx));
20775 set_mem_alias_set (tmp1, get_frame_alias_set ());
20777 reg = gen_rtx_REG (SImode, j);
20778 tmp = gen_rtx_SET (reg, tmp1);
20779 RTX_FRAME_RELATED_P (tmp) = 1;
20780 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
20782 if (return_in_pc)
20784 /* If return_in_pc, j must be PC_REGNUM. */
20785 gcc_assert (j == PC_REGNUM);
20786 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
20787 XVECEXP (par, 0, 0) = ret_rtx;
20788 XVECEXP (par, 0, 1) = tmp;
20789 par = emit_jump_insn (par);
20791 else
20793 par = emit_insn (tmp);
20794 REG_NOTES (par) = dwarf;
20795 arm_add_cfa_adjust_cfa_note (par, UNITS_PER_WORD,
20796 stack_pointer_rtx, stack_pointer_rtx);
20800 else if ((num_regs % 2) == 1 && return_in_pc)
20802 /* There are 2 registers to be popped. So, generate the pattern
20803 pop_multiple_with_stack_update_and_return to pop in PC. */
20804 arm_emit_multi_reg_pop (saved_regs_mask & (~((1 << j) - 1)));
20807 return;
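/* Illustrative sequence (register numbers arbitrary): popping {r4, r5, r6, pc}
   becomes an LDRD-shaped parallel loading r4/r5 from [sp] and [sp, #4], an
   "add sp, sp, #8" adjustment, and finally a pop of {r6, pc} emitted through
   arm_emit_multi_reg_pop.  */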
20810 /* LDRD in ARM mode needs consecutive registers as operands. This function
20811 emits LDRD whenever possible, otherwise it emits single-word loads. It uses
20812 offset addressing and then generates one separate stack update. This provides
20813 more scheduling freedom, compared to writeback on every load. However,
20814 if the function returns using load into PC directly
20815 (i.e., if PC is in SAVED_REGS_MASK), the stack needs to be updated
20816 before the last load. TODO: Add a peephole optimization to recognize
20817 the new epilogue sequence as an LDM instruction whenever possible. TODO: Add
20818 peephole optimization to merge the load at stack-offset zero
20819 with the stack update instruction using load with writeback
20820 in post-index addressing mode. */
20821 static void
20822 arm_emit_ldrd_pop (unsigned long saved_regs_mask)
20824 int j = 0;
20825 int offset = 0;
20826 rtx par = NULL_RTX;
20827 rtx dwarf = NULL_RTX;
20828 rtx tmp, mem;
20830 /* Restore saved registers. */
20831 gcc_assert (!((saved_regs_mask & (1 << SP_REGNUM))));
20832 j = 0;
20833 while (j <= LAST_ARM_REGNUM)
20834 if (saved_regs_mask & (1 << j))
20836 if ((j % 2) == 0
20837 && (saved_regs_mask & (1 << (j + 1)))
20838 && (j + 1) != PC_REGNUM)
20840 /* The current register and the next register form a register pair for which
20841 LDRD can be generated. PC is always the last register popped, and
20842 we handle it separately. */
20843 if (offset > 0)
20844 mem = gen_frame_mem (DImode,
20845 plus_constant (Pmode,
20846 stack_pointer_rtx,
20847 offset));
20848 else
20849 mem = gen_frame_mem (DImode, stack_pointer_rtx);
20851 tmp = gen_rtx_SET (gen_rtx_REG (DImode, j), mem);
20852 tmp = emit_insn (tmp);
20853 RTX_FRAME_RELATED_P (tmp) = 1;
20855 /* Generate dwarf info. */
20857 dwarf = alloc_reg_note (REG_CFA_RESTORE,
20858 gen_rtx_REG (SImode, j),
20859 NULL_RTX);
20860 dwarf = alloc_reg_note (REG_CFA_RESTORE,
20861 gen_rtx_REG (SImode, j + 1),
20862 dwarf);
20864 REG_NOTES (tmp) = dwarf;
20866 offset += 8;
20867 j += 2;
20869 else if (j != PC_REGNUM)
20871 /* Emit a single word load. */
20872 if (offset > 0)
20873 mem = gen_frame_mem (SImode,
20874 plus_constant (Pmode,
20875 stack_pointer_rtx,
20876 offset));
20877 else
20878 mem = gen_frame_mem (SImode, stack_pointer_rtx);
20880 tmp = gen_rtx_SET (gen_rtx_REG (SImode, j), mem);
20881 tmp = emit_insn (tmp);
20882 RTX_FRAME_RELATED_P (tmp) = 1;
20884 /* Generate dwarf info. */
20885 REG_NOTES (tmp) = alloc_reg_note (REG_CFA_RESTORE,
20886 gen_rtx_REG (SImode, j),
20887 NULL_RTX);
20889 offset += 4;
20890 j += 1;
20892 else /* j == PC_REGNUM */
20893 j++;
20895 else
20896 j++;
20898 /* Update the stack. */
20899 if (offset > 0)
20901 tmp = gen_rtx_SET (stack_pointer_rtx,
20902 plus_constant (Pmode,
20903 stack_pointer_rtx,
20904 offset));
20905 tmp = emit_insn (tmp);
20906 arm_add_cfa_adjust_cfa_note (tmp, offset,
20907 stack_pointer_rtx, stack_pointer_rtx);
20908 offset = 0;
20911 if (saved_regs_mask & (1 << PC_REGNUM))
20913 /* Only PC is to be popped. */
20914 par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
20915 XVECEXP (par, 0, 0) = ret_rtx;
20916 tmp = gen_rtx_SET (gen_rtx_REG (SImode, PC_REGNUM),
20917 gen_frame_mem (SImode,
20918 gen_rtx_POST_INC (SImode,
20919 stack_pointer_rtx)));
20920 RTX_FRAME_RELATED_P (tmp) = 1;
20921 XVECEXP (par, 0, 1) = tmp;
20922 par = emit_jump_insn (par);
20924 /* Generate dwarf info. */
20925 dwarf = alloc_reg_note (REG_CFA_RESTORE,
20926 gen_rtx_REG (SImode, PC_REGNUM),
20927 NULL_RTX);
20928 REG_NOTES (par) = dwarf;
20929 arm_add_cfa_adjust_cfa_note (par, UNITS_PER_WORD,
20930 stack_pointer_rtx, stack_pointer_rtx);
20934 /* Calculate the size of the return value that is passed in registers. */
20935 static unsigned
20936 arm_size_return_regs (void)
20938 machine_mode mode;
20940 if (crtl->return_rtx != 0)
20941 mode = GET_MODE (crtl->return_rtx);
20942 else
20943 mode = DECL_MODE (DECL_RESULT (current_function_decl));
20945 return GET_MODE_SIZE (mode);
20948 /* Return true if the current function needs to save/restore LR. */
20949 static bool
20950 thumb_force_lr_save (void)
20952 return !cfun->machine->lr_save_eliminated
20953 && (!crtl->is_leaf
20954 || thumb_far_jump_used_p ()
20955 || df_regs_ever_live_p (LR_REGNUM));
20958 /* We do not know whether r3 will be available, because an
20959 indirect tail call may be happening in this particular
20960 case. */
20961 static bool
20962 is_indirect_tailcall_p (rtx call)
20964 rtx pat = PATTERN (call);
20966 /* Indirect tail call. */
20967 pat = XVECEXP (pat, 0, 0);
20968 if (GET_CODE (pat) == SET)
20969 pat = SET_SRC (pat);
20971 pat = XEXP (XEXP (pat, 0), 0);
20972 return REG_P (pat);
20975 /* Return true if r3 is used by any of the tail call insns in the
20976 current function. */
20977 static bool
20978 any_sibcall_could_use_r3 (void)
20980 edge_iterator ei;
20981 edge e;
20983 if (!crtl->tail_call_emit)
20984 return false;
20985 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
20986 if (e->flags & EDGE_SIBCALL)
20988 rtx_insn *call = BB_END (e->src);
20989 if (!CALL_P (call))
20990 call = prev_nonnote_nondebug_insn (call);
20991 gcc_assert (CALL_P (call) && SIBLING_CALL_P (call));
20992 if (find_regno_fusage (call, USE, 3)
20993 || is_indirect_tailcall_p (call))
20994 return true;
20996 return false;
21000 /* Compute the distance from register FROM to register TO.
21001 These can be the arg pointer (26), the soft frame pointer (25),
21002 the stack pointer (13) or the hard frame pointer (11).
21003 In thumb mode r7 is used as the soft frame pointer, if needed.
21004 Typical stack layout looks like this:
21006 old stack pointer -> | |
21007 ----
21008 | | \
21009 | | saved arguments for
21010 | | vararg functions
21011 | | /
21013 hard FP & arg pointer -> | | \
21014 | | stack
21015 | | frame
21016 | | /
21018 | | \
21019 | | call saved
21020 | | registers
21021 soft frame pointer -> | | /
21023 | | \
21024 | | local
21025 | | variables
21026 locals base pointer -> | | /
21028 | | \
21029 | | outgoing
21030 | | arguments
21031 current stack pointer -> | | /
21034 For a given function some or all of these stack components
21035 may not be needed, giving rise to the possibility of
21036 eliminating some of the registers.
21038 The values returned by this function must reflect the behavior
21039 of arm_expand_prologue () and arm_compute_save_core_reg_mask ().
21041 The sign of the number returned reflects the direction of stack
21042 growth, so the values are positive for all eliminations except
21043 from the soft frame pointer to the hard frame pointer.
21045 SFP may point just inside the local variables block to ensure correct
21046 alignment. */
21049 /* Return cached stack offsets. */
21051 static arm_stack_offsets *
21052 arm_get_frame_offsets (void)
21054 struct arm_stack_offsets *offsets;
21056 offsets = &cfun->machine->stack_offsets;
21058 return offsets;
21062 /* Calculate stack offsets. These are used to calculate register elimination
21063 offsets and in prologue/epilogue code. Also calculates which registers
21064 should be saved. */
21066 static void
21067 arm_compute_frame_layout (void)
21069 struct arm_stack_offsets *offsets;
21070 unsigned long func_type;
21071 int saved;
21072 int core_saved;
21073 HOST_WIDE_INT frame_size;
21074 int i;
21076 offsets = &cfun->machine->stack_offsets;
21078 /* Initially this is the size of the local variables. It will be translated
21079 into an offset once we have determined the size of preceding data. */
21080 frame_size = ROUND_UP_WORD (get_frame_size ());
21082 /* Space for variadic functions. */
21083 offsets->saved_args = crtl->args.pretend_args_size;
21085 /* In Thumb mode this is incorrect, but never used. */
21086 offsets->frame
21087 = (offsets->saved_args
21088 + arm_compute_static_chain_stack_bytes ()
21089 + (frame_pointer_needed ? 4 : 0));
21091 if (TARGET_32BIT)
21093 unsigned int regno;
21095 offsets->saved_regs_mask = arm_compute_save_core_reg_mask ();
21096 core_saved = bit_count (offsets->saved_regs_mask) * 4;
21097 saved = core_saved;
21099 /* We know that SP will be doubleword aligned on entry, and we must
21100 preserve that condition at any subroutine call. We also require the
21101 soft frame pointer to be doubleword aligned. */
21103 if (TARGET_REALLY_IWMMXT)
21105 /* Check for the call-saved iWMMXt registers. */
21106 for (regno = FIRST_IWMMXT_REGNUM;
21107 regno <= LAST_IWMMXT_REGNUM;
21108 regno++)
21109 if (df_regs_ever_live_p (regno) && ! call_used_regs[regno])
21110 saved += 8;
21113 func_type = arm_current_func_type ();
21114 /* Space for saved VFP registers. */
21115 if (! IS_VOLATILE (func_type)
21116 && TARGET_HARD_FLOAT)
21117 saved += arm_get_vfp_saved_size ();
21119 else /* TARGET_THUMB1 */
21121 offsets->saved_regs_mask = thumb1_compute_save_core_reg_mask ();
21122 core_saved = bit_count (offsets->saved_regs_mask) * 4;
21123 saved = core_saved;
21124 if (TARGET_BACKTRACE)
21125 saved += 16;
21128 /* Saved registers include the stack frame. */
21129 offsets->saved_regs
21130 = offsets->saved_args + arm_compute_static_chain_stack_bytes () + saved;
21131 offsets->soft_frame = offsets->saved_regs + CALLER_INTERWORKING_SLOT_SIZE;
21133 /* A leaf function does not need any stack alignment if it has nothing
21134 on the stack. */
21135 if (crtl->is_leaf && frame_size == 0
21136 /* However if it calls alloca(), we have a dynamically allocated
21137 block of BIGGEST_ALIGNMENT on stack, so still do stack alignment. */
21138 && ! cfun->calls_alloca)
21140 offsets->outgoing_args = offsets->soft_frame;
21141 offsets->locals_base = offsets->soft_frame;
21142 return;
21145 /* Ensure SFP has the correct alignment. */
21146 if (ARM_DOUBLEWORD_ALIGN
21147 && (offsets->soft_frame & 7))
21149 offsets->soft_frame += 4;
21150 /* Try to align stack by pushing an extra reg. Don't bother doing this
21151 when there is a stack frame as the alignment will be rolled into
21152 the normal stack adjustment. */
21153 if (frame_size + crtl->outgoing_args_size == 0)
21155 int reg = -1;
21157 /* Register r3 is caller-saved. Normally it does not need to be
21158 saved on entry by the prologue. However if we choose to save
21159 it for padding then we may confuse the compiler into thinking
21160 a prologue sequence is required when in fact it is not. This
21161 will occur when shrink-wrapping if r3 is used as a scratch
21162 register and there are no other callee-saved writes.
21164 This situation can be avoided when other callee-saved registers
21165 are available and r3 is not mandatory if we choose a callee-saved
21166 register for padding. */
21167 bool prefer_callee_reg_p = false;
21169 /* If it is safe to use r3, then do so. This sometimes
21170 generates better code on Thumb-2 by avoiding the need to
21171 use 32-bit push/pop instructions. */
21172 if (! any_sibcall_could_use_r3 ()
21173 && arm_size_return_regs () <= 12
21174 && (offsets->saved_regs_mask & (1 << 3)) == 0
21175 && (TARGET_THUMB2
21176 || !(TARGET_LDRD && current_tune->prefer_ldrd_strd)))
21178 reg = 3;
21179 if (!TARGET_THUMB2)
21180 prefer_callee_reg_p = true;
21182 if (reg == -1
21183 || prefer_callee_reg_p)
21185 for (i = 4; i <= (TARGET_THUMB1 ? LAST_LO_REGNUM : 11); i++)
21187 /* Avoid fixed registers; they may be changed at
21188 arbitrary times so it's unsafe to restore them
21189 during the epilogue. */
21190 if (!fixed_regs[i]
21191 && (offsets->saved_regs_mask & (1 << i)) == 0)
21193 reg = i;
21194 break;
21199 if (reg != -1)
21201 offsets->saved_regs += 4;
21202 offsets->saved_regs_mask |= (1 << reg);
21207 offsets->locals_base = offsets->soft_frame + frame_size;
21208 offsets->outgoing_args = (offsets->locals_base
21209 + crtl->outgoing_args_size);
21211 if (ARM_DOUBLEWORD_ALIGN)
21213 /* Ensure SP remains doubleword aligned. */
21214 if (offsets->outgoing_args & 7)
21215 offsets->outgoing_args += 4;
21216 gcc_assert (!(offsets->outgoing_args & 7));
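/* A worked example of the layout computed above, under simplifying
   assumptions (ARM state, no pretend args, no static chain, no frame
   pointer, no caller-interworking slot, no iWMMXt or VFP saves): a
   function saving { r4, r5, lr } with 8 bytes of locals and no outgoing
   arguments gets

	saved_args    = 0
	saved_regs    = 12
	soft_frame    = 12, bumped to 16 for doubleword alignment
	locals_base   = 16 + 8 = 24
	outgoing_args = 24

   The 4 bytes of alignment padding are folded into the normal stack
   adjustment rather than an extra register push, because the frame size
   is non-zero.  */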
21221 /* Calculate the relative offsets for the different stack pointers. Positive
21222 offsets are in the direction of stack growth. */
21224 HOST_WIDE_INT
21225 arm_compute_initial_elimination_offset (unsigned int from, unsigned int to)
21227 arm_stack_offsets *offsets;
21229 offsets = arm_get_frame_offsets ();
21231 /* OK, now we have enough information to compute the distances.
21232 There must be an entry in these switch tables for each pair
21233 of registers in ELIMINABLE_REGS, even if some of the entries
21234 seem to be redundant or useless. */
21235 switch (from)
21237 case ARG_POINTER_REGNUM:
21238 switch (to)
21240 case THUMB_HARD_FRAME_POINTER_REGNUM:
21241 return 0;
21243 case FRAME_POINTER_REGNUM:
21244 /* This is the reverse of the soft frame pointer
21245 to hard frame pointer elimination below. */
21246 return offsets->soft_frame - offsets->saved_args;
21248 case ARM_HARD_FRAME_POINTER_REGNUM:
21249 /* This is only non-zero in the case where the static chain register
21250 is stored above the frame. */
21251 return offsets->frame - offsets->saved_args - 4;
21253 case STACK_POINTER_REGNUM:
21254 /* If nothing has been pushed on the stack at all
21255 then this will return -4. This *is* correct! */
21256 return offsets->outgoing_args - (offsets->saved_args + 4);
21258 default:
21259 gcc_unreachable ();
21261 gcc_unreachable ();
21263 case FRAME_POINTER_REGNUM:
21264 switch (to)
21266 case THUMB_HARD_FRAME_POINTER_REGNUM:
21267 return 0;
21269 case ARM_HARD_FRAME_POINTER_REGNUM:
21270 /* The hard frame pointer points to the top entry in the
21271 stack frame. The soft frame pointer to the bottom entry
21272 in the stack frame. If there is no stack frame at all,
21273 then they are identical. */
21275 return offsets->frame - offsets->soft_frame;
21277 case STACK_POINTER_REGNUM:
21278 return offsets->outgoing_args - offsets->soft_frame;
21280 default:
21281 gcc_unreachable ();
21283 gcc_unreachable ();
21285 default:
21286 /* You cannot eliminate from the stack pointer.
21287 In theory you could eliminate from the hard frame
21288 pointer to the stack pointer, but this will never
21289 happen, since if a stack frame is not needed the
21290 hard frame pointer will never be used. */
21291 gcc_unreachable ();
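/* Continuing the illustrative layout sketched after arm_compute_frame_layout
   (saved_args == 0, soft_frame == 16, outgoing_args == 24), the formulas
   above give

	ARG_POINTER   -> STACK_POINTER : 24 - (0 + 4) = 20
	ARG_POINTER   -> FRAME_POINTER : 16 - 0       = 16
	FRAME_POINTER -> STACK_POINTER : 24 - 16      =  8

   These numbers are only an example; the real values follow directly from
   the offsets computed by arm_compute_frame_layout.  */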
21295 /* Given FROM and TO register numbers, say whether this elimination is
21296 allowed. Frame pointer elimination is automatically handled.
21298 All eliminations are permissible. Note that ARG_POINTER_REGNUM and
21299 HARD_FRAME_POINTER_REGNUM are in fact the same thing. If we need a frame
21300 pointer, we must eliminate FRAME_POINTER_REGNUM into
21301 HARD_FRAME_POINTER_REGNUM and not into STACK_POINTER_REGNUM or
21302 ARG_POINTER_REGNUM. */
21304 bool
21305 arm_can_eliminate (const int from, const int to)
21307 return ((to == FRAME_POINTER_REGNUM && from == ARG_POINTER_REGNUM) ? false :
21308 (to == STACK_POINTER_REGNUM && frame_pointer_needed) ? false :
21309 (to == ARM_HARD_FRAME_POINTER_REGNUM && TARGET_THUMB) ? false :
21310 (to == THUMB_HARD_FRAME_POINTER_REGNUM && TARGET_ARM) ? false :
21311 true);
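/* For example, when a frame pointer is needed, any elimination into
   STACK_POINTER_REGNUM is rejected and the FRAME_POINTER_REGNUM to
   HARD_FRAME_POINTER_REGNUM elimination is used instead, while eliminating
   ARG_POINTER_REGNUM into FRAME_POINTER_REGNUM is never allowed.  */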
21314 /* Emit RTL to save coprocessor registers on function entry. Returns the
21315 number of bytes pushed. */
21317 static int
21318 arm_save_coproc_regs(void)
21320 int saved_size = 0;
21321 unsigned reg;
21322 unsigned start_reg;
21323 rtx insn;
21325 for (reg = LAST_IWMMXT_REGNUM; reg >= FIRST_IWMMXT_REGNUM; reg--)
21326 if (df_regs_ever_live_p (reg) && ! call_used_regs[reg])
21328 insn = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
21329 insn = gen_rtx_MEM (V2SImode, insn);
21330 insn = emit_set_insn (insn, gen_rtx_REG (V2SImode, reg));
21331 RTX_FRAME_RELATED_P (insn) = 1;
21332 saved_size += 8;
21335 if (TARGET_HARD_FLOAT)
21337 start_reg = FIRST_VFP_REGNUM;
21339 for (reg = FIRST_VFP_REGNUM; reg < LAST_VFP_REGNUM; reg += 2)
21341 if ((!df_regs_ever_live_p (reg) || call_used_regs[reg])
21342 && (!df_regs_ever_live_p (reg + 1) || call_used_regs[reg + 1]))
21344 if (start_reg != reg)
21345 saved_size += vfp_emit_fstmd (start_reg,
21346 (reg - start_reg) / 2);
21347 start_reg = reg + 2;
21350 if (start_reg != reg)
21351 saved_size += vfp_emit_fstmd (start_reg,
21352 (reg - start_reg) / 2);
21354 return saved_size;
21358 /* Set the Thumb frame pointer from the stack pointer. */
21360 static void
21361 thumb_set_frame_pointer (arm_stack_offsets *offsets)
21363 HOST_WIDE_INT amount;
21364 rtx insn, dwarf;
21366 amount = offsets->outgoing_args - offsets->locals_base;
21367 if (amount < 1024)
21368 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
21369 stack_pointer_rtx, GEN_INT (amount)));
21370 else
21372 emit_insn (gen_movsi (hard_frame_pointer_rtx, GEN_INT (amount)));
21373 /* Thumb-2 RTL patterns expect sp as the first input. Thumb-1
21374 expects the first two operands to be the same. */
21375 if (TARGET_THUMB2)
21377 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
21378 stack_pointer_rtx,
21379 hard_frame_pointer_rtx));
21381 else
21383 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
21384 hard_frame_pointer_rtx,
21385 stack_pointer_rtx));
21387 dwarf = gen_rtx_SET (hard_frame_pointer_rtx,
21388 plus_constant (Pmode, stack_pointer_rtx, amount));
21389 RTX_FRAME_RELATED_P (dwarf) = 1;
21390 add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
21393 RTX_FRAME_RELATED_P (insn) = 1;
21396 struct scratch_reg {
21397 rtx reg;
21398 bool saved;
21401 /* Return a short-lived scratch register for use as a 2nd scratch register on
21402 function entry after the registers are saved in the prologue. This register
21403 must be released by means of release_scratch_register_on_entry. IP is not
21404 considered since it is always used as the 1st scratch register if available.
21406 REGNO1 is the index number of the 1st scratch register and LIVE_REGS is the
21407 mask of live registers. */
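/* For illustration: if REGNO1 is IP and LR is live in LIVE_REGS, LR is used
   as the second scratch register; otherwise the first of r4-r10 that is live
   in LIVE_REGS (and is not REGNO1) is chosen; only if none is available do
   we fall back to r2 or r3, saving and restoring it around the use if it is
   live on entry.  */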
21409 static void
21410 get_scratch_register_on_entry (struct scratch_reg *sr, unsigned int regno1,
21411 unsigned long live_regs)
21413 int regno = -1;
21415 sr->saved = false;
21417 if (regno1 != LR_REGNUM && (live_regs & (1 << LR_REGNUM)) != 0)
21418 regno = LR_REGNUM;
21419 else
21421 unsigned int i;
21423 for (i = 4; i < 11; i++)
21424 if (regno1 != i && (live_regs & (1 << i)) != 0)
21426 regno = i;
21427 break;
21430 if (regno < 0)
21432 /* If IP is used as the 1st scratch register for a nested function,
21433 then either r3 wasn't available or is used to preserve IP. */
21434 if (regno1 == IP_REGNUM && IS_NESTED (arm_current_func_type ()))
21435 regno1 = 3;
21436 regno = (regno1 == 3 ? 2 : 3);
21437 sr->saved
21438 = REGNO_REG_SET_P (df_get_live_out (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
21439 regno);
21443 sr->reg = gen_rtx_REG (SImode, regno);
21444 if (sr->saved)
21446 rtx addr = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
21447 rtx insn = emit_set_insn (gen_frame_mem (SImode, addr), sr->reg);
21448 rtx x = gen_rtx_SET (stack_pointer_rtx,
21449 plus_constant (Pmode, stack_pointer_rtx, -4));
21450 RTX_FRAME_RELATED_P (insn) = 1;
21451 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
21455 /* Release a scratch register obtained from the preceding function. */
21457 static void
21458 release_scratch_register_on_entry (struct scratch_reg *sr)
21460 if (sr->saved)
21462 rtx addr = gen_rtx_POST_INC (Pmode, stack_pointer_rtx);
21463 rtx insn = emit_set_insn (sr->reg, gen_frame_mem (SImode, addr));
21464 rtx x = gen_rtx_SET (stack_pointer_rtx,
21465 plus_constant (Pmode, stack_pointer_rtx, 4));
21466 RTX_FRAME_RELATED_P (insn) = 1;
21467 add_reg_note (insn, REG_FRAME_RELATED_EXPR, x);
21471 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
21473 #if PROBE_INTERVAL > 4096
21474 #error Cannot use indexed addressing mode for stack probing
21475 #endif
21477 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
21478 inclusive. These are offsets from the current stack pointer. REGNO1
21479 is the index number of the 1st scratch register and LIVE_REGS is the
21480 mask of live registers. */
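/* A small worked example, assuming the default 4K PROBE_INTERVAL: with
   FIRST == 4096 and SIZE == 1024 we take the first case below, point REG1 at
   SP - 8192 and emit a single probe at REG1 + 3072, i.e. at FIRST + SIZE
   (5120 bytes) below the incoming stack pointer.  */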
21482 static void
21483 arm_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size,
21484 unsigned int regno1, unsigned long live_regs)
21486 rtx reg1 = gen_rtx_REG (Pmode, regno1);
21488 /* See if we have a constant small number of probes to generate. If so,
21489 that's the easy case. */
21490 if (size <= PROBE_INTERVAL)
21492 emit_move_insn (reg1, GEN_INT (first + PROBE_INTERVAL));
21493 emit_set_insn (reg1, gen_rtx_MINUS (Pmode, stack_pointer_rtx, reg1));
21494 emit_stack_probe (plus_constant (Pmode, reg1, PROBE_INTERVAL - size));
21497 /* The run-time loop is made up of 10 insns in the generic case while the
21498 compile-time loop is made up of 4+2*(n-2) insns for n # of intervals. */
21499 else if (size <= 5 * PROBE_INTERVAL)
21501 HOST_WIDE_INT i, rem;
21503 emit_move_insn (reg1, GEN_INT (first + PROBE_INTERVAL));
21504 emit_set_insn (reg1, gen_rtx_MINUS (Pmode, stack_pointer_rtx, reg1));
21505 emit_stack_probe (reg1);
21507 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 2 until
21508 it exceeds SIZE. If only two probes are needed, this will not
21509 generate any code. Then probe at FIRST + SIZE. */
21510 for (i = 2 * PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
21512 emit_set_insn (reg1, plus_constant (Pmode, reg1, -PROBE_INTERVAL));
21513 emit_stack_probe (reg1);
21516 rem = size - (i - PROBE_INTERVAL);
21517 if (rem > 4095 || (TARGET_THUMB2 && rem > 255))
21519 emit_set_insn (reg1, plus_constant (Pmode, reg1, -PROBE_INTERVAL));
21520 emit_stack_probe (plus_constant (Pmode, reg1, PROBE_INTERVAL - rem));
21522 else
21523 emit_stack_probe (plus_constant (Pmode, reg1, -rem));
21526 /* Otherwise, do the same as above, but in a loop. Note that we must be
21527 extra careful with variables wrapping around because we might be at
21528 the very top (or the very bottom) of the address space and we have
21529 to be able to handle this case properly; in particular, we use an
21530 equality test for the loop condition. */
21531 else
21533 HOST_WIDE_INT rounded_size;
21534 struct scratch_reg sr;
21536 get_scratch_register_on_entry (&sr, regno1, live_regs);
21538 emit_move_insn (reg1, GEN_INT (first));
21541 /* Step 1: round SIZE to the previous multiple of the interval. */
21543 rounded_size = size & -PROBE_INTERVAL;
21544 emit_move_insn (sr.reg, GEN_INT (rounded_size));
21547 /* Step 2: compute initial and final value of the loop counter. */
21549 /* TEST_ADDR = SP + FIRST. */
21550 emit_set_insn (reg1, gen_rtx_MINUS (Pmode, stack_pointer_rtx, reg1));
21552 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
21553 emit_set_insn (sr.reg, gen_rtx_MINUS (Pmode, reg1, sr.reg));
21556 /* Step 3: the loop
21560 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
21561 probe at TEST_ADDR
21563 while (TEST_ADDR != LAST_ADDR)
21565 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
21566 until it is equal to ROUNDED_SIZE. */
21568 emit_insn (gen_probe_stack_range (reg1, reg1, sr.reg));
21571 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
21572 that SIZE is equal to ROUNDED_SIZE. */
21574 if (size != rounded_size)
21576 HOST_WIDE_INT rem = size - rounded_size;
21578 if (rem > 4095 || (TARGET_THUMB2 && rem > 255))
21580 emit_set_insn (sr.reg,
21581 plus_constant (Pmode, sr.reg, -PROBE_INTERVAL));
21582 emit_stack_probe (plus_constant (Pmode, sr.reg,
21583 PROBE_INTERVAL - rem));
21585 else
21586 emit_stack_probe (plus_constant (Pmode, sr.reg, -rem));
21589 release_scratch_register_on_entry (&sr);
21592 /* Make sure nothing is scheduled before we are done. */
21593 emit_insn (gen_blockage ());
21596 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
21597 absolute addresses. */
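/* For illustration, with REG1 == r4, REG2 == r5 and the default 4K probe
   interval, the emitted loop looks roughly like:

   .LPSRL0:
	sub	r4, r4, #4096
	str	r0, [r4, #0]
	cmp	r4, r5
	bne	.LPSRL0
 */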
21599 const char *
21600 output_probe_stack_range (rtx reg1, rtx reg2)
21602 static int labelno = 0;
21603 char loop_lab[32];
21604 rtx xops[2];
21606 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
21608 /* Loop. */
21609 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
21611 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
21612 xops[0] = reg1;
21613 xops[1] = GEN_INT (PROBE_INTERVAL);
21614 output_asm_insn ("sub\t%0, %0, %1", xops);
21616 /* Probe at TEST_ADDR. */
21617 output_asm_insn ("str\tr0, [%0, #0]", xops);
21619 /* Test if TEST_ADDR == LAST_ADDR. */
21620 xops[1] = reg2;
21621 output_asm_insn ("cmp\t%0, %1", xops);
21623 /* Branch. */
21624 fputs ("\tbne\t", asm_out_file);
21625 assemble_name_raw (asm_out_file, loop_lab);
21626 fputc ('\n', asm_out_file);
21628 return "";
21631 /* Generate the prologue instructions for entry into an ARM or Thumb-2
21632 function. */
21633 void
21634 arm_expand_prologue (void)
21636 rtx amount;
21637 rtx insn;
21638 rtx ip_rtx;
21639 unsigned long live_regs_mask;
21640 unsigned long func_type;
21641 int fp_offset = 0;
21642 int saved_pretend_args = 0;
21643 int saved_regs = 0;
21644 unsigned HOST_WIDE_INT args_to_push;
21645 HOST_WIDE_INT size;
21646 arm_stack_offsets *offsets;
21647 bool clobber_ip;
21649 func_type = arm_current_func_type ();
21651 /* Naked functions don't have prologues. */
21652 if (IS_NAKED (func_type))
21654 if (flag_stack_usage_info)
21655 current_function_static_stack_size = 0;
21656 return;
21659 /* Make a copy of c_f_p_a_s as we may need to modify it locally. */
21660 args_to_push = crtl->args.pretend_args_size;
21662 /* Compute which register we will have to save onto the stack. */
21663 offsets = arm_get_frame_offsets ();
21664 live_regs_mask = offsets->saved_regs_mask;
21666 ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
21668 if (IS_STACKALIGN (func_type))
21670 rtx r0, r1;
21672 /* Handle a word-aligned stack pointer. We generate the following:
21674 mov r0, sp
21675 bic r1, r0, #7
21676 mov sp, r1
21677 <save and restore r0 in normal prologue/epilogue>
21678 mov sp, r0
21679 bx lr
21681 The unwinder doesn't need to know about the stack realignment.
21682 Just tell it we saved SP in r0. */
21683 gcc_assert (TARGET_THUMB2 && !arm_arch_notm && args_to_push == 0);
21685 r0 = gen_rtx_REG (SImode, R0_REGNUM);
21686 r1 = gen_rtx_REG (SImode, R1_REGNUM);
21688 insn = emit_insn (gen_movsi (r0, stack_pointer_rtx));
21689 RTX_FRAME_RELATED_P (insn) = 1;
21690 add_reg_note (insn, REG_CFA_REGISTER, NULL);
21692 emit_insn (gen_andsi3 (r1, r0, GEN_INT (~(HOST_WIDE_INT)7)));
21694 /* ??? The CFA changes here, which may cause GDB to conclude that it
21695 has entered a different function. That said, the unwind info is
21696 correct, individually, before and after this instruction because
21697 we've described the save of SP, which will override the default
21698 handling of SP as restoring from the CFA. */
21699 emit_insn (gen_movsi (stack_pointer_rtx, r1));
21702 /* The static chain register is the same as the IP register. If it is
21703 clobbered when creating the frame, we need to save and restore it. */
21704 clobber_ip = IS_NESTED (func_type)
21705 && ((TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
21706 || ((flag_stack_check == STATIC_BUILTIN_STACK_CHECK
21707 || flag_stack_clash_protection)
21708 && !df_regs_ever_live_p (LR_REGNUM)
21709 && arm_r3_live_at_start_p ()));
21711 /* Find somewhere to store IP whilst the frame is being created.
21712 We try the following places in order:
21714 1. The last argument register r3 if it is available.
21715 2. A slot on the stack above the frame if there are no
21716 arguments to push onto the stack.
21717 3. Register r3 again, after pushing the argument registers
21718 onto the stack, if this is a varargs function.
21719 4. The last slot on the stack created for the arguments to
21720 push, if this isn't a varargs function.
21722 Note - we only need to tell the dwarf2 backend about the SP
21723 adjustment in the second variant; the static chain register
21724 doesn't need to be unwound, as it doesn't contain a value
21725 inherited from the caller. */
21726 if (clobber_ip)
21728 if (!arm_r3_live_at_start_p ())
21729 insn = emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
21730 else if (args_to_push == 0)
21732 rtx addr, dwarf;
21734 gcc_assert(arm_compute_static_chain_stack_bytes() == 4);
21735 saved_regs += 4;
21737 addr = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
21738 insn = emit_set_insn (gen_frame_mem (SImode, addr), ip_rtx);
21739 fp_offset = 4;
21741 /* Just tell the dwarf backend that we adjusted SP. */
21742 dwarf = gen_rtx_SET (stack_pointer_rtx,
21743 plus_constant (Pmode, stack_pointer_rtx,
21744 -fp_offset));
21745 RTX_FRAME_RELATED_P (insn) = 1;
21746 add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
21748 else
21750 /* Store the args on the stack. */
21751 if (cfun->machine->uses_anonymous_args)
21753 insn = emit_multi_reg_push ((0xf0 >> (args_to_push / 4)) & 0xf,
21754 (0xf0 >> (args_to_push / 4)) & 0xf);
21755 emit_set_insn (gen_rtx_REG (SImode, 3), ip_rtx);
21756 saved_pretend_args = 1;
21758 else
21760 rtx addr, dwarf;
21762 if (args_to_push == 4)
21763 addr = gen_rtx_PRE_DEC (Pmode, stack_pointer_rtx);
21764 else
21765 addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
21766 plus_constant (Pmode,
21767 stack_pointer_rtx,
21768 -args_to_push));
21770 insn = emit_set_insn (gen_frame_mem (SImode, addr), ip_rtx);
21772 /* Just tell the dwarf backend that we adjusted SP. */
21773 dwarf = gen_rtx_SET (stack_pointer_rtx,
21774 plus_constant (Pmode, stack_pointer_rtx,
21775 -args_to_push));
21776 add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
21779 RTX_FRAME_RELATED_P (insn) = 1;
21780 fp_offset = args_to_push;
21781 args_to_push = 0;
21785 if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
21787 if (IS_INTERRUPT (func_type))
21789 /* Interrupt functions must not corrupt any registers.
21790 Creating a frame pointer however, corrupts the IP
21791 register, so we must push it first. */
21792 emit_multi_reg_push (1 << IP_REGNUM, 1 << IP_REGNUM);
21794 /* Do not set RTX_FRAME_RELATED_P on this insn.
21795 The dwarf stack unwinding code only wants to see one
21796 stack decrement per function, and this is not it. If
21797 this instruction is labeled as being part of the frame
21798 creation sequence then dwarf2out_frame_debug_expr will
21799 die when it encounters the assignment of IP to FP
21800 later on, since the use of SP here establishes SP as
21801 the CFA register and not IP.
21803 Anyway this instruction is not really part of the stack
21804 frame creation although it is part of the prologue. */
21807 insn = emit_set_insn (ip_rtx,
21808 plus_constant (Pmode, stack_pointer_rtx,
21809 fp_offset));
21810 RTX_FRAME_RELATED_P (insn) = 1;
21813 if (args_to_push)
21815 /* Push the argument registers, or reserve space for them. */
21816 if (cfun->machine->uses_anonymous_args)
21817 insn = emit_multi_reg_push
21818 ((0xf0 >> (args_to_push / 4)) & 0xf,
21819 (0xf0 >> (args_to_push / 4)) & 0xf);
21820 else
21821 insn = emit_insn
21822 (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
21823 GEN_INT (- args_to_push)));
21824 RTX_FRAME_RELATED_P (insn) = 1;
21827 /* If this is an interrupt service routine, and the link register
21828 is going to be pushed, and we're not generating an extra
21829 push of IP (needed when a frame is needed and the frame layout is APCS),
21830 subtracting four from LR now will mean that the function return
21831 can be done with a single instruction. */
21832 if ((func_type == ARM_FT_ISR || func_type == ARM_FT_FIQ)
21833 && (live_regs_mask & (1 << LR_REGNUM)) != 0
21834 && !(frame_pointer_needed && TARGET_APCS_FRAME)
21835 && TARGET_ARM)
21837 rtx lr = gen_rtx_REG (SImode, LR_REGNUM);
21839 emit_set_insn (lr, plus_constant (SImode, lr, -4));
21842 if (live_regs_mask)
21844 unsigned long dwarf_regs_mask = live_regs_mask;
21846 saved_regs += bit_count (live_regs_mask) * 4;
21847 if (optimize_size && !frame_pointer_needed
21848 && saved_regs == offsets->saved_regs - offsets->saved_args)
21850 /* If no coprocessor registers are being pushed and we don't have
21851 to worry about a frame pointer then push extra registers to
21852 create the stack frame. This is done in a way that does not
21853 alter the frame layout, so is independent of the epilogue. */
21854 int n;
21855 int frame;
21856 n = 0;
21857 while (n < 8 && (live_regs_mask & (1 << n)) == 0)
21858 n++;
21859 frame = offsets->outgoing_args - (offsets->saved_args + saved_regs);
21860 if (frame && n * 4 >= frame)
21862 n = frame / 4;
21863 live_regs_mask |= (1 << n) - 1;
21864 saved_regs += frame;
21868 if (TARGET_LDRD
21869 && current_tune->prefer_ldrd_strd
21870 && !optimize_function_for_size_p (cfun))
21872 gcc_checking_assert (live_regs_mask == dwarf_regs_mask);
21873 if (TARGET_THUMB2)
21874 thumb2_emit_strd_push (live_regs_mask);
21875 else if (TARGET_ARM
21876 && !TARGET_APCS_FRAME
21877 && !IS_INTERRUPT (func_type))
21878 arm_emit_strd_push (live_regs_mask);
21879 else
21881 insn = emit_multi_reg_push (live_regs_mask, live_regs_mask);
21882 RTX_FRAME_RELATED_P (insn) = 1;
21885 else
21887 insn = emit_multi_reg_push (live_regs_mask, dwarf_regs_mask);
21888 RTX_FRAME_RELATED_P (insn) = 1;
21892 if (! IS_VOLATILE (func_type))
21893 saved_regs += arm_save_coproc_regs ();
21895 if (frame_pointer_needed && TARGET_ARM)
21897 /* Create the new frame pointer. */
21898 if (TARGET_APCS_FRAME)
21900 insn = GEN_INT (-(4 + args_to_push + fp_offset));
21901 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, ip_rtx, insn));
21902 RTX_FRAME_RELATED_P (insn) = 1;
21904 else
21906 insn = GEN_INT (saved_regs - (4 + fp_offset));
21907 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
21908 stack_pointer_rtx, insn));
21909 RTX_FRAME_RELATED_P (insn) = 1;
21913 size = offsets->outgoing_args - offsets->saved_args;
21914 if (flag_stack_usage_info)
21915 current_function_static_stack_size = size;
21917 /* If this isn't an interrupt service routine and we have a frame, then do
21918 stack checking. We use IP as the first scratch register, except for the
21919 non-APCS nested functions if LR or r3 are available (see clobber_ip). */
21920 if (!IS_INTERRUPT (func_type)
21921 && (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
21922 || flag_stack_clash_protection))
21924 unsigned int regno;
21926 if (!IS_NESTED (func_type) || clobber_ip)
21927 regno = IP_REGNUM;
21928 else if (df_regs_ever_live_p (LR_REGNUM))
21929 regno = LR_REGNUM;
21930 else
21931 regno = 3;
21933 if (crtl->is_leaf && !cfun->calls_alloca)
21935 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
21936 arm_emit_probe_stack_range (get_stack_check_protect (),
21937 size - get_stack_check_protect (),
21938 regno, live_regs_mask);
21940 else if (size > 0)
21941 arm_emit_probe_stack_range (get_stack_check_protect (), size,
21942 regno, live_regs_mask);
21945 /* Recover the static chain register. */
21946 if (clobber_ip)
21948 if (!arm_r3_live_at_start_p () || saved_pretend_args)
21949 insn = gen_rtx_REG (SImode, 3);
21950 else
21952 insn = plus_constant (Pmode, hard_frame_pointer_rtx, 4);
21953 insn = gen_frame_mem (SImode, insn);
21955 emit_set_insn (ip_rtx, insn);
21956 emit_insn (gen_force_register_use (ip_rtx));
21959 if (offsets->outgoing_args != offsets->saved_args + saved_regs)
21961 /* This add can produce multiple insns for a large constant, so we
21962 need to get tricky. */
21963 rtx_insn *last = get_last_insn ();
21965 amount = GEN_INT (offsets->saved_args + saved_regs
21966 - offsets->outgoing_args);
21968 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
21969 amount));
21972 last = last ? NEXT_INSN (last) : get_insns ();
21973 RTX_FRAME_RELATED_P (last) = 1;
21975 while (last != insn);
21977 /* If the frame pointer is needed, emit a special barrier that
21978 will prevent the scheduler from moving stores to the frame
21979 before the stack adjustment. */
21980 if (frame_pointer_needed)
21981 emit_insn (gen_stack_tie (stack_pointer_rtx,
21982 hard_frame_pointer_rtx));
21986 if (frame_pointer_needed && TARGET_THUMB2)
21987 thumb_set_frame_pointer (offsets);
21989 if (flag_pic && arm_pic_register != INVALID_REGNUM)
21991 unsigned long mask;
21993 mask = live_regs_mask;
21994 mask &= THUMB2_WORK_REGS;
21995 if (!IS_NESTED (func_type))
21996 mask |= (1 << IP_REGNUM);
21997 arm_load_pic_register (mask);
22000 /* If we are profiling, make sure no instructions are scheduled before
22001 the call to mcount. Similarly if the user has requested no
22002 scheduling in the prolog. Similarly if we want non-call exceptions
22003 using the EABI unwinder, to prevent faulting instructions from being
22004 swapped with a stack adjustment. */
22005 if (crtl->profile || !TARGET_SCHED_PROLOG
22006 || (arm_except_unwind_info (&global_options) == UI_TARGET
22007 && cfun->can_throw_non_call_exceptions))
22008 emit_insn (gen_blockage ());
22010 /* If the link register is being kept alive, with the return address in it,
22011 then make sure that it does not get reused by the ce2 pass. */
22012 if ((live_regs_mask & (1 << LR_REGNUM)) == 0)
22013 cfun->machine->lr_save_eliminated = 1;
22016 /* Print condition code to STREAM. Helper function for arm_print_operand. */
22017 static void
22018 arm_print_condition (FILE *stream)
22020 if (arm_ccfsm_state == 3 || arm_ccfsm_state == 4)
22022 /* Branch conversion is not implemented for Thumb-2. */
22023 if (TARGET_THUMB)
22025 output_operand_lossage ("predicated Thumb instruction");
22026 return;
22028 if (current_insn_predicate != NULL)
22030 output_operand_lossage
22031 ("predicated instruction in conditional sequence");
22032 return;
22035 fputs (arm_condition_codes[arm_current_cc], stream);
22037 else if (current_insn_predicate)
22039 enum arm_cond_code code;
22041 if (TARGET_THUMB1)
22043 output_operand_lossage ("predicated Thumb instruction");
22044 return;
22047 code = get_arm_condition_code (current_insn_predicate);
22048 fputs (arm_condition_codes[code], stream);
22053 /* Globally reserved letters: acln
22054 Punctuation letters currently used: @_|?().!#
22055 Lower case letters currently used: bcdefhimpqtvwxyz
22056 Upper case letters currently used: ABCDFGHJKLMNOPQRSTU
22057 Letters previously used, but now deprecated/obsolete: sVWXYZ.
22059 Note that the global reservation for 'c' is only for CONSTANT_ADDRESS_P.
22061 If CODE is 'd', then the X is a condition operand and the instruction
22062 should only be executed if the condition is true.
22063 if CODE is 'D', then the X is a condition operand and the instruction
22064 should only be executed if the condition is false: however, if the mode
22065 of the comparison is CCFPEmode, then always execute the instruction -- we
22066 do this because in these circumstances !GE does not necessarily imply LT;
22067 in these cases the instruction pattern will take care to make sure that
22068 an instruction containing %d will follow, thereby undoing the effects of
22069 doing this instruction unconditionally.
22070 If CODE is 'N' then X is a floating point operand that must be negated
22071 before output.
22072 If CODE is 'B' then output a bitwise inverted value of X (a const int).
22073 If X is a REG and CODE is `M', output a ldm/stm style multi-reg. */
22074 static void
22075 arm_print_operand (FILE *stream, rtx x, int code)
22077 switch (code)
22079 case '@':
22080 fputs (ASM_COMMENT_START, stream);
22081 return;
22083 case '_':
22084 fputs (user_label_prefix, stream);
22085 return;
22087 case '|':
22088 fputs (REGISTER_PREFIX, stream);
22089 return;
22091 case '?':
22092 arm_print_condition (stream);
22093 return;
22095 case '.':
22096 /* The current condition code for a condition code setting instruction.
22097 Preceded by 's' in unified syntax, otherwise followed by 's'. */
22098 fputc('s', stream);
22099 arm_print_condition (stream);
22100 return;
22102 case '!':
22103 /* If the instruction is conditionally executed then print
22104 the current condition code, otherwise print 's'. */
22105 gcc_assert (TARGET_THUMB2);
22106 if (current_insn_predicate)
22107 arm_print_condition (stream);
22108 else
22109 fputc('s', stream);
22110 break;
22112 /* %# is a "break" sequence. It doesn't output anything, but is used to
22113 separate e.g. operand numbers from following text, if that text consists
22114 of further digits which we don't want to be part of the operand
22115 number. */
22116 case '#':
22117 return;
22119 case 'N':
22121 REAL_VALUE_TYPE r;
22122 r = real_value_negate (CONST_DOUBLE_REAL_VALUE (x));
22123 fprintf (stream, "%s", fp_const_from_val (&r));
22125 return;
22127 /* An integer or symbol address without a preceding # sign. */
22128 case 'c':
22129 switch (GET_CODE (x))
22131 case CONST_INT:
22132 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
22133 break;
22135 case SYMBOL_REF:
22136 output_addr_const (stream, x);
22137 break;
22139 case CONST:
22140 if (GET_CODE (XEXP (x, 0)) == PLUS
22141 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
22143 output_addr_const (stream, x);
22144 break;
22146 /* Fall through. */
22148 default:
22149 output_operand_lossage ("Unsupported operand for code '%c'", code);
22151 return;
22153 /* An integer that we want to print in HEX. */
22154 case 'x':
22155 switch (GET_CODE (x))
22157 case CONST_INT:
22158 fprintf (stream, "#" HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
22159 break;
22161 default:
22162 output_operand_lossage ("Unsupported operand for code '%c'", code);
22164 return;
22166 case 'B':
22167 if (CONST_INT_P (x))
22169 HOST_WIDE_INT val;
22170 val = ARM_SIGN_EXTEND (~INTVAL (x));
22171 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
22173 else
22175 putc ('~', stream);
22176 output_addr_const (stream, x);
22178 return;
22180 case 'b':
22181 /* Print the log2 of a CONST_INT. */
22183 HOST_WIDE_INT val;
22185 if (!CONST_INT_P (x)
22186 || (val = exact_log2 (INTVAL (x) & 0xffffffff)) < 0)
22187 output_operand_lossage ("Unsupported operand for code '%c'", code);
22188 else
22189 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
22191 return;
22193 case 'L':
22194 /* The low 16 bits of an immediate constant. */
22195 fprintf (stream, HOST_WIDE_INT_PRINT_DEC, INTVAL(x) & 0xffff);
22196 return;
22198 case 'i':
22199 fprintf (stream, "%s", arithmetic_instr (x, 1));
22200 return;
22202 case 'I':
22203 fprintf (stream, "%s", arithmetic_instr (x, 0));
22204 return;
22206 case 'S':
22208 HOST_WIDE_INT val;
22209 const char *shift;
22211 shift = shift_op (x, &val);
22213 if (shift)
22215 fprintf (stream, ", %s ", shift);
22216 if (val == -1)
22217 arm_print_operand (stream, XEXP (x, 1), 0);
22218 else
22219 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, val);
22222 return;
22224 /* An explanation of the 'Q', 'R' and 'H' register operands:
22226 In a pair of registers containing a DI or DF value the 'Q'
22227 operand returns the register number of the register containing
22228 the least significant part of the value. The 'R' operand returns
22229 the register number of the register containing the most
22230 significant part of the value.
22232 The 'H' operand returns the higher of the two register numbers.
22233 On a run where WORDS_BIG_ENDIAN is true the 'H' operand is the
22234 same as the 'Q' operand, since the most significant part of the
22235 value is held in the lower number register. The reverse is true
22236 on systems where WORDS_BIG_ENDIAN is false.
22238 The purpose of these operands is to distinguish between cases
22239 where the endian-ness of the values is important (for example
22240 when they are added together), and cases where the endian-ness
22241 is irrelevant, but the order of register operations is important.
22242 For example when loading a value from memory into a register
22243 pair, the endian-ness does not matter. Provided that the value
22244 from the lower memory address is put into the lower numbered
22245 register, and the value from the higher address is put into the
22246 higher numbered register, the load will work regardless of whether
22247 the value being loaded is big-wordian or little-wordian. The
22248 order of the two register loads can matter however, if the address
22249 of the memory location is actually held in one of the registers
22250 being overwritten by the load.
22252 The 'Q' and 'R' constraints are also available for 64-bit
22253 constants. */
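/* For illustration: for a DImode value held in { r0, r1 } on a little-endian
   target, %Q prints r0 (the least significant word) and %R prints r1; on a
   big-endian target the two are swapped, while %H always prints r1, the
   higher-numbered register of the pair.  */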
22254 case 'Q':
22255 if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
22257 rtx part = gen_lowpart (SImode, x);
22258 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, INTVAL (part));
22259 return;
22262 if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
22264 output_operand_lossage ("invalid operand for code '%c'", code);
22265 return;
22268 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 1 : 0));
22269 return;
22271 case 'R':
22272 if (CONST_INT_P (x) || CONST_DOUBLE_P (x))
22274 machine_mode mode = GET_MODE (x);
22275 rtx part;
22277 if (mode == VOIDmode)
22278 mode = DImode;
22279 part = gen_highpart_mode (SImode, mode, x);
22280 fprintf (stream, "#" HOST_WIDE_INT_PRINT_DEC, INTVAL (part));
22281 return;
22284 if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
22286 output_operand_lossage ("invalid operand for code '%c'", code);
22287 return;
22290 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 0 : 1));
22291 return;
22293 case 'H':
22294 if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
22296 output_operand_lossage ("invalid operand for code '%c'", code);
22297 return;
22300 asm_fprintf (stream, "%r", REGNO (x) + 1);
22301 return;
22303 case 'J':
22304 if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
22306 output_operand_lossage ("invalid operand for code '%c'", code);
22307 return;
22310 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 3 : 2));
22311 return;
22313 case 'K':
22314 if (!REG_P (x) || REGNO (x) > LAST_ARM_REGNUM)
22316 output_operand_lossage ("invalid operand for code '%c'", code);
22317 return;
22320 asm_fprintf (stream, "%r", REGNO (x) + (WORDS_BIG_ENDIAN ? 2 : 3));
22321 return;
22323 case 'm':
22324 asm_fprintf (stream, "%r",
22325 REG_P (XEXP (x, 0))
22326 ? REGNO (XEXP (x, 0)) : REGNO (XEXP (XEXP (x, 0), 0)));
22327 return;
22329 case 'M':
22330 asm_fprintf (stream, "{%r-%r}",
22331 REGNO (x),
22332 REGNO (x) + ARM_NUM_REGS (GET_MODE (x)) - 1);
22333 return;
22335 /* Like 'M', but writing doubleword vector registers, for use by Neon
22336 insns. */
22337 case 'h':
22339 int regno = (REGNO (x) - FIRST_VFP_REGNUM) / 2;
22340 int numregs = ARM_NUM_REGS (GET_MODE (x)) / 2;
22341 if (numregs == 1)
22342 asm_fprintf (stream, "{d%d}", regno);
22343 else
22344 asm_fprintf (stream, "{d%d-d%d}", regno, regno + numregs - 1);
22346 return;
22348 case 'd':
22349 /* CONST_TRUE_RTX means always -- that's the default. */
22350 if (x == const_true_rtx)
22351 return;
22353 if (!COMPARISON_P (x))
22355 output_operand_lossage ("invalid operand for code '%c'", code);
22356 return;
22359 fputs (arm_condition_codes[get_arm_condition_code (x)],
22360 stream);
22361 return;
22363 case 'D':
22364 /* CONST_TRUE_RTX means not always -- i.e. never. We shouldn't ever
22365 want to do that. */
22366 if (x == const_true_rtx)
22368 output_operand_lossage ("instruction never executed");
22369 return;
22371 if (!COMPARISON_P (x))
22373 output_operand_lossage ("invalid operand for code '%c'", code);
22374 return;
22377 fputs (arm_condition_codes[ARM_INVERSE_CONDITION_CODE
22378 (get_arm_condition_code (x))],
22379 stream);
22380 return;
22382 case 's':
22383 case 'V':
22384 case 'W':
22385 case 'X':
22386 case 'Y':
22387 case 'Z':
22388 /* Former Maverick support, removed after GCC-4.7. */
22389 output_operand_lossage ("obsolete Maverick format code '%c'", code);
22390 return;
22392 case 'U':
22393 if (!REG_P (x)
22394 || REGNO (x) < FIRST_IWMMXT_GR_REGNUM
22395 || REGNO (x) > LAST_IWMMXT_GR_REGNUM)
22396 /* Bad value for wCG register number. */
22398 output_operand_lossage ("invalid operand for code '%c'", code);
22399 return;
22402 else
22403 fprintf (stream, "%d", REGNO (x) - FIRST_IWMMXT_GR_REGNUM);
22404 return;
22406 /* Print an iWMMXt control register name. */
22407 case 'w':
22408 if (!CONST_INT_P (x)
22409 || INTVAL (x) < 0
22410 || INTVAL (x) >= 16)
22411 /* Bad value for wC register number. */
22413 output_operand_lossage ("invalid operand for code '%c'", code);
22414 return;
22417 else
22419 static const char * wc_reg_names [16] =
22421 "wCID", "wCon", "wCSSF", "wCASF",
22422 "wC4", "wC5", "wC6", "wC7",
22423 "wCGR0", "wCGR1", "wCGR2", "wCGR3",
22424 "wC12", "wC13", "wC14", "wC15"
22427 fputs (wc_reg_names [INTVAL (x)], stream);
22429 return;
22431 /* Print the high single-precision register of a VFP double-precision
22432 register. */
22433 case 'p':
22435 machine_mode mode = GET_MODE (x);
22436 int regno;
22438 if (GET_MODE_SIZE (mode) != 8 || !REG_P (x))
22440 output_operand_lossage ("invalid operand for code '%c'", code);
22441 return;
22444 regno = REGNO (x);
22445 if (!VFP_REGNO_OK_FOR_DOUBLE (regno))
22447 output_operand_lossage ("invalid operand for code '%c'", code);
22448 return;
22451 fprintf (stream, "s%d", regno - FIRST_VFP_REGNUM + 1);
22453 return;
22455 /* Print a VFP/Neon double precision or quad precision register name. */
22456 case 'P':
22457 case 'q':
22459 machine_mode mode = GET_MODE (x);
22460 int is_quad = (code == 'q');
22461 int regno;
22463 if (GET_MODE_SIZE (mode) != (is_quad ? 16 : 8))
22465 output_operand_lossage ("invalid operand for code '%c'", code);
22466 return;
22469 if (!REG_P (x)
22470 || !IS_VFP_REGNUM (REGNO (x)))
22472 output_operand_lossage ("invalid operand for code '%c'", code);
22473 return;
22476 regno = REGNO (x);
22477 if ((is_quad && !NEON_REGNO_OK_FOR_QUAD (regno))
22478 || (!is_quad && !VFP_REGNO_OK_FOR_DOUBLE (regno)))
22480 output_operand_lossage ("invalid operand for code '%c'", code);
22481 return;
22484 fprintf (stream, "%c%d", is_quad ? 'q' : 'd',
22485 (regno - FIRST_VFP_REGNUM) >> (is_quad ? 2 : 1));
22487 return;
22489 /* These two codes print the low/high doubleword register of a Neon quad
22490 register, respectively. For pair-structure types, can also print
22491 low/high quadword registers. */
22492 case 'e':
22493 case 'f':
22495 machine_mode mode = GET_MODE (x);
22496 int regno;
22498 if ((GET_MODE_SIZE (mode) != 16
22499 && GET_MODE_SIZE (mode) != 32) || !REG_P (x))
22501 output_operand_lossage ("invalid operand for code '%c'", code);
22502 return;
22505 regno = REGNO (x);
22506 if (!NEON_REGNO_OK_FOR_QUAD (regno))
22508 output_operand_lossage ("invalid operand for code '%c'", code);
22509 return;
22512 if (GET_MODE_SIZE (mode) == 16)
22513 fprintf (stream, "d%d", ((regno - FIRST_VFP_REGNUM) >> 1)
22514 + (code == 'f' ? 1 : 0));
22515 else
22516 fprintf (stream, "q%d", ((regno - FIRST_VFP_REGNUM) >> 2)
22517 + (code == 'f' ? 1 : 0));
22519 return;
22521 /* Print a VFPv3 floating-point constant, represented as an integer
22522 index. */
22523 case 'G':
22525 int index = vfp3_const_double_index (x);
22526 gcc_assert (index != -1);
22527 fprintf (stream, "%d", index);
22529 return;
22531 /* Print bits representing opcode features for Neon.
22533 Bit 0 is 1 for signed, 0 for unsigned. Floats count as signed
22534 and polynomials as unsigned.
22536 Bit 1 is 1 for floats and polynomials, 0 for ordinary integers.
22538 Bit 2 is 1 for rounding functions, 0 otherwise. */
22540 /* Identify the type as 's', 'u', 'p' or 'f'. */
22541 case 'T':
22543 HOST_WIDE_INT bits = INTVAL (x);
22544 fputc ("uspf"[bits & 3], stream);
22546 return;
22548 /* Likewise, but signed and unsigned integers are both 'i'. */
22549 case 'F':
22551 HOST_WIDE_INT bits = INTVAL (x);
22552 fputc ("iipf"[bits & 3], stream);
22554 return;
22556 /* As for 'T', but emit 'u' instead of 'p'. */
22557 case 't':
22559 HOST_WIDE_INT bits = INTVAL (x);
22560 fputc ("usuf"[bits & 3], stream);
22562 return;
22564 /* Bit 2: rounding (vs none). */
22565 case 'O':
22567 HOST_WIDE_INT bits = INTVAL (x);
22568 fputs ((bits & 4) != 0 ? "r" : "", stream);
22570 return;
22572 /* Memory operand for vld1/vst1 instruction. */
22573 case 'A':
22575 rtx addr;
22576 bool postinc = FALSE;
22577 rtx postinc_reg = NULL;
22578 unsigned align, memsize, align_bits;
22580 gcc_assert (MEM_P (x));
22581 addr = XEXP (x, 0);
22582 if (GET_CODE (addr) == POST_INC)
22584 postinc = 1;
22585 addr = XEXP (addr, 0);
22587 if (GET_CODE (addr) == POST_MODIFY)
22589 postinc_reg = XEXP( XEXP (addr, 1), 1);
22590 addr = XEXP (addr, 0);
22592 asm_fprintf (stream, "[%r", REGNO (addr));
22594 /* We know the alignment of this access, so we can emit a hint in the
22595 instruction (for some alignments) as an aid to the memory subsystem
22596 of the target. */
22597 align = MEM_ALIGN (x) >> 3;
22598 memsize = MEM_SIZE (x);
22600 /* Only certain alignment specifiers are supported by the hardware. */
22601 if (memsize == 32 && (align % 32) == 0)
22602 align_bits = 256;
22603 else if ((memsize == 16 || memsize == 32) && (align % 16) == 0)
22604 align_bits = 128;
22605 else if (memsize >= 8 && (align % 8) == 0)
22606 align_bits = 64;
22607 else
22608 align_bits = 0;
22610 if (align_bits != 0)
22611 asm_fprintf (stream, ":%d", align_bits);
22613 asm_fprintf (stream, "]");
22615 if (postinc)
22616 fputs("!", stream);
22617 if (postinc_reg)
22618 asm_fprintf (stream, ", %r", REGNO (postinc_reg));
22620 return;
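/* For illustration: a 16-byte vld1/vst1 access through r0 that is known to
   be 128-bit aligned prints as "[r0:128]", or "[r0:128]!" with
   post-increment; without usable alignment information it is just
   "[r0]".  */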
22622 case 'C':
22624 rtx addr;
22626 gcc_assert (MEM_P (x));
22627 addr = XEXP (x, 0);
22628 gcc_assert (REG_P (addr));
22629 asm_fprintf (stream, "[%r]", REGNO (addr));
22631 return;
22633 /* Translate an S register number into a D register number and element index. */
22634 case 'y':
22636 machine_mode mode = GET_MODE (x);
22637 int regno;
22639 if (GET_MODE_SIZE (mode) != 4 || !REG_P (x))
22641 output_operand_lossage ("invalid operand for code '%c'", code);
22642 return;
22645 regno = REGNO (x);
22646 if (!VFP_REGNO_OK_FOR_SINGLE (regno))
22648 output_operand_lossage ("invalid operand for code '%c'", code);
22649 return;
22652 regno = regno - FIRST_VFP_REGNUM;
22653 fprintf (stream, "d%d[%d]", regno / 2, regno % 2);
22655 return;
22657 case 'v':
22658 gcc_assert (CONST_DOUBLE_P (x));
22659 int result;
22660 result = vfp3_const_double_for_fract_bits (x);
22661 if (result == 0)
22662 result = vfp3_const_double_for_bits (x);
22663 fprintf (stream, "#%d", result);
22664 return;
22666 /* Register specifier for vld1.16/vst1.16. Translate the S register
22667 number into a D register number and element index. */
22668 case 'z':
22670 machine_mode mode = GET_MODE (x);
22671 int regno;
22673 if (GET_MODE_SIZE (mode) != 2 || !REG_P (x))
22675 output_operand_lossage ("invalid operand for code '%c'", code);
22676 return;
22679 regno = REGNO (x);
22680 if (!VFP_REGNO_OK_FOR_SINGLE (regno))
22682 output_operand_lossage ("invalid operand for code '%c'", code);
22683 return;
22686 regno = regno - FIRST_VFP_REGNUM;
22687 fprintf (stream, "d%d[%d]", regno/2, ((regno % 2) ? 2 : 0));
22689 return;
22691 default:
22692 if (x == 0)
22694 output_operand_lossage ("missing operand");
22695 return;
22698 switch (GET_CODE (x))
22700 case REG:
22701 asm_fprintf (stream, "%r", REGNO (x));
22702 break;
22704 case MEM:
22705 output_address (GET_MODE (x), XEXP (x, 0));
22706 break;
22708 case CONST_DOUBLE:
22710 char fpstr[20];
22711 real_to_decimal (fpstr, CONST_DOUBLE_REAL_VALUE (x),
22712 sizeof (fpstr), 0, 1);
22713 fprintf (stream, "#%s", fpstr);
22715 break;
22717 default:
22718 gcc_assert (GET_CODE (x) != NEG);
22719 fputc ('#', stream);
22720 if (GET_CODE (x) == HIGH)
22722 fputs (":lower16:", stream);
22723 x = XEXP (x, 0);
22726 output_addr_const (stream, x);
22727 break;
22732 /* Target hook for printing a memory address. */
22733 static void
22734 arm_print_operand_address (FILE *stream, machine_mode mode, rtx x)
22736 if (TARGET_32BIT)
22738 int is_minus = GET_CODE (x) == MINUS;
22740 if (REG_P (x))
22741 asm_fprintf (stream, "[%r]", REGNO (x));
22742 else if (GET_CODE (x) == PLUS || is_minus)
22744 rtx base = XEXP (x, 0);
22745 rtx index = XEXP (x, 1);
22746 HOST_WIDE_INT offset = 0;
22747 if (!REG_P (base)
22748 || (REG_P (index) && REGNO (index) == SP_REGNUM))
22750 /* Ensure that BASE is a register. */
22751 /* (one of them must be). */
22752 /* Also ensure that SP is not used as an index register. */
22753 std::swap (base, index);
22755 switch (GET_CODE (index))
22757 case CONST_INT:
22758 offset = INTVAL (index);
22759 if (is_minus)
22760 offset = -offset;
22761 asm_fprintf (stream, "[%r, #%wd]",
22762 REGNO (base), offset);
22763 break;
22765 case REG:
22766 asm_fprintf (stream, "[%r, %s%r]",
22767 REGNO (base), is_minus ? "-" : "",
22768 REGNO (index));
22769 break;
22771 case MULT:
22772 case ASHIFTRT:
22773 case LSHIFTRT:
22774 case ASHIFT:
22775 case ROTATERT:
22777 asm_fprintf (stream, "[%r, %s%r",
22778 REGNO (base), is_minus ? "-" : "",
22779 REGNO (XEXP (index, 0)));
22780 arm_print_operand (stream, index, 'S');
22781 fputs ("]", stream);
22782 break;
22785 default:
22786 gcc_unreachable ();
22789 else if (GET_CODE (x) == PRE_INC || GET_CODE (x) == POST_INC
22790 || GET_CODE (x) == PRE_DEC || GET_CODE (x) == POST_DEC)
22792 gcc_assert (REG_P (XEXP (x, 0)));
22794 if (GET_CODE (x) == PRE_DEC || GET_CODE (x) == PRE_INC)
22795 asm_fprintf (stream, "[%r, #%s%d]!",
22796 REGNO (XEXP (x, 0)),
22797 GET_CODE (x) == PRE_DEC ? "-" : "",
22798 GET_MODE_SIZE (mode));
22799 else
22800 asm_fprintf (stream, "[%r], #%s%d",
22801 REGNO (XEXP (x, 0)),
22802 GET_CODE (x) == POST_DEC ? "-" : "",
22803 GET_MODE_SIZE (mode));
22805 else if (GET_CODE (x) == PRE_MODIFY)
22807 asm_fprintf (stream, "[%r, ", REGNO (XEXP (x, 0)));
22808 if (CONST_INT_P (XEXP (XEXP (x, 1), 1)))
22809 asm_fprintf (stream, "#%wd]!",
22810 INTVAL (XEXP (XEXP (x, 1), 1)));
22811 else
22812 asm_fprintf (stream, "%r]!",
22813 REGNO (XEXP (XEXP (x, 1), 1)));
22815 else if (GET_CODE (x) == POST_MODIFY)
22817 asm_fprintf (stream, "[%r], ", REGNO (XEXP (x, 0)));
22818 if (CONST_INT_P (XEXP (XEXP (x, 1), 1)))
22819 asm_fprintf (stream, "#%wd",
22820 INTVAL (XEXP (XEXP (x, 1), 1)));
22821 else
22822 asm_fprintf (stream, "%r",
22823 REGNO (XEXP (XEXP (x, 1), 1)));
22825 else output_addr_const (stream, x);
22827 else
22829 if (REG_P (x))
22830 asm_fprintf (stream, "[%r]", REGNO (x));
22831 else if (GET_CODE (x) == POST_INC)
22832 asm_fprintf (stream, "%r!", REGNO (XEXP (x, 0)));
22833 else if (GET_CODE (x) == PLUS)
22835 gcc_assert (REG_P (XEXP (x, 0)));
22836 if (CONST_INT_P (XEXP (x, 1)))
22837 asm_fprintf (stream, "[%r, #%wd]",
22838 REGNO (XEXP (x, 0)),
22839 INTVAL (XEXP (x, 1)));
22840 else
22841 asm_fprintf (stream, "[%r, %r]",
22842 REGNO (XEXP (x, 0)),
22843 REGNO (XEXP (x, 1)));
22845 else
22846 output_addr_const (stream, x);
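/* For illustration, the 32-bit address forms printed above include "[r0]",
   "[r1, #4]", "[r2, r3]", "[r2, -r3]", "[r4, r5, lsl #2]", "[r6, #-8]!" and
   "[r7], #4", while the Thumb-1 forms include "[r0]", "[r1, #4]", "[r2, r3]"
   and "r4!".  */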
22850 /* Target hook for indicating whether a punctuation character for
22851 TARGET_PRINT_OPERAND is valid. */
22852 static bool
22853 arm_print_operand_punct_valid_p (unsigned char code)
22855 return (code == '@' || code == '|' || code == '.'
22856 || code == '(' || code == ')' || code == '#'
22857 || (TARGET_32BIT && (code == '?'))
22858 || (TARGET_THUMB2 && (code == '!'))
22859 || (TARGET_THUMB && (code == '_')));
22862 /* Target hook for assembling integer objects. The ARM version needs to
22863 handle word-sized values specially. */
22864 static bool
22865 arm_assemble_integer (rtx x, unsigned int size, int aligned_p)
22867 machine_mode mode;
22869 if (size == UNITS_PER_WORD && aligned_p)
22871 fputs ("\t.word\t", asm_out_file);
22872 output_addr_const (asm_out_file, x);
22874 /* Mark symbols as position independent. We only do this in the
22875 .text segment, not in the .data segment. */
22876 if (NEED_GOT_RELOC && flag_pic && making_const_table &&
22877 (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF))
22879 /* See legitimize_pic_address for an explanation of the
22880 TARGET_VXWORKS_RTP check. */
22881 /* References to weak symbols cannot be resolved locally:
22882 they may be overridden by a non-weak definition at link
22883 time. */
22884 if (!arm_pic_data_is_text_relative
22885 || (GET_CODE (x) == SYMBOL_REF
22886 && (!SYMBOL_REF_LOCAL_P (x)
22887 || (SYMBOL_REF_DECL (x)
22888 ? DECL_WEAK (SYMBOL_REF_DECL (x)) : 0))))
22889 fputs ("(GOT)", asm_out_file);
22890 else
22891 fputs ("(GOTOFF)", asm_out_file);
22893 fputc ('\n', asm_out_file);
22894 return true;
22897 mode = GET_MODE (x);
22899 if (arm_vector_mode_supported_p (mode))
22901 int i, units;
22903 gcc_assert (GET_CODE (x) == CONST_VECTOR);
22905 units = CONST_VECTOR_NUNITS (x);
22906 size = GET_MODE_UNIT_SIZE (mode);
22908 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
22909 for (i = 0; i < units; i++)
22911 rtx elt = CONST_VECTOR_ELT (x, i);
22912 assemble_integer
22913 (elt, size, i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT, 1);
22915 else
22916 for (i = 0; i < units; i++)
22918 rtx elt = CONST_VECTOR_ELT (x, i);
22919 assemble_real
22920 (*CONST_DOUBLE_REAL_VALUE (elt),
22921 as_a <scalar_float_mode> (GET_MODE_INNER (mode)),
22922 i == 0 ? BIGGEST_ALIGNMENT : size * BITS_PER_UNIT);
22925 return true;
22928 return default_assemble_integer (x, size, aligned_p);
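/* For illustration: when a word-sized SYMBOL_REF is emitted into a PIC
   constant pool, a symbol that cannot be resolved locally (data not
   text-relative, non-local, or weak) comes out as e.g. "\t.word\tfoo(GOT)",
   while a locally resolvable one comes out as "\t.word\tfoo(GOTOFF)".
   Vector constants are simply decomposed into one assemble_integer or
   assemble_real call per element.  */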
22931 static void
22932 arm_elf_asm_cdtor (rtx symbol, int priority, bool is_ctor)
22934 section *s;
22936 if (!TARGET_AAPCS_BASED)
22938 (is_ctor ?
22939 default_named_section_asm_out_constructor
22940 : default_named_section_asm_out_destructor) (symbol, priority);
22941 return;
22944 /* Put these in the .init_array section, using a special relocation. */
22945 if (priority != DEFAULT_INIT_PRIORITY)
22947 char buf[18];
22948 sprintf (buf, "%s.%.5u",
22949 is_ctor ? ".init_array" : ".fini_array",
22950 priority);
22951 s = get_section (buf, SECTION_WRITE | SECTION_NOTYPE, NULL_TREE);
22953 else if (is_ctor)
22954 s = ctors_section;
22955 else
22956 s = dtors_section;
22958 switch_to_section (s);
22959 assemble_align (POINTER_SIZE);
22960 fputs ("\t.word\t", asm_out_file);
22961 output_addr_const (asm_out_file, symbol);
22962 fputs ("(target1)\n", asm_out_file);
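/* For illustration: on an AAPCS target, a constructor with priority 101 is
   placed in section ".init_array.00101" (note the "%.5u" zero padding) and
   emitted as "\t.word\tsymbol(target1)"; default-priority entries go to
   ctors_section or dtors_section instead.  */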
22965 /* Add a function to the list of static constructors. */
22967 static void
22968 arm_elf_asm_constructor (rtx symbol, int priority)
22970 arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/true);
22973 /* Add a function to the list of static destructors. */
22975 static void
22976 arm_elf_asm_destructor (rtx symbol, int priority)
22978 arm_elf_asm_cdtor (symbol, priority, /*is_ctor=*/false);
22981 /* A finite state machine takes care of noticing whether or not instructions
22982 can be conditionally executed, and thus decrease execution time and code
22983 size by deleting branch instructions. The fsm is controlled by
22984 final_prescan_insn, and controls the actions of ASM_OUTPUT_OPCODE. */
22986 /* The states of the fsm controlling condition codes are:
22987 0: normal, do nothing special
22988 1: make ASM_OUTPUT_OPCODE not output this instruction
22989 2: make ASM_OUTPUT_OPCODE not output this instruction
22990 3: make instructions conditional
22991 4: make instructions conditional
22993 State transitions (state->state by whom under condition):
22994 0 -> 1 final_prescan_insn if the `target' is a label
22995 0 -> 2 final_prescan_insn if the `target' is an unconditional branch
22996 1 -> 3 ASM_OUTPUT_OPCODE after not having output the conditional branch
22997 2 -> 4 ASM_OUTPUT_OPCODE after not having output the conditional branch
22998 3 -> 0 (*targetm.asm_out.internal_label) if the `target' label is reached
22999 (the target label has CODE_LABEL_NUMBER equal to arm_target_label).
23000 4 -> 0 final_prescan_insn if the `target' unconditional branch is reached
23001 (the target insn is arm_target_insn).
23003 If the jump clobbers the conditions then we use states 2 and 4.
23005 A similar thing can be done with conditional return insns.
23007 XXX In case the `target' is an unconditional branch, this conditionalising
23008 of the instructions always reduces code size, but not always execution
23009 time. But then, I want to reduce the code size to somewhere near what
23010 /bin/cc produces. */
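/* A typical example of the transformation (for illustration only):

	cmp	r0, #0
	beq	.L1
	add	r1, r1, #1
	mov	r2, #0
     .L1:

   becomes

	cmp	r0, #0
	addne	r1, r1, #1
	movne	r2, #0

   The conditional branch is not output (states 0 -> 1 -> 3) and the insns it
   would have skipped are output with the inverse condition instead.  */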
23012 /* In addition to this, state is maintained for Thumb-2 COND_EXEC
23013 instructions. When a COND_EXEC instruction is seen the subsequent
23014 instructions are scanned so that multiple conditional instructions can be
23015 combined into a single IT block. arm_condexec_count and arm_condexec_mask
23016 specify the length and true/false mask for the IT block. These will be
23017 decremented/zeroed by arm_asm_output_opcode as the insns are output. */
23019 /* Returns the index of the ARM condition code string in
23020 `arm_condition_codes', or ARM_NV if the comparison is invalid.
23021 COMPARISON should be an rtx like `(eq (...) (...))'. */
23023 enum arm_cond_code
23024 maybe_get_arm_condition_code (rtx comparison)
23026 machine_mode mode = GET_MODE (XEXP (comparison, 0));
23027 enum arm_cond_code code;
23028 enum rtx_code comp_code = GET_CODE (comparison);
23030 if (GET_MODE_CLASS (mode) != MODE_CC)
23031 mode = SELECT_CC_MODE (comp_code, XEXP (comparison, 0),
23032 XEXP (comparison, 1));
23034 switch (mode)
23036 case E_CC_DNEmode: code = ARM_NE; goto dominance;
23037 case E_CC_DEQmode: code = ARM_EQ; goto dominance;
23038 case E_CC_DGEmode: code = ARM_GE; goto dominance;
23039 case E_CC_DGTmode: code = ARM_GT; goto dominance;
23040 case E_CC_DLEmode: code = ARM_LE; goto dominance;
23041 case E_CC_DLTmode: code = ARM_LT; goto dominance;
23042 case E_CC_DGEUmode: code = ARM_CS; goto dominance;
23043 case E_CC_DGTUmode: code = ARM_HI; goto dominance;
23044 case E_CC_DLEUmode: code = ARM_LS; goto dominance;
23045 case E_CC_DLTUmode: code = ARM_CC;
23047 dominance:
23048 if (comp_code == EQ)
23049 return ARM_INVERSE_CONDITION_CODE (code);
23050 if (comp_code == NE)
23051 return code;
23052 return ARM_NV;
23054 case E_CC_NOOVmode:
23055 switch (comp_code)
23057 case NE: return ARM_NE;
23058 case EQ: return ARM_EQ;
23059 case GE: return ARM_PL;
23060 case LT: return ARM_MI;
23061 default: return ARM_NV;
23064 case E_CC_Zmode:
23065 switch (comp_code)
23067 case NE: return ARM_NE;
23068 case EQ: return ARM_EQ;
23069 default: return ARM_NV;
23072 case E_CC_Nmode:
23073 switch (comp_code)
23075 case NE: return ARM_MI;
23076 case EQ: return ARM_PL;
23077 default: return ARM_NV;
23080 case E_CCFPEmode:
23081 case E_CCFPmode:
23082 /* We can handle all cases except UNEQ and LTGT. */
23083 switch (comp_code)
23085 case GE: return ARM_GE;
23086 case GT: return ARM_GT;
23087 case LE: return ARM_LS;
23088 case LT: return ARM_MI;
23089 case NE: return ARM_NE;
23090 case EQ: return ARM_EQ;
23091 case ORDERED: return ARM_VC;
23092 case UNORDERED: return ARM_VS;
23093 case UNLT: return ARM_LT;
23094 case UNLE: return ARM_LE;
23095 case UNGT: return ARM_HI;
23096 case UNGE: return ARM_PL;
23097 /* UNEQ and LTGT do not have a representation. */
23098 case UNEQ: /* Fall through. */
23099 case LTGT: /* Fall through. */
23100 default: return ARM_NV;
23103 case E_CC_SWPmode:
23104 switch (comp_code)
23106 case NE: return ARM_NE;
23107 case EQ: return ARM_EQ;
23108 case GE: return ARM_LE;
23109 case GT: return ARM_LT;
23110 case LE: return ARM_GE;
23111 case LT: return ARM_GT;
23112 case GEU: return ARM_LS;
23113 case GTU: return ARM_CC;
23114 case LEU: return ARM_CS;
23115 case LTU: return ARM_HI;
23116 default: return ARM_NV;
23119 case E_CC_Cmode:
23120 switch (comp_code)
23122 case LTU: return ARM_CS;
23123 case GEU: return ARM_CC;
23124 case NE: return ARM_CS;
23125 case EQ: return ARM_CC;
23126 default: return ARM_NV;
23129 case E_CC_CZmode:
23130 switch (comp_code)
23132 case NE: return ARM_NE;
23133 case EQ: return ARM_EQ;
23134 case GEU: return ARM_CS;
23135 case GTU: return ARM_HI;
23136 case LEU: return ARM_LS;
23137 case LTU: return ARM_CC;
23138 default: return ARM_NV;
23141 case E_CC_NCVmode:
23142 switch (comp_code)
23144 case GE: return ARM_GE;
23145 case LT: return ARM_LT;
23146 case GEU: return ARM_CS;
23147 case LTU: return ARM_CC;
23148 default: return ARM_NV;
23151 case E_CC_Vmode:
23152 switch (comp_code)
23154 case NE: return ARM_VS;
23155 case EQ: return ARM_VC;
23156 default: return ARM_NV;
23159 case E_CCmode:
23160 switch (comp_code)
23162 case NE: return ARM_NE;
23163 case EQ: return ARM_EQ;
23164 case GE: return ARM_GE;
23165 case GT: return ARM_GT;
23166 case LE: return ARM_LE;
23167 case LT: return ARM_LT;
23168 case GEU: return ARM_CS;
23169 case GTU: return ARM_HI;
23170 case LEU: return ARM_LS;
23171 case LTU: return ARM_CC;
23172 default: return ARM_NV;
23175 default: gcc_unreachable ();
23179 /* Like maybe_get_arm_condition_code, but never return ARM_NV. */
23180 static enum arm_cond_code
23181 get_arm_condition_code (rtx comparison)
23183 enum arm_cond_code code = maybe_get_arm_condition_code (comparison);
23184 gcc_assert (code != ARM_NV);
23185 return code;
23188 /* Implement TARGET_FIXED_CONDITION_CODE_REGS. We only have condition
23189 code registers when not targeting Thumb1. The VFP condition register
23190 only exists when generating hard-float code. */
23191 static bool
23192 arm_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
23194 if (!TARGET_32BIT)
23195 return false;
23197 *p1 = CC_REGNUM;
23198 *p2 = TARGET_HARD_FLOAT ? VFPCC_REGNUM : INVALID_REGNUM;
23199 return true;
23202 /* Tell arm_asm_output_opcode to output IT blocks for conditionally executed
23203 instructions. */
23204 void
23205 thumb2_final_prescan_insn (rtx_insn *insn)
23207 rtx_insn *first_insn = insn;
23208 rtx body = PATTERN (insn);
23209 rtx predicate;
23210 enum arm_cond_code code;
23211 int n;
23212 int mask;
23213 int max;
23215 /* max_insns_skipped in the tune was already taken into account in the
23216 cost model of the ifcvt pass when generating COND_EXEC insns. At this stage
23217 just emit the IT blocks as best we can. It does not make sense to split
23218 the IT blocks. */
23219 max = MAX_INSN_PER_IT_BLOCK;
23221 /* Remove the previous insn from the count of insns to be output. */
23222 if (arm_condexec_count)
23223 arm_condexec_count--;
23225 /* Nothing to do if we are already inside a conditional block. */
23226 if (arm_condexec_count)
23227 return;
23229 if (GET_CODE (body) != COND_EXEC)
23230 return;
23232 /* Conditional jumps are implemented directly. */
23233 if (JUMP_P (insn))
23234 return;
23236 predicate = COND_EXEC_TEST (body);
23237 arm_current_cc = get_arm_condition_code (predicate);
23239 n = get_attr_ce_count (insn);
23240 arm_condexec_count = 1;
23241 arm_condexec_mask = (1 << n) - 1;
23242 arm_condexec_masklen = n;
23243 /* See if subsequent instructions can be combined into the same block. */
23244 for (;;)
23246 insn = next_nonnote_insn (insn);
23248 /* Jumping into the middle of an IT block is illegal, so a label or
23249 barrier terminates the block. */
23250 if (!NONJUMP_INSN_P (insn) && !JUMP_P (insn))
23251 break;
23253 body = PATTERN (insn);
23254 /* USE and CLOBBER aren't really insns, so just skip them. */
23255 if (GET_CODE (body) == USE
23256 || GET_CODE (body) == CLOBBER)
23257 continue;
23259 /* ??? Recognize conditional jumps, and combine them with IT blocks. */
23260 if (GET_CODE (body) != COND_EXEC)
23261 break;
23262 /* Maximum number of conditionally executed instructions in a block. */
23263 n = get_attr_ce_count (insn);
23264 if (arm_condexec_masklen + n > max)
23265 break;
23267 predicate = COND_EXEC_TEST (body);
23268 code = get_arm_condition_code (predicate);
23269 mask = (1 << n) - 1;
23270 if (arm_current_cc == code)
23271 arm_condexec_mask |= (mask << arm_condexec_masklen);
23272 else if (arm_current_cc != ARM_INVERSE_CONDITION_CODE(code))
23273 break;
23275 arm_condexec_count++;
23276 arm_condexec_masklen += n;
23278 /* A jump must be the last instruction in a conditional block. */
23279 if (JUMP_P (insn))
23280 break;
23282 /* Restore recog_data (getting the attributes of other insns can
23283 destroy this array, but final.c assumes that it remains intact
23284 across this call). */
23285 extract_constrain_insn_cached (first_insn);
23288 void
23289 arm_final_prescan_insn (rtx_insn *insn)
23291 /* BODY will hold the body of INSN. */
23292 rtx body = PATTERN (insn);
23294 /* This will be 1 if trying to repeat the trick, and things need to be
23295 reversed if it appears to fail. */
23296 int reverse = 0;
23298 /* If we start with a return insn, we only succeed if we find another one. */
23299 int seeking_return = 0;
23300 enum rtx_code return_code = UNKNOWN;
23302 /* START_INSN will hold the insn from where we start looking. This is the
23303 first insn after the following code_label if REVERSE is true. */
23304 rtx_insn *start_insn = insn;
23306 /* If in state 4, check if the target branch is reached, in order to
23307 change back to state 0. */
23308 if (arm_ccfsm_state == 4)
23310 if (insn == arm_target_insn)
23312 arm_target_insn = NULL;
23313 arm_ccfsm_state = 0;
23315 return;
23318 /* If in state 3, it is possible to repeat the trick, if this insn is an
23319 unconditional branch to a label, and immediately following this branch
23320 is the previous target label which is only used once, and the label this
23321 branch jumps to is not too far off. */
23322 if (arm_ccfsm_state == 3)
23324 if (simplejump_p (insn))
23326 start_insn = next_nonnote_insn (start_insn);
23327 if (BARRIER_P (start_insn))
23329 /* XXX Isn't this always a barrier? */
23330 start_insn = next_nonnote_insn (start_insn);
23332 if (LABEL_P (start_insn)
23333 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
23334 && LABEL_NUSES (start_insn) == 1)
23335 reverse = TRUE;
23336 else
23337 return;
23339 else if (ANY_RETURN_P (body))
23341 start_insn = next_nonnote_insn (start_insn);
23342 if (BARRIER_P (start_insn))
23343 start_insn = next_nonnote_insn (start_insn);
23344 if (LABEL_P (start_insn)
23345 && CODE_LABEL_NUMBER (start_insn) == arm_target_label
23346 && LABEL_NUSES (start_insn) == 1)
23348 reverse = TRUE;
23349 seeking_return = 1;
23350 return_code = GET_CODE (body);
23352 else
23353 return;
23355 else
23356 return;
23359 gcc_assert (!arm_ccfsm_state || reverse);
23360 if (!JUMP_P (insn))
23361 return;
23363 /* This jump might be paralleled with a clobber of the condition codes;
23364 the jump should always come first. */
23365 if (GET_CODE (body) == PARALLEL && XVECLEN (body, 0) > 0)
23366 body = XVECEXP (body, 0, 0);
23368 if (reverse
23369 || (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == PC
23370 && GET_CODE (SET_SRC (body)) == IF_THEN_ELSE))
23372 int insns_skipped;
23373 int fail = FALSE, succeed = FALSE;
23374 /* Flag which part of the IF_THEN_ELSE is the LABEL_REF. */
23375 int then_not_else = TRUE;
23376 rtx_insn *this_insn = start_insn;
23377 rtx label = 0;
23379 /* Register the insn jumped to. */
23380 if (reverse)
23382 if (!seeking_return)
23383 label = XEXP (SET_SRC (body), 0);
23385 else if (GET_CODE (XEXP (SET_SRC (body), 1)) == LABEL_REF)
23386 label = XEXP (XEXP (SET_SRC (body), 1), 0);
23387 else if (GET_CODE (XEXP (SET_SRC (body), 2)) == LABEL_REF)
23389 label = XEXP (XEXP (SET_SRC (body), 2), 0);
23390 then_not_else = FALSE;
23392 else if (ANY_RETURN_P (XEXP (SET_SRC (body), 1)))
23394 seeking_return = 1;
23395 return_code = GET_CODE (XEXP (SET_SRC (body), 1));
23397 else if (ANY_RETURN_P (XEXP (SET_SRC (body), 2)))
23399 seeking_return = 1;
23400 return_code = GET_CODE (XEXP (SET_SRC (body), 2));
23401 then_not_else = FALSE;
23403 else
23404 gcc_unreachable ();
23406 /* See how many insns this branch skips, and what kind of insns. If all
23407 insns are okay, and the label or unconditional branch to the same
23408 label is not too far away, succeed. */
23409 for (insns_skipped = 0;
23410 !fail && !succeed && insns_skipped++ < max_insns_skipped;)
23412 rtx scanbody;
23414 this_insn = next_nonnote_insn (this_insn);
23415 if (!this_insn)
23416 break;
23418 switch (GET_CODE (this_insn))
23420 case CODE_LABEL:
23421 /* Succeed if it is the target label, otherwise fail since
23422 control falls in from somewhere else. */
23423 if (this_insn == label)
23425 arm_ccfsm_state = 1;
23426 succeed = TRUE;
23428 else
23429 fail = TRUE;
23430 break;
23432 case BARRIER:
23433 /* Succeed if the following insn is the target label.
23434 Otherwise fail.
23435 If return insns are used then the last insn in a function
23436 will be a barrier. */
23437 this_insn = next_nonnote_insn (this_insn);
23438 if (this_insn && this_insn == label)
23440 arm_ccfsm_state = 1;
23441 succeed = TRUE;
23443 else
23444 fail = TRUE;
23445 break;
23447 case CALL_INSN:
23448 /* The AAPCS says that conditional calls should not be
23449 used since they make interworking inefficient (the
23450 linker can't transform BL<cond> into BLX). That's
23451 only a problem if the machine has BLX. */
23452 if (arm_arch5)
23454 fail = TRUE;
23455 break;
23458 /* Succeed if the following insn is the target label, or
23459 if the following two insns are a barrier and the
23460 target label. */
23461 this_insn = next_nonnote_insn (this_insn);
23462 if (this_insn && BARRIER_P (this_insn))
23463 this_insn = next_nonnote_insn (this_insn);
23465 if (this_insn && this_insn == label
23466 && insns_skipped < max_insns_skipped)
23468 arm_ccfsm_state = 1;
23469 succeed = TRUE;
23471 else
23472 fail = TRUE;
23473 break;
23475 case JUMP_INSN:
23476 /* If this is an unconditional branch to the same label, succeed.
23477 If it is to another label, do nothing. If it is conditional,
23478 fail. */
23479 /* XXX Probably, the tests for SET and the PC are
23480 unnecessary. */
23482 scanbody = PATTERN (this_insn);
23483 if (GET_CODE (scanbody) == SET
23484 && GET_CODE (SET_DEST (scanbody)) == PC)
23486 if (GET_CODE (SET_SRC (scanbody)) == LABEL_REF
23487 && XEXP (SET_SRC (scanbody), 0) == label && !reverse)
23489 arm_ccfsm_state = 2;
23490 succeed = TRUE;
23492 else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
23493 fail = TRUE;
23495 /* Fail if a conditional return is undesirable (e.g. on a
23496 StrongARM), but still allow this if optimizing for size. */
23497 else if (GET_CODE (scanbody) == return_code
23498 && !use_return_insn (TRUE, NULL)
23499 && !optimize_size)
23500 fail = TRUE;
23501 else if (GET_CODE (scanbody) == return_code)
23503 arm_ccfsm_state = 2;
23504 succeed = TRUE;
23506 else if (GET_CODE (scanbody) == PARALLEL)
23508 switch (get_attr_conds (this_insn))
23510 case CONDS_NOCOND:
23511 break;
23512 default:
23513 fail = TRUE;
23514 break;
23517 else
23518 fail = TRUE; /* Unrecognized jump (e.g. epilogue). */
23520 break;
23522 case INSN:
23523 /* Instructions using or affecting the condition codes make it
23524 fail. */
23525 scanbody = PATTERN (this_insn);
23526 if (!(GET_CODE (scanbody) == SET
23527 || GET_CODE (scanbody) == PARALLEL)
23528 || get_attr_conds (this_insn) != CONDS_NOCOND)
23529 fail = TRUE;
23530 break;
23532 default:
23533 break;
23536 if (succeed)
23538 if ((!seeking_return) && (arm_ccfsm_state == 1 || reverse))
23539 arm_target_label = CODE_LABEL_NUMBER (label);
23540 else
23542 gcc_assert (seeking_return || arm_ccfsm_state == 2);
23544 while (this_insn && GET_CODE (PATTERN (this_insn)) == USE)
23546 this_insn = next_nonnote_insn (this_insn);
23547 gcc_assert (!this_insn
23548 || (!BARRIER_P (this_insn)
23549 && !LABEL_P (this_insn)));
23551 if (!this_insn)
23553 /* Oh, dear! We ran off the end; give up. */
23554 extract_constrain_insn_cached (insn);
23555 arm_ccfsm_state = 0;
23556 arm_target_insn = NULL;
23557 return;
23559 arm_target_insn = this_insn;
23562 /* If REVERSE is true, ARM_CURRENT_CC needs to be inverted from
23563 what it was. */
23564 if (!reverse)
23565 arm_current_cc = get_arm_condition_code (XEXP (SET_SRC (body), 0));
23567 if (reverse || then_not_else)
23568 arm_current_cc = ARM_INVERSE_CONDITION_CODE (arm_current_cc);
23571 /* Restore recog_data (getting the attributes of other insns can
23572 destroy this array, but final.c assumes that it remains intact
23573 across this call). */
23574 extract_constrain_insn_cached (insn);
23578 /* Output IT instructions. */
23579 void
23580 thumb2_asm_output_opcode (FILE * stream)
23582 char buff[5];
23583 int n;
23585 if (arm_condexec_mask)
23587 for (n = 0; n < arm_condexec_masklen; n++)
23588 buff[n] = (arm_condexec_mask & (1 << n)) ? 't' : 'e';
23589 buff[n] = 0;
23590 asm_fprintf(stream, "i%s\t%s\n\t", buff,
23591 arm_condition_codes[arm_current_cc]);
23592 arm_condexec_mask = 0;
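/* A self-contained sketch (not part of this file; the helper and its name
   are invented for illustration) of how a condexec mask and length map to an
   IT mnemonic, mirroring the loop above: bit N of the mask is set when insn
   N of the block uses the base condition.  */

#include <stdio.h>

static void
it_mnemonic (unsigned int mask, int len, char *out)
{
  int n;

  out[0] = 'i';
  for (n = 0; n < len; n++)
    out[n + 1] = (mask & (1u << n)) ? 't' : 'e';
  out[len + 1] = '\0';
}

int
main (void)
{
  char buf[6];

  it_mnemonic (0x1, 1, buf);	/* Prints "it": a single conditional insn.  */
  printf ("%s\n", buf);
  it_mnemonic (0x5, 3, buf);	/* Prints "itet": then, else, then.  */
  printf ("%s\n", buf);
  return 0;
}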
23596 /* Implement TARGET_HARD_REGNO_NREGS. On the ARM core regs are
23597 UNITS_PER_WORD bytes wide. */
23598 static unsigned int
23599 arm_hard_regno_nregs (unsigned int regno, machine_mode mode)
23601 if (TARGET_32BIT
23602 && regno > PC_REGNUM
23603 && regno != FRAME_POINTER_REGNUM
23604 && regno != ARG_POINTER_REGNUM
23605 && !IS_VFP_REGNUM (regno))
23606 return 1;
23608 return ARM_NUM_REGS (mode);
23611 /* Implement TARGET_HARD_REGNO_MODE_OK. */
23612 static bool
23613 arm_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
23615 if (GET_MODE_CLASS (mode) == MODE_CC)
23616 return (regno == CC_REGNUM
23617 || (TARGET_HARD_FLOAT
23618 && regno == VFPCC_REGNUM));
23620 if (regno == CC_REGNUM && GET_MODE_CLASS (mode) != MODE_CC)
23621 return false;
23623 if (TARGET_THUMB1)
23624 /* For the Thumb we only allow values bigger than SImode in
23625 registers 0 - 6, so that there is always a second low
23626 register available to hold the upper part of the value.
23627 We probably ought to ensure that the register is the
23628 start of an even numbered register pair. */
23629 return (ARM_NUM_REGS (mode) < 2) || (regno < LAST_LO_REGNUM);
23631 if (TARGET_HARD_FLOAT && IS_VFP_REGNUM (regno))
23633 if (mode == SFmode || mode == SImode)
23634 return VFP_REGNO_OK_FOR_SINGLE (regno);
23636 if (mode == DFmode)
23637 return VFP_REGNO_OK_FOR_DOUBLE (regno);
23639 if (mode == HFmode)
23640 return VFP_REGNO_OK_FOR_SINGLE (regno);
23642 /* VFP registers can hold HImode values. */
23643 if (mode == HImode)
23644 return VFP_REGNO_OK_FOR_SINGLE (regno);
23646 if (TARGET_NEON)
23647 return (VALID_NEON_DREG_MODE (mode) && VFP_REGNO_OK_FOR_DOUBLE (regno))
23648 || (VALID_NEON_QREG_MODE (mode)
23649 && NEON_REGNO_OK_FOR_QUAD (regno))
23650 || (mode == TImode && NEON_REGNO_OK_FOR_NREGS (regno, 2))
23651 || (mode == EImode && NEON_REGNO_OK_FOR_NREGS (regno, 3))
23652 || (mode == OImode && NEON_REGNO_OK_FOR_NREGS (regno, 4))
23653 || (mode == CImode && NEON_REGNO_OK_FOR_NREGS (regno, 6))
23654 || (mode == XImode && NEON_REGNO_OK_FOR_NREGS (regno, 8));
23656 return false;
23659 if (TARGET_REALLY_IWMMXT)
23661 if (IS_IWMMXT_GR_REGNUM (regno))
23662 return mode == SImode;
23664 if (IS_IWMMXT_REGNUM (regno))
23665 return VALID_IWMMXT_REG_MODE (mode);
23668 /* We allow almost any value to be stored in the general registers.
23669 Restrict doubleword quantities to even register pairs in ARM state
23670 so that we can use ldrd. Do not allow very large Neon structure
23671 opaque modes in general registers; they would use too many. */
23672 if (regno <= LAST_ARM_REGNUM)
23674 if (ARM_NUM_REGS (mode) > 4)
23675 return false;
23677 if (TARGET_THUMB2)
23678 return true;
23680 return !(TARGET_LDRD && GET_MODE_SIZE (mode) > 4 && (regno & 1) != 0);
23683 if (regno == FRAME_POINTER_REGNUM
23684 || regno == ARG_POINTER_REGNUM)
23685 /* We only allow integers in the fake hard registers. */
23686 return GET_MODE_CLASS (mode) == MODE_INT;
23688 return false;
23691 /* Implement TARGET_MODES_TIEABLE_P. */
23693 static bool
23694 arm_modes_tieable_p (machine_mode mode1, machine_mode mode2)
23696 if (GET_MODE_CLASS (mode1) == GET_MODE_CLASS (mode2))
23697 return true;
23699 /* We specifically want to allow elements of "structure" modes to
23700 be tieable to the structure. This more general condition allows
23701 other rarer situations too. */
23702 if (TARGET_NEON
23703 && (VALID_NEON_DREG_MODE (mode1)
23704 || VALID_NEON_QREG_MODE (mode1)
23705 || VALID_NEON_STRUCT_MODE (mode1))
23706 && (VALID_NEON_DREG_MODE (mode2)
23707 || VALID_NEON_QREG_MODE (mode2)
23708 || VALID_NEON_STRUCT_MODE (mode2)))
23709 return true;
23711 return false;
23714 /* For efficiency and historical reasons LO_REGS, HI_REGS and CC_REGS are
23715 not used in arm mode. */
23717 enum reg_class
23718 arm_regno_class (int regno)
23720 if (regno == PC_REGNUM)
23721 return NO_REGS;
23723 if (TARGET_THUMB1)
23725 if (regno == STACK_POINTER_REGNUM)
23726 return STACK_REG;
23727 if (regno == CC_REGNUM)
23728 return CC_REG;
23729 if (regno < 8)
23730 return LO_REGS;
23731 return HI_REGS;
23734 if (TARGET_THUMB2 && regno < 8)
23735 return LO_REGS;
23737 if ( regno <= LAST_ARM_REGNUM
23738 || regno == FRAME_POINTER_REGNUM
23739 || regno == ARG_POINTER_REGNUM)
23740 return TARGET_THUMB2 ? HI_REGS : GENERAL_REGS;
23742 if (regno == CC_REGNUM || regno == VFPCC_REGNUM)
23743 return TARGET_THUMB2 ? CC_REG : NO_REGS;
23745 if (IS_VFP_REGNUM (regno))
23747 if (regno <= D7_VFP_REGNUM)
23748 return VFP_D0_D7_REGS;
23749 else if (regno <= LAST_LO_VFP_REGNUM)
23750 return VFP_LO_REGS;
23751 else
23752 return VFP_HI_REGS;
23755 if (IS_IWMMXT_REGNUM (regno))
23756 return IWMMXT_REGS;
23758 if (IS_IWMMXT_GR_REGNUM (regno))
23759 return IWMMXT_GR_REGS;
23761 return NO_REGS;
23764 /* Handle a special case when computing the offset
23765 of an argument from the frame pointer. */
23767 arm_debugger_arg_offset (int value, rtx addr)
23769 rtx_insn *insn;
23771 /* We are only interested if dbxout_parms() failed to compute the offset. */
23772 if (value != 0)
23773 return 0;
23775 /* We can only cope with the case where the address is held in a register. */
23776 if (!REG_P (addr))
23777 return 0;
23779 /* If we are using the frame pointer to point at the argument, then
23780 an offset of 0 is correct. */
23781 if (REGNO (addr) == (unsigned) HARD_FRAME_POINTER_REGNUM)
23782 return 0;
23784 /* If we are using the stack pointer to point at the
23785 argument, then an offset of 0 is correct. */
23786 /* ??? Check this is consistent with thumb2 frame layout. */
23787 if ((TARGET_THUMB || !frame_pointer_needed)
23788 && REGNO (addr) == SP_REGNUM)
23789 return 0;
23791 /* Oh dear. The argument is pointed to by a register rather
23792 than being held in a register, or being stored at a known
23793 offset from the frame pointer. Since GDB only understands
23794 those two kinds of argument we must translate the address
23795 held in the register into an offset from the frame pointer.
23796 We do this by searching through the insns for the function
23797 looking to see where this register gets its value. If the
23798 register is initialized from the frame pointer plus an offset
23799 then we are in luck and we can continue, otherwise we give up.
23801 This code is exercised by producing debugging information
23802 for a function with arguments like this:
23804 double func (double a, double b, int c, double d) {return d;}
23806 Without this code the stab for parameter 'd' will be set to
23807 an offset of 0 from the frame pointer, rather than 8. */
23809 /* The if() statement says:
23811 If the insn is a normal instruction
23812 and if the insn is setting the value in a register
23813 and if the register being set is the register holding the address of the argument
23814 and if the address is computed by an addition
23815 that involves adding to a register
23816 which is the frame pointer
23817 a constant integer
23819 then... */
23821 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
23823 if ( NONJUMP_INSN_P (insn)
23824 && GET_CODE (PATTERN (insn)) == SET
23825 && REGNO (XEXP (PATTERN (insn), 0)) == REGNO (addr)
23826 && GET_CODE (XEXP (PATTERN (insn), 1)) == PLUS
23827 && REG_P (XEXP (XEXP (PATTERN (insn), 1), 0))
23828 && REGNO (XEXP (XEXP (PATTERN (insn), 1), 0)) == (unsigned) HARD_FRAME_POINTER_REGNUM
23829 && CONST_INT_P (XEXP (XEXP (PATTERN (insn), 1), 1))
23832 value = INTVAL (XEXP (XEXP (PATTERN (insn), 1), 1));
23834 break;
23838 if (value == 0)
23840 debug_rtx (addr);
23841 warning (0, "unable to compute real location of stacked parameter");
23842 value = 8; /* XXX magic hack */
23845 return value;
23848 /* Implement TARGET_PROMOTED_TYPE. */
23850 static tree
23851 arm_promoted_type (const_tree t)
23853 if (SCALAR_FLOAT_TYPE_P (t)
23854 && TYPE_PRECISION (t) == 16
23855 && TYPE_MAIN_VARIANT (t) == arm_fp16_type_node)
23856 return float_type_node;
23857 return NULL_TREE;
23860 /* Implement TARGET_SCALAR_MODE_SUPPORTED_P.
23861 This simply adds HFmode as a supported mode; even though we don't
23862 implement arithmetic on this type directly, it's supported by
23863 optabs conversions, much the way the double-word arithmetic is
23864 special-cased in the default hook. */
23866 static bool
23867 arm_scalar_mode_supported_p (scalar_mode mode)
23869 if (mode == HFmode)
23870 return (arm_fp16_format != ARM_FP16_FORMAT_NONE);
23871 else if (ALL_FIXED_POINT_MODE_P (mode))
23872 return true;
23873 else
23874 return default_scalar_mode_supported_p (mode);
23877 /* Set the value of FLT_EVAL_METHOD.
23878 ISO/IEC TS 18661-3 defines two values that we'd like to make use of:
23880 0: evaluate all operations and constants, whose semantic type has at
23881 most the range and precision of type float, to the range and
23882 precision of float; evaluate all other operations and constants to
23883 the range and precision of the semantic type;
23885 N, where _FloatN is a supported interchange floating type
23886 evaluate all operations and constants, whose semantic type has at
23887 most the range and precision of _FloatN type, to the range and
23888 precision of the _FloatN type; evaluate all other operations and
23889 constants to the range and precision of the semantic type;
23891 If we have the ARMv8.2-A extensions then we support _Float16 in native
23892 precision, so we should set this to 16. Otherwise, we support the type,
23893 but want to evaluate expressions in float precision, so set this to
23894 0. */
23896 static enum flt_eval_method
23897 arm_excess_precision (enum excess_precision_type type)
23899 switch (type)
23901 case EXCESS_PRECISION_TYPE_FAST:
23902 case EXCESS_PRECISION_TYPE_STANDARD:
23903 /* We can calculate either in 16-bit range and precision or
23904 32-bit range and precision. Make that decision based on whether
23905 we have native support for the ARMv8.2-A 16-bit floating-point
23906 instructions or not. */
23907 return (TARGET_VFP_FP16INST
23908 ? FLT_EVAL_METHOD_PROMOTE_TO_FLOAT16
23909 : FLT_EVAL_METHOD_PROMOTE_TO_FLOAT);
23910 case EXCESS_PRECISION_TYPE_IMPLICIT:
23911 return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT16;
23912 default:
23913 gcc_unreachable ();
23915 return FLT_EVAL_METHOD_UNPREDICTABLE;
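/* For example (assuming a target and source where _Float16/__fp16 arithmetic
   is available): without the ARMv8.2-A FP16 instructions, "a + b" with
   _Float16 operands is evaluated in float and converted back afterwards
   (FLT_EVAL_METHOD 0); with TARGET_VFP_FP16INST the addition is performed
   directly in 16-bit precision (FLT_EVAL_METHOD 16).  */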
23919 /* Implement TARGET_FLOATN_MODE. Make very sure that we don't provide
23920 _Float16 if we are using anything other than ieee format for 16-bit
23921 floating point. Otherwise, punt to the default implementation. */
23922 static opt_scalar_float_mode
23923 arm_floatn_mode (int n, bool extended)
23925 if (!extended && n == 16)
23927 if (arm_fp16_format == ARM_FP16_FORMAT_IEEE)
23928 return HFmode;
23929 return opt_scalar_float_mode ();
23932 return default_floatn_mode (n, extended);
23936 /* Set up OPERANDS for a register copy from SRC to DEST, taking care
23937 not to early-clobber SRC registers in the process.
23939 We assume that the operands described by SRC and DEST represent a
23940 decomposed copy of OPERANDS[1] into OPERANDS[0]. COUNT is the
23941 number of components into which the copy has been decomposed. */
23942 void
23943 neon_disambiguate_copy (rtx *operands, rtx *dest, rtx *src, unsigned int count)
23945 unsigned int i;
23947 if (!reg_overlap_mentioned_p (operands[0], operands[1])
23948 || REGNO (operands[0]) < REGNO (operands[1]))
23950 for (i = 0; i < count; i++)
23952 operands[2 * i] = dest[i];
23953 operands[2 * i + 1] = src[i];
23956 else
23958 for (i = 0; i < count; i++)
23960 operands[2 * i] = dest[count - i - 1];
23961 operands[2 * i + 1] = src[count - i - 1];
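/* For example, for a two-register copy d1:d2 := d0:d1 the destination
   overlaps the source and has the higher register number, so the components
   are ordered high-to-low (d2 := d1, then d1 := d0); emitting d1 := d0 first
   would clobber the second source register.  */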
23966 /* Split operands into moves from op[1] + op[2] into op[0]. */
23968 void
23969 neon_split_vcombine (rtx operands[3])
23971 unsigned int dest = REGNO (operands[0]);
23972 unsigned int src1 = REGNO (operands[1]);
23973 unsigned int src2 = REGNO (operands[2]);
23974 machine_mode halfmode = GET_MODE (operands[1]);
23975 unsigned int halfregs = REG_NREGS (operands[1]);
23976 rtx destlo, desthi;
23978 if (src1 == dest && src2 == dest + halfregs)
23980 /* No-op move. Can't split to nothing; emit something. */
23981 emit_note (NOTE_INSN_DELETED);
23982 return;
23985 /* Preserve register attributes for variable tracking. */
23986 destlo = gen_rtx_REG_offset (operands[0], halfmode, dest, 0);
23987 desthi = gen_rtx_REG_offset (operands[0], halfmode, dest + halfregs,
23988 GET_MODE_SIZE (halfmode));
23990 /* Special case of reversed high/low parts. Use VSWP. */
23991 if (src2 == dest && src1 == dest + halfregs)
23993 rtx x = gen_rtx_SET (destlo, operands[1]);
23994 rtx y = gen_rtx_SET (desthi, operands[2]);
23995 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x, y)));
23996 return;
23999 if (!reg_overlap_mentioned_p (operands[2], destlo))
24001 /* Try to avoid unnecessary moves if part of the result
24002 is in the right place already. */
24003 if (src1 != dest)
24004 emit_move_insn (destlo, operands[1]);
24005 if (src2 != dest + halfregs)
24006 emit_move_insn (desthi, operands[2]);
24008 else
24010 if (src2 != dest + halfregs)
24011 emit_move_insn (desthi, operands[2]);
24012 if (src1 != dest)
24013 emit_move_insn (destlo, operands[1]);
24017 /* Return the number (counting from 0) of
24018 the least significant set bit in MASK. */
24020 inline static int
24021 number_of_first_bit_set (unsigned mask)
24023 return ctz_hwi (mask);
24026 /* Like emit_multi_reg_push, but allowing for a different set of
24027 registers to be described as saved. MASK is the set of registers
24028 to be saved; REAL_REGS is the set of registers to be described as
24029 saved. If REAL_REGS is 0, only describe the stack adjustment. */
24031 static rtx_insn *
24032 thumb1_emit_multi_reg_push (unsigned long mask, unsigned long real_regs)
24034 unsigned long regno;
24035 rtx par[10], tmp, reg;
24036 rtx_insn *insn;
24037 int i, j;
24039 /* Build the parallel of the registers actually being stored. */
24040 for (i = 0; mask; ++i, mask &= mask - 1)
24042 regno = ctz_hwi (mask);
24043 reg = gen_rtx_REG (SImode, regno);
24045 if (i == 0)
24046 tmp = gen_rtx_UNSPEC (BLKmode, gen_rtvec (1, reg), UNSPEC_PUSH_MULT);
24047 else
24048 tmp = gen_rtx_USE (VOIDmode, reg);
24050 par[i] = tmp;
24053 tmp = plus_constant (Pmode, stack_pointer_rtx, -4 * i);
24054 tmp = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, tmp);
24055 tmp = gen_frame_mem (BLKmode, tmp);
24056 tmp = gen_rtx_SET (tmp, par[0]);
24057 par[0] = tmp;
24059 tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (i, par));
24060 insn = emit_insn (tmp);
24062 /* Always build the stack adjustment note for unwind info. */
24063 tmp = plus_constant (Pmode, stack_pointer_rtx, -4 * i);
24064 tmp = gen_rtx_SET (stack_pointer_rtx, tmp);
24065 par[0] = tmp;
24067 /* Build the parallel of the registers recorded as saved for unwind. */
24068 for (j = 0; real_regs; ++j, real_regs &= real_regs - 1)
24070 regno = ctz_hwi (real_regs);
24071 reg = gen_rtx_REG (SImode, regno);
24073 tmp = plus_constant (Pmode, stack_pointer_rtx, j * 4);
24074 tmp = gen_frame_mem (SImode, tmp);
24075 tmp = gen_rtx_SET (tmp, reg);
24076 RTX_FRAME_RELATED_P (tmp) = 1;
24077 par[j + 1] = tmp;
24080 if (j == 0)
24081 tmp = par[0];
24082 else
24084 RTX_FRAME_RELATED_P (par[0]) = 1;
24085 tmp = gen_rtx_SEQUENCE (VOIDmode, gen_rtvec_v (j + 1, par));
24088 add_reg_note (insn, REG_FRAME_RELATED_EXPR, tmp);
24090 return insn;
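/* For illustration: for MASK = {r4, r7, lr} this emits a single PARALLEL
   whose first element is
     (set (mem:BLK (pre_modify sp (plus sp -12))) (unspec [r4] UNSPEC_PUSH_MULT))
   followed by USEs of r7 and lr, and attaches a REG_FRAME_RELATED_EXPR note
   describing the sp adjustment plus one store per register in REAL_REGS, so
   the unwinder sees exactly the registers we claim were saved.  */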
24093 /* Emit code to push or pop registers to or from the stack. F is the
24094 assembly file. MASK is the registers to pop. */
24095 static void
24096 thumb_pop (FILE *f, unsigned long mask)
24098 int regno;
24099 int lo_mask = mask & 0xFF;
24101 gcc_assert (mask);
24103 if (lo_mask == 0 && (mask & (1 << PC_REGNUM)))
24105 /* Special case. Do not generate a POP PC statement here, do it in
24106 thumb_exit() */
24107 thumb_exit (f, -1);
24108 return;
24111 fprintf (f, "\tpop\t{");
24113 /* Look at the low registers first. */
24114 for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
24116 if (lo_mask & 1)
24118 asm_fprintf (f, "%r", regno);
24120 if ((lo_mask & ~1) != 0)
24121 fprintf (f, ", ");
24125 if (mask & (1 << PC_REGNUM))
24127 /* Catch popping the PC. */
24128 if (TARGET_INTERWORK || TARGET_BACKTRACE || crtl->calls_eh_return
24129 || IS_CMSE_ENTRY (arm_current_func_type ()))
24131 /* The PC is never popped directly; instead
24132 it is popped into r3 and then BX is used. */
24133 fprintf (f, "}\n");
24135 thumb_exit (f, -1);
24137 return;
24139 else
24141 if (mask & 0xFF)
24142 fprintf (f, ", ");
24144 asm_fprintf (f, "%r", PC_REGNUM);
24148 fprintf (f, "}\n");
24151 /* Generate code to return from a thumb function.
24152 If 'reg_containing_return_addr' is -1, then the return address is
24153 actually on the stack, at the stack pointer.
24155 Note: do not forget to update length attribute of corresponding insn pattern
24156 when changing assembly output (eg. length attribute of epilogue_insns when
24157 updating Armv8-M Baseline Security Extensions register clearing
24158 sequences). */
24159 static void
24160 thumb_exit (FILE *f, int reg_containing_return_addr)
24162 unsigned regs_available_for_popping;
24163 unsigned regs_to_pop;
24164 int pops_needed;
24165 unsigned available;
24166 unsigned required;
24167 machine_mode mode;
24168 int size;
24169 int restore_a4 = FALSE;
24171 /* Compute the registers we need to pop. */
24172 regs_to_pop = 0;
24173 pops_needed = 0;
24175 if (reg_containing_return_addr == -1)
24177 regs_to_pop |= 1 << LR_REGNUM;
24178 ++pops_needed;
24181 if (TARGET_BACKTRACE)
24183 /* Restore the (ARM) frame pointer and stack pointer. */
24184 regs_to_pop |= (1 << ARM_HARD_FRAME_POINTER_REGNUM) | (1 << SP_REGNUM);
24185 pops_needed += 2;
24188 /* If there is nothing to pop then just emit the BX instruction and
24189 return. */
24190 if (pops_needed == 0)
24192 if (crtl->calls_eh_return)
24193 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
24195 if (IS_CMSE_ENTRY (arm_current_func_type ()))
24197 asm_fprintf (f, "\tmsr\tAPSR_nzcvq, %r\n",
24198 reg_containing_return_addr);
24199 asm_fprintf (f, "\tbxns\t%r\n", reg_containing_return_addr);
24201 else
24202 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
24203 return;
24205 /* Otherwise if we are not supporting interworking and we have not created
24206 a backtrace structure and the function was not entered in ARM mode then
24207 just pop the return address straight into the PC. */
24208 else if (!TARGET_INTERWORK
24209 && !TARGET_BACKTRACE
24210 && !is_called_in_ARM_mode (current_function_decl)
24211 && !crtl->calls_eh_return
24212 && !IS_CMSE_ENTRY (arm_current_func_type ()))
24214 asm_fprintf (f, "\tpop\t{%r}\n", PC_REGNUM);
24215 return;
24218 /* Find out how many of the (return) argument registers we can corrupt. */
24219 regs_available_for_popping = 0;
24221 /* If returning via __builtin_eh_return, the bottom three registers
24222 all contain information needed for the return. */
24223 if (crtl->calls_eh_return)
24224 size = 12;
24225 else
24227 /* Try to deduce the registers used from the function's
24228 return value. This is more reliable than examining
24229 df_regs_ever_live_p () because that will be set if the register is
24230 ever used in the function, not just if the register is used
24231 to hold a return value. */
24233 if (crtl->return_rtx != 0)
24234 mode = GET_MODE (crtl->return_rtx);
24235 else
24236 mode = DECL_MODE (DECL_RESULT (current_function_decl));
24238 size = GET_MODE_SIZE (mode);
24240 if (size == 0)
24242 /* In a void function we can use any argument register.
24243 In a function that returns a structure on the stack
24244 we can use the second and third argument registers. */
24245 if (mode == VOIDmode)
24246 regs_available_for_popping =
24247 (1 << ARG_REGISTER (1))
24248 | (1 << ARG_REGISTER (2))
24249 | (1 << ARG_REGISTER (3));
24250 else
24251 regs_available_for_popping =
24252 (1 << ARG_REGISTER (2))
24253 | (1 << ARG_REGISTER (3));
24255 else if (size <= 4)
24256 regs_available_for_popping =
24257 (1 << ARG_REGISTER (2))
24258 | (1 << ARG_REGISTER (3));
24259 else if (size <= 8)
24260 regs_available_for_popping =
24261 (1 << ARG_REGISTER (3));
24264 /* Match registers to be popped with registers into which we pop them. */
24265 for (available = regs_available_for_popping,
24266 required = regs_to_pop;
24267 required != 0 && available != 0;
24268 available &= ~(available & - available),
24269 required &= ~(required & - required))
24270 -- pops_needed;
24272 /* If we have any popping registers left over, remove them. */
24273 if (available > 0)
24274 regs_available_for_popping &= ~available;
24276 /* Otherwise if we need another popping register we can use
24277 the fourth argument register. */
24278 else if (pops_needed)
24280 /* If we have not found any free argument registers and
24281 reg a4 contains the return address, we must move it. */
24282 if (regs_available_for_popping == 0
24283 && reg_containing_return_addr == LAST_ARG_REGNUM)
24285 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
24286 reg_containing_return_addr = LR_REGNUM;
24288 else if (size > 12)
24290 /* Register a4 is being used to hold part of the return value,
24291 but we have dire need of a free, low register. */
24292 restore_a4 = TRUE;
24294 asm_fprintf (f, "\tmov\t%r, %r\n",IP_REGNUM, LAST_ARG_REGNUM);
24297 if (reg_containing_return_addr != LAST_ARG_REGNUM)
24299 /* The fourth argument register is available. */
24300 regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
24302 --pops_needed;
24306 /* Pop as many registers as we can. */
24307 thumb_pop (f, regs_available_for_popping);
24309 /* Process the registers we popped. */
24310 if (reg_containing_return_addr == -1)
24312 /* The return address was popped into the lowest numbered register. */
24313 regs_to_pop &= ~(1 << LR_REGNUM);
24315 reg_containing_return_addr =
24316 number_of_first_bit_set (regs_available_for_popping);
24318 /* Remove this register from the mask of available registers, so that
24319 the return address will not be corrupted by further pops. */
24320 regs_available_for_popping &= ~(1 << reg_containing_return_addr);
24323 /* If we popped other registers then handle them here. */
24324 if (regs_available_for_popping)
24326 int frame_pointer;
24328 /* Work out which register currently contains the frame pointer. */
24329 frame_pointer = number_of_first_bit_set (regs_available_for_popping);
24331 /* Move it into the correct place. */
24332 asm_fprintf (f, "\tmov\t%r, %r\n",
24333 ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
24335 /* (Temporarily) remove it from the mask of popped registers. */
24336 regs_available_for_popping &= ~(1 << frame_pointer);
24337 regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
24339 if (regs_available_for_popping)
24341 int stack_pointer;
24343 /* We popped the stack pointer as well,
24344 find the register that contains it. */
24345 stack_pointer = number_of_first_bit_set (regs_available_for_popping);
24347 /* Move it into the stack register. */
24348 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, stack_pointer);
24350 /* At this point we have popped all necessary registers, so
24351 do not worry about restoring regs_available_for_popping
24352 to its correct value:
24354 assert (pops_needed == 0)
24355 assert (regs_available_for_popping == (1 << frame_pointer))
24356 assert (regs_to_pop == (1 << STACK_POINTER)) */
24358 else
24360 /* Since we have just moved the popped value into the frame
24361 pointer, the popping register is available for reuse, and
24362 we know that we still have the stack pointer left to pop. */
24363 regs_available_for_popping |= (1 << frame_pointer);
24367 /* If we still have registers left on the stack, but we no longer have
24368 any registers into which we can pop them, then we must move the return
24369 address into the link register and make available the register that
24370 contained it. */
24371 if (regs_available_for_popping == 0 && pops_needed > 0)
24373 regs_available_for_popping |= 1 << reg_containing_return_addr;
24375 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM,
24376 reg_containing_return_addr);
24378 reg_containing_return_addr = LR_REGNUM;
24381 /* If we have registers left on the stack then pop some more.
24382 We know that at most we will want to pop FP and SP. */
24383 if (pops_needed > 0)
24385 int popped_into;
24386 int move_to;
24388 thumb_pop (f, regs_available_for_popping);
24390 /* We have popped either FP or SP.
24391 Move whichever one it is into the correct register. */
24392 popped_into = number_of_first_bit_set (regs_available_for_popping);
24393 move_to = number_of_first_bit_set (regs_to_pop);
24395 asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
24396 --pops_needed;
24399 /* If we still have not popped everything then we must have only
24400 had one register available to us and we are now popping the SP. */
24401 if (pops_needed > 0)
24403 int popped_into;
24405 thumb_pop (f, regs_available_for_popping);
24407 popped_into = number_of_first_bit_set (regs_available_for_popping);
24409 asm_fprintf (f, "\tmov\t%r, %r\n", SP_REGNUM, popped_into);
24411 assert (regs_to_pop == (1 << STACK_POINTER))
24412 assert (pops_needed == 1)
24416 /* If necessary restore the a4 register. */
24417 if (restore_a4)
24419 if (reg_containing_return_addr != LR_REGNUM)
24421 asm_fprintf (f, "\tmov\t%r, %r\n", LR_REGNUM, LAST_ARG_REGNUM);
24422 reg_containing_return_addr = LR_REGNUM;
24425 asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
24428 if (crtl->calls_eh_return)
24429 asm_fprintf (f, "\tadd\t%r, %r\n", SP_REGNUM, ARM_EH_STACKADJ_REGNUM);
24431 /* Return to caller. */
24432 if (IS_CMSE_ENTRY (arm_current_func_type ()))
24434 /* This is for the cases where LR is not being used to contain the return
24435 address. It may therefore contain information that we might not want
24436 to leak, hence it must be cleared. The value in R0 will never be a
24437 secret at this point, so it is safe to use it, see the clearing code
24438 in 'cmse_nonsecure_entry_clear_before_return'. */
24439 if (reg_containing_return_addr != LR_REGNUM)
24440 asm_fprintf (f, "\tmov\tlr, r0\n");
24442 asm_fprintf (f, "\tmsr\tAPSR_nzcvq, %r\n", reg_containing_return_addr);
24443 asm_fprintf (f, "\tbxns\t%r\n", reg_containing_return_addr);
24445 else
24446 asm_fprintf (f, "\tbx\t%r\n", reg_containing_return_addr);
24449 /* Scan INSN just before assembler is output for it.
24450 For Thumb-1, we track the status of the condition codes; this
24451 information is used in the cbranchsi4_insn pattern. */
24452 void
24453 thumb1_final_prescan_insn (rtx_insn *insn)
24455 if (flag_print_asm_name)
24456 asm_fprintf (asm_out_file, "%@ 0x%04x\n",
24457 INSN_ADDRESSES (INSN_UID (insn)));
24458 /* Don't overwrite the previous setter when we get to a cbranch. */
24459 if (INSN_CODE (insn) != CODE_FOR_cbranchsi4_insn)
24461 enum attr_conds conds;
24463 if (cfun->machine->thumb1_cc_insn)
24465 if (modified_in_p (cfun->machine->thumb1_cc_op0, insn)
24466 || modified_in_p (cfun->machine->thumb1_cc_op1, insn))
24467 CC_STATUS_INIT;
24469 conds = get_attr_conds (insn);
24470 if (conds == CONDS_SET)
24472 rtx set = single_set (insn);
24473 cfun->machine->thumb1_cc_insn = insn;
24474 cfun->machine->thumb1_cc_op0 = SET_DEST (set);
24475 cfun->machine->thumb1_cc_op1 = const0_rtx;
24476 cfun->machine->thumb1_cc_mode = CC_NOOVmode;
24477 if (INSN_CODE (insn) == CODE_FOR_thumb1_subsi3_insn)
24479 rtx src1 = XEXP (SET_SRC (set), 1);
24480 if (src1 == const0_rtx)
24481 cfun->machine->thumb1_cc_mode = CCmode;
24483 else if (REG_P (SET_DEST (set)) && REG_P (SET_SRC (set)))
24485 /* Record the src register operand instead of dest because
24486 cprop_hardreg pass propagates src. */
24487 cfun->machine->thumb1_cc_op0 = SET_SRC (set);
24490 else if (conds != CONDS_NOCOND)
24491 cfun->machine->thumb1_cc_insn = NULL_RTX;
24494 /* Check if unexpected far jump is used. */
24495 if (cfun->machine->lr_save_eliminated
24496 && get_attr_far_jump (insn) == FAR_JUMP_YES)
24497 internal_error ("unexpected thumb1 far jump");
24501 thumb_shiftable_const (unsigned HOST_WIDE_INT val)
24503 unsigned HOST_WIDE_INT mask = 0xff;
24504 int i;
24506 val = val & (unsigned HOST_WIDE_INT)0xffffffffu;
24507 if (val == 0) /* XXX */
24508 return 0;
24510 for (i = 0; i < 25; i++)
24511 if ((val & (mask << i)) == val)
24512 return 1;
24514 return 0;
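/* For example, 0x00ff0000 (0xff << 16) and 0x000003fc (0xff << 2) are
   shiftable constants, while 0x00ff00ff is not: its set bits do not fit in
   any single byte-wide window.  */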
24517 /* Returns nonzero if the current function contains,
24518 or might contain a far jump. */
24519 static int
24520 thumb_far_jump_used_p (void)
24522 rtx_insn *insn;
24523 bool far_jump = false;
24524 unsigned int func_size = 0;
24526 /* If we have already decided that far jumps may be used,
24527 do not bother checking again, and always return true even if
24528 it turns out that they are not being used. Once we have made
24529 the decision that far jumps are present (and that hence the link
24530 register will be pushed onto the stack) we cannot go back on it. */
24531 if (cfun->machine->far_jump_used)
24532 return 1;
24534 /* If this function is not being called from the prologue/epilogue
24535 generation code then it must be being called from the
24536 INITIAL_ELIMINATION_OFFSET macro. */
24537 if (!(ARM_DOUBLEWORD_ALIGN || reload_completed))
24539 /* In this case we know that we are being asked about the elimination
24540 of the arg pointer register. If that register is not being used,
24541 then there are no arguments on the stack, and we do not have to
24542 worry that a far jump might force the prologue to push the link
24543 register, changing the stack offsets. In this case we can just
24544 return false, since the presence of far jumps in the function will
24545 not affect stack offsets.
24547 If the arg pointer is live (or if it was live, but has now been
24548 eliminated and so set to dead) then we do have to test to see if
24549 the function might contain a far jump. This test can lead to some
24550 false negatives, since before reload is completed, the length of
24551 branch instructions is not known, so gcc defaults to returning their
24552 longest length, which in turn sets the far jump attribute to true.
24554 A false negative will not result in bad code being generated, but it
24555 will result in a needless push and pop of the link register. We
24556 hope that this does not occur too often.
24558 If we need doubleword stack alignment this could affect the other
24559 elimination offsets so we can't risk getting it wrong. */
24560 if (df_regs_ever_live_p (ARG_POINTER_REGNUM))
24561 cfun->machine->arg_pointer_live = 1;
24562 else if (!cfun->machine->arg_pointer_live)
24563 return 0;
24566 /* We should not change far_jump_used during or after reload, as there is
24567 no chance to change stack frame layout. */
24568 if (reload_in_progress || reload_completed)
24569 return 0;
24571 /* Check to see if the function contains a branch
24572 insn with the far jump attribute set. */
24573 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
24575 if (JUMP_P (insn) && get_attr_far_jump (insn) == FAR_JUMP_YES)
24577 far_jump = true;
24579 func_size += get_attr_length (insn);
24582 /* Attribute far_jump will always be true for thumb1 before
24583 shorten_branch pass. So checking the far_jump attribute before
24584 shorten_branch isn't very useful.
24586 The following heuristic tries to estimate more accurately whether a far
24587 jump will actually be used. It is very conservative, as there is no way
24588 to roll back the decision not to use far jumps.
24590 The Thumb1 long branch offset range is -2048 to 2046. The worst case is
24591 that each 2-byte insn is associated with a 4-byte constant pool entry.
24592 Using function size 2048/3 as the threshold is conservative enough. */
24593 if (far_jump)
24595 if ((func_size * 3) >= 2048)
24597 /* Record the fact that we have decided that
24598 the function does use far jumps. */
24599 cfun->machine->far_jump_used = 1;
24600 return 1;
24604 return 0;
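/* Worked example of the heuristic above: if the insn lengths of a function
   add up to 700 bytes and some branch still carries the (conservative)
   far_jump attribute, then 700 * 3 = 2100 >= 2048, so far_jump_used is
   recorded and LR will be saved; at 600 bytes (1800 < 2048) the function is
   assumed not to need far jumps.  */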
24607 /* Return nonzero if FUNC must be entered in ARM mode. */
24608 static bool
24609 is_called_in_ARM_mode (tree func)
24611 gcc_assert (TREE_CODE (func) == FUNCTION_DECL);
24613 /* Ignore the problem about functions whose address is taken. */
24614 if (TARGET_CALLEE_INTERWORKING && TREE_PUBLIC (func))
24615 return true;
24617 #ifdef ARM_PE
24618 return lookup_attribute ("interfacearm", DECL_ATTRIBUTES (func)) != NULL_TREE;
24619 #else
24620 return false;
24621 #endif
24624 /* Given the stack offsets and register mask in OFFSETS, decide how
24625 many additional registers to push instead of subtracting a constant
24626 from SP. For epilogues the principle is the same except we use pop.
24627 FOR_PROLOGUE indicates which we're generating. */
24628 static int
24629 thumb1_extra_regs_pushed (arm_stack_offsets *offsets, bool for_prologue)
24631 HOST_WIDE_INT amount;
24632 unsigned long live_regs_mask = offsets->saved_regs_mask;
24633 /* Extract a mask of the ones we can give to the Thumb's push/pop
24634 instruction. */
24635 unsigned long l_mask = live_regs_mask & (for_prologue ? 0x40ff : 0xff);
24636 /* Then count how many other high registers will need to be pushed. */
24637 unsigned long high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
24638 int n_free, reg_base, size;
24640 if (!for_prologue && frame_pointer_needed)
24641 amount = offsets->locals_base - offsets->saved_regs;
24642 else
24643 amount = offsets->outgoing_args - offsets->saved_regs;
24645 /* If the stack frame size is 512 exactly, we can save one load
24646 instruction, which should make this a win even when optimizing
24647 for speed. */
24648 if (!optimize_size && amount != 512)
24649 return 0;
24651 /* Can't do this if there are high registers to push. */
24652 if (high_regs_pushed != 0)
24653 return 0;
24655 /* Shouldn't do it in the prologue if no registers would normally
24656 be pushed at all. In the epilogue, also allow it if we'll have
24657 a pop insn for the PC. */
24658 if (l_mask == 0
24659 && (for_prologue
24660 || TARGET_BACKTRACE
24661 || (live_regs_mask & 1 << LR_REGNUM) == 0
24662 || TARGET_INTERWORK
24663 || crtl->args.pretend_args_size != 0))
24664 return 0;
24666 /* Don't do this if thumb_expand_prologue wants to emit instructions
24667 between the push and the stack frame allocation. */
24668 if (for_prologue
24669 && ((flag_pic && arm_pic_register != INVALID_REGNUM)
24670 || (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)))
24671 return 0;
24673 reg_base = 0;
24674 n_free = 0;
24675 if (!for_prologue)
24677 size = arm_size_return_regs ();
24678 reg_base = ARM_NUM_INTS (size);
24679 live_regs_mask >>= reg_base;
24682 while (reg_base + n_free < 8 && !(live_regs_mask & 1)
24683 && (for_prologue || call_used_regs[reg_base + n_free]))
24685 live_regs_mask >>= 1;
24686 n_free++;
24689 if (n_free == 0)
24690 return 0;
24691 gcc_assert (amount / 4 * 4 == amount);
24693 if (amount >= 512 && (amount - n_free * 4) < 512)
24694 return (amount - 508) / 4;
24695 if (amount <= n_free * 4)
24696 return amount / 4;
24697 return 0;
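/* Worked example: with AMOUNT == 512 and three free low registers
   (n_free == 3), 512 - 12 = 500 < 512, so the function returns
   (512 - 508) / 4 == 1; pushing one extra register leaves an adjustment of
   508 bytes, which fits a single Thumb-1 SP-adjusting instruction.  */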
24700 /* The bits which aren't usefully expanded as rtl. */
24701 const char *
24702 thumb1_unexpanded_epilogue (void)
24704 arm_stack_offsets *offsets;
24705 int regno;
24706 unsigned long live_regs_mask = 0;
24707 int high_regs_pushed = 0;
24708 int extra_pop;
24709 int had_to_push_lr;
24710 int size;
24712 if (cfun->machine->return_used_this_function != 0)
24713 return "";
24715 if (IS_NAKED (arm_current_func_type ()))
24716 return "";
24718 offsets = arm_get_frame_offsets ();
24719 live_regs_mask = offsets->saved_regs_mask;
24720 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
24722 /* Try to deduce the registers used from the function's return value.
24723 This is more reliable than examining df_regs_ever_live_p () because that
24724 will be set if the register is ever used in the function, not just if
24725 the register is used to hold a return value. */
24726 size = arm_size_return_regs ();
24728 extra_pop = thumb1_extra_regs_pushed (offsets, false);
24729 if (extra_pop > 0)
24731 unsigned long extra_mask = (1 << extra_pop) - 1;
24732 live_regs_mask |= extra_mask << ARM_NUM_INTS (size);
24735 /* The prolog may have pushed some high registers to use as
24736 work registers. e.g. the testsuite file:
24737 gcc/testsuite/gcc/gcc.c-torture/execute/complex-2.c
24738 compiles to produce:
24739 push {r4, r5, r6, r7, lr}
24740 mov r7, r9
24741 mov r6, r8
24742 push {r6, r7}
24743 as part of the prolog. We have to undo that pushing here. */
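/* Editorial note (not in the original source): for the example above,
   assuming the return value fits in r0/r1 so that r2 and r3 are available,
   the code below would undo the prologue's "push {r6, r7}" with roughly:
       pop  {r2, r3}
       mov  r8, r2
       mov  r9, r3
   i.e. pop into whatever low registers are free and then move the values
   back up into the high registers.  */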
24745 if (high_regs_pushed)
24747 unsigned long mask = live_regs_mask & 0xff;
24748 int next_hi_reg;
24750 /* The available low registers depend on the size of the value we are
24751 returning. */
24752 if (size <= 12)
24753 mask |= 1 << 3;
24754 if (size <= 8)
24755 mask |= 1 << 2;
24757 if (mask == 0)
24758 /* Oh dear! We have no low registers into which we can pop
24759 high registers! */
24760 internal_error
24761 ("no low registers available for popping high registers");
24763 for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
24764 if (live_regs_mask & (1 << next_hi_reg))
24765 break;
24767 while (high_regs_pushed)
24769 /* Find lo register(s) into which the high register(s) can
24770 be popped. */
24771 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
24773 if (mask & (1 << regno))
24774 high_regs_pushed--;
24775 if (high_regs_pushed == 0)
24776 break;
24779 mask &= (2 << regno) - 1; /* A noop if regno == 8 */
24781 /* Pop the values into the low register(s). */
24782 thumb_pop (asm_out_file, mask);
24784 /* Move the value(s) into the high registers. */
24785 for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
24787 if (mask & (1 << regno))
24789 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", next_hi_reg,
24790 regno);
24792 for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
24793 if (live_regs_mask & (1 << next_hi_reg))
24794 break;
24798 live_regs_mask &= ~0x0f00;
24801 had_to_push_lr = (live_regs_mask & (1 << LR_REGNUM)) != 0;
24802 live_regs_mask &= 0xff;
24804 if (crtl->args.pretend_args_size == 0 || TARGET_BACKTRACE)
24806 /* Pop the return address into the PC. */
24807 if (had_to_push_lr)
24808 live_regs_mask |= 1 << PC_REGNUM;
24810 /* Either no argument registers were pushed or a backtrace
24811 structure was created which includes an adjusted stack
24812 pointer, so just pop everything. */
24813 if (live_regs_mask)
24814 thumb_pop (asm_out_file, live_regs_mask);
24816 /* We have either just popped the return address into the
24817 PC or it was kept in LR for the entire function.
24818 Note that thumb_pop has already called thumb_exit if the
24819 PC was in the list. */
24820 if (!had_to_push_lr)
24821 thumb_exit (asm_out_file, LR_REGNUM);
24823 else
24825 /* Pop everything but the return address. */
24826 if (live_regs_mask)
24827 thumb_pop (asm_out_file, live_regs_mask);
24829 if (had_to_push_lr)
24831 if (size > 12)
24833 /* We have no free low regs, so save one. */
24834 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", IP_REGNUM,
24835 LAST_ARG_REGNUM);
24838 /* Get the return address into a temporary register. */
24839 thumb_pop (asm_out_file, 1 << LAST_ARG_REGNUM);
24841 if (size > 12)
24843 /* Move the return address to lr. */
24844 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LR_REGNUM,
24845 LAST_ARG_REGNUM);
24846 /* Restore the low register. */
24847 asm_fprintf (asm_out_file, "\tmov\t%r, %r\n", LAST_ARG_REGNUM,
24848 IP_REGNUM);
24849 regno = LR_REGNUM;
24851 else
24852 regno = LAST_ARG_REGNUM;
24854 else
24855 regno = LR_REGNUM;
24857 /* Remove the argument registers that were pushed onto the stack. */
24858 asm_fprintf (asm_out_file, "\tadd\t%r, %r, #%d\n",
24859 SP_REGNUM, SP_REGNUM,
24860 crtl->args.pretend_args_size);
24862 thumb_exit (asm_out_file, regno);
24865 return "";
24868 /* Functions to save and restore machine-specific function data. */
24869 static struct machine_function *
24870 arm_init_machine_status (void)
24872 struct machine_function *machine;
24873 machine = ggc_cleared_alloc<machine_function> ();
24875 #if ARM_FT_UNKNOWN != 0
24876 machine->func_type = ARM_FT_UNKNOWN;
24877 #endif
24878 return machine;
24881 /* Return an RTX indicating where the return address to the
24882 calling function can be found. */
24884 arm_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
24886 if (count != 0)
24887 return NULL_RTX;
24889 return get_hard_reg_initial_val (Pmode, LR_REGNUM);
24892 /* Do anything needed before RTL is emitted for each function. */
24893 void
24894 arm_init_expanders (void)
24896 /* Arrange to initialize and mark the machine per-function status. */
24897 init_machine_status = arm_init_machine_status;
24899 /* This is to stop the combine pass optimizing away the alignment
24900 adjustment of va_arg. */
24901 /* ??? It is claimed that this should not be necessary. */
24902 if (cfun)
24903 mark_reg_pointer (arg_pointer_rtx, PARM_BOUNDARY);
24906 /* Return true if FUNC is compiled for a different instruction set mode (ARM vs. Thumb) than the one currently in effect. */
24908 bool
24909 arm_change_mode_p (tree func)
24911 if (TREE_CODE (func) != FUNCTION_DECL)
24912 return false;
24914 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (func);
24916 if (!callee_tree)
24917 callee_tree = target_option_default_node;
24919 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
24920 int flags = callee_opts->x_target_flags;
24922 return (TARGET_THUMB_P (flags) != TARGET_THUMB);
24925 /* Like arm_compute_initial_elimination_offset. Simpler because there
24926 isn't an ABI specified frame pointer for Thumb. Instead, we set it
24927 to point at the base of the local variables after static stack
24928 space for a function has been allocated. */
24930 HOST_WIDE_INT
24931 thumb_compute_initial_elimination_offset (unsigned int from, unsigned int to)
24933 arm_stack_offsets *offsets;
24935 offsets = arm_get_frame_offsets ();
24937 switch (from)
24939 case ARG_POINTER_REGNUM:
24940 switch (to)
24942 case STACK_POINTER_REGNUM:
24943 return offsets->outgoing_args - offsets->saved_args;
24945 case FRAME_POINTER_REGNUM:
24946 return offsets->soft_frame - offsets->saved_args;
24948 case ARM_HARD_FRAME_POINTER_REGNUM:
24949 return offsets->saved_regs - offsets->saved_args;
24951 case THUMB_HARD_FRAME_POINTER_REGNUM:
24952 return offsets->locals_base - offsets->saved_args;
24954 default:
24955 gcc_unreachable ();
24957 break;
24959 case FRAME_POINTER_REGNUM:
24960 switch (to)
24962 case STACK_POINTER_REGNUM:
24963 return offsets->outgoing_args - offsets->soft_frame;
24965 case ARM_HARD_FRAME_POINTER_REGNUM:
24966 return offsets->saved_regs - offsets->soft_frame;
24968 case THUMB_HARD_FRAME_POINTER_REGNUM:
24969 return offsets->locals_base - offsets->soft_frame;
24971 default:
24972 gcc_unreachable ();
24974 break;
24976 default:
24977 gcc_unreachable ();
24981 /* Generate the function's prologue. */
24983 void
24984 thumb1_expand_prologue (void)
24986 rtx_insn *insn;
24988 HOST_WIDE_INT amount;
24989 HOST_WIDE_INT size;
24990 arm_stack_offsets *offsets;
24991 unsigned long func_type;
24992 int regno;
24993 unsigned long live_regs_mask;
24994 unsigned long l_mask;
24995 unsigned high_regs_pushed = 0;
24996 bool lr_needs_saving;
24998 func_type = arm_current_func_type ();
25000 /* Naked functions don't have prologues. */
25001 if (IS_NAKED (func_type))
25003 if (flag_stack_usage_info)
25004 current_function_static_stack_size = 0;
25005 return;
25008 if (IS_INTERRUPT (func_type))
25010 error ("interrupt Service Routines cannot be coded in Thumb mode");
25011 return;
25014 if (is_called_in_ARM_mode (current_function_decl))
25015 emit_insn (gen_prologue_thumb1_interwork ());
25017 offsets = arm_get_frame_offsets ();
25018 live_regs_mask = offsets->saved_regs_mask;
25019 lr_needs_saving = live_regs_mask & (1 << LR_REGNUM);
25021 /* Extract a mask of the ones we can give to the Thumb's push instruction. */
25022 l_mask = live_regs_mask & 0x40ff;
25023 /* Then count how many other high registers will need to be pushed. */
25024 high_regs_pushed = bit_count (live_regs_mask & 0x0f00);
25026 if (crtl->args.pretend_args_size)
25028 rtx x = GEN_INT (-crtl->args.pretend_args_size);
25030 if (cfun->machine->uses_anonymous_args)
25032 int num_pushes = ARM_NUM_INTS (crtl->args.pretend_args_size);
25033 unsigned long mask;
25035 mask = 1ul << (LAST_ARG_REGNUM + 1);
25036 mask -= 1ul << (LAST_ARG_REGNUM + 1 - num_pushes);
25038 insn = thumb1_emit_multi_reg_push (mask, 0);
25040 else
25042 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
25043 stack_pointer_rtx, x));
25045 RTX_FRAME_RELATED_P (insn) = 1;
25048 if (TARGET_BACKTRACE)
25050 HOST_WIDE_INT offset = 0;
25051 unsigned work_register;
25052 rtx work_reg, x, arm_hfp_rtx;
25054 /* We have been asked to create a stack backtrace structure.
25055 The code looks like this:
25057 0 .align 2
25058 0 func:
25059 0 sub SP, #16 Reserve space for 4 registers.
25060 2 push {R7} Push low registers.
25061 4 add R7, SP, #20 Get the stack pointer before the push.
25062 6 str R7, [SP, #8] Store the stack pointer
25063 (before reserving the space).
25064 8 mov R7, PC Get hold of the start of this code + 12.
25065 10 str R7, [SP, #16] Store it.
25066 12 mov R7, FP Get hold of the current frame pointer.
25067 14 str R7, [SP, #4] Store it.
25068 16 mov R7, LR Get hold of the current return address.
25069 18 str R7, [SP, #12] Store it.
25070 20 add R7, SP, #16 Point at the start of the
25071 backtrace structure.
25072 22 mov FP, R7 Put this value into the frame pointer. */
25074 work_register = thumb_find_work_register (live_regs_mask);
25075 work_reg = gen_rtx_REG (SImode, work_register);
25076 arm_hfp_rtx = gen_rtx_REG (SImode, ARM_HARD_FRAME_POINTER_REGNUM);
25078 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
25079 stack_pointer_rtx, GEN_INT (-16)));
25080 RTX_FRAME_RELATED_P (insn) = 1;
25082 if (l_mask)
25084 insn = thumb1_emit_multi_reg_push (l_mask, l_mask);
25085 RTX_FRAME_RELATED_P (insn) = 1;
25086 lr_needs_saving = false;
25088 offset = bit_count (l_mask) * UNITS_PER_WORD;
25091 x = GEN_INT (offset + 16 + crtl->args.pretend_args_size);
25092 emit_insn (gen_addsi3 (work_reg, stack_pointer_rtx, x));
25094 x = plus_constant (Pmode, stack_pointer_rtx, offset + 4);
25095 x = gen_frame_mem (SImode, x);
25096 emit_move_insn (x, work_reg);
25098 /* Make sure that the instruction fetching the PC is in the right place
25099 to calculate "start of backtrace creation code + 12". */
25100 /* ??? The stores using the common WORK_REG ought to be enough to
25101 prevent the scheduler from doing anything weird. Failing that
25102 we could always move all of the following into an UNSPEC_VOLATILE. */
25103 if (l_mask)
25105 x = gen_rtx_REG (SImode, PC_REGNUM);
25106 emit_move_insn (work_reg, x);
25108 x = plus_constant (Pmode, stack_pointer_rtx, offset + 12);
25109 x = gen_frame_mem (SImode, x);
25110 emit_move_insn (x, work_reg);
25112 emit_move_insn (work_reg, arm_hfp_rtx);
25114 x = plus_constant (Pmode, stack_pointer_rtx, offset);
25115 x = gen_frame_mem (SImode, x);
25116 emit_move_insn (x, work_reg);
25118 else
25120 emit_move_insn (work_reg, arm_hfp_rtx);
25122 x = plus_constant (Pmode, stack_pointer_rtx, offset);
25123 x = gen_frame_mem (SImode, x);
25124 emit_move_insn (x, work_reg);
25126 x = gen_rtx_REG (SImode, PC_REGNUM);
25127 emit_move_insn (work_reg, x);
25129 x = plus_constant (Pmode, stack_pointer_rtx, offset + 12);
25130 x = gen_frame_mem (SImode, x);
25131 emit_move_insn (x, work_reg);
25134 x = gen_rtx_REG (SImode, LR_REGNUM);
25135 emit_move_insn (work_reg, x);
25137 x = plus_constant (Pmode, stack_pointer_rtx, offset + 8);
25138 x = gen_frame_mem (SImode, x);
25139 emit_move_insn (x, work_reg);
25141 x = GEN_INT (offset + 12);
25142 emit_insn (gen_addsi3 (work_reg, stack_pointer_rtx, x));
25144 emit_move_insn (arm_hfp_rtx, work_reg);
25146 /* Optimization: If we are not pushing any low registers but we are going
25147 to push some high registers then delay our first push. This will just
25148 be a push of LR and we can combine it with the push of the first high
25149 register. */
25150 else if ((l_mask & 0xff) != 0
25151 || (high_regs_pushed == 0 && lr_needs_saving))
25153 unsigned long mask = l_mask;
25154 mask |= (1 << thumb1_extra_regs_pushed (offsets, true)) - 1;
25155 insn = thumb1_emit_multi_reg_push (mask, mask);
25156 RTX_FRAME_RELATED_P (insn) = 1;
25157 lr_needs_saving = false;
25160 if (high_regs_pushed)
25162 unsigned pushable_regs;
25163 unsigned next_hi_reg;
25164 unsigned arg_regs_num = TARGET_AAPCS_BASED ? crtl->args.info.aapcs_ncrn
25165 : crtl->args.info.nregs;
25166 unsigned arg_regs_mask = (1 << arg_regs_num) - 1;
25168 for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
25169 if (live_regs_mask & (1 << next_hi_reg))
25170 break;
25172 /* Here we need to mask out registers used for passing arguments,
25173 even if they could otherwise be pushed, to avoid using them to stash
25174 the high registers; such a stash could clobber the argument values. */
25175 pushable_regs = l_mask & (~arg_regs_mask);
25176 if (lr_needs_saving)
25177 pushable_regs &= ~(1 << LR_REGNUM);
25179 if (pushable_regs == 0)
25180 pushable_regs = 1 << thumb_find_work_register (live_regs_mask);
25182 while (high_regs_pushed > 0)
25184 unsigned long real_regs_mask = 0;
25185 unsigned long push_mask = 0;
25187 for (regno = LR_REGNUM; regno >= 0; regno --)
25189 if (pushable_regs & (1 << regno))
25191 emit_move_insn (gen_rtx_REG (SImode, regno),
25192 gen_rtx_REG (SImode, next_hi_reg));
25194 high_regs_pushed --;
25195 real_regs_mask |= (1 << next_hi_reg);
25196 push_mask |= (1 << regno);
25198 if (high_regs_pushed)
25200 for (next_hi_reg --; next_hi_reg > LAST_LO_REGNUM;
25201 next_hi_reg --)
25202 if (live_regs_mask & (1 << next_hi_reg))
25203 break;
25205 else
25206 break;
25210 /* If we had to find a work register and we have not yet
25211 saved the LR then add it to the list of regs to push. */
25212 if (lr_needs_saving)
25214 push_mask |= 1 << LR_REGNUM;
25215 real_regs_mask |= 1 << LR_REGNUM;
25216 lr_needs_saving = false;
25219 insn = thumb1_emit_multi_reg_push (push_mask, real_regs_mask);
25220 RTX_FRAME_RELATED_P (insn) = 1;
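/* Editorial note (not in the original source): one effect of the "delay
   our first push" optimization above is that, when only LR and a high
   register need saving, the loop emits a single combined "push {rN, lr}"
   (rN being the low work register holding the high register's value)
   instead of "push {lr}" followed by a separate "push {rN}".  Illustrative
   only; the exact registers depend on live_regs_mask.  */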
25224 /* Load the pic register before setting the frame pointer,
25225 so we can use r7 as a temporary work register. */
25226 if (flag_pic && arm_pic_register != INVALID_REGNUM)
25227 arm_load_pic_register (live_regs_mask);
25229 if (!frame_pointer_needed && CALLER_INTERWORKING_SLOT_SIZE > 0)
25230 emit_move_insn (gen_rtx_REG (Pmode, ARM_HARD_FRAME_POINTER_REGNUM),
25231 stack_pointer_rtx);
25233 size = offsets->outgoing_args - offsets->saved_args;
25234 if (flag_stack_usage_info)
25235 current_function_static_stack_size = size;
25237 /* If we have a frame, then do stack checking. FIXME: not implemented. */
25238 if ((flag_stack_check == STATIC_BUILTIN_STACK_CHECK
25239 || flag_stack_clash_protection)
25240 && size)
25241 sorry ("-fstack-check=specific for Thumb-1");
25243 amount = offsets->outgoing_args - offsets->saved_regs;
25244 amount -= 4 * thumb1_extra_regs_pushed (offsets, true);
25245 if (amount)
25247 if (amount < 512)
25249 insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
25250 GEN_INT (- amount)));
25251 RTX_FRAME_RELATED_P (insn) = 1;
25253 else
25255 rtx reg, dwarf;
25257 /* The stack decrement is too big for an immediate value in a single
25258 insn. In theory we could issue multiple subtracts, but after
25259 three of them it becomes more space efficient to place the full
25260 value in the constant pool and load into a register. (Also the
25261 ARM debugger really likes to see only one stack decrement per
25262 function). So instead we look for a scratch register into which
25263 we can load the decrement, and then we subtract this from the
25264 stack pointer. Unfortunately on the thumb the only available
25265 scratch registers are the argument registers, and we cannot use
25266 these as they may hold arguments to the function. Instead we
25267 attempt to locate a call preserved register which is used by this
25268 function. If we can find one, then we know that it will have
25269 been pushed at the start of the prologue and so we can corrupt
25270 it now. */
25271 for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
25272 if (live_regs_mask & (1 << regno))
25273 break;
25275 gcc_assert(regno <= LAST_LO_REGNUM);
25277 reg = gen_rtx_REG (SImode, regno);
25279 emit_insn (gen_movsi (reg, GEN_INT (- amount)));
25281 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
25282 stack_pointer_rtx, reg));
25284 dwarf = gen_rtx_SET (stack_pointer_rtx,
25285 plus_constant (Pmode, stack_pointer_rtx,
25286 -amount));
25287 add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
25288 RTX_FRAME_RELATED_P (insn) = 1;
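/* Editorial note (not in the original source): a sketch of what the branch
   above produces for, say, AMOUNT == 1024 with r4 already saved by the
   prologue, roughly:
       ldr  r4, =-1024        @ decrement loaded from the literal pool
       add  sp, r4
   plus a REG_FRAME_RELATED_EXPR note recording sp = sp - 1024 so the
   unwinder still sees a simple constant adjustment.  Illustrative only;
   the register used is the first low register (above the argument
   registers) that the prologue saved.  */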
25292 if (frame_pointer_needed)
25293 thumb_set_frame_pointer (offsets);
25295 /* If we are profiling, make sure no instructions are scheduled before
25296 the call to mcount. Similarly if the user has requested no
25297 scheduling in the prolog. Similarly if we want non-call exceptions
25298 using the EABI unwinder, to prevent faulting instructions from being
25299 swapped with a stack adjustment. */
25300 if (crtl->profile || !TARGET_SCHED_PROLOG
25301 || (arm_except_unwind_info (&global_options) == UI_TARGET
25302 && cfun->can_throw_non_call_exceptions))
25303 emit_insn (gen_blockage ());
25305 cfun->machine->lr_save_eliminated = !thumb_force_lr_save ();
25306 if (live_regs_mask & 0xff)
25307 cfun->machine->lr_save_eliminated = 0;
25310 /* Clear caller saved registers not used to pass return values and leaked
25311 condition flags before exiting a cmse_nonsecure_entry function. */
25313 void
25314 cmse_nonsecure_entry_clear_before_return (void)
25316 int regno, maxregno = TARGET_HARD_FLOAT ? LAST_VFP_REGNUM : IP_REGNUM;
25317 uint32_t padding_bits_to_clear = 0;
25318 auto_sbitmap to_clear_bitmap (maxregno + 1);
25319 rtx r1_reg, result_rtl, clearing_reg = NULL_RTX;
25320 tree result_type;
25322 bitmap_clear (to_clear_bitmap);
25323 bitmap_set_range (to_clear_bitmap, R0_REGNUM, NUM_ARG_REGS);
25324 bitmap_set_bit (to_clear_bitmap, IP_REGNUM);
25326 /* If we are not dealing with -mfloat-abi=soft we will need to clear VFP
25327 registers. */
25328 if (TARGET_HARD_FLOAT)
25330 int float_bits = D7_VFP_REGNUM - FIRST_VFP_REGNUM + 1;
25332 bitmap_set_range (to_clear_bitmap, FIRST_VFP_REGNUM, float_bits);
25334 /* Make sure we don't clear the two scratch registers used to clear the
25335 relevant FPSCR bits in output_return_instruction. */
25336 emit_use (gen_rtx_REG (SImode, IP_REGNUM));
25337 bitmap_clear_bit (to_clear_bitmap, IP_REGNUM);
25338 emit_use (gen_rtx_REG (SImode, 4));
25339 bitmap_clear_bit (to_clear_bitmap, 4);
25342 /* If the user has defined registers to be caller saved, these are no longer
25343 restored by the function before returning and must thus be cleared for
25344 security purposes. */
25345 for (regno = NUM_ARG_REGS; regno <= maxregno; regno++)
25347 /* We do not touch registers that can be used to pass arguments as per
25348 the AAPCS, since these should never be made callee-saved by user
25349 options. */
25350 if (IN_RANGE (regno, FIRST_VFP_REGNUM, D7_VFP_REGNUM))
25351 continue;
25352 if (IN_RANGE (regno, IP_REGNUM, PC_REGNUM))
25353 continue;
25354 if (call_used_regs[regno])
25355 bitmap_set_bit (to_clear_bitmap, regno);
25358 /* Make sure we do not clear the registers used to return the result in. */
25359 result_type = TREE_TYPE (DECL_RESULT (current_function_decl));
25360 if (!VOID_TYPE_P (result_type))
25362 uint64_t to_clear_return_mask;
25363 result_rtl = arm_function_value (result_type, current_function_decl, 0);
25365 /* No need to check that we return in registers, because we don't
25366 support returning on stack yet. */
25367 gcc_assert (REG_P (result_rtl));
25368 to_clear_return_mask
25369 = compute_not_to_clear_mask (result_type, result_rtl, 0,
25370 &padding_bits_to_clear);
25371 if (to_clear_return_mask)
25373 gcc_assert ((unsigned) maxregno < sizeof (long long) * __CHAR_BIT__);
25374 for (regno = R0_REGNUM; regno <= maxregno; regno++)
25376 if (to_clear_return_mask & (1ULL << regno))
25377 bitmap_clear_bit (to_clear_bitmap, regno);
25382 if (padding_bits_to_clear != 0)
25384 int to_clear_bitmap_size = SBITMAP_SIZE ((sbitmap) to_clear_bitmap);
25385 auto_sbitmap to_clear_arg_regs_bitmap (to_clear_bitmap_size);
25387 /* Padding_bits_to_clear is not 0 so we know we are dealing with
25388 returning a composite type, which only uses r0. Let's make sure that
25389 r1-r3 are cleared too. */
25390 bitmap_clear (to_clear_arg_regs_bitmap);
25391 bitmap_set_range (to_clear_arg_regs_bitmap, R1_REGNUM, NUM_ARG_REGS - 1);
25392 gcc_assert (bitmap_subset_p (to_clear_arg_regs_bitmap, to_clear_bitmap));
25395 /* Clear full registers that leak before returning. */
25396 clearing_reg = gen_rtx_REG (SImode, TARGET_THUMB1 ? R0_REGNUM : LR_REGNUM);
25397 r1_reg = gen_rtx_REG (SImode, R0_REGNUM + 1);
25398 cmse_clear_registers (to_clear_bitmap, &padding_bits_to_clear, 1, r1_reg,
25399 clearing_reg);
25402 /* Generate the pattern *pop_multiple_with_stack_update_and_return if a
25403 single POP instruction can be generated. LR should be replaced by PC. All
25404 the checks required are already done by USE_RETURN_INSN (). Hence,
25405 all we really need to decide here is whether a single register or
25406 multiple registers are to be popped. */
25407 void
25408 thumb2_expand_return (bool simple_return)
25410 int i, num_regs;
25411 unsigned long saved_regs_mask;
25412 arm_stack_offsets *offsets;
25414 offsets = arm_get_frame_offsets ();
25415 saved_regs_mask = offsets->saved_regs_mask;
25417 for (i = 0, num_regs = 0; i <= LAST_ARM_REGNUM; i++)
25418 if (saved_regs_mask & (1 << i))
25419 num_regs++;
25421 if (!simple_return && saved_regs_mask)
25423 /* TODO: Verify that this path is never taken for cmse_nonsecure_entry
25424 functions or adapt code to handle according to ACLE. This path should
25425 not be reachable for cmse_nonsecure_entry functions though we prefer
25426 to assert it for now to ensure that future code changes do not silently
25427 change this behavior. */
25428 gcc_assert (!IS_CMSE_ENTRY (arm_current_func_type ()));
25429 if (num_regs == 1)
25431 rtx par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
25432 rtx reg = gen_rtx_REG (SImode, PC_REGNUM);
25433 rtx addr = gen_rtx_MEM (SImode,
25434 gen_rtx_POST_INC (SImode,
25435 stack_pointer_rtx));
25436 set_mem_alias_set (addr, get_frame_alias_set ());
25437 XVECEXP (par, 0, 0) = ret_rtx;
25438 XVECEXP (par, 0, 1) = gen_rtx_SET (reg, addr);
25439 RTX_FRAME_RELATED_P (XVECEXP (par, 0, 1)) = 1;
25440 emit_jump_insn (par);
25442 else
25444 saved_regs_mask &= ~ (1 << LR_REGNUM);
25445 saved_regs_mask |= (1 << PC_REGNUM);
25446 arm_emit_multi_reg_pop (saved_regs_mask);
25449 else
25451 if (IS_CMSE_ENTRY (arm_current_func_type ()))
25452 cmse_nonsecure_entry_clear_before_return ();
25453 emit_jump_insn (simple_return_rtx);
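/* Editorial note (not in the original source): concretely, if only LR was
   saved (num_regs == 1) the PARALLEL built above represents a single
   "pop {pc}"; with more registers saved, LR is replaced by PC in the mask
   so one multi-register pop such as "pop {r4, r5, pc}" both restores the
   registers and returns.  */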
25457 void
25458 thumb1_expand_epilogue (void)
25460 HOST_WIDE_INT amount;
25461 arm_stack_offsets *offsets;
25462 int regno;
25464 /* Naked functions don't have epilogues. */
25465 if (IS_NAKED (arm_current_func_type ()))
25466 return;
25468 offsets = arm_get_frame_offsets ();
25469 amount = offsets->outgoing_args - offsets->saved_regs;
25471 if (frame_pointer_needed)
25473 emit_insn (gen_movsi (stack_pointer_rtx, hard_frame_pointer_rtx));
25474 amount = offsets->locals_base - offsets->saved_regs;
25476 amount -= 4 * thumb1_extra_regs_pushed (offsets, false);
25478 gcc_assert (amount >= 0);
25479 if (amount)
25481 emit_insn (gen_blockage ());
25483 if (amount < 512)
25484 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
25485 GEN_INT (amount)));
25486 else
25488 /* r3 is always free in the epilogue. */
25489 rtx reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
25491 emit_insn (gen_movsi (reg, GEN_INT (amount)));
25492 emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, reg));
25496 /* Emit a USE (stack_pointer_rtx), so that
25497 the stack adjustment will not be deleted. */
25498 emit_insn (gen_force_register_use (stack_pointer_rtx));
25500 if (crtl->profile || !TARGET_SCHED_PROLOG)
25501 emit_insn (gen_blockage ());
25503 /* Emit a clobber for each insn that will be restored in the epilogue,
25504 so that flow2 will get register lifetimes correct. */
25505 for (regno = 0; regno < 13; regno++)
25506 if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
25507 emit_clobber (gen_rtx_REG (SImode, regno));
25509 if (! df_regs_ever_live_p (LR_REGNUM))
25510 emit_use (gen_rtx_REG (SImode, LR_REGNUM));
25512 /* Clear all caller-saved regs that are not used to return. */
25513 if (IS_CMSE_ENTRY (arm_current_func_type ()))
25514 cmse_nonsecure_entry_clear_before_return ();
25517 /* Epilogue code for APCS frame. */
25518 static void
25519 arm_expand_epilogue_apcs_frame (bool really_return)
25521 unsigned long func_type;
25522 unsigned long saved_regs_mask;
25523 int num_regs = 0;
25524 int i;
25525 int floats_from_frame = 0;
25526 arm_stack_offsets *offsets;
25528 gcc_assert (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM);
25529 func_type = arm_current_func_type ();
25531 /* Get frame offsets for ARM. */
25532 offsets = arm_get_frame_offsets ();
25533 saved_regs_mask = offsets->saved_regs_mask;
25535 /* Find the offset of the floating-point save area in the frame. */
25536 floats_from_frame
25537 = (offsets->saved_args
25538 + arm_compute_static_chain_stack_bytes ()
25539 - offsets->frame);
25541 /* Compute how many core registers saved and how far away the floats are. */
25542 for (i = 0; i <= LAST_ARM_REGNUM; i++)
25543 if (saved_regs_mask & (1 << i))
25545 num_regs++;
25546 floats_from_frame += 4;
25549 if (TARGET_HARD_FLOAT)
25551 int start_reg;
25552 rtx ip_rtx = gen_rtx_REG (SImode, IP_REGNUM);
25554 /* The offset is from IP_REGNUM. */
25555 int saved_size = arm_get_vfp_saved_size ();
25556 if (saved_size > 0)
25558 rtx_insn *insn;
25559 floats_from_frame += saved_size;
25560 insn = emit_insn (gen_addsi3 (ip_rtx,
25561 hard_frame_pointer_rtx,
25562 GEN_INT (-floats_from_frame)));
25563 arm_add_cfa_adjust_cfa_note (insn, -floats_from_frame,
25564 ip_rtx, hard_frame_pointer_rtx);
25567 /* Generate VFP register multi-pop. */
25568 start_reg = FIRST_VFP_REGNUM;
25570 for (i = FIRST_VFP_REGNUM; i < LAST_VFP_REGNUM; i += 2)
25571 /* Look for a case where a reg does not need restoring. */
25572 if ((!df_regs_ever_live_p (i) || call_used_regs[i])
25573 && (!df_regs_ever_live_p (i + 1)
25574 || call_used_regs[i + 1]))
25576 if (start_reg != i)
25577 arm_emit_vfp_multi_reg_pop (start_reg,
25578 (i - start_reg) / 2,
25579 gen_rtx_REG (SImode,
25580 IP_REGNUM));
25581 start_reg = i + 2;
25584 /* Restore the remaining regs that we have discovered (or possibly
25585 even all of them, if the conditional in the for loop never
25586 fired). */
25587 if (start_reg != i)
25588 arm_emit_vfp_multi_reg_pop (start_reg,
25589 (i - start_reg) / 2,
25590 gen_rtx_REG (SImode, IP_REGNUM));
25593 if (TARGET_IWMMXT)
25595 /* The frame pointer is guaranteed to be non-double-word aligned, as
25596 it is set to double-word-aligned old_stack_pointer - 4. */
25597 rtx_insn *insn;
25598 int lrm_count = (num_regs % 2) ? (num_regs + 2) : (num_regs + 1);
25600 for (i = LAST_IWMMXT_REGNUM; i >= FIRST_IWMMXT_REGNUM; i--)
25601 if (df_regs_ever_live_p (i) && !call_used_regs[i])
25603 rtx addr = gen_frame_mem (V2SImode,
25604 plus_constant (Pmode, hard_frame_pointer_rtx,
25605 - lrm_count * 4));
25606 insn = emit_insn (gen_movsi (gen_rtx_REG (V2SImode, i), addr));
25607 REG_NOTES (insn) = alloc_reg_note (REG_CFA_RESTORE,
25608 gen_rtx_REG (V2SImode, i),
25609 NULL_RTX);
25610 lrm_count += 2;
25614 /* saved_regs_mask should contain IP, which holds the old stack pointer
25615 from the time the frame was created. Since SP and IP are adjacent registers,
25616 we can restore the value directly into SP. */
25617 gcc_assert (saved_regs_mask & (1 << IP_REGNUM));
25618 saved_regs_mask &= ~(1 << IP_REGNUM);
25619 saved_regs_mask |= (1 << SP_REGNUM);
25621 /* There are two registers left in saved_regs_mask - LR and PC. We
25622 only need to restore LR (the return address), but to
25623 save time we can load it directly into PC, unless we need a
25624 special function exit sequence, or we are not really returning. */
25625 if (really_return
25626 && ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL
25627 && !crtl->calls_eh_return)
25628 /* Delete LR from the register mask, so that LR on
25629 the stack is loaded into the PC in the register mask. */
25630 saved_regs_mask &= ~(1 << LR_REGNUM);
25631 else
25632 saved_regs_mask &= ~(1 << PC_REGNUM);
25634 num_regs = bit_count (saved_regs_mask);
25635 if ((offsets->outgoing_args != (1 + num_regs)) || cfun->calls_alloca)
25637 rtx_insn *insn;
25638 emit_insn (gen_blockage ());
25639 /* Unwind the stack to just below the saved registers. */
25640 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
25641 hard_frame_pointer_rtx,
25642 GEN_INT (- 4 * num_regs)));
25644 arm_add_cfa_adjust_cfa_note (insn, - 4 * num_regs,
25645 stack_pointer_rtx, hard_frame_pointer_rtx);
25648 arm_emit_multi_reg_pop (saved_regs_mask);
25650 if (IS_INTERRUPT (func_type))
25652 /* Interrupt handlers will have pushed the
25653 IP onto the stack, so restore it now. */
25654 rtx_insn *insn;
25655 rtx addr = gen_rtx_MEM (SImode,
25656 gen_rtx_POST_INC (SImode,
25657 stack_pointer_rtx));
25658 set_mem_alias_set (addr, get_frame_alias_set ());
25659 insn = emit_insn (gen_movsi (gen_rtx_REG (SImode, IP_REGNUM), addr));
25660 REG_NOTES (insn) = alloc_reg_note (REG_CFA_RESTORE,
25661 gen_rtx_REG (SImode, IP_REGNUM),
25662 NULL_RTX);
25665 if (!really_return || (saved_regs_mask & (1 << PC_REGNUM)))
25666 return;
25668 if (crtl->calls_eh_return)
25669 emit_insn (gen_addsi3 (stack_pointer_rtx,
25670 stack_pointer_rtx,
25671 gen_rtx_REG (SImode, ARM_EH_STACKADJ_REGNUM)));
25673 if (IS_STACKALIGN (func_type))
25674 /* Restore the original stack pointer. Before prologue, the stack was
25675 realigned and the original stack pointer saved in r0. For details,
25676 see comment in arm_expand_prologue. */
25677 emit_insn (gen_movsi (stack_pointer_rtx, gen_rtx_REG (SImode, R0_REGNUM)));
25679 emit_jump_insn (simple_return_rtx);
25682 /* Generate RTL to represent ARM epilogue. Really_return is true if the
25683 function is not a sibcall. */
25684 void
25685 arm_expand_epilogue (bool really_return)
25687 unsigned long func_type;
25688 unsigned long saved_regs_mask;
25689 int num_regs = 0;
25690 int i;
25691 int amount;
25692 arm_stack_offsets *offsets;
25694 func_type = arm_current_func_type ();
25696 /* Naked functions don't have epilogues. Hence, generate the return pattern
25697 and let output_return_instruction take care of any instruction emission. */
25698 if (IS_NAKED (func_type)
25699 || (IS_VOLATILE (func_type) && TARGET_ABORT_NORETURN))
25701 if (really_return)
25702 emit_jump_insn (simple_return_rtx);
25703 return;
25706 /* If we are throwing an exception, then we really must be doing a
25707 return, so we can't tail-call. */
25708 gcc_assert (!crtl->calls_eh_return || really_return);
25710 if (TARGET_APCS_FRAME && frame_pointer_needed && TARGET_ARM)
25712 arm_expand_epilogue_apcs_frame (really_return);
25713 return;
25716 /* Get frame offsets for ARM. */
25717 offsets = arm_get_frame_offsets ();
25718 saved_regs_mask = offsets->saved_regs_mask;
25719 num_regs = bit_count (saved_regs_mask);
25721 if (frame_pointer_needed)
25723 rtx_insn *insn;
25724 /* Restore stack pointer if necessary. */
25725 if (TARGET_ARM)
25727 /* In ARM mode, frame pointer points to first saved register.
25728 Restore stack pointer to last saved register. */
25729 amount = offsets->frame - offsets->saved_regs;
25731 /* Force out any pending memory operations that reference stacked data
25732 before stack de-allocation occurs. */
25733 emit_insn (gen_blockage ());
25734 insn = emit_insn (gen_addsi3 (stack_pointer_rtx,
25735 hard_frame_pointer_rtx,
25736 GEN_INT (amount)));
25737 arm_add_cfa_adjust_cfa_note (insn, amount,
25738 stack_pointer_rtx,
25739 hard_frame_pointer_rtx);
25741 /* Emit USE(stack_pointer_rtx) to ensure that stack adjustment is not
25742 deleted. */
25743 emit_insn (gen_force_register_use (stack_pointer_rtx));
25745 else
25747 /* In Thumb-2 mode, the frame pointer points to the last saved
25748 register. */
25749 amount = offsets->locals_base - offsets->saved_regs;
25750 if (amount)
25752 insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx,
25753 hard_frame_pointer_rtx,
25754 GEN_INT (amount)));
25755 arm_add_cfa_adjust_cfa_note (insn, amount,
25756 hard_frame_pointer_rtx,
25757 hard_frame_pointer_rtx);
25760 /* Force out any pending memory operations that reference stacked data
25761 before stack de-allocation occurs. */
25762 emit_insn (gen_blockage ());
25763 insn = emit_insn (gen_movsi (stack_pointer_rtx,
25764 hard_frame_pointer_rtx));
25765 arm_add_cfa_adjust_cfa_note (insn, 0,
25766 stack_pointer_rtx,
25767 hard_frame_pointer_rtx);
25768 /* Emit USE(stack_pointer_rtx) to ensure that stack adjustment is not
25769 deleted. */
25770 emit_insn (gen_force_register_use (stack_pointer_rtx));
25773 else
25775 /* Pop off outgoing args and local frame to adjust stack pointer to
25776 last saved register. */
25777 amount = offsets->outgoing_args - offsets->saved_regs;
25778 if (amount)
25780 rtx_insn *tmp;
25781 /* Force out any pending memory operations that reference stacked data
25782 before stack de-allocation occurs. */
25783 emit_insn (gen_blockage ());
25784 tmp = emit_insn (gen_addsi3 (stack_pointer_rtx,
25785 stack_pointer_rtx,
25786 GEN_INT (amount)));
25787 arm_add_cfa_adjust_cfa_note (tmp, amount,
25788 stack_pointer_rtx, stack_pointer_rtx);
25789 /* Emit USE(stack_pointer_rtx) to ensure that stack adjustment is
25790 not deleted. */
25791 emit_insn (gen_force_register_use (stack_pointer_rtx));
25795 if (TARGET_HARD_FLOAT)
25797 /* Generate VFP register multi-pop. */
25798 int end_reg = LAST_VFP_REGNUM + 1;
25800 /* Scan the registers in reverse order. We need to match
25801 any groupings made in the prologue and generate matching
25802 vldm operations. The need to match groups is because,
25803 unlike pop, vldm can only do consecutive regs. */
25804 for (i = LAST_VFP_REGNUM - 1; i >= FIRST_VFP_REGNUM; i -= 2)
25805 /* Look for a case where a reg does not need restoring. */
25806 if ((!df_regs_ever_live_p (i) || call_used_regs[i])
25807 && (!df_regs_ever_live_p (i + 1)
25808 || call_used_regs[i + 1]))
25810 /* Restore the regs discovered so far (from reg+2 to
25811 end_reg). */
25812 if (end_reg > i + 2)
25813 arm_emit_vfp_multi_reg_pop (i + 2,
25814 (end_reg - (i + 2)) / 2,
25815 stack_pointer_rtx);
25816 end_reg = i;
25819 /* Restore the remaining regs that we have discovered (or possibly
25820 even all of them, if the conditional in the for loop never
25821 fired). */
25822 if (end_reg > i + 2)
25823 arm_emit_vfp_multi_reg_pop (i + 2,
25824 (end_reg - (i + 2)) / 2,
25825 stack_pointer_rtx);
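/* Editorial note (not in the original source): an illustrative grouping,
   assuming the prologue saved d8-d10 and d12 (but not d11) and pushed the
   lower-numbered group first.  Scanning from the top, the gap at d11 first
   triggers "vldm sp!, {d12}", and the final fall-through pop then emits
   "vldm sp!, {d8-d10}", mirroring the two store groups created by the
   prologue.  */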
25828 if (TARGET_IWMMXT)
25829 for (i = FIRST_IWMMXT_REGNUM; i <= LAST_IWMMXT_REGNUM; i++)
25830 if (df_regs_ever_live_p (i) && !call_used_regs[i])
25832 rtx_insn *insn;
25833 rtx addr = gen_rtx_MEM (V2SImode,
25834 gen_rtx_POST_INC (SImode,
25835 stack_pointer_rtx));
25836 set_mem_alias_set (addr, get_frame_alias_set ());
25837 insn = emit_insn (gen_movsi (gen_rtx_REG (V2SImode, i), addr));
25838 REG_NOTES (insn) = alloc_reg_note (REG_CFA_RESTORE,
25839 gen_rtx_REG (V2SImode, i),
25840 NULL_RTX);
25841 arm_add_cfa_adjust_cfa_note (insn, UNITS_PER_WORD,
25842 stack_pointer_rtx, stack_pointer_rtx);
25845 if (saved_regs_mask)
25847 rtx insn;
25848 bool return_in_pc = false;
25850 if (ARM_FUNC_TYPE (func_type) != ARM_FT_INTERWORKED
25851 && (TARGET_ARM || ARM_FUNC_TYPE (func_type) == ARM_FT_NORMAL)
25852 && !IS_CMSE_ENTRY (func_type)
25853 && !IS_STACKALIGN (func_type)
25854 && really_return
25855 && crtl->args.pretend_args_size == 0
25856 && saved_regs_mask & (1 << LR_REGNUM)
25857 && !crtl->calls_eh_return)
25859 saved_regs_mask &= ~(1 << LR_REGNUM);
25860 saved_regs_mask |= (1 << PC_REGNUM);
25861 return_in_pc = true;
25864 if (num_regs == 1 && (!IS_INTERRUPT (func_type) || !return_in_pc))
25866 for (i = 0; i <= LAST_ARM_REGNUM; i++)
25867 if (saved_regs_mask & (1 << i))
25869 rtx addr = gen_rtx_MEM (SImode,
25870 gen_rtx_POST_INC (SImode,
25871 stack_pointer_rtx));
25872 set_mem_alias_set (addr, get_frame_alias_set ());
25874 if (i == PC_REGNUM)
25876 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (2));
25877 XVECEXP (insn, 0, 0) = ret_rtx;
25878 XVECEXP (insn, 0, 1) = gen_rtx_SET (gen_rtx_REG (SImode, i),
25879 addr);
25880 RTX_FRAME_RELATED_P (XVECEXP (insn, 0, 1)) = 1;
25881 insn = emit_jump_insn (insn);
25883 else
25885 insn = emit_insn (gen_movsi (gen_rtx_REG (SImode, i),
25886 addr));
25887 REG_NOTES (insn) = alloc_reg_note (REG_CFA_RESTORE,
25888 gen_rtx_REG (SImode, i),
25889 NULL_RTX);
25890 arm_add_cfa_adjust_cfa_note (insn, UNITS_PER_WORD,
25891 stack_pointer_rtx,
25892 stack_pointer_rtx);
25896 else
25898 if (TARGET_LDRD
25899 && current_tune->prefer_ldrd_strd
25900 && !optimize_function_for_size_p (cfun))
25902 if (TARGET_THUMB2)
25903 thumb2_emit_ldrd_pop (saved_regs_mask);
25904 else if (TARGET_ARM && !IS_INTERRUPT (func_type))
25905 arm_emit_ldrd_pop (saved_regs_mask);
25906 else
25907 arm_emit_multi_reg_pop (saved_regs_mask);
25909 else
25910 arm_emit_multi_reg_pop (saved_regs_mask);
25913 if (return_in_pc)
25914 return;
25917 amount
25918 = crtl->args.pretend_args_size + arm_compute_static_chain_stack_bytes();
25919 if (amount)
25921 int i, j;
25922 rtx dwarf = NULL_RTX;
25923 rtx_insn *tmp =
25924 emit_insn (gen_addsi3 (stack_pointer_rtx,
25925 stack_pointer_rtx,
25926 GEN_INT (amount)));
25928 RTX_FRAME_RELATED_P (tmp) = 1;
25930 if (cfun->machine->uses_anonymous_args)
25932 /* Restore pretend args. See arm_expand_prologue for how the pretend
25933 args are saved on the stack. */
25934 int num_regs = crtl->args.pretend_args_size / 4;
25935 saved_regs_mask = (0xf0 >> num_regs) & 0xf;
25936 for (j = 0, i = 0; j < num_regs; i++)
25937 if (saved_regs_mask & (1 << i))
25939 rtx reg = gen_rtx_REG (SImode, i);
25940 dwarf = alloc_reg_note (REG_CFA_RESTORE, reg, dwarf);
25941 j++;
25943 REG_NOTES (tmp) = dwarf;
25945 arm_add_cfa_adjust_cfa_note (tmp, amount,
25946 stack_pointer_rtx, stack_pointer_rtx);
25949 /* Clear all caller-saved regs that are not used to return. */
25950 if (IS_CMSE_ENTRY (arm_current_func_type ()))
25952 /* CMSE_ENTRY always returns. */
25953 gcc_assert (really_return);
25954 cmse_nonsecure_entry_clear_before_return ();
25957 if (!really_return)
25958 return;
25960 if (crtl->calls_eh_return)
25961 emit_insn (gen_addsi3 (stack_pointer_rtx,
25962 stack_pointer_rtx,
25963 gen_rtx_REG (SImode, ARM_EH_STACKADJ_REGNUM)));
25965 if (IS_STACKALIGN (func_type))
25966 /* Restore the original stack pointer. Before prologue, the stack was
25967 realigned and the original stack pointer saved in r0. For details,
25968 see comment in arm_expand_prologue. */
25969 emit_insn (gen_movsi (stack_pointer_rtx, gen_rtx_REG (SImode, R0_REGNUM)));
25971 emit_jump_insn (simple_return_rtx);
25974 /* Implementation of insn prologue_thumb1_interwork. This is the first
25975 "instruction" of a function called in ARM mode. Swap to thumb mode. */
25977 const char *
25978 thumb1_output_interwork (void)
25980 const char * name;
25981 FILE *f = asm_out_file;
25983 gcc_assert (MEM_P (DECL_RTL (current_function_decl)));
25984 gcc_assert (GET_CODE (XEXP (DECL_RTL (current_function_decl), 0))
25985 == SYMBOL_REF);
25986 name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
25988 /* Generate code sequence to switch us into Thumb mode. */
25989 /* The .code 32 directive has already been emitted by
25990 ASM_DECLARE_FUNCTION_NAME. */
25991 asm_fprintf (f, "\torr\t%r, %r, #1\n", IP_REGNUM, PC_REGNUM);
25992 asm_fprintf (f, "\tbx\t%r\n", IP_REGNUM);
25994 /* Generate a label, so that the debugger will notice the
25995 change in instruction sets. This label is also used by
25996 the assembler to bypass the ARM code when this function
25997 is called from a Thumb encoded function elsewhere in the
25998 same file. Hence the definition of STUB_NAME here must
25999 agree with the definition in gas/config/tc-arm.c. */
26001 #define STUB_NAME ".real_start_of"
26003 fprintf (f, "\t.code\t16\n");
26004 #ifdef ARM_PE
26005 if (arm_dllexport_name_p (name))
26006 name = arm_strip_name_encoding (name);
26007 #endif
26008 asm_fprintf (f, "\t.globl %s%U%s\n", STUB_NAME, name);
26009 fprintf (f, "\t.thumb_func\n");
26010 asm_fprintf (f, "%s%U%s:\n", STUB_NAME, name);
26012 return "";
26015 /* Handle the case of a double word load into a low register from
26016 a computed memory address. The computed address may involve a
26017 register which is overwritten by the load. */
26018 const char *
26019 thumb_load_double_from_address (rtx *operands)
26021 rtx addr;
26022 rtx base;
26023 rtx offset;
26024 rtx arg1;
26025 rtx arg2;
26027 gcc_assert (REG_P (operands[0]));
26028 gcc_assert (MEM_P (operands[1]));
26030 /* Get the memory address. */
26031 addr = XEXP (operands[1], 0);
26033 /* Work out how the memory address is computed. */
26034 switch (GET_CODE (addr))
26036 case REG:
26037 operands[2] = adjust_address (operands[1], SImode, 4);
26039 if (REGNO (operands[0]) == REGNO (addr))
26041 output_asm_insn ("ldr\t%H0, %2", operands);
26042 output_asm_insn ("ldr\t%0, %1", operands);
26044 else
26046 output_asm_insn ("ldr\t%0, %1", operands);
26047 output_asm_insn ("ldr\t%H0, %2", operands);
26049 break;
26051 case CONST:
26052 /* Compute <address> + 4 for the high order load. */
26053 operands[2] = adjust_address (operands[1], SImode, 4);
26055 output_asm_insn ("ldr\t%0, %1", operands);
26056 output_asm_insn ("ldr\t%H0, %2", operands);
26057 break;
26059 case PLUS:
26060 arg1 = XEXP (addr, 0);
26061 arg2 = XEXP (addr, 1);
26063 if (CONSTANT_P (arg1))
26064 base = arg2, offset = arg1;
26065 else
26066 base = arg1, offset = arg2;
26068 gcc_assert (REG_P (base));
26070 /* Catch the case of <address> = <reg> + <reg> */
26071 if (REG_P (offset))
26073 int reg_offset = REGNO (offset);
26074 int reg_base = REGNO (base);
26075 int reg_dest = REGNO (operands[0]);
26077 /* Add the base and offset registers together into the
26078 higher destination register. */
26079 asm_fprintf (asm_out_file, "\tadd\t%r, %r, %r",
26080 reg_dest + 1, reg_base, reg_offset);
26082 /* Load the lower destination register from the address in
26083 the higher destination register. */
26084 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #0]",
26085 reg_dest, reg_dest + 1);
26087 /* Load the higher destination register from its own address
26088 plus 4. */
26089 asm_fprintf (asm_out_file, "\tldr\t%r, [%r, #4]",
26090 reg_dest + 1, reg_dest + 1);
26092 else
26094 /* Compute <address> + 4 for the high order load. */
26095 operands[2] = adjust_address (operands[1], SImode, 4);
26097 /* If the computed address is held in the low order register
26098 then load the high order register first, otherwise always
26099 load the low order register first. */
26100 if (REGNO (operands[0]) == REGNO (base))
26102 output_asm_insn ("ldr\t%H0, %2", operands);
26103 output_asm_insn ("ldr\t%0, %1", operands);
26105 else
26107 output_asm_insn ("ldr\t%0, %1", operands);
26108 output_asm_insn ("ldr\t%H0, %2", operands);
26111 break;
26113 case LABEL_REF:
26114 /* With no registers to worry about we can just load the value
26115 directly. */
26116 operands[2] = adjust_address (operands[1], SImode, 4);
26118 output_asm_insn ("ldr\t%H0, %2", operands);
26119 output_asm_insn ("ldr\t%0, %1", operands);
26120 break;
26122 default:
26123 gcc_unreachable ();
26126 return "";
26129 const char *
26130 thumb_output_move_mem_multiple (int n, rtx *operands)
26132 switch (n)
26134 case 2:
26135 if (REGNO (operands[4]) > REGNO (operands[5]))
26136 std::swap (operands[4], operands[5]);
26138 output_asm_insn ("ldmia\t%1!, {%4, %5}", operands);
26139 output_asm_insn ("stmia\t%0!, {%4, %5}", operands);
26140 break;
26142 case 3:
26143 if (REGNO (operands[4]) > REGNO (operands[5]))
26144 std::swap (operands[4], operands[5]);
26145 if (REGNO (operands[5]) > REGNO (operands[6]))
26146 std::swap (operands[5], operands[6]);
26147 if (REGNO (operands[4]) > REGNO (operands[5]))
26148 std::swap (operands[4], operands[5]);
26150 output_asm_insn ("ldmia\t%1!, {%4, %5, %6}", operands);
26151 output_asm_insn ("stmia\t%0!, {%4, %5, %6}", operands);
26152 break;
26154 default:
26155 gcc_unreachable ();
26158 return "";
26161 /* Output a call-via instruction for thumb state. */
26162 const char *
26163 thumb_call_via_reg (rtx reg)
26165 int regno = REGNO (reg);
26166 rtx *labelp;
26168 gcc_assert (regno < LR_REGNUM);
26170 /* If we are in the normal text section we can use a single instance
26171 per compilation unit. If we are doing function sections, then we need
26172 an entry per section, since we can't rely on reachability. */
26173 if (in_section == text_section)
26175 thumb_call_reg_needed = 1;
26177 if (thumb_call_via_label[regno] == NULL)
26178 thumb_call_via_label[regno] = gen_label_rtx ();
26179 labelp = thumb_call_via_label + regno;
26181 else
26183 if (cfun->machine->call_via[regno] == NULL)
26184 cfun->machine->call_via[regno] = gen_label_rtx ();
26185 labelp = cfun->machine->call_via + regno;
26188 output_asm_insn ("bl\t%a0", labelp);
26189 return "";
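/* Editorial note (not in the original source): the "bl %a0" above branches
   to a per-register veneer label.  For the text-section case the veneer
   itself (the label followed by a single "bx rN") is emitted once per
   compilation unit by arm_file_end later in this file; the per-section
   copies recorded in cfun->machine->call_via are emitted elsewhere.  */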
26192 /* Routines for generating rtl. */
26193 void
26194 thumb_expand_movmemqi (rtx *operands)
26196 rtx out = copy_to_mode_reg (SImode, XEXP (operands[0], 0));
26197 rtx in = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
26198 HOST_WIDE_INT len = INTVAL (operands[2]);
26199 HOST_WIDE_INT offset = 0;
26201 while (len >= 12)
26203 emit_insn (gen_movmem12b (out, in, out, in));
26204 len -= 12;
26207 if (len >= 8)
26209 emit_insn (gen_movmem8b (out, in, out, in));
26210 len -= 8;
26213 if (len >= 4)
26215 rtx reg = gen_reg_rtx (SImode);
26216 emit_insn (gen_movsi (reg, gen_rtx_MEM (SImode, in)));
26217 emit_insn (gen_movsi (gen_rtx_MEM (SImode, out), reg));
26218 len -= 4;
26219 offset += 4;
26222 if (len >= 2)
26224 rtx reg = gen_reg_rtx (HImode);
26225 emit_insn (gen_movhi (reg, gen_rtx_MEM (HImode,
26226 plus_constant (Pmode, in,
26227 offset))));
26228 emit_insn (gen_movhi (gen_rtx_MEM (HImode, plus_constant (Pmode, out,
26229 offset)),
26230 reg));
26231 len -= 2;
26232 offset += 2;
26235 if (len)
26237 rtx reg = gen_reg_rtx (QImode);
26238 emit_insn (gen_movqi (reg, gen_rtx_MEM (QImode,
26239 plus_constant (Pmode, in,
26240 offset))));
26241 emit_insn (gen_movqi (gen_rtx_MEM (QImode, plus_constant (Pmode, out,
26242 offset)),
26243 reg));
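/* Editorial note (not in the original source): a worked example of the
   decomposition above for a 23-byte copy: one 12-byte block move, then one
   8-byte block move (both advance the IN/OUT pointers), and for the
   remaining 3 bytes a halfword at offset 0 followed by a byte at offset 2
   from the advanced pointers.  */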
26247 void
26248 thumb_reload_out_hi (rtx *operands)
26250 emit_insn (gen_thumb_movhi_clobber (operands[0], operands[1], operands[2]));
26253 /* Return the length of a function name prefix
26254 that starts with the character 'c'. */
26255 static int
26256 arm_get_strip_length (int c)
26258 switch (c)
26260 ARM_NAME_ENCODING_LENGTHS
26261 default: return 0;
26265 /* Return a pointer to a function's name with any
26266 and all prefix encodings stripped from it. */
26267 const char *
26268 arm_strip_name_encoding (const char *name)
26270 int skip;
26272 while ((skip = arm_get_strip_length (* name)))
26273 name += skip;
26275 return name;
26278 /* If there is a '*' anywhere in the name's prefix, then
26279 emit the stripped name verbatim, otherwise prepend an
26280 underscore if leading underscores are being used. */
26281 void
26282 arm_asm_output_labelref (FILE *stream, const char *name)
26284 int skip;
26285 int verbatim = 0;
26287 while ((skip = arm_get_strip_length (* name)))
26289 verbatim |= (*name == '*');
26290 name += skip;
26293 if (verbatim)
26294 fputs (name, stream);
26295 else
26296 asm_fprintf (stream, "%U%s", name);
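/* Editorial note (not in the original source): e.g., assuming '*' is one
   of the prefix encodings listed in ARM_NAME_ENCODING_LENGTHS, an
   assembler name of "*foo" is printed verbatim as "foo", while a plain
   "bar" is printed via %U and so picks up the user label prefix, if the
   target uses one.  */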
26299 /* This function is used to emit an EABI tag and its associated value.
26300 We emit the numerical value of the tag in case the assembler does not
26301 support textual tags. (E.g. gas prior to 2.20). If requested we include
26302 the tag name in a comment so that anyone reading the assembler output
26303 will know which tag is being set.
26305 This function is not static because arm-c.c needs it too. */
26307 void
26308 arm_emit_eabi_attribute (const char *name, int num, int val)
26310 asm_fprintf (asm_out_file, "\t.eabi_attribute %d, %d", num, val);
26311 if (flag_verbose_asm || flag_debug_asm)
26312 asm_fprintf (asm_out_file, "\t%s %s", ASM_COMMENT_START, name);
26313 asm_fprintf (asm_out_file, "\n");
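/* Editorial note (not in the original source): for example,
   arm_emit_eabi_attribute ("Tag_ABI_optimization_goals", 30, 2) writes
       .eabi_attribute 30, 2   @ Tag_ABI_optimization_goals
   where the trailing comment appears only with -fverbose-asm or -dA, and
   '@' is assumed to be this target's ASM_COMMENT_START.  */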
26316 /* This function is used to print CPU tuning information as a comment
26317 in the assembler file. Pointers are not printed for now. */
26319 void
26320 arm_print_tune_info (void)
26322 asm_fprintf (asm_out_file, "\t" ASM_COMMENT_START ".tune parameters\n");
26323 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START "constant_limit:\t%d\n",
26324 current_tune->constant_limit);
26325 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26326 "max_insns_skipped:\t%d\n", current_tune->max_insns_skipped);
26327 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26328 "prefetch.num_slots:\t%d\n", current_tune->prefetch.num_slots);
26329 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26330 "prefetch.l1_cache_size:\t%d\n",
26331 current_tune->prefetch.l1_cache_size);
26332 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26333 "prefetch.l1_cache_line_size:\t%d\n",
26334 current_tune->prefetch.l1_cache_line_size);
26335 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26336 "prefer_constant_pool:\t%d\n",
26337 (int) current_tune->prefer_constant_pool);
26338 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26339 "branch_cost:\t(s:speed, p:predictable)\n");
26340 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START "\t\ts&p\tcost\n");
26341 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START "\t\t00\t%d\n",
26342 current_tune->branch_cost (false, false));
26343 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START "\t\t01\t%d\n",
26344 current_tune->branch_cost (false, true));
26345 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START "\t\t10\t%d\n",
26346 current_tune->branch_cost (true, false));
26347 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START "\t\t11\t%d\n",
26348 current_tune->branch_cost (true, true));
26349 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26350 "prefer_ldrd_strd:\t%d\n",
26351 (int) current_tune->prefer_ldrd_strd);
26352 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26353 "logical_op_non_short_circuit:\t[%d,%d]\n",
26354 (int) current_tune->logical_op_non_short_circuit_thumb,
26355 (int) current_tune->logical_op_non_short_circuit_arm);
26356 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26357 "prefer_neon_for_64bits:\t%d\n",
26358 (int) current_tune->prefer_neon_for_64bits);
26359 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26360 "disparage_flag_setting_t16_encodings:\t%d\n",
26361 (int) current_tune->disparage_flag_setting_t16_encodings);
26362 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26363 "string_ops_prefer_neon:\t%d\n",
26364 (int) current_tune->string_ops_prefer_neon);
26365 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START
26366 "max_insns_inline_memset:\t%d\n",
26367 current_tune->max_insns_inline_memset);
26368 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START "fusible_ops:\t%u\n",
26369 current_tune->fusible_ops);
26370 asm_fprintf (asm_out_file, "\t\t" ASM_COMMENT_START "sched_autopref:\t%d\n",
26371 (int) current_tune->sched_autopref);
26374 /* Print .arch and .arch_extension directives corresponding to the
26375 current architecture configuration. */
26376 static void
26377 arm_print_asm_arch_directives ()
26379 const arch_option *arch
26380 = arm_parse_arch_option_name (all_architectures, "-march",
26381 arm_active_target.arch_name);
26382 auto_sbitmap opt_bits (isa_num_bits);
26384 gcc_assert (arch);
26386 asm_fprintf (asm_out_file, "\t.arch %s\n", arm_active_target.arch_name);
26387 arm_last_printed_arch_string = arm_active_target.arch_name;
26388 if (!arch->common.extensions)
26389 return;
26391 for (const struct cpu_arch_extension *opt = arch->common.extensions;
26392 opt->name != NULL;
26393 opt++)
26395 if (!opt->remove)
26397 arm_initialize_isa (opt_bits, opt->isa_bits);
26399 /* If every feature bit of this option is set in the target
26400 ISA specification, print out the option name. However,
26401 don't print anything if all the bits are part of the
26402 FPU specification. */
26403 if (bitmap_subset_p (opt_bits, arm_active_target.isa)
26404 && !bitmap_subset_p (opt_bits, isa_all_fpubits))
26405 asm_fprintf (asm_out_file, "\t.arch_extension %s\n", opt->name);
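/* Editorial note (not in the original source): an illustrative result,
   assuming -march=armv8-a+crc: this function might print
       .arch armv8-a
       .arch_extension crc
   with extensions that belong purely to the FPU specification filtered out
   by the bitmap_subset_p test above.  The exact names depend on the option
   tables and the enabled ISA bits.  */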
26410 static void
26411 arm_file_start (void)
26413 int val;
26415 if (TARGET_BPABI)
26417 /* We don't have a specified CPU. Use the architecture to
26418 generate the tags.
26420 Note: it might be better to do this unconditionally, then the
26421 assembler would not need to know about all new CPU names as
26422 they are added. */
26423 if (!arm_active_target.core_name)
26425 /* armv7ve doesn't support any extensions. */
26426 if (strcmp (arm_active_target.arch_name, "armv7ve") == 0)
26428 /* Keep backward compatibility for assemblers
26429 which don't support armv7ve. */
26430 asm_fprintf (asm_out_file, "\t.arch armv7-a\n");
26431 asm_fprintf (asm_out_file, "\t.arch_extension virt\n");
26432 asm_fprintf (asm_out_file, "\t.arch_extension idiv\n");
26433 asm_fprintf (asm_out_file, "\t.arch_extension sec\n");
26434 asm_fprintf (asm_out_file, "\t.arch_extension mp\n");
26435 arm_last_printed_arch_string = "armv7ve";
26437 else
26438 arm_print_asm_arch_directives ();
26440 else if (strncmp (arm_active_target.core_name, "generic", 7) == 0)
26442 asm_fprintf (asm_out_file, "\t.arch %s\n",
26443 arm_active_target.core_name + 8);
26444 arm_last_printed_arch_string = arm_active_target.core_name + 8;
26446 else
26448 const char* truncated_name
26449 = arm_rewrite_selected_cpu (arm_active_target.core_name);
26450 asm_fprintf (asm_out_file, "\t.cpu %s\n", truncated_name);
26453 if (print_tune_info)
26454 arm_print_tune_info ();
26456 if (! TARGET_SOFT_FLOAT)
26458 if (TARGET_HARD_FLOAT && TARGET_VFP_SINGLE)
26459 arm_emit_eabi_attribute ("Tag_ABI_HardFP_use", 27, 1);
26461 if (TARGET_HARD_FLOAT_ABI)
26462 arm_emit_eabi_attribute ("Tag_ABI_VFP_args", 28, 1);
26465 /* Some of these attributes only apply when the corresponding features
26466 are used. However we don't have any easy way of figuring this out.
26467 Conservatively record the setting that would have been used. */
26469 if (flag_rounding_math)
26470 arm_emit_eabi_attribute ("Tag_ABI_FP_rounding", 19, 1);
26472 if (!flag_unsafe_math_optimizations)
26474 arm_emit_eabi_attribute ("Tag_ABI_FP_denormal", 20, 1);
26475 arm_emit_eabi_attribute ("Tag_ABI_FP_exceptions", 21, 1);
26477 if (flag_signaling_nans)
26478 arm_emit_eabi_attribute ("Tag_ABI_FP_user_exceptions", 22, 1);
26480 arm_emit_eabi_attribute ("Tag_ABI_FP_number_model", 23,
26481 flag_finite_math_only ? 1 : 3);
26483 arm_emit_eabi_attribute ("Tag_ABI_align8_needed", 24, 1);
26484 arm_emit_eabi_attribute ("Tag_ABI_align8_preserved", 25, 1);
26485 arm_emit_eabi_attribute ("Tag_ABI_enum_size", 26,
26486 flag_short_enums ? 1 : 2);
26488 /* Tag_ABI_optimization_goals. */
26489 if (optimize_size)
26490 val = 4;
26491 else if (optimize >= 2)
26492 val = 2;
26493 else if (optimize)
26494 val = 1;
26495 else
26496 val = 6;
26497 arm_emit_eabi_attribute ("Tag_ABI_optimization_goals", 30, val);
26499 arm_emit_eabi_attribute ("Tag_CPU_unaligned_access", 34,
26500 unaligned_access);
26502 if (arm_fp16_format)
26503 arm_emit_eabi_attribute ("Tag_ABI_FP_16bit_format", 38,
26504 (int) arm_fp16_format);
26506 if (arm_lang_output_object_attributes_hook)
26507 arm_lang_output_object_attributes_hook();
26510 default_file_start ();
26513 static void
26514 arm_file_end (void)
26516 int regno;
26518 if (NEED_INDICATE_EXEC_STACK)
26519 /* Add .note.GNU-stack. */
26520 file_end_indicate_exec_stack ();
26522 if (! thumb_call_reg_needed)
26523 return;
26525 switch_to_section (text_section);
26526 asm_fprintf (asm_out_file, "\t.code 16\n");
26527 ASM_OUTPUT_ALIGN (asm_out_file, 1);
26529 for (regno = 0; regno < LR_REGNUM; regno++)
26531 rtx label = thumb_call_via_label[regno];
26533 if (label != 0)
26535 targetm.asm_out.internal_label (asm_out_file, "L",
26536 CODE_LABEL_NUMBER (label));
26537 asm_fprintf (asm_out_file, "\tbx\t%r\n", regno);
26542 #ifndef ARM_PE
26543 /* Symbols in the text segment can be accessed without indirecting via the
26544 constant pool; it may take an extra binary operation, but this is still
26545 faster than indirecting via memory. Don't do this when not optimizing,
26546 since we won't be calculating all of the offsets necessary to do this
26547 simplification. */
26549 static void
26550 arm_encode_section_info (tree decl, rtx rtl, int first)
26552 if (optimize > 0 && TREE_CONSTANT (decl))
26553 SYMBOL_REF_FLAG (XEXP (rtl, 0)) = 1;
26555 default_encode_section_info (decl, rtl, first);
26557 #endif /* !ARM_PE */
26559 static void
26560 arm_internal_label (FILE *stream, const char *prefix, unsigned long labelno)
26562 if (arm_ccfsm_state == 3 && (unsigned) arm_target_label == labelno
26563 && !strcmp (prefix, "L"))
26565 arm_ccfsm_state = 0;
26566 arm_target_insn = NULL;
26568 default_internal_label (stream, prefix, labelno);
26571 /* Output code to add DELTA to the first argument, and then jump
26572 to FUNCTION. Used for C++ multiple inheritance. */
26574 static void
26575 arm_thumb1_mi_thunk (FILE *file, tree, HOST_WIDE_INT delta,
26576 HOST_WIDE_INT, tree function)
26578 static int thunk_label = 0;
26579 char label[256];
26580 char labelpc[256];
26581 int mi_delta = delta;
26582 const char *const mi_op = mi_delta < 0 ? "sub" : "add";
26583 int shift = 0;
26584 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)
26585 ? 1 : 0);
26586 if (mi_delta < 0)
26587 mi_delta = - mi_delta;
26589 final_start_function (emit_barrier (), file, 1);
26591 if (TARGET_THUMB1)
26593 int labelno = thunk_label++;
26594 ASM_GENERATE_INTERNAL_LABEL (label, "LTHUMBFUNC", labelno);
26595 /* Thunks are entered in arm mode when available. */
26596 if (TARGET_THUMB1_ONLY)
26598 /* push r3 so we can use it as a temporary. */
26599 /* TODO: Omit this save if r3 is not used. */
26600 fputs ("\tpush {r3}\n", file);
26601 fputs ("\tldr\tr3, ", file);
26603 else
26605 fputs ("\tldr\tr12, ", file);
26607 assemble_name (file, label);
26608 fputc ('\n', file);
26609 if (flag_pic)
26611 /* If we are generating PIC, the ldr instruction below loads
26612 "(target - 7) - .LTHUNKPCn" into r12. The pc reads as
26613 the address of the add + 8, so we have:
26615 r12 = (target - 7) - .LTHUNKPCn + (.LTHUNKPCn + 8)
26616 = target + 1.
26618 Note that we have "+ 1" because some versions of GNU ld
26619 don't set the low bit of the result for R_ARM_REL32
26620 relocations against thumb function symbols.
26621 On ARMv6M this is +4, not +8. */
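	 /* Illustration only (label names, registers and the delta sequence
	    vary): for a PIC thunk entered in ARM state the code around here
	    emits roughly

	            ldr     r12, .LTHUMBFUNC0
	    .LTHUNKPC0:
	            add     r12, pc, r12
	            add     r0, r0, #<delta>
	            bx      r12
	    .LTHUMBFUNC0:
	            .word   <function> - 7 - .LTHUNKPC0

	    so r12 ends up holding <function> + 1 and the bx enters the target
	    in Thumb state.  */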
26622 ASM_GENERATE_INTERNAL_LABEL (labelpc, "LTHUNKPC", labelno);
26623 assemble_name (file, labelpc);
26624 fputs (":\n", file);
26625 if (TARGET_THUMB1_ONLY)
26627 /* This is 2 insns after the start of the thunk, so we know it
26628 is 4-byte aligned. */
26629 fputs ("\tadd\tr3, pc, r3\n", file);
26630 fputs ("\tmov r12, r3\n", file);
26632 else
26633 fputs ("\tadd\tr12, pc, r12\n", file);
26635 else if (TARGET_THUMB1_ONLY)
26636 fputs ("\tmov r12, r3\n", file);
26638 if (TARGET_THUMB1_ONLY)
26640 if (mi_delta > 255)
26642 fputs ("\tldr\tr3, ", file);
26643 assemble_name (file, label);
26644 fputs ("+4\n", file);
26645 asm_fprintf (file, "\t%ss\t%r, %r, r3\n",
26646 mi_op, this_regno, this_regno);
26648 else if (mi_delta != 0)
26650 /* Thumb1 unified syntax requires s suffix in instruction name when
26651 one of the operands is immediate. */
26652 asm_fprintf (file, "\t%ss\t%r, %r, #%d\n",
26653 mi_op, this_regno, this_regno,
26654 mi_delta);
26657 else
26659 /* TODO: Use movw/movt for large constants when available. */
26660 while (mi_delta != 0)
26662 if ((mi_delta & (3 << shift)) == 0)
26663 shift += 2;
26664 else
26666 asm_fprintf (file, "\t%s\t%r, %r, #%d\n",
26667 mi_op, this_regno, this_regno,
26668 mi_delta & (0xff << shift));
26669 mi_delta &= ~(0xff << shift);
26670 shift += 8;
26674 if (TARGET_THUMB1)
26676 if (TARGET_THUMB1_ONLY)
26677 fputs ("\tpop\t{r3}\n", file);
26679 fprintf (file, "\tbx\tr12\n");
26680 ASM_OUTPUT_ALIGN (file, 2);
26681 assemble_name (file, label);
26682 fputs (":\n", file);
26683 if (flag_pic)
26685 /* Output ".word .LTHUNKn-[3,7]-.LTHUNKPCn". */
26686 rtx tem = XEXP (DECL_RTL (function), 0);
26687 /* For TARGET_THUMB1_ONLY the thunk is in Thumb mode, so the PC
26688 pipeline offset is four rather than eight. Adjust the offset
26689 accordingly. */
26690 tem = plus_constant (GET_MODE (tem), tem,
26691 TARGET_THUMB1_ONLY ? -3 : -7);
26692 tem = gen_rtx_MINUS (GET_MODE (tem),
26693 tem,
26694 gen_rtx_SYMBOL_REF (Pmode,
26695 ggc_strdup (labelpc)));
26696 assemble_integer (tem, 4, BITS_PER_WORD, 1);
26698 else
26699 /* Output ".word .LTHUNKn". */
26700 assemble_integer (XEXP (DECL_RTL (function), 0), 4, BITS_PER_WORD, 1);
26702 if (TARGET_THUMB1_ONLY && mi_delta > 255)
26703 assemble_integer (GEN_INT(mi_delta), 4, BITS_PER_WORD, 1);
26705 else
26707 fputs ("\tb\t", file);
26708 assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
26709 if (NEED_PLT_RELOC)
26710 fputs ("(PLT)", file);
26711 fputc ('\n', file);
26714 final_end_function ();
26717 /* MI thunk handling for TARGET_32BIT. */
26719 static void
26720 arm32_output_mi_thunk (FILE *file, tree, HOST_WIDE_INT delta,
26721 HOST_WIDE_INT vcall_offset, tree function)
26723 /* On ARM, this_regno is R0 or R1 depending on
26724 whether the function returns an aggregate or not.
26726 int this_regno = (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)),
26727 function)
26728 ? R1_REGNUM : R0_REGNUM);
26730 rtx temp = gen_rtx_REG (Pmode, IP_REGNUM);
26731 rtx this_rtx = gen_rtx_REG (Pmode, this_regno);
26732 reload_completed = 1;
26733 emit_note (NOTE_INSN_PROLOGUE_END);
26735 /* Add DELTA to THIS_RTX. */
26736 if (delta != 0)
26737 arm_split_constant (PLUS, Pmode, NULL_RTX,
26738 delta, this_rtx, this_rtx, false);
26740 /* Add *(*THIS_RTX + VCALL_OFFSET) to THIS_RTX. */
26741 if (vcall_offset != 0)
26743 /* Load *THIS_RTX. */
26744 emit_move_insn (temp, gen_rtx_MEM (Pmode, this_rtx));
26745 /* Compute *THIS_RTX + VCALL_OFFSET. */
26746 arm_split_constant (PLUS, Pmode, NULL_RTX, vcall_offset, temp, temp,
26747 false);
26748 /* Compute *(*THIS_RTX + VCALL_OFFSET). */
26749 emit_move_insn (temp, gen_rtx_MEM (Pmode, temp));
26750 emit_insn (gen_add3_insn (this_rtx, this_rtx, temp));
26753 /* Generate a tail call to the target function. */
26754 if (!TREE_USED (function))
26756 assemble_external (function);
26757 TREE_USED (function) = 1;
26759 rtx funexp = XEXP (DECL_RTL (function), 0);
26760 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
26761 rtx_insn * insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, NULL_RTX));
26762 SIBLING_CALL_P (insn) = 1;
26764 insn = get_insns ();
26765 shorten_branches (insn);
26766 final_start_function (insn, file, 1);
26767 final (insn, file, 1);
26768 final_end_function ();
26770 /* Stop pretending this is a post-reload pass. */
26771 reload_completed = 0;
26774 /* Output code to add DELTA to the first argument, and then jump
26775 to FUNCTION. Used for C++ multiple inheritance. */
26777 static void
26778 arm_output_mi_thunk (FILE *file, tree thunk, HOST_WIDE_INT delta,
26779 HOST_WIDE_INT vcall_offset, tree function)
26781 if (TARGET_32BIT)
26782 arm32_output_mi_thunk (file, thunk, delta, vcall_offset, function);
26783 else
26784 arm_thumb1_mi_thunk (file, thunk, delta, vcall_offset, function);
26788 arm_emit_vector_const (FILE *file, rtx x)
26790 int i;
26791 const char * pattern;
26793 gcc_assert (GET_CODE (x) == CONST_VECTOR);
26795 switch (GET_MODE (x))
26797 case E_V2SImode: pattern = "%08x"; break;
26798 case E_V4HImode: pattern = "%04x"; break;
26799 case E_V8QImode: pattern = "%02x"; break;
26800 default: gcc_unreachable ();
26803 fprintf (file, "0x");
26804 for (i = CONST_VECTOR_NUNITS (x); i--;)
26806 rtx element;
26808 element = CONST_VECTOR_ELT (x, i);
26809 fprintf (file, pattern, INTVAL (element));
26812 return 1;
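 /* For example, the loop above emits the V4HImode constant {1, 2, 3, 4} as
    0x0004000300020001 (illustrative; lane 0 ends up in the least
    significant halfword).  */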
26815 /* Emit a fp16 constant appropriately padded to occupy a 4-byte word.
26816 HFmode constant pool entries are actually loaded with ldr. */
26817 void
26818 arm_emit_fp16_const (rtx c)
26820 long bits;
26822 bits = real_to_target (NULL, CONST_DOUBLE_REAL_VALUE (c), HFmode);
26823 if (WORDS_BIG_ENDIAN)
26824 assemble_zeros (2);
26825 assemble_integer (GEN_INT (bits), 2, BITS_PER_WORD, 1);
26826 if (!WORDS_BIG_ENDIAN)
26827 assemble_zeros (2);
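 /* Illustrative example: the HFmode constant 1.0 has bit pattern 0x3c00, so
    a little-endian target emits that halfword followed by two bytes of zero
    padding, while a big-endian target emits the padding first; either way
    ldr sees a full 32-bit word.  */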
26830 const char *
26831 arm_output_load_gr (rtx *operands)
26833 rtx reg;
26834 rtx offset;
26835 rtx wcgr;
26836 rtx sum;
26838 if (!MEM_P (operands [1])
26839 || GET_CODE (sum = XEXP (operands [1], 0)) != PLUS
26840 || !REG_P (reg = XEXP (sum, 0))
26841 || !CONST_INT_P (offset = XEXP (sum, 1))
26842 || ((INTVAL (offset) < 1024) && (INTVAL (offset) > -1024)))
26843 return "wldrw%?\t%0, %1";
26845 /* Fix up an out-of-range load of a GR register. */
26846 output_asm_insn ("str%?\t%0, [sp, #-4]!\t@ Start of GR load expansion", & reg);
26847 wcgr = operands[0];
26848 operands[0] = reg;
26849 output_asm_insn ("ldr%?\t%0, %1", operands);
26851 operands[0] = wcgr;
26852 operands[1] = reg;
26853 output_asm_insn ("tmcr%?\t%0, %1", operands);
26854 output_asm_insn ("ldr%?\t%0, [sp], #4\t@ End of GR load expansion", & reg);
26856 return "";
26859 /* Worker function for TARGET_SETUP_INCOMING_VARARGS.
26861 On the ARM, PRETEND_SIZE is set in order to have the prologue push the last
26862 named arg and all anonymous args onto the stack.
26863 XXX I know the prologue shouldn't be pushing registers, but it is faster
26864 that way. */
26866 static void
26867 arm_setup_incoming_varargs (cumulative_args_t pcum_v,
26868 machine_mode mode,
26869 tree type,
26870 int *pretend_size,
26871 int second_time ATTRIBUTE_UNUSED)
26873 CUMULATIVE_ARGS *pcum = get_cumulative_args (pcum_v);
26874 int nregs;
26876 cfun->machine->uses_anonymous_args = 1;
26877 if (pcum->pcs_variant <= ARM_PCS_AAPCS_LOCAL)
26879 nregs = pcum->aapcs_ncrn;
26880 if (nregs & 1)
26882 int res = arm_needs_doubleword_align (mode, type);
26883 if (res < 0 && warn_psabi)
26884 inform (input_location, "parameter passing for argument of "
26885 "type %qT changed in GCC 7.1", type);
26886 else if (res > 0)
26887 nregs++;
26890 else
26891 nregs = pcum->nregs;
26893 if (nregs < NUM_ARG_REGS)
26894 *pretend_size = (NUM_ARG_REGS - nregs) * UNITS_PER_WORD;
26897 /* We can't rely on the caller doing the proper promotion when
26898 using APCS or ATPCS. */
26900 static bool
26901 arm_promote_prototypes (const_tree t ATTRIBUTE_UNUSED)
26903 return !TARGET_AAPCS_BASED;
26906 static machine_mode
26907 arm_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
26908 machine_mode mode,
26909 int *punsignedp ATTRIBUTE_UNUSED,
26910 const_tree fntype ATTRIBUTE_UNUSED,
26911 int for_return ATTRIBUTE_UNUSED)
26913 if (GET_MODE_CLASS (mode) == MODE_INT
26914 && GET_MODE_SIZE (mode) < 4)
26915 return SImode;
26917 return mode;
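 /* E.g. a char or short argument or return value (QImode/HImode) is widened
    to SImode here, so it travels in a full 32-bit register.  */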
26921 static bool
26922 arm_default_short_enums (void)
26924 return ARM_DEFAULT_SHORT_ENUMS;
26928 /* AAPCS requires that anonymous bitfields affect structure alignment. */
26930 static bool
26931 arm_align_anon_bitfield (void)
26933 return TARGET_AAPCS_BASED;
26937 /* The generic C++ ABI says 64-bit (long long). The EABI says 32-bit. */
26939 static tree
26940 arm_cxx_guard_type (void)
26942 return TARGET_AAPCS_BASED ? integer_type_node : long_long_integer_type_node;
26946 /* The EABI says to test the least significant bit of a guard variable. */
26948 static bool
26949 arm_cxx_guard_mask_bit (void)
26951 return TARGET_AAPCS_BASED;
26955 /* The EABI specifies that all array cookies are 8 bytes long. */
26957 static tree
26958 arm_get_cookie_size (tree type)
26960 tree size;
26962 if (!TARGET_AAPCS_BASED)
26963 return default_cxx_get_cookie_size (type);
26965 size = build_int_cst (sizetype, 8);
26966 return size;
26970 /* The EABI says that array cookies should also contain the element size. */
26972 static bool
26973 arm_cookie_has_size (void)
26975 return TARGET_AAPCS_BASED;
26979 /* The EABI says constructors and destructors should return a pointer to
26980 the object constructed/destroyed. */
26982 static bool
26983 arm_cxx_cdtor_returns_this (void)
26985 return TARGET_AAPCS_BASED;
26988 /* The EABI says that an inline function may never be the key
26989 method. */
26991 static bool
26992 arm_cxx_key_method_may_be_inline (void)
26994 return !TARGET_AAPCS_BASED;
26997 static void
26998 arm_cxx_determine_class_data_visibility (tree decl)
27000 if (!TARGET_AAPCS_BASED
27001 || !TARGET_DLLIMPORT_DECL_ATTRIBUTES)
27002 return;
27004 /* In general, \S 3.2.5.5 of the ARM EABI requires that class data
27005 is exported. However, on systems without dynamic vague linkage,
27006 \S 3.2.5.6 says that COMDAT class data has hidden linkage. */
27007 if (!TARGET_ARM_DYNAMIC_VAGUE_LINKAGE_P && DECL_COMDAT (decl))
27008 DECL_VISIBILITY (decl) = VISIBILITY_HIDDEN;
27009 else
27010 DECL_VISIBILITY (decl) = VISIBILITY_DEFAULT;
27011 DECL_VISIBILITY_SPECIFIED (decl) = 1;
27014 static bool
27015 arm_cxx_class_data_always_comdat (void)
27017 /* \S 3.2.5.4 of the ARM C++ ABI says that class data only have
27018 vague linkage if the class has no key function. */
27019 return !TARGET_AAPCS_BASED;
27023 /* The EABI says __aeabi_atexit should be used to register static
27024 destructors. */
27026 static bool
27027 arm_cxx_use_aeabi_atexit (void)
27029 return TARGET_AAPCS_BASED;
27033 void
27034 arm_set_return_address (rtx source, rtx scratch)
27036 arm_stack_offsets *offsets;
27037 HOST_WIDE_INT delta;
27038 rtx addr, mem;
27039 unsigned long saved_regs;
27041 offsets = arm_get_frame_offsets ();
27042 saved_regs = offsets->saved_regs_mask;
27044 if ((saved_regs & (1 << LR_REGNUM)) == 0)
27045 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
27046 else
27048 if (frame_pointer_needed)
27049 addr = plus_constant (Pmode, hard_frame_pointer_rtx, -4);
27050 else
27052 /* LR will be the first saved register. */
27053 delta = offsets->outgoing_args - (offsets->frame + 4);
27056 if (delta >= 4096)
27058 emit_insn (gen_addsi3 (scratch, stack_pointer_rtx,
27059 GEN_INT (delta & ~4095)));
27060 addr = scratch;
27061 delta &= 4095;
27063 else
27064 addr = stack_pointer_rtx;
27066 addr = plus_constant (Pmode, addr, delta);
27069 /* The store needs to be marked to prevent DSE from deleting
27070 it as dead if it is based on fp. */
27071 mem = gen_frame_mem (Pmode, addr);
27072 MEM_VOLATILE_P (mem) = true;
27073 emit_move_insn (mem, source);
27078 void
27079 thumb_set_return_address (rtx source, rtx scratch)
27081 arm_stack_offsets *offsets;
27082 HOST_WIDE_INT delta;
27083 HOST_WIDE_INT limit;
27084 int reg;
27085 rtx addr, mem;
27086 unsigned long mask;
27088 emit_use (source);
27090 offsets = arm_get_frame_offsets ();
27091 mask = offsets->saved_regs_mask;
27092 if (mask & (1 << LR_REGNUM))
27094 limit = 1024;
27095 /* Find the saved regs. */
27096 if (frame_pointer_needed)
27098 delta = offsets->soft_frame - offsets->saved_args;
27099 reg = THUMB_HARD_FRAME_POINTER_REGNUM;
27100 if (TARGET_THUMB1)
27101 limit = 128;
27103 else
27105 delta = offsets->outgoing_args - offsets->saved_args;
27106 reg = SP_REGNUM;
27108 /* Allow for the stack frame. */
27109 if (TARGET_THUMB1 && TARGET_BACKTRACE)
27110 delta -= 16;
27111 /* The link register is always the first saved register. */
27112 delta -= 4;
27114 /* Construct the address. */
27115 addr = gen_rtx_REG (SImode, reg);
27116 if (delta > limit)
27118 emit_insn (gen_movsi (scratch, GEN_INT (delta)));
27119 emit_insn (gen_addsi3 (scratch, scratch, stack_pointer_rtx));
27120 addr = scratch;
27122 else
27123 addr = plus_constant (Pmode, addr, delta);
27125 /* The store needs to be marked to prevent DSE from deleting
27126 it as dead if it is based on fp. */
27127 mem = gen_frame_mem (Pmode, addr);
27128 MEM_VOLATILE_P (mem) = true;
27129 emit_move_insn (mem, source);
27131 else
27132 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNUM), source);
27135 /* Implements target hook vector_mode_supported_p. */
27136 bool
27137 arm_vector_mode_supported_p (machine_mode mode)
27139 /* Neon also supports V2SImode, etc. listed in the clause below. */
27140 if (TARGET_NEON && (mode == V2SFmode || mode == V4SImode || mode == V8HImode
27141 || mode == V4HFmode || mode == V16QImode || mode == V4SFmode
27142 || mode == V2DImode || mode == V8HFmode))
27143 return true;
27145 if ((TARGET_NEON || TARGET_IWMMXT)
27146 && ((mode == V2SImode)
27147 || (mode == V4HImode)
27148 || (mode == V8QImode)))
27149 return true;
27151 if (TARGET_INT_SIMD && (mode == V4UQQmode || mode == V4QQmode
27152 || mode == V2UHQmode || mode == V2HQmode || mode == V2UHAmode
27153 || mode == V2HAmode))
27154 return true;
27156 return false;
27159 /* Implements target hook array_mode_supported_p. */
27161 static bool
27162 arm_array_mode_supported_p (machine_mode mode,
27163 unsigned HOST_WIDE_INT nelems)
27165 /* We don't want to enable interleaved loads and stores for BYTES_BIG_ENDIAN
27166 for now, as the lane-swapping logic needs to be extended in the expanders.
27167 See PR target/82518. */
27168 if (TARGET_NEON && !BYTES_BIG_ENDIAN
27169 && (VALID_NEON_DREG_MODE (mode) || VALID_NEON_QREG_MODE (mode))
27170 && (nelems >= 2 && nelems <= 4))
27171 return true;
27173 return false;
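 /* Illustration (little-endian with Neon enabled): a 3-element array of
    V4SImode vectors (48 bytes) is accepted above, which lets the vectorizer
    use the vld3/vst3 style load_lanes/store_lanes patterns for interleaved
    accesses.  On big-endian the hook refuses, so the vectorizer falls back
    to separate loads plus permutes (see PR target/82518 cited above).  */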
27176 /* Use the option -mvectorize-with-neon-double to override the use of quadword
27177 registers when autovectorizing for Neon, at least until multiple vector
27178 widths are supported properly by the middle-end. */
27180 static machine_mode
27181 arm_preferred_simd_mode (scalar_mode mode)
27183 if (TARGET_NEON)
27184 switch (mode)
27186 case E_SFmode:
27187 return TARGET_NEON_VECTORIZE_DOUBLE ? V2SFmode : V4SFmode;
27188 case E_SImode:
27189 return TARGET_NEON_VECTORIZE_DOUBLE ? V2SImode : V4SImode;
27190 case E_HImode:
27191 return TARGET_NEON_VECTORIZE_DOUBLE ? V4HImode : V8HImode;
27192 case E_QImode:
27193 return TARGET_NEON_VECTORIZE_DOUBLE ? V8QImode : V16QImode;
27194 case E_DImode:
27195 if (!TARGET_NEON_VECTORIZE_DOUBLE)
27196 return V2DImode;
27197 break;
27199 default:;
27202 if (TARGET_REALLY_IWMMXT)
27203 switch (mode)
27205 case E_SImode:
27206 return V2SImode;
27207 case E_HImode:
27208 return V4HImode;
27209 case E_QImode:
27210 return V8QImode;
27212 default:;
27215 return word_mode;
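 /* Example: when vectorizing float code with Neon enabled, SFmode maps to
    V4SFmode (a 128-bit Q register) by default, or to V2SFmode (a 64-bit D
    register) under -mvectorize-with-neon-double.  */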
27218 /* Implement TARGET_CLASS_LIKELY_SPILLED_P.
27220 We need to define this for LO_REGS on Thumb-1. Otherwise we can end up
27221 using r0-r4 for function arguments, r7 for the stack frame and don't have
27222 enough left over to do doubleword arithmetic. For Thumb-2 all the
27223 potentially problematic instructions accept high registers so this is not
27224 necessary. Care needs to be taken to avoid adding new Thumb-2 patterns
27225 that require many low registers. */
27226 static bool
27227 arm_class_likely_spilled_p (reg_class_t rclass)
27229 if ((TARGET_THUMB1 && rclass == LO_REGS)
27230 || rclass == CC_REG)
27231 return true;
27233 return false;
27236 /* Implements target hook small_register_classes_for_mode_p. */
27237 bool
27238 arm_small_register_classes_for_mode_p (machine_mode mode ATTRIBUTE_UNUSED)
27240 return TARGET_THUMB1;
27243 /* Implement TARGET_SHIFT_TRUNCATION_MASK. SImode shifts use normal
27244 ARM insns and therefore guarantee that the shift count is modulo 256.
27245 DImode shifts (those implemented by lib1funcs.S or by optabs.c)
27246 guarantee no particular behavior for out-of-range counts. */
27248 static unsigned HOST_WIDE_INT
27249 arm_shift_truncation_mask (machine_mode mode)
27251 return mode == SImode ? 255 : 0;
27255 /* Map internal gcc register numbers to DWARF2 register numbers. */
27257 unsigned int
27258 arm_dbx_register_number (unsigned int regno)
27260 if (regno < 16)
27261 return regno;
27263 if (IS_VFP_REGNUM (regno))
27265 /* See comment in arm_dwarf_register_span. */
27266 if (VFP_REGNO_OK_FOR_SINGLE (regno))
27267 return 64 + regno - FIRST_VFP_REGNUM;
27268 else
27269 return 256 + (regno - FIRST_VFP_REGNUM) / 2;
27272 if (IS_IWMMXT_GR_REGNUM (regno))
27273 return 104 + regno - FIRST_IWMMXT_GR_REGNUM;
27275 if (IS_IWMMXT_REGNUM (regno))
27276 return 112 + regno - FIRST_IWMMXT_REGNUM;
27278 return DWARF_FRAME_REGISTERS;
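 /* Worked examples of the mapping above: core registers keep their own
    numbers (r11 -> 11), s0 (FIRST_VFP_REGNUM) maps to DWARF 64, and
    registers in the D16-D31 range land in the 256-287 block, e.g. d16 ->
    272.  Anything else is reported as DWARF_FRAME_REGISTERS.  */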
27281 /* Dwarf models VFPv3 registers as 32 64-bit registers.
27282 GCC models them as 64 32-bit registers, so we need to describe this to
27283 the DWARF generation code. Other registers can use the default. */
27284 static rtx
27285 arm_dwarf_register_span (rtx rtl)
27287 machine_mode mode;
27288 unsigned regno;
27289 rtx parts[16];
27290 int nregs;
27291 int i;
27293 regno = REGNO (rtl);
27294 if (!IS_VFP_REGNUM (regno))
27295 return NULL_RTX;
27297 /* XXX FIXME: The EABI defines two VFP register ranges:
27298 64-95: Legacy VFPv2 numbering for S0-S31 (obsolescent)
27299 256-287: D0-D31
27300 The recommended encoding for S0-S31 is a DW_OP_bit_piece of the
27301 corresponding D register. Until GDB supports this, we shall use the
27302 legacy encodings. We also use these encodings for D0-D15 for
27303 compatibility with older debuggers. */
27304 mode = GET_MODE (rtl);
27305 if (GET_MODE_SIZE (mode) < 8)
27306 return NULL_RTX;
27308 if (VFP_REGNO_OK_FOR_SINGLE (regno))
27310 nregs = GET_MODE_SIZE (mode) / 4;
27311 for (i = 0; i < nregs; i += 2)
27312 if (TARGET_BIG_END)
27314 parts[i] = gen_rtx_REG (SImode, regno + i + 1);
27315 parts[i + 1] = gen_rtx_REG (SImode, regno + i);
27317 else
27319 parts[i] = gen_rtx_REG (SImode, regno + i);
27320 parts[i + 1] = gen_rtx_REG (SImode, regno + i + 1);
27323 else
27325 nregs = GET_MODE_SIZE (mode) / 8;
27326 for (i = 0; i < nregs; i++)
27327 parts[i] = gen_rtx_REG (DImode, regno + i);
27330 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nregs , parts));
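 /* Illustration: a DFmode value in d8 is described to DWARF as the pair of
    legacy S registers {s16, s17} ({s17, s16} for TARGET_BIG_END), while a
    value in d16-d31, which have no S aliases, is described directly with
    DImode pieces.  */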
27333 #if ARM_UNWIND_INFO
27334 /* Emit unwind directives for a store-multiple instruction or stack pointer
27335 push during alignment.
27336 These should only ever be generated by the function prologue code, so
27337 expect them to have a particular form.
27338 The store-multiple instruction sometimes pushes pc as the last register,
27339 although it should not be tracked into unwind information, or for -Os
27340 sometimes pushes some dummy registers before the first register that needs
27341 to be tracked in unwind information; such dummy registers are there just
27342 to avoid separate stack adjustment, and will not be restored in the
27343 epilogue. */
27345 static void
27346 arm_unwind_emit_sequence (FILE * asm_out_file, rtx p)
27348 int i;
27349 HOST_WIDE_INT offset;
27350 HOST_WIDE_INT nregs;
27351 int reg_size;
27352 unsigned reg;
27353 unsigned lastreg;
27354 unsigned padfirst = 0, padlast = 0;
27355 rtx e;
27357 e = XVECEXP (p, 0, 0);
27358 gcc_assert (GET_CODE (e) == SET);
27360 /* First insn will adjust the stack pointer. */
27361 gcc_assert (GET_CODE (e) == SET
27362 && REG_P (SET_DEST (e))
27363 && REGNO (SET_DEST (e)) == SP_REGNUM
27364 && GET_CODE (SET_SRC (e)) == PLUS);
27366 offset = -INTVAL (XEXP (SET_SRC (e), 1));
27367 nregs = XVECLEN (p, 0) - 1;
27368 gcc_assert (nregs);
27370 reg = REGNO (SET_SRC (XVECEXP (p, 0, 1)));
27371 if (reg < 16)
27373 /* For -Os dummy registers can be pushed at the beginning to
27374 avoid separate stack pointer adjustment. */
27375 e = XVECEXP (p, 0, 1);
27376 e = XEXP (SET_DEST (e), 0);
27377 if (GET_CODE (e) == PLUS)
27378 padfirst = INTVAL (XEXP (e, 1));
27379 gcc_assert (padfirst == 0 || optimize_size);
27380 /* The function prologue may also push pc, but not annotate it as it is
27381 never restored. We turn this into a stack pointer adjustment. */
27382 e = XVECEXP (p, 0, nregs);
27383 e = XEXP (SET_DEST (e), 0);
27384 if (GET_CODE (e) == PLUS)
27385 padlast = offset - INTVAL (XEXP (e, 1)) - 4;
27386 else
27387 padlast = offset - 4;
27388 gcc_assert (padlast == 0 || padlast == 4);
27389 if (padlast == 4)
27390 fprintf (asm_out_file, "\t.pad #4\n");
27391 reg_size = 4;
27392 fprintf (asm_out_file, "\t.save {");
27394 else if (IS_VFP_REGNUM (reg))
27396 reg_size = 8;
27397 fprintf (asm_out_file, "\t.vsave {");
27399 else
27400 /* Unknown register type. */
27401 gcc_unreachable ();
27403 /* If the stack increment doesn't match the size of the saved registers,
27404 something has gone horribly wrong. */
27405 gcc_assert (offset == padfirst + nregs * reg_size + padlast);
27407 offset = padfirst;
27408 lastreg = 0;
27409 /* The remaining insns will describe the stores. */
27410 for (i = 1; i <= nregs; i++)
27412 /* Expect (set (mem <addr>) (reg)).
27413 Where <addr> is (reg:SP) or (plus (reg:SP) (const_int)). */
27414 e = XVECEXP (p, 0, i);
27415 gcc_assert (GET_CODE (e) == SET
27416 && MEM_P (SET_DEST (e))
27417 && REG_P (SET_SRC (e)));
27419 reg = REGNO (SET_SRC (e));
27420 gcc_assert (reg >= lastreg);
27422 if (i != 1)
27423 fprintf (asm_out_file, ", ");
27424 /* We can't use %r for vfp because we need to use the
27425 double precision register names. */
27426 if (IS_VFP_REGNUM (reg))
27427 asm_fprintf (asm_out_file, "d%d", (reg - FIRST_VFP_REGNUM) / 2);
27428 else
27429 asm_fprintf (asm_out_file, "%r", reg);
27431 if (flag_checking)
27433 /* Check that the addresses are consecutive. */
27434 e = XEXP (SET_DEST (e), 0);
27435 if (GET_CODE (e) == PLUS)
27436 gcc_assert (REG_P (XEXP (e, 0))
27437 && REGNO (XEXP (e, 0)) == SP_REGNUM
27438 && CONST_INT_P (XEXP (e, 1))
27439 && offset == INTVAL (XEXP (e, 1)));
27440 else
27441 gcc_assert (i == 1
27442 && REG_P (e)
27443 && REGNO (e) == SP_REGNUM);
27444 offset += reg_size;
27447 fprintf (asm_out_file, "}\n");
27448 if (padfirst)
27449 fprintf (asm_out_file, "\t.pad #%d\n", padfirst);
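 /* Example of the directives emitted above: a prologue "push {r4, r5, lr}"
    is annotated as ".save {r4, r5, lr}", while any untracked pc or dummy
    register slots show up as ".pad" adjustments as described in the comment
    before this function.  */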
27452 /* Emit unwind directives for a SET. */
27454 static void
27455 arm_unwind_emit_set (FILE * asm_out_file, rtx p)
27457 rtx e0;
27458 rtx e1;
27459 unsigned reg;
27461 e0 = XEXP (p, 0);
27462 e1 = XEXP (p, 1);
27463 switch (GET_CODE (e0))
27465 case MEM:
27466 /* Pushing a single register. */
27467 if (GET_CODE (XEXP (e0, 0)) != PRE_DEC
27468 || !REG_P (XEXP (XEXP (e0, 0), 0))
27469 || REGNO (XEXP (XEXP (e0, 0), 0)) != SP_REGNUM)
27470 abort ();
27472 asm_fprintf (asm_out_file, "\t.save ");
27473 if (IS_VFP_REGNUM (REGNO (e1)))
27474 asm_fprintf(asm_out_file, "{d%d}\n",
27475 (REGNO (e1) - FIRST_VFP_REGNUM) / 2);
27476 else
27477 asm_fprintf(asm_out_file, "{%r}\n", REGNO (e1));
27478 break;
27480 case REG:
27481 if (REGNO (e0) == SP_REGNUM)
27483 /* A stack increment. */
27484 if (GET_CODE (e1) != PLUS
27485 || !REG_P (XEXP (e1, 0))
27486 || REGNO (XEXP (e1, 0)) != SP_REGNUM
27487 || !CONST_INT_P (XEXP (e1, 1)))
27488 abort ();
27490 asm_fprintf (asm_out_file, "\t.pad #%wd\n",
27491 -INTVAL (XEXP (e1, 1)));
27493 else if (REGNO (e0) == HARD_FRAME_POINTER_REGNUM)
27495 HOST_WIDE_INT offset;
27497 if (GET_CODE (e1) == PLUS)
27499 if (!REG_P (XEXP (e1, 0))
27500 || !CONST_INT_P (XEXP (e1, 1)))
27501 abort ();
27502 reg = REGNO (XEXP (e1, 0));
27503 offset = INTVAL (XEXP (e1, 1));
27504 asm_fprintf (asm_out_file, "\t.setfp %r, %r, #%wd\n",
27505 HARD_FRAME_POINTER_REGNUM, reg,
27506 offset);
27508 else if (REG_P (e1))
27510 reg = REGNO (e1);
27511 asm_fprintf (asm_out_file, "\t.setfp %r, %r\n",
27512 HARD_FRAME_POINTER_REGNUM, reg);
27514 else
27515 abort ();
27517 else if (REG_P (e1) && REGNO (e1) == SP_REGNUM)
27519 /* Move from sp to reg. */
27520 asm_fprintf (asm_out_file, "\t.movsp %r\n", REGNO (e0));
27522 else if (GET_CODE (e1) == PLUS
27523 && REG_P (XEXP (e1, 0))
27524 && REGNO (XEXP (e1, 0)) == SP_REGNUM
27525 && CONST_INT_P (XEXP (e1, 1)))
27527 /* Set reg to offset from sp. */
27528 asm_fprintf (asm_out_file, "\t.movsp %r, #%d\n",
27529 REGNO (e0), (int)INTVAL(XEXP (e1, 1)));
27531 else
27532 abort ();
27533 break;
27535 default:
27536 abort ();
27541 /* Emit unwind directives for the given insn. */
27543 static void
27544 arm_unwind_emit (FILE * asm_out_file, rtx_insn *insn)
27546 rtx note, pat;
27547 bool handled_one = false;
27549 if (arm_except_unwind_info (&global_options) != UI_TARGET)
27550 return;
27552 if (!(flag_unwind_tables || crtl->uses_eh_lsda)
27553 && (TREE_NOTHROW (current_function_decl)
27554 || crtl->all_throwers_are_sibcalls))
27555 return;
27557 if (NOTE_P (insn) || !RTX_FRAME_RELATED_P (insn))
27558 return;
27560 for (note = REG_NOTES (insn); note ; note = XEXP (note, 1))
27562 switch (REG_NOTE_KIND (note))
27564 case REG_FRAME_RELATED_EXPR:
27565 pat = XEXP (note, 0);
27566 goto found;
27568 case REG_CFA_REGISTER:
27569 pat = XEXP (note, 0);
27570 if (pat == NULL)
27572 pat = PATTERN (insn);
27573 if (GET_CODE (pat) == PARALLEL)
27574 pat = XVECEXP (pat, 0, 0);
27577 /* Only emitted for IS_STACKALIGN re-alignment. */
27579 rtx dest, src;
27580 unsigned reg;
27582 src = SET_SRC (pat);
27583 dest = SET_DEST (pat);
27585 gcc_assert (src == stack_pointer_rtx);
27586 reg = REGNO (dest);
27587 asm_fprintf (asm_out_file, "\t.unwind_raw 0, 0x%x @ vsp = r%d\n",
27588 reg + 0x90, reg);
27590 handled_one = true;
27591 break;
27593 /* The INSN is generated in epilogue. It is set as RTX_FRAME_RELATED_P
27594 to get correct dwarf information for shrink-wrap. We should not
27595 emit unwind information for it because these are used either for
27596 pretend arguments or notes to adjust sp and restore registers from
27597 stack. */
27598 case REG_CFA_DEF_CFA:
27599 case REG_CFA_ADJUST_CFA:
27600 case REG_CFA_RESTORE:
27601 return;
27603 case REG_CFA_EXPRESSION:
27604 case REG_CFA_OFFSET:
27605 /* ??? Only handling here what we actually emit. */
27606 gcc_unreachable ();
27608 default:
27609 break;
27612 if (handled_one)
27613 return;
27614 pat = PATTERN (insn);
27615 found:
27617 switch (GET_CODE (pat))
27619 case SET:
27620 arm_unwind_emit_set (asm_out_file, pat);
27621 break;
27623 case SEQUENCE:
27624 /* Store multiple. */
27625 arm_unwind_emit_sequence (asm_out_file, pat);
27626 break;
27628 default:
27629 abort();
27634 /* Output a reference from a function exception table to the type_info
27635 object X. The EABI specifies that the symbol should be relocated by
27636 an R_ARM_TARGET2 relocation. */
27638 static bool
27639 arm_output_ttype (rtx x)
27641 fputs ("\t.word\t", asm_out_file);
27642 output_addr_const (asm_out_file, x);
27643 /* Use special relocations for symbol references. */
27644 if (!CONST_INT_P (x))
27645 fputs ("(TARGET2)", asm_out_file);
27646 fputc ('\n', asm_out_file);
27648 return TRUE;
27651 /* Implement TARGET_ASM_EMIT_EXCEPT_PERSONALITY. */
27653 static void
27654 arm_asm_emit_except_personality (rtx personality)
27656 fputs ("\t.personality\t", asm_out_file);
27657 output_addr_const (asm_out_file, personality);
27658 fputc ('\n', asm_out_file);
27660 #endif /* ARM_UNWIND_INFO */
27662 /* Implement TARGET_ASM_INITIALIZE_SECTIONS. */
27664 static void
27665 arm_asm_init_sections (void)
27667 #if ARM_UNWIND_INFO
27668 exception_section = get_unnamed_section (0, output_section_asm_op,
27669 "\t.handlerdata");
27670 #endif /* ARM_UNWIND_INFO */
27672 #ifdef OBJECT_FORMAT_ELF
27673 if (target_pure_code)
27674 text_section->unnamed.data = "\t.section .text,\"0x20000006\",%progbits";
27675 #endif
27678 /* Output unwind directives for the start/end of a function. */
27680 void
27681 arm_output_fn_unwind (FILE * f, bool prologue)
27683 if (arm_except_unwind_info (&global_options) != UI_TARGET)
27684 return;
27686 if (prologue)
27687 fputs ("\t.fnstart\n", f);
27688 else
27690 /* If this function will never be unwound, then mark it as such.
27691 The same condition is used in arm_unwind_emit to suppress
27692 the frame annotations. */
27693 if (!(flag_unwind_tables || crtl->uses_eh_lsda)
27694 && (TREE_NOTHROW (current_function_decl)
27695 || crtl->all_throwers_are_sibcalls))
27696 fputs("\t.cantunwind\n", f);
27698 fputs ("\t.fnend\n", f);
27702 static bool
27703 arm_emit_tls_decoration (FILE *fp, rtx x)
27705 enum tls_reloc reloc;
27706 rtx val;
27708 val = XVECEXP (x, 0, 0);
27709 reloc = (enum tls_reloc) INTVAL (XVECEXP (x, 0, 1));
27711 output_addr_const (fp, val);
27713 switch (reloc)
27715 case TLS_GD32:
27716 fputs ("(tlsgd)", fp);
27717 break;
27718 case TLS_LDM32:
27719 fputs ("(tlsldm)", fp);
27720 break;
27721 case TLS_LDO32:
27722 fputs ("(tlsldo)", fp);
27723 break;
27724 case TLS_IE32:
27725 fputs ("(gottpoff)", fp);
27726 break;
27727 case TLS_LE32:
27728 fputs ("(tpoff)", fp);
27729 break;
27730 case TLS_DESCSEQ:
27731 fputs ("(tlsdesc)", fp);
27732 break;
27733 default:
27734 gcc_unreachable ();
27737 switch (reloc)
27739 case TLS_GD32:
27740 case TLS_LDM32:
27741 case TLS_IE32:
27742 case TLS_DESCSEQ:
27743 fputs (" + (. - ", fp);
27744 output_addr_const (fp, XVECEXP (x, 0, 2));
27745 /* For DESCSEQ the 3rd operand encodes thumbness, and is added */
27746 fputs (reloc == TLS_DESCSEQ ? " + " : " - ", fp);
27747 output_addr_const (fp, XVECEXP (x, 0, 3));
27748 fputc (')', fp);
27749 break;
27750 default:
27751 break;
27754 return TRUE;
27757 /* ARM implementation of TARGET_ASM_OUTPUT_DWARF_DTPREL. */
27759 static void
27760 arm_output_dwarf_dtprel (FILE *file, int size, rtx x)
27762 gcc_assert (size == 4);
27763 fputs ("\t.word\t", file);
27764 output_addr_const (file, x);
27765 fputs ("(tlsldo)", file);
27768 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
27770 static bool
27771 arm_output_addr_const_extra (FILE *fp, rtx x)
27773 if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLS)
27774 return arm_emit_tls_decoration (fp, x);
27775 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_PIC_LABEL)
27777 char label[256];
27778 int labelno = INTVAL (XVECEXP (x, 0, 0));
27780 ASM_GENERATE_INTERNAL_LABEL (label, "LPIC", labelno);
27781 assemble_name_raw (fp, label);
27783 return TRUE;
27785 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_GOTSYM_OFF)
27787 assemble_name (fp, "_GLOBAL_OFFSET_TABLE_");
27788 if (GOT_PCREL)
27789 fputs ("+.", fp);
27790 fputs ("-(", fp);
27791 output_addr_const (fp, XVECEXP (x, 0, 0));
27792 fputc (')', fp);
27793 return TRUE;
27795 else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_SYMBOL_OFFSET)
27797 output_addr_const (fp, XVECEXP (x, 0, 0));
27798 if (GOT_PCREL)
27799 fputs ("+.", fp);
27800 fputs ("-(", fp);
27801 output_addr_const (fp, XVECEXP (x, 0, 1));
27802 fputc (')', fp);
27803 return TRUE;
27805 else if (GET_CODE (x) == CONST_VECTOR)
27806 return arm_emit_vector_const (fp, x);
27808 return FALSE;
27811 /* Output assembly for a shift instruction.
27812 SET_FLAGS determines how the instruction modifies the condition codes.
27813 0 - Do not set condition codes.
27814 1 - Set condition codes.
27815 2 - Use smallest instruction. */
27816 const char *
27817 arm_output_shift(rtx * operands, int set_flags)
27819 char pattern[100];
27820 static const char flag_chars[3] = {'?', '.', '!'};
27821 const char *shift;
27822 HOST_WIDE_INT val;
27823 char c;
27825 c = flag_chars[set_flags];
27826 shift = shift_op(operands[3], &val);
27827 if (shift)
27829 if (val != -1)
27830 operands[2] = GEN_INT(val);
27831 sprintf (pattern, "%s%%%c\t%%0, %%1, %%2", shift, c);
27833 else
27834 sprintf (pattern, "mov%%%c\t%%0, %%1", c);
27836 output_asm_insn (pattern, operands);
27837 return "";
27840 /* Output assembly for a WMMX immediate shift instruction. */
27841 const char *
27842 arm_output_iwmmxt_shift_immediate (const char *insn_name, rtx *operands, bool wror_or_wsra)
27844 int shift = INTVAL (operands[2]);
27845 char templ[50];
27846 machine_mode opmode = GET_MODE (operands[0]);
27848 gcc_assert (shift >= 0);
27850 /* If the shift value exceeds the maximum for the register versions, i.e. is
27851 > 63 (for the D qualifier), > 31 (for W) or > 15 (for H), handle it here. */
27852 if (((opmode == V4HImode) && (shift > 15))
27853 || ((opmode == V2SImode) && (shift > 31))
27854 || ((opmode == DImode) && (shift > 63)))
27856 if (wror_or_wsra)
27858 sprintf (templ, "%s\t%%0, %%1, #%d", insn_name, 32);
27859 output_asm_insn (templ, operands);
27860 if (opmode == DImode)
27862 sprintf (templ, "%s\t%%0, %%0, #%d", insn_name, 32);
27863 output_asm_insn (templ, operands);
27866 else
27868 /* The destination register will contain all zeros. */
27869 sprintf (templ, "wzero\t%%0");
27870 output_asm_insn (templ, operands);
27872 return "";
27875 if ((opmode == DImode) && (shift > 32))
27877 sprintf (templ, "%s\t%%0, %%1, #%d", insn_name, 32);
27878 output_asm_insn (templ, operands);
27879 sprintf (templ, "%s\t%%0, %%0, #%d", insn_name, shift - 32);
27880 output_asm_insn (templ, operands);
27882 else
27884 sprintf (templ, "%s\t%%0, %%1, #%d", insn_name, shift);
27885 output_asm_insn (templ, operands);
27887 return "";
27890 /* Output assembly for a WMMX tinsr instruction. */
27891 const char *
27892 arm_output_iwmmxt_tinsr (rtx *operands)
27894 int mask = INTVAL (operands[3]);
27895 int i;
27896 char templ[50];
27897 int units = mode_nunits[GET_MODE (operands[0])];
27898 gcc_assert ((mask & (mask - 1)) == 0);
27899 for (i = 0; i < units; ++i)
27901 if ((mask & 0x01) == 1)
27903 break;
27905 mask >>= 1;
27907 gcc_assert (i < units);
27909 switch (GET_MODE (operands[0]))
27911 case E_V8QImode:
27912 sprintf (templ, "tinsrb%%?\t%%0, %%2, #%d", i);
27913 break;
27914 case E_V4HImode:
27915 sprintf (templ, "tinsrh%%?\t%%0, %%2, #%d", i);
27916 break;
27917 case E_V2SImode:
27918 sprintf (templ, "tinsrw%%?\t%%0, %%2, #%d", i);
27919 break;
27920 default:
27921 gcc_unreachable ();
27922 break;
27924 output_asm_insn (templ, operands);
27926 return "";
27929 /* Output a Thumb-1 casesi dispatch sequence. */
27930 const char *
27931 thumb1_output_casesi (rtx *operands)
27933 rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[0])));
27935 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
27937 switch (GET_MODE(diff_vec))
27939 case E_QImode:
27940 return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned ?
27941 "bl\t%___gnu_thumb1_case_uqi" : "bl\t%___gnu_thumb1_case_sqi");
27942 case E_HImode:
27943 return (ADDR_DIFF_VEC_FLAGS (diff_vec).offset_unsigned ?
27944 "bl\t%___gnu_thumb1_case_uhi" : "bl\t%___gnu_thumb1_case_shi");
27945 case E_SImode:
27946 return "bl\t%___gnu_thumb1_case_si";
27947 default:
27948 gcc_unreachable ();
27952 /* Output a Thumb-2 casesi instruction. */
27953 const char *
27954 thumb2_output_casesi (rtx *operands)
27956 rtx diff_vec = PATTERN (NEXT_INSN (as_a <rtx_insn *> (operands[2])));
27958 gcc_assert (GET_CODE (diff_vec) == ADDR_DIFF_VEC);
27960 output_asm_insn ("cmp\t%0, %1", operands);
27961 output_asm_insn ("bhi\t%l3", operands);
27962 switch (GET_MODE(diff_vec))
27964 case E_QImode:
27965 return "tbb\t[%|pc, %0]";
27966 case E_HImode:
27967 return "tbh\t[%|pc, %0, lsl #1]";
27968 case E_SImode:
27969 if (flag_pic)
27971 output_asm_insn ("adr\t%4, %l2", operands);
27972 output_asm_insn ("ldr\t%5, [%4, %0, lsl #2]", operands);
27973 output_asm_insn ("add\t%4, %4, %5", operands);
27974 return "bx\t%4";
27976 else
27978 output_asm_insn ("adr\t%4, %l2", operands);
27979 return "ldr\t%|pc, [%4, %0, lsl #2]";
27981 default:
27982 gcc_unreachable ();
27986 /* Implement TARGET_SCHED_ISSUE_RATE. Lookup the issue rate in the
27987 per-core tuning structs. */
27988 static int
27989 arm_issue_rate (void)
27991 return current_tune->issue_rate;
27994 /* Return how many instructions the scheduler should look ahead to choose the
27995 best one. */
27996 static int
27997 arm_first_cycle_multipass_dfa_lookahead (void)
27999 int issue_rate = arm_issue_rate ();
28001 return issue_rate > 1 && !sched_fusion ? issue_rate : 0;
28004 /* Enable modeling of L2 auto-prefetcher. */
28005 static int
28006 arm_first_cycle_multipass_dfa_lookahead_guard (rtx_insn *insn, int ready_index)
28008 return autopref_multipass_dfa_lookahead_guard (insn, ready_index);
28011 const char *
28012 arm_mangle_type (const_tree type)
28014 /* The ARM ABI documents (10th October 2008) say that "__va_list"
28015 has to be mangled as if it is in the "std" namespace. */
28016 if (TARGET_AAPCS_BASED
28017 && lang_hooks.types_compatible_p (CONST_CAST_TREE (type), va_list_type))
28018 return "St9__va_list";
28020 /* Half-precision float. */
28021 if (TREE_CODE (type) == REAL_TYPE && TYPE_PRECISION (type) == 16)
28022 return "Dh";
28024 /* Try mangling as a Neon type, TYPE_NAME is non-NULL if this is a
28025 builtin type. */
28026 if (TYPE_NAME (type) != NULL)
28027 return arm_mangle_builtin_type (type);
28029 /* Use the default mangling. */
28030 return NULL;
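 /* Mangling examples implied by the rules above: __fp16 mangles as "Dh", so
    "void f (__fp16)" becomes _Z1fDh, and the AAPCS va_list mangles as
    "St9__va_list", i.e. as if it were std::__va_list.  */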
28033 /* Order of allocation of core registers for Thumb: this allocation is
28034 written over the corresponding initial entries of the array
28035 initialized with REG_ALLOC_ORDER. We allocate all low registers
28036 first. Saving and restoring a low register is usually cheaper than
28037 using a call-clobbered high register. */
28039 static const int thumb_core_reg_alloc_order[] =
28041 3, 2, 1, 0, 4, 5, 6, 7,
28042 12, 14, 8, 9, 10, 11
28045 /* Adjust register allocation order when compiling for Thumb. */
28047 void
28048 arm_order_regs_for_local_alloc (void)
28050 const int arm_reg_alloc_order[] = REG_ALLOC_ORDER;
28051 memcpy(reg_alloc_order, arm_reg_alloc_order, sizeof (reg_alloc_order));
28052 if (TARGET_THUMB)
28053 memcpy (reg_alloc_order, thumb_core_reg_alloc_order,
28054 sizeof (thumb_core_reg_alloc_order));
28057 /* Implement TARGET_FRAME_POINTER_REQUIRED. */
28059 bool
28060 arm_frame_pointer_required (void)
28062 if (SUBTARGET_FRAME_POINTER_REQUIRED)
28063 return true;
28065 /* If the function receives nonlocal gotos, it needs to save the frame
28066 pointer in the nonlocal_goto_save_area object. */
28067 if (cfun->has_nonlocal_label)
28068 return true;
28070 /* The frame pointer is required for non-leaf APCS frames. */
28071 if (TARGET_ARM && TARGET_APCS_FRAME && !crtl->is_leaf)
28072 return true;
28074 /* If we are probing the stack in the prologue, we will have a faulting
28075 instruction prior to the stack adjustment and this requires a frame
28076 pointer if we want to catch the exception using the EABI unwinder. */
28077 if (!IS_INTERRUPT (arm_current_func_type ())
28078 && (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
28079 || flag_stack_clash_protection)
28080 && arm_except_unwind_info (&global_options) == UI_TARGET
28081 && cfun->can_throw_non_call_exceptions)
28083 HOST_WIDE_INT size = get_frame_size ();
28085 /* That's irrelevant if there is no stack adjustment. */
28086 if (size <= 0)
28087 return false;
28089 /* That's relevant only if there is a stack probe. */
28090 if (crtl->is_leaf && !cfun->calls_alloca)
28092 /* We don't have the final size of the frame so adjust. */
28093 size += 32 * UNITS_PER_WORD;
28094 if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
28095 return true;
28097 else
28098 return true;
28101 return false;
28104 /* Only Thumb-1 lacks support for conditional execution, so return true if
28105 the target is not Thumb-1. */
28106 static bool
28107 arm_have_conditional_execution (void)
28109 return !TARGET_THUMB1;
28112 /* The AAPCS sets the maximum alignment of a vector to 64 bits. */
28113 static HOST_WIDE_INT
28114 arm_vector_alignment (const_tree type)
28116 HOST_WIDE_INT align = tree_to_shwi (TYPE_SIZE (type));
28118 if (TARGET_AAPCS_BASED)
28119 align = MIN (align, 64);
28121 return align;
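 /* E.g. a 128-bit Neon vector type, whose natural alignment would otherwise
    be 16 bytes, is capped at 8-byte (64-bit) alignment here when targeting
    an AAPCS-based ABI.  */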
28124 static void
28125 arm_autovectorize_vector_sizes (vector_sizes *sizes)
28127 if (!TARGET_NEON_VECTORIZE_DOUBLE)
28129 sizes->safe_push (16);
28130 sizes->safe_push (8);
28134 static bool
28135 arm_vector_alignment_reachable (const_tree type, bool is_packed)
28137 /* Vectors which aren't in packed structures will not be less aligned than
28138 the natural alignment of their element type, so this is safe. */
28139 if (TARGET_NEON && !BYTES_BIG_ENDIAN && unaligned_access)
28140 return !is_packed;
28142 return default_builtin_vector_alignment_reachable (type, is_packed);
28145 static bool
28146 arm_builtin_support_vector_misalignment (machine_mode mode,
28147 const_tree type, int misalignment,
28148 bool is_packed)
28150 if (TARGET_NEON && !BYTES_BIG_ENDIAN && unaligned_access)
28152 HOST_WIDE_INT align = TYPE_ALIGN_UNIT (type);
28154 if (is_packed)
28155 return align == 1;
28157 /* If the misalignment is unknown, we should be able to handle the access
28158 so long as it is not to a member of a packed data structure. */
28159 if (misalignment == -1)
28160 return true;
28162 /* Return true if the misalignment is a multiple of the natural alignment
28163 of the vector's element type. This is probably always going to be
28164 true in practice, since we've already established that this isn't a
28165 packed access. */
28166 return ((misalignment % align) == 0);
28169 return default_builtin_support_vector_misalignment (mode, type, misalignment,
28170 is_packed);
28173 static void
28174 arm_conditional_register_usage (void)
28176 int regno;
28178 if (TARGET_THUMB1 && optimize_size)
28180 /* When optimizing for size on Thumb-1, it's better not
28181 to use the HI regs, because of the overhead of
28182 stacking them. */
28183 for (regno = FIRST_HI_REGNUM; regno <= LAST_HI_REGNUM; ++regno)
28184 fixed_regs[regno] = call_used_regs[regno] = 1;
28187 /* The link register can be clobbered by any branch insn,
28188 but we have no way to track that at present, so mark
28189 it as unavailable. */
28190 if (TARGET_THUMB1)
28191 fixed_regs[LR_REGNUM] = call_used_regs[LR_REGNUM] = 1;
28193 if (TARGET_32BIT && TARGET_HARD_FLOAT)
28195 /* VFPv3 registers are disabled when earlier VFP
28196 versions are selected due to the definition of
28197 LAST_VFP_REGNUM. */
28198 for (regno = FIRST_VFP_REGNUM;
28199 regno <= LAST_VFP_REGNUM; ++ regno)
28201 fixed_regs[regno] = 0;
28202 call_used_regs[regno] = regno < FIRST_VFP_REGNUM + 16
28203 || regno >= FIRST_VFP_REGNUM + 32;
28207 if (TARGET_REALLY_IWMMXT)
28209 regno = FIRST_IWMMXT_GR_REGNUM;
28210 /* The 2002/10/09 revision of the XScale ABI has wCG0
28211 and wCG1 as call-preserved registers. The 2002/11/21
28212 revision changed this so that all wCG registers are
28213 scratch registers. */
28214 for (regno = FIRST_IWMMXT_GR_REGNUM;
28215 regno <= LAST_IWMMXT_GR_REGNUM; ++ regno)
28216 fixed_regs[regno] = 0;
28217 /* The XScale ABI has wR0 - wR9 as scratch registers,
28218 the rest as call-preserved registers. */
28219 for (regno = FIRST_IWMMXT_REGNUM;
28220 regno <= LAST_IWMMXT_REGNUM; ++ regno)
28222 fixed_regs[regno] = 0;
28223 call_used_regs[regno] = regno < FIRST_IWMMXT_REGNUM + 10;
28227 if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
28229 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
28230 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
28232 else if (TARGET_APCS_STACK)
28234 fixed_regs[10] = 1;
28235 call_used_regs[10] = 1;
28237 /* -mcaller-super-interworking reserves r11 for calls to
28238 _interwork_r11_call_via_rN(). Making the register global
28239 is an easy way of ensuring that it remains valid for all
28240 calls. */
28241 if (TARGET_APCS_FRAME || TARGET_CALLER_INTERWORKING
28242 || TARGET_TPCS_FRAME || TARGET_TPCS_LEAF_FRAME)
28244 fixed_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1;
28245 call_used_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1;
28246 if (TARGET_CALLER_INTERWORKING)
28247 global_regs[ARM_HARD_FRAME_POINTER_REGNUM] = 1;
28249 SUBTARGET_CONDITIONAL_REGISTER_USAGE
28252 static reg_class_t
28253 arm_preferred_rename_class (reg_class_t rclass)
28255 /* Thumb-2 instructions using LO_REGS may be smaller than instructions
28256 using GENERAL_REGS. During the register rename pass, we prefer LO_REGS,
28257 so code size can be reduced. */
28258 if (TARGET_THUMB2 && rclass == GENERAL_REGS)
28259 return LO_REGS;
28260 else
28261 return NO_REGS;
28264 /* Compute the attribute "length" of insn "*push_multi".
28265 So this function MUST be kept in sync with that insn pattern. */
28267 arm_attr_length_push_multi(rtx parallel_op, rtx first_op)
28269 int i, regno, hi_reg;
28270 int num_saves = XVECLEN (parallel_op, 0);
28272 /* ARM mode. */
28273 if (TARGET_ARM)
28274 return 4;
28275 /* Thumb1 mode. */
28276 if (TARGET_THUMB1)
28277 return 2;
28279 /* Thumb2 mode. */
28280 regno = REGNO (first_op);
28281 /* For PUSH/STM under Thumb2 mode, we can use 16-bit encodings if the register
28282 list is 8 bits wide. Normally this means all registers in the list must be
28283 LO_REGS, that is (R0-R7). If any HI_REGS register is used, then we must use
28284 a 32-bit encoding. There is one exception for PUSH: LR, although in HI_REGS,
28285 can still be used with the 16-bit encoding. */
28286 hi_reg = (REGNO_REG_CLASS (regno) == HI_REGS) && (regno != LR_REGNUM);
28287 for (i = 1; i < num_saves && !hi_reg; i++)
28289 regno = REGNO (XEXP (XVECEXP (parallel_op, 0, i), 0));
28290 hi_reg |= (REGNO_REG_CLASS (regno) == HI_REGS) && (regno != LR_REGNUM);
28293 if (!hi_reg)
28294 return 2;
28295 return 4;
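 /* Examples for the Thumb-2 case: "push {r0-r7, lr}" fits the 16-bit
    encoding (length 2), whereas "push {r4, r8}" needs the 32-bit encoding
    (length 4) because r8 is a high register other than LR.  */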
28298 /* Compute the attribute "length" of insn. Currently, this function is used
28299 for "*load_multiple_with_writeback", "*pop_multiple_with_return" and
28300 "*pop_multiple_with_writeback_and_return". OPERANDS is the toplevel PARALLEL
28301 rtx, RETURN_PC is true if OPERANDS contains a return insn. WRITE_BACK_P is
28302 true if OPERANDS contains an insn which explicitly updates the base register. */
28305 arm_attr_length_pop_multi (rtx *operands, bool return_pc, bool write_back_p)
28307 /* ARM mode. */
28308 if (TARGET_ARM)
28309 return 4;
28310 /* Thumb1 mode. */
28311 if (TARGET_THUMB1)
28312 return 2;
28314 rtx parallel_op = operands[0];
28315 /* Initialize to the number of elements in the PARALLEL. */
28316 unsigned indx = XVECLEN (parallel_op, 0) - 1;
28317 /* Initialize the value to the base register number. */
28318 unsigned regno = REGNO (operands[1]);
28319 /* Skip the return and write-back patterns.
28320 We only need the register pop patterns for later analysis. */
28321 unsigned first_indx = 0;
28322 first_indx += return_pc ? 1 : 0;
28323 first_indx += write_back_p ? 1 : 0;
28325 /* A pop operation can be done through LDM or POP. If the base register is SP
28326 and write back is used, then an LDM is an alias of POP. */
28327 bool pop_p = (regno == SP_REGNUM && write_back_p);
28328 bool ldm_p = !pop_p;
28330 /* Check base register for LDM. */
28331 if (ldm_p && REGNO_REG_CLASS (regno) == HI_REGS)
28332 return 4;
28334 /* Check each register in the list. */
28335 for (; indx >= first_indx; indx--)
28337 regno = REGNO (XEXP (XVECEXP (parallel_op, 0, indx), 0));
28338 /* For POP, PC in HI_REGS can be used with 16-bit encoding. See similar
28339 comment in arm_attr_length_push_multi. */
28340 if (REGNO_REG_CLASS (regno) == HI_REGS
28341 && (regno != PC_REGNUM || ldm_p))
28342 return 4;
28345 return 2;
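 /* Examples for the Thumb-2 case: a "pop {r4-r7, pc}" style pattern (SP
    base with write back) keeps the 16-bit length of 2, while something like
    "ldmia r8!, {r0-r3}" is costed at 4 because its base register is a high
    register.  */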
28348 /* Compute the number of instructions emitted by output_move_double. */
28350 arm_count_output_move_double_insns (rtx *operands)
28352 int count;
28353 rtx ops[2];
28354 /* output_move_double may modify the operands array, so call it
28355 here on a copy of the array. */
28356 ops[0] = operands[0];
28357 ops[1] = operands[1];
28358 output_move_double (ops, false, &count);
28359 return count;
28363 vfp3_const_double_for_fract_bits (rtx operand)
28365 REAL_VALUE_TYPE r0;
28367 if (!CONST_DOUBLE_P (operand))
28368 return 0;
28370 r0 = *CONST_DOUBLE_REAL_VALUE (operand);
28371 if (exact_real_inverse (DFmode, &r0)
28372 && !REAL_VALUE_NEGATIVE (r0))
28374 if (exact_real_truncate (DFmode, &r0))
28376 HOST_WIDE_INT value = real_to_integer (&r0);
28377 value = value & 0xffffffff;
28378 if ((value != 0) && ( (value & (value - 1)) == 0))
28380 int ret = exact_log2 (value);
28381 gcc_assert (IN_RANGE (ret, 0, 31));
28382 return ret;
28386 return 0;
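 /* Example: for the constant 0.0625 the exact inverse is 16.0, a power of
    two, so this returns 4 and the fixed-point conversion patterns can use
    an immediate of #4 fractional bits.  */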
28389 /* If X is a CONST_DOUBLE with a value that is a power of 2 whose
28390 log2 is in [1, 32], return that log2. Otherwise return -1.
28391 This is used in the patterns for vcvt.s32.f32 floating-point to
28392 fixed-point conversions. */
28395 vfp3_const_double_for_bits (rtx x)
28397 const REAL_VALUE_TYPE *r;
28399 if (!CONST_DOUBLE_P (x))
28400 return -1;
28402 r = CONST_DOUBLE_REAL_VALUE (x);
28404 if (REAL_VALUE_NEGATIVE (*r)
28405 || REAL_VALUE_ISNAN (*r)
28406 || REAL_VALUE_ISINF (*r)
28407 || !real_isinteger (r, SFmode))
28408 return -1;
28410 HOST_WIDE_INT hwint = exact_log2 (real_to_integer (r));
28412 /* The exact_log2 above will have returned -1 if this is
28413 not an exact log2. */
28414 if (!IN_RANGE (hwint, 1, 32))
28415 return -1;
28417 return hwint;
28421 /* Emit a memory barrier around an atomic sequence according to MODEL. */
28423 static void
28424 arm_pre_atomic_barrier (enum memmodel model)
28426 if (need_atomic_barrier_p (model, true))
28427 emit_insn (gen_memory_barrier ());
28430 static void
28431 arm_post_atomic_barrier (enum memmodel model)
28433 if (need_atomic_barrier_p (model, false))
28434 emit_insn (gen_memory_barrier ());
28437 /* Emit the load-exclusive and store-exclusive instructions.
28438 Use acquire and release versions if necessary. */
28440 static void
28441 arm_emit_load_exclusive (machine_mode mode, rtx rval, rtx mem, bool acq)
28443 rtx (*gen) (rtx, rtx);
28445 if (acq)
28447 switch (mode)
28449 case E_QImode: gen = gen_arm_load_acquire_exclusiveqi; break;
28450 case E_HImode: gen = gen_arm_load_acquire_exclusivehi; break;
28451 case E_SImode: gen = gen_arm_load_acquire_exclusivesi; break;
28452 case E_DImode: gen = gen_arm_load_acquire_exclusivedi; break;
28453 default:
28454 gcc_unreachable ();
28457 else
28459 switch (mode)
28461 case E_QImode: gen = gen_arm_load_exclusiveqi; break;
28462 case E_HImode: gen = gen_arm_load_exclusivehi; break;
28463 case E_SImode: gen = gen_arm_load_exclusivesi; break;
28464 case E_DImode: gen = gen_arm_load_exclusivedi; break;
28465 default:
28466 gcc_unreachable ();
28470 emit_insn (gen (rval, mem));
28473 static void
28474 arm_emit_store_exclusive (machine_mode mode, rtx bval, rtx rval,
28475 rtx mem, bool rel)
28477 rtx (*gen) (rtx, rtx, rtx);
28479 if (rel)
28481 switch (mode)
28483 case E_QImode: gen = gen_arm_store_release_exclusiveqi; break;
28484 case E_HImode: gen = gen_arm_store_release_exclusivehi; break;
28485 case E_SImode: gen = gen_arm_store_release_exclusivesi; break;
28486 case E_DImode: gen = gen_arm_store_release_exclusivedi; break;
28487 default:
28488 gcc_unreachable ();
28491 else
28493 switch (mode)
28495 case E_QImode: gen = gen_arm_store_exclusiveqi; break;
28496 case E_HImode: gen = gen_arm_store_exclusivehi; break;
28497 case E_SImode: gen = gen_arm_store_exclusivesi; break;
28498 case E_DImode: gen = gen_arm_store_exclusivedi; break;
28499 default:
28500 gcc_unreachable ();
28504 emit_insn (gen (bval, rval, mem));
28507 /* Mark the previous jump instruction as unlikely. */
28509 static void
28510 emit_unlikely_jump (rtx insn)
28512 rtx_insn *jump = emit_jump_insn (insn);
28513 add_reg_br_prob_note (jump, profile_probability::very_unlikely ());
28516 /* Expand a compare and swap pattern. */
28518 void
28519 arm_expand_compare_and_swap (rtx operands[])
28521 rtx bval, bdst, rval, mem, oldval, newval, is_weak, mod_s, mod_f, x;
28522 machine_mode mode;
28523 rtx (*gen) (rtx, rtx, rtx, rtx, rtx, rtx, rtx, rtx);
28525 bval = operands[0];
28526 rval = operands[1];
28527 mem = operands[2];
28528 oldval = operands[3];
28529 newval = operands[4];
28530 is_weak = operands[5];
28531 mod_s = operands[6];
28532 mod_f = operands[7];
28533 mode = GET_MODE (mem);
28535 /* Normally the succ memory model must be stronger than fail, but in the
28536 unlikely event of fail being ACQUIRE and succ being RELEASE we need to
28537 promote succ to ACQ_REL so that we don't lose the acquire semantics. */
28539 if (TARGET_HAVE_LDACQ
28540 && is_mm_acquire (memmodel_from_int (INTVAL (mod_f)))
28541 && is_mm_release (memmodel_from_int (INTVAL (mod_s))))
28542 mod_s = GEN_INT (MEMMODEL_ACQ_REL);
28544 switch (mode)
28546 case E_QImode:
28547 case E_HImode:
28548 /* For narrow modes, we're going to perform the comparison in SImode,
28549 so do the zero-extension now. */
28550 rval = gen_reg_rtx (SImode);
28551 oldval = convert_modes (SImode, mode, oldval, true);
28552 /* FALLTHRU */
28554 case E_SImode:
28555 /* Force the value into a register if needed. We waited until after
28556 the zero-extension above to do this properly. */
28557 if (!arm_add_operand (oldval, SImode))
28558 oldval = force_reg (SImode, oldval);
28559 break;
28561 case E_DImode:
28562 if (!cmpdi_operand (oldval, mode))
28563 oldval = force_reg (mode, oldval);
28564 break;
28566 default:
28567 gcc_unreachable ();
28570 if (TARGET_THUMB1)
28572 switch (mode)
28574 case E_QImode: gen = gen_atomic_compare_and_swapt1qi_1; break;
28575 case E_HImode: gen = gen_atomic_compare_and_swapt1hi_1; break;
28576 case E_SImode: gen = gen_atomic_compare_and_swapt1si_1; break;
28577 case E_DImode: gen = gen_atomic_compare_and_swapt1di_1; break;
28578 default:
28579 gcc_unreachable ();
28582 else
28584 switch (mode)
28586 case E_QImode: gen = gen_atomic_compare_and_swap32qi_1; break;
28587 case E_HImode: gen = gen_atomic_compare_and_swap32hi_1; break;
28588 case E_SImode: gen = gen_atomic_compare_and_swap32si_1; break;
28589 case E_DImode: gen = gen_atomic_compare_and_swap32di_1; break;
28590 default:
28591 gcc_unreachable ();
28595 bdst = TARGET_THUMB1 ? bval : gen_rtx_REG (CC_Zmode, CC_REGNUM);
28596 emit_insn (gen (bdst, rval, mem, oldval, newval, is_weak, mod_s, mod_f));
28598 if (mode == QImode || mode == HImode)
28599 emit_move_insn (operands[1], gen_lowpart (mode, rval));
28601 /* In all cases, we arrange for success to be signaled by Z set.
28602 This arrangement allows for the boolean result to be used directly
28603 in a subsequent branch, post optimization. For Thumb-1 targets, the
28604 boolean negation of the result is also stored in bval because the Thumb-1
28605 backend lacks dependency tracking for the CC flag, as flag-setting is not
28606 represented at the RTL level. */
28607 if (TARGET_THUMB1)
28608 emit_insn (gen_cstoresi_eq0_thumb1 (bval, bdst));
28609 else
28611 x = gen_rtx_EQ (SImode, bdst, const0_rtx);
28612 emit_insn (gen_rtx_SET (bval, x));
28616 /* Split a compare and swap pattern. It is IMPLEMENTATION DEFINED whether
28617 another memory store between the load-exclusive and store-exclusive can
28618 reset the monitor from Exclusive to Open state. This means we must wait
28619 until after reload to split the pattern, lest we get a register spill in
28620 the middle of the atomic sequence. Success of the compare and swap is
28621 indicated by the Z flag set for 32bit targets and by neg_bval being zero
28622 for Thumb-1 targets (i.e. negation of the boolean value returned by
28623 atomic_compare_and_swapmode standard pattern in operand 0). */
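/* As a rough illustration only -- the exact code depends on the mode, the
   memory model and whether acquire/release instructions are available --
   the strong SImode case on a 32-bit target splits into a retry loop along
   these lines:

	.Lretry:
	   ldrex	rval, [mem]
	   cmp		rval, oldval
	   bne		.Ldone
	   strex	neg_bval, newval, [mem]
	   cmp		neg_bval, #0
	   bne		.Lretry
	.Ldone:

   with barriers emitted around the sequence as the memory model requires.  */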
28625 void
28626 arm_split_compare_and_swap (rtx operands[])
28628 rtx rval, mem, oldval, newval, neg_bval;
28629 machine_mode mode;
28630 enum memmodel mod_s, mod_f;
28631 bool is_weak;
28632 rtx_code_label *label1, *label2;
28633 rtx x, cond;
28635 rval = operands[1];
28636 mem = operands[2];
28637 oldval = operands[3];
28638 newval = operands[4];
28639 is_weak = (operands[5] != const0_rtx);
28640 mod_s = memmodel_from_int (INTVAL (operands[6]));
28641 mod_f = memmodel_from_int (INTVAL (operands[7]));
28642 neg_bval = TARGET_THUMB1 ? operands[0] : operands[8];
28643 mode = GET_MODE (mem);
28645 bool is_armv8_sync = arm_arch8 && is_mm_sync (mod_s);
28647 bool use_acquire = TARGET_HAVE_LDACQ
28648 && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
28649 || is_mm_release (mod_s));
28651 bool use_release = TARGET_HAVE_LDACQ
28652 && !(is_mm_relaxed (mod_s) || is_mm_consume (mod_s)
28653 || is_mm_acquire (mod_s));
28655 /* For ARMv8, the load-acquire is too weak for __sync memory orders. Instead,
28656 a full barrier is emitted after the store-release. */
28657 if (is_armv8_sync)
28658 use_acquire = false;
28660 /* Checks whether a barrier is needed and emits one accordingly. */
28661 if (!(use_acquire || use_release))
28662 arm_pre_atomic_barrier (mod_s);
28664 label1 = NULL;
28665 if (!is_weak)
28667 label1 = gen_label_rtx ();
28668 emit_label (label1);
28670 label2 = gen_label_rtx ();
28672 arm_emit_load_exclusive (mode, rval, mem, use_acquire);
28674 /* Z is set to 0 for 32bit targets (resp. neg_bval set to 1) if oldval != rval,
28675 as required to communicate with arm_expand_compare_and_swap. */
28676 if (TARGET_32BIT)
28678 cond = arm_gen_compare_reg (NE, rval, oldval, neg_bval);
28679 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
28680 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
28681 gen_rtx_LABEL_REF (Pmode, label2), pc_rtx);
28682 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
28684 else
28686 emit_move_insn (neg_bval, const1_rtx);
28687 cond = gen_rtx_NE (VOIDmode, rval, oldval);
28688 if (thumb1_cmpneg_operand (oldval, SImode))
28689 emit_unlikely_jump (gen_cbranchsi4_scratch (neg_bval, rval, oldval,
28690 label2, cond));
28691 else
28692 emit_unlikely_jump (gen_cbranchsi4_insn (cond, rval, oldval, label2));
28695 arm_emit_store_exclusive (mode, neg_bval, mem, newval, use_release);
28697 /* Weak or strong, we want EQ to be true for success, so that we
28698 match the flags that we got from the compare above. */
28699 if (TARGET_32BIT)
28701 cond = gen_rtx_REG (CCmode, CC_REGNUM);
28702 x = gen_rtx_COMPARE (CCmode, neg_bval, const0_rtx);
28703 emit_insn (gen_rtx_SET (cond, x));
28706 if (!is_weak)
28708 /* Z is set to boolean value of !neg_bval, as required to communicate
28709 with arm_expand_compare_and_swap. */
28710 x = gen_rtx_NE (VOIDmode, neg_bval, const0_rtx);
28711 emit_unlikely_jump (gen_cbranchsi4 (x, neg_bval, const0_rtx, label1));
28714 if (!is_mm_relaxed (mod_f))
28715 emit_label (label2);
28717 /* Checks whether a barrier is needed and emits one accordingly. */
28718 if (is_armv8_sync
28719 || !(use_acquire || use_release))
28720 arm_post_atomic_barrier (mod_s);
28722 if (is_mm_relaxed (mod_f))
28723 emit_label (label2);
28726 /* Split an atomic operation pattern. Operation is given by CODE and is one
28727 of PLUS, MINUS, IOR, XOR, SET (for an exchange operation) or NOT (for a nand
28728 operation). Operation is performed on the content at MEM and on VALUE
28729 following the memory model MODEL_RTX. The content at MEM before and after
28730 the operation is returned in OLD_OUT and NEW_OUT respectively while the
28731 success of the operation is returned in COND. Using a scratch register or
28732 an operand register for these determines what result is returned for that
28733 pattern. */
28735 void
28736 arm_split_atomic_op (enum rtx_code code, rtx old_out, rtx new_out, rtx mem,
28737 rtx value, rtx model_rtx, rtx cond)
28739 enum memmodel model = memmodel_from_int (INTVAL (model_rtx));
28740 machine_mode mode = GET_MODE (mem);
28741 machine_mode wmode = (mode == DImode ? DImode : SImode);
28742 rtx_code_label *label;
28743 bool all_low_regs, bind_old_new;
28744 rtx x;
28746 bool is_armv8_sync = arm_arch8 && is_mm_sync (model);
28748 bool use_acquire = TARGET_HAVE_LDACQ
28749 && !(is_mm_relaxed (model) || is_mm_consume (model)
28750 || is_mm_release (model));
28752 bool use_release = TARGET_HAVE_LDACQ
28753 && !(is_mm_relaxed (model) || is_mm_consume (model)
28754 || is_mm_acquire (model));
28756 /* For ARMv8, a load-acquire is too weak for __sync memory orders. Instead,
28757 a full barrier is emitted after the store-release. */
28758 if (is_armv8_sync)
28759 use_acquire = false;
28761 /* Checks whether a barrier is needed and emits one accordingly. */
28762 if (!(use_acquire || use_release))
28763 arm_pre_atomic_barrier (model);
28765 label = gen_label_rtx ();
28766 emit_label (label);
28768 if (new_out)
28769 new_out = gen_lowpart (wmode, new_out);
28770 if (old_out)
28771 old_out = gen_lowpart (wmode, old_out);
28772 else
28773 old_out = new_out;
28774 value = simplify_gen_subreg (wmode, value, mode, 0);
28776 arm_emit_load_exclusive (mode, old_out, mem, use_acquire);
28778 /* Does the operation require destination and first operand to use the same
28779 register? This is decided by register constraints of relevant insn
28780 patterns in thumb1.md. */
28781 gcc_assert (!new_out || REG_P (new_out));
28782 all_low_regs = REG_P (value) && REGNO_REG_CLASS (REGNO (value)) == LO_REGS
28783 && new_out && REGNO_REG_CLASS (REGNO (new_out)) == LO_REGS
28784 && REGNO_REG_CLASS (REGNO (old_out)) == LO_REGS;
28785 bind_old_new =
28786 (TARGET_THUMB1
28787 && code != SET
28788 && code != MINUS
28789 && (code != PLUS || (!all_low_regs && !satisfies_constraint_L (value))));
28791 /* We want to return the old value while putting the result of the operation
28792 in the same register as the old value so copy the old value over to the
28793 destination register and use that register for the operation. */
28794 if (old_out && bind_old_new)
28796 emit_move_insn (new_out, old_out);
28797 old_out = new_out;
28800 switch (code)
28802 case SET:
28803 new_out = value;
28804 break;
28806 case NOT:
28807 x = gen_rtx_AND (wmode, old_out, value);
28808 emit_insn (gen_rtx_SET (new_out, x));
28809 x = gen_rtx_NOT (wmode, new_out);
28810 emit_insn (gen_rtx_SET (new_out, x));
28811 break;
28813 case MINUS:
28814 if (CONST_INT_P (value))
28816 value = GEN_INT (-INTVAL (value));
28817 code = PLUS;
28819 /* FALLTHRU */
28821 case PLUS:
28822 if (mode == DImode)
28824 /* DImode plus/minus need to clobber flags. */
28825 /* The adddi3 and subdi3 patterns are incorrectly written so that
28826 they require matching operands, even when we could easily support
28827 three operands. Thankfully, this can be fixed up post-splitting,
28828 as the individual add+adc patterns do accept three operands and
28829 post-reload cprop can make these moves go away. */
28830 emit_move_insn (new_out, old_out);
28831 if (code == PLUS)
28832 x = gen_adddi3 (new_out, new_out, value);
28833 else
28834 x = gen_subdi3 (new_out, new_out, value);
28835 emit_insn (x);
28836 break;
28838 /* FALLTHRU */
28840 default:
28841 x = gen_rtx_fmt_ee (code, wmode, old_out, value);
28842 emit_insn (gen_rtx_SET (new_out, x));
28843 break;
28846 arm_emit_store_exclusive (mode, cond, mem, gen_lowpart (mode, new_out),
28847 use_release);
28849 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
28850 emit_unlikely_jump (gen_cbranchsi4 (x, cond, const0_rtx, label));
28852 /* Checks whether a barrier is needed and emits one accordingly. */
28853 if (is_armv8_sync
28854 || !(use_acquire || use_release))
28855 arm_post_atomic_barrier (model);
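/* As a rough illustration (SImode, relaxed memory model, CODE == PLUS), the
   split above emits a retry loop along these lines:

	.Lretry:
	   ldrex	old_out, [mem]
	   add		new_out, old_out, value
	   strex	cond, new_out, [mem]
	   cmp		cond, #0
	   bne		.Lretry

   with acquire/release forms or explicit barriers substituted as dictated
   by the memory model.  */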
28858 #define MAX_VECT_LEN 16
28860 struct expand_vec_perm_d
28862 rtx target, op0, op1;
28863 vec_perm_indices perm;
28864 machine_mode vmode;
28865 bool one_vector_p;
28866 bool testing_p;
28869 /* Generate a variable permutation. */
28871 static void
28872 arm_expand_vec_perm_1 (rtx target, rtx op0, rtx op1, rtx sel)
28874 machine_mode vmode = GET_MODE (target);
28875 bool one_vector_p = rtx_equal_p (op0, op1);
28877 gcc_checking_assert (vmode == V8QImode || vmode == V16QImode);
28878 gcc_checking_assert (GET_MODE (op0) == vmode);
28879 gcc_checking_assert (GET_MODE (op1) == vmode);
28880 gcc_checking_assert (GET_MODE (sel) == vmode);
28881 gcc_checking_assert (TARGET_NEON);
28883 if (one_vector_p)
28885 if (vmode == V8QImode)
28886 emit_insn (gen_neon_vtbl1v8qi (target, op0, sel));
28887 else
28888 emit_insn (gen_neon_vtbl1v16qi (target, op0, sel));
28890 else
28892 rtx pair;
28894 if (vmode == V8QImode)
28896 pair = gen_reg_rtx (V16QImode);
28897 emit_insn (gen_neon_vcombinev8qi (pair, op0, op1));
28898 pair = gen_lowpart (TImode, pair);
28899 emit_insn (gen_neon_vtbl2v8qi (target, pair, sel));
28901 else
28903 pair = gen_reg_rtx (OImode);
28904 emit_insn (gen_neon_vcombinev16qi (pair, op0, op1));
28905 emit_insn (gen_neon_vtbl2v16qi (target, pair, sel));
28910 void
28911 arm_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
28913 machine_mode vmode = GET_MODE (target);
28914 unsigned int nelt = GET_MODE_NUNITS (vmode);
28915 bool one_vector_p = rtx_equal_p (op0, op1);
28916 rtx mask;
28918 /* TODO: ARM's VTBL indexing is little-endian. In order to handle GCC's
28919 numbering of elements for big-endian, we must reverse the order. */
28920 gcc_checking_assert (!BYTES_BIG_ENDIAN);
28922 /* The VTBL instruction does not use a modulo index, so we must take care
28923 of that ourselves. */
28924 mask = GEN_INT (one_vector_p ? nelt - 1 : 2 * nelt - 1);
28925 mask = gen_const_vec_duplicate (vmode, mask);
28926 sel = expand_simple_binop (vmode, AND, sel, mask, NULL, 0, OPTAB_LIB_WIDEN);
28928 arm_expand_vec_perm_1 (target, op0, op1, sel);
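/* Worked example: for V8QImode with a single input vector the mask is
   nelt - 1 = 7, so a selector element with the value 11 is reduced to
   11 & 7 = 3, giving the modulo behaviour that VEC_PERM_EXPR requires.  */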
28931 /* Map between architectural lane order and GCC lane order, taking the
28932 ABI into account. See the comment above output_move_neon for details. */
28934 static int
28935 neon_endian_lane_map (machine_mode mode, int lane)
28937 if (BYTES_BIG_ENDIAN)
28939 int nelems = GET_MODE_NUNITS (mode);
28940 /* Reverse lane order. */
28941 lane = (nelems - 1 - lane);
28942 /* Reverse D register order, to match ABI. */
28943 if (GET_MODE_SIZE (mode) == 16)
28944 lane = lane ^ (nelems / 2);
28946 return lane;
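/* Worked example: in big-endian mode, for a 16-byte mode with four lanes
   such as V4SImode, architectural lanes 0, 1, 2, 3 map to GCC lanes
   1, 0, 3, 2 -- the lane order is reversed and then the two D registers
   are swapped.  For an 8-byte mode such as V2SImode only the reversal
   applies, giving 0, 1 -> 1, 0.  */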
28949 /* Some permutations index into pairs of vectors; this is a helper function
28950 that maps indexes into those pairs of vectors. */
28952 static int
28953 neon_pair_endian_lane_map (machine_mode mode, int lane)
28955 int nelem = GET_MODE_NUNITS (mode);
28956 if (BYTES_BIG_ENDIAN)
28957 lane =
28958 neon_endian_lane_map (mode, lane & (nelem - 1)) + (lane & nelem);
28959 return lane;
28962 /* Generate or test for an insn that supports a constant permutation. */
28964 /* Recognize patterns for the VUZP insns. */
28966 static bool
28967 arm_evpc_neon_vuzp (struct expand_vec_perm_d *d)
28969 unsigned int i, odd, mask, nelt = d->perm.length ();
28970 rtx out0, out1, in0, in1;
28971 rtx (*gen)(rtx, rtx, rtx, rtx);
28972 int first_elem;
28973 int swap_nelt;
28975 if (GET_MODE_UNIT_SIZE (d->vmode) >= 8)
28976 return false;
28978 /* arm_expand_vec_perm_const_1 () helpfully swaps the operands for the
28979 big-endian pattern on 64-bit vectors, so we correct for that. */
28980 swap_nelt = BYTES_BIG_ENDIAN && !d->one_vector_p
28981 && GET_MODE_SIZE (d->vmode) == 8 ? nelt : 0;
28983 first_elem = d->perm[neon_endian_lane_map (d->vmode, 0)] ^ swap_nelt;
28985 if (first_elem == neon_endian_lane_map (d->vmode, 0))
28986 odd = 0;
28987 else if (first_elem == neon_endian_lane_map (d->vmode, 1))
28988 odd = 1;
28989 else
28990 return false;
28991 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
28993 for (i = 0; i < nelt; i++)
28995 unsigned elt =
28996 (neon_pair_endian_lane_map (d->vmode, i) * 2 + odd) & mask;
28997 if ((d->perm[i] ^ swap_nelt) != neon_pair_endian_lane_map (d->vmode, elt))
28998 return false;
29001 /* Success! */
29002 if (d->testing_p)
29003 return true;
29005 switch (d->vmode)
29007 case E_V16QImode: gen = gen_neon_vuzpv16qi_internal; break;
29008 case E_V8QImode: gen = gen_neon_vuzpv8qi_internal; break;
29009 case E_V8HImode: gen = gen_neon_vuzpv8hi_internal; break;
29010 case E_V4HImode: gen = gen_neon_vuzpv4hi_internal; break;
29011 case E_V8HFmode: gen = gen_neon_vuzpv8hf_internal; break;
29012 case E_V4HFmode: gen = gen_neon_vuzpv4hf_internal; break;
29013 case E_V4SImode: gen = gen_neon_vuzpv4si_internal; break;
29014 case E_V2SImode: gen = gen_neon_vuzpv2si_internal; break;
29015 case E_V2SFmode: gen = gen_neon_vuzpv2sf_internal; break;
29016 case E_V4SFmode: gen = gen_neon_vuzpv4sf_internal; break;
29017 default:
29018 gcc_unreachable ();
29021 in0 = d->op0;
29022 in1 = d->op1;
29023 if (swap_nelt != 0)
29024 std::swap (in0, in1);
29026 out0 = d->target;
29027 out1 = gen_reg_rtx (d->vmode);
29028 if (odd)
29029 std::swap (out0, out1);
29031 emit_insn (gen (out0, in0, in1, out1));
29032 return true;
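/* Illustrative little-endian example: on two V4SImode operands, the
   permutation {0, 2, 4, 6} selects the even-indexed elements of the
   concatenated inputs and is recognized here as a single VUZP.32.  */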
29035 /* Recognize patterns for the VZIP insns. */
29037 static bool
29038 arm_evpc_neon_vzip (struct expand_vec_perm_d *d)
29040 unsigned int i, high, mask, nelt = d->perm.length ();
29041 rtx out0, out1, in0, in1;
29042 rtx (*gen)(rtx, rtx, rtx, rtx);
29043 int first_elem;
29044 bool is_swapped;
29046 if (GET_MODE_UNIT_SIZE (d->vmode) >= 8)
29047 return false;
29049 is_swapped = BYTES_BIG_ENDIAN;
29051 first_elem = d->perm[neon_endian_lane_map (d->vmode, 0) ^ is_swapped];
29053 high = nelt / 2;
29054 if (first_elem == neon_endian_lane_map (d->vmode, high))
29055 ;
29056 else if (first_elem == neon_endian_lane_map (d->vmode, 0))
29057 high = 0;
29058 else
29059 return false;
29060 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
29062 for (i = 0; i < nelt / 2; i++)
29064 unsigned elt =
29065 neon_pair_endian_lane_map (d->vmode, i + high) & mask;
29066 if (d->perm[neon_pair_endian_lane_map (d->vmode, 2 * i + is_swapped)]
29067 != elt)
29068 return false;
29069 elt =
29070 neon_pair_endian_lane_map (d->vmode, i + nelt + high) & mask;
29071 if (d->perm[neon_pair_endian_lane_map (d->vmode, 2 * i + !is_swapped)]
29072 != elt)
29073 return false;
29076 /* Success! */
29077 if (d->testing_p)
29078 return true;
29080 switch (d->vmode)
29082 case E_V16QImode: gen = gen_neon_vzipv16qi_internal; break;
29083 case E_V8QImode: gen = gen_neon_vzipv8qi_internal; break;
29084 case E_V8HImode: gen = gen_neon_vzipv8hi_internal; break;
29085 case E_V4HImode: gen = gen_neon_vzipv4hi_internal; break;
29086 case E_V8HFmode: gen = gen_neon_vzipv8hf_internal; break;
29087 case E_V4HFmode: gen = gen_neon_vzipv4hf_internal; break;
29088 case E_V4SImode: gen = gen_neon_vzipv4si_internal; break;
29089 case E_V2SImode: gen = gen_neon_vzipv2si_internal; break;
29090 case E_V2SFmode: gen = gen_neon_vzipv2sf_internal; break;
29091 case E_V4SFmode: gen = gen_neon_vzipv4sf_internal; break;
29092 default:
29093 gcc_unreachable ();
29096 in0 = d->op0;
29097 in1 = d->op1;
29098 if (is_swapped)
29099 std::swap (in0, in1);
29101 out0 = d->target;
29102 out1 = gen_reg_rtx (d->vmode);
29103 if (high)
29104 std::swap (out0, out1);
29106 emit_insn (gen (out0, in0, in1, out1));
29107 return true;
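/* Illustrative little-endian example: on two V4SImode operands, the
   permutation {0, 4, 1, 5} interleaves the low halves of the inputs and
   is recognized here as a single VZIP.32.  */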
29110 /* Recognize patterns for the VREV insns. */
29112 static bool
29113 arm_evpc_neon_vrev (struct expand_vec_perm_d *d)
29115 unsigned int i, j, diff, nelt = d->perm.length ();
29116 rtx (*gen)(rtx, rtx);
29118 if (!d->one_vector_p)
29119 return false;
29121 diff = d->perm[0];
29122 switch (diff)
29124 case 7:
29125 switch (d->vmode)
29127 case E_V16QImode: gen = gen_neon_vrev64v16qi; break;
29128 case E_V8QImode: gen = gen_neon_vrev64v8qi; break;
29129 default:
29130 return false;
29132 break;
29133 case 3:
29134 switch (d->vmode)
29136 case E_V16QImode: gen = gen_neon_vrev32v16qi; break;
29137 case E_V8QImode: gen = gen_neon_vrev32v8qi; break;
29138 case E_V8HImode: gen = gen_neon_vrev64v8hi; break;
29139 case E_V4HImode: gen = gen_neon_vrev64v4hi; break;
29140 case E_V8HFmode: gen = gen_neon_vrev64v8hf; break;
29141 case E_V4HFmode: gen = gen_neon_vrev64v4hf; break;
29142 default:
29143 return false;
29145 break;
29146 case 1:
29147 switch (d->vmode)
29149 case E_V16QImode: gen = gen_neon_vrev16v16qi; break;
29150 case E_V8QImode: gen = gen_neon_vrev16v8qi; break;
29151 case E_V8HImode: gen = gen_neon_vrev32v8hi; break;
29152 case E_V4HImode: gen = gen_neon_vrev32v4hi; break;
29153 case E_V4SImode: gen = gen_neon_vrev64v4si; break;
29154 case E_V2SImode: gen = gen_neon_vrev64v2si; break;
29155 case E_V4SFmode: gen = gen_neon_vrev64v4sf; break;
29156 case E_V2SFmode: gen = gen_neon_vrev64v2sf; break;
29157 default:
29158 return false;
29160 break;
29161 default:
29162 return false;
29165 for (i = 0; i < nelt ; i += diff + 1)
29166 for (j = 0; j <= diff; j += 1)
29168 /* This is guaranteed to be true as the value of diff
29169 is 7, 3, 1 and we should have enough elements in the
29170 queue to generate this. Getting a vector mask with a
29171 value of diff other than these values implies that
29172 something is wrong by the time we get here. */
29173 gcc_assert (i + j < nelt);
29174 if (d->perm[i + j] != i + diff - j)
29175 return false;
29178 /* Success! */
29179 if (d->testing_p)
29180 return true;
29182 emit_insn (gen (d->target, d->op0));
29183 return true;
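/* Illustrative examples: for a single V8QImode input, the permutation
   {7, 6, 5, 4, 3, 2, 1, 0} has diff == 7 and is recognized as VREV64.8,
   while {1, 0, 3, 2, 5, 4, 7, 6} has diff == 1 and becomes VREV16.8.  */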
29186 /* Recognize patterns for the VTRN insns. */
29188 static bool
29189 arm_evpc_neon_vtrn (struct expand_vec_perm_d *d)
29191 unsigned int i, odd, mask, nelt = d->perm.length ();
29192 rtx out0, out1, in0, in1;
29193 rtx (*gen)(rtx, rtx, rtx, rtx);
29195 if (GET_MODE_UNIT_SIZE (d->vmode) >= 8)
29196 return false;
29198 /* Note that these are little-endian tests. Adjust for big-endian later. */
29199 if (d->perm[0] == 0)
29200 odd = 0;
29201 else if (d->perm[0] == 1)
29202 odd = 1;
29203 else
29204 return false;
29205 mask = (d->one_vector_p ? nelt - 1 : 2 * nelt - 1);
29207 for (i = 0; i < nelt; i += 2)
29209 if (d->perm[i] != i + odd)
29210 return false;
29211 if (d->perm[i + 1] != ((i + nelt + odd) & mask))
29212 return false;
29215 /* Success! */
29216 if (d->testing_p)
29217 return true;
29219 switch (d->vmode)
29221 case E_V16QImode: gen = gen_neon_vtrnv16qi_internal; break;
29222 case E_V8QImode: gen = gen_neon_vtrnv8qi_internal; break;
29223 case E_V8HImode: gen = gen_neon_vtrnv8hi_internal; break;
29224 case E_V4HImode: gen = gen_neon_vtrnv4hi_internal; break;
29225 case E_V8HFmode: gen = gen_neon_vtrnv8hf_internal; break;
29226 case E_V4HFmode: gen = gen_neon_vtrnv4hf_internal; break;
29227 case E_V4SImode: gen = gen_neon_vtrnv4si_internal; break;
29228 case E_V2SImode: gen = gen_neon_vtrnv2si_internal; break;
29229 case E_V2SFmode: gen = gen_neon_vtrnv2sf_internal; break;
29230 case E_V4SFmode: gen = gen_neon_vtrnv4sf_internal; break;
29231 default:
29232 gcc_unreachable ();
29235 in0 = d->op0;
29236 in1 = d->op1;
29237 if (BYTES_BIG_ENDIAN)
29239 std::swap (in0, in1);
29240 odd = !odd;
29243 out0 = d->target;
29244 out1 = gen_reg_rtx (d->vmode);
29245 if (odd)
29246 std::swap (out0, out1);
29248 emit_insn (gen (out0, in0, in1, out1));
29249 return true;
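/* Illustrative little-endian example: on two V4SImode operands, the
   permutation {0, 4, 2, 6} pairs up the even-numbered lanes of the two
   inputs and is recognized here as a single VTRN.32.  */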
29252 /* Recognize patterns for the VEXT insns. */
29254 static bool
29255 arm_evpc_neon_vext (struct expand_vec_perm_d *d)
29257 unsigned int i, nelt = d->perm.length ();
29258 rtx (*gen) (rtx, rtx, rtx, rtx);
29259 rtx offset;
29261 unsigned int location;
29263 unsigned int next = d->perm[0] + 1;
29265 /* TODO: Handle GCC's numbering of elements for big-endian. */
29266 if (BYTES_BIG_ENDIAN)
29267 return false;
29269 /* Check if the extracted indexes are increasing by one. */
29270 for (i = 1; i < nelt; next++, i++)
29272 /* If we hit the most significant element of the 2nd vector in
29273 the previous iteration, no need to test further. */
29274 if (next == 2 * nelt)
29275 return false;
29277 /* If we are operating on only one vector: it could be a
29278 rotation. If there are only two elements of size < 64, let
29279 arm_evpc_neon_vrev catch it. */
29280 if (d->one_vector_p && (next == nelt))
29282 if ((nelt == 2) && (d->vmode != V2DImode))
29283 return false;
29284 else
29285 next = 0;
29288 if (d->perm[i] != next)
29289 return false;
29292 location = d->perm[0];
29294 switch (d->vmode)
29296 case E_V16QImode: gen = gen_neon_vextv16qi; break;
29297 case E_V8QImode: gen = gen_neon_vextv8qi; break;
29298 case E_V4HImode: gen = gen_neon_vextv4hi; break;
29299 case E_V8HImode: gen = gen_neon_vextv8hi; break;
29300 case E_V2SImode: gen = gen_neon_vextv2si; break;
29301 case E_V4SImode: gen = gen_neon_vextv4si; break;
29302 case E_V4HFmode: gen = gen_neon_vextv4hf; break;
29303 case E_V8HFmode: gen = gen_neon_vextv8hf; break;
29304 case E_V2SFmode: gen = gen_neon_vextv2sf; break;
29305 case E_V4SFmode: gen = gen_neon_vextv4sf; break;
29306 case E_V2DImode: gen = gen_neon_vextv2di; break;
29307 default:
29308 return false;
29311 /* Success! */
29312 if (d->testing_p)
29313 return true;
29315 offset = GEN_INT (location);
29316 emit_insn (gen (d->target, d->op0, d->op1, offset));
29317 return true;
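/* Illustrative little-endian example: on two V4SImode operands, the
   permutation {1, 2, 3, 4} takes four consecutive elements starting at
   lane 1 of the concatenated inputs and is recognized as VEXT.32 with
   an offset of 1.  */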
29320 /* The NEON VTBL instruction is a fully variable permutation that's even
29321 stronger than what we expose via VEC_PERM_EXPR. What it doesn't do
29322 is mask the index operand as VEC_PERM_EXPR requires. Therefore we
29323 can do slightly better by expanding this as a constant where we don't
29324 have to apply a mask. */
29326 static bool
29327 arm_evpc_neon_vtbl (struct expand_vec_perm_d *d)
29329 rtx rperm[MAX_VECT_LEN], sel;
29330 machine_mode vmode = d->vmode;
29331 unsigned int i, nelt = d->perm.length ();
29333 /* TODO: ARM's VTBL indexing is little-endian. In order to handle GCC's
29334 numbering of elements for big-endian, we must reverse the order. */
29335 if (BYTES_BIG_ENDIAN)
29336 return false;
29338 if (d->testing_p)
29339 return true;
29341 /* Generic code will try constant permutation twice. Once with the
29342 original mode and again with the elements lowered to QImode.
29343 So wait and don't do the selector expansion ourselves. */
29344 if (vmode != V8QImode && vmode != V16QImode)
29345 return false;
29347 for (i = 0; i < nelt; ++i)
29348 rperm[i] = GEN_INT (d->perm[i]);
29349 sel = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, rperm));
29350 sel = force_reg (vmode, sel);
29352 arm_expand_vec_perm_1 (d->target, d->op0, d->op1, sel);
29353 return true;
29356 static bool
29357 arm_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
29359 /* Check if the input mask matches vext before reordering the
29360 operands. */
29361 if (TARGET_NEON)
29362 if (arm_evpc_neon_vext (d))
29363 return true;
29365 /* The pattern matching functions above are written to look for a small
29366 number to begin the sequence (0, 1, N/2). If we begin with an index
29367 from the second operand, we can swap the operands. */
29368 unsigned int nelt = d->perm.length ();
29369 if (d->perm[0] >= nelt)
29371 d->perm.rotate_inputs (1);
29372 std::swap (d->op0, d->op1);
29375 if (TARGET_NEON)
29377 if (arm_evpc_neon_vuzp (d))
29378 return true;
29379 if (arm_evpc_neon_vzip (d))
29380 return true;
29381 if (arm_evpc_neon_vrev (d))
29382 return true;
29383 if (arm_evpc_neon_vtrn (d))
29384 return true;
29385 return arm_evpc_neon_vtbl (d);
29387 return false;
29390 /* Implement TARGET_VECTORIZE_VEC_PERM_CONST. */
29392 static bool
29393 arm_vectorize_vec_perm_const (machine_mode vmode, rtx target, rtx op0, rtx op1,
29394 const vec_perm_indices &sel)
29396 struct expand_vec_perm_d d;
29397 int i, nelt, which;
29399 if (!VALID_NEON_DREG_MODE (vmode) && !VALID_NEON_QREG_MODE (vmode))
29400 return false;
29402 d.target = target;
29403 d.op0 = op0;
29404 d.op1 = op1;
29406 d.vmode = vmode;
29407 gcc_assert (VECTOR_MODE_P (d.vmode));
29408 d.testing_p = !target;
29410 nelt = GET_MODE_NUNITS (d.vmode);
29411 for (i = which = 0; i < nelt; ++i)
29413 int ei = sel[i] & (2 * nelt - 1);
29414 which |= (ei < nelt ? 1 : 2);
29417 switch (which)
29419 default:
29420 gcc_unreachable();
29422 case 3:
29423 d.one_vector_p = false;
29424 if (d.testing_p || !rtx_equal_p (op0, op1))
29425 break;
29427 /* The elements of PERM do not suggest that only the first operand
29428 is used, but both operands are identical. Allow easier matching
29429 of the permutation by folding the permutation into the single
29430 input vector. */
29431 /* FALLTHRU */
29432 case 2:
29433 d.op0 = op1;
29434 d.one_vector_p = true;
29435 break;
29437 case 1:
29438 d.op1 = op0;
29439 d.one_vector_p = true;
29440 break;
29443 d.perm.new_vector (sel.encoding (), d.one_vector_p ? 1 : 2, nelt);
29445 if (!d.testing_p)
29446 return arm_expand_vec_perm_const_1 (&d);
29448 d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
29449 d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
29450 if (!d.one_vector_p)
29451 d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
29453 start_sequence ();
29454 bool ret = arm_expand_vec_perm_const_1 (&d);
29455 end_sequence ();
29457 return ret;
29460 bool
29461 arm_autoinc_modes_ok_p (machine_mode mode, enum arm_auto_incmodes code)
29463 /* For soft-float targets, all auto-increment forms are OK if LDRD is
29464 available or the access is no wider than a word. */
29465 if (TARGET_SOFT_FLOAT && (TARGET_LDRD || GET_MODE_SIZE (mode) <= 4))
29466 return true;
29468 switch (code)
29470 /* Post increment and Pre Decrement are supported for all
29471 instruction forms except for vector forms. */
29472 case ARM_POST_INC:
29473 case ARM_PRE_DEC:
29474 if (VECTOR_MODE_P (mode))
29476 if (code != ARM_PRE_DEC)
29477 return true;
29478 else
29479 return false;
29482 return true;
29484 case ARM_POST_DEC:
29485 case ARM_PRE_INC:
29486 /* Without LDRD, and with a mode wider than a
29487 word, there is no point in auto-incrementing
29488 because ldm and stm do not have these forms. */
29489 if (!TARGET_LDRD && GET_MODE_SIZE (mode) > 4)
29490 return false;
29492 /* Vector and floating point modes do not support
29493 these auto increment forms. */
29494 if (FLOAT_MODE_P (mode) || VECTOR_MODE_P (mode))
29495 return false;
29497 return true;
29499 default:
29500 return false;
29504 return false;
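/* Illustrative examples, assuming the soft-float early return above does
   not apply: ARM_POST_INC on a V4SImode vector is allowed while
   ARM_PRE_DEC on the same mode is not, and ARM_PRE_INC is rejected for
   any floating-point or vector mode.  */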
29507 /* The default expansion of general 64-bit shifts in core-regs is suboptimal
29508 on ARM, since we know that shifts by negative amounts are no-ops.
29509 Additionally, the default expansion code is not available or suitable
29510 for post-reload insn splits (this can occur when the register allocator
29511 chooses not to do a shift in NEON).
29513 This function is used in both initial expand and post-reload splits, and
29514 handles all kinds of 64-bit shifts.
29516 Input requirements:
29517 - It is safe for the input and output to be the same register, but
29518 early-clobber rules apply for the shift amount and scratch registers.
29519 - Shift by register requires both scratch registers. In all other cases
29520 the scratch registers may be NULL.
29521 - Ashiftrt by a register also clobbers the CC register. */
29522 void
29523 arm_emit_coreregs_64bit_shift (enum rtx_code code, rtx out, rtx in,
29524 rtx amount, rtx scratch1, rtx scratch2)
29526 rtx out_high = gen_highpart (SImode, out);
29527 rtx out_low = gen_lowpart (SImode, out);
29528 rtx in_high = gen_highpart (SImode, in);
29529 rtx in_low = gen_lowpart (SImode, in);
29531 /* Terminology:
29532 in = the register pair containing the input value.
29533 out = the destination register pair.
29534 up = the high- or low-part of each pair.
29535 down = the opposite part to "up".
29536 In a shift, we can consider bits to shift from "up"-stream to
29537 "down"-stream, so in a left-shift "up" is the low-part and "down"
29538 is the high-part of each register pair. */
29540 rtx out_up = code == ASHIFT ? out_low : out_high;
29541 rtx out_down = code == ASHIFT ? out_high : out_low;
29542 rtx in_up = code == ASHIFT ? in_low : in_high;
29543 rtx in_down = code == ASHIFT ? in_high : in_low;
29545 gcc_assert (code == ASHIFT || code == ASHIFTRT || code == LSHIFTRT);
29546 gcc_assert (out
29547 && (REG_P (out) || GET_CODE (out) == SUBREG)
29548 && GET_MODE (out) == DImode);
29549 gcc_assert (in
29550 && (REG_P (in) || GET_CODE (in) == SUBREG)
29551 && GET_MODE (in) == DImode);
29552 gcc_assert (amount
29553 && (((REG_P (amount) || GET_CODE (amount) == SUBREG)
29554 && GET_MODE (amount) == SImode)
29555 || CONST_INT_P (amount)));
29556 gcc_assert (scratch1 == NULL
29557 || (GET_CODE (scratch1) == SCRATCH)
29558 || (GET_MODE (scratch1) == SImode
29559 && REG_P (scratch1)));
29560 gcc_assert (scratch2 == NULL
29561 || (GET_CODE (scratch2) == SCRATCH)
29562 || (GET_MODE (scratch2) == SImode
29563 && REG_P (scratch2)));
29564 gcc_assert (!REG_P (out) || !REG_P (amount)
29565 || !HARD_REGISTER_P (out)
29566 || (REGNO (out) != REGNO (amount)
29567 && REGNO (out) + 1 != REGNO (amount)));
29569 /* Macros to make following code more readable. */
29570 #define SUB_32(DEST,SRC) \
29571 gen_addsi3 ((DEST), (SRC), GEN_INT (-32))
29572 #define RSB_32(DEST,SRC) \
29573 gen_subsi3 ((DEST), GEN_INT (32), (SRC))
29574 #define SUB_S_32(DEST,SRC) \
29575 gen_addsi3_compare0 ((DEST), (SRC), \
29576 GEN_INT (-32))
29577 #define SET(DEST,SRC) \
29578 gen_rtx_SET ((DEST), (SRC))
29579 #define SHIFT(CODE,SRC,AMOUNT) \
29580 gen_rtx_fmt_ee ((CODE), SImode, (SRC), (AMOUNT))
29581 #define LSHIFT(CODE,SRC,AMOUNT) \
29582 gen_rtx_fmt_ee ((CODE) == ASHIFT ? ASHIFT : LSHIFTRT, \
29583 SImode, (SRC), (AMOUNT))
29584 #define REV_LSHIFT(CODE,SRC,AMOUNT) \
29585 gen_rtx_fmt_ee ((CODE) == ASHIFT ? LSHIFTRT : ASHIFT, \
29586 SImode, (SRC), (AMOUNT))
29587 #define ORR(A,B) \
29588 gen_rtx_IOR (SImode, (A), (B))
29589 #define BRANCH(COND,LABEL) \
29590 gen_arm_cond_branch ((LABEL), \
29591 gen_rtx_ ## COND (CCmode, cc_reg, \
29592 const0_rtx), \
29593 cc_reg)
29595 /* Shifts by register and shifts by constant are handled separately. */
29596 if (CONST_INT_P (amount))
29598 /* We have a shift-by-constant. */
29600 /* First, handle out-of-range shift amounts.
29601 In both cases we try to match the result that an ARM instruction in a
29602 shift-by-register would give. This helps reduce execution
29603 differences between optimization levels, but it won't stop other
29604 parts of the compiler doing different things. This is "undefined
29605 behavior", in any case. */
29606 if (INTVAL (amount) <= 0)
29607 emit_insn (gen_movdi (out, in));
29608 else if (INTVAL (amount) >= 64)
29610 if (code == ASHIFTRT)
29612 rtx const31_rtx = GEN_INT (31);
29613 emit_insn (SET (out_down, SHIFT (code, in_up, const31_rtx)));
29614 emit_insn (SET (out_up, SHIFT (code, in_up, const31_rtx)));
29616 else
29617 emit_insn (gen_movdi (out, const0_rtx));
29620 /* Now handle valid shifts. */
29621 else if (INTVAL (amount) < 32)
29623 /* Shifts by a constant less than 32. */
29624 rtx reverse_amount = GEN_INT (32 - INTVAL (amount));
29626 /* Clearing the out register in DImode first avoids lots
29627 of spilling and results in less stack usage.
29628 Later this redundant insn is completely removed.
29629 Do that only if "in" and "out" are different registers. */
29630 if (REG_P (out) && REG_P (in) && REGNO (out) != REGNO (in))
29631 emit_insn (SET (out, const0_rtx));
29632 emit_insn (SET (out_down, LSHIFT (code, in_down, amount)));
29633 emit_insn (SET (out_down,
29634 ORR (REV_LSHIFT (code, in_up, reverse_amount),
29635 out_down)));
29636 emit_insn (SET (out_up, SHIFT (code, in_up, amount)));
29638 else
29640 /* Shifts by a constant greater than 31. */
29641 rtx adj_amount = GEN_INT (INTVAL (amount) - 32);
29643 if (REG_P (out) && REG_P (in) && REGNO (out) != REGNO (in))
29644 emit_insn (SET (out, const0_rtx));
29645 emit_insn (SET (out_down, SHIFT (code, in_up, adj_amount)));
29646 if (code == ASHIFTRT)
29647 emit_insn (gen_ashrsi3 (out_up, in_up,
29648 GEN_INT (31)));
29649 else
29650 emit_insn (SET (out_up, const0_rtx));
29653 else
29655 /* We have a shift-by-register. */
29656 rtx cc_reg = gen_rtx_REG (CC_NOOVmode, CC_REGNUM);
29658 /* This alternative requires the scratch registers. */
29659 gcc_assert (scratch1 && REG_P (scratch1));
29660 gcc_assert (scratch2 && REG_P (scratch2));
29662 /* We will need the values "amount-32" and "32-amount" later.
29663 Swapping them around now allows the later code to be more general. */
29664 switch (code)
29666 case ASHIFT:
29667 emit_insn (SUB_32 (scratch1, amount));
29668 emit_insn (RSB_32 (scratch2, amount));
29669 break;
29670 case ASHIFTRT:
29671 emit_insn (RSB_32 (scratch1, amount));
29672 /* Also set CC = amount > 32. */
29673 emit_insn (SUB_S_32 (scratch2, amount));
29674 break;
29675 case LSHIFTRT:
29676 emit_insn (RSB_32 (scratch1, amount));
29677 emit_insn (SUB_32 (scratch2, amount));
29678 break;
29679 default:
29680 gcc_unreachable ();
29683 /* Emit code like this:
29685 arithmetic-left:
29686 out_down = in_down << amount;
29687 out_down = (in_up << (amount - 32)) | out_down;
29688 out_down = ((unsigned)in_up >> (32 - amount)) | out_down;
29689 out_up = in_up << amount;
29691 arithmetic-right:
29692 out_down = in_down >> amount;
29693 out_down = (in_up << (32 - amount)) | out_down;
29694 if (amount < 32)
29695 out_down = ((signed)in_up >> (amount - 32)) | out_down;
29696 out_up = in_up << amount;
29698 logical-right:
29699 out_down = in_down >> amount;
29700 out_down = (in_up << (32 - amount)) | out_down;
29701 if (amount < 32)
29702 out_down = ((unsigned)in_up >> (amount - 32)) | out_down;
29703 out_up = in_up << amount;
29705 The ARM and Thumb2 variants are the same but implemented slightly
29706 differently. If this were only called during expand we could just
29707 use the Thumb2 case and let combine do the right thing, but this
29708 can also be called from post-reload splitters. */
29710 emit_insn (SET (out_down, LSHIFT (code, in_down, amount)));
29712 if (!TARGET_THUMB2)
29714 /* Emit code for ARM mode. */
29715 emit_insn (SET (out_down,
29716 ORR (SHIFT (ASHIFT, in_up, scratch1), out_down)));
29717 if (code == ASHIFTRT)
29719 rtx_code_label *done_label = gen_label_rtx ();
29720 emit_jump_insn (BRANCH (LT, done_label));
29721 emit_insn (SET (out_down, ORR (SHIFT (ASHIFTRT, in_up, scratch2),
29722 out_down)));
29723 emit_label (done_label);
29725 else
29726 emit_insn (SET (out_down, ORR (SHIFT (LSHIFTRT, in_up, scratch2),
29727 out_down)));
29729 else
29731 /* Emit code for Thumb2 mode.
29732 Thumb2 can't do shift and or in one insn. */
29733 emit_insn (SET (scratch1, SHIFT (ASHIFT, in_up, scratch1)));
29734 emit_insn (gen_iorsi3 (out_down, out_down, scratch1));
29736 if (code == ASHIFTRT)
29738 rtx_code_label *done_label = gen_label_rtx ();
29739 emit_jump_insn (BRANCH (LT, done_label));
29740 emit_insn (SET (scratch2, SHIFT (ASHIFTRT, in_up, scratch2)));
29741 emit_insn (SET (out_down, ORR (out_down, scratch2)));
29742 emit_label (done_label);
29744 else
29746 emit_insn (SET (scratch2, SHIFT (LSHIFTRT, in_up, scratch2)));
29747 emit_insn (gen_iorsi3 (out_down, out_down, scratch2));
29751 emit_insn (SET (out_up, SHIFT (code, in_up, amount)));
29754 #undef SUB_32
29755 #undef RSB_32
29756 #undef SUB_S_32
29757 #undef SET
29758 #undef SHIFT
29759 #undef LSHIFT
29760 #undef REV_LSHIFT
29761 #undef ORR
29762 #undef BRANCH
29765 /* Returns true if the pattern is a valid symbolic address, which is either a
29766 symbol_ref or (symbol_ref + addend).
29768 According to the ARM ELF ABI, the initial addend of REL-type relocations
29769 processing MOVW and MOVT instructions is formed by interpreting the 16-bit
29770 literal field of the instruction as a 16-bit signed value in the range
29771 -32768 <= A < 32768. */
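/* For example, (symbol_ref "x") and (const (plus (symbol_ref "x")
   (const_int 4))) are accepted below, whereas an addend such as 0x12345
   would be rejected because it does not fit the signed 16-bit field.  */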
29773 bool
29774 arm_valid_symbolic_address_p (rtx addr)
29776 rtx xop0, xop1 = NULL_RTX;
29777 rtx tmp = addr;
29779 if (GET_CODE (tmp) == SYMBOL_REF || GET_CODE (tmp) == LABEL_REF)
29780 return true;
29782 /* (const (plus: symbol_ref const_int)) */
29783 if (GET_CODE (addr) == CONST)
29784 tmp = XEXP (addr, 0);
29786 if (GET_CODE (tmp) == PLUS)
29788 xop0 = XEXP (tmp, 0);
29789 xop1 = XEXP (tmp, 1);
29791 if (GET_CODE (xop0) == SYMBOL_REF && CONST_INT_P (xop1))
29792 return IN_RANGE (INTVAL (xop1), -0x8000, 0x7fff);
29795 return false;
29798 /* Return true if *COMPARISON is a comparison operation we can handle,
29799 forcing the operands into a form that is valid for it. */
29800 bool
29801 arm_validize_comparison (rtx *comparison, rtx * op1, rtx * op2)
29803 enum rtx_code code = GET_CODE (*comparison);
29804 int code_int;
29805 machine_mode mode = (GET_MODE (*op1) == VOIDmode)
29806 ? GET_MODE (*op2) : GET_MODE (*op1);
29808 gcc_assert (GET_MODE (*op1) != VOIDmode || GET_MODE (*op2) != VOIDmode);
29810 if (code == UNEQ || code == LTGT)
29811 return false;
29813 code_int = (int)code;
29814 arm_canonicalize_comparison (&code_int, op1, op2, 0);
29815 PUT_CODE (*comparison, (enum rtx_code)code_int);
29817 switch (mode)
29819 case E_SImode:
29820 if (!arm_add_operand (*op1, mode))
29821 *op1 = force_reg (mode, *op1);
29822 if (!arm_add_operand (*op2, mode))
29823 *op2 = force_reg (mode, *op2);
29824 return true;
29826 case E_DImode:
29827 if (!cmpdi_operand (*op1, mode))
29828 *op1 = force_reg (mode, *op1);
29829 if (!cmpdi_operand (*op2, mode))
29830 *op2 = force_reg (mode, *op2);
29831 return true;
29833 case E_HFmode:
29834 if (!TARGET_VFP_FP16INST)
29835 break;
29836 /* FP16 comparisons are done in SF mode. */
29837 mode = SFmode;
29838 *op1 = convert_to_mode (mode, *op1, 1);
29839 *op2 = convert_to_mode (mode, *op2, 1);
29840 /* Fall through. */
29841 case E_SFmode:
29842 case E_DFmode:
29843 if (!vfp_compare_operand (*op1, mode))
29844 *op1 = force_reg (mode, *op1);
29845 if (!vfp_compare_operand (*op2, mode))
29846 *op2 = force_reg (mode, *op2);
29847 return true;
29848 default:
29849 break;
29852 return false;
29856 /* Maximum number of instructions to set block of memory. */
29857 static int
29858 arm_block_set_max_insns (void)
29860 if (optimize_function_for_size_p (cfun))
29861 return 4;
29862 else
29863 return current_tune->max_insns_inline_memset;
29866 /* Return TRUE if it's profitable to set block of memory for
29867 non-vectorized case. VAL is the value to set the memory
29868 with. LENGTH is the number of bytes to set. ALIGN is the
29869 alignment of the destination memory in bytes. UNALIGNED_P
29870 is TRUE if we can only set the memory with instructions
29871 meeting alignment requirements. USE_STRD_P is TRUE if we
29872 can use strd to set the memory. */
29873 static bool
29874 arm_block_set_non_vect_profit_p (rtx val,
29875 unsigned HOST_WIDE_INT length,
29876 unsigned HOST_WIDE_INT align,
29877 bool unaligned_p, bool use_strd_p)
29879 int num = 0;
29880 /* For a leftover of 0-7 bytes, the memory block can be set using
29881 strb/strh/str with the minimum number of instructions given here. */
29882 const int leftover[8] = {0, 1, 1, 2, 1, 2, 2, 3};
29884 if (unaligned_p)
29886 num = arm_const_inline_cost (SET, val);
29887 num += length / align + length % align;
29889 else if (use_strd_p)
29891 num = arm_const_double_inline_cost (val);
29892 num += (length >> 3) + leftover[length & 7];
29894 else
29896 num = arm_const_inline_cost (SET, val);
29897 num += (length >> 2) + leftover[length & 3];
29900 /* We may be able to combine the last STRH/STRB pair into a single STR
29901 by shifting one byte back. */
29902 if (unaligned_access && length > 3 && (length & 3) == 3)
29903 num--;
29905 return (num <= arm_block_set_max_insns ());
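/* Rough worked example, ignoring the cost of materializing VAL: for a
   14-byte block on the strd path, num is (14 >> 3) + leftover[14 & 7]
   = 1 + 2, i.e. one STRD followed by an STR and an STRH for the six
   leftover bytes.  */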
29908 /* Return TRUE if it's profitable to set block of memory for
29909 vectorized case. LENGTH is the number of bytes to set.
29910 ALIGN is the alignment of destination memory in bytes.
29911 MODE is the vector mode used to set the memory. */
29912 static bool
29913 arm_block_set_vect_profit_p (unsigned HOST_WIDE_INT length,
29914 unsigned HOST_WIDE_INT align,
29915 machine_mode mode)
29917 int num;
29918 bool unaligned_p = ((align & 3) != 0);
29919 unsigned int nelt = GET_MODE_NUNITS (mode);
29921 /* Instruction loading constant value. */
29922 num = 1;
29923 /* Instructions storing the memory. */
29924 num += (length + nelt - 1) / nelt;
29925 /* Instructions adjusting the address expression. We only need to
29926 adjust the address expression if the block is 4-byte aligned and the
29927 leftover bytes can only be stored by a misaligned store instruction. */
29928 if (!unaligned_p && (length & 3) != 0)
29929 num++;
29931 /* Store the first 16 bytes using vst1:v16qi for the aligned case. */
29932 if (!unaligned_p && mode == V16QImode)
29933 num--;
29935 return (num <= arm_block_set_max_insns ());
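/* Rough worked example: for LENGTH = 17 with 4-byte alignment, assuming
   V16QImode is used, num is 1 (load the constant) + 2 (two vector stores)
   + 1 (adjust the address for the misaligned tail) - 1 (the aligned
   vst1:v16qi special case) = 3 instructions.  */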
29938 /* Set a block of memory using vectorization instructions for the
29939 unaligned case. We fill the first LENGTH bytes of the memory
29940 area starting from DSTBASE with byte constant VALUE. ALIGN is
29941 the alignment requirement of memory. Return TRUE if succeeded. */
29942 static bool
29943 arm_block_set_unaligned_vect (rtx dstbase,
29944 unsigned HOST_WIDE_INT length,
29945 unsigned HOST_WIDE_INT value,
29946 unsigned HOST_WIDE_INT align)
29948 unsigned int i, nelt_v16, nelt_v8, nelt_mode;
29949 rtx dst, mem;
29950 rtx val_vec, reg;
29951 rtx (*gen_func) (rtx, rtx);
29952 machine_mode mode;
29953 unsigned HOST_WIDE_INT v = value;
29954 unsigned int offset = 0;
29955 gcc_assert ((align & 0x3) != 0);
29956 nelt_v8 = GET_MODE_NUNITS (V8QImode);
29957 nelt_v16 = GET_MODE_NUNITS (V16QImode);
29958 if (length >= nelt_v16)
29960 mode = V16QImode;
29961 gen_func = gen_movmisalignv16qi;
29963 else
29965 mode = V8QImode;
29966 gen_func = gen_movmisalignv8qi;
29968 nelt_mode = GET_MODE_NUNITS (mode);
29969 gcc_assert (length >= nelt_mode);
29970 /* Skip if it isn't profitable. */
29971 if (!arm_block_set_vect_profit_p (length, align, mode))
29972 return false;
29974 dst = copy_addr_to_reg (XEXP (dstbase, 0));
29975 mem = adjust_automodify_address (dstbase, mode, dst, offset);
29977 v = sext_hwi (v, BITS_PER_WORD);
29979 reg = gen_reg_rtx (mode);
29980 val_vec = gen_const_vec_duplicate (mode, GEN_INT (v));
29981 /* Emit instruction loading the constant value. */
29982 emit_move_insn (reg, val_vec);
29984 /* Handle nelt_mode bytes in a vector. */
29985 for (i = 0; (i + nelt_mode <= length); i += nelt_mode)
29987 emit_insn ((*gen_func) (mem, reg));
29988 if (i + 2 * nelt_mode <= length)
29990 emit_insn (gen_add2_insn (dst, GEN_INT (nelt_mode)));
29991 offset += nelt_mode;
29992 mem = adjust_automodify_address (dstbase, mode, dst, offset);
29996 /* If at least nelt_v8 bytes are left over, we must be in
29997 V16QI mode. */
29998 gcc_assert ((i + nelt_v8) > length || mode == V16QImode);
30000 /* Handle (8, 16) bytes leftover. */
30001 if (i + nelt_v8 < length)
30003 emit_insn (gen_add2_insn (dst, GEN_INT (length - i)));
30004 offset += length - i;
30005 mem = adjust_automodify_address (dstbase, mode, dst, offset);
30007 /* We are shifting bytes back, set the alignment accordingly. */
30008 if ((length & 1) != 0 && align >= 2)
30009 set_mem_align (mem, BITS_PER_UNIT);
30011 emit_insn (gen_movmisalignv16qi (mem, reg));
30013 /* Handle (0, 8] bytes leftover. */
30014 else if (i < length && i + nelt_v8 >= length)
30016 if (mode == V16QImode)
30017 reg = gen_lowpart (V8QImode, reg);
30019 emit_insn (gen_add2_insn (dst, GEN_INT ((length - i)
30020 + (nelt_mode - nelt_v8))));
30021 offset += (length - i) + (nelt_mode - nelt_v8);
30022 mem = adjust_automodify_address (dstbase, V8QImode, dst, offset);
30024 /* We are shifting bytes back, set the alignment accordingly. */
30025 if ((length & 1) != 0 && align >= 2)
30026 set_mem_align (mem, BITS_PER_UNIT);
30028 emit_insn (gen_movmisalignv8qi (mem, reg));
30031 return true;
30034 /* Set a block of memory using vectorization instructions for the
30035 aligned case. We fill the first LENGTH bytes of the memory area
30036 starting from DSTBASE with byte constant VALUE. ALIGN is the
30037 alignment requirement of memory. Return TRUE if succeeded. */
30038 static bool
30039 arm_block_set_aligned_vect (rtx dstbase,
30040 unsigned HOST_WIDE_INT length,
30041 unsigned HOST_WIDE_INT value,
30042 unsigned HOST_WIDE_INT align)
30044 unsigned int i, nelt_v8, nelt_v16, nelt_mode;
30045 rtx dst, addr, mem;
30046 rtx val_vec, reg;
30047 machine_mode mode;
30048 unsigned HOST_WIDE_INT v = value;
30049 unsigned int offset = 0;
30051 gcc_assert ((align & 0x3) == 0);
30052 nelt_v8 = GET_MODE_NUNITS (V8QImode);
30053 nelt_v16 = GET_MODE_NUNITS (V16QImode);
30054 if (length >= nelt_v16 && unaligned_access && !BYTES_BIG_ENDIAN)
30055 mode = V16QImode;
30056 else
30057 mode = V8QImode;
30059 nelt_mode = GET_MODE_NUNITS (mode);
30060 gcc_assert (length >= nelt_mode);
30061 /* Skip if it isn't profitable. */
30062 if (!arm_block_set_vect_profit_p (length, align, mode))
30063 return false;
30065 dst = copy_addr_to_reg (XEXP (dstbase, 0));
30067 v = sext_hwi (v, BITS_PER_WORD);
30069 reg = gen_reg_rtx (mode);
30070 val_vec = gen_const_vec_duplicate (mode, GEN_INT (v));
30071 /* Emit instruction loading the constant value. */
30072 emit_move_insn (reg, val_vec);
30074 i = 0;
30075 /* Handle first 16 bytes specially using vst1:v16qi instruction. */
30076 if (mode == V16QImode)
30078 mem = adjust_automodify_address (dstbase, mode, dst, offset);
30079 emit_insn (gen_movmisalignv16qi (mem, reg));
30080 i += nelt_mode;
30081 /* Handle (8, 16) bytes leftover using vst1:v16qi again. */
30082 if (i + nelt_v8 < length && i + nelt_v16 > length)
30084 emit_insn (gen_add2_insn (dst, GEN_INT (length - nelt_mode)));
30085 offset += length - nelt_mode;
30086 mem = adjust_automodify_address (dstbase, mode, dst, offset);
30087 /* We are shifting bytes back, set the alignment accordingly. */
30088 if ((length & 0x3) == 0)
30089 set_mem_align (mem, BITS_PER_UNIT * 4);
30090 else if ((length & 0x1) == 0)
30091 set_mem_align (mem, BITS_PER_UNIT * 2);
30092 else
30093 set_mem_align (mem, BITS_PER_UNIT);
30095 emit_insn (gen_movmisalignv16qi (mem, reg));
30096 return true;
30098 /* Fall through for bytes leftover. */
30099 mode = V8QImode;
30100 nelt_mode = GET_MODE_NUNITS (mode);
30101 reg = gen_lowpart (V8QImode, reg);
30104 /* Handle 8 bytes in a vector. */
30105 for (; (i + nelt_mode <= length); i += nelt_mode)
30107 addr = plus_constant (Pmode, dst, i);
30108 mem = adjust_automodify_address (dstbase, mode, addr, offset + i);
30109 emit_move_insn (mem, reg);
30112 /* Handle single word leftover by shifting 4 bytes back. We can
30113 use aligned access for this case. */
30114 if (i + UNITS_PER_WORD == length)
30116 addr = plus_constant (Pmode, dst, i - UNITS_PER_WORD);
30117 offset += i - UNITS_PER_WORD;
30118 mem = adjust_automodify_address (dstbase, mode, addr, offset);
30119 /* We are shifting 4 bytes back, set the alignment accordingly. */
30120 if (align > UNITS_PER_WORD)
30121 set_mem_align (mem, BITS_PER_UNIT * UNITS_PER_WORD);
30123 emit_move_insn (mem, reg);
30125 /* Handle (0, 4), (4, 8) bytes leftover by shifting bytes back.
30126 We have to use unaligned access for this case. */
30127 else if (i < length)
30129 emit_insn (gen_add2_insn (dst, GEN_INT (length - nelt_mode)));
30130 offset += length - nelt_mode;
30131 mem = adjust_automodify_address (dstbase, mode, dst, offset);
30132 /* We are shifting bytes back, set the alignment accordingly. */
30133 if ((length & 1) == 0)
30134 set_mem_align (mem, BITS_PER_UNIT * 2);
30135 else
30136 set_mem_align (mem, BITS_PER_UNIT);
30138 emit_insn (gen_movmisalignv8qi (mem, reg));
30141 return true;
30144 /* Set a block of memory using plain strh/strb instructions, only
30145 using instructions allowed by ALIGN on the processor. We fill the
30146 first LENGTH bytes of the memory area starting from DSTBASE
30147 with byte constant VALUE. ALIGN is the alignment requirement
30148 of memory. */
30149 static bool
30150 arm_block_set_unaligned_non_vect (rtx dstbase,
30151 unsigned HOST_WIDE_INT length,
30152 unsigned HOST_WIDE_INT value,
30153 unsigned HOST_WIDE_INT align)
30155 unsigned int i;
30156 rtx dst, addr, mem;
30157 rtx val_exp, val_reg, reg;
30158 machine_mode mode;
30159 HOST_WIDE_INT v = value;
30161 gcc_assert (align == 1 || align == 2);
30163 if (align == 2)
30164 v |= (value << BITS_PER_UNIT);
30166 v = sext_hwi (v, BITS_PER_WORD);
30167 val_exp = GEN_INT (v);
30168 /* Skip if it isn't profitable. */
30169 if (!arm_block_set_non_vect_profit_p (val_exp, length,
30170 align, true, false))
30171 return false;
30173 dst = copy_addr_to_reg (XEXP (dstbase, 0));
30174 mode = (align == 2 ? HImode : QImode);
30175 val_reg = force_reg (SImode, val_exp);
30176 reg = gen_lowpart (mode, val_reg);
30178 for (i = 0; (i + GET_MODE_SIZE (mode) <= length); i += GET_MODE_SIZE (mode))
30180 addr = plus_constant (Pmode, dst, i);
30181 mem = adjust_automodify_address (dstbase, mode, addr, i);
30182 emit_move_insn (mem, reg);
30185 /* Handle single byte leftover. */
30186 if (i + 1 == length)
30188 reg = gen_lowpart (QImode, val_reg);
30189 addr = plus_constant (Pmode, dst, i);
30190 mem = adjust_automodify_address (dstbase, QImode, addr, i);
30191 emit_move_insn (mem, reg);
30192 i++;
30195 gcc_assert (i == length);
30196 return true;
30199 /* Set a block of memory using plain strd/str/strh/strb instructions,
30200 to permit unaligned copies on processors which support unaligned
30201 semantics for those instructions. We fill the first LENGTH bytes
30202 of the memory area starting from DSTBASE with byte constant VALUE.
30203 ALIGN is the alignment requirement of memory. */
30204 static bool
30205 arm_block_set_aligned_non_vect (rtx dstbase,
30206 unsigned HOST_WIDE_INT length,
30207 unsigned HOST_WIDE_INT value,
30208 unsigned HOST_WIDE_INT align)
30210 unsigned int i;
30211 rtx dst, addr, mem;
30212 rtx val_exp, val_reg, reg;
30213 unsigned HOST_WIDE_INT v;
30214 bool use_strd_p;
30216 use_strd_p = (length >= 2 * UNITS_PER_WORD && (align & 3) == 0
30217 && TARGET_LDRD && current_tune->prefer_ldrd_strd);
30219 v = (value | (value << 8) | (value << 16) | (value << 24));
30220 if (length < UNITS_PER_WORD)
30221 v &= (0xFFFFFFFF >> (UNITS_PER_WORD - length) * BITS_PER_UNIT);
30223 if (use_strd_p)
30224 v |= (v << BITS_PER_WORD);
30225 else
30226 v = sext_hwi (v, BITS_PER_WORD);
30228 val_exp = GEN_INT (v);
30229 /* Skip if it isn't profitable. */
30230 if (!arm_block_set_non_vect_profit_p (val_exp, length,
30231 align, false, use_strd_p))
30233 if (!use_strd_p)
30234 return false;
30236 /* Try without strd. */
30237 v = (v >> BITS_PER_WORD);
30238 v = sext_hwi (v, BITS_PER_WORD);
30239 val_exp = GEN_INT (v);
30240 use_strd_p = false;
30241 if (!arm_block_set_non_vect_profit_p (val_exp, length,
30242 align, false, use_strd_p))
30243 return false;
30246 i = 0;
30247 dst = copy_addr_to_reg (XEXP (dstbase, 0));
30248 /* Handle double words using strd if possible. */
30249 if (use_strd_p)
30251 val_reg = force_reg (DImode, val_exp);
30252 reg = val_reg;
30253 for (; (i + 8 <= length); i += 8)
30255 addr = plus_constant (Pmode, dst, i);
30256 mem = adjust_automodify_address (dstbase, DImode, addr, i);
30257 emit_move_insn (mem, reg);
30260 else
30261 val_reg = force_reg (SImode, val_exp);
30263 /* Handle words. */
30264 reg = (use_strd_p ? gen_lowpart (SImode, val_reg) : val_reg);
30265 for (; (i + 4 <= length); i += 4)
30267 addr = plus_constant (Pmode, dst, i);
30268 mem = adjust_automodify_address (dstbase, SImode, addr, i);
30269 if ((align & 3) == 0)
30270 emit_move_insn (mem, reg);
30271 else
30272 emit_insn (gen_unaligned_storesi (mem, reg));
30275 /* Merge last pair of STRH and STRB into a STR if possible. */
30276 if (unaligned_access && i > 0 && (i + 3) == length)
30278 addr = plus_constant (Pmode, dst, i - 1);
30279 mem = adjust_automodify_address (dstbase, SImode, addr, i - 1);
30280 /* We are shifting one byte back, set the alignment accordingly. */
30281 if ((align & 1) == 0)
30282 set_mem_align (mem, BITS_PER_UNIT);
30284 /* Most likely this is an unaligned access, and we can't tell at
30285 compilation time. */
30286 emit_insn (gen_unaligned_storesi (mem, reg));
30287 return true;
30290 /* Handle half word leftover. */
30291 if (i + 2 <= length)
30293 reg = gen_lowpart (HImode, val_reg);
30294 addr = plus_constant (Pmode, dst, i);
30295 mem = adjust_automodify_address (dstbase, HImode, addr, i);
30296 if ((align & 1) == 0)
30297 emit_move_insn (mem, reg);
30298 else
30299 emit_insn (gen_unaligned_storehi (mem, reg));
30301 i += 2;
30304 /* Handle single byte leftover. */
30305 if (i + 1 == length)
30307 reg = gen_lowpart (QImode, val_reg);
30308 addr = plus_constant (Pmode, dst, i);
30309 mem = adjust_automodify_address (dstbase, QImode, addr, i);
30310 emit_move_insn (mem, reg);
30313 return true;
30316 /* Set a block of memory using vectorization instructions for both
30317 aligned and unaligned cases. We fill the first LENGTH bytes of
30318 the memory area starting from DSTBASE with byte constant VALUE.
30319 ALIGN is the alignment requirement of memory. */
30320 static bool
30321 arm_block_set_vect (rtx dstbase,
30322 unsigned HOST_WIDE_INT length,
30323 unsigned HOST_WIDE_INT value,
30324 unsigned HOST_WIDE_INT align)
30326 /* Check whether we need to use unaligned store instruction. */
30327 if (((align & 3) != 0 || (length & 3) != 0)
30328 /* Check whether unaligned store instruction is available. */
30329 && (!unaligned_access || BYTES_BIG_ENDIAN))
30330 return false;
30332 if ((align & 3) == 0)
30333 return arm_block_set_aligned_vect (dstbase, length, value, align);
30334 else
30335 return arm_block_set_unaligned_vect (dstbase, length, value, align);
30338 /* Expand a string store operation. First we try to do it using
30339 vectorization instructions, then with ARM unaligned access and
30340 double-word stores if profitable. OPERANDS[0] is the destination,
30341 OPERANDS[1] is the number of bytes, OPERANDS[2] is the value to
30342 initialize the memory with, OPERANDS[3] is the known alignment of the
30343 destination. */
30344 bool
30345 arm_gen_setmem (rtx *operands)
30347 rtx dstbase = operands[0];
30348 unsigned HOST_WIDE_INT length;
30349 unsigned HOST_WIDE_INT value;
30350 unsigned HOST_WIDE_INT align;
30352 if (!CONST_INT_P (operands[2]) || !CONST_INT_P (operands[1]))
30353 return false;
30355 length = UINTVAL (operands[1]);
30356 if (length > 64)
30357 return false;
30359 value = (UINTVAL (operands[2]) & 0xFF);
30360 align = UINTVAL (operands[3]);
30361 if (TARGET_NEON && length >= 8
30362 && current_tune->string_ops_prefer_neon
30363 && arm_block_set_vect (dstbase, length, value, align))
30364 return true;
30366 if (!unaligned_access && (align & 3) != 0)
30367 return arm_block_set_unaligned_non_vect (dstbase, length, value, align);
30369 return arm_block_set_aligned_non_vect (dstbase, length, value, align);
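/* For instance, a memset of 15 bytes of 0xab to a word-aligned destination
   reaches this point with LENGTH = 15, VALUE = 0xab and ALIGN = 4, and is
   expanded inline provided one of the helpers above deems it profitable;
   when we return false the generic expansion (typically a call to memset)
   is used instead.  */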
30373 static bool
30374 arm_macro_fusion_p (void)
30376 return current_tune->fusible_ops != tune_params::FUSE_NOTHING;
30379 /* Return true if the two back-to-back sets PREV_SET, CURR_SET are suitable
30380 for MOVW / MOVT macro fusion. */
30382 static bool
30383 arm_sets_movw_movt_fusible_p (rtx prev_set, rtx curr_set)
30385 /* We are trying to fuse
30386 movw imm / movt imm
30387 instructions as a group that gets scheduled together. */
30389 rtx set_dest = SET_DEST (curr_set);
30391 if (GET_MODE (set_dest) != SImode)
30392 return false;
30394 /* We are trying to match:
30395 prev (movw) == (set (reg r0) (const_int imm16))
30396 curr (movt) == (set (zero_extract (reg r0)
30397 (const_int 16)
30398 (const_int 16))
30399 (const_int imm16_1))
30401 prev (movw) == (set (reg r1)
30402 (high (symbol_ref ("SYM"))))
30403 curr (movt) == (set (reg r0)
30404 (lo_sum (reg r1)
30405 (symbol_ref ("SYM")))) */
30407 if (GET_CODE (set_dest) == ZERO_EXTRACT)
30409 if (CONST_INT_P (SET_SRC (curr_set))
30410 && CONST_INT_P (SET_SRC (prev_set))
30411 && REG_P (XEXP (set_dest, 0))
30412 && REG_P (SET_DEST (prev_set))
30413 && REGNO (XEXP (set_dest, 0)) == REGNO (SET_DEST (prev_set)))
30414 return true;
30417 else if (GET_CODE (SET_SRC (curr_set)) == LO_SUM
30418 && REG_P (SET_DEST (curr_set))
30419 && REG_P (SET_DEST (prev_set))
30420 && GET_CODE (SET_SRC (prev_set)) == HIGH
30421 && REGNO (SET_DEST (curr_set)) == REGNO (SET_DEST (prev_set)))
30422 return true;
30424 return false;
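/* Illustrative example (hypothetical user code and assembly, not part
   of GCC): on ARMv7 targets the address of a global is typically
   materialized with a movw/movt pair, e.g.

       movw    r0, #:lower16:example_global
       movt    r0, #:upper16:example_global

   which is the HIGH/LO_SUM shape matched above; with FUSE_MOVW_MOVT
   enabled the two instructions are kept back to back.  */
extern int example_global;

int *
example_address_of_global (void)
{
  return &example_global;
}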
30427 static bool
30428 aarch_macro_fusion_pair_p (rtx_insn* prev, rtx_insn* curr)
30430 rtx prev_set = single_set (prev);
30431 rtx curr_set = single_set (curr);
30433 if (!prev_set
30434 || !curr_set)
30435 return false;
30437 if (any_condjump_p (curr))
30438 return false;
30440 if (!arm_macro_fusion_p ())
30441 return false;
30443 if (current_tune->fusible_ops & tune_params::FUSE_AES_AESMC
30444 && aarch_crypto_can_dual_issue (prev, curr))
30445 return true;
30447 if (current_tune->fusible_ops & tune_params::FUSE_MOVW_MOVT
30448 && arm_sets_movw_movt_fusible_p (prev_set, curr_set))
30449 return true;
30451 return false;
30454 /* Return true iff the instruction fusion described by OP is enabled. */
30455 bool
30456 arm_fusion_enabled_p (tune_params::fuse_ops op)
30458 return current_tune->fusible_ops & op;
30461 /* Implement TARGET_SCHED_CAN_SPECULATE_INSN. Return true if INSN can be
30462 scheduled for speculative execution. Reject the long-running division
30463 and square-root instructions. */
30465 static bool
30466 arm_sched_can_speculate_insn (rtx_insn *insn)
30468 switch (get_attr_type (insn))
30470 case TYPE_SDIV:
30471 case TYPE_UDIV:
30472 case TYPE_FDIVS:
30473 case TYPE_FDIVD:
30474 case TYPE_FSQRTS:
30475 case TYPE_FSQRTD:
30476 case TYPE_NEON_FP_SQRT_S:
30477 case TYPE_NEON_FP_SQRT_D:
30478 case TYPE_NEON_FP_SQRT_S_Q:
30479 case TYPE_NEON_FP_SQRT_D_Q:
30480 case TYPE_NEON_FP_DIV_S:
30481 case TYPE_NEON_FP_DIV_D:
30482 case TYPE_NEON_FP_DIV_S_Q:
30483 case TYPE_NEON_FP_DIV_D_Q:
30484 return false;
30485 default:
30486 return true;
30490 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
30492 static unsigned HOST_WIDE_INT
30493 arm_asan_shadow_offset (void)
30495 return HOST_WIDE_INT_1U << 29;
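/* Illustrative sketch (hypothetical helper, not a GCC interface): with
   the offset above, AddressSanitizer maps an application address to
   its shadow byte as (addr >> 3) + (1 << 29), assuming the default
   shadow scale of 3.  */
#include <stdint.h>

static inline uintptr_t
example_asan_shadow_address (uintptr_t addr)
{
  return (addr >> 3) + ((uintptr_t) 1 << 29);
}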
30499 /* This is a temporary fix for PR60655. Ideally we need
30500 to handle most of these cases in the generic part but
30501 currently we reject minus (..) (sym_ref). We try to
30502 ameliorate the case with minus (sym_ref1) (sym_ref2)
30503 where they are in the same section. */
30505 static bool
30506 arm_const_not_ok_for_debug_p (rtx p)
30508 tree decl_op0 = NULL;
30509 tree decl_op1 = NULL;
30511 if (GET_CODE (p) == UNSPEC)
30512 return true;
30513 if (GET_CODE (p) == MINUS)
30515 if (GET_CODE (XEXP (p, 1)) == SYMBOL_REF)
30517 decl_op1 = SYMBOL_REF_DECL (XEXP (p, 1));
30518 if (decl_op1
30519 && GET_CODE (XEXP (p, 0)) == SYMBOL_REF
30520 && (decl_op0 = SYMBOL_REF_DECL (XEXP (p, 0))))
30522 if ((VAR_P (decl_op1)
30523 || TREE_CODE (decl_op1) == CONST_DECL)
30524 && (VAR_P (decl_op0)
30525 || TREE_CODE (decl_op0) == CONST_DECL))
30526 return (get_variable_section (decl_op1, false)
30527 != get_variable_section (decl_op0, false));
30529 if (TREE_CODE (decl_op1) == LABEL_DECL
30530 && TREE_CODE (decl_op0) == LABEL_DECL)
30531 return (DECL_CONTEXT (decl_op1)
30532 != DECL_CONTEXT (decl_op0));
30535 return true;
30539 return false;
30542 /* Return TRUE if X is a reference to a value in a constant pool. */
30543 extern bool
30544 arm_is_constant_pool_ref (rtx x)
30546 return (MEM_P (x)
30547 && GET_CODE (XEXP (x, 0)) == SYMBOL_REF
30548 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
30551 /* Remember the last target of arm_set_current_function. */
30552 static GTY(()) tree arm_previous_fndecl;
30554 /* Restore or save the TREE_TARGET_GLOBALS from or to NEW_TREE. */
30556 void
30557 save_restore_target_globals (tree new_tree)
30559 /* If we have a previous state, use it. */
30560 if (TREE_TARGET_GLOBALS (new_tree))
30561 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
30562 else if (new_tree == target_option_default_node)
30563 restore_target_globals (&default_target_globals);
30564 else
30566 /* Call target_reinit and save the state for TARGET_GLOBALS. */
30567 TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
30570 arm_option_params_internal ();
30573 /* Invalidate arm_previous_fndecl. */
30575 void
30576 arm_reset_previous_fndecl (void)
30578 arm_previous_fndecl = NULL_TREE;
30581 /* Establish appropriate back-end context for processing the function
30582 FNDECL. The argument might be NULL to indicate processing at top
30583 level, outside of any function scope. */
30585 static void
30586 arm_set_current_function (tree fndecl)
30588 if (!fndecl || fndecl == arm_previous_fndecl)
30589 return;
30591 tree old_tree = (arm_previous_fndecl
30592 ? DECL_FUNCTION_SPECIFIC_TARGET (arm_previous_fndecl)
30593 : NULL_TREE);
30595 tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
30597 /* If current function has no attributes but previous one did,
30598 use the default node. */
30599 if (! new_tree && old_tree)
30600 new_tree = target_option_default_node;
30602 /* If there is nothing to do, return. #pragma GCC reset or #pragma GCC pop to
30603 the default have been handled by save_restore_target_globals from
30604 arm_pragma_target_parse. */
30605 if (old_tree == new_tree)
30606 return;
30608 arm_previous_fndecl = fndecl;
30610 /* First set the target options. */
30611 cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
30613 save_restore_target_globals (new_tree);
30616 /* Implement TARGET_OPTION_PRINT. */
30618 static void
30619 arm_option_print (FILE *file, int indent, struct cl_target_option *ptr)
30621 int flags = ptr->x_target_flags;
30622 const char *fpu_name;
30624 fpu_name = (ptr->x_arm_fpu_index == TARGET_FPU_auto
30625 ? "auto" : all_fpus[ptr->x_arm_fpu_index].name);
30627 fprintf (file, "%*sselected isa %s\n", indent, "",
30628 TARGET_THUMB2_P (flags) ? "thumb2" :
30629 TARGET_THUMB_P (flags) ? "thumb1" :
30630 "arm");
30632 if (ptr->x_arm_arch_string)
30633 fprintf (file, "%*sselected architecture %s\n", indent, "",
30634 ptr->x_arm_arch_string);
30636 if (ptr->x_arm_cpu_string)
30637 fprintf (file, "%*sselected CPU %s\n", indent, "",
30638 ptr->x_arm_cpu_string);
30640 if (ptr->x_arm_tune_string)
30641 fprintf (file, "%*sselected tune %s\n", indent, "",
30642 ptr->x_arm_tune_string);
30644 fprintf (file, "%*sselected fpu %s\n", indent, "", fpu_name);
30647 /* Hook to determine if one function can safely inline another. */
30649 static bool
30650 arm_can_inline_p (tree caller, tree callee)
30652 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
30653 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
30654 bool can_inline = true;
30656 struct cl_target_option *caller_opts
30657 = TREE_TARGET_OPTION (caller_tree ? caller_tree
30658 : target_option_default_node);
30660 struct cl_target_option *callee_opts
30661 = TREE_TARGET_OPTION (callee_tree ? callee_tree
30662 : target_option_default_node);
30664 if (callee_opts == caller_opts)
30665 return true;
30667 /* Callee's ISA features should be a subset of the caller's. */
30668 struct arm_build_target caller_target;
30669 struct arm_build_target callee_target;
30670 caller_target.isa = sbitmap_alloc (isa_num_bits);
30671 callee_target.isa = sbitmap_alloc (isa_num_bits);
30673 arm_configure_build_target (&caller_target, caller_opts, &global_options_set,
30674 false);
30675 arm_configure_build_target (&callee_target, callee_opts, &global_options_set,
30676 false);
30677 if (!bitmap_subset_p (callee_target.isa, caller_target.isa))
30678 can_inline = false;
30680 sbitmap_free (caller_target.isa);
30681 sbitmap_free (callee_target.isa);
30683 /* It is OK to inline between different modes.
30684 Functions with mode-specific instructions, e.g. using asm,
30685 must be explicitly protected with noinline. */
30686 return can_inline;
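/* Illustrative example (hypothetical user code; the attribute strings
   are examples only): the callee's ISA features must be a subset of
   the caller's, so a caller targeting a superset architecture may
   inline a plainer callee, but not the other way around.  */
static inline int __attribute__ ((target ("arch=armv7-a")))
example_plain_callee (int x)
{
  return x + 1;
}

int __attribute__ ((target ("arch=armv7-a+simd")))
example_superset_caller (int x)
{
  /* armv7-a is a subset of armv7-a+simd, so inlining is permitted.  */
  return example_plain_callee (x);
}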
30691 /* Hook to fix a function's alignment when it is affected by a target attribute. */
30691 static void
30692 arm_relayout_function (tree fndecl)
30694 if (DECL_USER_ALIGN (fndecl))
30695 return;
30697 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
30699 if (!callee_tree)
30700 callee_tree = target_option_default_node;
30702 struct cl_target_option *opts = TREE_TARGET_OPTION (callee_tree);
30703 SET_DECL_ALIGN
30704 (fndecl,
30705 FUNCTION_ALIGNMENT (FUNCTION_BOUNDARY_P (opts->x_target_flags)));
30708 /* Inner function to process the attribute((target(...))): take an argument
30709 and set the current options from it. If we have a list, recursively
30710 go over each of its elements. */
30712 static bool
30713 arm_valid_target_attribute_rec (tree args, struct gcc_options *opts)
30715 if (TREE_CODE (args) == TREE_LIST)
30717 bool ret = true;
30719 for (; args; args = TREE_CHAIN (args))
30720 if (TREE_VALUE (args)
30721 && !arm_valid_target_attribute_rec (TREE_VALUE (args), opts))
30722 ret = false;
30723 return ret;
30726 else if (TREE_CODE (args) != STRING_CST)
30728 error ("attribute %<target%> argument not a string");
30729 return false;
30732 char *argstr = ASTRDUP (TREE_STRING_POINTER (args));
30733 char *q;
30735 while ((q = strtok (argstr, ",")) != NULL)
30737 while (ISSPACE (*q)) ++q;
30739 argstr = NULL;
30740 if (!strncmp (q, "thumb", 5))
30741 opts->x_target_flags |= MASK_THUMB;
30743 else if (!strncmp (q, "arm", 3))
30744 opts->x_target_flags &= ~MASK_THUMB;
30746 else if (!strncmp (q, "fpu=", 4))
30748 int fpu_index;
30749 if (! opt_enum_arg_to_value (OPT_mfpu_, q+4,
30750 &fpu_index, CL_TARGET))
30752 error ("invalid fpu for target attribute or pragma %qs", q);
30753 return false;
30755 if (fpu_index == TARGET_FPU_auto)
30757 /* This doesn't really make sense until we support
30758 general dynamic selection of the architecture and all
30759 sub-features. */
30760 sorry ("auto fpu selection not currently permitted here");
30761 return false;
30763 opts->x_arm_fpu_index = (enum fpu_type) fpu_index;
30765 else if (!strncmp (q, "arch=", 5))
30767 char* arch = q+5;
30768 const arch_option *arm_selected_arch
30769 = arm_parse_arch_option_name (all_architectures, "arch", arch);
30771 if (!arm_selected_arch)
30773 error ("invalid architecture for target attribute or pragma %qs", q);
30775 return false;
30778 opts->x_arm_arch_string = xstrndup (arch, strlen (arch));
30780 else if (q[0] == '+')
30782 opts->x_arm_arch_string
30783 = xasprintf ("%s%s", opts->x_arm_arch_string, q);
30785 else
30787 error ("unknown target attribute or pragma %qs", q);
30788 return false;
30792 return true;
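/* Illustrative example (hypothetical user code, names are examples
   only): the parser above accepts comma-separated items of the forms
   "thumb", "arm", "fpu=<name>", "arch=<name>" and "+<extension>",
   the last of which is appended to the current architecture string.  */
void __attribute__ ((target ("arm,fpu=vfpv3-d16")))
example_compiled_as_arm (void)
{
}

void __attribute__ ((target ("thumb,arch=armv7-a+simd")))
example_compiled_as_thumb (void)
{
}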
30795 /* Return a TARGET_OPTION_NODE tree of the target options listed or NULL. */
30797 tree
30798 arm_valid_target_attribute_tree (tree args, struct gcc_options *opts,
30799 struct gcc_options *opts_set)
30801 struct cl_target_option cl_opts;
30803 if (!arm_valid_target_attribute_rec (args, opts))
30804 return NULL_TREE;
30806 cl_target_option_save (&cl_opts, opts);
30807 arm_configure_build_target (&arm_active_target, &cl_opts, opts_set, false);
30808 arm_option_check_internal (opts);
30809 /* Do any overrides, such as global options arch=xxx.
30810 We do this since arm_active_target was overridden. */
30811 arm_option_reconfigure_globals ();
30812 arm_options_perform_arch_sanity_checks ();
30813 arm_option_override_internal (opts, opts_set);
30815 return build_target_option_node (opts);
30818 static void
30819 add_attribute (const char * mode, tree *attributes)
30821 size_t len = strlen (mode);
30822 tree value = build_string (len, mode);
30824 TREE_TYPE (value) = build_array_type (char_type_node,
30825 build_index_type (size_int (len)));
30827 *attributes = tree_cons (get_identifier ("target"),
30828 build_tree_list (NULL_TREE, value),
30829 *attributes);
30832 /* For testing. Insert thumb or arm modes alternately on functions. */
30834 static void
30835 arm_insert_attributes (tree fndecl, tree * attributes)
30837 const char *mode;
30839 if (! TARGET_FLIP_THUMB)
30840 return;
30842 if (TREE_CODE (fndecl) != FUNCTION_DECL || DECL_EXTERNAL(fndecl)
30843 || DECL_BUILT_IN (fndecl) || DECL_ARTIFICIAL (fndecl))
30844 return;
30846 /* Nested definitions must inherit mode. */
30847 if (current_function_decl)
30849 mode = TARGET_THUMB ? "thumb" : "arm";
30850 add_attribute (mode, attributes);
30851 return;
30854 /* If there is already a setting don't change it. */
30855 if (lookup_attribute ("target", *attributes) != NULL)
30856 return;
30858 mode = thumb_flipper ? "thumb" : "arm";
30859 add_attribute (mode, attributes);
30861 thumb_flipper = !thumb_flipper;
30864 /* Hook to validate attribute((target("string"))). */
30866 static bool
30867 arm_valid_target_attribute_p (tree fndecl, tree ARG_UNUSED (name),
30868 tree args, int ARG_UNUSED (flags))
30870 bool ret = true;
30871 struct gcc_options func_options;
30872 tree cur_tree, new_optimize;
30873 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
30875 /* Get the optimization options of the current function. */
30876 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
30878 /* If the function changed the optimization levels as well as setting target
30879 options, start with the optimizations specified. */
30880 if (!func_optimize)
30881 func_optimize = optimization_default_node;
30883 /* Init func_options. */
30884 memset (&func_options, 0, sizeof (func_options));
30885 init_options_struct (&func_options, NULL);
30886 lang_hooks.init_options_struct (&func_options);
30888 /* Initialize func_options to the defaults. */
30889 cl_optimization_restore (&func_options,
30890 TREE_OPTIMIZATION (func_optimize));
30892 cl_target_option_restore (&func_options,
30893 TREE_TARGET_OPTION (target_option_default_node));
30895 /* Set func_options flags with new target mode. */
30896 cur_tree = arm_valid_target_attribute_tree (args, &func_options,
30897 &global_options_set);
30899 if (cur_tree == NULL_TREE)
30900 ret = false;
30902 new_optimize = build_optimization_node (&func_options);
30904 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = cur_tree;
30906 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
30908 finalize_options_struct (&func_options);
30910 return ret;
30913 /* Match an ISA feature bitmap to a named FPU. We always use the
30914 first entry that exactly matches the feature set, so that we
30915 effectively canonicalize the FPU name for the assembler. */
30916 static const char*
30917 arm_identify_fpu_from_isa (sbitmap isa)
30919 auto_sbitmap fpubits (isa_num_bits);
30920 auto_sbitmap cand_fpubits (isa_num_bits);
30922 bitmap_and (fpubits, isa, isa_all_fpubits);
30924 /* If there are no ISA feature bits relating to the FPU, we must be
30925 doing soft-float. */
30926 if (bitmap_empty_p (fpubits))
30927 return "softvfp";
30929 for (unsigned int i = 0; i < TARGET_FPU_auto; i++)
30931 arm_initialize_isa (cand_fpubits, all_fpus[i].isa_bits);
30932 if (bitmap_equal_p (fpubits, cand_fpubits))
30933 return all_fpus[i].name;
30935 /* We must find an entry, or things have gone wrong. */
30936 gcc_unreachable ();
30939 /* Implement ASM_DECLARE_FUNCTION_NAME. Output the ISA features used
30940 by the function fndecl. */
30941 void
30942 arm_declare_function_name (FILE *stream, const char *name, tree decl)
30944 tree target_parts = DECL_FUNCTION_SPECIFIC_TARGET (decl);
30946 struct cl_target_option *targ_options;
30947 if (target_parts)
30948 targ_options = TREE_TARGET_OPTION (target_parts);
30949 else
30950 targ_options = TREE_TARGET_OPTION (target_option_current_node);
30951 gcc_assert (targ_options);
30953 /* Only update the assembler .arch string if it is distinct from the last
30954 such string we printed. arch_to_print is set conditionally in case
30955 targ_options->x_arm_arch_string is NULL which can be the case
30956 when cc1 is invoked directly without passing the -march option. */
30957 std::string arch_to_print;
30958 if (targ_options->x_arm_arch_string)
30959 arch_to_print = targ_options->x_arm_arch_string;
30961 if (arch_to_print != arm_last_printed_arch_string)
30963 std::string arch_name
30964 = arch_to_print.substr (0, arch_to_print.find ("+"));
30965 asm_fprintf (asm_out_file, "\t.arch %s\n", arch_name.c_str ());
30966 const arch_option *arch
30967 = arm_parse_arch_option_name (all_architectures, "-march",
30968 targ_options->x_arm_arch_string);
30969 auto_sbitmap opt_bits (isa_num_bits);
30971 gcc_assert (arch);
30972 if (arch->common.extensions)
30974 for (const struct cpu_arch_extension *opt = arch->common.extensions;
30975 opt->name != NULL;
30976 opt++)
30978 if (!opt->remove)
30980 arm_initialize_isa (opt_bits, opt->isa_bits);
30981 if (bitmap_subset_p (opt_bits, arm_active_target.isa)
30982 && !bitmap_subset_p (opt_bits, isa_all_fpubits))
30983 asm_fprintf (asm_out_file, "\t.arch_extension %s\n",
30984 opt->name);
30989 arm_last_printed_arch_string = arch_to_print;
30992 fprintf (stream, "\t.syntax unified\n");
30994 if (TARGET_THUMB)
30996 if (is_called_in_ARM_mode (decl)
30997 || (TARGET_THUMB1 && !TARGET_THUMB1_ONLY
30998 && cfun->is_thunk))
30999 fprintf (stream, "\t.code 32\n");
31000 else if (TARGET_THUMB1)
31001 fprintf (stream, "\t.code\t16\n\t.thumb_func\n");
31002 else
31003 fprintf (stream, "\t.thumb\n\t.thumb_func\n");
31005 else
31006 fprintf (stream, "\t.arm\n");
31008 std::string fpu_to_print
31009 = TARGET_SOFT_FLOAT
31010 ? "softvfp" : arm_identify_fpu_from_isa (arm_active_target.isa);
31012 if (fpu_to_print != arm_last_printed_fpu_string)
31014 asm_fprintf (asm_out_file, "\t.fpu %s\n", fpu_to_print.c_str ());
31015 arm_last_printed_fpu_string = fpu_to_print;
31018 if (TARGET_POKE_FUNCTION_NAME)
31019 arm_poke_function_name (stream, (const char *) name);
31022 /* If MEM is in the form of [base+offset], extract the two parts
31023 of the address into BASE and OFFSET and return true; otherwise
31024 clear BASE and OFFSET and return false. */
31026 static bool
31027 extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
31029 rtx addr;
31031 gcc_assert (MEM_P (mem));
31033 addr = XEXP (mem, 0);
31035 /* Strip off const from addresses like (const (addr)). */
31036 if (GET_CODE (addr) == CONST)
31037 addr = XEXP (addr, 0);
31039 if (GET_CODE (addr) == REG)
31041 *base = addr;
31042 *offset = const0_rtx;
31043 return true;
31046 if (GET_CODE (addr) == PLUS
31047 && GET_CODE (XEXP (addr, 0)) == REG
31048 && CONST_INT_P (XEXP (addr, 1)))
31050 *base = XEXP (addr, 0);
31051 *offset = XEXP (addr, 1);
31052 return true;
31055 *base = NULL_RTX;
31056 *offset = NULL_RTX;
31058 return false;
31061 /* If INSN is a load or store whose address has the form [base+offset],
31062 extract the two parts into BASE and OFFSET. IS_LOAD is set
31063 to TRUE if it is a load. Return TRUE if INSN is such an instruction,
31064 otherwise return FALSE. */
31066 static bool
31067 fusion_load_store (rtx_insn *insn, rtx *base, rtx *offset, bool *is_load)
31069 rtx x, dest, src;
31071 gcc_assert (INSN_P (insn));
31072 x = PATTERN (insn);
31073 if (GET_CODE (x) != SET)
31074 return false;
31076 src = SET_SRC (x);
31077 dest = SET_DEST (x);
31078 if (GET_CODE (src) == REG && GET_CODE (dest) == MEM)
31080 *is_load = false;
31081 extract_base_offset_in_addr (dest, base, offset);
31083 else if (GET_CODE (src) == MEM && GET_CODE (dest) == REG)
31085 *is_load = true;
31086 extract_base_offset_in_addr (src, base, offset);
31088 else
31089 return false;
31091 return (*base != NULL_RTX && *offset != NULL_RTX);
31094 /* Implement the TARGET_SCHED_FUSION_PRIORITY hook.
31096 Currently we only support fusing ldr and str instructions, so FUSION_PRI
31097 and PRI are only calculated for these instructions. For other instructions,
31098 FUSION_PRI and PRI are simply set to MAX_PRI. In the future, other kinds of
31099 instruction fusion can be supported by returning different priorities.
31101 It's important that irrelevant instructions get the largest FUSION_PRI. */
31103 static void
31104 arm_sched_fusion_priority (rtx_insn *insn, int max_pri,
31105 int *fusion_pri, int *pri)
31107 int tmp, off_val;
31108 bool is_load;
31109 rtx base, offset;
31111 gcc_assert (INSN_P (insn));
31113 tmp = max_pri - 1;
31114 if (!fusion_load_store (insn, &base, &offset, &is_load))
31116 *pri = tmp;
31117 *fusion_pri = tmp;
31118 return;
31121 /* Load goes first. */
31122 if (is_load)
31123 *fusion_pri = tmp - 1;
31124 else
31125 *fusion_pri = tmp - 2;
31127 tmp /= 2;
31129 /* INSN with smaller base register goes first. */
31130 tmp -= ((REGNO (base) & 0xff) << 20);
31132 /* INSN with smaller offset goes first. */
31133 off_val = (int)(INTVAL (offset));
31134 if (off_val >= 0)
31135 tmp -= (off_val & 0xfffff);
31136 else
31137 tmp += ((- off_val) & 0xfffff);
31139 *pri = tmp;
31140 return;
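/* Illustrative sketch (hypothetical helper, not a GCC interface): this
   mirrors the PRI computation above for a load/store with base
   register number BASE_REGNO and byte offset OFFSET, so that accesses
   off the same base register are ranked by offset.  For instance, with
   MAX_PRI == 100 and base register r0, an access at [r0, #4] yields 45
   and one at [r0, #8] yields 41.  */
static int
example_fusion_pri (int max_pri, int base_regno, int offset)
{
  int tmp = (max_pri - 1) / 2;

  tmp -= (base_regno & 0xff) << 20;
  if (offset >= 0)
    tmp -= offset & 0xfffff;
  else
    tmp += (-offset) & 0xfffff;
  return tmp;
}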
31144 /* Construct and return a PARALLEL RTX vector with elements numbering the
31145 lanes of either the high (HIGH == TRUE) or low (HIGH == FALSE) half of
31146 the vector - from the perspective of the architecture. This does not
31147 line up with GCC's perspective on lane numbers, so we end up with
31148 different masks depending on our target endian-ness. The diagram
31149 below may help. We must draw the distinction when building masks
31150 which select one half of the vector. An instruction selecting
31151 architectural low-lanes for a big-endian target must be described using
31152 a mask selecting GCC high-lanes.
31154 Big-Endian Little-Endian
31156 GCC 0 1 2 3 3 2 1 0
31157 | x | x | x | x | | x | x | x | x |
31158 Architecture 3 2 1 0 3 2 1 0
31160 Low Mask: { 2, 3 } { 0, 1 }
31161 High Mask: { 0, 1 } { 2, 3 }
31165 arm_simd_vect_par_cnst_half (machine_mode mode, bool high)
31167 int nunits = GET_MODE_NUNITS (mode);
31168 rtvec v = rtvec_alloc (nunits / 2);
31169 int high_base = nunits / 2;
31170 int low_base = 0;
31171 int base;
31172 rtx t1;
31173 int i;
31175 if (BYTES_BIG_ENDIAN)
31176 base = high ? low_base : high_base;
31177 else
31178 base = high ? high_base : low_base;
31180 for (i = 0; i < nunits / 2; i++)
31181 RTVEC_ELT (v, i) = GEN_INT (base + i);
31183 t1 = gen_rtx_PARALLEL (mode, v);
31184 return t1;
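/* Worked example (hypothetical helper, not a GCC interface): for a
   four-lane vector (nunits == 4) the function above builds the masks
   from the diagram in the comment:

       big-endian     high: { 0, 1 }    low: { 2, 3 }
       little-endian  high: { 2, 3 }    low: { 0, 1 }

   The helper below mirrors only the base-lane selection.  */
static int
example_simd_half_base (int nunits, int big_endian, int high)
{
  int high_base = nunits / 2;
  int low_base = 0;

  if (big_endian)
    return high ? low_base : high_base;
  return high ? high_base : low_base;
}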
31187 /* Check OP for validity as a PARALLEL RTX vector with elements
31188 numbering the lanes of either the high (HIGH == TRUE) or low (HIGH == FALSE)
31189 half of the vector, from the perspective of the architecture. See the
31190 diagram above arm_simd_vect_par_cnst_half for more details. */
31192 bool
31193 arm_simd_check_vect_par_cnst_half_p (rtx op, machine_mode mode,
31194 bool high)
31196 rtx ideal = arm_simd_vect_par_cnst_half (mode, high);
31197 HOST_WIDE_INT count_op = XVECLEN (op, 0);
31198 HOST_WIDE_INT count_ideal = XVECLEN (ideal, 0);
31199 int i = 0;
31201 if (!VECTOR_MODE_P (mode))
31202 return false;
31204 if (count_op != count_ideal)
31205 return false;
31207 for (i = 0; i < count_ideal; i++)
31209 rtx elt_op = XVECEXP (op, 0, i);
31210 rtx elt_ideal = XVECEXP (ideal, 0, i);
31212 if (!CONST_INT_P (elt_op)
31213 || INTVAL (elt_ideal) != INTVAL (elt_op))
31214 return false;
31216 return true;
31219 /* Can output mi_thunk for all cases except for non-zero vcall_offset
31220 in Thumb1. */
31221 static bool
31222 arm_can_output_mi_thunk (const_tree, HOST_WIDE_INT, HOST_WIDE_INT vcall_offset,
31223 const_tree)
31225 /* For now, we punt and do not handle this for TARGET_THUMB1. */
31226 if (vcall_offset && TARGET_THUMB1)
31227 return false;
31229 /* Otherwise ok. */
31230 return true;
31233 /* Generate RTL for a conditional branch with rtx comparison CODE in
31234 mode CC_MODE. The destination of the unlikely conditional branch
31235 is LABEL_REF. */
31237 void
31238 arm_gen_unlikely_cbranch (enum rtx_code code, machine_mode cc_mode,
31239 rtx label_ref)
31241 rtx x;
31242 x = gen_rtx_fmt_ee (code, VOIDmode,
31243 gen_rtx_REG (cc_mode, CC_REGNUM),
31244 const0_rtx);
31246 x = gen_rtx_IF_THEN_ELSE (VOIDmode, x,
31247 gen_rtx_LABEL_REF (VOIDmode, label_ref),
31248 pc_rtx);
31249 emit_unlikely_jump (gen_rtx_SET (pc_rtx, x));
31252 /* Implement the TARGET_ASM_ELF_FLAGS_NUMERIC hook.
31254 For pure-code sections there is no letter code for this attribute, so
31255 output all the section flags numerically when this is needed. */
31257 static bool
31258 arm_asm_elf_flags_numeric (unsigned int flags, unsigned int *num)
31261 if (flags & SECTION_ARM_PURECODE)
31263 *num = 0x20000000;
31265 if (!(flags & SECTION_DEBUG))
31266 *num |= 0x2;
31267 if (flags & SECTION_EXCLUDE)
31268 *num |= 0x80000000;
31269 if (flags & SECTION_WRITE)
31270 *num |= 0x1;
31271 if (flags & SECTION_CODE)
31272 *num |= 0x4;
31273 if (flags & SECTION_MERGE)
31274 *num |= 0x10;
31275 if (flags & SECTION_STRINGS)
31276 *num |= 0x20;
31277 if (flags & SECTION_TLS)
31278 *num |= 0x400;
31279 if (HAVE_COMDAT_GROUP && (flags & SECTION_LINKONCE))
31280 *num |= 0x200;
31282 return true;
31285 return false;
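/* Worked example (hypothetical helper; the values are taken from the
   mapping above): a pure-code, allocatable, executable text section
   combines SHF_ARM_PURECODE, SHF_ALLOC and SHF_EXECINSTR, which the
   hook reports numerically as 0x20000000 | 0x2 | 0x4 == 0x20000006.  */
static unsigned int
example_purecode_text_flags (void)
{
  return 0x20000000u | 0x2u | 0x4u;	/* 0x20000006 */
}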
31288 /* Implement the TARGET_ASM_FUNCTION_SECTION hook.
31290 If pure-code is passed as an option, make sure all functions are in
31291 sections that have the SHF_ARM_PURECODE attribute. */
31293 static section *
31294 arm_function_section (tree decl, enum node_frequency freq,
31295 bool startup, bool exit)
31297 const char * section_name;
31298 section * sec;
31300 if (!decl || TREE_CODE (decl) != FUNCTION_DECL)
31301 return default_function_section (decl, freq, startup, exit);
31303 if (!target_pure_code)
31304 return default_function_section (decl, freq, startup, exit);
31307 section_name = DECL_SECTION_NAME (decl);
31309 /* If a function is not in a named section then it falls under the 'default'
31310 text section, also known as '.text'. We can preserve previous behavior as
31311 the default text section already has the SHF_ARM_PURECODE section
31312 attribute. */
31313 if (!section_name)
31315 section *default_sec = default_function_section (decl, freq, startup,
31316 exit);
31318 /* If default_sec is not null, then it must be a special section like for
31319 example .text.startup. We set the pure-code attribute and return the
31320 same section to preserve existing behavior. */
31321 if (default_sec)
31322 default_sec->common.flags |= SECTION_ARM_PURECODE;
31323 return default_sec;
31326 /* Otherwise look whether a section has already been created with
31327 'section_name'. */
31328 sec = get_named_section (decl, section_name, 0);
31329 if (!sec)
31330 /* If that is not the case passing NULL as the section's name to
31331 'get_named_section' will create a section with the declaration's
31332 section name. */
31333 sec = get_named_section (decl, NULL, 0);
31335 /* Set the SHF_ARM_PURECODE attribute. */
31336 sec->common.flags |= SECTION_ARM_PURECODE;
31338 return sec;
31341 /* Implements the TARGET_SECTION_TYPE_FLAGS hook.
31343 If DECL is a function declaration and pure-code is passed as an option
31344 then add the SHF_ARM_PURECODE attribute to the section flags. NAME is the
31345 section's name and RELOC indicates whether the declaration's initializer may
31346 contain runtime relocations. */
31348 static unsigned int
31349 arm_elf_section_type_flags (tree decl, const char *name, int reloc)
31351 unsigned int flags = default_section_type_flags (decl, name, reloc);
31353 if (decl && TREE_CODE (decl) == FUNCTION_DECL && target_pure_code)
31354 flags |= SECTION_ARM_PURECODE;
31356 return flags;
31359 /* Generate call to __aeabi_[mode]divmod (op0, op1). */
31361 static void
31362 arm_expand_divmod_libfunc (rtx libfunc, machine_mode mode,
31363 rtx op0, rtx op1,
31364 rtx *quot_p, rtx *rem_p)
31366 if (mode == SImode)
31367 gcc_assert (!TARGET_IDIV);
31369 scalar_int_mode libval_mode
31370 = smallest_int_mode_for_size (2 * GET_MODE_BITSIZE (mode));
31372 rtx libval = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
31373 libval_mode,
31374 op0, GET_MODE (op0),
31375 op1, GET_MODE (op1));
31377 rtx quotient = simplify_gen_subreg (mode, libval, libval_mode, 0);
31378 rtx remainder = simplify_gen_subreg (mode, libval, libval_mode,
31379 GET_MODE_SIZE (mode));
31381 gcc_assert (quotient);
31382 gcc_assert (remainder);
31384 *quot_p = quotient;
31385 *rem_p = remainder;
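/* Illustrative sketch (hypothetical user-level code, not a GCC
   interface): the AEABI divmod helpers return quotient and remainder
   packed into a value twice the operand width -- quotient in the low
   half (r0), remainder in the high half (r1) for the 32-bit variants --
   which is what the double-width libcall value and the two subreg
   extractions above model.  Declaring the helper as returning a 64-bit
   integer is an approximation that assumes a little-endian AAPCS
   target.  */
extern unsigned long long __aeabi_uidivmod (unsigned num, unsigned den);

static void
example_uidivmod (unsigned num, unsigned den, unsigned *quot, unsigned *rem)
{
  unsigned long long packed = __aeabi_uidivmod (num, den);

  *quot = (unsigned) packed;		/* low half, r0 */
  *rem = (unsigned) (packed >> 32);	/* high half, r1 */
}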
31388 /* This function checks for the availability of the coprocessor builtin passed
31389 in BUILTIN for the current target. Returns true if it is available and
31390 false otherwise. If a BUILTIN is passed for which this function has not
31391 been implemented, it will cause an internal compiler error (gcc_unreachable). */
31393 bool
31394 arm_coproc_builtin_available (enum unspecv builtin)
31396 /* None of these builtins are available in Thumb mode if the target only
31397 supports Thumb-1. */
31398 if (TARGET_THUMB1)
31399 return false;
31401 switch (builtin)
31403 case VUNSPEC_CDP:
31404 case VUNSPEC_LDC:
31405 case VUNSPEC_LDCL:
31406 case VUNSPEC_STC:
31407 case VUNSPEC_STCL:
31408 case VUNSPEC_MCR:
31409 case VUNSPEC_MRC:
31410 if (arm_arch4)
31411 return true;
31412 break;
31413 case VUNSPEC_CDP2:
31414 case VUNSPEC_LDC2:
31415 case VUNSPEC_LDC2L:
31416 case VUNSPEC_STC2:
31417 case VUNSPEC_STC2L:
31418 case VUNSPEC_MCR2:
31419 case VUNSPEC_MRC2:
31420 /* Only present in ARMv5*, ARMv6 (but not ARMv6-M), ARMv7* and
31421 ARMv8-{A,M}. */
31422 if (arm_arch5)
31423 return true;
31424 break;
31425 case VUNSPEC_MCRR:
31426 case VUNSPEC_MRRC:
31427 /* Only present in ARMv5TE, ARMv6 (but not ARMv6-M), ARMv7* and
31428 ARMv8-{A,M}. */
31429 if (arm_arch6 || arm_arch5te)
31430 return true;
31431 break;
31432 case VUNSPEC_MCRR2:
31433 case VUNSPEC_MRRC2:
31434 if (arm_arch6)
31435 return true;
31436 break;
31437 default:
31438 gcc_unreachable ();
31440 return false;
31443 /* This function returns true if OP is a valid memory operand for the ldc and
31444 stc coprocessor instructions and false otherwise. */
31446 bool
31447 arm_coproc_ldc_stc_legitimate_address (rtx op)
31449 HOST_WIDE_INT range;
31450 /* Has to be a memory operand. */
31451 if (!MEM_P (op))
31452 return false;
31454 op = XEXP (op, 0);
31456 /* We accept registers. */
31457 if (REG_P (op))
31458 return true;
31460 switch (GET_CODE (op))
31462 case PLUS:
31464 /* Or registers with an offset. */
31465 if (!REG_P (XEXP (op, 0)))
31466 return false;
31468 op = XEXP (op, 1);
31470 /* The offset must be an immediate though. */
31471 if (!CONST_INT_P (op))
31472 return false;
31474 range = INTVAL (op);
31476 /* Within the range of [-1020,1020]. */
31477 if (!IN_RANGE (range, -1020, 1020))
31478 return false;
31480 /* And a multiple of 4. */
31481 return (range % 4) == 0;
31483 case PRE_INC:
31484 case POST_INC:
31485 case PRE_DEC:
31486 case POST_DEC:
31487 return REG_P (XEXP (op, 0));
31488 default:
31489 gcc_unreachable ();
31491 return false;
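/* Illustrative sketch (hypothetical helper, not a GCC interface): the
   immediate offset accepted above must lie in [-1020, 1020] and be a
   multiple of 4, matching the 8-bit word-scaled offset field of the
   LDC/STC encodings.  */
static int
example_ldc_stc_offset_ok (long offset)
{
  return offset >= -1020 && offset <= 1020 && offset % 4 == 0;
}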
31494 /* Implement TARGET_CAN_CHANGE_MODE_CLASS.
31496 In VFPv1, VFP registers could only be accessed in the mode they were
31497 set, so subregs would be invalid there. However, we don't support
31498 VFPv1 at the moment, and the restriction was lifted in VFPv2.
31500 In big-endian mode, modes greater than word size (e.g. DFmode) are stored in
31501 VFP registers in little-endian order. We can't describe that accurately to
31502 GCC, so avoid taking subregs of such values.
31504 The only exception is going from a 128-bit to a 64-bit type. In that
31505 case the data layout happens to be consistent for big-endian, so we
31506 explicitly allow that case. */
31508 static bool
31509 arm_can_change_mode_class (machine_mode from, machine_mode to,
31510 reg_class_t rclass)
31512 if (TARGET_BIG_END
31513 && !(GET_MODE_SIZE (from) == 16 && GET_MODE_SIZE (to) == 8)
31514 && (GET_MODE_SIZE (from) > UNITS_PER_WORD
31515 || GET_MODE_SIZE (to) > UNITS_PER_WORD)
31516 && reg_classes_intersect_p (VFP_REGS, rclass))
31517 return false;
31518 return true;
31521 /* Implement TARGET_CONSTANT_ALIGNMENT. Make strings word-aligned so
31522 strcpy from constants will be faster. */
31524 static HOST_WIDE_INT
31525 arm_constant_alignment (const_tree exp, HOST_WIDE_INT align)
31527 unsigned int factor = (TARGET_THUMB || ! arm_tune_xscale ? 1 : 2);
31528 if (TREE_CODE (exp) == STRING_CST && !optimize_size)
31529 return MAX (align, BITS_PER_WORD * factor);
31530 return align;
31533 #if CHECKING_P
31534 namespace selftest {
31536 /* Scan the static data tables generated by parsecpu.awk looking for
31537 potential issues with the data. We primarily check for
31538 inconsistencies in the option extensions at present (extensions
31539 that duplicate others but aren't marked as aliases). Furthermore,
31540 for correct canonicalization later options must never be a subset
31541 of an earlier option. Any extension should also only specify other
31542 feature bits and never an architecture bit. The architecture is inferred
31543 from the declaration of the extension. */
31544 static void
31545 arm_test_cpu_arch_data (void)
31547 const arch_option *arch;
31548 const cpu_option *cpu;
31549 auto_sbitmap target_isa (isa_num_bits);
31550 auto_sbitmap isa1 (isa_num_bits);
31551 auto_sbitmap isa2 (isa_num_bits);
31553 for (arch = all_architectures; arch->common.name != NULL; ++arch)
31555 const cpu_arch_extension *ext1, *ext2;
31557 if (arch->common.extensions == NULL)
31558 continue;
31560 arm_initialize_isa (target_isa, arch->common.isa_bits);
31562 for (ext1 = arch->common.extensions; ext1->name != NULL; ++ext1)
31564 if (ext1->alias)
31565 continue;
31567 arm_initialize_isa (isa1, ext1->isa_bits);
31568 for (ext2 = ext1 + 1; ext2->name != NULL; ++ext2)
31570 if (ext2->alias || ext1->remove != ext2->remove)
31571 continue;
31573 arm_initialize_isa (isa2, ext2->isa_bits);
31574 /* If the option is a subset of the parent option, it doesn't
31575 add anything and so isn't useful. */
31576 ASSERT_TRUE (!bitmap_subset_p (isa2, isa1));
31578 /* If the extension specifies any architectural bits then
31579 disallow it. Extensions should only specify feature bits. */
31580 ASSERT_TRUE (!bitmap_intersect_p (isa2, target_isa));
31585 for (cpu = all_cores; cpu->common.name != NULL; ++cpu)
31587 const cpu_arch_extension *ext1, *ext2;
31589 if (cpu->common.extensions == NULL)
31590 continue;
31592 arm_initialize_isa (target_isa, cpu->common.isa_bits);
31594 for (ext1 = cpu->common.extensions; ext1->name != NULL; ++ext1)
31596 if (ext1->alias)
31597 continue;
31599 arm_initialize_isa (isa1, ext1->isa_bits);
31600 for (ext2 = ext1 + 1; ext2->name != NULL; ++ext2)
31602 if (ext2->alias || ext1->remove != ext2->remove)
31603 continue;
31605 arm_initialize_isa (isa2, ext2->isa_bits);
31606 /* If the option is a subset of the parent option, it doesn't
31607 add anything and so isn't useful. */
31608 ASSERT_TRUE (!bitmap_subset_p (isa2, isa1));
31610 /* If the extension specifies any architectural bits then
31611 disallow it. Extensions should only specify feature bits. */
31612 ASSERT_TRUE (!bitmap_intersect_p (isa2, target_isa));
31618 /* Scan the static data tables generated by parsecpu.awk looking for
31619 potential issues with the data. Here we check for consistency between the
31620 fpu bits, in particular we check that ISA_ALL_FPU_INTERNAL does not contain
31621 a feature bit that is not defined by any FPU flag. */
31622 static void
31623 arm_test_fpu_data (void)
31625 auto_sbitmap isa_all_fpubits (isa_num_bits);
31626 auto_sbitmap fpubits (isa_num_bits);
31627 auto_sbitmap tmpset (isa_num_bits);
31629 static const enum isa_feature fpu_bitlist[]
31630 = { ISA_ALL_FPU_INTERNAL, isa_nobit };
31631 arm_initialize_isa (isa_all_fpubits, fpu_bitlist);
31633 for (unsigned int i = 0; i < TARGET_FPU_auto; i++)
31635 arm_initialize_isa (fpubits, all_fpus[i].isa_bits);
31636 bitmap_and_compl (tmpset, isa_all_fpubits, fpubits);
31637 bitmap_clear (isa_all_fpubits);
31638 bitmap_copy (isa_all_fpubits, tmpset);
31641 if (!bitmap_empty_p (isa_all_fpubits))
31643 fprintf (stderr, "Error: found feature bits in the ALL_FPU_INTERNAL"
31644 " group that are not defined by any FPU.\n"
31645 " Check your arm-cpus.in.\n");
31646 ASSERT_TRUE (bitmap_empty_p (isa_all_fpubits));
31650 static void
31651 arm_run_selftests (void)
31653 arm_test_cpu_arch_data ();
31654 arm_test_fpu_data ();
31656 } /* Namespace selftest. */
31658 #undef TARGET_RUN_TARGET_SELFTESTS
31659 #define TARGET_RUN_TARGET_SELFTESTS selftest::arm_run_selftests
31660 #endif /* CHECKING_P */
31662 struct gcc_target targetm = TARGET_INITIALIZER;
31664 #include "gt-arm.h"